Commit message:
"planemo upload for repository https://github.com/ohsu-comp-bio/UNetCoreograph commit fb90660a1805b3f68fcff80d525b5459c3f7dfd6-dirty"
added:
UNet2DtCycifTRAINCoreograph.py
UNetCoreograph.py
coreograph.xml
images/TMA_MAP.jpg
images/TMA_MAP.tif
images/probmap.jpg
images/probmap.tif
images/raw.jpg
images/raw.tif
macros.xml
model/checkpoint
model/datasetMean.data
model/datasetStDev.data
model/hp.data
model/model.ckpt.data-00000-of-00001
model/model.ckpt.index
model/model.ckpt.meta
toolbox/PartitionOfImage.py
toolbox/__pycache__/PartitionOfImage.cpython-36.pyc
toolbox/__pycache__/__init__.cpython-36.pyc
toolbox/__pycache__/ftools.cpython-36.pyc
toolbox/__pycache__/imtools.cpython-36.pyc
toolbox/ftools.py
toolbox/imtools.py
diff -r 000000000000 -r 99308601eaa6 UNet2DtCycifTRAINCoreograph.py
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/UNet2DtCycifTRAINCoreograph.py  Wed May 19 21:34:38 2021 +0000
@@ -0,0 +1,586 @@
+import numpy as np
+from scipy import misc
+import tensorflow as tf
+import shutil
+import scipy.io as sio
+import os,fnmatch,PIL,glob
+
+import sys
+sys.path.insert(0, 'C:\\Users\\Public\\Documents\\ImageScience')
+from toolbox.imtools import *
+from toolbox.ftools import *
+from toolbox.PartitionOfImage import PI2D
+
+
+def concat3(lst):
+        return tf.concat(lst,3)
+
+class UNet2D:
+    hp = None # hyper-parameters
+    nn = None # network
+    tfTraining = None # if training or not (to handle batch norm)
+    tfData = None # data placeholder
+    Session = None
+    DatasetMean = 0
+    DatasetStDev = 0
[... middle of the hunk truncated in this view: setupWithHP()/setup(imSize, nChannels, nClasses, nOut0, featMapsFact, downSampFact, kernelSize, nExtraConvs, stdDev0, nDownSampLayers, batchSize) builds the TensorFlow 1.x graph (placeholders, batch-normalized residual down-sampling blocks, a bottom layer, and the remaining network layers); the other methods handle training, checkpointing, and tiled inference through PI2D and a tf.Session ...]
+if __name__ == '__main__':
+    logPath = 'D:\\LSP\\UNet\\Coreograph\\TFLogs'
+    modelPath = 'D:\\LSP\\Coreograph\\model-4layersMaskAug20New'
+    pmPath = 'D:\\LSP\\UNet\\Coreograph\\TFProbMaps'
+
+    # ----- test 1 -----
+    imPath = 'Z:/IDAC/Clarence/LSP/CyCIF/TMA/training data custom unaveraged'
+    UNet2D.setup(128, 1, 2, 20, 2, 2, 3, 2, 0.03, 4, 32)
+    UNet2D.train(imPath, logPath, modelPath, pmPath, 2053, 513, 641, True, 10, 1, 1)
+    UNet2D.deploy(imPath,100,modelPath,pmPath,1,1)
[... remaining commented-out experiments (tests 1-4) omitted; no newline at end of file ...]
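Editor's note (not part of the changeset): a minimal sketch of how the entry points visible in this hunk are driven, with the positional hyper-parameters of UNet2D.setup() labelled from its signature. All paths are placeholders, and the meaning of the train()/deploy() arguments beyond what the hunk shows is not asserted here.

# Sketch only: mirrors the __main__ block of UNet2DtCycifTRAINCoreograph.py.
# Paths are placeholders; run from the repository root so `toolbox` imports resolve.
from UNet2DtCycifTRAINCoreograph import UNet2D

imPath    = '/data/TMA/trainingData'   # placeholder training-image folder
logPath   = '/data/TMA/TFLogs'         # placeholder TensorBoard log folder
modelPath = '/data/TMA/model'          # placeholder checkpoint folder
pmPath    = '/data/TMA/TFProbMaps'     # placeholder probability-map folder

# setup(imSize, nChannels, nClasses, nOut0, featMapsFact, downSampFact,
#       kernelSize, nExtraConvs, stdDev0, nDownSampLayers, batchSize)
UNet2D.setup(128, 1, 2, 20, 2, 2, 3, 2, 0.03, 4, 32)

# Same positional arguments as the committed script; their individual meanings
# are not spelled out in the part of the hunk shown above.
UNet2D.train(imPath, logPath, modelPath, pmPath, 2053, 513, 641, True, 10, 1, 1)
UNet2D.deploy(imPath, 100, modelPath, pmPath, 1, 1)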
diff -r 000000000000 -r 99308601eaa6 UNetCoreograph.py
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/UNetCoreograph.py  Wed May 19 21:34:38 2021 +0000
@@ -0,0 +1,802 @@
+import numpy as np
+from scipy import misc as sm
+import shutil
+import scipy.io as sio
+import os
+import skimage.exposure as sk
+import cv2
+import argparse
+import pytiff
+import tifffile
+import tensorflow as tf
+from skimage.morphology import *
+from skimage.exposure import rescale_intensity
+from skimage.segmentation import chan_vese, find_boundaries, morphological_chan_vese
+from skimage.measure import regionprops,label, find_contours
+from skimage.transform import resize
+from skimage.filters import gaussian
+from skimage.feature import peak_local_max,blob_log
+from skimage.color import label2rgb
+import skimage.io as skio
+from skimage import img_as_bool
+from skimage.draw import circle_perimeter
+from scipy.ndimage.filters import uniform_filter
+from scipy.ndimage import gaussian_laplace
+from os.path import *
+from os import listdir, makedirs, remove
+
+import sys
+from typing import Any
+
+#sys.path.insert(0, 'C:\\Users\\Public\\Documents\\ImageScience')
+from toolbox.imtools import *
+from toolbox.ftools import *
+from toolbox.PartitionOfImage import PI2D
[... middle of the hunk truncated in this view: a copy of the UNet2D class, an argparse command-line interface whose flags match the Galaxy wrapper below, probability-map inference, and core detection on the down-sampled probability map via blob_log, distance transform, and watershed ...]
[... visible tail: for each detected core, the requested channel range is cropped from the source image with pytiff and appended to <core>.tif; the core mask is refined with coreSegmenterOutput() and written as <core>_mask.tif; numbered core outlines are drawn on a down-sampled overview that is saved as TMA_MAP.tif ("Segmented all cores!") ...]
diff -r 000000000000 -r 99308601eaa6 coreograph.xml
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/coreograph.xml  Wed May 19 21:34:38 2021 +0000
@@ -0,0 +1,56 @@
+<tool id="unet_coreograph" name="UNetCoreograph" version="@VERSION@.3" profile="17.09">
+    <description>Coreograph uses UNet, a deep learning model, to identify complete/incomplete tissue cores on a tissue microarray. It has been trained on 9 TMA slides of different sizes and tissue types.</description>
+    <macros>
+        <import>macros.xml</import>
+    </macros>
+
+    <expand macro="requirements"/>
+    @VERSION_CMD@
+
+    <command detect_errors="exit_code"><![CDATA[
+    #set $type_corrected = str($source_image)[:-3]+'ome.tif'
+    ln -s $source_image `basename $type_corrected`;
+
+    @CMD_BEGIN@
+    --imagePath `basename $type_corrected`
+    --downsampleFactor $downsamplefactor
+    --channel $channel
+    --buffer $buffer
+    --sensitivity $sensitivity
+
+    ##if $usegrid
+    ##--useGrid
+    ##end if
+
+    #if $cluster
+    --cluster
+    #end if
+
+    --outputPath .;
+
+    ]]></command>
+
+
+    <inputs>
+        <param name="source_image" type="data" format="tiff" label="Registered TIFF"/>
+        <param name="downsamplefactor" type="integer" value="5" label="Down Sample Factor"/>
+        <param name="channel" type="integer" value="0" label="Channel"/>
+        <param name="buffer" type="float" value="2.0" label="Buffer"/>
+        <param name="sensitivity" type="float" value="0.3" label="Sensitivity"/>
+        <!--<param name="usegrid" type="boolean" label="Use Grid"/>-->
+        <param name="cluster" type="boolean" checked="false" label="Cluster"/>
+    </inputs>
+
+    <outputs>
+        <collection name="tma_sections" type="list" label="${tool.name} on ${on_string}: Images">
+            <discover_datasets pattern="(?P<designation>[0-9]+)\.tif" format="tiff" visible="false"/>
+        </collection>
+        <collection name="masks" type="list" label="${tool.name} on ${on_string}: Masks">
+            <discover_datasets pattern="(?P<designation>[0-9]+)_mask\.tif" directory="masks" format="tiff" visible="false"/>
+        </collection>
+        <data name="TMA_MAP" format="tiff" label="${tool.name} on ${on_string}: TMA Map" from_work_dir="TMA_MAP.tif"/>
+    </outputs>
+    <help><![CDATA[
+    ]]></help>
+    <expand macro="citations" />
+</tool>
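Editor's note (not part of the changeset): for orientation, a minimal sketch of the command line this Cheetah template resolves to with the default parameter values above; the input filename is a placeholder, and the boolean --cluster flag is appended only when selected.

# Sketch only: approximates the rendered <command> for default parameter values.
import subprocess

cmd = [
    "python", "UNetCoreograph.py",
    "--imagePath", "input.ome.tif",   # placeholder; the wrapper symlinks the dataset to *.ome.tif
    "--downsampleFactor", "5",        # downsamplefactor default
    "--channel", "0",                 # channel default
    "--buffer", "2.0",                # buffer default
    "--sensitivity", "0.3",           # sensitivity default
    "--outputPath", ".",
    # "--cluster",                    # added only when the Cluster boolean is checked
]
subprocess.run(cmd, check=True)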
diff -r 000000000000 -r 99308601eaa6 images/TMA_MAP.jpg
Binary file images/TMA_MAP.jpg has changed

diff -r 000000000000 -r 99308601eaa6 images/TMA_MAP.tif
Binary file images/TMA_MAP.tif has changed

diff -r 000000000000 -r 99308601eaa6 images/probmap.jpg
Binary file images/probmap.jpg has changed

diff -r 000000000000 -r 99308601eaa6 images/probmap.tif
Binary file images/probmap.tif has changed

diff -r 000000000000 -r 99308601eaa6 images/raw.jpg
Binary file images/raw.jpg has changed

diff -r 000000000000 -r 99308601eaa6 images/raw.tif
Binary file images/raw.tif has changed
diff -r 000000000000 -r 99308601eaa6 macros.xml
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/macros.xml  Wed May 19 21:34:38 2021 +0000
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<macros>
+    <xml name="requirements">
+        <requirements>
+            <container type="docker">labsyspharm/unetcoreograph:@VERSION@</container>
+            <requirement type="package" version="3.6">python</requirement>
+            <requirement type="package" version="1.15.1">tensorflow-estimator</requirement>
+            <requirement type="package" version="1.15">tensorflow</requirement>
+            <requirement type="package">cython</requirement>
+            <requirement type="package" version="0.14.2">scikit-image</requirement>
+            <requirement type="package">matplotlib</requirement>
+            <requirement type="package" version="2020.2.16">tifffile</requirement>
+            <requirement type="package" version="1.1.0">scipy</requirement>
+            <requirement type="package">opencv</requirement>
+            <requirement type="package" version="0.8.1">pytiff</requirement>
+        </requirements>
+    </xml>
+
+    <xml name="version_cmd">
+        <version_command>echo @VERSION@</version_command>
+    </xml>
+    <xml name="citations">
+        <citations>
+        </citations>
+    </xml>
+
+    <token name="@VERSION@">2.2.0</token>
+    <token name="@CMD_BEGIN@">python ${__tool_directory__}/UNetCoreograph.py</token>
+</macros>
diff -r 000000000000 -r 99308601eaa6 model/checkpoint
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/model/checkpoint  Wed May 19 21:34:38 2021 +0000
@@ -0,0 +1,2 @@
+model_checkpoint_path: "D:\\LSP\\Coreograph\\model-4layersMaskAug20New\\model.ckpt"
+all_model_checkpoint_paths: "D:\\LSP\\Coreograph\\model-4layersMaskAug20New\\model.ckpt"
diff -r 000000000000 -r 99308601eaa6 model/datasetMean.data
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/model/datasetMean.data  Wed May 19 21:34:38 2021 +0000
@@ -0,0 +1,3 @@
+�G?�
+=p��
+.
\ No newline at end of file
diff -r 000000000000 -r 99308601eaa6 model/datasetStDev.data
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/model/datasetStDev.data  Wed May 19 21:34:38 2021 +0000
@@ -0,0 +1,3 @@
+�G?�
+=p��
+.
\ No newline at end of file
diff -r 000000000000 -r 99308601eaa6 model/hp.data
Binary file model/hp.data has changed

diff -r 000000000000 -r 99308601eaa6 model/model.ckpt.data-00000-of-00001
Binary file model/model.ckpt.data-00000-of-00001 has changed

diff -r 000000000000 -r 99308601eaa6 model/model.ckpt.index
Binary file model/model.ckpt.index has changed

diff -r 000000000000 -r 99308601eaa6 model/model.ckpt.meta
Binary file model/model.ckpt.meta has changed
diff -r 000000000000 -r 99308601eaa6 toolbox/PartitionOfImage.py
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/PartitionOfImage.py  Wed May 19 21:34:38 2021 +0000
b"@@ -0,0 +1,305 @@\n+import numpy as np\r\n+from toolbox.imtools import *\r\n+# from toolbox.ftools import *\r\n+# import sys\r\n+\r\n+class PI2D:\r\n+ Image = None\r\n+ PaddedImage = None\r\n+ PatchSize = 128\r\n+ Margin = 14\r\n+ SubPatchSize = 100\r\n+ PC = None # patch coordinates\r\n+ NumPatches = 0\r\n+ Output = None\r\n+ Count = None\r\n+ NR = None\r\n+ NC = None\r\n+ NRPI = None\r\n+ NCPI = None\r\n+ Mode = None\r\n+ W = None\r\n+\r\n+ def setup(image,patchSize,margin,mode):\r\n+ PI2D.Image = image\r\n+ PI2D.PatchSize = patchSize\r\n+ PI2D.Margin = margin\r\n+ subPatchSize = patchSize-2*margin\r\n+ PI2D.SubPatchSize = subPatchSize\r\n+\r\n+ W = np.ones((patchSize,patchSize))\r\n+ W[[0,-1],:] = 0\r\n+ W[:,[0,-1]] = 0\r\n+ for i in range(1,2*margin):\r\n+ v = i/(2*margin)\r\n+ W[i,i:-i] = v\r\n+ W[-i-1,i:-i] = v\r\n+ W[i:-i,i] = v\r\n+ W[i:-i,-i-1] = v\r\n+ PI2D.W = W\r\n+\r\n+ if len(image.shape) == 2:\r\n+ nr,nc = image.shape\r\n+ elif len(image.shape) == 3: # multi-channel image\r\n+ nz,nr,nc = image.shape\r\n+\r\n+ PI2D.NR = nr\r\n+ PI2D.NC = nc\r\n+\r\n+ npr = int(np.ceil(nr/subPatchSize)) # number of patch rows\r\n+ npc = int(np.ceil(nc/subPatchSize)) # number of patch cols\r\n+\r\n+ nrpi = npr*subPatchSize+2*margin # number of rows in padded image \r\n+ ncpi = npc*subPatchSize+2*margin # number of cols in padded image \r\n+\r\n+ PI2D.NRPI = nrpi\r\n+ PI2D.NCPI = ncpi\r\n+\r\n+ if len(image.shape) == 2:\r\n+ PI2D.PaddedImage = np.zeros((nrpi,ncpi))\r\n+ PI2D.PaddedImage[margin:margin+nr,margin:margin+nc] = image\r\n+ elif len(image.shape) == 3:\r\n+ PI2D.PaddedImage = np.zeros((nz,nrpi,ncpi))\r\n+ PI2D.PaddedImage[:,margin:margin+nr,margin:margin+nc] = image\r\n+\r\n+ PI2D.PC = [] # patch coordinates [r0,r1,c0,c1]\r\n+ for i in range(npr):\r\n+ r0 = i*subPatchSize\r\n+ r1 = r0+patchSize\r\n+ for j in range(npc):\r\n+ c0 = j*subPatchSize\r\n+ c1 = c0+patchSize\r\n+ PI2D.PC.append([r0,r1,c0,c1])\r\n+\r\n+ PI2D.NumPatches = len(PI2D.PC)\r\n+ PI2D.Mode = mode # 'replace' or 'accumulate'\r\n+\r\n+ def getPatch(i):\r\n+ r0,r1,c0,c1 = PI2D.PC[i]\r\n+ if len(PI2D.PaddedImage.shape) == 2:\r\n+ return PI2D.PaddedImage[r0:r1,c0:c1]\r\n+ if len(PI2D.PaddedImage.shape) == 3:\r\n+ return PI2D.PaddedImage[:,r0:r1,c0:c1]\r\n+\r\n+ def createOutput(nChannels):\r\n+ if nChannels == 1:\r\n+ PI2D.Output = np.zeros((PI2D.NRPI,PI2D.NCPI),np.float16)\r\n+ else:\r\n+ PI2D.Output = np.zeros((nChannels,PI2D.NRPI,PI2D.NCPI),np.float16)\r\n+ if PI2D.Mode == 'accumulate':\r\n+ PI2D.Count = np.zeros((PI2D.NRPI,PI2D.NCPI),np.float16)\r\n+\r\n+ def patchOutput(i,P):\r\n+ r0,r1,c0,c1 = PI2D.PC[i]\r\n+ if PI2D.Mode == 'accumulate':\r\n+ PI2D.Count[r0:r1,c0:c1] += PI2D.W\r\n+ if len(P.shape) == 2:\r\n+ if PI2D.Mode == 'accumulate':\r\n+ PI2D.Output[r0:r1,c0:c1] += np.multiply(P,PI2D.W)\r\n+ elif PI2D.Mode == 'replace':\r\n+ PI2D.Output[r0:r1,c0:c1] = P\r\n+ elif len(P.shape) == 3:\r\n+ if PI2D.Mode == 'accumulate':\r\n+ for i in range(P.shape[0]):\r\n+ PI2D.Output[i,r0:r1,c0:c1] += np.multiply(P[i,:,:],PI2D.W)\r\n+ elif PI2D.Mode == 'replace':\r\n+ PI2D.Output[:,r0:r1,c0:c1] = P\r\n+\r\n+ def getValidOutput():\r\n+ margin = PI2D.Margin\r\n+ nr, nc = PI2D.NR, PI2D.NC\r\n+ if PI2D.Mode == 'accumulate':\r\n+ C = PI2D.Count[margin:margin+nr,margin:margin+nc]\r\n+ if len(PI2D.Output.shape) == 2:\r\n+ if PI2D.Mode == 'accumulate':\r\n+ return np.divide(PI2D.Output[margin:margin+nr,margin:margin+nc],C)\r\n+ if PI2D.Mode ="..b" PI3D.NRPI = nrpi\r\n+ PI3D.NCPI = ncpi\r\n+ PI3D.NZPI = nzpi\r\n+\r\n+ if len(image.shape) 
== 3:\r\n+ PI3D.PaddedImage = np.zeros((nzpi,nrpi,ncpi))\r\n+ PI3D.PaddedImage[margin:margin+nz,margin:margin+nr,margin:margin+nc] = image\r\n+ elif len(image.shape) == 4:\r\n+ PI3D.PaddedImage = np.zeros((nzpi,nw,nrpi,ncpi))\r\n+ PI3D.PaddedImage[margin:margin+nz,:,margin:margin+nr,margin:margin+nc] = image\r\n+\r\n+ PI3D.PC = [] # patch coordinates [z0,z1,r0,r1,c0,c1]\r\n+ for iZ in range(npz):\r\n+ z0 = iZ*subPatchSize\r\n+ z1 = z0+patchSize\r\n+ for i in range(npr):\r\n+ r0 = i*subPatchSize\r\n+ r1 = r0+patchSize\r\n+ for j in range(npc):\r\n+ c0 = j*subPatchSize\r\n+ c1 = c0+patchSize\r\n+ PI3D.PC.append([z0,z1,r0,r1,c0,c1])\r\n+\r\n+ PI3D.NumPatches = len(PI3D.PC)\r\n+ PI3D.Mode = mode # 'replace' or 'accumulate'\r\n+\r\n+ def getPatch(i):\r\n+ z0,z1,r0,r1,c0,c1 = PI3D.PC[i]\r\n+ if len(PI3D.PaddedImage.shape) == 3:\r\n+ return PI3D.PaddedImage[z0:z1,r0:r1,c0:c1]\r\n+ if len(PI3D.PaddedImage.shape) == 4:\r\n+ return PI3D.PaddedImage[z0:z1,:,r0:r1,c0:c1]\r\n+\r\n+ def createOutput(nChannels):\r\n+ if nChannels == 1:\r\n+ PI3D.Output = np.zeros((PI3D.NZPI,PI3D.NRPI,PI3D.NCPI))\r\n+ else:\r\n+ PI3D.Output = np.zeros((PI3D.NZPI,nChannels,PI3D.NRPI,PI3D.NCPI))\r\n+ if PI3D.Mode == 'accumulate':\r\n+ PI3D.Count = np.zeros((PI3D.NZPI,PI3D.NRPI,PI3D.NCPI))\r\n+\r\n+ def patchOutput(i,P):\r\n+ z0,z1,r0,r1,c0,c1 = PI3D.PC[i]\r\n+ if PI3D.Mode == 'accumulate':\r\n+ PI3D.Count[z0:z1,r0:r1,c0:c1] += PI3D.W\r\n+ if len(P.shape) == 3:\r\n+ if PI3D.Mode == 'accumulate':\r\n+ PI3D.Output[z0:z1,r0:r1,c0:c1] += np.multiply(P,PI3D.W)\r\n+ elif PI3D.Mode == 'replace':\r\n+ PI3D.Output[z0:z1,r0:r1,c0:c1] = P\r\n+ elif len(P.shape) == 4:\r\n+ if PI3D.Mode == 'accumulate':\r\n+ for i in range(P.shape[1]):\r\n+ PI3D.Output[z0:z1,i,r0:r1,c0:c1] += np.multiply(P[:,i,:,:],PI3D.W)\r\n+ elif PI3D.Mode == 'replace':\r\n+ PI3D.Output[z0:z1,:,r0:r1,c0:c1] = P\r\n+\r\n+ def getValidOutput():\r\n+ margin = PI3D.Margin\r\n+ nz, nr, nc = PI3D.NZ, PI3D.NR, PI3D.NC\r\n+ if PI3D.Mode == 'accumulate':\r\n+ C = PI3D.Count[margin:margin+nz,margin:margin+nr,margin:margin+nc]\r\n+ if len(PI3D.Output.shape) == 3:\r\n+ if PI3D.Mode == 'accumulate':\r\n+ return np.divide(PI3D.Output[margin:margin+nz,margin:margin+nr,margin:margin+nc],C)\r\n+ if PI3D.Mode == 'replace':\r\n+ return PI3D.Output[margin:margin+nz,margin:margin+nr,margin:margin+nc]\r\n+ if len(PI3D.Output.shape) == 4:\r\n+ if PI3D.Mode == 'accumulate':\r\n+ for i in range(PI3D.Output.shape[1]):\r\n+ PI3D.Output[margin:margin+nz,i,margin:margin+nr,margin:margin+nc] = np.divide(PI3D.Output[margin:margin+nz,i,margin:margin+nr,margin:margin+nc],C)\r\n+ return PI3D.Output[margin:margin+nz,:,margin:margin+nr,margin:margin+nc]\r\n+\r\n+\r\n+ def demo():\r\n+ I = np.random.rand(128,128,128)\r\n+ PI3D.setup(I,64,4,'accumulate')\r\n+\r\n+ nChannels = 2\r\n+ PI3D.createOutput(nChannels)\r\n+\r\n+ for i in range(PI3D.NumPatches):\r\n+ P = PI3D.getPatch(i)\r\n+ Q = np.zeros((P.shape[0],nChannels,P.shape[1],P.shape[2]))\r\n+ for j in range(nChannels):\r\n+ Q[:,j,:,:] = P\r\n+ PI3D.patchOutput(i,Q)\r\n+\r\n+ J = PI3D.getValidOutput()\r\n+ J = J[:,0,:,:]\r\n+\r\n+ D = np.abs(I-J)\r\n+ print(np.max(D))\r\n+\r\n+ pI = I[64,:,:]\r\n+ pJ = J[64,:,:]\r\n+ pD = D[64,:,:]\r\n+\r\n+ K = cat(1,cat(1,pI,pJ),pD)\r\n+ imshow(K)\r\n+\r\n" |
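Editor's note (not part of the changeset): a minimal round-trip sketch of the PI2D tiling helper added above, modelled on the PI3D.demo() visible at the end of the file; the image is synthetic.

# Sketch only: tile, "predict" (identity), and stitch back with PI2D.
import numpy as np
from toolbox.PartitionOfImage import PI2D

I = np.random.rand(1000, 1200)        # stand-in for a single-channel 2-D image

PI2D.setup(I, 128, 14, 'accumulate')  # patchSize=128, margin=14, weighted stitching
PI2D.createOutput(1)                  # one output channel

for i in range(PI2D.NumPatches):
    P = PI2D.getPatch(i)              # 128x128 tile including the overlap margin
    PI2D.patchOutput(i, P)            # identity "prediction" for the round trip

J = PI2D.getValidOutput()             # stitched output cropped to the input size
print(np.abs(I - J).max())            # small residual from float16 accumulation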
diff -r 000000000000 -r 99308601eaa6 toolbox/__pycache__/PartitionOfImage.cpython-36.pyc
Binary file toolbox/__pycache__/PartitionOfImage.cpython-36.pyc has changed

diff -r 000000000000 -r 99308601eaa6 toolbox/__pycache__/__init__.cpython-36.pyc
Binary file toolbox/__pycache__/__init__.cpython-36.pyc has changed

diff -r 000000000000 -r 99308601eaa6 toolbox/__pycache__/ftools.cpython-36.pyc
Binary file toolbox/__pycache__/ftools.cpython-36.pyc has changed

diff -r 000000000000 -r 99308601eaa6 toolbox/__pycache__/imtools.cpython-36.pyc
Binary file toolbox/__pycache__/imtools.cpython-36.pyc has changed
diff -r 000000000000 -r 99308601eaa6 toolbox/ftools.py
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/ftools.py  Wed May 19 21:34:38 2021 +0000
@@ -0,0 +1,55 @@
+from os.path import *
+from os import listdir, makedirs, remove
+import pickle
+import shutil
+
+def fileparts(path): # path = file path
+    [p,f] = split(path)
+    [n,e] = splitext(f)
+    return [p,n,e]
+
+def listfiles(path,token): # path = folder path
+    l = []
+    for f in listdir(path):
+        fullPath = join(path,f)
+        if isfile(fullPath) and token in f:
+            l.append(fullPath)
+    l.sort()
+    return l
+
+def listsubdirs(path): # path = folder path
+    l = []
+    for f in listdir(path):
+        fullPath = join(path,f)
+        if isdir(fullPath):
+            l.append(fullPath)
+    l.sort()
+    return l
+
+def pathjoin(p,ne): # '/path/to/folder', 'name.extension' (or a subfolder)
+    return join(p,ne)
+
+def saveData(data,path):
+    print('saving data')
+    dataFile = open(path, 'wb')
+    pickle.dump(data, dataFile)
+
+def loadData(path):
+    print('loading data')
+    dataFile = open(path, 'rb')
+    return pickle.load(dataFile)
+
+def createFolderIfNonExistent(path):
+    if not exists(path): # from os.path
+        makedirs(path)
+
+def moveFile(fullPathSource,folderPathDestination):
+    [p,n,e] = fileparts(fullPathSource)
+    shutil.move(fullPathSource,pathjoin(folderPathDestination,n+e))
+
+def copyFile(fullPathSource,folderPathDestination):
+    [p,n,e] = fileparts(fullPathSource)
+    shutil.copy(fullPathSource,pathjoin(folderPathDestination,n+e))
+
+def removeFile(path):
+    remove(path)
\ No newline at end of file
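Editor's note (not part of the changeset): typical use of the ftools helpers above; the paths are placeholders.

# Sketch only: list, split, and pickle with the ftools helpers.
from toolbox.ftools import (listfiles, fileparts, saveData, loadData,
                            createFolderIfNonExistent)

inDir, outDir = '/data/cores', '/data/out'     # placeholder folders
createFolderIfNonExistent(outDir)              # makedirs only if missing

for f in listfiles(inDir, '.tif'):             # sorted paths containing '.tif'
    folder, name, ext = fileparts(f)           # e.g. ['/data/cores', '1', '.tif']
    print(name, ext)

saveData({'mean': 0.47}, outDir + '/stats.data')   # pickle a small dict
stats = loadData(outDir + '/stats.data')           # read it back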
diff -r 000000000000 -r 99308601eaa6 toolbox/imtools.py
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/toolbox/imtools.py  Wed May 19 21:34:38 2021 +0000
@@ -0,0 +1,312 @@
+import matplotlib.pyplot as plt
+import tifffile
+import os
+import numpy as np
+from skimage import io as skio
+from scipy.ndimage import *
+from skimage.morphology import *
+from skimage.transform import resize
+
+def tifread(path):
+    return tifffile.imread(path)
+
+def tifwrite(I,path):
+    tifffile.imsave(path, I)
+
+def im2double(I):
+    if I.dtype == 'uint16':
+        return I.astype('float64')/65535
+    elif I.dtype == 'uint8':
+        return I.astype('float64')/255
+    elif I.dtype == 'float32':
+        return I.astype('float64')
+    elif I.dtype == 'float64':
+        return I
+    else:
+        print('returned original image type: ', I.dtype)
+        return I
[... remainder of the hunk truncated in this view: display helpers (imshow, imshowlist), size(), resizing (imresizeDouble, imresizeUInt8 and 3-D variants), normalize/snormalize, cat, morphology (imerode/imdilate and 3-D variants), filtering (medfilt, maxfilt, minfilt, ptlfilt, imgaussfilt, imlogfilt, imgradmag), local statistics and derivative/feature stacks (localstats, imderivatives, imfeatures and 3-D variants), stack2list, and thrsegment; no newline at end of file ...]
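Editor's note (not part of the changeset): a short pre-processing chain using the imtools helpers above; 'input.tif' is a placeholder path and sigma=2 is an arbitrary choice.

# Sketch only: read, scale to [0,1], and compute a Gaussian gradient magnitude.
import numpy as np
from toolbox.imtools import tifread, tifwrite, im2double, normalize, imgradmag

I = im2double(tifread('input.tif'))     # uint8/uint16 -> float64 in [0, 1]
I = normalize(I)                        # min-max rescale
G = imgradmag(I, 2)                     # gradient magnitude at sigma = 2
tifwrite(np.uint8(255 * G), 'gradmag.tif')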