changeset 1:57f1260ca94e draft
"planemo upload commit fec9dc76b3dd17b14b02c2f04be9d30f71eba1ae"
author    watsocam
date      Fri, 11 Mar 2022 23:40:51 +0000
parents   99308601eaa6
children  224e0cf4aaeb
files     Dockerfile LICENSE README.md UNet2DtCycifTRAINCoreograph.py UNetCoreograph.py coreograph.xml images/coreographbanner.png images/coreographbannerv2.png images/coreographbannerv3.png images/coreographbannerv4.png images/coreographbannerv5.png macros.xml
diffstat  12 files changed, 191 insertions(+), 177 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Dockerfile	Fri Mar 11 23:40:51 2022 +0000
@@ -0,0 +1,12 @@
+FROM tensorflow/tensorflow:1.15.0-py3
+
+RUN apt-get update
+RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install tzdata
+RUN apt-get install -y python3-opencv
+RUN apt-get install -y libtiff5-dev git
+
+RUN pip install cython scikit-image==0.14.2 matplotlib tifffile==2020.2.16 scipy==1.1.0 opencv-python==4.3.0.36
+
+RUN pip install git+https://github.com/FZJ-INM1-BDA/pytiff.git@0701f28e5862c26024e8daa34201005b16db4c8f
+
+COPY . /app
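The new image pins TensorFlow 1.15 (Python 3) plus specific scikit-image, tifffile, scipy and OpenCV releases. A minimal sanity-check sketch, assuming it is run inside a container built from this Dockerfile, that prints the resolved versions; it is an illustration only, not part of the changeset:

    # Sanity-check sketch: print the versions pinned by the Dockerfile above.
    # Assumes it runs inside the image built from this Dockerfile.
    import tensorflow as tf
    import skimage, tifffile, scipy, cv2

    print("tensorflow  :", tf.__version__)        # expected 1.15.0
    print("scikit-image:", skimage.__version__)   # expected 0.14.2
    print("tifffile    :", tifffile.__version__)  # expected 2020.2.16
    print("scipy       :", scipy.__version__)     # expected 1.1.0
    print("opencv      :", cv2.__version__)       # expected 4.3.0.36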
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LICENSE	Fri Mar 11 23:40:51 2022 +0000
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 HMS-IDAC
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/README.md	Fri Mar 11 23:40:51 2022 +0000
@@ -0,0 +1,26 @@
+![map](/images/coreographbannerv5.png)
+
+*Great*....yet **another** TMA dearray program. What does *this* one do?
+
+Coreograph uses UNet, a deep learning model, to identify complete/incomplete tissue cores on a tissue microarray. It has been trained on 9 TMA slides of different sizes and tissue types.
+
+<img src="/images/raw.jpg" width="425" height="315" /> <img src="/images/probmap.jpg" width="425" height="315" />
+
+Training sets were acquired at 0.2micron/pixel resolution and downsampled 1/32 times to speed up performance. Once the center of each core has been identifed, active contours is used to generate a tissue mask of each core that can aid downstream single cell segmentation. A GPU is not required but will reduce computation time.
+
+*Coreograph exports these files:**
+1. individual cores as tiff stacks with user-selectable channel ranges
+2. binary tissue masks (saved in the 'mask' subfolder)
+3. a TMA map showing the labels and outlines of each core for quality control purposes
+
+![map](/images/TMA_MAP.jpg)
+
+*Instructions for use:**
+`python UNetCoreograph.py`
+1. `--imagePath` : the path to the image file. Should be tif or ome.tif
+2. `--outputPath` : the path to save the above-mentioned files
+3. `--downsampleFactor` : how many times to downsample the raw image file. Default is 5 times to match the training data.
+4. `--channel` : which is the channel to feed into UNet and generate probabiltiy maps from. This is usually a DAPI channel
+5. `--buffer` : the extra space around a core before cropping it. A value of 2 means there is twice the width of the core added as buffer around it. 2 is default
+6. `--outputChan` : a range of channels to be exported. -1 is default and will export all channels (takes awhile). Select a single channel or a continuous range. --outputChan 0 10 will export channel 0 up to (and including) channel 10
+
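The README above documents the command-line interface. A minimal invocation sketch using only those documented flags; the input image and output folder paths are placeholders, not files shipped with this changeset:

    # Invocation sketch based on the flags documented in the README above.
    # The .ome.tif path and output folder are hypothetical placeholders.
    import subprocess

    subprocess.run(
        [
            "python", "UNetCoreograph.py",
            "--imagePath", "/data/TMA.ome.tif",  # placeholder input image
            "--outputPath", "/data/dearray",     # placeholder output folder
            "--downsampleFactor", "5",           # default; matches the training data
            "--channel", "0",                    # DAPI channel fed to UNet
            "--buffer", "2",                     # default padding around each core
            "--outputChan", "0", "10",           # export channels 0 through 10 inclusive
        ],
        check=True,
    )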
--- a/UNet2DtCycifTRAINCoreograph.py	Wed May 19 21:34:38 2021 +0000
+++ b/UNet2DtCycifTRAINCoreograph.py	Fri Mar 11 23:40:51 2022 +0000
@@ -524,63 +524,6 @@
 	UNet2D.train(imPath, logPath, modelPath, pmPath, 2053, 513 , 641, True, 10, 1, 1)
 	UNet2D.deploy(imPath,100,modelPath,pmPath,1,1)
-	# I = im2double(tifread('/home/mc457/files/CellBiology/IDAC/Marcelo/Etc/UNetTestSets/SinemSaka_NucleiSegmentation_SingleImageInferenceTest3.tif'))
-	# UNet2D.singleImageInferenceSetup(modelPath,0)
-	# J = UNet2D.singleImageInference(I,'accumulate',0)
-	# UNet2D.singleImageInferenceCleanup()
-	# # imshowlist([I,J])
-	# # sys.exit(0)
-	# # tifwrite(np.uint8(255*I),'/home/mc457/Workspace/I1.tif')
-	# # tifwrite(np.uint8(255*J),'/home/mc457/Workspace/I2.tif')
-	# K = np.zeros((2,I.shape[0],I.shape[1]))
-	# K[0,:,:] = I
-	# K[1,:,:] = J
-	# tifwrite(np.uint8(255*K),'/home/mc457/Workspace/Sinem_NucSeg.tif')
-
-	# UNet2D.singleImageInferenceSetup(modelPath,0)
-	# imagePath = 'Y://sorger//data//RareCyte//Connor//Topacio_P2_AF//ashlar//C0078'
-	#
-	# fileList = glob.glob(imagePath + '//registration//C0078.ome.tif')
-	# print(fileList)
-	# for iFile in fileList:
-	# fileName = os.path.basename(iFile)
-	# fileNamePrefix = fileName.split(os.extsep, 1)
-	# I = im2double(tifffile.imread(iFile, key=0))
-	# hsize = int((float(I.shape[0])*float(0.75)))
-	# vsize = int((float(I.shape[1])*float(0.75)))
-	# I = resize(I,(hsize,vsize))
-	# J = UNet2D.singleImageInference(I,'accumulate',1)
-	# K = np.zeros((3,I.shape[0],I.shape[1]))
-	# K[2,:,:] = I
-	# K[0,:,:] = J
-	# J = UNet2D.singleImageInference(I, 'accumulate', 2)
-	# K[1, :, :] = J
-	# outputPath = imagePath + '//prob_maps'
-	# if not os.path.exists(outputPath):
-	# os.makedirs(outputPath)
-	# tifwrite(np.uint8(255*K),outputPath + '//' + fileNamePrefix[0] +'_NucSeg.tif')
-	# UNet2D.singleImageInferenceCleanup()
-	# ----- test 2 -----
-	# imPath = '/home/mc457/files/CellBiology/IDAC/Marcelo/Etc/UNetTestSets/ClarenceYapp_NucleiSegmentation'
-	# UNet2D.setup(128,1,2,8,2,2,3,1,0.1,3,4)
-	# UNet2D.train(imPath,logPath,modelPath,pmPath,800,100,100,False,10,1)
-	# UNet2D.deploy(imPath,100,modelPath,pmPath,1)
-
-
-	# ----- test 3 -----
-
-	# imPath = '/home/mc457/files/CellBiology/IDAC/Marcelo/Etc/UNetTestSets/CarmanLi_CellTypeSegmentation'
-	# # UNet2D.setup(256,1,2,8,2,2,3,1,0.1,3,4)
-	# # UNet2D.train(imPath,logPath,modelPath,pmPath,1400,100,164,False,10000,1)
-	# UNet2D.deploy(imPath,164,modelPath,pmPath,1)
-
-
-	# ----- test 4 -----
-
-	# imPath = '/home/cicconet/Downloads/TrainSet1'
-	# UNet2D.setup(64,1,2,8,2,2,3,1,0.1,3,4)
-	# UNet2D.train(imPath,logPath,modelPath,pmPath,200,8,8,False,2000,1,0)
-	# # UNet2D.deploy(imPath,164,modelPath,pmPath,1)
\ No newline at end of file
--- a/UNetCoreograph.py	Wed May 19 21:34:38 2021 +0000
+++ b/UNetCoreograph.py	Fri Mar 11 23:40:51 2022 +0000
@@ -3,6 +3,9 @@
 import shutil
 import scipy.io as sio
 import os
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+import logging
+logging.getLogger('tensorflow').setLevel(logging.FATAL)
 import skimage.exposure as sk
 import cv2
 import argparse
@@ -14,10 +17,11 @@
 from skimage.segmentation import chan_vese, find_boundaries, morphological_chan_vese
 from skimage.measure import regionprops,label, find_contours
 from skimage.transform import resize
-from skimage.filters import gaussian
+from skimage.filters import gaussian, threshold_otsu
 from skimage.feature import peak_local_max,blob_log
-from skimage.color import label2rgb
+from skimage.color import gray2rgb as gray2rgb
 import skimage.io as skio
+from scipy.ndimage.morphology import binary_fill_holes
 from skimage import img_as_bool
 from skimage.draw import circle_perimeter
 from scipy.ndimage.filters import uniform_filter
@@ -525,27 +529,27 @@
 def identifyNumChan(path):
-    tiff = tifffile.TiffFile(path)
-    shape = tiff.pages[0].shape
-    numChan=None
-    for i, page in enumerate(tiff.pages):
-        if page.shape != shape:
-            numChan = i
-            return numChan
-            break
-#        else:
-#            raise Exception("Did not find any pyramid subresolutions")
-    if not numChan:
-        numChan = len(tiff.pages)
-        return numChan
+    s = tifffile.TiffFile(path).series[0]
+    return s.shape[0] if len(s.shape) > 2 else 1
+    # shape = tiff.pages[0].shape
+    # tiff = tifffile.TiffFile(path)
+    # for i, page in enumerate(tiff.pages):
+    #     print(page.shape)
+    #     if page.shape != shape:
+    #         numChan = i
+    #         return numChan
+    #         break
+#        else:
+#            raise Exception("Did not find any pyramid subresolutions")
+
 def getProbMaps(I,dsFactor,modelPath):
     hsize = int((float(I.shape[0]) * float(0.5)))
     vsize = int((float(I.shape[1]) * float(0.5)))
     imagesub = cv2.resize(I,(vsize,hsize),cv2.INTER_NEAREST)
 
-    UNet2D.singleImageInferenceSetup(modelPath, 1)
+    UNet2D.singleImageInferenceSetup(modelPath, 0)
 
     for iSize in range(dsFactor):
         hsize = int((float(I.shape[0]) * float(0.5)))
@@ -557,42 +561,22 @@
     UNet2D.singleImageInferenceCleanup()
     return probMaps
 
-def coreSegmenterOutput(I,probMap,initialmask,preBlur,findCenter):
+def coreSegmenterOutput(I,initialmask,findCenter):
     hsize = int((float(I.shape[0]) * float(0.1)))
     vsize = int((float(I.shape[1]) * float(0.1)))
     nucGF = cv2.resize(I,(vsize,hsize),cv2.INTER_CUBIC)
-#    Irs = cv2.resize(I,(vsize,hsize),cv2.INTER_CUBIC)
-#    I=I.astype(np.float)
-#    r,c = I.shape
-#    I+=np.random.rand(r,c)*1e-6
-#    c1 = uniform_filter(I, 3, mode='reflect')
-#    c2 = uniform_filter(I*I, 3, mode='reflect')
-#    nucGF = np.sqrt(c2 - c1*c1)*np.sqrt(9./8)
-#    nucGF[np.isnan(nucGF)]=0
 
     #active contours
     hsize = int(float(nucGF.shape[0]))
     vsize = int(float(nucGF.shape[1]))
     initialmask = cv2.resize(initialmask,(vsize,hsize),cv2.INTER_NEAREST)
     initialmask = dilation(initialmask,disk(15)) >0
-
-#    init=np.argwhere(eroded>0)
+
     nucGF = gaussian(nucGF,0.7)
     nucGF=nucGF/np.amax(nucGF)
-
-#    initialmask = nucGF>0
     nuclearMask = morphological_chan_vese(nucGF, 100, init_level_set=initialmask, smoothing=10,lambda1=1.001, lambda2=1)
-#    nuclearMask = chan_vese(nucGF, mu=1.5, lambda1=6, lambda2=1, tol=0.0005, max_iter=2000, dt=15, init_level_set=initialmask, extended_output=True)
-#    nuclearMask = nuclearMask[0]
-
-
     TMAmask = nuclearMask
-#    nMaskDist =distance_transform_edt(nuclearMask)
-#    fgm = peak_local_max(h_maxima(nMaskDist, 2*preBlur),indices =False)
-#    markers= np.logical_or(erosion(1-nuclearMask,disk(3)),fgm)
-#    TMAmask=watershed(-nMaskDist,label(markers),watershed_line=True)
-#    TMAmask = nuclearMask*(TMAmask>0)
     TMAmask = remove_small_objects(TMAmask>0,round(TMAmask.shape[0])*round(TMAmask.shape[1])*0.005)
     TMAlabel = label(TMAmask)
     # find object closest to center
@@ -632,7 +616,8 @@
     parser.add_argument("--imagePath")
     parser.add_argument("--outputPath")
     parser.add_argument("--maskPath")
-    parser.add_argument("--downsampleFactor",type = int, default = 5)
+    parser.add_argument("--tissue", action='store_true')
+    parser.add_argument("--downsampleFactor", type=int, default = 5)
     parser.add_argument("--channel",type = int, default = 0)
    parser.add_argument("--buffer",type = float, default = 2)
     parser.add_argument("--outputChan", type=int, nargs = '+', default=[-1])
@@ -642,25 +627,11 @@
     args = parser.parse_args()
 
     outputPath = args.outputPath
-    imagePath = args.imagePath
+    imagePath = args.imagePath
     sensitivity = args.sensitivity
-    #scriptPath = os.path.dirname(os.path.realpath(__file__))
-    #modelPath = os.path.join(scriptPath, 'TFModel - 3class 16 kernels 5ks 2 layers')
-    #modelPath = 'D:\\LSP\\Coreograph\\model-4layersMaskAug20'
     scriptPath = os.path.dirname(os.path.realpath(__file__))
     modelPath = os.path.join(scriptPath, 'model')
-#    outputPath = 'D:\\LSP\\cycif\\testsets\\exemplar-002\\dearrayPython' ############
     maskOutputPath = os.path.join(outputPath, 'masks')
-#    imagePath = 'D:\\LSP\\cycif\\testsets\\exemplar-002\\registration\\exemplar-002.ome.tif'###########
-#    imagePath = 'Y:\\sorger\\data\\RareCyte\\Connor\\TMAs\\CAJ_TMA11_13\\original_data\\TMA11\\registration\\TMA11.ome.tif'
-#    imagePath = 'Y:\\sorger\\data\\RareCyte\\Connor\\TMAs\\Z124_TMA20_22\\TMA22\\registration\\TMA22.ome.tif'
-#    classProbsPath = 'D:\\unetcoreograph.tif'
-#    imagePath = 'Y:\\sorger\\data\\RareCyte\\Connor\\Z155_PTCL\\TMA_552\\registration\\TMA_552.ome.tif'
-#    classProbsPath = 'Y:\\sorger\\data\\RareCyte\\Connor\\Z155_PTCL\\TMA_552\\probMapCore\\TMA_552_CorePM_1.tif'
-#    imagePath = 'Y:\\sorger\\data\\RareCyte\\Zoltan\\Z112_TMA17_19\\190403_ashlar\\TMA17_1092.ome.tif'
-#    classProbsPath = 'Z:\\IDAC\\Clarence\\LSP\\CyCIF\\TMA\\probMapCore\\1new_CorePM_1.tif'
-#    imagePath = 'Y:\\sorger\\data\\RareCyte\\ANNIINA\\Julia\\2018\\TMA6\\julia_tma6.ome.tif'
-#    classProbsPath = 'Z:\\IDAC\\Clarence\\LSP\\CyCIF\\TMA\\probMapCore\\3new_CorePM_1.tif'
 
 #   if not os.path.exists(outputPath):
@@ -669,65 +640,85 @@
 #       shutil.rmtree(outputPath)
     if not os.path.exists(maskOutputPath):
         os.makedirs(maskOutputPath)
-
-
-    channel = args.channel
+    print(
+        'WARNING! IF USING FOR TISSUE SPLITTING, IT IS ADVISED TO SET --downsampleFactor TO HIGHER THAN DEFAULT OF 5')
+    channel = args.channel
     dsFactor = 1/(2**args.downsampleFactor)
-#    I = tifffile.imread(imagePath, key=channel)
     I = skio.imread(imagePath, img_num=channel)
-
     imagesub = resize(I,(int((float(I.shape[0]) * dsFactor)),int((float(I.shape[1]) * dsFactor))))
     numChan = identifyNumChan(imagePath)
-
+
     outputChan = args.outputChan
     if len(outputChan)==1:
         if outputChan[0]==-1:
             outputChan = [0, numChan-1]
         else:
            outputChan.append(outputChan[0])
-
-    classProbs = getProbMaps(I,args.downsampleFactor,modelPath)
-#    classProbs = tifffile.imread(classProbsPath,key=0)
-    preMask = gaussian(np.uint8(classProbs*255),1)>0.8
-
-    P = regionprops(label(preMask),cache=False)
-    area = [ele.area for ele in P]
-    print(str(len(P)) + ' cores detected!')
-    if len(P) <3:
-        medArea = np.median(area)
-        maxArea = np.percentile(area,99)
+    classProbs = getProbMaps(I, args.downsampleFactor, modelPath)
+
+    if not args.tissue:
+        print('TMA mode selected')
+        preMask = gaussian(np.uint8(classProbs*255),1)>0.8
+
+        P = regionprops(label(preMask),cache=False)
+        area = [ele.area for ele in P]
+        if len(P) <3:
+            medArea = np.median(area)
+            maxArea = np.percentile(area,99)
+        else:
+            count=0
+            labelpreMask = np.zeros(preMask.shape,dtype=np.uint32)
+            for props in P:
+                count += 1
+                yi = props.coords[:, 0]
+                xi = props.coords[:, 1]
+                labelpreMask[yi, xi] = count
+            P=regionprops(labelpreMask)
+            area = [ele.area for ele in P]
+            medArea = np.median(area)
+            maxArea = np.percentile(area,99)
+        preMask = remove_small_objects(preMask,0.2*medArea)
+        coreRad = round(np.sqrt(medArea/np.pi))
+        estCoreDiam = round(np.sqrt(maxArea/np.pi)*1.2*args.buffer)
+
+        #preprocessing
+        fgFiltered = blob_log(preMask,coreRad*0.6,threshold=sensitivity)
+        Imax = np.zeros(preMask.shape,dtype=np.uint8)
+        for iSpot in range(fgFiltered.shape[0]):
+            yi = np.uint32(round(fgFiltered[iSpot, 0]))
+            xi = np.uint32(round(fgFiltered[iSpot, 1]))
+            Imax[yi, xi] = 1
+        Imax = Imax*preMask
+        Idist = distance_transform_edt(1-Imax)
+        markers = label(Imax)
+        coreLabel = watershed(Idist,markers,watershed_line=True,mask = preMask)
+        P = regionprops(coreLabel)
+        centroids = np.array([ele.centroid for ele in P]) / dsFactor
+        np.savetxt(outputPath + os.path.sep + 'centroidsY-X.txt', np.asarray(centroids), fmt='%10.5f')
+        numCores = len(centroids)
+        print(str(numCores) + ' cores detected!')
+        estCoreDiamX = np.ones(numCores) * estCoreDiam / dsFactor
+        estCoreDiamY = np.ones(numCores) * estCoreDiam / dsFactor
     else:
-        count=0
-        labelpreMask = np.zeros(preMask.shape,dtype=np.uint32)
-        for props in P:
-            count += 1
-            yi = props.coords[:, 0]
-            xi = props.coords[:, 1]
-            labelpreMask[yi, xi] = count
-        P=regionprops(labelpreMask)
-        area = [ele.area for ele in P]
-        medArea = np.median(area)
-        maxArea = np.percentile(area,99)
-    preMask = remove_small_objects(preMask,0.2*medArea)
-    coreRad = round(np.sqrt(medArea/np.pi))
-    estCoreDiam = round(np.sqrt(maxArea/np.pi)*1.2*args.buffer)
+        print('Tissue mode selected')
+        imageblur = 5
+        Iblur = gaussian(np.uint8(255*classProbs), imageblur)
+        coreMask = binary_fill_holes(binary_closing(Iblur > threshold_otsu(Iblur), np.ones((imageblur*2,imageblur*2))))
+        coreMask = remove_small_objects(coreMask, min_size=0.001 * coreMask.shape[0] * coreMask.shape[1])
 
-#preprocessing
-    fgFiltered = blob_log(preMask,coreRad*0.6,threshold=sensitivity)
-    Imax = np.zeros(preMask.shape,dtype=np.uint8)
-    for iSpot in range(fgFiltered.shape[0]):
-        yi = np.uint32(round(fgFiltered[iSpot, 0]))
-        xi = np.uint32(round(fgFiltered[iSpot, 1]))
-        Imax[yi, xi] = 1
-    Imax = Imax*preMask
-    Idist = distance_transform_edt(1-Imax)
-    markers = label(Imax)
-    coreLabel = watershed(Idist,markers,watershed_line=True,mask = preMask)
-    P = regionprops(coreLabel)
-    centroids = np.array([ele.centroid for ele in P])/dsFactor
-    numCores = len(centroids)
-    estCoreDiamX = np.ones(numCores)*estCoreDiam/dsFactor
-    estCoreDiamY = np.ones(numCores)*estCoreDiam/dsFactor
+        ## watershed
+        Idist = distance_transform_edt(coreMask)
+        markers = peak_local_max(h_maxima(Idist,20),indices=False)
+        markers = label(markers).astype(np.int8)
+        coreLabel = watershed(-Idist, markers, watershed_line=True,mask = coreMask)
+
+        P = regionprops(coreLabel)
+        centroids = np.array([ele.centroid for ele in P]) / dsFactor
+        np.savetxt(outputPath + os.path.sep + 'centroidsY-X.txt', np.asarray(centroids), fmt='%10.5f')
+        numCores = len(centroids)
+        print(str(numCores) + ' tissues detected!')
+        estCoreDiamX = np.array([(ele.bbox[3]-ele.bbox[1])*1.1 for ele in P]) / dsFactor
+        estCoreDiamY = np.array([(ele.bbox[2]-ele.bbox[0])*1.1 for ele in P]) / dsFactor
 
     if numCores ==0 & args.cluster:
         print('No cores detected. Try adjusting the downsample factor')
@@ -736,8 +727,9 @@
     singleMaskTMA = np.zeros(imagesub.shape)
     maskTMA = np.zeros(imagesub.shape)
     bbox = [None] * numCores
-
-
+    imagesub = imagesub/np.percentile(imagesub,99.9)
+    imagesub = (imagesub * 255).round().astype(np.uint8)
+    imagesub = gray2rgb(imagesub)
     x=np.zeros(numCores)
     xLim=np.zeros(numCores)
     y=np.zeros(numCores)
@@ -761,24 +753,31 @@
             y[iCore]=1
 
        bbox[iCore] = [round(x[iCore]), round(y[iCore]), round(xLim[iCore]), round(yLim[iCore])]
-
+        coreStack = np.zeros((outputChan[1]-outputChan[0]+1,np.int(round(yLim[iCore])-round(y[iCore])-1),np.int(round(xLim[iCore])-round(x[iCore])-1)),dtype='uint16')
+
        for iChan in range(outputChan[0],outputChan[1]+1):
            with pytiff.Tiff(imagePath, "r", encoding='utf-8') as handle:
                handle.set_page(iChan)
-                coreStack= handle[np.uint32(bbox[iCore][1]):np.uint32(bbox[iCore][3]-1), np.uint32(bbox[iCore][0]):np.uint32(bbox[iCore][2]-1)]
-            skio.imsave(outputPath + os.path.sep + str(iCore+1) + '.tif',coreStack,append=True)
+                coreStack[iChan,:,:] =handle[np.uint32(bbox[iCore][1]):np.uint32(bbox[iCore][3]-1), np.uint32(bbox[iCore][0]):np.uint32(bbox[iCore][2]-1)]
+        skio.imsave(outputPath + os.path.sep + str(iCore+1) + '.tif',np.uint16(coreStack),imagej=True,bigtiff=True)
 
        with pytiff.Tiff(imagePath, "r", encoding='utf-8') as handle:
            handle.set_page(args.channel)
            coreSlice= handle[np.uint32(bbox[iCore][1]):np.uint32(bbox[iCore][3]-1), np.uint32(bbox[iCore][0]):np.uint32(bbox[iCore][2]-1)]
 
        core = (coreLabel ==(iCore+1))
-        initialmask = core[np.uint32(y[iCore]*dsFactor):np.uint32(yLim[iCore]*dsFactor),np.uint32(x[iCore]*dsFactor):np.uint32(xLim[iCore]*dsFactor)]
-        initialmask = resize(initialmask,size(coreSlice),cv2.INTER_NEAREST)
+        initialmask = core[np.uint32(y[iCore] * dsFactor):np.uint32(yLim[iCore] * dsFactor),
+                      np.uint32(x[iCore] * dsFactor):np.uint32(xLim[iCore] * dsFactor)]
+        if not args.tissue:
+            initialmask = resize(initialmask,size(coreSlice),cv2.INTER_NEAREST)
 
-        singleProbMap = classProbs[np.uint32(y[iCore]*dsFactor):np.uint32(yLim[iCore]*dsFactor),np.uint32(x[iCore]*dsFactor):np.uint32(xLim[iCore]*dsFactor)]
-        singleProbMap = resize(np.uint8(255*singleProbMap),size(coreSlice),cv2.INTER_NEAREST)
-        TMAmask = coreSegmenterOutput(coreSlice,singleProbMap,initialmask,coreRad/20,False)
+            singleProbMap = classProbs[np.uint32(y[iCore]*dsFactor):np.uint32(yLim[iCore]*dsFactor),np.uint32(x[iCore]*dsFactor):np.uint32(xLim[iCore]*dsFactor)]
+            singleProbMap = resize(np.uint8(255*singleProbMap),size(coreSlice),cv2.INTER_NEAREST)
+            TMAmask = coreSegmenterOutput(coreSlice,initialmask,False)
+        else:
+            Irs = resize(coreSlice,(int((float(coreSlice.shape[0]) * 0.25)),int((float(coreSlice.shape[1]) * 0.25))))
+            TMAmask = coreSegmenterOutput(Irs, np.uint8(initialmask), False)
+
        if np.sum(TMAmask)==0:
            TMAmask = np.ones(TMAmask.shape)
        vsize = int(float(coreSlice.shape[0]))
@@ -786,17 +785,16 @@
        masksub = resize(resize(TMAmask,(vsize,hsize),cv2.INTER_NEAREST),(int((float(coreSlice.shape[0])*dsFactor)),int((float(coreSlice.shape[1])*dsFactor))),cv2.INTER_NEAREST)
        singleMaskTMA[int(y[iCore]*dsFactor):int(y[iCore]*dsFactor)+masksub.shape[0],int(x[iCore]*dsFactor):int(x[iCore]*dsFactor)+masksub.shape[1]]=masksub
        maskTMA = maskTMA + resize(singleMaskTMA,maskTMA.shape,cv2.INTER_NEAREST)
-        cv2.putText(imagesub, str(iCore+1), (int(P[iCore].centroid[1]),int(P[iCore].centroid[0])), 0, 0.5, (np.amax(imagesub), np.amax(imagesub), np.amax(imagesub)), 1, cv2.LINE_AA)
+
+        cv2.putText(imagesub, str(iCore+1), (int(P[iCore].centroid[1]),int(P[iCore].centroid[0])), 0, 0.5, (0,255,0), 1, cv2.LINE_AA)
 
        skio.imsave(maskOutputPath + os.path.sep + str(iCore+1) + '_mask.tif',np.uint8(TMAmask))
-        print('Segmented core ' + str(iCore+1))
+        print('Segmented core/tissue ' + str(iCore+1))
 
    boundaries = find_boundaries(maskTMA)
-    imagesub = imagesub/np.percentile(imagesub,99.9)
-    imagesub[boundaries==1] = 1
-    skio.imsave(outputPath + os.path.sep + 'TMA_MAP.tif' ,np.uint8(imagesub*255))
-    print('Segmented all cores!')
-
+    imagesub[boundaries==1] = 255
+    skio.imsave(outputPath + os.path.sep + 'TMA_MAP.tif' ,imagesub)
+    print('Segmented all cores/tissues!')
 
 #restore GPU to 0
 #image load using tifffile
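The main change in UNetCoreograph.py is the new --tissue mode, which builds a whole-tissue mask from the UNet probability map instead of detecting individual cores. A standalone sketch of that mask construction (blur, Otsu threshold, morphological closing, hole filling, small-object removal); prob_map is a placeholder for the probability map returned by getProbMaps, and the helper name tissue_mask is illustrative only:

    # Sketch of the tissue-mode mask construction added in this changeset.
    # `prob_map` is assumed to be a float probability map in [0, 1].
    import numpy as np
    from scipy.ndimage import binary_fill_holes
    from skimage.filters import gaussian, threshold_otsu
    from skimage.morphology import binary_closing, remove_small_objects

    def tissue_mask(prob_map, blur=5):
        # Blur the 8-bit probability map to suppress speckle before thresholding.
        blurred = gaussian(np.uint8(255 * prob_map), blur)
        # Otsu picks a global threshold separating tissue from background.
        mask = blurred > threshold_otsu(blurred)
        # Close small gaps, then fill enclosed holes inside each tissue piece.
        mask = binary_fill_holes(binary_closing(mask, np.ones((blur * 2, blur * 2))))
        # Discard fragments smaller than 0.1% of the image area.
        return remove_small_objects(mask, min_size=0.001 * mask.shape[0] * mask.shape[1])

In the script itself, the resulting mask is then split into individual tissue pieces with a distance-transform watershed before cropping, mirroring the TMA branch.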
--- a/coreograph.xml	Wed May 19 21:34:38 2021 +0000
+++ b/coreograph.xml	Fri Mar 11 23:40:51 2022 +0000
@@ -12,6 +12,8 @@
         ln -s $source_image `basename $type_corrected`;
 
         @CMD_BEGIN@
+
+        python \$UNET_PATH
         --imagePath `basename $type_corrected`
         --downsampleFactor $downsamplefactor
         --channel $channel
@@ -26,6 +28,10 @@
         --cluster
         #end if
 
+        #if $tissue
+        --tissue
+        #end if
+
         --outputPath .;
     ]]></command>
@@ -39,6 +45,7 @@
         <param name="sensitivity" type="float" value="0.3" label="Sensitivity"/>
         <!--<param name="usegrid" type="boolean" label="Use Grid"/>-->
         <param name="cluster" type="boolean" checked="false" label="Cluster"/>
+        <param name="tissue" type="boolean" checked="false" label="Tissue"/>
     </inputs>
 
     <outputs>
--- a/macros.xml	Wed May 19 21:34:38 2021 +0000
+++ b/macros.xml	Fri Mar 11 23:40:51 2022 +0000
@@ -24,6 +24,13 @@
         </citations>
     </xml>
 
-    <token name="@VERSION@">2.2.0</token>
-    <token name="@CMD_BEGIN@">python ${__tool_directory__}/UNetCoreograph.py</token>
+    <token name="@VERSION@">2.2.8</token>
+    <token name="@CMD_BEGIN@"><![CDATA[
+    UNET_PATH="";
+    if [ -f "/app/UNetCoreograph.py" ]; then
+        export UNET_PATH="/app/UNetCoreograph.py";
+    else
+        export UNET_PATH="${__tool_directory__}/UNetCoreograph.py";
+    fi;
+    ]]></token>
 </macros>
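The rewritten @CMD_BEGIN@ token resolves the script location at run time: it prefers the copy baked into the Docker image at /app/UNetCoreograph.py and falls back to the Galaxy tool directory. A small Python sketch of the same fallback logic, with the tool-directory argument as a placeholder:

    # Illustration of the path resolution performed by the @CMD_BEGIN@ shell token:
    # prefer the script shipped in the Docker image, otherwise use the tool directory.
    import os

    def resolve_unet_path(tool_directory="/path/to/tool_directory"):
        docker_copy = "/app/UNetCoreograph.py"
        if os.path.isfile(docker_copy):
            return docker_copy
        return os.path.join(tool_directory, "UNetCoreograph.py")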