├── DirtOcr ├── ocr.py ├── errors.py ├── util.py ├── __init__.py └── pytesser.py ├── DIRTX-2D.png ├── dirt_logo.jpg ├── dirt_logo.png ├── images ├── body-bg.jpg ├── header-bg.jpg ├── sidebar-bg.jpg ├── github-button.png ├── highlight-bg.jpg └── download-button.png ├── requirements.txt ├── .gitignore ├── Dockerfile ├── traits.csv ├── Copyright ├── outputCrawler.py ├── runOnFolder.py ├── Singularity ├── Skeleton.py ├── fixImageOrientation.py ├── params.json ├── Masking.py ├── README.md ├── ransac.py ├── dirtIO.py ├── kmeans.py ├── RootTipPaths.py ├── index.html ├── Preprocessing.py ├── main.py ├── Analysis.py └── Segmentation.py /DirtOcr/ocr.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /DIRTX-2D.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Computational-Plant-Science/DIRT/HEAD/DIRTX-2D.png -------------------------------------------------------------------------------- /dirt_logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Computational-Plant-Science/DIRT/HEAD/dirt_logo.jpg -------------------------------------------------------------------------------- /dirt_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Computational-Plant-Science/DIRT/HEAD/dirt_logo.png -------------------------------------------------------------------------------- /images/body-bg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Computational-Plant-Science/DIRT/HEAD/images/body-bg.jpg -------------------------------------------------------------------------------- /images/header-bg.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Computational-Plant-Science/DIRT/HEAD/images/header-bg.jpg -------------------------------------------------------------------------------- /images/sidebar-bg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Computational-Plant-Science/DIRT/HEAD/images/sidebar-bg.jpg -------------------------------------------------------------------------------- /images/github-button.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Computational-Plant-Science/DIRT/HEAD/images/github-button.png -------------------------------------------------------------------------------- /images/highlight-bg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Computational-Plant-Science/DIRT/HEAD/images/highlight-bg.jpg -------------------------------------------------------------------------------- /images/download-button.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Computational-Plant-Science/DIRT/HEAD/images/download-button.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | wheel==0.36.2 2 | Cython==0.29.21 3 | mahotas==1.4.11 4 | numpy==1.19.2 5 | scipy==1.5.2 6 | certifi==2020.12.5 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/*.pyc 2 | 3 | # Eclipse IDE's project files 4 | .project 5 | .pydevproject 6 | .settings 7 | .idea 8 | 9 | # Dirt output 10 | dirt_out.csv 11 | 12 | # test data 13 | tests 14 | data 15 | -------------------------------------------------------------------------------- 
/DirtOcr/errors.py: -------------------------------------------------------------------------------- 1 | """Test for exceptions raised in the tesseract.exe logfile""" 2 | 3 | class Tesser_General_Exception(Exception): 4 | pass 5 | 6 | class Tesser_Invalid_Filetype(Tesser_General_Exception): 7 | pass 8 | 9 | def check_for_errors(logfile = "tesseract.log"): 10 | inf = file(logfile) 11 | text = inf.read() 12 | inf.close() 13 | # All error conditions result in "Error" somewhere in logfile 14 | if text.find("Error") != -1: 15 | raise Tesser_General_Exception, text -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tiagopeixoto/graph-tool 2 | 3 | LABEL maintainer="Wes Bonelli" 4 | 5 | # RUN apt-get update && \ 6 | # echo 'deb http://downloads.skewed.de/apt/xenial xenial universe' | tee -a /etc/apt/sources.list && \ 7 | # echo 'deb-src http://downloads.skewed.de/apt/xenial xenial universe' | tee -a /etc/apt/sources.list && \ 8 | COPY . /opt/DIRT 9 | 10 | RUN pacman -S --noconfirm gcc git python-pip && \ 11 | cd /opt/DIRT && \ 12 | sed -i 's#/usr/local/bin/zbarimg#/usr/bin/zbarimg#' /opt/DIRT/DirtOcr/__init__.py && \ 13 | pip install -r /opt/DIRT/requirements.txt 14 | 15 | ENV LC_ALL=C 16 | ENV DISPLAY=:1 17 | 18 | CMD python /opt/DIRT/main.py "$@" 19 | -------------------------------------------------------------------------------- /DirtOcr/util.py: -------------------------------------------------------------------------------- 1 | """Utility functions for processing images for delivery to Tesseract""" 2 | 3 | import os 4 | 5 | def image_to_scratch(im, scratch_image_name): 6 | """Saves image in memory to scratch file. 
.bmp format will be read correctly by Tesseract""" 7 | im.save(scratch_image_name, dpi=(300,300)) 8 | 9 | def retrieve_text(scratch_text_name_root): 10 | inf = file(scratch_text_name_root + '.txt') 11 | text = inf.read() 12 | inf.close() 13 | return text 14 | 15 | def perform_cleanup(scratch_image_name, scratch_text_name_root): 16 | """Clean up temporary files from disk""" 17 | for name in (scratch_image_name, scratch_text_name_root + '.txt', "tesseract.log"): 18 | try: 19 | os.remove(name) 20 | except OSError: 21 | pass 22 | -------------------------------------------------------------------------------- /traits.csv: -------------------------------------------------------------------------------- 1 | TRAIT,VALUE DIA_STM,1 DIA_STM_SIMPLE,1 AREA,1 AVG_DENSITY,1 TD_MED,1 TD_AVG,1 WIDTH_MED,1 WIDTH_MAX,1 D10,1 D20,1 D30,1 D40,1 D50,1 D60,1 D70,1 D80,1 D90,1 DS10,1 DS20,1 DS30,1 DS40,1 DS50,1 DS60,1 DS70,1 DS80,1 DS90,1 RDISTR_X,1 RDISTR_Y,1 SKL_DEPTH,1 SKL_WIDTH,1 RTP_COUNT,1 ANG_TOP,1 ANG_BTM,1 STA_RANGE,1 STA_MIN,1 STA_MAX,1 STA_MED,1 RTA_RANGE,1 RTA_MIN,1 RTA_MAX,1 RTA_MED,1 STA_DOM_I,1 STA_DOM_II,1 RTA_DOM_I,1 RTA_DOM_II,1 STA_25_I,1 STA_25_II,1 STA_50_I,1 STA_50_II,1 STA_75_I,1 STA_75_II,1 STA_90_I,1 STA_90_II,1 NR_RTP_SEG_I,1 NR_RTP_SEG_II,1 ADVT_COUNT,1 BASAL_COUNT,1 ADVT_ANG,1 BASAL_ANG,1 HYP_DIA,1 TAP_DIA,1 MAX_DIA_90,1 DROP_50,1 CP_DIA25,1 CP_DIA50,1 CP_DIA75,1 CP_DIA90,1 NODAL_LEN,1 NODAL_AVG_DIA,1 LT_BRA_FRQ,1 LT_AVG_LEN,1 LT_AVG_ANG,1 LT_ANG_RANGE,1 LT_MIN_ANG,1 LT_MAX_ANG,1 LT_DIST_FIRST,1 LT_MED_DIA,1 LT_AVG_DIA,1 -------------------------------------------------------------------------------- /DirtOcr/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created on Feb 12, 2013 3 | 4 | @author: koalaspirit 5 | ''' 6 | from pytesser import * 7 | import PIL as pil 8 | import numpy as np 9 | import subprocess 10 | import os 11 | import scipy.misc 12 | 13 | 14 | def 
getTextFromImage(tagImg,scratchPath,scratchText='temp'): 15 | 16 | scipy.misc.imsave(scratchPath+'temp.bmp',tagImg) 17 | set_scratch_text_name_root(scratchPath, 'temp.bmp') 18 | text = image_file_to_string(scratchPath+'temp.bmp', cleanup = cleanup_scratch_flag, graceful_errors=True) 19 | text = text.translate(None, ",!.;:'{}[]-=()*&^%$#@!~`<>?/|\_+") 20 | text = ''.join(c for c in text if (c.isalnum() or ' ' or ',')) 21 | text = ' '.join(text.split()) 22 | print 'Experiment code: '+text 23 | return text 24 | 25 | def getCodeFromImage(tagImg,scratchPath): 26 | 27 | scipy.misc.imsave(scratchPath+'temp.bmp',tagImg) 28 | args = ['/usr/local/bin/zbarimg','-q',scratchPath+'temp.bmp'] 29 | try: 30 | code = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0] 31 | except Exception as ex: 32 | print 'Exception while running zbarimg', ex 33 | print 'BarCode detected: '+str(code) 34 | return code 35 | -------------------------------------------------------------------------------- /Copyright: -------------------------------------------------------------------------------- 1 | The code is free for non-commercial use. 2 | Please contact the author for commercial use. 3 | 4 | ------------------------------------------------------------------------------------------- 5 | Author: Alexander Bucksch 6 | School of Biology and Interactive computing 7 | Georgia Institute of Technology 8 | 9 | Mail: bucksch@gatech.edu 10 | Web: http://www.bucksch.nl 11 | ------------------------------------------------------------------------------------------- 12 | 13 | Copyright (c) 2014 Alexander Bucksch 14 | All rights reserved. 15 | 16 | Redistribution and use in source and binary forms, with or without 17 | modification, are permitted provided that the following conditions are 18 | met: 19 | 20 | * Redistributions of source code must retain the above copyright 21 | notice, this list of conditions and the following disclaimer. 
22 | 23 | * Redistributions in binary form must reproduce the above 24 | copyright notice, this list of conditions and the following 25 | disclaimer in the documentation and/or other materials provided 26 | with the distribution. 27 | 28 | * Neither the name of the DIRT Developers nor the names of its 29 | contributors may be used to endorse or promote products derived 30 | from this software without specific prior written permission. 31 | 32 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 33 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 34 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 35 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 36 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 37 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 38 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 39 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 40 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 41 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 42 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- /outputCrawler.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created on May 21, 2013 3 | 4 | @author: Alexander Bucksch 5 | ''' 6 | 7 | ''' 8 | # standard python imports 9 | ''' 10 | import os 11 | import csv 12 | 13 | def combineOutput(dirName): 14 | 15 | ''' save file''' 16 | 17 | ''' remove the old output ''' 18 | try: 19 | os.remove(os.path.join(dirName, 'outputAll.csv')) 20 | except: 21 | pass 22 | try: 23 | os.remove(os.path.join(dirName, 'crawler.out')) 24 | except: 25 | pass 26 | files = os.listdir(dirName) 27 | directories=[] 28 | for i in files: 29 | if os.path.isdir(os.path.join(dirName, i)): 30 | directories.append(i) 31 | 32 | ''' copy header''' 33 | print directories 34 | with open(os.path.join(dirName, directories[0], 'output.csv'), 'U') as csvfile: 35 | filedata = csv.reader(csvfile) 36 | rows = filedata.next() 37 | 38 | with open(os.path.join(dirName, 'outputAll.csv'), 'w') as f: 39 | filewriter = csv.writer(f) 40 | filewriter.writerow(rows) 41 | ''' append data''' 42 | countOK = 0 43 | countBAD = 0 44 | badFolder = [] 45 | for f in directories: 46 | try: 47 | with open(os.path.join(dirName , f , 'output.csv'), 'U') as csvfile: 48 | countOK += 1 49 | filedata = csv.reader(csvfile) 50 | with open(os.path.join(dirName, 'outputAll.csv'), 'a+') as of: 51 | filewriter=csv.writer(of) 52 | rows=filedata.next() 53 | rows=filedata.next() 54 | filewriter.writerow(rows) 55 | except: 56 | countBAD += 1 57 | badFolder.append(f) 58 | # Open a file 59 | with open(os.path.join(dirName, "crawler.out"), "wb") as fo: 60 | fo.write( str(countOK) + ' images are processed and ' +str(countBAD) + ' images failed: \n' + str(badFolder)) 61 | 62 | print str(countOK)+' images are processed and '+str(countBAD)+' images failed: \n'+str(badFolder) 63 | -------------------------------------------------------------------------------- /runOnFolder.py: 
-------------------------------------------------------------------------------- 1 | ''' 2 | Created on Feb 12, 2014 3 | 4 | @author: Alexander Bucksch 5 | ''' 6 | 7 | ''' 8 | # external library imports 9 | ''' 10 | import outputCrawler as oc 11 | import time 12 | ''' 13 | # python standard imports 14 | ''' 15 | import os 16 | import sys 17 | import multiprocessing 18 | import subprocess 19 | 20 | 21 | def calculate(args): 22 | try: 23 | return subprocess.call(args) 24 | except: 25 | print "ERROR in File: "+str(args[2]) 26 | 27 | if __name__ == '__main__': 28 | print os.getcwd() 29 | print "runOnFolder.py " 30 | main_py_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'main.py')) 31 | startT=time.time() 32 | dir = os.path.abspath(sys.argv[1]) 33 | work_dir = os.path.abspath(sys.argv[2]) 34 | if not os.path.exists(work_dir): 35 | os.makedirs(work_dir) 36 | seg=sys.argv[3] 37 | marker=sys.argv[4] 38 | files=os.listdir(dir) 39 | pool = multiprocessing.Pool(processes=20) 40 | args=[] 41 | for idx,i in enumerate(files): 42 | if i != '.DS_Store' and os.path.isfile(os.path.join(dir, i)): 43 | if not os.path.isdir(os.path.join(dir, str(idx))): 44 | args.append(['python', main_py_path, 45 | os.path.join(dir, str(i)), # samples path 46 | str(idx), # unique identifier 47 | seg, # mask threshold 48 | '1', # excised root 49 | '1', # crown root 50 | '1', # segmentation 51 | marker, # marker diameter 52 | '0', # stem reconstruction 53 | '0', # plots 54 | '0', # output format 55 | work_dir, # working directory 56 | 'traits.csv']) # trait file path 57 | r = pool.map_async(calculate, args) 58 | r.wait() # Wait on the results 59 | print 'All files done in '+str(time.time()-startT)+'s !' 
60 | print 'Collecting results' 61 | oc.combineOutput(work_dir) 62 | print 'Results written to ' + os.path.join(work_dir, 'outputAll.csv') 63 | -------------------------------------------------------------------------------- /Singularity: -------------------------------------------------------------------------------- 1 | BootStrap: docker 2 | From: ubuntu:16.04 3 | 4 | %help 5 | Container for running DIRT 1.1 - An automatic highthroughput root phenotyping platform 6 | (c) 2014 Alexander Bucksch - bucksch@uga.edu 7 | Web application by Abhiram Das - abhiram.das@gmail.com 8 | Singularity container by Chris Cotter - cotter@uga.edu 9 | 10 | http://dirt.iplantcollaborative.org 11 | 12 | University of Georgia 13 | ------------------------------------------------------------ 14 | Program usage: python main.py (please configure the program with the options.csv file) 15 | full path to file with the root image 16 | ID which will be a folder name in the working directory. Integer value needed 17 | multiplier for the automatically determined mask threshold. 1.0 works fine and is default. If flashlight is used, the 0.6 is a good choice. 18 | 1 - excised root analysis is on, 0 - excised root analysis is off 19 | 1 - crown root analysis is on, 0 - crown root analysis is off 20 | 1 - is on, 0 - is off 21 | a simple decimal e.g. 25.4. If 0.0 is used, then the output will have pixels as unit. 
22 | 1 - reconstruction is turned on, 0 - reconstruction is turned off 23 | 1 - plotting data is stored, 0 - plotting data is not stored 24 | 1 - the full trait set is put into one excel file containing empty cells for traits that were not computed, 0 - only computed files are written to the output file 25 | full path to folder were the result is stored 26 | full path to .csv file containing the traits to be computed 27 | 28 | Example: 29 | singularity run DIRT.simg /Documents/image_name.jpg 8 25.0 1 1 1 25.1 0 0 0 /Documents/image_folder/ /Documents/traits.csv 30 | 31 | %labels 32 | Maintainer Chris Cotter 33 | Version v1.0 34 | 35 | %post 36 | # Required for graph-tools 37 | apt-key adv --keyserver pgp.skewed.de --recv-key 612DEFB798507F25 38 | echo 'deb http://downloads.skewed.de/apt/xenial xenial universe' | tee -a /etc/apt/sources.list 39 | echo 'deb-src http://downloads.skewed.de/apt/xenial xenial universe' | tee -a /etc/apt/sources.list 40 | 41 | apt-get update 42 | apt-get -y install git python2.7 python-pip python-graph-tool 43 | 44 | cd /opt 45 | git clone git://github.com/Computational-Plant-Science/DIRT.git 46 | cd /opt/DIRT 47 | sed -i 's#/usr/local/bin/zbarimg#/usr/bin/zbarimg#' /opt/DIRT/DirtOcr/__init__.py 48 | pip install -r /opt/DIRT/requirements.txt 49 | 50 | #cleanup 51 | apt-get clean 52 | apt-get purge 53 | 54 | %environment 55 | export LC_ALL=C #Required for pip to run correctly 56 | export DISPLAY=:0 #Addresses "Failed to connect to Mir:" error 57 | 58 | %runscript 59 | python /opt/DIRT/main.py "$@" 60 | -------------------------------------------------------------------------------- /DirtOcr/pytesser.py: -------------------------------------------------------------------------------- 1 | """OCR in Python using the Tesseract engine from Google 2 | http://code.google.com/p/pytesser/ 3 | by Michael J.T. 
O'Kelly 4 | V 0.0.1, 3/10/07""" 5 | 6 | from PIL import Image 7 | import subprocess 8 | import os 9 | import util 10 | import errors 11 | 12 | tesseract_exe_name = '/opt/local/bin/tesseract' # Name of executable to be called at command line 13 | scratch_image_name = "temp.bmp" # This file must be .bmp or other Tesseract-compatible format 14 | scratch_text_name_root = os.curdir+"/temp" # Leave out the .txt extension 15 | cleanup_scratch_flag = True # Temporary files cleaned up after OCR operation 16 | 17 | def set_scratch_text_name_root(path,text): 18 | global scratch_image_name 19 | scratch_image_name = text+".bmp" # This file must be .bmp or other Tesseract-compatible format 20 | global scratch_text_name_root 21 | scratch_text_name_root = path # Leave out the .txt extension 22 | 23 | def call_tesseract(input_filename, output_filename): 24 | """Calls external tesseract.exe on input file (restrictions on types), 25 | outputting output_filename+'txt'""" 26 | args = [tesseract_exe_name, input_filename, output_filename] 27 | proc = subprocess.call(args) 28 | #retcode = proc.wait() 29 | #if retcode!=0: 30 | #errors.check_for_errors() 31 | 32 | def image_to_string(im, cleanup = cleanup_scratch_flag): 33 | """Converts im to file, applies tesseract, and fetches resulting text. 
34 | If cleanup=True, delete scratch files after operation.""" 35 | print "------------------" 36 | print scratch_text_name_root 37 | print scratch_image_name 38 | try: 39 | util.image_to_scratch(im, scratch_text_name_root+scratch_image_name) 40 | call_tesseract(scratch_image_name, scratch_text_name_root) 41 | text = util.retrieve_text(scratch_text_name_root) 42 | finally: 43 | if cleanup: 44 | util.perform_cleanup(scratch_image_name, scratch_text_name_root) 45 | return text 46 | 47 | def image_file_to_string(filename, cleanup = cleanup_scratch_flag, graceful_errors=True): 48 | """Applies tesseract to filename; or, if image is incompatible and graceful_errors=True, 49 | converts to compatible format and then applies tesseract. Fetches resulting text. 50 | If cleanup=True, delete scratch files after operation.""" 51 | try: 52 | try: 53 | call_tesseract(filename, scratch_text_name_root) 54 | text = util.retrieve_text(scratch_text_name_root) 55 | except errors.Tesser_General_Exception: 56 | if graceful_errors: 57 | im = Image.open(filename) 58 | text = image_to_string(im, cleanup) 59 | else: 60 | raise 61 | finally: 62 | if cleanup: 63 | util.perform_cleanup(scratch_image_name, scratch_text_name_root) 64 | return text 65 | 66 | 67 | if __name__=='__main__': 68 | im = Image.open('phototest.tif') 69 | text = image_to_string(im) 70 | print text 71 | try: 72 | text = image_file_to_string('fnord.tif', graceful_errors=False) 73 | except errors.Tesser_General_Exception, value: 74 | print "fnord.tif is incompatible filetype. 
Try graceful_errors=True" 75 | print value 76 | text = image_file_to_string('fnord.tif', graceful_errors=True) 77 | print "fnord.tif contents:", text 78 | text = image_file_to_string('fonts_test.png', graceful_errors=True) 79 | print text 80 | 81 | 82 | -------------------------------------------------------------------------------- /Skeleton.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Skeleton.py 3 | 4 | Simple class to compute the medial axis (skeleton) and the distance map. 5 | 6 | The code is free for non-commercial use. 7 | Please contact the author for commercial use. 8 | 9 | Please cite the DIRT Paper if you use the code for your scientific project. 10 | 11 | Bucksch et al., 2014 "Image-based high-throughput field phenotyping of crop roots", Plant Physiology 12 | 13 | ------------------------------------------------------------------------------------------- 14 | Author: Alexander Bucksch 15 | School of Biology and Interactive computing 16 | Georgia Institute of Technology 17 | 18 | Mail: bucksch@gatech.edu 19 | Web: http://www.bucksch.nl 20 | ------------------------------------------------------------------------------------------- 21 | 22 | Copyright (c) 2014 Alexander Bucksch 23 | All rights reserved. 24 | 25 | Redistribution and use in source and binary forms, with or without 26 | modification, are permitted provided that the following conditions are 27 | met: 28 | 29 | * Redistributions of source code must retain the above copyright 30 | notice, this list of conditions and the following disclaimer. 31 | 32 | * Redistributions in binary form must reproduce the above 33 | copyright notice, this list of conditions and the following 34 | disclaimer in the documentation and/or other materials provided 35 | with the distribution. 
36 | 37 | * Neither the name of the DIRT Developers nor the names of its 38 | contributors may be used to endorse or promote products derived 39 | from this software without specific prior written permission. 40 | 41 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 42 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 43 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 44 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 45 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 46 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 47 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 48 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 49 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 50 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 51 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 52 | ''' 53 | ''' 54 | # external library imports 55 | ''' 56 | import mahotas as m 57 | import numpy as np 58 | 59 | class Skeleton(object): 60 | ''' 61 | classdocs 62 | ''' 63 | 64 | 65 | def __init__(self,img): 66 | ''' 67 | Constructor 68 | ''' 69 | self.__img=img 70 | 71 | def skel(self, img): 72 | img[0,:]=0 # make 1st line in the image black to achieve consistent result between distance field and medial axis skeleton. 73 | img[len(img)-1,:]=0 # make last line in the image black to achieve consistent result between distance field and medial axis skeleton. 74 | img[:,len(img[0])-1]=0 # make right column in the image black to achieve consistent result between distance field and medial axis skeleton. 75 | img[:,0]=0 # make left column in the image black to achieve consistent result between distance field and medial axis skeleton. 
76 | dmap = m.distance(img>0,metric='euclidean') 77 | dmap=np.sqrt(dmap)*2 78 | skelImg=m.thin(img>0) 79 | 80 | return skelImg, dmap 81 | 82 | 83 | -------------------------------------------------------------------------------- /fixImageOrientation.py: -------------------------------------------------------------------------------- 1 | ''' 2 | fixImageOrientation.py 3 | 4 | All credits go to Kyle Fox who wrote this EXIF orientation patch. 5 | We just modified tiny pieces. https://github.com/kylefox 6 | 7 | The code is free for non-commercial use. 8 | Please contact the author for commercial use. 9 | 10 | Please cite the DIRT Paper if you use the code for your scientific project. 11 | 12 | Bucksch et al., 2014 "Image-based high-throughput field phenotyping of crop roots", Plant Physiology 13 | 14 | ------------------------------------------------------------------------------------------- 15 | Author: Alexander Bucksch 16 | School of Biology and Interactive computing 17 | Georgia Institute of Technology 18 | 19 | Mail: bucksch@gatech.edu 20 | Web: http://www.bucksch.nl 21 | ------------------------------------------------------------------------------------------- 22 | 23 | Copyright (c) 2014 Alexander Bucksch 24 | All rights reserved. 25 | 26 | Redistribution and use in source and binary forms, with or without 27 | modification, are permitted provided that the following conditions are 28 | met: 29 | 30 | * Redistributions of source code must retain the above copyright 31 | notice, this list of conditions and the following disclaimer. 32 | 33 | * Redistributions in binary form must reproduce the above 34 | copyright notice, this list of conditions and the following 35 | disclaimer in the documentation and/or other materials provided 36 | with the distribution. 37 | 38 | * Neither the name of the DIRT Developers nor the names of its 39 | contributors may be used to endorse or promote products derived 40 | from this software without specific prior written permission. 
41 | 42 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 43 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 44 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 45 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 46 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 47 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 48 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 49 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 50 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 51 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 52 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 | 54 | ''' 55 | 56 | from PIL import Image, ImageFile 57 | 58 | __all__ = ['fix_orientation'] 59 | 60 | # PIL's Error "Suspension not allowed here" work around: 61 | # s. http://mail.python.org/pipermail/image-sig/1999-August/000816.html 62 | ImageFile.MAXBLOCK = 1024*1024 63 | 64 | # The EXIF tag that holds orientation data. 65 | EXIF_ORIENTATION_TAG = 274 66 | 67 | # Obviously the only ones to process are 3, 6 and 8. 68 | # All are documented here for thoroughness. 69 | ORIENTATIONS = { 70 | 1: ("Normal", 0), 71 | 2: ("Mirrored left-to-right", 0), 72 | 3: ("Rotated 180 degrees", 180), 73 | 4: ("Mirrored top-to-bottom", 0), 74 | 5: ("Mirrored along top-left diagonal", 0), 75 | 6: ("Rotated 90 degrees", -90), 76 | 7: ("Mirrored along top-right diagonal", 0), 77 | 8: ("Rotated 270 degrees", -270) 78 | } 79 | 80 | def fix_orientation(img, save_over=False): 81 | """ 82 | `img` can be an Image instance or a path to an image file. 83 | `save_over` indicates if the original image file should be replaced by the new image. 84 | * Note: `save_over` is only valid if `img` is a file path. 
85 | """ 86 | path = None 87 | if not isinstance(img, Image.Image): 88 | path = img 89 | img = Image.open(path) 90 | elif save_over: 91 | raise ValueError("You can't use `save_over` when passing an Image instance. Use a file path instead.") 92 | try: 93 | orientation = img._getexif()[EXIF_ORIENTATION_TAG] 94 | except (TypeError, AttributeError, KeyError): 95 | print "WARNING: Image file has no EXIF data." 96 | orientation=-1 97 | pass 98 | if orientation in [3,6,8]: 99 | degrees = ORIENTATIONS[orientation][1] 100 | img = img.rotate(degrees) 101 | if save_over and path is not None: 102 | try: 103 | img.save(path, quality=100) 104 | except IOError: 105 | # Try again, without optimization (PIL can't optimize an image 106 | # larger than ImageFile.MAXBLOCK, which is 64k by default). 107 | # Setting ImageFile.MAXBLOCK should fix this....but who knows. 108 | img.save(path, quality=100) 109 | return (img, degrees) 110 | else: 111 | return (img, 0) -------------------------------------------------------------------------------- /params.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Dirt", 3 | "tagline": "The source code of the publication :Image-based high-throughput field phenotyping of crop roots\"", 4 | "body": "# DIRT 1.1 - An automatic high throughput root phenotyping platform \r\n(c) 2014 Alexander Bucksch - bucksch@gatech.edu, Georgia Institute of Technology \r\n(c) 2016 Alexander Bucksch - bucksch@uga.edu, University of Georgia, Athens \r\nWeb application by Abhiram Das - adas30@biology.gatech.edu\r\n\r\n\r\nWeb application: \r\n\r\n\r\nUser and developer group: \r\n\r\n## Dependencies\r\nThe software is written and tested in:\r\n- python 2.7 \r\n\r\n###Required packages:\r\n- the graphtools package \r\n- the mahotas package \r\n- the numpy package \r\n- the scipy package \r\n\r\n###Optional packages:\r\nOptionally binaries of standard OCR and BarCode software can be used for tag recognition: \r\n- 
tesseract \r\npaths have to be adjusted in /DIRTocr/pytesser.py (line 12-14)\r\n\r\n- zbar \r\npath has to be adjusted in /DIRTocr/__init__.py (line 28)\r\n\r\n##Command line usage\r\n- full path to the root image \r\n- ID which will be a folder name in the working directory. Integer value needed. \r\n- multiplier for the automatically determined mask threshold. 1.0 works fine and is default. For example, if a flashlight is used to take root images, then 0.6 is a good choice. \r\n- number of roots placed at the right of the root crown, 0 - excised root analysis is off \r\n- 1 - crown root analysis is on, 0 - crown root analysis is off \r\n- 1 - is on, 0 - is off. Off refers to a pre-existing segmention done with DIRT. Binary masks as input images are detected automatically. \r\n- a simple decimal e.g. 25.4. If 0.0 is used, then the output will have pixels as unit. \r\n- 1 - reconstruction is turned on, 0 - reconstruction is turned off \r\n- 1 - plotting data is stored, 0 - plotting data is not stored \r\n- 1 - the full trait set is put into one excel file containing empty cells for traits that were not computed, 0 - only computed files are written to the output file \r\n- full path to folder were the result is stored \r\n- full path to .csv file containing the traits to be computed' \r\n\r\n####Example: \r\n python main.py /Documents/image_name.jpg 8 25.0 1 1 1 25.1 0 0 0 /Documents/image_folder/ /Documents/traits.csv\r\n\r\nNotes on common questions:\r\n- Input is restricted to .jpg, .png and .tif images \r\n- It is not possible to analyze only an excised root when a root crown is in the image. However, it is possible to analyze compute images containing only excised roots. \r\n\r\n#### Running DIRT on folder content\r\nFor convenience we provide the runOnFolder script, that executes DIRT on all images in a specified folder. 
\r\nNote we made the masking threshold available on the command line because of user requests.\r\n\r\n####Example: \r\n python runOnFolder.py /Users/image_folder/ \r\n\r\nPlease adjust line 86 according to the description above and note that the script uses 6 cores to compute images in parallel. The number of cores can be adjusted in line 80.\r\n\r\n\r\n##Updates in DIRT 1.1 (11 January 2016):\r\n\r\n- Minor bug fixes in Preprocessing.py to allow smaller circle markers and fix a possible missdetection of the experiment tag as the circle. \r\nThanks to Linda Zamariola (U Bologna) for finding this issue. \r\n\r\n##Updates in DIRT 1.1 (4 November 2015):\r\n\r\n- Minor bug fixes in the excised root calculations. Thanks to Alexandre Grondin (U Nebraska) for discovering and validating the fixes. \r\n\r\n##Changes in DIRT 1.1 (14 January 2015):\r\n\r\n- storage of trait values is changed from a list data structure to a dictionary to allow trait selection controlled by the file traits.csv \r\n- added support for trait selection to reduce computation time. See example file traits.csv (1 - trait is computed, 0 - trait is not computed) \r\n- removed unused tip-diameter switch on the command line \r\n- add stem reconstruction switch on the command line to turn the experimental stem reconstruction on/off \r\n- output file now uses the codes in the trait.csv file and only contains selected traits \r\n- removed several unused variables and minor bugs fixed \r\n- added command line option to turn storage of numpy arrays on/off. These files can be used to plot the individual root statistics and can be found in the \"Plots\" folders. \r\n- new (experimental, not validated) traits added due to community requests: projected root area, width and depth of the skeleton (medial axis), top and bottom angle for monocots, segmentation of adventious and basal roots for legumes to retrieve taproot and hypocotyl diameter and adventious and basal root counts. 
\r\n- added computational statistics such as computation time and graph size to help balancing grid installations \r\n- added an option to have an output file with all possible traits that contains empty cells for not computed traits in the output.csv file. This was a developer request to enable faster ingestion into data bases \r\n", 5 | "note": "Don't delete this file! It's used internally to help with page regeneration." 6 | } -------------------------------------------------------------------------------- /Masking.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Masking.py 3 | 4 | The MAsking module for DIRT. This class contains all functions to compute the binary masked as described in the paper. 5 | 6 | The code is free for non-commercial use. 7 | Please contact the author for commercial use. 8 | 9 | Please cite the DIRT Paper if you use the code for your scientific project. 10 | 11 | Bucksch et al., 2014 "Image-based high-throughput field phenotyping of crop roots", Plant Physiology 12 | 13 | ------------------------------------------------------------------------------------------- 14 | Author: Alexander Bucksch 15 | School of Biology and Interactive computing 16 | Georgia Institute of Technology 17 | 18 | Mail: bucksch@gatech.edu 19 | Web: http://www.bucksch.nl 20 | ------------------------------------------------------------------------------------------- 21 | 22 | Copyright (c) 2014 Alexander Bucksch 23 | All rights reserved. 24 | 25 | Redistribution and use in source and binary forms, with or without 26 | modification, are permitted provided that the following conditions are 27 | met: 28 | 29 | * Redistributions of source code must retain the above copyright 30 | notice, this list of conditions and the following disclaimer. 
31 | 32 | * Redistributions in binary form must reproduce the above 33 | copyright notice, this list of conditions and the following 34 | disclaimer in the documentation and/or other materials provided 35 | with the distribution. 36 | 37 | * Neither the name of the DIRT Developers nor the names of its 38 | contributors may be used to endorse or promote products derived 39 | from this software without specific prior written permission. 40 | 41 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 42 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 43 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 44 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 45 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 46 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 47 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 48 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 49 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 50 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 51 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
class Masking(object):
    '''
    Computes the binary root mask for DIRT input images.

    Grey-value images are thresholded with a locally adaptive (gaussian)
    filter seeded by a scaled Otsu estimate; already-binary input images
    skip thresholding and are only renormalised to a 0/255 mask.
    Uses the module-level imports: mahotas as `m`, numpy as `np`,
    and `scipy.ndimage`.
    '''

    def __init__(self, scale=1.0):
        '''
        Constructor

        scale -- multiplier applied to the automatically determined Otsu
                 threshold (e.g. 0.6 for flash-lit images, see README).
        '''
        self.__scale = scale

    def threshold_adaptive(self, image, block_size, method='gaussian', offset=0,
                           mode='reflect', param=None):
        '''
        Apply a locally adaptive threshold to `image`.

        The local threshold surface is estimated with the chosen filter
        ('generic', 'gaussian', 'mean' or 'median') over `block_size`
        neighbourhoods; a pixel is foreground when it exceeds the local
        threshold minus `offset`. Returns a boolean array of image.shape.
        '''
        thresh_image = np.zeros(image.shape, 'double')
        if method == 'generic':
            scipy.ndimage.generic_filter(image, param, block_size,
                                         output=thresh_image, mode=mode)
        elif method == 'gaussian':
            if param is None:
                # automatically determine sigma which covers > 99% of distribution
                sigma = (block_size - 1) / 6.0
            else:
                sigma = param
            scipy.ndimage.gaussian_filter(image, sigma, output=thresh_image,
                                          mode=mode)
        elif method == 'mean':
            mask = 1. / block_size * np.ones((block_size,))
            # separation of filters to speedup convolution
            scipy.ndimage.convolve1d(image, mask, axis=0, output=thresh_image,
                                     mode=mode)
            scipy.ndimage.convolve1d(thresh_image, mask, axis=1,
                                     output=thresh_image, mode=mode)
        elif method == 'median':
            scipy.ndimage.median_filter(image, block_size, output=thresh_image,
                                        mode=mode)

        return image > (thresh_image - offset)

    def calculateMask(self, img):
        '''
        Compute the binary mask for `img` and return it.

        Binary input (<= 2 distinct values) bypasses thresholding; grey
        input is adaptively thresholded and cleaned with a morphological
        open/close. Foreground becomes 255, background 0, and the
        one-pixel image border is cleared.
        '''
        # print() calls are py2/py3 compatible (single argument);
        # the pinned requirements (numpy 1.19.x) imply a Python 3 runtime.
        print('Masking input')
        vals = np.unique(img)
        if len(vals) <= 2:
            print('Binary input detected, no thresholding performed')
            if len(vals) == 2:  # guard: a constant image has nothing to split
                idx1 = np.where(img == vals[0])
                idx2 = np.where(img == vals[1])
                img[idx1] = False
                img[idx2] = True
        else:
            print('Grey input detected')
            T = m.otsu(img, ignore_zeros=False)
            T = T * self.__scale
            img = self.threshold_adaptive(img, 80, 'gaussian', offset=-20, param=T)
            img = m.morph.open(img)
            img = m.morph.close(img)
        ''' just a quick fix of the dilation function that caused the binary
        image to consist of 0 and 2. Now It should be a real binary image '''
        # renormalise whatever two values remain to a real 0/255 mask;
        # guarded so a degenerate single-valued result no longer crashes
        vals = np.unique(img)
        if len(vals) == 2:
            idx1 = np.where(img == vals[0])
            idx2 = np.where(img == vals[1])
            img[idx1] = 0
            img[idx2] = 255

        w, h = np.shape(img)
        # clear a one-pixel frame so components never touch the image edge
        img[0, :] = 0
        img[:, 0] = 0
        img[w - 1, :] = 0
        img[:, h - 1] = 0
        return img
For example, if a flashlight is used to take root images, then 0.6 is a good choice. 35 | number of roots placed at the right of the root crown, 0 - excised root analysis is off 36 | 1 - crown root analysis is on, 0 - crown root analysis is off 37 | 1 - is on, 0 - is off. Off refers to a pre-existing segmentation done with DIRT. Binary masks as input images are detected automatically. 38 | a simple decimal e.g. 25.4. If 0.0 is used, then the output will have pixels as unit. 39 | 1 - reconstruction is turned on, 0 - reconstruction is turned off 40 | 1 - plotting data is stored, 0 - plotting data is not stored 41 | 1 - the full trait set is put into one excel file containing empty cells for traits that were not computed, 0 - only computed files are written to the output file 42 | full path to folder where the result is stored 43 | full path to .csv file containing the traits to be computed 44 | 45 | Example: 46 | python main.py /Documents/image_name.jpg 8 25.0 1 1 1 25.1 0 0 0 /Documents/image_folder/ /Documents/traits.csv 47 | 48 | Notes on common questions: 49 | - Input is restricted to .jpg, .png and .tif images 50 | - It is not possible to analyze only an excised root when a root crown is in the image. However, it is possible to analyze images containing only excised roots. 51 | 52 | ------------------------------------------------------------ 53 | 54 | For convenience we provide the runOnFolder script, that executes DIRT on all images in a specified folder. 55 | Note we made the masking threshold available on the command line because of user requests. 56 | 57 | Example: python runOnFolder.py /Users/image_folder/ 58 | 59 | Please adjust line 86 according to the description above and note that the script uses 6 cores to compute images in parallel. The number of cores can be adjusted in line 80. 
60 | 61 | ------------------------------------------------------------ 62 | 63 | Updates in DIRT 1.1 (21 June 2019): 64 | ------------------------------------------------------------ 65 | - Some bug fixes on the avg. root density. There was a problem with very young and sparse root system. The formula changed and is now normed to the max. width instead of the max. width of the line. 66 | The bug was found by Peng Wang at the University of Nebraska. 67 | 68 | Updates in DIRT 1.1 (11 January 2016): 69 | ------------------------------------------------------------ 70 | - Minor bug fixes in Preprocessing.py to allow smaller circle markers and fix a possible missdetection of the experiment tag as the circle. 71 | Thanks to Linda Zamariola (U Bologna) for finding this issue. 72 | 73 | Updates in DIRT 1.1 (4 November 2015): 74 | ------------------------------------------------------------ 75 | - Minor bug fixes in the excised root calculations. Thanks to Alexandre Grondin (U Nebraska) for discovering and validating the fixes. 76 | 77 | Changes in DIRT 1.1 (14 January 2015): 78 | ------------------------------------------------------------ 79 | - storage of trait values is changed from a list data structure to a dictionary to allow trait selection controlled by the file traits.csv 80 | - added support for trait selection to reduce computation time. See example file traits.csv (1 - trait is computed, 0 - trait is not computed) 81 | - removed unused tip-diameter switch on the command line 82 | - add stem reconstruction switch on the command line to turn the experimental stem reconstruction on/off 83 | - output file now uses the codes in the trait.csv file and only contains selected traits 84 | - removed several unused variables and minor bugs fixed 85 | - added command line option to turn storage of numpy arrays on/off. These files can be used to plot the individual root statistics and can be found in the "Plots" folders. 
86 | - new (experimental, not validated) traits added due to community requests: projected root area, width and depth of the skeleton (medial axis), top and bottom angle for monocots, segmentation of adventious and basal roots for legumes to retrieve taproot and hypocotyl diameter and adventious and basal root counts. 87 | - added computational statistics such as computation time and graph size to help balancing grid installations 88 | - added an option to have an output file with all possible traits that contains empty cells for not computed traits in the output.csv file. This was a developer request to enable faster ingestion into data bases 89 | -------------------------------------------------------------------------------- /ransac.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import scipy # use numpy if scipy unavailable 3 | import scipy.linalg # use numpy if scipy unavailable 4 | import pylab 5 | 6 | ## Copyright (c) 2004-2007, Andrew D. Straw. All rights reserved. 7 | 8 | ## Redistribution and use in source and binary forms, with or without 9 | ## modification, are permitted provided that the following conditions are 10 | ## met: 11 | 12 | ## * Redistributions of source code must retain the above copyright 13 | ## notice, this list of conditions and the following disclaimer. 14 | 15 | ## * Redistributions in binary form must reproduce the above 16 | ## copyright notice, this list of conditions and the following 17 | ## disclaimer in the documentation and/or other materials provided 18 | ## with the distribution. 19 | 20 | ## * Neither the name of the Andrew D. Straw nor the names of its 21 | ## contributors may be used to endorse or promote products derived 22 | ## from this software without specific prior written permission. 
def ransac(data, model, n, k, t, d, debug=False, return_all=False):
    """Fit model parameters to data using the RANSAC algorithm.

    This implementation written from pseudocode found at
    http://en.wikipedia.org/w/index.php?title=RANSAC&oldid=116358182
    and was adapted to the DIRT pipeline by Alexander Bucksch.

    Given:
        data  - a set of observed data points (2-D numpy array, one row per point)
        model - a model object exposing fit(data) and get_error(data, params)
        n     - the minimum number of data values required to fit the model
        k     - the maximum number of iterations allowed in the algorithm
        t     - a threshold value for determining when a data point fits a model
        d     - the number of close data values required to assert that a
                model fits well to data
    Return:
        bestfit - model parameters which best fit the data, or the string
                  'nan' when no candidate model meets the acceptance
                  criteria (sentinel kept for DIRT-pipeline callers).
        With return_all=True (and a successful fit): a
        (bestfit, {'inliers': indices}) tuple.
    """
    iterations = 0
    bestfit = None
    besterr = numpy.inf
    best_inlier_idxs = None
    while iterations < k:
        maybe_idxs, test_idxs = random_partition(n, data.shape[0])
        maybeinliers = data[maybe_idxs, :]
        test_points = data[test_idxs]
        maybemodel = model.fit(maybeinliers)
        test_err = model.get_error(test_points, maybemodel)
        # select indices of rows with accepted points
        also_idxs = test_idxs[test_err < t]
        alsoinliers = data[also_idxs, :]
        if debug:
            print('test_err.min() %s' % test_err.min())
            print('test_err.max() %s' % test_err.max())
            print('numpy.mean(test_err) %s' % numpy.mean(test_err))
            print('iteration %d:len(alsoinliers) = %d' % (iterations, len(alsoinliers)))
        if len(alsoinliers) > d:
            # candidate looks good: refit on consensus set and keep the best
            betterdata = numpy.concatenate((maybeinliers, alsoinliers))
            bettermodel = model.fit(betterdata)
            better_errs = model.get_error(betterdata, bettermodel)
            thiserr = numpy.mean(better_errs)
            if thiserr < besterr:
                bestfit = bettermodel
                besterr = thiserr
                best_inlier_idxs = numpy.concatenate((maybe_idxs, also_idxs))
        iterations += 1
    if bestfit is None:
        # 'nan' sentinel kept for backwards compatibility with callers
        return 'nan'
    if return_all:
        return bestfit, {'inliers': best_inlier_idxs}
    return bestfit


def random_partition(n, n_data):
    """Return n random row indices of the data (and the other n_data-n indices)."""
    all_idxs = numpy.arange(n_data)
    numpy.random.shuffle(all_idxs)
    return all_idxs[:n], all_idxs[n:]


class LinearLeastSquaresModel:
    """Linear system solved using linear least squares.

    This class serves as an example that fulfills the model interface
    needed by the ransac() function: fit() estimates parameters and
    get_error() scores points against them.
    """

    def __init__(self, input_columns, output_columns, debug=False):
        self.input_columns = input_columns    # column indices of the inputs
        self.output_columns = output_columns  # column indices of the outputs
        self.debug = debug

    def fit(self, data):
        """Least-squares parameters mapping input columns to output columns."""
        A = numpy.vstack([data[:, i] for i in self.input_columns]).T
        B = numpy.vstack([data[:, i] for i in self.output_columns]).T
        x, resids, rank, s = scipy.linalg.lstsq(A, B)
        return x

    def get_error(self, data, model):
        """Sum of squared residuals per data row under `model`."""
        A = numpy.vstack([data[:, i] for i in self.input_columns]).T
        B = numpy.vstack([data[:, i] for i in self.output_columns]).T
        # numpy.dot instead of scipy.dot (deprecated and later removed)
        B_fit = numpy.dot(A, model)
        return numpy.sum((B - B_fit) ** 2, axis=1)  # sum squared error per row


def ransacFit(X, Y):
    """Robustly fit Y = m*X (no intercept) with RANSAC.

    X, Y -- 1-D sequences of equal length (at least ~50 points, since
            20-point samples plus 30 extra inliers are required).
    Returns (X, fitted_Y) where fitted_Y is a numpy array of model
    predictions at the X positions.
    """
    n_inputs = 1
    n_outputs = 1
    # reshape the 1-D sequences into single-column 2-D structure
    Xnew = [[x] for x in X]
    Ynew = [[y] for y in Y]
    all_data = numpy.hstack((Xnew, Ynew))
    input_columns = list(range(n_inputs))                       # first columns
    output_columns = [n_inputs + i for i in range(n_outputs)]   # last columns
    debug = False
    model = LinearLeastSquaresModel(input_columns, output_columns, debug=debug)

    # plain (non-robust) least-squares reference fit, kept for comparison
    linear_fit, resids, rank, s = scipy.linalg.lstsq(all_data[:, input_columns],
                                                     all_data[:, output_columns])

    # run RANSAC: 20-point samples, 1000 iterations, error threshold 7e3,
    # at least 30 additional inliers required to accept a candidate model
    ransac_fit, ransac_data = ransac(all_data, model,
                                     20, 1000, 7e3, 30,
                                     debug=debug, return_all=True)

    return X, numpy.dot(Xnew, ransac_fit)[:, 0]


if __name__ == '__main__':
    # smoke test on a noise-free line through the origin
    # (the previous code called an undefined fit())
    xs = list(range(60))
    ys = [3.0 * x for x in xs]
    print(ransacFit(xs, ys)[1][:5])
class IO(object):
    '''
    File input/output helper for the DIRT pipeline.

    Scans input directories, appends per-image result rows to output.csv
    and to the server bookkeeping files, and saves numpy arrays used for
    plotting. Uses the module-level imports numpy (np) and os.
    '''

    __instance = None

    ## Class used with this Python singleton design pattern
    # @todo Add all variables, and methods needed for the Singleton class below
    class Singleton:
        def __init__(self):
            ## a foo class variable
            self.__path = None
            self.__name = None

    def __init__(self, homePath=None, name=None, ID=None, plots=True):
        '''
        Constructor

        homePath -- working directory that receives output.csv
        name     -- current image file name
        ID       -- image id written to the server bookkeeping files
        plots    -- when False, saveArray() becomes a no-op
        '''
        self.__plots = plots
        self.__id = ID
        self.__currentID = None
        self.__path = homePath
        self.__name = name
        self.__serverPath = None
        # fixed per-image metadata columns written before the trait columns
        self.__parameters = ['Image ID', 'Image name', 'Failed',
                             'Experiment number',
                             'circle ratio',
                             'x pixel',
                             'y pixel',
                             'xScale',
                             'yScale', 'computation time', 'Skeleton Vertices']

    def setServerPath(self, p):
        self.__serverPath = p

    def setFileName(self, name):
        self.__name = name

    def getFileName(self):
        return self.__name

    def getHomePath(self):
        return self.__path

    def getID(self):
        return self.__id

    def getCurrentID(self):
        return self.__currentID

    def setidIdx(self, idx):
        self.__currentID = idx

    def setHomePath(self, homePath):
        self.__path = homePath

    def scanDir(self, directory=0):
        '''Return the image file paths in `directory` (default: home path).

        Sub-directories, dot files and .ini/.csv files are skipped.
        '''
        scanPath = self.__path if directory == 0 else directory
        files = []
        print(os.getcwd())
        for infile in os.listdir(scanPath):
            if os.path.isdir(scanPath + infile):
                print(infile + ' is not a file')
            elif infile[0] == '.':
                print(infile + ' is not a file')
            elif infile[len(infile) - 4:] in ('.ini', '.csv'):
                print(infile + ' is not a file')
            else:
                print("current file is: " + infile)
                files.append(scanPath + '/' + infile)
        return files

    def writeServerFile(self, serverFile, string):
        '''Append `string` to the server bookkeeping file, creating it
        (with a header line) on first use.'''
        path = self.__serverPath
        print("server file (working directory): " + str(os.getcwd()))
        print("server file (relative): " + str(path))
        # previous code abused a bare `raise` to select the file mode;
        # an explicit existence check does the same without the trick
        newFile = not os.path.isfile(path + serverFile)
        with open(path + serverFile, "w" if newFile else "a") as fout:
            if newFile:
                fout.write('# File path, image id, type')
                fout.write('\n')
            fout.write(string)
            fout.write('\n')

    def writeRunFile(self, runfile, string):
        '''Append a "runfile, string" row to dirtRun.csv, creating it
        (with a header line) on first use.'''
        path = self.__serverPath + '/'
        newFile = not os.path.isfile(path + 'dirtRun.csv')
        with open(path + 'dirtRun.csv', "w" if newFile else "a") as fout:
            if newFile:
                fout.write('# File path, image id')
                fout.write('\n')
            fout.write(runfile + ', ' + string)
            fout.write('\n')

    def writeFile(self, para, traitsCrown, traitDict, all=False):
        '''Append one result row to output.csv.

        para        -- per-image metadata values (matches self.__parameters)
        traitsCrown -- {trait code: computed value}
        traitDict   -- {trait code: selected?} from traits.csv
        all         -- when True, every trait gets a column, with empty
                       cells for traits that were not computed
        (`all` shadows the builtin, but the keyword name is part of the
        public interface and is kept for callers.)
        '''
        print("output directory: " + self.__path + "/output.csv")
        outPath = self.__path + "/output.csv"
        newFile = not os.path.isfile(outPath)
        with open(outPath, "w" if newFile else "a") as fout:
            if newFile:
                # header row: metadata columns then the selected trait codes
                for i in self.__parameters:
                    fout.write(str(i) + ',')
                for k, v in traitDict.items():
                    if all == False:
                        if v == True and k in traitsCrown:
                            fout.write(str(k) + ',')
                    else:
                        fout.write(str(k) + ',')
                fout.write('\n')
            for i in para:
                fout.write(str(i) + ',')
            for k, v in traitDict.items():
                if v == True and k in traitsCrown:
                    fout.write(str(traitsCrown[k]) + ',')
                elif all == True:
                    fout.write(str(' ,'))
            fout.write('\n')

    def saveArray(self, arr, name):
        '''Save `arr` as a gzipped csv and register it in dirt_out.csv.

        No-op (returns 0) when plotting output is disabled.
        '''
        if self.__plots == False:
            return 0
        # the original try/except blocks only re-raised, so they are omitted
        np.savetxt(name + '.gz', arr, delimiter=',')
        self.writeServerFile('dirt_out.csv',
                             os.getcwd() + name[1:] + '.gz' + ',' + str(self.__id) + ',1')
12 | 13 | Bucksch et al., 2014 "Image-based high-throughput field phenotyping of crop roots", Plant Physiology 14 | 15 | ------------------------------------------------------------------------------------------- 16 | Author: Alexander Bucksch 17 | School of Biology and Interactive computing 18 | Georgia Institute of Technology 19 | 20 | Mail: bucksch@gatech.edu 21 | Web: http://www.bucksch.nl 22 | ------------------------------------------------------------------------------------------- 23 | 24 | Copyright (c) 2014 Alexander Bucksch 25 | All rights reserved. 26 | 27 | Redistribution and use in source and binary forms, with or without 28 | modification, are permitted provided that the following conditions are 29 | met: 30 | 31 | * Redistributions of source code must retain the above copyright 32 | notice, this list of conditions and the following disclaimer. 33 | 34 | * Redistributions in binary form must reproduce the above 35 | copyright notice, this list of conditions and the following 36 | disclaimer in the documentation and/or other materials provided 37 | with the distribution. 38 | 39 | * Neither the name of the DIRT Developers nor the names of its 40 | contributors may be used to endorse or promote products derived 41 | from this software without specific prior written permission. 42 | 43 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 44 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 45 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 46 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 47 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 48 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 49 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 50 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 51 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 52 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 53 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 54 | 55 | ''' 56 | 57 | ''' 58 | # standard python imports 59 | ''' 60 | import math, random 61 | 62 | # -- The Point class represents points in n-dimensional space 63 | class Point: 64 | # Instance variables 65 | # self.coords is a list of coordinates for this Point 66 | # self.n is the number of dimensions this Point lives in (ie, its space) 67 | # self.reference is an object bound to this Point 68 | # Initialize new Points 69 | def __init__(self, coords, reference=None): 70 | self.coords = coords 71 | self.n = len(coords) 72 | self.reference = reference 73 | # Return a string representation of this Point 74 | def __repr__(self): 75 | return str(self.coords) 76 | def __getitem__(self,idx): 77 | return self.coords[idx] 78 | # -- The Cluster class represents clusters of points in n-dimensional space 79 | class Cluster: 80 | # Instance variables 81 | # self.points is a list of Points associated with this Cluster 82 | # self.n is the number of dimensions this Cluster's Points live in 83 | # self.centroid is the sample mean Point of this Cluster 84 | def __init__(self, points): 85 | # We forbid empty Clusters (they don't make mathematical sense!) 
86 | if len(points) == 0: raise Exception("ILLEGAL: EMPTY CLUSTER") 87 | self.points = points 88 | self.n = points[0].n 89 | # We also forbid Clusters containing Points in different spaces 90 | # Ie, no Clusters with 2D Points and 3D Points 91 | for p in points: 92 | if p.n != self.n: raise Exception("ILLEGAL: MULTISPACE CLUSTER") 93 | # Figure out what the centroid of this Cluster should be 94 | self.centroid = self.calculateCentroid() 95 | # Return a string representation of this Cluster 96 | def __repr__(self): 97 | return str(self.points) 98 | def __getitem__(self,idx): 99 | return self.points[idx] 100 | # Update function for the K-means algorithm 101 | # Assigns a new list of Points to this Cluster, returns centroid difference 102 | def update(self, points): 103 | old_centroid = self.centroid 104 | self.points = points 105 | self.centroid = self.calculateCentroid() 106 | return self.getDistance(old_centroid, self.centroid) 107 | # -- Get the Euclidean distance between two Points 108 | def getDistance(self,a, b): 109 | # Forbid measurements between Points in different spaces 110 | if a.n != b.n: raise Exception("ILLEGAL: NON-COMPARABLE POINTS") 111 | # Euclidean distance between a and b is sqrt(sum((a[i]-b[i])^2) for all i) 112 | ret = 0.0 113 | for i in range(a.n): 114 | ret = ret + pow((a.coords[i] - b.coords[i]), 2) 115 | return math.sqrt(ret) 116 | # Calculates the centroid Point - the centroid is the sample mean Point 117 | # (in plain English, the average of all the Points in the Cluster) 118 | def calculateCentroid(self): 119 | centroid_coords = [] 120 | # For each coordinate: 121 | for i in range(self.n): 122 | # Take the average across all Points 123 | centroid_coords.append(0.0) 124 | for p in self.points: 125 | centroid_coords[i] = centroid_coords[i] + p.coords[i] 126 | if len(self.points)>0: centroid_coords[i] = centroid_coords[i] / len(self.points) 127 | else: centroid_coords[i] = -1 128 | # Return a Point object using the average coordinates 129 | 
# -- Return Clusters of Points formed by K-means clustering
class kMeans:
    """K-means clustering over a collection of coordinate sequences.

    The constructor wraps each input coordinate sequence in a Point;
    kmeans() then groups those Points into Cluster objects using
    Lloyd's algorithm with a centroid-shift convergence cutoff.
    """

    def __init__(self, pts):
        # Wrap every raw coordinate sequence in a Point up front.
        self.__points = [Point(coords) for coords in pts]

    def kmeans(self, k, cutoff):
        """Cluster the stored Points into k Clusters.

        Iterates until no centroid moves farther than `cutoff` in a
        single pass. Returns the list of Cluster objects.
        """
        # Seed one Cluster around each of k randomly sampled Points.
        clusters = [Cluster([seed]) for seed in random.sample(self.__points, k)]
        while True:
            # One bucket of Points per Cluster for this iteration.
            buckets = [[] for _ in clusters]
            for pt in self.__points:
                # Locate the Cluster whose centroid lies nearest to pt.
                best_idx = 0
                best_dist = self.getDistance(pt, clusters[0].centroid)
                for idx, cluster in enumerate(clusters[1:], start=1):
                    dist = self.getDistance(pt, cluster.centroid)
                    if dist < best_dist:
                        best_dist = dist
                        best_idx = idx
                buckets[best_idx].append(pt)
            # Hand each Cluster its new bucket and record the largest
            # centroid displacement of this pass.
            biggest_shift = 0.0
            for cluster, bucket in zip(clusters, buckets):
                biggest_shift = max(biggest_shift, cluster.update(bucket))
            # Converged once every centroid moved less than the cutoff.
            if biggest_shift < cutoff:
                break
        return clusters

    # -- Get the Euclidean distance between two Points
    def getDistance(self, a, b):
        """Euclidean distance between two Points of equal dimension."""
        # Forbid measurements between Points in different spaces.
        if a.n != b.n:
            raise Exception("ILLEGAL: NON-COMPARABLE POINTS")
        total = 0.0
        for i in range(a.n):
            delta = a.coords[i] - b.coords[i]
            total += delta * delta
        return math.sqrt(total)

    # -- Create a random Point in n-dimensional space
    def makeRandomPoint(self, n, lower, upper):
        """Return a Point with n coordinates drawn uniformly from [lower, upper]."""
        return Point([random.uniform(lower, upper) for _ in range(n)])
40 | 41 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 42 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 43 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 44 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 45 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 46 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 47 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 48 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 49 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 50 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 51 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 52 | 53 | ''' 54 | 55 | ''' 56 | # external library imports 57 | ''' 58 | import numpy as np 59 | import scipy.optimize as sp 60 | import graph_tool.topology as gt 61 | 62 | ''' 63 | # standard python imports 64 | ''' 65 | import time 66 | 67 | class RootTipPaths(object): 68 | ''' 69 | classdocs 70 | ''' 71 | 72 | 73 | def __init__(self, io): 74 | ''' 75 | Constructor 76 | ''' 77 | self.__RTP=[] 78 | self.__io=io 79 | self.__id=io.getID() 80 | self.__currentIdx=io.getCurrentID() 81 | self.__medianTipDiameter=0.0 82 | self.__meanTipDiameter=0.0 83 | self.__90TipDiameter=0.0 84 | self.__expF=0.0 85 | self.__tips=[] 86 | self.__rootingDepth=0.0 87 | self.__rootWidth=0.0 88 | def compareTwoOrderedLists(self,l1,l2): 89 | 90 | if len(l1)>len(l2): l1=l1[:len(l2)] 91 | if len(l1)=percent90)[0]) 203 | tmpdia90=[] 204 | for i in idx: 205 | tmpdia90.append(tipDia[i]) 206 | 207 | if tmpdia90: 208 | dia90=np.max(tmpdia90) 209 | else: 210 | dia90 = 0 211 | self.__90TipDiameter=dia90 212 | if tipDia: 213 | self.__90TipDiameter=np.max(tipDia) 214 | else: 215 | self.__90TipDiameter=0 216 | if tipHeight: 217 | self.__rootingDepth=np.max(tipHeight) 218 | else: 219 | 
    def getRTPSkeleton(self,thickestPath,G,newRTp=False):
        # Build the root-tip-path (RTP) skeleton: a copy of graph G in
        # which every edge that is not flagged as part of an RTP has
        # been removed.
        #
        # Parameters:
        #   thickestPath -- path object handed through to getRootTipPaths
        #   G            -- graph-tool graph carrying an "ep" edge property map
        #                   whose entries expose an 'RTP' boolean flag
        #   newRTp       -- True forces recomputation of the cached RTP list
        #
        # Returns a 9-tuple: (rtpSkel, number of RTPs, median tip diameter,
        # mean tip diameter, 90th-percentile tip diameter, RTP list, tips,
        # rooting depth, root width).
        eprop=G.edge_properties["ep"]
        if newRTp==True: self.__RTP=[]
        if len(self.__RTP) == 0:
            # Cache miss (or forced refresh): recompute the RTPs and time it.
            startT=time.time()
            RTP,tips = self.getRootTipPaths(thickestPath, G)
            self.__RTP=RTP
            print 'RTPs computed in ' +str(time.time()-startT)+'s'
        # NOTE(review): 'tips' is only bound when the branch above runs;
        # returning it after a cache hit (self.__RTP non-empty, newRTp=False)
        # would raise NameError -- confirm callers never hit that path.
        print 'calculating RTP Skeleton'

        # Copy G, then strip every edge that does not belong to an RTP.
        rtpSkel=G.copy()
        for e in G.edges():
            if eprop[e]['RTP']==False:
                rtpSkel.remove_edge(e)

        return rtpSkel,len(self.__RTP),self.__medianTipDiameter,self.__meanTipDiameter,self.__90TipDiameter,self.__RTP,tips,self.__rootingDepth,self.__rootWidth
21 |
22 |

Dirt

23 |

The source code of the publication "Image-based high-throughput field phenotyping of crop roots"

24 | View project on GitHub 25 |
26 |
27 | 28 |
29 |
30 |
31 |

32 | DIRT 1.1 - An automatic high throughput root phenotyping platform

33 | 34 |

(c) 2014 Alexander Bucksch - bucksch@gatech.edu, Georgia Institute of Technology
35 | (c) 2016 Alexander Bucksch - bucksch@uga.edu, University of Georgia, Athens
36 | Web application by Abhiram Das - adas30@biology.gatech.edu

37 | 38 |

Web application: http://dirt.iplantcollaborative.org

39 | 40 |

User and developer group: https://groups.google.com/forum/#!forum/dirt-users

41 | 42 |

43 | Dependencies

44 | 45 |

The software is written and tested in:

46 | 47 | 51 | 52 |

53 | Required packages:

54 | 55 | 65 | 66 |

67 | Optional packages:

68 | 69 |

Optionally binaries of standard OCR and BarCode software can be used for tag recognition:

70 | 71 | 77 | 78 |

79 | Command line usage

80 | 81 |
    82 |
  • full path to the root image
    83 |
  • 84 |
  • ID which will be a folder name in the working directory. Integer value needed.
    85 |
  • 86 |
  • multiplier for the automatically determined mask threshold. 1.0 works fine and is default. For example, if a flashlight is used to take root images, then 0.6 is a good choice.
    87 |
  • 88 |
  • number of roots placed at the right of the root crown, 0 - excised root analysis is off
    89 |
  • 90 |
  • 1 - crown root analysis is on, 0 - crown root analysis is off
    91 |
  • 92 |
  • 1 - is on, 0 - is off. Off refers to a pre-existing segmentation done with DIRT. Binary masks as input images are detected automatically.
    93 |
  • 94 |
  • a simple decimal e.g. 25.4. If 0.0 is used, then the output will have pixels as unit.
    95 |
  • 96 |
  • 1 - reconstruction is turned on, 0 - reconstruction is turned off
    97 |
  • 98 |
  • 1 - plotting data is stored, 0 - plotting data is not stored
    99 |
  • 100 |
  • 1 - the full trait set is put into one excel file containing empty cells for traits that were not computed, 0 - only computed files are written to the output file
    101 |
  • 102 |
  • full path to folder where the result is stored
    103 |
  • 104 |
  • full path to .csv file containing the traits to be computed
    105 |
  • 106 |
107 | 108 |

109 | Example:

110 | 111 |
python main.py /Documents/image_name.jpg 8 25.0 1 1 1 25.1 0 0 0 /Documents/image_folder/ /Documents/traits.csv
112 | 
113 | 114 |

Notes on common questions:

115 | 116 |
    117 |
  • Input is restricted to .jpg, .png and .tif images
    118 |
  • 119 |
  • It is not possible to analyze only an excised root when a root crown is in the image. However, it is possible to analyze images containing only excised roots.
    120 |
  • 121 |
122 | 123 |

124 | Running DIRT on folder content

125 | 126 |

For convenience we provide the runOnFolder script, that executes DIRT on all images in a specified folder. 127 | Note we made the masking threshold available on the command line because of user requests.

128 | 129 |

130 | Example:

131 | 132 |
python runOnFolder.py /Users/image_folder/ <masking threshold>
133 | 
134 | 135 |

Please adjust line 86 according to the description above and note that the script uses 6 cores to compute images in parallel. The number of cores can be adjusted in line 80.

136 | 137 |

138 | Updates in DIRT 1.1 (11 January 2016):

139 | 140 |
    141 |
  • Minor bug fixes in Preprocessing.py to allow smaller circle markers and fix a possible misdetection of the experiment tag as the circle.
    142 | Thanks to Linda Zamariola (U Bologna) for finding this issue.
    143 |
  • 144 |
145 | 146 |

147 | Updates in DIRT 1.1 (4 November 2015):

148 | 149 |
    150 |
  • Minor bug fixes in the excised root calculations. Thanks to Alexandre Grondin (U Nebraska) for discovering and validating the fixes.
    151 |
  • 152 |
153 | 154 |

155 | Changes in DIRT 1.1 (14 January 2015):

156 | 157 |
    158 |
  • storage of trait values is changed from a list data structure to a dictionary to allow trait selection controlled by the file traits.csv
    159 |
  • 160 |
  • added support for trait selection to reduce computation time. See example file traits.csv (1 - trait is computed, 0 - trait is not computed)
    161 |
  • 162 |
  • removed unused tip-diameter switch on the command line
    163 |
  • 164 |
  • add stem reconstruction switch on the command line to turn the experimental stem reconstruction on/off
    165 |
  • 166 |
  • output file now uses the codes in the trait.csv file and only contains selected traits
    167 |
  • 168 |
  • removed several unused variables and minor bugs fixed
    169 |
  • 170 |
  • added command line option to turn storage of numpy arrays on/off. These files can be used to plot the individual root statistics and can be found in the "Plots" folders.
    171 |
  • 172 |
  • new (experimental, not validated) traits added due to community requests: projected root area, width and depth of the skeleton (medial axis), top and bottom angle for monocots, segmentation of adventious and basal roots for legumes to retrieve taproot and hypocotyl diameter and adventious and basal root counts.
    173 |
  • 174 |
  • added computational statistics such as computation time and graph size to help balancing grid installations
    175 |
  • 176 |
  • added an option to have an output file with all possible traits that contains empty cells for not computed traits in the output.csv file. This was a developer request to enable faster ingestion into data bases
    177 |
  • 178 |
179 |
180 | 181 | 195 |
196 |
197 | 198 | 199 | 200 | 201 | -------------------------------------------------------------------------------- /Preprocessing.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Preprocessing.py 3 | 4 | This module contains all imaging operations to create input for analysis, 5 | such as detecting the root in the image create and filter the binary image etc. 6 | 7 | The code is free for non-commercial use. 8 | Please contact the author for commercial use. 9 | 10 | Please cite the DIRT Paper if you use the code for your scientific project. 11 | 12 | Bucksch et al., 2014 "Image-based high-throughput field phenotyping of crop roots", Plant Physiology 13 | 14 | ------------------------------------------------------------------------------------------- 15 | Author: Alexander Bucksch 16 | School of Biology and Interactive computing 17 | Georgia Institute of Technology 18 | 19 | Mail: bucksch@gatech.edu 20 | Web: http://www.bucksch.nl 21 | ------------------------------------------------------------------------------------------- 22 | 23 | Copyright (c) 2014 Alexander Bucksch 24 | All rights reserved. 25 | 26 | Redistribution and use in source and binary forms, with or without 27 | modification, are permitted provided that the following conditions are 28 | met: 29 | 30 | * Redistributions of source code must retain the above copyright 31 | notice, this list of conditions and the following disclaimer. 32 | 33 | * Redistributions in binary form must reproduce the above 34 | copyright notice, this list of conditions and the following 35 | disclaimer in the documentation and/or other materials provided 36 | with the distribution. 37 | 38 | * Neither the name of the DIRT Developers nor the names of its 39 | contributors may be used to endorse or promote products derived 40 | from this software without specific prior written permission. 

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

'''

'''
# external library imports
'''
import numpy as np
import scipy.misc
import scipy.ndimage

'''
# internal library imports
'''
import Segmentation
import Masking
import DirtOcr as ocr

'''
# standard python imports
'''
import os

class Preprocessing(object):
    '''
    Imaging operations that turn a raw root photograph into the binary
    images consumed by the analysis stage: masking, connected-component
    labeling, and detection of the circle marker, the experiment tag,
    the root crown and excised roots. Intermediate images are written
    under the io object's home path (Mask/, Crown/, Lateral/).
    '''


    def __init__(self,io):
        '''
        Constructor.

        io -- project I/O helper providing getID(), getCurrentID(),
              getHomePath(), getFileName(), writeServerFile(), ...
        '''
        self.__io=io
        self.__labelHist=[]       # per-label pixel counts; mutated by the find* methods
        self.__id=io.getID()
        self.__currentIdx=io.getCurrentID()
        self.__compsX=[]          # x coordinates of each labeled component
        self.__compsY=[]          # y coordinates of each labeled component
        self.__h=0                # image height (rows)
        self.__w=0                # image width (cols)
        self.__tagCrop=10         # pixels cropped from each tag border before OCR

    def prepocess(self,img,rootCrown,scale=1.0,nrExRoot=1, marker=True, stemCorrection=False):
        '''
        Segment one image and write all intermediate results to disk.

        img            -- image array (cast to uint8 for masking)
        rootCrown      -- True to detect and extract the root crown
        scale          -- multiplier for the automatic mask threshold
        nrExRoot       -- number of excised roots expected in the image
        marker         -- True if a circle marker is present in the image
        stemCorrection -- True to run the experimental stem reconstruction

        Returns (Failed, tagText, circleRatio, circleWidth, circleHeight).

        NOTE(review): the method name keeps its historic spelling
        ("prepocess") because external callers depend on it.
        '''
        print 'starting to segment'
        rIdx=-1
        self.__io.setServerPath('./')
        circleIdx= circleRatio= circleWidth= circleHeight= imgCircle = 0
        Failed=False
        orig=img.copy()
        # Compute and save the binary mask of the whole image.
        mask=Masking.Masking(scale=scale)
        imgGrey = img.astype(np.uint8)
        print 'make mask'
        imgBinary=mask.calculateMask(imgGrey)
        print 'saving binary mask'
        # NOTE(review): scipy.misc.imsave was removed in modern SciPy;
        # this file pins an old SciPy (see requirements.txt) -- confirm.
        scipy.misc.imsave(self.__io.getHomePath()+'/Mask/' + self.__io.getFileName()+'.png', imgBinary)
        pathold=os.getcwd()
        os.chdir(self.__io.getHomePath())
        self.__io.writeServerFile('dirt_out.csv',self.__io.getHomePath()+'/Mask/'+self.__io.getFileName()+'.png,' +str(self.__io.getID())+',0')
        os.chdir(pathold)
        # Label connected components and fill the per-label histograms.
        imgLabel=self.calculateLabelHist(imgBinary)

        if marker== True:
            print 'Marker is True'
            circleIdx, circleRatio, circleWidth, circleHeight, imgCircle =self.findCircle(imgLabel.copy())
        else:
            print 'Marker is False'
            circleIdx, circleRatio, circleWidth, circleHeight, imgCircle = -1, 1, 1, 1, None

        rectIdx, _, _, _,imgTag, tagText =self.findTag(imgLabel.copy() , imgBinary, orig, rect_ratio=5.)

        # Zero the tag and circle components out of the histogram so the
        # root searches below cannot pick them up again.
        if rectIdx >=0:
            print 'tagIdx'+str(rectIdx)
            try: self.__labelHist[rectIdx] = 0
            except: pass

        if circleIdx >=0 and marker==True:
            try: self.__labelHist[circleIdx] = 0
            except: pass

        '''
        These two functions belong together and have to be called right after each other. I know, that is bad.
        '''
        if rootCrown==True:
            rIdx,rIdxList,crownMin,crownMax,crownBottom,crownTop=self.findRoot(imgLabel.copy())
            if stemCorrection== True:
                print 'Stem reconstruction is active '
                imgRoot=self.correctForStem(imgLabel.copy(), [circleIdx,rectIdx,rIdx], crownMin, crownMax, crownBottom, crownTop, rIdx, rIdxList)
            else:
                print 'No stem reconstruction active'
                # Crop the crown's bounding box out of a clean binary image.
                imgReturn=np.zeros_like(imgLabel)
                imgReturn[rIdxList]=1
                imgRoot=imgReturn[crownMax:crownMin,crownBottom:crownTop]

        # NOTE(review): 'exRIdx' is only bound inside the branches below;
        # with nrExRoot=0 the later 'elif rIdx == -1 and exRIdx !=-1' would
        # raise NameError -- confirm callers always pass nrExRoot >= 1.
        if nrExRoot >1 and rootCrown==True:

            for i in range(nrExRoot):
                exRIdx,imgExRoot,centerPtx,centerPty=self.findExcisedRoot(imgLabel.copy(),[circleIdx,rectIdx,rIdx],crownMin,crownMax)
                if exRIdx != -1:
                    print 'found excised root '+str(i)
                    try:
                        scipy.misc.imsave(self.__io.getHomePath()+'/Lateral/' + self.__io.getFileName()+'_'+str(centerPtx)+'_'+str(centerPty)+'.png', imgExRoot)
                        print 'excised root '+str(i)+'saved'
                    except:
                        print 'NOT SAVED !!!'
                        raise
                    try:
                        pathold=os.getcwd()
                        os.chdir(self.__io.getHomePath())
                        self.__io.writeServerFile('dirt_out.csv',self.__io.getHomePath()+'/Lateral/'+self.__io.getFileName()+'_'+str(centerPtx)+'_'+str(centerPty)+'.png,' +str(self.__io.getID())+',0')
                        print 'excised root '+str(i)+'saved Server'
                        os.chdir(pathold)
                    except:
                        print 'NOT SAVED !!!!'
                        raise
        elif nrExRoot ==1 and rootCrown==True:
            exRIdx,imgExRoot,centerPtx,centerPty=self.findExcisedRoot(imgLabel.copy(),[circleIdx,rectIdx,rIdx],crownMin,crownMax)
            if exRIdx != -1:
                print 'found the excised root '
                try:
                    pathold=os.getcwd()
                    os.chdir(self.__io.getHomePath())
                    scipy.misc.imsave(self.__io.getHomePath()+'/Lateral/' + self.__io.getFileName()+'_'+str(centerPtx)+'_'+str(centerPty)+'.png', imgExRoot)
                    print 'excised root saved'
                    self.__io.writeServerFile('dirt_out.csv',self.__io.getHomePath()+'/Lateral/'+self.__io.getFileName()+'_'+str(centerPtx)+'_'+str(centerPty)+'.png,' +str(self.__io.getID())+',0')
                    print 'excised root saved Server'
                    os.chdir(pathold)
                except: print 'NOT SAVED !!!!'
        elif nrExRoot ==1 and rootCrown==False:
            # No crown in the image: search the whole frame (dummy bounds 0,1).
            exRIdx,imgExRoot,centerPtx,centerPty=self.findExcisedRoot(imgLabel.copy(),[circleIdx,rectIdx],0,1)
            if exRIdx != -1:
                print 'found the excised root '
                rIdx=-1
                try:
                    pathold=os.getcwd()
                    os.chdir(self.__io.getHomePath())
                    scipy.misc.imsave(self.__io.getHomePath()+'/Lateral/' + self.__io.getFileName()+'_'+str(centerPtx)+'_'+str(centerPty)+'.png', imgExRoot)
                    print 'excised root saved'
                    self.__io.writeServerFile('dirt_out.csv',self.__io.getHomePath()+'/Lateral/'+self.__io.getFileName()+'_'+str(centerPtx)+'_'+str(centerPty)+'.png,' +str(self.__io.getID())+',0')
                    print 'excised root saved Server'
                    os.chdir(pathold)
                except: print 'NOT SAVED !!!!'


        if marker==True:
            scipy.misc.imsave(self.__io.getHomePath()+'/Mask/' + self.__io.getFileName()+'Circle.png', imgCircle)
            scipy.misc.imsave(self.__io.getHomePath()+'/Mask/' + self.__io.getFileName()+'Tag.png', imgTag)
        #pathold=os.getcwd()
        #os.chdir(self.__io.getHomePath())

        if marker==True:
            self.__io.writeServerFile('dirt_out.csv',self.__io.getHomePath()+'/Mask/'+self.__io.getFileName()+'Circle.png,' +str(self.__io.getID())+',0')

        #os.chdir(pathold)
        if rIdx != -1:
            '''
            If image is usable, then it gets segmented and copied. Otherwise we ignore it
            '''
            try:
                print 'root image to be saved'
                scipy.misc.imsave(self.__io.getHomePath()+'/Crown/' + self.__io.getFileName()+'.png', imgRoot)
            except:
                print 'CROWN NOT SAVED'
                raise
            try:
                pathold=os.getcwd()
                os.chdir(self.__io.getHomePath())
                self.__io.writeServerFile('dirt_out.csv',self.__io.getHomePath()+'/Crown/'+self.__io.getFileName()+'.png,' +str(self.__io.getID())+',0')
                os.chdir(pathold)
            except: print 'MASK NOT WRITTEN TO SERVER FILE'
        elif rIdx == -1 and exRIdx !=-1:
            print "Only excised roots computed"
        else: Failed=True
        print "old path: "+pathold
        return Failed,tagText,circleRatio, circleWidth, circleHeight

    def calculateLabelHist(self,imgBinary):
        '''
        Label the connected components of the binary image and build the
        bookkeeping used by the find* methods:

          self.__labelHist -- pixel count per label, with the background
                              label and every component under 100 px zeroed
          self.__compsX/Y  -- x/y coordinates of every pixel per label
          self.__h/__w     -- image dimensions

        Returns the labeled image.
        '''
        seg=Segmentation.Segmentation(imgBinary,io=self.__io)
        labeled,_=seg.labelAll()
        x, y = np.shape(labeled)
        val = labeled.flatten()

        histo, _ = np.histogram(val, bins=np.max(labeled) + 1)
        '''
        TEST: background can have less pixels than foreground if no markers are in the image
        '''
        if len(np.unique(labeled))==2:
            # Only background + one object: the background is whichever
            # bin matches the white-pixel count, not simply the largest.
            nrOfwhitePx=len(np.where(imgBinary==255)[1])
            comp1 = np.max(histo)
            if comp1==nrOfwhitePx:
                comp1 = np.min(histo)
            idx1 = list(histo).index(comp1)
            histo[idx1]=0
        else:
            # Several components: assume the biggest bin is the background.
            comp1 = np.max(histo)
            idx1 = list(histo).index(comp1)
            histo[idx1]=0

        # Suppress tiny noise components (< 100 px).
        for i in range(len(histo)):
            if histo[i]<100:
                histo[i]=0
        self.__labelHist = histo

        self.__compsX = [[] for i in range(len(self.__labelHist))]
        self.__compsY = [[] for i in range(len(self.__labelHist))]

        # Collect the pixel coordinates of every component.
        self.__h, self.__w = np.shape(labeled)
        for i in range(self.__w):
            for j in range(self.__h):
                self.__compsX[labeled[j][i]].append(i)
                self.__compsY[labeled[j][i]].append(j)
        return labeled

    def findCircle(self, labeled):
        '''
        Locate the circular scale marker among the labeled components.

        Scores each sufficiently large component by how square its
        bounding box is and how well it fills it; the best (smallest)
        score wins. Returns (circleIdx, score, width, height, cropped
        image of the circle); a score > 0.2 means no circle was found
        (then circleIdx is 0 and the score is forced to 1).
        '''
        print 'searching circle'
        ratio = []
        # NOTE(review): np.shape returns (rows, cols); the unpacking here
        # is (w, h) -- only the product w*h is used, so it cancels out.
        w,h=np.shape(labeled)
        for i in range(len(self.__compsX)):
            if self.__labelHist[i] > 0:
                xMin = np.min(self.__compsX[i])
                xMax = np.max(self.__compsX[i])
                yMin = np.min(self.__compsY[i])
                yMax = np.max(self.__compsY[i])
                nonZ=len(self.__compsX[i])
                allPx=(xMax-xMin)*(yMax-yMin)
                squareToCircleRatio=float(nonZ)/float(allPx)
                '''
                compensates for small noisy components and small excised roots
                '''
                if float(nonZ)/float(w*h)>0.0001:
                    '''
                    the inscribed circle of a bounding box fills exactly 78.64 percent -> we allow 8.64 percent variation due to noise
                    '''
                    tagRatio=(float(xMax) - float(xMin)) / (float(yMax) - float(yMin))
                    if squareToCircleRatio > 0.7:
                        '''
                        sanity check
                        '''
                        if (float(yMax) - float(yMin)) > 0:
                            '''
                            determine tag ratio
                            '''
                            tagRatio=(float(xMax) - float(xMin)) / (float(yMax) - float(yMin))
                            print 'Circle Ratio : '+str(tagRatio) +' ID: '+str(self.__currentIdx)
                            # Score = deviation of the bounding box from a square.
                            ratio.append(np.abs(1-(float(xMax) - float(xMin)) / (float(yMax) - float(yMin))))
                        else: ratio.append(1000)
                    else: ratio.append(1000)
                else: ratio.append(1000)
            else: ratio.append(1000)


        rect = np.min(ratio)
        rectIdx = list(ratio).index(rect)

        xMin = np.min(self.__compsX[rectIdx])
        xMax = np.max(self.__compsX[rectIdx])
        yMin = np.min(self.__compsY[rectIdx])
        yMax = np.max(self.__compsY[rectIdx])


        print 'Circle Ratio: '+str(rect)

        idx = np.where(labeled==rectIdx)
        '''
        bounding box
        '''
        iMin=np.min(idx[0])
        jMin=np.min(idx[1])
        iMax=np.max(idx[0])
        jMax=np.max(idx[1])

        # Blank every other component so only the circle remains.
        sel = labeled != rectIdx
        labeled[sel]=0

        if rect > 0.2:
            print 'Error: No circle detectable'
            rect=1
            rectIdx=0

        return rectIdx, rect, float(xMax) - float(xMin), float(yMax) - float(yMin),labeled[iMin:iMax, jMin:jMax]


    def findRoot(self, labeledToCopy):
        '''
        Find the root crown: the largest remaining component that is not
        the full-frame background. Returns (label index, pixel index
        arrays, iMax, iMin, jMin, jMax) of the crown's bounding box.
        '''
        print 'searching rootstock'
        labeled=labeledToCopy.copy()
        h,w=np.shape(labeled)
        found=False
        idx1=0
        count=0
        '''
        We keep this piece of debug code, because the problem occurs in 1 of 10,000 images. Perhaps we understand it one day.
        '''
        while found==False:
            idx1 = np.argmax(self.__labelHist)
            idx = np.where(labeled==idx1)
            # A component spanning the entire frame is the background, not
            # the root. NOTE(review): idx[0] are row indices but are compared
            # against w (and idx[1] against h) -- looks swapped; confirm.
            if (np.max(idx[0])+1) == w and (np.max(idx[1])+1)==h and (np.min(idx[0])) == 0 and (np.min(idx[1]))==0:
                if count < len(self.__labelHist):
                    found=False
                    count+=1
                else: found=True
                print 'Only 1 background component that is smaller than the foreground ??? Probably a bug in the Masking routine'
            else:
                found=True

        '''
        bounding box
        '''
        iMin=np.min(idx[0])
        jMin=np.min(idx[1])
        iMax=np.max(idx[0])
        jMax=np.max(idx[1])

        print 'xMin and xMax of Root Crown: '+str(iMin)+' '+str(iMax)
        print 'yMin and yMax of Root Crown: '+str(jMin)+' '+str(jMax)


        return idx1,idx,iMax,iMin,jMin,jMax

    def correctForStem(self,labeledToCopy,excludeIdx,left,right,bottom,top,rootIdx,rootIdxList):
        '''
        We loop through detected objects to identify them. During recognition the labeled image has to be made free of noise.
        Therefore we have to copy it.
        '''
        # Experimental stem reconstruction: try to merge a detached stem
        # component with the crown by repeated binary dilation in the gap
        # region, and return the cropped crown (+stem) binary image.
        # excludeIdx lists label indices (circle, tag, crown) that must not
        # be picked as the stem candidate.
        print 'checking for stem part'
        labeled=labeledToCopy.copy()
        idx2=-1
        counter=0
        again=True
        while again==True:
            counter+=1
            if counter>30: break    # hard stop against endless retries
            print 'stem part loop'
            again=False
            nr_objects=len(self.__labelHist)
            if nr_objects>1:
                # Largest remaining component that is not excluded.
                comp = np.argmax(self.__labelHist)
                count=0
                if comp in excludeIdx:
                    self.__labelHist[comp] = 0
                    comp = np.argmax(self.__labelHist)
                    if count>len(excludeIdx):
                        print 'Error: Image is not usable'
                        idx2=-1
                        break
                    else: count +=1
                idx2 = comp

            else:
                idx2=-1

            labeled=labeledToCopy.copy()
            idx = np.where(labeled==idx2)
            '''
            bounding box
            '''
            try:
                iMin=np.min(idx[0])
                jMin=np.min(idx[1])
                iMax=np.max(idx[0])
                jMax=np.max(idx[1])

                print 'xMin and xMax of stem part: '+str(iMin)+' '+str(iMax)
                print 'yMin and yMax of stem part: '+str(jMin)+' '+str(jMax)
                print 'yMax of crown: '+str(top)
                print 'xMin of crown: '+str(bottom)
                sel = labeled != idx2
                labeled[sel]=0
                nonZ=len(idx[0])
                boundingBoxSize=(iMax-iMin)*(jMax-jMin)
                zeros=boundingBoxSize-nonZ
                ratio=float(zeros)/float(nonZ)
                print 'ratio: '+str(ratio)

                if counter>=nr_objects:
                    again=False

            except:
                # Empty candidate (idx2 not present): try the next one.
                again=True
            nrOfObjPart=0
            if (right)>iMax*0.9:

                # Candidate overlaps the crown's top region: paint stem and
                # crown into one binary image and dilate the gap closed.
                sel = labeled == (idx2)
                self.__labelHist[idx2] = 0
                self.__labelHist[rootIdx]=0
                imgReturn=np.zeros_like(labeled)

                imgReturn[sel]=1
                imgReturn[rootIdxList]=1
                rep=int(np.fabs(iMax*0.9-right*1.1))
                for i in range(rep):
                    # NOTE(review): np.int is removed in NumPy >= 1.24; this
                    # file targets the pinned NumPy 1.19 -- confirm.
                    imgReturn[iMax*0.9:right*1.1,jMin:jMax]=scipy.ndimage.binary_dilation(imgReturn[iMax*0.9:right*1.1,jMin:jMax]).astype(np.int)
                    imgLabel,nrOfObjPart=scipy.ndimage.label(imgReturn[iMin:left,bottom:top])
                    print 'nrOfObj = '+str(nrOfObjPart)
                    if nrOfObjPart == 1:
                        break   # stem and crown merged into one component

                if nrOfObjPart ==1:
                    return imgReturn[iMin:left,bottom:top]
                else:
                    # Merge failed: fall back to the plain crown image.
                    self.__labelHist[rootIdx]=0
                    imgReturn=np.zeros_like(labeled)
                    imgReturn[rootIdxList]=1
                    return imgReturn[right:left,bottom:top]
            else:
                # No plausible stem candidate: return the plain crown image.
                self.__labelHist[rootIdx]=0
                imgReturn=np.zeros_like(labeled)
                imgReturn[rootIdxList]=1
                return imgReturn[right:left,bottom:top]

    def findExcisedRoot(self, labeledToCopy,excludeIdx,minOfCrown,maxOfCrown):

        '''
        We loop through detected objects to identify them. During recognition the labeled image has to be made free of noise.
        Therefore we have to copy it.
        '''
        # Pick the largest not-yet-consumed component as an excised root.
        # Its histogram entry is zeroed so repeated calls walk through the
        # remaining components one by one. Returns (label index, cropped
        # binary image with the root at 255, center row, center col).
        print 'searching excised root'
        labeled=labeledToCopy.copy()
        w,h=np.shape(labeled)
        idx2=-1
        counter=0
        again=True

        while again==True:
            counter+=1
            if counter>30: break    # hard stop against endless retries
            print 'excised root loop'
            again=False
            nr_objects=len(self.__labelHist)
            if nr_objects>1:
                comp = np.argmax(self.__labelHist)
                count=0
                if comp in excludeIdx:

                    self.__labelHist[comp] = 0
                    comp = np.argmax(self.__labelHist)
                    if count>len(excludeIdx):
                        print 'Error: Image is not usable'
                        idx2=-1
                        break
                    else: count +=1
                idx2 = comp
                # Consume this component so the next call skips it.
                self.__labelHist[idx2] = 0
            else:
                idx2=-1

            labeled=labeledToCopy.copy()
            idx = np.where(labeled==idx2)

            '''
            bounding box
            '''
            try:
                iMin=np.min(idx[0])
                jMin=np.min(idx[1])
                iMax=np.max(idx[0])
                jMax=np.max(idx[1])
                print 'xMin and xMax of Excised Root: '+str(jMin)+' '+str(jMax)
                print 'yMin and yMax of Excised Root: '+str(iMin)+' '+str(iMax)
                print 'xMax of crown: '+str(maxOfCrown)
                print 'xMin of crown: '+str(minOfCrown)
                sel = labeled != idx2
                labeled[sel]=0
                nonZ=len(idx[0])
                boundingBoxSize=(iMax-iMin)*(jMax-jMin)
                zeros=boundingBoxSize-nonZ
                ratio=float(zeros)/float(nonZ)
                print 'ratio: '+str(ratio)

                if counter>=nr_objects:
                    again=False

            except:
                # Empty candidate: retry with the next-largest component.
                again=True
        sel = labeled == idx2
        labeled[sel]=255
        return idx2,labeled[iMin:iMax, jMin:jMax],(iMax+iMin)/2,(jMax+jMin)/2,

    def findTag(self, labeled, imgBinary, img, rect_ratio=0.33):
        '''
        Locate the experiment tag and read its text (OCR, then barcode,
        which takes precedence). A tag candidate must cover 0.5-1% of the
        image, be at least 1.5x wider than high, and sit in the upper half
        of the frame. Returns (tagIdx, aspect ratio, width, height,
        cropped binary tag image, tag text).

        NOTE(review): the rect_ratio parameter is never used in the body;
        callers pass rect_ratio=5. -- confirm before removing it.
        '''
        print 'searching tag'

        ratio = []
        for i in range(len(self.__compsX)):
            if self.__labelHist[i] > 0:
                xMin = np.min(self.__compsX[i])
                xMax = np.max(self.__compsX[i])
                yMin = np.min(self.__compsY[i])
                yMax = np.max(self.__compsY[i])
                '''
                The Tag should cover at least 0.5% of the picture
                '''
                if float((xMax-xMin)*(yMax-yMin))/float(self.__h*self.__w) >0.005 and float((xMax-xMin)*(yMax-yMin))/float(self.__h*self.__w) <0.01:
                    '''
                    The Tag should have more length then height
                    '''
                    if (xMax-xMin)>=1.5*(yMax-yMin):
                        '''
                        The Tag should be in the upper half of the image
                        '''
                        if yMin< (self.__h*0.5):
                            tagRatio=(float(xMax) - float(xMin)) / (float(yMax) - float(yMin))
                            print 'TagRatio detected: '+str(tagRatio)+' ID: '+str(self.__currentIdx)
                            ratio.append((float(xMax) - float(xMin)) / (float(yMax) - float(yMin)))
                        else: ratio.append(-1)
                    else: ratio.append(-1)
                else: ratio.append(-1)
            else: ratio.append(-1)


        # The widest qualifying component wins; -1 means nothing qualified.
        rect = np.max(ratio)
        if rect >=0:
            rectIdx = list(ratio).index(rect)
            xMin = np.min(self.__compsX[rectIdx])
            xMax = np.max(self.__compsX[rectIdx])
            yMin = np.min(self.__compsY[rectIdx])
            yMax = np.max(self.__compsY[rectIdx])
        else:
            xMin=xMax=yMin=yMax = 0
            rectIdx =-1

        print 'Tag Ratio: '+str(rect)
        if rect ==-1:
            rectIdx=-1
            iMin=iMax=jMin=jMax=0
        else:
            idx = np.where(labeled==rectIdx)
            '''
            bounding box
            '''
            iMin=np.min(idx[0])
            jMin=np.min(idx[1])
            iMax=np.max(idx[0])
            jMax=np.max(idx[1])

        sel = labeled != rectIdx
        if rect>=0:
            # Blank everything but the tag, then try to read it.
            labeled[sel]=0
            try:
                print 'Check for text'
                tagText=ocr.getTextFromImage(img[iMin+self.__tagCrop:iMax-self.__tagCrop, jMin+self.__tagCrop:jMax-self.__tagCrop],self.__io.getHomePath(),str(self.__id))
            except:
                tagText='Tag text extraction Failed'
                pass
            '''
            This order prefers the barcode over the text reader
            '''
            try:
                print 'Check for barcode'
                tagCode=ocr.getCodeFromImage(img[iMin+self.__tagCrop:iMax-self.__tagCrop, jMin+self.__tagCrop:jMax-self.__tagCrop],self.__io.getHomePath())
                if len(tagCode) >2:
                    # Strip the decoder's prefix and trailing newline.
                    tagText=tagCode[8:len(tagCode)-1]
                    print tagText
                else:
                    print 'bar code to short: '+tagCode
            except:
                pass

            self.__io.writeServerFile('dirt_out.csv',self.__io.getHomePath()+'/Mask/'+self.__io.getFileName()+'Tag.png,' +str(self.__io.getID())+',0')


        else:
            tagText= 'No label found'

        print 'return value rectIdx: '+str(rectIdx)
        return rectIdx, rect, float(xMax) - float(xMin), float(yMax) - float(yMin),imgBinary[iMin+self.__tagCrop:iMax-self.__tagCrop, jMin+self.__tagCrop:jMax-self.__tagCrop],tagText
/nv/hp10/adas30/bin/python 2 | ''' 3 | ---------------------------------------------------------------------------------------------------- 4 | DIRT 1.1 - An automatic high throughput root phenotyping platform 5 | Web interface by Abhiram Das - adas30@biology.gatech.edu 6 | 7 | http://dirt.iplantcollaborative.org 8 | 9 | University of Georgia 10 | 11 | The software is written in: 12 | - python 2.7 (https://www.python.org) 13 | 14 | The software depends on: 15 | - the graphtools package (http://graph-tool.skewed.de) 16 | - the mahotas package (http://luispedro.org/software/mahotas) 17 | - the numpy package (http://sourceforge.net/projects/numpy/) 18 | - the scipy package (http://www.scipy.org/SciPy) 19 | 20 | Optionally binaries of can be used for tag recognition: 21 | 22 | - tesseract (https://code.google.com/p/tesseract-ocr/) 23 | - zbar (http://zbar.sourceforge.net) 24 | 25 | The software uses free code that had no references when found on the net: 26 | - http://www.daniweb.com/software-development/python/threads/31449/k-means-clustering 27 | 28 | The software uses modified code from the scikit.image: 29 | - adaptive thresholding in Masking.py (http://scikit-image.org) 30 | 31 | The software uses modified code from Kyle Fox: 32 | - fixOrientation.py: https://github.com/kylefox/python-image-orientation-patch 33 | 34 | 35 | Please cite the DIRT Paper if you use the code for your scientific project. 
36 | 37 | Bucksch et al., 2014 "Image-based high-throughput field phenotyping of crop roots", Plant Physiology 38 | 39 | 40 | ---------------------------------------------------------------------------------------------------- 41 | Author: Alexander Bucksch 42 | Department of Plant Biology 43 | Warnell School of Forestry and Natural Resources 44 | Institute of Bioinformatics 45 | University of Georgia 46 | 47 | Mail: bucksch@uga.edu 48 | Web: http://www.computational-plant-science.org 49 | ---------------------------------------------------------------------------------------------------- 50 | 51 | Copyright (c) 2014 Alexander Bucksch 52 | All rights reserved. 53 | 54 | Redistribution and use in source and binary forms, with or without 55 | modification, are permitted provided that the following conditions are 56 | met: 57 | 58 | * Redistributions of source code must retain the above copyright 59 | notice, this list of conditions and the following disclaimer. 60 | 61 | * Redistributions in binary form must reproduce the above 62 | copyright notice, this list of conditions and the following 63 | disclaimer in the documentation and/or other materials provided 64 | with the distribution. 65 | 66 | * Neither the name of the DIRT Developers nor the names of its 67 | contributors may be used to endorse or promote products derived 68 | from this software without specific prior written permission. 69 | 70 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 71 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 72 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 73 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 74 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 75 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 76 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 77 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 78 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 79 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 80 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 81 | 82 | ''' 83 | 84 | ''' 85 | # external library imports 86 | ''' 87 | import scipy 88 | 89 | ''' 90 | # internal library imports 91 | ''' 92 | import dirtIO 93 | import Segmentation 94 | import Preprocessing 95 | import Skeleton 96 | import Analysis 97 | import RootTipPaths 98 | from fixImageOrientation import * 99 | 100 | ''' 101 | # standard python imports 102 | ''' 103 | import os 104 | import pickle 105 | import csv 106 | import sys 107 | import time 108 | from collections import OrderedDict 109 | 110 | ''' 111 | #global defs 112 | ''' 113 | allCrown=[] 114 | allPara=[] 115 | f=[] 116 | imgID=None 117 | io=dirtIO.IO() 118 | options=[] 119 | scale=None 120 | ID=None 121 | stemCorrection=False 122 | maxExRoot=None 123 | traitDict= OrderedDict() 124 | 125 | def init(fpath, io): 126 | oldpath = os.getcwd() 127 | io.setHomePath(fpath) 128 | if not os.path.exists(fpath): 129 | os.mkdir(fpath) 130 | os.chdir(fpath) 131 | print os.getcwd() 132 | io.setServerPath(os.getcwd()) 133 | if not os.path.exists('tmp'): 134 | os.mkdir('tmp') 135 | if not os.path.exists('Mask'): 136 | os.mkdir('Mask') 137 | if not os.path.exists(os.path.join('Lateral','Plots')): 138 | os.makedirs(os.path.join('Lateral','Plots')) 139 | if not os.path.exists(os.path.join('Lateral','Result')): 140 | os.makedirs(os.path.join('Lateral','Result')) 141 | if not os.path.exists(os.path.join('Crown','Plots')): 142 | os.makedirs(os.path.join('Crown','Plots')) 143 | 
if not os.path.exists(os.path.join('Crown','Result')): 144 | os.makedirs(os.path.join('Crown','Result')) 145 | if not os.path.exists(os.path.join('Crown','Skeleton')): 146 | os.makedirs(os.path.join('Crown','Skeleton')) 147 | 148 | os.chdir(oldpath) 149 | readTraits(options[12][1]) 150 | 151 | def readTraits(myFilePath='./traits.csv'): 152 | global traitDict 153 | print "TRAITS DIRECTORY: "+ os.getcwd() 154 | #check to make sure its a file not a sub folder 155 | if (os.path.isfile(myFilePath) and myFilePath.endswith(".csv")): 156 | with open(myFilePath, 'U') as csvfile: 157 | #sniff to find the format 158 | fileDialect = csv.Sniffer().sniff(csvfile.read(1024)) 159 | csvfile.seek(0) 160 | #read the CSV file into a dictionary 161 | dictReader = csv.reader(csvfile, dialect=fileDialect) 162 | for row in dictReader: 163 | try: 164 | traitDict[row[0]]=bool(int(row[1])) 165 | except: 166 | print 'invalid entry in trait file: '+ str(row) 167 | pass 168 | print traitDict 169 | return 170 | 171 | def readOptions(): 172 | global options 173 | if len(sys.argv)==13: 174 | options.append([0,os.path.dirname(sys.argv[1])]) 175 | options.append([0,os.path.basename(sys.argv[1])]) 176 | options.append([0,sys.argv[2]]) 177 | options.append([0,sys.argv[3]]) 178 | options.append([0,sys.argv[4]]) 179 | options.append([0,sys.argv[5]]) 180 | options.append([0,sys.argv[6]]) 181 | options.append([0,sys.argv[7]]) 182 | options.append([0,sys.argv[8]]) 183 | options.append([0,sys.argv[9]]) 184 | options.append([0,sys.argv[10]]) 185 | options.append([0,sys.argv[11]]) 186 | options.append([0,sys.argv[12]]) 187 | 188 | else: 189 | with open('./options.csv','U') as csvfile: 190 | filedata= csv.reader(csvfile) 191 | for i in filedata: 192 | options.append(i) 193 | 194 | 195 | return options 196 | 197 | def ifAnyKeyIsTrue(listOfKeys): 198 | for i in listOfKeys: 199 | if traitDict[i]==True: 200 | return True 201 | return False 202 | 203 | def 
threadSegmentation(filepath,imgFile,imgID,maxExRoot,rootCrown,marker): 204 | 205 | global io 206 | global scale 207 | global stemCorrection 208 | 209 | stemCorrection=bool(int(options[8][1])) 210 | io.setFileName(imgFile) 211 | io.setidIdx(imgID) 212 | prep=Preprocessing.Preprocessing(io) 213 | print 'segmenting file: '+imgFile +'\n' 214 | 215 | image_file_path = os.path.join(options[0][1], imgFile) 216 | 217 | if os.path.isfile(image_file_path): 218 | # fix orientation of the image in tiff and Jpg files 219 | try: 220 | fix_orientation(image_file_path, save_over=True) 221 | except: 222 | pass 223 | img= scipy.misc.imread(image_file_path, flatten=True) 224 | 225 | else: 226 | print 'Image not readable' 227 | img=[] 228 | 229 | if len(img)>0: 230 | currT=time.time() 231 | Failed,tagExtract,circleRatio, circleWidth, circleHeight = prep.prepocess(img,rootCrown,scale=float(options[3][1]),nrExRoot=maxExRoot,marker=marker,stemCorrection=stemCorrection) 232 | print 'Segmentation finished in '+str(time.time()-currT)+'s' 233 | if Failed == False: 234 | xScale=scale/float(circleWidth) 235 | yScale=scale/float(circleHeight) 236 | if xScale<=0.0: xScale=1. 237 | if yScale<=0.0: yScale=1. 
238 | para=[int(imgID),io.getFileName(),Failed,tagExtract,circleRatio, circleWidth, circleHeight,xScale,yScale,-1,-1] 239 | if maxExRoot>1: 240 | for _ in range(maxExRoot): 241 | allPara.append(para) 242 | else: allPara.append(para) 243 | else: 244 | xScale=scale/float(1.0) 245 | yScale=scale/float(1.0) 246 | circleRatio=1.0 247 | circleWidth=1.0 248 | circleHeight=1.0 249 | para=[int(imgID),io.getFileName(),Failed,tagExtract,circleRatio, circleWidth, circleHeight,xScale,yScale,-1,-1] 250 | if maxExRoot>1: 251 | for _ in range(maxExRoot): 252 | allPara.append(para) 253 | else: allPara.append(para) 254 | 255 | def threadCrown(filepath): 256 | global io 257 | 258 | rtpSkel=-1 259 | crownT=OrderedDict() 260 | imgL=[] 261 | stemCorrection=bool(int(options[8][1])) 262 | 263 | print io.getHomePath() 264 | oldHome=io.getHomePath() 265 | os.chdir(io.getHomePath()) 266 | io.setHomePath('./Crown/') 267 | f=io.scanDir() 268 | for (counter,i) in enumerate(f): 269 | io.setFileName(os.path.basename(i)) 270 | io.setidIdx(imgID) 271 | 272 | print 'processing Crown file: '+i 273 | xScale=allPara[counter][7] 274 | yScale=allPara[counter][8] 275 | analysis=Analysis.Analysis(io,(xScale+yScale)/2) 276 | rtp=RootTipPaths.RootTipPaths(io) 277 | 278 | 279 | try: 280 | img=scipy.misc.imread(i,flatten=True) 281 | except: 282 | print 'Image not readable' 283 | img=-1 284 | 285 | if len(img)>0: 286 | seg=Segmentation.Segmentation(img,io) 287 | imgL=seg.label() 288 | print 'compute root profile' 289 | currT=time.time() 290 | if ifAnyKeyIsTrue(['AVG_DENSITY','WIDTH_MED','WIDTH_MAX','DIA_STM_SIMPLE','D10','D20','D30','D40','D50','D60','D70','D80','D90','DS10','DS20','DS30','DS40','DS50','DS60','DS70','DS80','DS90','AREA','ANG_TOP','ANG_BTM']): 291 | 
crownT['AVG_DENSITY'],crownT['WIDTH_MED'],crownT['WIDTH_MAX'],crownT['D10'],crownT['D20'],crownT['D30'],crownT['D40'],crownT['D50'],crownT['D60'],crownT['D70'],crownT['D80'],crownT['D90'],crownT['DS10'],crownT['DS20'],crownT['DS30'],crownT['DS40'],crownT['DS50'],crownT['DS60'],crownT['DS70'],crownT['DS80'],crownT['DS90'],crownT['AREA'],crownT['DIA_STM_SIMPLE'],crownT['ANG_TOP'],crownT['ANG_BTM']=analysis.getWidthOverHeight(imgL,xScale,yScale) 292 | print 'Mask traits computed '+str(time.time()-currT)+'s' 293 | 294 | if ifAnyKeyIsTrue(['DIA_STM','TD_MED','TD_AVG','STA_RANGE','STA_DOM_I','STA_DOM_II','STA_25_I','STA_25_II','STA_50_I','STA_50_II','STA_75_I','STA_75_II','STA_90_I','STA_90_II','RTA_DOM_I','RTA_DOM_II','STA_MIN','STA_MAX','STA_MED','RTA_RANGE','RTA_MIN','RTA_MAX','RTA_MED','NR_RTP_SEG_I','NR_RTP_SEG_II','ADVT_COUNT','BASAL_COUNT','ADVT_ANG','BASAL_ANG','HYP_DIA','TAP_DIA','MAX_DIA_90','DROP_50','CP_DIA25','CP_DIA50','CP_DIA75','CP_DIA90','SKL_DEPTH','SKL_WIDTH']): 295 | currT=time.time() 296 | skel=Skeleton.Skeleton(imgL) 297 | testSkel,testDia=skel.skel(imgL) 298 | scipy.misc.imsave(os.path.join(io.getHomePath(), 'Skeleton', io.getFileName() + '_skel.png'), testSkel) 299 | print 'Medial axis computed '+str(time.time()-currT)+'s' 300 | currT=time.time() 301 | path,skelGraph,crownT['DIA_STM'],skelSize=seg.findThickestPath(testSkel,testDia,xScale,yScale) 302 | allPara[counter][10]=skelSize 303 | print 'Central path computed '+str(time.time()-currT)+'s' 304 | 305 | if ifAnyKeyIsTrue(['TD_MED','TD_AVG','STA_RANGE','STA_DOM_I','STA_DOM_II','STA_25_I','STA_25_II','STA_50_I','STA_50_II','STA_75_I','STA_75_II','STA_90_I','STA_90_II','RTA_DOM_I','RTA_DOM_II','STA_MIN','STA_MAX','STA_MED','RTA_RANGE','RTA_MIN','RTA_MAX','RTA_MED','NR_RTP_SEG_I','NR_RTP_SEG_II','ADVT_COUNT','BASAL_COUNT','ADVT_ANG','BASAL_ANG','HYP_DIA','TAP_DIA','MAX_DIA_90','DROP_50','CP_DIA25','CP_DIA50','CP_DIA75','CP_DIA90','SKL_DEPTH','SKL_WIDTH','RTP_COUNT']): 306 | print 'Compute RTP 
skeleton' 307 | currT=time.time() 308 | rtpSkel,crownT['RTP_COUNT'], crownT['TD_MED'],crownT['TD_AVG'],crownT['MAX_DIA_90'], rtps, tips, crownT['SKL_WIDTH'], crownT['SKL_DEPTH'] =rtp.getRTPSkeleton(path,skelGraph,True) 309 | seg.setTips(tips) 310 | print 'RTP Skeleton computed '+str(time.time()-currT)+'s' 311 | 312 | allPara[len(allPara)-1][2]=seg.getFail() 313 | 314 | 315 | if ifAnyKeyIsTrue(['RDISTR_X','RDISTR_Y']): 316 | print 'Compute spatial root distribution' 317 | currT=time.time() 318 | crownT['RDISTR_X'],crownT['RDISTR_Y']=analysis.getSymmetry(rtps,rtpSkel) 319 | print 'Symmetry computed '+str(time.time()-currT)+'s' 320 | 321 | if rtpSkel!=-1: 322 | if ifAnyKeyIsTrue(['NR_RTP_SEG_I','NR_RTP_SEG_II','ADVT_COUNT','BASAL_COUNT','ADVT_ANG','BASAL_ANG','HYP_DIA','TAP_DIA']): 323 | print 'searching for hypocotyl' 324 | currT=time.time() 325 | branchRad,nrPaths=seg.findHypocotylCluster(path,rtpSkel) 326 | print 'hypocotyl computed '+str(time.time()-currT)+'s' 327 | print 'starting kmeans' 328 | try: 329 | currT=time.time() 330 | c1x,c1y,c2x,c2y = analysis.plotDiaRadius(nrPaths, branchRad,path,2) 331 | 332 | print '2 clusters computed in '+str(time.time()-currT)+'s' 333 | 334 | currT=time.time() 335 | segImg=seg.makeSegmentationPicture(path,rtpSkel,img,xScale,yScale,c1x,c1y,c2x,c2y) 336 | scipy.misc.imsave(io.getHomePath()+'/Result/' +io.getFileName()+ 'Seg2.png', segImg) 337 | crownT['ADVT_COUNT'],crownT['BASAL_COUNT'],crownT['NR_RTP_SEG_I'],crownT['NR_RTP_SEG_II'], crownT['HYP_DIA'], crownT['TAP_DIA'] =analysis.countRootsPerSegment(c1y,c2y,c1x,c2x) 338 | except: 339 | c1x=None 340 | c1y=None 341 | c2x=None 342 | c2y=None 343 | pass 344 | crownT['DROP_50']=analysis.RTPsOverDepth(path,rtpSkel) 345 | print 'count roots per segment' 346 | print 'Root classes computed in '+str(time.time()-currT)+'s' 347 | 348 | if 
ifAnyKeyIsTrue(['ADVT_ANG','BASAL_ANG','STA_RANGE','STA_DOM_I','STA_DOM_II','STA_25_I','STA_25_II','STA_50_I','STA_50_II','STA_75_I','STA_75_II','STA_90_I','STA_90_II','RTA_DOM_I','RTA_DOM_II','STA_MIN','STA_MAX','STA_MED','RTA_RANGE','RTA_MIN','RTA_MAX','RTA_MED']): 349 | currT=time.time() 350 | lat,corrBranchpts=seg.findLaterals(rtps, rtpSkel,(xScale+yScale)/2, None) 351 | print 'seg.findLaterals computed in '+str(time.time()-currT)+'s' 352 | print 'Compute angles at 2cm' 353 | currT=time.time() 354 | if c1x!=None and c1y!=None and c2x!=None and c2y!=None: crownT['ADVT_ANG'],crownT['BASAL_ANG']=analysis.anglesPerClusterAtDist(c1y, c2y, rtpSkel, path, lat, corrBranchpts, (xScale+yScale)/2, dist=20) 355 | else: 356 | crownT['ADVT_ANG']='nan' 357 | crownT['BASAL_NG']='nan' 358 | print 'angles at 2cm computed in '+str(time.time()-currT)+'s' 359 | 360 | if ifAnyKeyIsTrue(['STA_25_I','STA_25_II','STA_50_I','STA_50_II','STA_75_I','STA_75_II','STA_90_I','STA_90_II']): 361 | try: 362 | print 'compute quantile angles' 363 | currT=time.time() 364 | a25,a50,a75,a90=analysis.calculateAngleQuantiles(path,lat,corrBranchpts,rtpSkel) 365 | print 'angles computed in '+str(time.time()-currT)+'s' 366 | except: 367 | a25=['nan'] 368 | a50=['nan'] 369 | a75=['nan'] 370 | a90=['nan'] 371 | print 'ERROR: No quantile angles calculated' 372 | 373 | if ifAnyKeyIsTrue(['RTA_RANGE','RTA_MIN','RTA_MAX','RTA_MED']): 374 | try: 375 | print 'compute angles' 376 | currT=time.time() 377 | crownT['RTA_MED'],crownT['RTA_MIN'],crownT['RTA_MAX'],crownT['RTA_RANGE'],anglesN=analysis.calculateAngles(path,lat,corrBranchpts,rtpSkel) 378 | print 'RTA angle characteristics computed in '+str(time.time()-currT)+'s' 379 | except: 380 | print 'ERROR: No RTA angles calculated' 381 | 382 | if ifAnyKeyIsTrue(['STA_RANGE','STA_MIN','STA_MAX','STA_MED']): 383 | try: 384 | print 'compute STA angles' 385 | currT=time.time() 386 | 
crownT['STA_RANGE'],crownT['STA_MED'],crownT['STA_MIN'],crownT['STA_MAX'],angles=analysis.getLateralAngles(path,lat,corrBranchpts,rtpSkel) 387 | print 'STA angles characteristics computed in '+str(time.time()-currT)+'s' 388 | except: 389 | print 'ERROR: No STA angles calculated' 390 | 391 | if ifAnyKeyIsTrue(['CP_DIA25','CP_DIA50','CP_DIA75','CP_DIA90']): 392 | try: 393 | print 'compute diameter quantils' 394 | currT=time.time() 395 | crownT['CP_DIA25'],crownT['CP_DIA50'],crownT['CP_DIA75'],crownT['CP_DIA90']=analysis.getDiameterQuantilesAlongSinglePath(path,rtpSkel) 396 | print 'Tap diameters computed in '+str(time.time()-currT)+'s' 397 | except: 398 | print 'ERROR: No quantile diameters calculated' 399 | 400 | if ifAnyKeyIsTrue(['STA_DOM_I','STA_DOM_II']): 401 | try: 402 | print 'compute STA dominant angles' 403 | currT=time.time() 404 | crownT['STA_DOM_I'],crownT['STA_DOM_II']=analysis.findHistoPeaks(angles) 405 | print 'STA dominant angles computed in '+str(time.time()-currT)+'s' 406 | except: 407 | print 'ERROR: No dominant angles calculated (STA)' 408 | 409 | if ifAnyKeyIsTrue(['STA_25_I','STA_25_II']): 410 | try: 411 | currT=time.time() 412 | crownT['STA_25_I'],crownT['STA_25_II']=analysis.findHistoPeaks(a25) 413 | print 'STA 25 angles computed in '+str(time.time()-currT)+'s' 414 | except: 415 | print 'ERROR: No dominant angles25 calculated' 416 | 417 | if ifAnyKeyIsTrue(['STA_50_I','STA_50_II']): 418 | try: 419 | currT=time.time() 420 | crownT['STA_50_I'],crownT['STA_50_II']=analysis.findHistoPeaks(a50) 421 | print 'STA 50 angles computed in '+str(time.time()-currT)+'s' 422 | except: 423 | print 'ERROR: No dominant angles50 calculated' 424 | 425 | if ifAnyKeyIsTrue(['STA_75_I','STA_75_II']): 426 | try: 427 | currT=time.time() 428 | crownT['STA_75_I'],crownT['STA_75_II']=analysis.findHistoPeaks(a75) 429 | print 'STA 75 angles computed in '+str(time.time()-currT)+'s' 430 | except: 431 | print 'ERROR: No dominant angles75 calculated' 432 | 433 | if 
ifAnyKeyIsTrue(['STA_90_I','STA_90_II']): 434 | try: 435 | currT=time.time() 436 | crownT['STA_90_I'],crownT['STA_90_II']=analysis.findHistoPeaks(a90) 437 | print 'STA 90 angles computed in '+str(time.time()-currT)+'s' 438 | except: 439 | print 'ERROR: No dominant angles90 calculated' 440 | 441 | if ifAnyKeyIsTrue(['RTA_DOM_I','RTA_DOM_II']): 442 | try: 443 | currT=time.time() 444 | crownT['RTA_DOM_I'],crownT['RTA_DOM_II']=analysis.findHistoPeaks(anglesN) 445 | print 'angles computed in '+str(time.time()-currT)+'s' 446 | except: 447 | print 'ERROR: No dominant RTA angles calculated' 448 | io.setHomePath(oldHome) 449 | if maxExRoot >= 1: 450 | rtpSkel=-1 451 | os.chdir(io.getHomePath()) 452 | io.setHomePath('./Lateral/') 453 | f=io.scanDir() 454 | for (counter,i) in enumerate(f): 455 | print 'processing lateral file: '+i 456 | 457 | if maxExRoot>0: 458 | xScale=allPara[counter/maxExRoot][7] 459 | yScale=allPara[counter/maxExRoot][8] 460 | io.setFileName(os.path.basename(i)) 461 | else: 462 | xScale=allPara[counter][7] 463 | yScale=allPara[counter][8] 464 | io.setFileName(os.path.basename(i)) 465 | io.setidIdx(counter) 466 | 467 | rtp=RootTipPaths.RootTipPaths(io) 468 | 469 | analysis=Analysis.Analysis(io,(xScale+yScale)/2) 470 | 471 | 472 | try: 473 | img=scipy.misc.imread(i,flatten=True) 474 | except: 475 | print 'Image not readable' 476 | img=[] 477 | pass 478 | if len(img)>0: 479 | 480 | seg=Segmentation.Segmentation(img,io=io) 481 | imgL=seg.label() 482 | 483 | if imgL!=None: 484 | skel=Skeleton.Skeleton(imgL) 485 | testSkel,testDia=skel.skel(imgL) 486 | path,skelGraph=seg.findThickestPathLateral(testSkel,testDia,xScale,yScale) 487 | if ifAnyKeyIsTrue(['LT_AVG_LEN','NODAL_LEN','LT_BRA_FRQ','NODAL_AVG_DIA','LT_AVG_ANG','LT_ANG_RANGE','LT_MIN_ANG','LT_MAX_ANG','LT_DIST_FIRST','LT_MED_DIA','LT_AVG_DIA']): 488 | rtpSkel,_,crownT['LT_MED_DIA'],crownT['LT_AVG_DIA'],_,rtps,_,_,_=rtp.getRTPSkeleton(path,skelGraph,True) 489 | 490 | if rtpSkel!=-1: 491 | if 
ifAnyKeyIsTrue(['LT_BRA_FRQ']): 492 | crownT['LT_BRA_FRQ']=analysis.getBranchingfrequencyAlongSinglePath(rtps,path) 493 | crownT['NODAL_AVG_DIA'],_=analysis.getDiametersAlongSinglePath(path,rtpSkel,(xScale+yScale)/2) 494 | crownT['NODAL_LEN']=analysis.getLengthOfPath(path) 495 | if ifAnyKeyIsTrue(['LT_DIST_FIRST','LT_AVG_LEN','LT_BRA_FRQ','LT_ANG_RANGE','LT_AVG_ANG','LT_MIN_ANG','LT_MAX_ANG']): 496 | lat,corrBranchpts,crownT['LT_DIST_FIRST']=seg.findLaterals(rtps, rtpSkel,(xScale+yScale)/2,path) 497 | if ifAnyKeyIsTrue(['LT_AVG_LEN']): 498 | crownT['LT_AVG_LEN']=analysis.getLateralLength(lat,path,rtpSkel) 499 | if ifAnyKeyIsTrue(['LT_ANG_RANGE','LT_AVG_ANG','LT_MIN_ANG','LT_MAX_ANG']): 500 | crownT['LT_ANG_RANGE'],crownT['LT_AVG_ANG'],crownT['LT_MIN_ANG'],crownT['LT_MAX_ANG'],_=analysis.getLateralAngles(path,lat,corrBranchpts,rtpSkel) 501 | allCrown.append(crownT.copy()) 502 | else: 503 | allCrown.append(crownT.copy()) 504 | 505 | io.setHomePath(oldHome) 506 | #os.chdir('../') 507 | 508 | def printHeader(): 509 | if not os.path.exists('./options.csv') and len(sys.argv)!=13: 510 | print '------------------------------------------------------------' 511 | print 'DIRT 1.1 - An automatic highthroughput root phenotyping platform' 512 | print '(c) 2014 Alexander Bucksch - bucksch@uga.edu' 513 | print 'Web application by Abhiram Das - abhiram.das@gmail.com' 514 | print ' ' 515 | print 'http://dirt.iplantcollaborative.org' 516 | print ' ' 517 | print 'University of Georgia' 518 | print '------------------------------------------------------------' 519 | print 'Program usage: python main.py (please configure the program with the otions.csv file)' 520 | print ' full path to file with the root image' 521 | print ' ID which will be a folder name in theworking directory. Integer value needed' 522 | print ' multiplier for the automatically determned mask threshold. 1.0 works fine and is default. If flashlight is used, the 0.6 is a good choice.' 
523 | print ' 1 - excised root analysis is on, 0 - excised root analysis is off' 524 | print ' 1 - crown root analysis is on, 0 - crown root analysis is off' 525 | print ' 1 - is on, 0 - is off' 526 | print ' a simple decimal e.g. 25.4. If 0.0 is used, then the output will have pixels as unit.' 527 | print ' 1 - reconstruction is turned on, 0 - reconstruction is turned off' 528 | print ' 1 - plotting data is stored, 0 - plotting data is not stored' 529 | print ' 1 - the full trait set is put into one excel file containing empty cells for traits that were not computed, 0 - only computed files are written to the output file' 530 | print ' full path to folder were the result is stored' 531 | print ' full path to .csv file containing the traits to be computed' 532 | print ' ' 533 | print 'Example: ' 534 | print '/Documents/image_name.jpg 8 25.0 1 1 1 25.1 0 0 0 /Documents/image_folder/ /Documents/traits.csv' 535 | 536 | sys.exit() 537 | else: 538 | print '------------------------------------------------------------' 539 | print 'DIRT 1.1 - An automatic highthroughput root phenotyping platform' 540 | print '(c) 2014 Alexander Bucksch - bucksch@uga.edu' 541 | print 'Web application by Abhiram Das - abhiram.das@gmail.com' 542 | print ' ' 543 | print 'http://dirt.iplantcollaborative.org' 544 | print ' ' 545 | print 'University of Georgia' 546 | print '------------------------------------------------------------' 547 | print ' ' 548 | print 'Initializing folder structure' 549 | 550 | def main(opt=None): 551 | 552 | global io 553 | global ID 554 | global scale 555 | global allPara 556 | global allLat 557 | global allCrown 558 | global options 559 | global maxExRoot 560 | 561 | printHeader() 562 | 563 | allStart=time.time() 564 | 565 | if opt is None: 566 | options = readOptions() 567 | else: options=opt 568 | 569 | ID=int(options[2][1]) 570 | 571 | try: scale = float(options[7][1]) 572 | except: scale =1. 
573 | rootCrown=int(options[5][1]) 574 | maxExRoot=int(options[4][1]) 575 | io.__init__(options[0][1],ID=ID,plots=bool(int(options[9][1]))) 576 | init(os.path.join(options[11][1], str(ID)), io) 577 | 578 | #Run analysis 579 | if int(options[6][1]) == 0: 580 | io.setHomePath(os.path.join(options[11][1], str(ID))) 581 | print os.getcwd() 582 | infile=open(os.path.join(io.getHomePath(), 'tmp', 'para.sav'), 'rb') 583 | allPara=pickle.load(infile) 584 | infile.close() 585 | print 'Saved parameters loaded' 586 | infile.close() 587 | 588 | elif int(options[6][1]) == 1: 589 | threadSegmentation(options[11][1],options[1][1],ID,int(options[4][1]),rootCrown,float(options[7][1])>0.0) 590 | outfile=open(os.path.join(io.getHomePath(), 'tmp', 'para.sav'), 'wb') 591 | pickle.dump(allPara,outfile) 592 | outfile.close() 593 | else: print'The segmentation switch must be 0 or 1' 594 | 595 | if int(options[5][1]) != 0 or int(options[4][1]) != 0: 596 | 597 | print 'Start Root Analysis' 598 | threadCrown(os.path.join(options[11][1], str(ID))) 599 | print "Exiting Root Analysis" 600 | 601 | 602 | compTime=int((time.time() - allStart)) 603 | print 'All done in just ' + str(compTime) + ' s!' 604 | print 'Write output.csv file' 605 | r=len(allCrown) 606 | if r==0: r=len(allCrown) 607 | for i in range(r): 608 | allPara[i][9]=compTime 609 | io.writeFile(allPara[i], allCrown[i],traitDict,int(options[10][1])) 610 | return 0 611 | 612 | if __name__ == '__main__': 613 | sys.exit(main()) 614 | -------------------------------------------------------------------------------- /Analysis.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Analysis.py 3 | 4 | The analysis module for DIRT. Most of the traits are computed here. 5 | 6 | The code is free for non-commercial use. 7 | Please contact the author for commercial use. 8 | 9 | Please cite the DIRT Paper if you use the code for your scientific project. 
10 | 11 | Bucksch et al., 2014 "Image-based high-throughput field phenotyping of crop roots", Plant Physiology 12 | 13 | ------------------------------------------------------------------------------------------- 14 | Author: Alexander Bucksch 15 | School of Biology and Interactive computing 16 | Georgia Institute of Technology 17 | 18 | Mail: bucksch@gatech.edu 19 | Web: http://www.bucksch.nl 20 | ------------------------------------------------------------------------------------------- 21 | 22 | Copyright (c) 2014 Alexander Bucksch 23 | All rights reserved. 24 | 25 | Redistribution and use in source and binary forms, with or without 26 | modification, are permitted provided that the following conditions are 27 | met: 28 | 29 | * Redistributions of source code must retain the above copyright 30 | notice, this list of conditions and the following disclaimer. 31 | 32 | * Redistributions in binary form must reproduce the above 33 | copyright notice, this list of conditions and the following 34 | disclaimer in the documentation and/or other materials provided 35 | with the distribution. 36 | 37 | * Neither the name of the DIRT Developers nor the names of its 38 | contributors may be used to endorse or promote products derived 39 | from this software without specific prior written permission. 40 | 41 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 42 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 43 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 44 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 45 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 46 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 47 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 48 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 49 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 50 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 51 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 52 | 53 | ''' 54 | 55 | ''' 56 | # internal library imports 57 | ''' 58 | import kmeans as km 59 | import ransac 60 | ''' 61 | # external library imports 62 | ''' 63 | import numpy as np 64 | import warnings 65 | warnings.simplefilter('ignore', np.RankWarning) #suppress the warning 66 | from scipy import polyfit, polyval 67 | import scipy.stats 68 | import scipy.misc 69 | import scipy.interpolate 70 | from graph_tool.all import * 71 | import graph_tool.topology as gt 72 | 73 | class Analysis(object): 74 | ''' 75 | classdocs 76 | ''' 77 | 78 | 79 | def __init__(self,io,scale): 80 | ''' 81 | Constructor 82 | ''' 83 | self.__io = io 84 | self.__id=io.getID() 85 | self.__currentIdx=io.getCurrentID() 86 | self.__scale=scale 87 | 88 | 89 | def findHistoPeaks(self,ang): 90 | try: 91 | pdf, _ =np.histogram(ang,bins=9, range=(0, 90)) 92 | maximum=[] 93 | for i in range(0,len(pdf)-1): 94 | if pdf[i-1] <= pdf[i] or i==0: 95 | if pdf[i+1] <= pdf[i] or i==len(pdf)-1: 96 | maximum.append(i) 97 | 98 | pdfSort=np.argsort(pdf) 99 | strongestMax = [] 100 | for i in pdfSort[::-1]: 101 | if i in maximum: 102 | strongestMax.append(i) 103 | if len(strongestMax)==2: 104 | break 105 | avgAng=[] 106 | avgResult=[] 107 | for i in strongestMax: 108 | avgAng=[] 109 | for j in ang: 110 | if j >= i*10: 111 | if j <= (i+1)*10: 112 | avgAng.append(j) 113 | avgResult.append(np.mean(avgAng)) 114 | if (len(avgResult)>1): 115 | return 
np.max(avgResult),np.min(avgResult) 116 | else: 117 | return avgResult[0],-1 118 | 119 | except: 120 | print 'WARNING: no histo peaks found: Analysis.findHistoPeaks' 121 | print pdf 122 | return -1,-1 123 | 124 | def smooth(self,x,window_len=11,window='hanning'): 125 | """smooth the data using a window with requested size. 126 | 127 | This method is based on the convolution of a scaled window with the signal. 128 | The signal is prepared by introducing reflected copies of the signal 129 | (with the window size) in both ends so that transient parts are minimized 130 | in the begining and end part of the output signal. 131 | 132 | input: 133 | x: the input signal 134 | window_len: the dimension of the smoothing window; should be an odd integer 135 | window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' 136 | flat window will produce a moving average smoothing. 137 | 138 | output: 139 | the smoothed signal 140 | 141 | example: 142 | 143 | t=linspace(-2,2,0.1) 144 | x=sin(t)+randn(len(t))*0.1 145 | y=smooth(x) 146 | 147 | see also: 148 | 149 | numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve 150 | scipy.signal.lfilter 151 | 152 | TODO: the window parameter could be the window itself if an array instead of a string 153 | NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y. 154 | """ 155 | 156 | if x.ndim != 1: 157 | raise ValueError, "smooth only accepts 1 dimension arrays." 158 | 159 | if x.size < window_len: 160 | raise ValueError, "Input vector needs to be bigger than window size." 
161 | 162 | 163 | if window_len<3: 164 | return x 165 | 166 | 167 | if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: 168 | raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" 169 | 170 | 171 | s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]] 172 | #print(len(s)) 173 | if window == 'flat': #moving average 174 | w=np.ones(window_len,'d') 175 | else: 176 | w=eval('np.'+window+'(window_len)') 177 | 178 | y=np.convolve(w/w.sum(),s,mode='valid') 179 | return y 180 | 181 | def filterPathDiameters(self,path,G,scale): 182 | #remove diameters around branching points in range of the branching point diameter 183 | vprop=G.vertex_properties["vp"] 184 | for i in range(len(path)): 185 | count = 0 186 | for _ in path[i].out_neighbours(): 187 | count+=1 188 | if count >2: 189 | break 190 | if count>2: 191 | for j in range(int(vprop[path[i]]['diameter']/self.__scale)): 192 | if j >20: break 193 | if i-j >0: vprop[path[i-j]]['diameter']=0 194 | if i+j 0: 210 | for i in pathList: 211 | length = len(i) 212 | if len(i)>0: 213 | lengthArr.append(length) 214 | x.append(vprop[G.vertex(i[0])]['imgIdx'][0]) 215 | else: 216 | lengthArr.append(-1) 217 | x.append(-1) 218 | avgLength=np.average(lengthArr) 219 | 220 | print 'avg. 
Length: ' + str(avgLength) 221 | try: 222 | self.__io.saveArray(lengthArr,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_LengthHist') 223 | 224 | f2 = scipy.interpolate.interp1d(x, lengthArr, kind='cubic') 225 | self.__io.saveArray(x,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_LengthX') 226 | self.__io.saveArray(f2(x),self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_LengthY') 227 | 228 | except: 229 | pass 230 | print "Avg.Length Scale Nodalroot"+str(self.__scale) 231 | return avgLength*self.__scale 232 | 233 | def getLateralLengthRTP(self,RTP,img,counter=None): 234 | lengthArr=[] 235 | imgDebug=img.astype(np.uint8) 236 | for i in RTP: 237 | length = len(i) 238 | for j in i: 239 | imgDebug[j[1]][j[0]]=255 240 | lengthArr.append(length) 241 | scipy.misc.imsave(self.__io.getHomePath()+'Result/' +str(j[1])+' '+str(j[0])+ 'DebugThickP.png', imgDebug) 242 | self.__io.writeServerFile(self.__io.getHomePath(), 'dirt_out.csv',self.__io.getHomePath()+'Result/'+self.__io.getFileName()+'DebugThickP.png,' +str(self.__id[self.__currentIdx])+',0') 243 | avgLength=np.average(lengthArr) 244 | print 'avg. 
Length: ' + str(avgLength) 245 | 246 | self.__io.saveArray(lengthArr,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_LateralLengthHisto') 247 | self.__io.saveArray(range(0,len(lengthArr)),self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_LateralLengthX') 248 | self.__io.saveArray(lengthArr,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_LateralLengthY') 249 | 250 | def getSymmetry(self,rtps,G): 251 | vprop=G.vertex_properties["vp"] 252 | #calculate bounding box 253 | xMax=0 254 | xMin=5000000000 255 | yMax=0 256 | yMin=5000000000 257 | sumX=0 258 | sumY=0 259 | count=0.0 260 | for r in rtps: 261 | for i in r: 262 | v=G.vertex(i) 263 | if vprop[v]['coord'][0] > xMax: 264 | xMax=vprop[v]['coord'][0] 265 | if vprop[v]['coord'][0] < xMin: 266 | xMin=vprop[v]['coord'][0] 267 | if vprop[v]['coord'][1] > yMax: 268 | yMax=vprop[v]['coord'][1] 269 | if vprop[v]['coord'][1] < yMin: 270 | yMin=vprop[v]['coord'][1] 271 | sumX+=vprop[v]['coord'][0] 272 | sumY+=vprop[v]['coord'][1] 273 | count+=1.0 274 | if count>0: 275 | avgX=sumX/count 276 | avgY=sumY/count 277 | avgBoxX=(xMax-xMin)/2 278 | avgBoxY=(yMin-yMax)/2 279 | vecX=avgBoxX-avgX 280 | vecY=avgBoxY-avgY 281 | vecSym=[vecX,vecY] 282 | else: 283 | vecSym=[np.nan,np.nan] 284 | 285 | return vecSym 286 | 287 | def getLateralAngles(self,thickestPath,lat,corrBranchpts,G,counter=None): 288 | # calculates the angle between the hypocotyle and all rtps 289 | angles=[] 290 | tangents=self.filterRTPTangent(thickestPath,lat,corrBranchpts) 291 | for i in range(len(lat)): 292 | ang=self.getAngleBetweenPaths(tangents[i],lat[i],G) 293 | angles.append(ang) 294 | try: 295 | self.__io.saveArray(angles,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_AngleHisto') 296 | minAngle=np.min(angles) 297 | maxAngle=np.max(angles) 298 | angRange=maxAngle-minAngle 299 | avgAngle=np.average(angles) 300 | 301 | except: 302 | minAngle=-1 303 | maxAngle=-1 304 | angRange=-1 305 | avgAngle=-1 306 | return 
angRange,avgAngle,minAngle,maxAngle,angles 307 | 308 | def getBranchingfrequencyAlongSinglePath(self,rtps,path): 309 | bp=[] 310 | for i in rtps: 311 | if i[0]!=path[0]: 312 | bp.append(i[0]) 313 | 314 | bpUnique=np.unique(bp) 315 | try: 316 | branchFreqency=float(len(path))/float(len(bpUnique)) 317 | except: 318 | branchFreqency=-1 319 | print 'Branching Frequency in given unit: ' +str(branchFreqency*self.__scale) 320 | return branchFreqency*self.__scale 321 | 322 | def getDiametersAlongSinglePath(self,path,G,scale,counter=None): 323 | vprop=G.vertex_properties["vp"] 324 | 325 | x=[] 326 | y=[] 327 | 328 | length=0 329 | for i in path: 330 | length+=1 331 | if vprop[i]['diameter'] > 0: 332 | x.append(length) 333 | y.append(vprop[i]['diameter']) 334 | coeffs=polyfit(x,y,1) 335 | 336 | besty = polyval ( coeffs , x) 337 | 338 | self.__io.saveArray(x,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterX') 339 | self.__io.saveArray(y,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterY') 340 | 341 | avgDiameter=np.average(y) 342 | self.__io.saveArray(y,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterHisto') 343 | return avgDiameter,coeffs[0] 344 | 345 | def getDiameterQuantilesAlongSinglePath(self,path,G,counter=None): 346 | 347 | G=self.filterPathDiameters(path, G,self.__scale) 348 | x=[] 349 | y=[] 350 | length=0 351 | vprop=G.vertex_properties["vp"] 352 | for i in path: 353 | length+=1 354 | if vprop[i]['diameter'] > 0: 355 | x.append(length) 356 | y.append(vprop[i]['diameter']) 357 | coeffs=polyfit(x,y,1) 358 | 359 | besty = polyval ( coeffs , x) 360 | 361 | self.__io.saveArray(x,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterX') 362 | self.__io.saveArray(y,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterY') 363 | 364 | l=len(y)-1 365 | l25=int(l*0.25) 366 | l50=int(l*0.5) 367 | l75=int(l*0.75) 368 | l90=int(l*0.90) 369 | 370 | d25=np.average(y[:l25]) 371 | 
d50=np.average(y[l25:l50]) 372 | d75=np.average(y[l50:l75]) 373 | d90=np.average(y[l90:]) 374 | 375 | 376 | self.__io.saveArray(y,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_DiameterHistoTP') 377 | 378 | return d25,d50,d75,d90 379 | 380 | def getWidthOverHeight(self,img2,xScale,yScale): 381 | # We compute here all mask based traits at once (long lives Spagetti code :-) ) 382 | if len(img2) > 0: 383 | print 'IMG OK' 384 | else: 385 | print 'IMAGE IS NOT ACCESSIBLE' 386 | return 'nan','nan','nan','nan','nan','nan','nan','nan',['nan']*9,['nan']*9,'nan' 387 | h, w = np.shape(img2) 388 | xx=[] 389 | yy=[] 390 | blackArr=[] 391 | sizeCount=0 392 | densityArray=[] 393 | #compute density value 394 | for i in range(h): 395 | white=0 396 | black=1 397 | start = 0 398 | end = 0 399 | x = i / w 400 | 401 | idx = np.where(img2[i]>0) 402 | try: 403 | start=idx[0][0] 404 | white=len(idx[0]) 405 | end=idx[0][len(idx[0])-1] 406 | black=(end-start)-white 407 | sizeCount+=white 408 | width=float(end-start) 409 | xx.append(float(i)*yScale) 410 | if black==0: black=1 411 | blackArr.append(black) 412 | yy.append(float(width)*xScale) 413 | 414 | normWhite=1 415 | normBlack=1 416 | 417 | if w > 0: 418 | normWhite=float(white)/float(w) 419 | if normWhite==1.: normWhite=0. 420 | if black >0: normBlack=float(black)/float(w) 421 | else: normBlack=1. 422 | 423 | if normWhite>0.: densityArray.append(float(normWhite)/float(normBlack)) 424 | except: 425 | densityArray.append(float(0.0)) 426 | print 'empty image line in crown file -> placed 0. as density for this line' 427 | pass 428 | 429 | rootDensity=np.average(densityArray) 430 | print 'Avg. 
Root density: ' + str(rootDensity) 431 | 432 | ysmooth = yy 433 | smoothRegion=15 434 | xxNorm=np.array(xx)/np.max(xx) 435 | tenPercent=float(len(yy))*0.1 436 | # retrieve stem diameter as the average if the distance field in the first 10% 437 | try: 438 | stemDia=np.median(xx[20:int(tenPercent)]) 439 | except: 440 | stemDia=-1 441 | # compute a simple angle at top and bottom along the outline for monocots (note this is more noisy than the D10 or D20 values that are more robust) 442 | try: 443 | angleSimple=self.getAngleToXAxXY(xx[int(tenPercent):int(tenPercent)*3], yy[int(tenPercent):int(tenPercent)*3], ransacFitting=True) 444 | angleSimpleBottom=self.getAngleToXAxXY(xx[int(tenPercent)*3:int(tenPercent)*9], yy[int(tenPercent)*3:int(tenPercent)*9], ransacFitting=True) 445 | except: 446 | angleSimple=-1 447 | angleSimpleBottom=-1 448 | print 'Stem Diameter: '+str(stemDia) 449 | print 'Root Top Angle: '+str(angleSimple) 450 | print 'Root Bottom Angle: '+str(angleSimpleBottom) 451 | 452 | #smooth the noisy data 453 | for i in range(smoothRegion,len(yy)-smoothRegion): 454 | tmp=[] 455 | for j in range(smoothRegion): 456 | tmp.append(yy[i+j]) 457 | tmp.append(yy[i-j]) 458 | ysmooth[i]=np.median(tmp) 459 | self.__io.saveArray(xxNorm,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_HeightWidthX') 460 | self.__io.saveArray(ysmooth,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_HeightWidthY') 461 | 462 | # compute the width parameters of the root 463 | medianWidth=np.median(ysmooth) 464 | maxWidth=np.max(ysmooth) 465 | print 'Median. Root Width: ' + str(medianWidth) 466 | print 'Max. 
Root Width: ' + str(maxWidth) 467 | #compute the cummulative width profile 468 | ysmoothCS=np.array(ysmooth).cumsum() 469 | ysmoothCS=ysmoothCS/np.max(ysmoothCS) 470 | 471 | #find index at x% width accumulation 472 | D=[] 473 | 474 | dD=0.1 475 | count=0.0 476 | for i in ysmoothCS: 477 | count+=1.0 478 | if i>dD: 479 | D.append(count) 480 | dD+=0.1 481 | if dD==1.0: 482 | break 483 | while len(D)<9: 484 | D.append(-1) 485 | 486 | 487 | # for drawing we need all DS valuse ofver the curve 488 | #Dslope=self.filterCDFTAngentSlope(xxNorm,ysmoothCS, range(len(ysmoothCS)),200) 489 | 490 | #save the data for plotting 491 | self.__io.saveArray(xxNorm,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_HeightWidthCSX') 492 | self.__io.saveArray(ysmoothCS,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_HeightWidthCSY') 493 | #self.__io.saveArray(Dslope,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_HeightWidthDSX') 494 | #self.__io.saveArray(ysmoothCS,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_HeightWidthDSY') 495 | 496 | # for the final output we just need the 10 reveant slopes 497 | # Note, we adjusted the window size to 100. 
In the paper we used 20 498 | print '****** filterCDFTAngentSlope Input **********' 499 | print xxNorm 500 | print ysmoothCS 501 | print D[:len(D)-1] 502 | 503 | Dslope=self.filterCDFTAngentSlope(xxNorm,ysmoothCS, D[:len(D)-1],100) 504 | #convert D array counts to percentages saved in the output file 505 | D=np.array(D,dtype=float)/float(len(ysmoothCS)) 506 | return rootDensity,medianWidth,maxWidth,D[0],D[1],D[2],D[3],D[4],D[5],D[6],D[7],D[8],Dslope[0],Dslope[1],Dslope[2],Dslope[3],Dslope[4],Dslope[5],Dslope[6],Dslope[7],Dslope[8],sizeCount*(xScale*yScale),stemDia,angleSimple,angleSimpleBottom 507 | 508 | def getLengthOfPath(self, path): 509 | length=len(path)*self.__scale 510 | return length 511 | 512 | def calculateAngleAtDist(self,thickestPath,lat,corrBranchpts,scale,G,atDist=20,counter=None): 513 | # calculates the angle between the hypocotyle and all rtps 514 | 515 | angelsAtDist=[] 516 | 517 | pxDist=int(atDist/scale) 518 | for i in range(len(lat)): 519 | if len(lat[i])>pxDist and G.vertex(corrBranchpts[i]) in thickestPath: 520 | angAtDist=self.getAngleToXAx(G,lat[i][:pxDist]) 521 | angelsAtDist.append(angAtDist) 522 | 523 | self.__io.saveArray(angelsAtDist,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_AngleHistoAtDist') 524 | return np.mean(angelsAtDist) 525 | 526 | def calculateAngleQuantiles(self,thickestPath,lat,corrBranchpts,G,counter=None): 527 | 528 | angles25=[] 529 | angles50=[] 530 | angles75=[] 531 | angles90=[] 532 | 533 | for i in range(len(lat)): 534 | l=len(lat[i]) 535 | l25=int(l*0.25) 536 | l50=int(l*0.5) 537 | l75=int(l*0.75) 538 | l90=int(l*0.90) 539 | 540 | ang25=self.getAngleToXAx(G,lat[i][:l25]) 541 | ang50=self.getAngleToXAx(G,lat[i][:l50]) 542 | ang75=self.getAngleToXAx(G,lat[i][:l75]) 543 | ang90=self.getAngleToXAx(G,lat[i][:l90]) 544 | angles25.append(ang25) 545 | angles50.append(ang50) 546 | angles75.append(ang75) 547 | angles90.append(ang90) 548 | 549 | 
self.__io.saveArray(angles25,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_AngleHisto25') 550 | self.__io.saveArray(angles50,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_AngleHisto50') 551 | self.__io.saveArray(angles75,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_AngleHisto75') 552 | self.__io.saveArray(angles90,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_AngleHisto90') 553 | 554 | return angles25,angles50,angles75,angles90 555 | 556 | def calculateAngles(self,thickestPath,lat,corrBranchpts,G,counter=None): 557 | # calculates the angle between the hypocotyle and all rtps 558 | angles=[] 559 | #tangents=self.filterRTPTangent(thickestPath,lat,corrBranchpts,window=45) 560 | #RTP=self.filterRTP(RTP,thickestPath,skel) 561 | for i in range(len(lat)): 562 | ang=self.getAngleToXAx(G,lat[i]) 563 | angles.append(ang) 564 | self.__io.saveArray(angles,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_AngleHistoN') 565 | # f=p.figure() 566 | # ax = f.add_subplot(111) 567 | # p.hist(angles,bins=9, range=(0, 90)) 568 | # p.title('Histogram of angles between hypocotyle and roots') 569 | # p.xlabel('angles in degree') 570 | # p.ylabel('# of angles') 571 | minAngle=np.min(angles) 572 | maxAngle=np.max(angles) 573 | angRange=maxAngle-minAngle 574 | avgAngle=np.median(angles) 575 | # p.text(0.3, 0.9,'angular range: '+str(angRange), ha='center', va='center', transform=ax.transAxes) 576 | # p.text(0.3, 0.85,' median angle: '+str(avgAngle), ha='center', va='center', transform=ax.transAxes) 577 | # p.savefig(self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_AngleHisto.png') 578 | # self.__io.writeServerFile('./', 'dirt_out.csv',self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_AngleHisto.png,' +str(self.__id[self.__currentIdx])+',1') 579 | # p.clf() 580 | return avgAngle,minAngle,maxAngle,angRange,angles 581 | 582 | def filterRTP(self,RTP,thickestpath,skel): 583 | RTPnew =[] 584 | for i in range(len(RTP)): 
585 | RTPnew.append(frozenset(RTP[i]).difference(frozenset(thickestpath))) 586 | 587 | diameter=skel.node[i[0]]['diameter'] 588 | 589 | for j in range(int(diameter)): 590 | i.pop(j) 591 | 592 | return RTPnew 593 | 594 | def filterCDFTAngentSlope(self,x,CDF,idx, window=20): 595 | #We estimate the slope at a point over a small region along the CDF 596 | tangents=[] 597 | for i in idx: 598 | tmpTangentX=[] 599 | tmpTangentY=[] 600 | for j in range(window): 601 | print 'appendices' 602 | print x[int(i)-j],CDF[int(i)-j] 603 | if jcY[1][0]: 682 | return cX[0],cY[0],cX[1],cY[1] 683 | else: 684 | return cX[1],cY[1],cX[0],cY[0] 685 | if nrOfClusters ==3: 686 | if cY[0][0]>cY[1][0] and cY[1][0]>cY[2][0]: 687 | return cX[0],cY[0],cX[1],cY[1],cX[2],cY[2] 688 | if cY[0][0]>cY[1][0] and cY[1][0] 0: #just a little treshhold to get rid of noise 733 | nrOfAdvRoots+=1 734 | for idx,i in enumerate(cBas): 735 | if cBas[idx-1]-i > 0: #just a little treshhold to get rid of noise 736 | nrOfBasRoots+=1 737 | 738 | return nrOfAdvRoots,nrOfBasRoots,firstSegmentRoots,secondSegmentRoots, np.mean(cDiaAdv),np.mean(cDiaBas) 739 | 740 | def RTPsOverDepth(self, centralPath, rtpSkel): 741 | 742 | vprop=rtpSkel.vertex_properties['vp'] 743 | depth=[len(centralPath)] 744 | nrOfP=[len(centralPath)] 745 | fiftyPercentRtp=vprop[centralPath[0]]['nrOfPaths']/2 746 | fiftyPercentDrop=0 747 | for i in centralPath: 748 | nrOfP.append(vprop[i]['nrOfPaths']) 749 | depth.append(vprop[i]['coord'][1]) 750 | if vprop[i]['nrOfPaths']<=fiftyPercentRtp: 751 | fiftyPercentDrop=vprop[i]['coord'][1] 752 | 753 | 754 | self.__io.saveArray(nrOfP,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_RTPDepthX') 755 | self.__io.saveArray(depth,self.__io.getHomePath()+'Plots/'+self.__io.getFileName()+'_RTPDepthY') 756 | 757 | return fiftyPercentDrop 758 | def anglesPerClusterAtDist(self, cAdv, cBas, rtpSkel, path, lat,corrBranchpts, scale, dist=20): 759 | # estimating the angles per cluster leads to distinguishing 
adventious roots from basal roots. 760 | 761 | vprop=rtpSkel.vertex_properties["vp"] 762 | minPathsAdv=np.min(cAdv) 763 | for idx,i in enumerate(path): 764 | if minPathsAdv>vprop[i]['nrOfPaths']: 765 | meanAdv=self.calculateAngleAtDist(path[:idx],lat,corrBranchpts,scale,rtpSkel,dist,None) 766 | meanBas=self.calculateAngleAtDist(path[idx:],lat,corrBranchpts,scale,rtpSkel,dist,None) 767 | break 768 | print 'Adv and Bas Angle at 2cm:'+str(meanAdv)+' , '+str(meanBas) 769 | return meanAdv,meanBas 770 | 771 | -------------------------------------------------------------------------------- /Segmentation.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Segmentation.py 3 | 4 | The Segmentation module for DIRT. We perform connected component labeling and construct the medial axis graph here. 5 | 6 | The code is free for non-commercial use. 7 | Please contact the author for commercial use. 8 | 9 | Please cite the DIRT Paper if you use the code for your scientific project. 10 | 11 | Bucksch et al., 2014 "Image-based high-throughput field phenotyping of crop roots", Plant Physiology 12 | 13 | ------------------------------------------------------------------------------------------- 14 | Author: Alexander Bucksch 15 | School of Biology and Interactive computing 16 | Georgia Institute of Technology 17 | 18 | Mail: bucksch@gatech.edu 19 | Web: http://www.bucksch.nl 20 | ------------------------------------------------------------------------------------------- 21 | 22 | Copyright (c) 2014 Alexander Bucksch 23 | All rights reserved. 24 | 25 | Redistribution and use in source and binary forms, with or without 26 | modification, are permitted provided that the following conditions are 27 | met: 28 | 29 | * Redistributions of source code must retain the above copyright 30 | notice, this list of conditions and the following disclaimer. 
31 | 32 | * Redistributions in binary form must reproduce the above 33 | copyright notice, this list of conditions and the following 34 | disclaimer in the documentation and/or other materials provided 35 | with the distribution. 36 | 37 | * Neither the name of the DIRT Developers nor the names of its 38 | contributors may be used to endorse or promote products derived 39 | from this software without specific prior written permission. 40 | 41 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 42 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 43 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 44 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 45 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 46 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 47 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 48 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 49 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 50 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 51 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
52 | ''' 53 | 54 | ''' 55 | # external library imports 56 | ''' 57 | import numpy as np 58 | from scipy import ndimage 59 | import graph_tool.topology as gt 60 | import graph_tool.util as gu 61 | from graph_tool import Graph 62 | import mahotas as m 63 | 64 | ''' 65 | # standard python import 66 | ''' 67 | import time 68 | 69 | class Segmentation(object): 70 | ''' 71 | classdocs 72 | ''' 73 | def __init__(self,img,io=0,tips=[],): 74 | ''' 75 | Constructor 76 | ''' 77 | self.__idIdx=io.getCurrentID() 78 | self.__img = img 79 | self.__io = io 80 | self.__id = io.getID() 81 | self.__height, self.__width = np.shape(self.__img) 82 | self.__tips=tips 83 | self.__fail=False 84 | def getFail(self): 85 | return self.__fail 86 | def setTips(self,tips): 87 | ''' 88 | BAD HACK. DO IT CLEAN IN THE REFACTORED VERSION 89 | ''' 90 | self.__tips=tips 91 | 92 | def smooth(self,x,window_len=11,window='hanning'): 93 | """smooth the data using a window with requested size. 94 | 95 | This method is based on the convolution of a scaled window with the signal. 96 | The signal is prepared by introducing reflected copies of the signal 97 | (with the window size) in both ends so that transient parts are minimized 98 | in the begining and end part of the output signal. 99 | 100 | input: 101 | x: the input signal 102 | window_len: the dimension of the smoothing window; should be an odd integer 103 | window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' 104 | flat window will produce a moving average smoothing. 
105 | 106 | output: 107 | the smoothed signal 108 | 109 | example: 110 | 111 | t=linspace(-2,2,0.1) 112 | x=sin(t)+randn(len(t))*0.1 113 | y=smooth(x) 114 | 115 | see also: 116 | 117 | numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve 118 | scipy.signal.lfilter 119 | 120 | TODO: the window parameter could be the window itself if an array instead of a string 121 | NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y. 122 | """ 123 | 124 | if x.ndim != 1: 125 | raise ValueError, "smooth only accepts 1 dimension arrays." 126 | 127 | if x.size < window_len: 128 | raise ValueError, "Input vector needs to be bigger than window size." 129 | 130 | 131 | if window_len<3: 132 | return x 133 | 134 | 135 | if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: 136 | raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" 137 | 138 | 139 | s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]] 140 | if window == 'flat': #moving average 141 | w=np.ones(window_len,'d') 142 | else: 143 | w=eval('np.'+window+'(window_len)') 144 | 145 | y=np.convolve(w/w.sum(),s,mode='valid') 146 | return y 147 | 148 | def label(self, onlyOne=True): 149 | 150 | labeled, nr_objects = ndimage.label(self.__img) 151 | print 'Number of components: ' + str(nr_objects) 152 | #if nr_objects>2: return None 153 | if nr_objects==0: return None 154 | val=labeled.flatten() 155 | hist = [] 156 | hist+=range(np.max(val) + 1) 157 | test, _ = np.histogram(val, hist) 158 | comp1 = np.max(test) 159 | idx1 = list(test).index(comp1) 160 | 161 | 162 | if nr_objects>1: 163 | test[idx1] = 0 164 | comp2 = np.max(test) 165 | idx2 = list(test).index(comp2) 166 | test[idx2] = 0 167 | else: 168 | idx2=1 169 | 170 | idx = np.where(labeled==idx2) 171 | #bounding box 172 | iMin=np.min(idx[0]) 173 | jMin=np.min(idx[1]) 174 | iMax=np.max(idx[0]) 175 | jMax=np.max(idx[1]) 176 | 177 | return 
labeled[iMin:iMax, jMin:jMax] #just return the cropped image of the largest component 178 | 179 | 180 | def labelAll(self): 181 | labeled, nr_objects = ndimage.label(self.__img) 182 | return labeled, nr_objects 183 | 184 | def findCircle(self,hist, labled): 185 | compsX = [] 186 | compsY = [] 187 | for i in range(len(hist) + 1): 188 | compsX.append([]) 189 | compsY.append([]) 190 | h, w = np.shape(labled) 191 | for i in range(w): 192 | for j in range(h): 193 | compsX[labled[j][i]].append(j) 194 | compsY[labled[j][i]].append(i) 195 | ratio = [] 196 | for i in range(len(compsX)): 197 | xMin = np.min(compsX[i]) 198 | xMax = np.max(compsX[i]) 199 | yMin = np.min(compsY[i]) 200 | yMax = np.max(compsY[i]) 201 | if yMax - yMin > w/200: 202 | if yMax - yMin < w/10: 203 | if ratio < 0.2: 204 | ratio.append((float(xMax) - float(xMin)) / (float(yMax) - float(yMin))) 205 | else: 206 | ratio.append(-1) 207 | else: 208 | ratio.append(-1) 209 | 210 | circleRatio = 1 211 | circleIdx = 0 212 | for i in range(len(ratio)): 213 | if ratio[i] >= 0: 214 | if np.abs(1 - ratio[i]) < circleRatio: 215 | circleRatio = np.abs(1 - ratio[i]) 216 | circleIdx = i 217 | xMin = np.min(compsX[circleIdx]) 218 | xMax = np.max(compsX[circleIdx]) 219 | yMin = np.min(compsY[circleIdx]) 220 | yMax = np.max(compsY[circleIdx]) 221 | 222 | return circleIdx, circleRatio, float(xMax) - float(xMin), float(yMax) - float(yMin) 223 | 224 | def findThickestPath(self,skelImg,skelDia,xScale,yScale): 225 | print 'create skeleton graph' 226 | skelGraph,skelSize=self.makeGraphFast(skelImg,skelDia,xScale,yScale) 227 | rootVertex,_=self.findRootVertex(skelGraph) 228 | epropW=skelGraph.edge_properties["w"] 229 | 230 | maxDia=np.max(skelDia) 231 | try: 232 | diaIdx=int(len(skelDia)*0.1) 233 | except: 234 | print "Error line 234 in Segmentation.py" 235 | diaIdx=1 236 | maxDia10=np.max(skelDia[0:diaIdx]) 237 | print 'max Diameter: '+ str(maxDia) 238 | path=[] 239 | #remove all two-connected ones with 0 label 240 | print 
'trace path of thickest diameter' 241 | #find thickest path 242 | pathDetect=True 243 | if skelGraph.num_vertices() >0: 244 | 245 | pathDetect=True 246 | while pathDetect==True: 247 | lastVertex=self.findLastRootVertex(skelGraph) 248 | try: 249 | path,_=gt.shortest_path(skelGraph, rootVertex, lastVertex , weights=epropW, pred_map=None) 250 | pathDetect=False 251 | except: 252 | raise 253 | if lastVertex <=0: 254 | pathDetect=False 255 | else: 256 | skelGraph.remove_vertex(lastVertex) 257 | lastVertex=self.findLastRootVertex(skelGraph) 258 | 259 | return path,skelGraph,maxDia10,skelSize 260 | 261 | def findThickestPathLateral(self,skelImg,skelDia,xScale,yScale): 262 | print 'create skeleton graph' 263 | skelGraph,_=self.makeGraphFast(skelImg,skelDia,xScale,yScale) 264 | rootVertex=self.findRootVertexLateral(skelGraph) 265 | epropW=skelGraph.edge_properties["w"] 266 | 267 | path=[] 268 | #remove all two-connected ones with 0 label 269 | print 'trace path of thickest diameter' 270 | #find thickest path 271 | pathDetect=True 272 | if skelGraph.num_vertices() >0: 273 | pathDetect=True 274 | while pathDetect==True: 275 | lastVertex=self.findLastRootVertex(skelGraph) 276 | try: 277 | path,_=gt.shortest_path(skelGraph, rootVertex, lastVertex , weights=epropW, pred_map=None) 278 | pathDetect=False 279 | except: 280 | raise 281 | if lastVertex <=0: 282 | pathDetect=False 283 | else: 284 | skelGraph.remove_vertex(lastVertex) 285 | lastVertex=self.findLastRootVertex(skelGraph) 286 | 287 | return path,skelGraph 288 | 289 | def makeGraphFast(self,img,dia,xScale,yScale): 290 | print('Building Graph Data Structure'), 291 | start=time.time() 292 | G = Graph(directed=False) 293 | sumAddVertices=0 294 | 295 | vprop=G.new_vertex_property('object') 296 | eprop=G.new_edge_property('object') 297 | epropW=G.new_edge_property("float") 298 | h, w = np.shape(img) 299 | if xScale>0 and yScale>0: avgScale=(xScale+yScale)/2 300 | else: 301 | avgScale=1. 302 | xScale=1. 303 | yScale=1. 
304 | addedVerticesLine2=[] 305 | vListLine2=[] 306 | percentOld=0 307 | counter=0 308 | ''' 309 | Sweep over each line in the image except the last line 310 | ''' 311 | for idx,i in enumerate(img[:len(img)-2]): 312 | ''' 313 | Get foreground indices in the current line of the image and make vertices 314 | ''' 315 | counter+=1 316 | percent=(float(counter)/float(h))*100 317 | if percentOld+10< percent: 318 | print (str(np.round(percent,1))+'% '), 319 | percentOld=percent 320 | 321 | line1=np.where(i==True) 322 | if len(line1[0])>0: 323 | line1=set(line1[0]).difference(set(addedVerticesLine2)) 324 | vL=G.add_vertex(len(list(line1))) 325 | 326 | 327 | if len(line1)>1 : 328 | vList=vListLine2+list(vL) 329 | else: vList=vListLine2+[vL] 330 | line1=addedVerticesLine2+list(line1) 331 | for jdx,j in enumerate(line1): 332 | vprop[vList[jdx]]={'imgIdx':(j,idx),'coord': (float(j)*xScale,float(idx)*yScale), 'nrOfPaths':0, 'diameter':float(dia[idx][j])*avgScale} 333 | ''' 334 | keep order of the inserted vertices 335 | ''' 336 | sumAddVertices+=len(line1) 337 | 338 | addedVerticesLine2=[] 339 | vListLine2=[] 340 | ''' 341 | Connect foreground indices to neighbours in the next line 342 | ''' 343 | for v1 in line1: 344 | va=vList[line1.index(v1)] 345 | diagonalLeft = diagonalRight = True 346 | try: 347 | if img[idx][v1-1]==True: 348 | diagonalLeft=False 349 | vb=vList[line1.index(v1-1)] 350 | e=G.add_edge(va,vb) 351 | eprop[e]={'coord1':vprop[va]['coord'], 'coord2':vprop[vb]['coord'],'weight':((vprop[va]['diameter']+vprop[vb]['diameter'])/2),'RTP':False} 352 | epropW[e]=2./(eprop[e]['weight']**2) 353 | except: 354 | print 'Boundary vertex at: '+str([v1,idx-1])+' image size: '+ str([w,h]) 355 | pass 356 | 357 | try: 358 | if img[idx][v1+1]==True: 359 | diagonalRight=False 360 | vb=vList[line1.index(v1+1)] 361 | e=G.add_edge(va,vb) 362 | eprop[e]={'coord1':vprop[va]['coord'], 'coord2':vprop[vb]['coord'],'weight':((vprop[va]['diameter']+vprop[vb]['diameter'])/2),'RTP':False} 363 | 
epropW[e]=2./(eprop[e]['weight']**2) 364 | except: 365 | print 'Boundary vertex at: '+str([v1+1,idx])+' image size: '+ str([w,h]) 366 | pass # just if we are out of bounds 367 | 368 | try: 369 | if img[idx+1][v1]==True: 370 | diagonalRight=False 371 | diagonalLeft=False 372 | vNew=G.add_vertex() 373 | vprop[vNew]={'imgIdx':(v1,idx+1),'coord': (float(v1)*xScale,float(idx+1)*yScale), 'nrOfPaths':0, 'diameter':float(dia[idx+1][v1])*avgScale} 374 | vListLine2.append(vNew) 375 | e=G.add_edge(vList[line1.index(v1)],vNew) 376 | eprop[e]={'coord1':vprop[va]['coord'], 'coord2':vprop[vNew]['coord'],'weight':((vprop[va]['diameter']+vprop[vNew]['diameter'])/2),'RTP':False} 377 | epropW[e]=1./(eprop[e]['weight']**2) 378 | if v1 not in addedVerticesLine2: addedVerticesLine2.append(v1) 379 | except: 380 | print 'Boundary vertex at: '+str([v1,idx+1])+' image size: '+ str([w,h]) 381 | pass 382 | 383 | try: 384 | if diagonalRight == True and img[idx+1][v1+1]==True: 385 | vNew=G.add_vertex() 386 | vprop[vNew]={'imgIdx':(v1+1,idx+1),'coord': (float(v1+1)*xScale,float(idx+1)*yScale), 'nrOfPaths':0, 'diameter':float(dia[idx+1][v1+1])*avgScale} 387 | vListLine2.append(vNew) 388 | e=G.add_edge(vList[line1.index(v1)],vNew) 389 | eprop[e]={'coord1':vprop[va]['coord'], 'coord2':vprop[vNew]['coord'],'weight':((vprop[va]['diameter']+vprop[vNew]['diameter'])/2),'RTP':False} 390 | epropW[e]=1.41/(eprop[e]['weight']**2) 391 | if v1+1 not in addedVerticesLine2: addedVerticesLine2.append(v1+1) 392 | except: 393 | print 'Boundary vertex at: '+str([v1+1,idx+1])+' image size: '+ str([w,h]) 394 | pass 395 | 396 | try: 397 | if diagonalLeft == True and img[idx+1][v1-1]==True: 398 | vNew=G.add_vertex() 399 | vprop[vNew]={'imgIdx':(v1-1,idx+1),'coord': (float(v1-1)*xScale,float(idx+1)*yScale), 'nrOfPaths':0, 'diameter':float(dia[idx+1][v1-1])*avgScale} 400 | vListLine2.append(vNew) 401 | e=G.add_edge(vList[line1.index(v1)],vNew) 402 | eprop[e]={'coord1':vprop[va]['coord'], 
'coord2':vprop[vNew]['coord'],'weight':((vprop[va]['diameter']+vprop[vNew]['diameter'])/2),'RTP':False} 403 | epropW[e]=1.41/(eprop[e]['weight']**2) 404 | if v1-1 not in addedVerticesLine2: addedVerticesLine2.append(v1-1) 405 | except: 406 | print 'Boundary vertex at: '+str([v1-1,idx+1])+' image size: '+ str([w,h]) 407 | pass 408 | try: 409 | if img[idx][v1+1]==False and img[idx][v1-1]==False and img[idx+1][v1]==False and diagonalLeft==False and diagonalRight==False: 410 | print 'tip detected' 411 | if img[idx-1][v1-1]==False and img[idx-1][v1+1]==False and img[idx-1][v1]==False: 412 | print 'floating pixel' 413 | except: 414 | pass 415 | 416 | print'done!' 417 | G.edge_properties["ep"] = eprop 418 | G.edge_properties["w"] = epropW 419 | G.vertex_properties["vp"] = vprop 420 | print 'graph build in '+str(time.time()-start) 421 | l = gt.label_largest_component(G) 422 | u = gt.GraphView(G, vfilt=l) 423 | print '# vertices' 424 | print(u.num_vertices()) 425 | print(G.num_vertices()) 426 | if u.num_vertices()!=G.num_vertices(): self.__fail=float((G.num_vertices()-u.num_vertices()))/float(G.num_vertices()) 427 | return u,u.num_vertices() 428 | 429 | def makeGraph(self,img,dia,xScale,yScale): 430 | print 'Building Graph Data Structure' 431 | start=time.time() 432 | G = Graph(directed=False) 433 | vprop=G.new_vertex_property('object') 434 | eprop=G.new_edge_property('object') 435 | epropW=G.new_edge_property("int32_t") 436 | avgScale=(xScale+yScale)/2 437 | 438 | test=np.where(img==True) 439 | ss = np.shape(test) 440 | cccc=0 441 | percentOld=0.0 442 | print str(np.round(percentOld,1))+'%' 443 | for (i,j) in zip(test[1],test[0]): 444 | cccc+=1 445 | percent=(float(cccc)/float(ss[1]))*100 446 | if percentOld+10< percent: 447 | print str(np.round(percent,1))+'%' 448 | percentOld=percent 449 | nodeNumber1 = (float(i)*yScale,float(j)*xScale) 450 | if gu.find_vertex(G, vprop, {'imgIdx':(j,i),'coord':nodeNumber1, 'nrOfPaths':0, 'diameter':float(dia[j][i])*avgScale}): 451 | 
v1=gu.find_vertex(G, vprop, {'imgIdx':(j,i),'coord':nodeNumber1, 'nrOfPaths':0, 'diameter':float(dia[j][i])*avgScale})[0] 452 | else: 453 | v1=G.add_vertex() 454 | vprop[G.vertex(v1)]={'imgIdx':(j,i),'coord':nodeNumber1, 'nrOfPaths':0, 'diameter':float(dia[j][i])*avgScale} 455 | try: 456 | 457 | if img[j,i+1] == True: 458 | nodeNumber2 = (float(i+1)*yScale,float(j)*xScale) 459 | if gu.find_vertex(G, vprop, {'imgIdx':(j,i+1),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j][i+1])*avgScale}): 460 | v2=gu.find_vertex(G, vprop, {'imgIdx':(j,i+1),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j][i+1])*avgScale})[0] 461 | if gu.find_edge(G, eprop, {'coord1':vprop[v2]['coord'], 'coord2':vprop[v1]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False}): 462 | pass 463 | else: 464 | e = G.add_edge(v1, v2) 465 | epropW[e]=(((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)/avgScale)**4 466 | eprop[e]={'coord1':vprop[v1]['coord'], 'coord2':vprop[v2]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False} 467 | else: 468 | v2=G.add_vertex() 469 | vprop[G.vertex(v2)]={'imgIdx':(j,i+1),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j][i+1])*avgScale} 470 | e = G.add_edge(v1, v2) 471 | epropW[e]=(((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)/avgScale)**4 472 | eprop[e]={'coord1':vprop[v1]['coord'], 'coord2':vprop[v2]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False} 473 | except: 474 | pass 475 | try: 476 | if img[j,i-1] == True: 477 | nodeNumber2 = (float(i-1)*yScale,float(j)*xScale) 478 | if gu.find_vertex(G, vprop, {'imgIdx':(j,i-1),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j][i-1])*avgScale}): 479 | v2=gu.find_vertex(G, vprop, {'imgIdx':(j,i-1),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j][i-1])*avgScale})[0] 480 | if gu.find_edge(G, eprop, {'coord1':vprop[v2]['coord'], 
'coord2':vprop[v1]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False}): 481 | pass 482 | else: 483 | e = G.add_edge(v1, v2) 484 | epropW[e]=(((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)/avgScale)**4 485 | eprop[e]={'coord1':vprop[v1]['coord'], 'coord2':vprop[v2]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False} 486 | else: 487 | v2=G.add_vertex() 488 | vprop[G.vertex(v2)]={'imgIdx':(j,i-1),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j][i-1])*avgScale} 489 | e = G.add_edge(v1, v2) 490 | epropW[e]=(((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)/avgScale)**4 491 | eprop[e]={'coord1':vprop[v1]['coord'], 'coord2':vprop[v2]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False} 492 | except:pass 493 | try: 494 | if img[j + 1,i] == True: 495 | nodeNumber2 = (float(i)*yScale,float(j+1)*xScale) 496 | if gu.find_vertex(G, vprop, {'imgIdx':(j+1,i),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j+1][i])*avgScale}): 497 | v2=gu.find_vertex(G, vprop, {'imgIdx':(j+1,i),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j+1][i])*avgScale})[0] 498 | if gu.find_edge(G, eprop, {'coord1':vprop[v2]['coord'], 'coord2':vprop[v1]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False}): 499 | pass 500 | else: 501 | e = G.add_edge(v1, v2) 502 | epropW[e]=(((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)/avgScale)**4 503 | eprop[e]={'coord1':vprop[v1]['coord'], 'coord2':vprop[v2]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False} 504 | else: 505 | v2=G.add_vertex() 506 | vprop[G.vertex(v2)]={'imgIdx':(j+1,i),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j+1][i])*avgScale} 507 | e = G.add_edge(v1, v2) 508 | epropW[e]=(((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)/avgScale)**4 509 | eprop[e]={'coord1':vprop[v1]['coord'], 
'coord2':vprop[v2]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False} 510 | except:pass 511 | try: 512 | if img[j - 1,i] == True: 513 | nodeNumber2 = (float(i)*yScale,float(j-1)*xScale) 514 | if gu.find_vertex(G, vprop, {'imgIdx':(j-1,i),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j-1][i])*avgScale}): 515 | v2=gu.find_vertex(G, vprop, {'imgIdx':(j-1,i),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j-1][i])*avgScale})[0] 516 | if gu.find_edge(G, eprop, {'coord1':vprop[v2]['coord'], 'coord2':vprop[v1]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False}): 517 | pass 518 | else: 519 | e = G.add_edge(v1, v2) 520 | epropW[e]=(((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)/avgScale)**4 521 | eprop[e]={'coord1':vprop[v1]['coord'], 'coord2':vprop[v2]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False} 522 | else: 523 | v2=G.add_vertex() 524 | vprop[G.vertex(v2)]={'imgIdx':(j-1,i),'coord':nodeNumber2, 'nrOfPaths':0, 'diameter':float(dia[j-1][i])*avgScale} 525 | e = G.add_edge(v1, v2) 526 | epropW[e]=(((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)/avgScale)**4 527 | eprop[e]={'coord1':vprop[v1]['coord'], 'coord2':vprop[v2]['coord'],'weight':((vprop[v1]['diameter']+vprop[v2]['diameter'])/2)**4,'RTP':False} 528 | except: pass 529 | # 530 | print '100.0%' 531 | print 'selecting largest connected component' 532 | G.edge_properties["ep"] = eprop 533 | G.edge_properties["w"] = epropW 534 | G.vertex_properties["vp"] = vprop 535 | l = gt.label_largest_component(G) 536 | print(l.a) 537 | u = gt.GraphView(G, vfilt=l) 538 | print '# vertices' 539 | print(u.num_vertices()) 540 | print(G.num_vertices()) 541 | print '# edges' 542 | print(u.num_edges()) 543 | print 'building graph finished in: '+str(time.time()-start)+'s' 544 | return u 545 | 546 | def findRootVertex(self,G): 547 | print 'finding root vertex X' 548 | h=self.__height 549 | vertexIndex = 0 550 | 
dTmp=0 551 | dMax=0 552 | vprop=G.vertex_properties["vp"] 553 | for v in G.vertices(): 554 | count=0 555 | for _ in v.out_neighbours(): 556 | count+=1 557 | if count >2: 558 | break 559 | if count>2: 560 | dTmp=vprop[v]['diameter'] 561 | if vprop[v]['imgIdx'][1] < h: 562 | dMax=dTmp 563 | h = vprop[v]['imgIdx'][1] 564 | vertexIndex = v 565 | return vertexIndex,dMax 566 | 567 | def findRootVertexLateral(self,G): 568 | print 'finding root vertex X' 569 | h=self.__height 570 | vertexIndex = 0 571 | 572 | vprop=G.vertex_properties["vp"] 573 | 574 | for v in G.vertices(): 575 | if vprop[v]['imgIdx'][1] < h: 576 | h = vprop[v]['imgIdx'][1] 577 | vertexIndex = v 578 | return vertexIndex 579 | 580 | def findLastRootVertex(self,G): 581 | dpath =0 582 | vertexIndex = 0 583 | vprop=G.vertex_properties["vp"] 584 | for i in G.vertices(): 585 | try: 586 | if vprop[i]['imgIdx'][1] > dpath: 587 | dpath = vprop[i]['imgIdx'][1] 588 | vertexIndex = i 589 | except: 590 | pass 591 | return vertexIndex 592 | 593 | def findLaterals(self,RTP,G,scale,path): 594 | if scale ==0.: 595 | scale=1. 596 | corresBranchPoints=[] 597 | laterals=[] 598 | distToFirstLateral=2000000000000000. 
599 | vprop=G.vertex_properties["vp"] 600 | idx=self.findRootVertexLateral(G) 601 | for i in RTP: 602 | if len(i)>0: 603 | for bp in i: 604 | d=float(vprop[G.vertex(bp)]['diameter']) 605 | radius=int(d/scale) # convert radius at branching point to pixels 606 | #print d,radius 607 | if radius>0: 608 | break 609 | 610 | # remove the radius from of the main trunk from the lateral length 611 | # to obtain the emerging lateral length from the surface 612 | 613 | if radius+2< len(i): 614 | lBranch=len(i[:radius]) 615 | laterals.append(i[radius:]) 616 | corresBranchPoints.append(i[0]) 617 | 618 | #if path is not given, then no distance to first lateral is computed 619 | if path!=None: 620 | 621 | x=vprop[G.vertex(idx)]['imgIdx'][0] # Note idx is a vertex object 622 | y=vprop[G.vertex(idx)]['imgIdx'][1] 623 | 624 | for i in corresBranchPoints: 625 | try: 626 | ix=vprop[G.vertex(i)]['imgIdx'][0] #Note: i is an index and the vertex object has to be called 627 | iy=vprop[G.vertex(i)]['imgIdx'][1] 628 | d=(ix-x)**2+(iy-y)**2 629 | if d < distToFirstLateral: 630 | distToFirstLateral=np.sqrt(d) 631 | except: 632 | pass 633 | 634 | if path == None: 635 | return laterals,corresBranchPoints 636 | else: 637 | return laterals,corresBranchPoints,distToFirstLateral*scale 638 | 639 | 640 | def findHypocotylCluster(self,thickestPath,rtpSkel): 641 | print 'find Cluster' 642 | branchingPaths=[] 643 | branchingPoints=[] 644 | radius=[] 645 | vprop= rtpSkel.vertex_properties["vp"] 646 | for i in thickestPath: 647 | # if len(nx.neighbors(rtpSkel, i))>2: 648 | branchingPaths.append(vprop[i]['nrOfPaths']) 649 | branchingPoints.append(i) 650 | #radius.append(rtpSkel.node[i]['diameter']) 651 | 652 | for i in branchingPoints: 653 | radius.append(vprop[i]['diameter']) 654 | 655 | bp=[] 656 | rad=[] 657 | tmpAvg=0. 658 | counter=0. 
659 | for i in range(len(branchingPoints)-1): 660 | if branchingPaths[i]==branchingPaths[i+1]: 661 | tmpAvg+=radius[i] 662 | counter+=1 663 | elif counter>0: 664 | tmpAvg=tmpAvg/counter 665 | rad.append(tmpAvg) 666 | bp.append(branchingPaths[i]) 667 | counter=0. 668 | tmpAvg=0. 669 | 670 | return bp,rad 671 | 672 | def makeSegmentationPicture(self,thickestPath,G,crownImg,xScale,yScale,c1x,c1y,c2x,c2y,c3x=None,c3y=None): 673 | 674 | print 'make cluster picture' 675 | crownImg=m.as_rgb(crownImg,crownImg,crownImg) 676 | vprop=G.vertex_properties["vp"] 677 | for i in thickestPath: 678 | 679 | if vprop[i]['nrOfPaths'] in c1y: 680 | 681 | y=int(vprop[i]['imgIdx'][0]) 682 | x=int(vprop[i]['imgIdx'][1]) 683 | try: crownImg[x][y]=(125,0,0) 684 | except: pass 685 | dia=vprop[i]['diameter']/(xScale/2+yScale/2) 686 | dia=dia*1.5 687 | for j in range(int(dia)): 688 | try: crownImg[x][y+j]=(125,0,0) 689 | except: pass 690 | try: crownImg[x][y-j]=(125,0,0) 691 | except: pass 692 | try: crownImg[x-j][y]=(125,0,0) 693 | except: pass 694 | try: crownImg[x+j][y]=(125,0,0) 695 | except: pass 696 | elif vprop[i]['nrOfPaths'] in c2y: 697 | y=int(vprop[i]['imgIdx'][0]) 698 | x=int(vprop[i]['imgIdx'][1]) 699 | try: crownImg[x][y]=(125,0,0) 700 | except: pass 701 | dia=vprop[i]['diameter']/(xScale/2+yScale/2) 702 | dia=dia*1.5 703 | for j in range(int(dia)): 704 | try: crownImg[x][y+j]=(0,125,0) 705 | except: pass 706 | try: crownImg[x][y-j]=(0,125,0) 707 | except: pass 708 | try: crownImg[x-j][y]=(0,125,0) 709 | except: pass 710 | try: crownImg[x+j][y]=(0,125,0) 711 | except: pass 712 | y=int(vprop[i]['imgIdx'][0]) 713 | x=int(vprop[i]['imgIdx'][1]) 714 | try: crownImg[x][y]=(0,0,125) 715 | except: pass 716 | dia=vprop[i]['diameter']/(xScale/2+yScale/2) 717 | dia=dia*1.5 718 | for j in range(int(dia)): 719 | try: crownImg[x][y+j]=(0,0,125) 720 | except: pass 721 | try: crownImg[x][y-j]=(0,0,125) 722 | except: pass 723 | try: crownImg[x-j][y]=(0,0,125) 724 | except: pass 725 | try: 
crownImg[x+j][y]=(0,0,125) 726 | except: pass 727 | return crownImg 728 | 729 | 730 | --------------------------------------------------------------------------------