├── .gitignore ├── .travis.yml ├── CreateTrainJob.m ├── DefDataPackages.m ├── EnsemblePredictions.m ├── LICENSE ├── Makefile ├── Merge_LargeData.m ├── PlotValidation.m ├── PreprocessPackage.m ├── PreprocessTrainingData.m ├── PreprocessValidation.m ├── README.md ├── StartPostprocessing.m ├── VERSION ├── aws ├── basic_cloudformation.json ├── delete_keypair.py ├── delete_stack.py ├── describe_stack.py ├── find_running_cloudformations.py ├── find_running_ec2_instances.py ├── get_iam_users.py ├── get_keypairs.py ├── get_updated_ami_mappings.py ├── launch_cdeep3m.py ├── motd └── run100imagebenchmark.py ├── caffepredict.sh ├── caffetrain.sh ├── commonfunctions.sh ├── mito_testsample ├── testset │ ├── images.081.png │ ├── images.082.png │ ├── images.083.png │ ├── images.084.png │ └── images.085.png ├── training │ ├── images │ │ ├── images.010.png │ │ ├── images.011.png │ │ ├── images.012.png │ │ ├── images.013.png │ │ ├── images.014.png │ │ ├── images.015.png │ │ ├── images.016.png │ │ ├── images.017.png │ │ ├── images.018.png │ │ ├── images.019.png │ │ ├── images.020.png │ │ ├── images.021.png │ │ ├── images.022.png │ │ ├── images.023.png │ │ ├── images.024.png │ │ ├── images.025.png │ │ ├── images.026.png │ │ ├── images.027.png │ │ ├── images.028.png │ │ └── images.029.png │ └── labels │ │ ├── mitos_3D.010.png │ │ ├── mitos_3D.011.png │ │ ├── mitos_3D.012.png │ │ ├── mitos_3D.013.png │ │ ├── mitos_3D.014.png │ │ ├── mitos_3D.015.png │ │ ├── mitos_3D.016.png │ │ ├── mitos_3D.017.png │ │ ├── mitos_3D.018.png │ │ ├── mitos_3D.019.png │ │ ├── mitos_3D.020.png │ │ ├── mitos_3D.021.png │ │ ├── mitos_3D.022.png │ │ ├── mitos_3D.023.png │ │ ├── mitos_3D.024.png │ │ ├── mitos_3D.025.png │ │ ├── mitos_3D.026.png │ │ ├── mitos_3D.027.png │ │ ├── mitos_3D.028.png │ │ └── mitos_3D.029.png └── validation │ ├── images │ ├── images.050.png │ ├── images.051.png │ ├── images.052.png │ ├── images.053.png │ ├── images.054.png │ ├── images.055.png │ ├── images.056.png │ ├── images.057.png │ ├── images.058.png │ └── images.059.png │ └── labels │ ├── mitos_3D.050.png │ ├── mitos_3D.051.png │ ├── mitos_3D.052.png │ ├── mitos_3D.053.png │ ├── mitos_3D.054.png │ ├── mitos_3D.055.png │ ├── mitos_3D.056.png │ ├── mitos_3D.057.png │ ├── mitos_3D.058.png │ └── mitos_3D.059.png ├── model ├── LICENSE ├── inception_residual_train_prediction_1fm │ ├── deploy.prototxt │ ├── label_class_selection.prototxt │ ├── solver.prototxt │ └── train_val.prototxt ├── inception_residual_train_prediction_3fm │ ├── deploy.prototxt │ ├── label_class_selection.prototxt │ ├── solver.prototxt │ └── train_val.prototxt └── inception_residual_train_prediction_5fm │ ├── deploy.prototxt │ ├── label_class_selection.prototxt │ ├── solver.prototxt │ └── train_val.prototxt ├── postprocessworker.sh ├── predictworker.sh ├── preprocessworker.sh ├── runprediction.sh ├── runtraining.sh ├── runvalidation.sh ├── scripts ├── Histmatch.m ├── SNEMI3D_metrics.m ├── augment_data.m ├── functions │ ├── add_z_padding.m │ ├── augment_image_data_only.m │ ├── augment_package.m │ ├── break_large_img.m │ ├── check_image_size.m │ ├── check_img_dims.m │ ├── checkpoint_isbinary.m │ ├── checkpoint_nobinary.m │ ├── convert_training_data_to_h5stack.m │ ├── copy_model.m │ ├── copy_over_allmodels.m │ ├── copy_version.m │ ├── create_dir.m │ ├── create_predict_outdir.m │ ├── crop_png.py │ ├── filter_files.m │ ├── full_fill.m │ ├── get_pkg_folders.m │ ├── get_train_basemodel_names.m │ ├── get_variation_folders.m │ ├── hmtransf.m │ ├── imageimporter.m │ ├── imageimporter_large.m │ ├── 
nanmean.m │ ├── overlay.py │ ├── read_files_in_folder.m │ ├── run_train.m │ ├── update_solverproto_txt_file.m │ ├── update_train_val_prototxt.m │ ├── verify_and_create_train_file.m │ └── write_train_readme.m ├── generate_16_average_probs.m ├── label2rgb3d.m ├── post_processing │ ├── Adjust3DWatershed.m │ ├── Apply3DWatershed.m │ ├── merge_16_probs_v2.m │ └── merge_16_probs_v3.m └── write_label2rgb_image.m ├── singularity └── ubuntu-xenial64-sdsc-comet │ ├── Makefile │ ├── Vagrantfile │ ├── bootstrap.sh │ ├── cdeep3m-1.6.2.def │ └── ubuntu-cuda.def ├── tests ├── RunUnitTests.m ├── caffepredict.sh.bats ├── caffetrain.sh.bats ├── commonfunctions.sh.bats ├── octavetests.bats ├── postprocessworker.sh.bats ├── predictworker.sh.bats ├── preprocessworker.sh.bats ├── runprediction.sh.bats ├── runtraining.sh.bats ├── system │ ├── 1fmonlydemo2.bats │ ├── checkstitch.bats │ ├── demo1.bats │ ├── demo2.bats │ ├── retraindemo.bats │ └── testdata │ │ └── 2kimage │ │ └── images.081.mirrored.png └── trainworker.sh.bats ├── trainworker.sh └── vagrant ├── Vagrantfile ├── bootstrap.sh └── cdeep3m_logo-01.png /.gitignore: -------------------------------------------------------------------------------- 1 | vagrant/.vagrant 2 | vagrant/ubuntu*.log 3 | *.swp 4 | singularity/ubuntu-xenial64-sdsc-comet/.vagrant 5 | singularity/ubuntu-xenial64-sdsc-comet/ubuntu*.log 6 | singularity/ubuntu-xenial64-sdsc-comet/*.img 7 | singularity/ubuntu-xenial64-sdsc-comet/build 8 | dist/ 9 | 10 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: bash 2 | 3 | before_install: 4 | - sudo apt-get update 5 | - sudo apt-get install -y octave octave-image octave-pkg-dev git python-pip unzip 6 | 7 | # command to install dependencies 8 | install: 9 | - wget https://github.com/stegro/hdf5oct/archive/b047e6e611e874b02740e7465f5d139e74f9765f.zip 10 | - unzip b047e6e611e874b02740e7465f5d139e74f9765f.zip 11 | - pushd hdf5oct-* && make && sudo make install && popd 12 | - wget https://github.com/bats-core/bats-core/archive/v0.4.0.tar.gz 13 | - tar -zxf v0.4.0.tar.gz 14 | - pushd bats-core-0.4.0 && sudo ./install.sh /usr/local && popd 15 | 16 | # command to run tests 17 | script: make test 18 | 19 | -------------------------------------------------------------------------------- /CreateTrainJob.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | 3 | % CreateTrainJob 4 | % Generates Training job that uses caffe on3 models, 1fm, 3fm, and 5fm 5 | % -> Outputs trained caffe model to output directory 6 | % 7 | % Syntax : CreateTrainJob.m 8 | % 9 | % 10 | %------------------------------------------------------------------------------- 11 | %% Train for Deep3M -- NCMIR/NBCR, UCSD -- Author: C Churas -- Date: 12/2017 12 | %------------------------------------------------------------------------------- 13 | % 14 | % ------------------------------------------------------------------------------ 15 | %% Initialize 16 | % ------------------------------------------------------------------------------ 17 | 18 | script_dir = fileparts(make_absolute_filename(program_invocation_name())); 19 | addpath(genpath(script_dir)); 20 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep()))); 21 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep(),'functions'))); 22 | tic 23 | pkg load hdf5oct 24 | pkg load image 25 | 26 | run_train(argv()); 27 | 28 | 
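% Usage sketch -- the argument list below is hypothetical; run_train.m under
% scripts/functions/ defines the actual interface, since argv() is handed to
% it unparsed:
%   ./CreateTrainJob.m <augmented_training_data_dir> <train_output_dir>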
-------------------------------------------------------------------------------- /DefDataPackages.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | % DefDataPackages 3 | % 4 | % -> Defines the size of data packages used for augmentation of image data 5 | % Input: Image folder and output directory to store de_augmentation file 6 | % Output: de_augmentation_info.mat 7 | % 8 | % Syntax : DefDataPackages /ImageData/EMdata1/ /ImageData/AugmentedEMData/ 9 | % 10 | % 11 | %---------------------------------------------------------------------------------------- 12 | %% DefDataPackages for CDeep3M -- NCMIR/NBCR, UCSD -- Author: M Haberl 13 | %---------------------------------------------------------------------------------------- 14 | % 15 | % ---------------------------------------------------------------------------------------- 16 | %% Initialize 17 | % ---------------------------------------------------------------------------------------- 18 | 19 | disp('Defining data packages for augmentation'); 20 | tic 21 | script_dir = fileparts(make_absolute_filename(program_invocation_name())); 22 | addpath(genpath(script_dir)); 23 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep()))); 24 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep(),'functions'))); 25 | 26 | 27 | pkg load hdf5oct 28 | pkg load image 29 | 30 | arg_list = argv (); 31 | if numel(arg_list)<2; disp('Use -> DefDataPackages /ImageData/EMdata1/ /ImageData/AugmentedEMData/'); return; end 32 | in_img_path = arg_list{1}; 33 | outdir = arg_list{2}; 34 | if ~exist(outdir,'dir'), mkdir(outdir); end 35 | 36 | % ---------------------------------------------------------------------------------------- 37 | %% 38 | % ---------------------------------------------------------------------------------------- 39 | 40 | imagesize = check_image_size(in_img_path); 41 | [packages,z_blocks] = break_large_img(imagesize); 42 | num_of_pkg = numel(packages); 43 | save(fullfile(outdir,'de_augmentation_info.mat'),'packages','num_of_pkg','imagesize','z_blocks'); 44 | 45 | document = fullfile(outdir,'package_processing_info.txt'); 46 | opendoc = fopen(document, "w"); 47 | fprintf(opendoc, '\nNumber of XY Packages\n%s\nNumber of z-blocks\n%s', num2str(num_of_pkg),num2str(numel(z_blocks)-1)); 48 | fclose(opendoc); 49 | -------------------------------------------------------------------------------- /EnsemblePredictions.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | %% EnsemblePredictions 3 | % different predictions coming from files e.g. 
from 1fm 3fm and 5fm will be averaged here 4 | % flexible number of inputs 5 | % last argument has to be the outputdirectory where the average files are stored 6 | % 7 | % ----------------------------------------------------------------------------- 8 | %% NCMIR, UCSD -- Author: M Haberl -- Data: 10/2017 9 | % ----------------------------------------------------------------------------- 10 | % 11 | 12 | %% Initialize 13 | pkg load hdf5oct 14 | pkg load image 15 | 16 | script_dir = fileparts(make_absolute_filename(program_invocation_name())); 17 | addpath(genpath(script_dir)); 18 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep()))); 19 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep(),'functions'))); 20 | tic 21 | 22 | arg_list = argv (); 23 | 24 | if numel(arg_list) < 3 25 | fprintf('Please specify more than one input directory to average: EnsemblePredictions ./inputdir1 ./inputdir2 ./inputdir3 ./outputdir\n'); 26 | return 27 | end 28 | 29 | for i = 1:(numel(arg_list)-1) 30 | to_process{i} = arg_list{i}; 31 | if ~isdir(arg_list{i}) 32 | fprintf('%s not a directory\nPlease use: EnsemblePredictions ./inputdir1 ./inputdir2 ./inputdir3 ./outputdir\n',arg_list{i}); 33 | return 34 | end 35 | list{i} = filter_files(read_files_in_folder(to_process{i}),'.png'); 36 | end 37 | outputdir = arg_list{numel(arg_list)}; 38 | mkdir(outputdir); 39 | %raw_image_full_path = arg_list{end}; 40 | 41 | %% =============== Generate ensemble predictions ================================= 42 | 43 | %merged_file_save=fullfile(outfolder, 'EnsemblePredict.tiff'); 44 | %if exist(merged_file_save, 'file'),delete(merged_file_save); end 45 | %outputdir = fileparts(to_process{1}); % Writes automatically in the parent directory of the first prediction folder 46 | total_zplanes = size(list{1},1); 47 | for z = 1:total_zplanes 48 | for proc = 1:numel(to_process) 49 | image_name = fullfile(to_process{proc}, list{proc}(z).name); 50 | cumul_plane(:,:,proc) = imread(image_name); %Cumulate all average predictions of this plane 51 | end 52 | prob_map = uint8(mean(cumul_plane,3)); 53 | 54 | save_file_save = fullfile(outputdir, list{1}(z).name); 55 | fprintf('Saving Image # %s of %s: %s\n', num2str(z), num2str(total_zplanes),save_file_save); 56 | imwrite(prob_map, save_file_save); 57 | clear cumul_plane prob_map; 58 | end 59 | 60 | fprintf('Elapsed time for merging predictions is %06d seconds.\n', round(toc)); 61 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Different license applies to code in model directory. See model/LICENSE 2 | 3 | Copyright   2018   The Regents of the University of California 4 | All Rights Reserved 5 | 6 | 7 | Permission to copy, modify and distribute any part of this CDeep3M for educational, research and non-profit purposes, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following three paragraphs appear in all copies. 8 | 9 | Those desiring to incorporate this CDeep3M into commercial products or use for commercial purposes should contact the Technology Transfer Office, University of California, San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910, Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu. 
10 | 11 | IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS CDeep3M, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 12 | 13 | THE CDeep3M PROVIDED HEREIN IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.  THE UNIVERSITY OF CALIFORNIA MAKES NO REPRESENTATIONS AND EXTENDS NO WARRANTIES OF ANY KIND, EITHER IMPLIED OR EXPRESS, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE CDeep3M WILL NOT INFRINGE ANY PATENT, TRADEMARK OR OTHER RIGHTS. 14 | 15 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean help updateversion 2 | .DEFAULT_GOAL := help 3 | define BROWSER_PYSCRIPT 4 | import os, webbrowser, sys 5 | try: 6 | from urllib import pathname2url 7 | except: 8 | from urllib.request import pathname2url 9 | 10 | webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1]))) 11 | endef 12 | export BROWSER_PYSCRIPT 13 | 14 | define PRINT_HELP_PYSCRIPT 15 | import re, sys 16 | 17 | for line in sys.stdin: 18 | match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) 19 | if match: 20 | target, help = match.groups() 21 | print("%-20s %s" % (target, help)) 22 | endef 23 | export PRINT_HELP_PYSCRIPT 24 | BROWSER := python -c "$$BROWSER_PYSCRIPT" 25 | 26 | help: 27 | @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) 28 | 29 | clean: ## remove all build, and test artifacts 30 | rm -fr dist/ 31 | 32 | test: ## run tests 33 | bats tests 34 | 35 | checkrepo: ## checks if remote repo is CRBS 36 | @therepo=`git remote get-url origin | sed "s/^.*://" | sed "s/\/.*//"` ;\ 37 | if [ "$$therepo" != "CRBS" ] ; then \ 38 | echo "ERROR can only do a release from master repo, not from $$therepo" ; \ 39 | exit 1 ;\ 40 | else \ 41 | echo "Repo appears to be master $$therepo" ; \ 42 | fi 43 | 44 | release: dist checkrepo ## package and upload a release to s3 45 | @echo "Creating new release" 46 | @vers=`cat VERSION` ; \ 47 | tarfile=cdeep3m-$${vers}.tar.gz ;\ 48 | cloudform=cdeep3m_$${vers}_basic_cloudformation.json ;\ 49 | aws s3 cp dist/$$cloudform s3://cdeep3m-releases/$${vers}/$$cloudform --acl public-read ; \ 50 | aws s3 cp dist/$$tarfile s3://cdeep3m-releases/$${vers}/$$tarfile --acl public-read ; \ 51 | deep3mdirname=cdeep3m-$$vers ;\ 52 | distdir=dist/$$deep3mdirname ;\ 53 | cp $$distdir/README.md . ;\ 54 | branchy=`git branch --list | sed "s/^\* *//"` ;\ 55 | git commit -m 'updated launch stack link' README.md ;\ 56 | git push origin $$branchy ;\ 57 | git tag -a v$${vers} -m 'new release' ; \ 58 | git push origin v$${vers} 59 | 60 | dist: clean ## creates distributable package 61 | @vers=`cat VERSION` ; \ 62 | hvers=`cat VERSION | sed "s/\./-/g"` ;\ 63 | deep3mdirname=cdeep3m-$$vers ;\ 64 | distdir=dist/$$deep3mdirname ;\ 65 | /bin/mkdir -p $$distdir ;\ 66 | cat aws/motd | sed "s/@@VERSION@@/$${vers}/g" > $$distdir/motd ;\ 67 | cp *.m $$distdir/. ;\ 68 | cp *.sh $$distdir/. ;\ 69 | cp -a scripts $$distdir/. ;\ 70 | cp -a mito_testsample $$distdir/. ;\ 71 | cp -a README.md $$distdir/. 
;\ 72 | sed -i "s/cdeep3m-stack-.*template/cdeep3m-stack-$$hvers\&template/g" $$distdir/README.md ;\ 73 | sed -i "s/releases\/.*\/cdeep3m.*\.json/releases\/$$vers\/cdeep3m\_$$vers\_basic\_cloudformation.json/g" $$distdir/README.md ;\ 74 | sed -i "s/cdeep3m\/archive\/.*\.tar.gz/cdeep3m\/archive\/v$$vers\.tar\.gz/"g $$distdir/README.md ;\ 75 | sed -i "s/^tar -zxf v.*tar.gz/tar -zxf v$$vers.tar.gz/g" $$distdir/README.md ;\ 76 | sed -i "s/^cd cdeep3m-.*/cd cdeep3m-$$vers/g" $$distdir/README.md ;\ 77 | cp -a LICENSE $$distdir/. ;\ 78 | cp -a model $$distdir/. ;\ 79 | cp -a tests $$distdir/. ;\ 80 | cp VERSION $$distdir/. ;\ 81 | cat aws/basic_cloudformation.json | sed "s/@@VERSION@@/$${vers}/g" > dist/cdeep3m_$${vers}_basic_cloudformation.json ;\ 82 | tar -C dist/ -cz $$deep3mdirname > $$distdir.tar.gz 83 | ls -l dist 84 | 85 | updateversion: ## Updates version by updating VERSION file 86 | @cv=`cat VERSION`; \ 87 | read -p "Current ($$cv) enter new version: " vers; \ 88 | echo "Updating VERSION with new version: $$vers"; \ 89 | echo $$vers > VERSION 90 | 91 | deploytohost: dist ## Deploys cdeep3m to host specified by $cdeep3m_host 92 | @cv=`cat VERSION`; \ 93 | deep3mdirname=cdeep3m-$$cv ;\ 94 | distdir=dist/$$deep3mdirname ;\ 95 | echo "Version is $$cv and deploying to $$cdeep3m_host" ; \ 96 | scp $$distdir.tar.gz ubuntu@$$cdeep3m_host:/home/ubuntu/. ; \ 97 | ssh ubuntu@$$cdeep3m_host /bin/rm -rf `readlink /home/ubuntu/cdeep3m` ;\ 98 | ssh ubuntu@$$cdeep3m_host tar -zxf /home/ubuntu/$$deep3mdirname.tar.gz 99 | 100 | -------------------------------------------------------------------------------- /Merge_LargeData.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | % Merge_LargeData 3 | % 4 | % After segmentation of the smaller image packages this 5 | % script stitches the initial dataset back together. 6 | % Assumes packages are in the subdirectories 1fm / 3fm / 5fm 7 | % and expects a de_augmentation_info.mat in the parent directory thereof. 8 | % 9 | % Runs after StartPostprocessing, which merges the 16 variations 10 | % and has already removed the z-padding. 
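% For reference: de_augmentation_info.mat is written by DefDataPackages.m
% and holds packages (the x/y crop areas), num_of_pkg, imagesize and
% z_blocks, which drive the stitching below.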
11 | % 12 | % 13 | % Use: Merge_LargeData ~/prediction/1fm 14 | % expects de_augmentation_info.mat in the parent directory 15 | % 16 | %------------------------------------------------------------------ 17 | %% NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 10/2017 18 | %------------------------------------------------------------------ 19 | 20 | disp('Starting to merge large image dataset'); 21 | pkg load image 22 | script_dir = fileparts(make_absolute_filename(program_invocation_name())); 23 | addpath(genpath(script_dir)); 24 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep()))); 25 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep(),'functions'))); 26 | 27 | arg_list = argv (); 28 | 29 | if numel(arg_list) == 0; 30 | disp('Use -> Merge_LargeData ~/prediction/1fm'); 31 | return; 32 | else 33 | fm_dir = arg_list{1}; 34 | end 35 | 36 | tic 37 | if fm_dir(end)==filesep; fm_dir=fm_dir(1:end-1); end %fixing special case which can cause error 38 | [parent_dir,~,ext] = fileparts(fm_dir); 39 | de_aug_file = fullfile(parent_dir,'de_augmentation_info.mat'); 40 | disp('Processing:');disp(de_aug_file); 41 | load(de_aug_file,'packages','num_of_pkg','imagesize','zplanes','z_blocks'); 42 | 43 | %% Merge Z-sections 44 | % first combine images from the same x/y areas through all z-planes 45 | disp('Combining image stacks'); 46 | for x_y_num = 1:numel(packages) 47 | imcounter = 0; %Reset imagecounter to combine next Package 48 | combined_folder = fullfile(fm_dir, sprintf('Pkg_%03d',x_y_num)); 49 | mkdir(combined_folder); 50 | for z_plane = 1:(numel(z_blocks)-1) 51 | in_folder = fullfile(fm_dir, sprintf('Pkg%03d_Z%02d',x_y_num, z_plane)); 52 | disp(['Reading:', in_folder]); 53 | imlist = read_files_in_folder(in_folder); 54 | imlist = filter_files(imlist,'.png'); 55 | for file = 1:numel(imlist) 56 | imcounter = imcounter + 1; 57 | in_filename = fullfile(in_folder,imlist(file).name); 58 | out_filename = fullfile(combined_folder, sprintf('segmentation_%04d.png', imcounter)); 59 | movefile(in_filename, out_filename); 60 | end 61 | 62 | end 63 | end 64 | 65 | z_found = numel(filter_files(read_files_in_folder(fullfile(fm_dir, sprintf('Pkg_001'))),'.png')); 66 | fprintf('Expected number of planes: %s ... Found: %s planes\n', num2str(z_blocks(end)),num2str(z_found)); 67 | %% Now stitch individual sections 68 | combined_folder = fullfile(fm_dir, sprintf('Pkg_%03d',1)); %read in the filenames of the first Pkg 69 | filelist = read_files_in_folder(combined_folder); 70 | for z_plane = 1:z_found %one z-plane at a time 71 | fprintf('Merging image no. 
%s\n', num2str(z_plane)); 72 | 73 | merger_image = (NaN([imagesize(1:2),1],'single')); %Initialize empty image in x/y 2 in z 74 | for x_y_num = 1:numel(packages) 75 | packagedir = fullfile(fm_dir, sprintf('Pkg_%03d',x_y_num)); 76 | filename = fullfile(packagedir, filelist(z_plane).name); 77 | 78 | small_patch = single(imread(filename)); 79 | %bitdepth = single(2.^([1:16])); 80 | %[~,idx] = min(abs(bitdepth - max(small_patch(:)))); 81 | %fprintf('Scaling %s bit image\n', num2str(idx)); 82 | %save_plane = uint8((255 /bitdepth(idx))*combined_plane); 83 | %small_patch = single((255 /bitdepth(idx))*small_patch); 84 | %small_patch = single((255 /max(small_patch(:)))*small_patch); 85 | area = packages{x_y_num}; 86 | if numel(packages)>1 87 | 88 | corners = [area(1)+12, area(2)-12, area(3)+12, area(4)-12]; 89 | if area(1)==1; corners(1) = 1; end; if area(2)==size(merger_image,1), corners(2) = size(merger_image,1); end 90 | if area(3)==1; corners(3) = 1; end; if area(4)==size(merger_image,2), corners(4) = size(merger_image,2); end 91 | if corners(2)>size(merger_image,1), corners(2) = size(merger_image,1); end; 92 | if corners(4)>size(merger_image,2), corners(4) = size(merger_image,2); end; 93 | insertsize = [(corners(2)+1)-corners(1),(corners(4)+1)-corners(3)]; 94 | 95 | merger_image(corners(1):corners(2),corners(3):corners(4),1) = small_patch(13:insertsize(1)+12,13:insertsize(2)+12); 96 | 97 | else %if there is only one package 98 | if imagesize(1)<=1012, start(1) = 13;else, start(1) = 1;end %define where the image has been padded 99 | if imagesize(2)<=1012, start(2) = 13;else, start(2) = 1;end %define where the image has been padded 100 | clear merger_image; 101 | merger_image = small_patch(start(1):(imagesize(1)+start(1)-1),start(2):(imagesize(2)+start(2)-1)); 102 | end 103 | 104 | end 105 | 106 | bitdepth = single(2.^([1:16])); 107 | %fprintf('Scaling %s bit image\n', num2str(idx)); 108 | [~,idx] = min(abs(bitdepth - single(max(merger_image(:))))); 109 | save_plane = uint8((255 /bitdepth(idx))*merger_image); 110 | outfile = fullfile(fm_dir, sprintf('Segmented_%04d.png',z_plane)); 111 | %fprintf('Saving image %s\n', outfile); 112 | imwrite(save_plane,outfile); 113 | 114 | end 115 | disp('Merging large image dataset completed'); 116 | toc 117 | fprintf('Your results are in: %s\n', fm_dir); 118 | 119 | done_file = fopen(strcat(fm_dir, filesep(),"DONE"), "w"); 120 | fprintf(done_file,"0\n"); 121 | fclose(done_file); 122 | 123 | -------------------------------------------------------------------------------- /PlotValidation.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | 3 | 4 | % PlotValidation 5 | % Generates traing vs validation loss plot. 
6 | % 7 | % Syntax : PlotValidation.m 8 | % 9 | % or can be run directly on a log folder of CDeep3M 10 | % Syntax : PlotValidation.m ~/trainingdata/1fm/log 11 | % => Will create csv files in same log folder and plot of loss.png in same directory 12 | % 13 | %------------------------------------------------------------------------------- 14 | %% Validation for Deep3M -- NCMIR/NBCR, UCSD -- Author: L Tindall -- Date: 5/2018 15 | %------------------------------------------------------------------------------- 16 | % 17 | 18 | % arg_list = 19 | % { 20 | % [1,1] = train_output.csv 21 | % [2,1] = test_output.csv 22 | % [3,1] = output dir 23 | % } 24 | arg_list = argv (); 25 | if numel(arg_list) == 0 26 | fprintf('\nSyntax:\n PlotValidation.m \nor\nPlotValidation.m ~/trainingdata/1fm/log\n');return 27 | elseif numel(arg_list) == 1 28 | logdir = arg_list{1,1}; 29 | if exist(logdir,'dir')==7 30 | disp('Parsing log file'); 31 | train_file = fullfile(logdir, 'out.log.train'); 32 | test_file = fullfile(logdir, 'out.log.test'); 33 | system(sprintf('python ~/caffe_nd_sense_segmentation/tools/extra/parse_log.py %s %s',fullfile(logdir, 'out.log'), logdir)); 34 | else 35 | disp('Invalid argument'); 36 | return 37 | end 38 | 39 | else 40 | logdir = arg_list{3,1}; 41 | train_file = arg_list{1,1}; 42 | test_file = arg_list{2,1}; 43 | end 44 | 45 | disp('Reading CSV files'); 46 | % column format for train_output csv (NumIters,Seconds,LearningRate,loss_deconv_all) 47 | train_output = csvread(train_file,1,0); 48 | 49 | % column format for test_output csv (NumIters,Seconds,LearningRate,accuracy_conv,class_Acc,loss_deconv_all) 50 | test_output = csvread(test_file,1,0); 51 | 52 | 53 | % Plot loss 54 | plt_loss = figure; 55 | plot(train_output(:,1),train_output(:,4), test_output(:,1), test_output(:,6)); 56 | grid on; 57 | set(gca, 'xlabel', 'Number of Iterations'); 58 | set(gca, 'ylabel', 'Loss'); 59 | set(gca, 'Title', 'Training vs Validation Loss'); 60 | set(gca, 'YMinorTick','on', 'YMinorGrid','on'); 61 | set(gca,'YScale','log'); 62 | legend("Training","Validation"); 63 | 64 | outfile = fullfile(logdir, 'loss.pdf'); 65 | 66 | %print(plt_loss,outfile, "-dpngcairo"); 67 | print(plt_loss,outfile, "-dpdfcairo"); 68 | fprintf('Your loss output file is saved as: %s\n', outfile); 69 | 70 | 71 | 72 | 73 | % Plot accuracy 74 | plt_accuracy = figure; 75 | plot(test_output(:,1), test_output(:,4)); 76 | grid on; 77 | set(gca, 'xlabel', 'Number of iterations'); 78 | set(gca, 'ylabel', 'Accuracy'); 79 | set(gca, 'Title', 'Validation Accuracy'); 80 | 81 | 82 | 83 | outfile = fullfile(logdir, 'accuracy.pdf'); 84 | print(plt_accuracy,outfile, "-dpdfcairo"); 85 | fprintf('Your accuracy output file is saved as: %s\n', outfile); 86 | 87 | -------------------------------------------------------------------------------- /PreprocessPackage.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | % preprocess_package 3 | % receives package index numbers to process 4 | % requires data_packagedef to have run before 5 | % -> Makes augmented hdf5 datafiles from raw images based on defining parameters 6 | % 7 | % Syntax : preprocess_package indir outdir xy_package z_stack augmentation speed 8 | % Example: preprocess_package ~/EMdata1/ ~/AugmentedEMData/ 15 2 1fm 10 9 | % 10 | % Speed: supported values 1,2,4 or 10 11 | % speeds up processing potentially with a negative effect on accuracy (speed of 1 equals highest accuracy) 12 | % 13 | % 14 | 
%---------------------------------------------------------------------------------------- 15 | %% preprocess_package for Deep3M -- NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 04/2018 16 | %---------------------------------------------------------------------------------------- 17 | % 18 | % ---------------------------------------------------------------------------------------- 19 | %% Initialize 20 | % ---------------------------------------------------------------------------------------- 21 | 22 | disp('Starting Image Augmentation'); 23 | tic 24 | script_dir = fileparts(make_absolute_filename(program_invocation_name())); 25 | addpath(genpath(script_dir)); 26 | 27 | pkg load hdf5oct 28 | pkg load image 29 | 30 | arg_list = argv (); 31 | %if numel(arg_list)<2; disp('Use -> PreProcessImageData /ImageData/EMdata1/ /ImageData/AugmentedEMData/'); return; end 32 | in_img_path = arg_list{1}; 33 | outdir = arg_list{2}; 34 | ii = str2num(arg_list{3}); 35 | zz = str2num(arg_list{4}); 36 | fmtype = arg_list{5}; 37 | fmnumber = str2num(fmtype(1)); 38 | speed = str2num(arg_list{6}); 39 | 40 | fmdir = fullfile(outdir,[num2str(fmnumber),'fm']); 41 | if ~exist(fmdir,'dir'), mkdir(fmdir); end 42 | load(fullfile(outdir,'de_augmentation_info.mat'),'packages','num_of_pkg','imagesize','z_blocks'); 43 | % ---------------------------------------------------------------------------------------- 44 | %% 45 | % ---------------------------------------------------------------------------------------- 46 | if zz == 1 47 | z_stack = [z_blocks(zz), z_blocks(zz+1)] 48 | else 49 | z_stack = [z_blocks(zz)+1, z_blocks(zz+1)] 50 | end 51 | 52 | %if ii ==1; t1 = tic; end 53 | %if ii ==2; t_int = toc(t1)/60; end 54 | %fprintf('------- Image Augmentation large data ------\n'); 55 | %fprintf('-------- Augmenting Part %s out of %s -------\n', num2str(ii), num2str(num_of_pkg)); 56 | %if ii>2 57 | %fprintf('-> Remaining time estimated: %s min\n', num2str(round(t_int*(num_of_pkg-ii)))); 58 | %end 59 | %define label name 60 | 61 | area = packages{ii}; 62 | [stack] = imageimporter_large(in_img_path,area,z_stack,outdir); %load only subarea here 63 | checkpoint_nobinary(stack); 64 | disp('Padding images'); 65 | [stack] = add_z_padding(stack); %adds 2 planes at beginning and end 66 | 67 | %% Augment and save image data 68 | outsubdir = fullfile(fmdir, sprintf('Pkg%03d_Z%02d',ii, zz)); 69 | if ~exist(outsubdir,'dir'), mkdir(outsubdir); end 70 | augment_package(stack, outsubdir,fmnumber,speed); 71 | clear -v stack 72 | clear -v data 73 | 74 | done_file = fopen(strcat(outsubdir, filesep(),"DONE"), "w"); 75 | fprintf(done_file,"0\n"); 76 | fclose(done_file); 77 | -------------------------------------------------------------------------------- /PreprocessTrainingData.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | % 3 | % PreprocessTraining 4 | % Makes augmented hdf5 datafiles from raw and label images 5 | % 6 | % Syntax : PreprocessTraining /ImageData/training/images/ /ImageData/training/labels/ /ImageData/augmentedtraining/ 7 | % 8 | % 9 | %---------------------------------------------------------------------------------------- 10 | %% PreprocessTraining for Deep3M -- NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 10/2017 11 | %---------------------------------------------------------------------------------------- 12 | % 13 | % Adapted to speed up processing: 14 | % reduced runtime from >20 min for a 1024x1024x100 dataset to ~1-2 min 15 | % 16 | 17 | 18 | % 
---------------------------------------------------------------------------------------- 19 | %% Initialize 20 | % ---------------------------------------------------------------------------------------- 21 | warning("off") 22 | disp('Starting Training data Preprocessing'); 23 | pkg load hdf5oct 24 | pkg load image 25 | script_dir = fileparts(make_absolute_filename(program_invocation_name())); 26 | addpath(genpath(script_dir)); 27 | 28 | arg_list = argv (); 29 | if numel(arg_list)<3; disp('Use -> PreprocessTraining /ImageData/training/images/ /ImageData/training/labels/ /ImageData/augmentedtraining/'); return; end 30 | 31 | tic 32 | trainig_img_path = arg_list{1}; 33 | disp('Training Image Path:');disp(trainig_img_path); 34 | label_img_path = arg_list{2}; 35 | disp('Training Label Path:');disp(label_img_path); 36 | outdir = arg_list{3}; 37 | disp('Output Path:');disp(outdir); 38 | 39 | % ---------------------------------------------------------------------------------------- 40 | %% Load training images 41 | % ---------------------------------------------------------------------------------------- 42 | 43 | disp('Loading:'); 44 | disp(trainig_img_path); 45 | [imgstack] = imageimporter(trainig_img_path); 46 | disp('Verifying images'); 47 | checkpoint_nobinary(imgstack); 48 | 49 | % ---------------------------------------------------------------------------------------- 50 | %% Load train data 51 | % ---------------------------------------------------------------------------------------- 52 | 53 | disp('Loading:'); 54 | disp(label_img_path); 55 | [lblstack] = imageimporter(label_img_path); 56 | disp('Verifying labels'); 57 | checkpoint_isbinary(lblstack); 58 | 59 | % ---------------------------------------------------------------------------------------- 60 | %% Check size of images and labels 61 | % ---------------------------------------------------------------------------------------- 62 | 63 | [imgstack, lblstack] = check_img_dims(imgstack, lblstack, 325); 64 | 65 | % ---------------------------------------------------------------------------------------- 66 | %% Augment the data, generating 16 versions and save 67 | % ---------------------------------------------------------------------------------------- 68 | 69 | %imshow(labels_arr(:,:,1)) 70 | %data_arr=permute(imgstack,[3 1 2]); %from tiff to h5 /100*1000*1000 71 | %labels_arr=permute(lblstack,[3 1 2]); %from tiff to h5 /100*1000*1000 72 | %[outdir,name,ext] = fileparts(save_file); 73 | 74 | img_v1 =single(imgstack); 75 | lb_v1 =single(lblstack); 76 | 77 | d_details = '/data'; 78 | l_details = '/label'; 79 | if ~exist(outdir,'dir'), mkdir(outdir); end 80 | ext = '.h5'; 81 | 82 | disp('Augmenting training data 1-8 and 9-16'); 83 | for i=1:8 84 | %% v1-8 85 | [img,lb]=augment_data(img_v1,lb_v1,i); 86 | 87 | %% v9-16 88 | inv_img = flip(img,3); %var 9 -16 89 | inv_lb = flip(lb,3); %var 9 -16 90 | 91 | %% v1-8 92 | img=permute(img,[3 1 2]); %from tiff to h5 /100*1000*1000 93 | lb=permute(lb,[3 1 2]); %from tiff to h5 /100*1000*1000 94 | filename = fullfile(outdir, sprintf('training_full_stacks_v%s%s', num2str(i), ext)); 95 | fprintf('Saving: %s\n', filename); 96 | h5write(filename,d_details,img); 97 | h5write(filename,l_details,lb); 98 | clear img lb 99 | %% v9-16 100 | inv_img = permute(inv_img,[3 1 2]); %from tiff to h5 /100*1000*1000 101 | inv_lb = permute(inv_lb,[3 1 2]); %from tiff to h5 /100*1000*1000 102 | filename = fullfile(outdir, sprintf('training_full_stacks_v%s%s', num2str(i+8), ext)); 103 | fprintf('Saving: %s\n', 
filename); 104 | h5write(filename,d_details,inv_img); 105 | h5write(filename,l_details,inv_lb); 106 | clear inv_img inv_lb 107 | end 108 | 109 | 110 | % ---------------------------------------------------------------------------------------- 111 | %% Completed 112 | % ---------------------------------------------------------------------------------------- 113 | 114 | toc 115 | disp('-> Training data augmentation completed'); 116 | fprintf('Training data stored in %s\n', outdir); 117 | fprintf('For training your model please run runtraining.sh %s \n', outdir); 118 | -------------------------------------------------------------------------------- /PreprocessValidation.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | % 3 | % PreprocessValidation 4 | % Makes hdf5 validation file from raw images and corresponding labels 5 | % 6 | % Syntax : PreprocessValidation ~/validation/images/ ~/validation/labels/ ~/validation/combined 7 | % 8 | % 9 | %---------------------------------------------------------------------------------------- 10 | %% PreprocessValidation for Deep3M -- NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 04/2018 11 | %---------------------------------------------------------------------------------------- 12 | % 13 | % Runtime: <1 min 14 | % 15 | 16 | 17 | % ---------------------------------------------------------------------------------------- 18 | %% Initialize 19 | % ---------------------------------------------------------------------------------------- 20 | 21 | disp('Starting Validation data Preprocessing'); 22 | pkg load hdf5oct 23 | pkg load image 24 | script_dir = fileparts(make_absolute_filename(program_invocation_name())); 25 | addpath(genpath(script_dir)); 26 | 27 | arg_list = argv (); 28 | if numel(arg_list)<3; disp('Use -> PreprocessValidation /validation/images/ /validation/labels/ /validation/combined'); return; end 29 | 30 | tic 31 | validation_img_path = arg_list{1}; 32 | disp('Validation Image Path:');disp(validation_img_path); 33 | label_img_path = arg_list{2}; 34 | disp('Validation Label Path:');disp(label_img_path); 35 | outdir = arg_list{3}; 36 | disp('Output Path:');disp(outdir); 37 | 38 | % ---------------------------------------------------------------------------------------- 39 | %% Load images 40 | % ---------------------------------------------------------------------------------------- 41 | 42 | disp('Loading:'); 43 | disp(validation_img_path); 44 | [imgstack] = imageimporter(validation_img_path); 45 | disp('Verifying images'); 46 | checkpoint_nobinary(imgstack); 47 | 48 | % ---------------------------------------------------------------------------------------- 49 | %% Load labels 50 | % ---------------------------------------------------------------------------------------- 51 | 52 | disp('Loading:'); 53 | disp(label_img_path); 54 | [lblstack] = imageimporter(label_img_path); 55 | disp('Verifying labels'); 56 | checkpoint_isbinary(lblstack); 57 | 58 | % ---------------------------------------------------------------------------------------- 59 | %% Convert and save 60 | % ---------------------------------------------------------------------------------------- 61 | 62 | img_v1 =single(imgstack); 63 | lb_v1 =single(lblstack); 64 | 65 | d_details = '/data'; 66 | l_details = '/label'; 67 | if ~exist(outdir,'dir'), mkdir(outdir); end 68 | ext = '.h5'; 69 | 70 | img=permute(img_v1,[3 1 2]); %from tiff to h5 /100*1000*1000 71 | lb=permute(lb_v1,[3 1 2]); %from tiff to h5 /100*1000*1000 72 | filename = 
fullfile(outdir, sprintf('validation_stack_v%s%s', num2str(1), ext)); 73 | fprintf('Saving: %s\n', filename); 74 | h5write(filename,d_details,img); 75 | h5write(filename,l_details,lb); 76 | clear img lb 77 | 78 | % ---------------------------------------------------------------------------------------- 79 | %% Completed 80 | % ---------------------------------------------------------------------------------------- 81 | 82 | toc 83 | fprintf('Validation data stored in %s\n', outdir); 84 | -------------------------------------------------------------------------------- /StartPostprocessing.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | % StartPostprocessing 3 | % Syntax: StartPostprocessing /example/seg1/predict/ /example/seg2/predict/ 4 | % 5 | % Runtime estimate: 2 min for a 1024x1024x100 dataset 6 | % 7 | %------------------------------------------------------------------ 8 | %% StartPostprocessing -- NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 10/2017 9 | %------------------------------------------------------------------ 10 | arg_list = argv (); 11 | script_dir = fileparts(make_absolute_filename(program_invocation_name())); 12 | addpath(genpath(script_dir)); 13 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep()))); 14 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep(),'functions'))); 15 | addpath(genpath(strcat(script_dir,filesep(),'scripts',filesep(),'post_processing'))); 16 | 17 | pkg load hdf5oct 18 | pkg load image 19 | 20 | if numel(arg_list) < 1 21 | disp('Please specify at least 1 input directory'); 22 | disp('Use -> StartPostprocessing /example/seg1/predict/ /example/seg2/predict/'); 23 | return 24 | end 25 | tic 26 | 27 | %% Enable batch processing of all predictions 28 | disp('Starting to merge and de-augment data'); 29 | fprintf('Starting to process %d datasets \n',(numel(arg_list))); 30 | for i = 1:numel(arg_list) 31 | inputdir = arg_list{i}; 32 | 33 | if ~isdir(inputdir) 34 | error(sprintf('%s is not an input directory',inputdir)); 35 | return 36 | end 37 | 38 | fprintf('Generating Average Prediction of %s\n',inputdir); 39 | average_prob_folder = merge_16_probs_v3(inputdir); 40 | 41 | end 42 | fprintf('Elapsed runtime for data de-augmentation: %04d seconds.\n', round(toc)); 43 | 44 | %% Run Merge Predictions now 45 | %MergePredictions 46 | 47 | %% Run 3D Watershed if required 48 | %if regexpi(arg_list{end},'water','once') 49 | %readvars =1; 50 | %Run_3DWatershed_onPredictions 51 | %end 52 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 1.6.3rc3 2 | -------------------------------------------------------------------------------- /aws/delete_keypair.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import datetime 7 | from datetime import tzinfo, timedelta 8 | import json 9 | from dateutil.tz import tzutc 10 | import boto3 11 | from ipify import get_ip 12 | 13 | 14 | def _parse_arguments(desc, theargs): 15 | """Parses command line arguments using argparse 16 | """ 17 | help_formatter = argparse.RawDescriptionHelpFormatter 18 | parser = argparse.ArgumentParser(description=desc, 19 | formatter_class=help_formatter) 20 | parser.add_argument('name', help='name of keypair to delete') 21 | parser.add_argument('--region', default='us-east-2', 22 | help="Region to 
use" + 23 | "(default us-east-2)") 24 | parser.add_argument('--profile', 25 | default=None, 26 | help='AWS profile to load from credentials. default none') 27 | 28 | return parser.parse_args(theargs) 29 | 30 | 31 | def _delete_keypair(theargs): 32 | """Delete key pair by name 33 | """ 34 | if theargs.profile is not None: 35 | boto3.setup_default_session(profile_name=theargs.profile) 36 | 37 | ec2 = boto3.client('ec2', region_name=theargs.region) 38 | 39 | resp = ec2.delete_key_pair(KeyName=theargs.name) 40 | 41 | return str(resp) 42 | 43 | 44 | def main(arglist): 45 | desc = """ 46 | Gets list of users 47 | """ 48 | theargs = _parse_arguments(desc, sys.argv[1:]) 49 | sys.stdout.write('Contacting AWS: \n') 50 | sys.stdout.write(_delete_keypair(theargs)) 51 | 52 | 53 | if __name__ == '__main__': # pragma: no cover 54 | sys.exit(main(sys.argv)) 55 | -------------------------------------------------------------------------------- /aws/delete_stack.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import boto3 7 | from ipify import get_ip 8 | 9 | 10 | def _parse_arguments(desc, theargs): 11 | """Parses command line arguments using argparse 12 | """ 13 | help_formatter = argparse.RawDescriptionHelpFormatter 14 | parser = argparse.ArgumentParser(description=desc, 15 | formatter_class=help_formatter) 16 | parser.add_argument('stackid', 17 | help='id of stack') 18 | parser.add_argument('--region', default='us-east-2', 19 | help="Region to use" + 20 | "(default us-east-2)") 21 | parser.add_argument('--profile', 22 | default=None, 23 | help='AWS profile to load from credentials. default none') 24 | 25 | return parser.parse_args(theargs) 26 | 27 | 28 | def _delete_stack(theargs): 29 | """Launches cloud formation 30 | """ 31 | if theargs.profile is not None: 32 | boto3.setup_default_session(profile_name=theargs.profile) 33 | 34 | cloudform = boto3.client('cloudformation', region_name=theargs.region) 35 | 36 | resp = cloudform.delete_stack( 37 | StackName=theargs.stackid 38 | ) 39 | """ 40 | Example successful response: 41 | """ 42 | return str(resp) 43 | 44 | 45 | def main(arglist): 46 | desc = """ 47 | Deletes CloudFormation Stack 48 | """ 49 | theargs = _parse_arguments(desc, sys.argv[1:]) 50 | sys.stdout.write('Contacting AWS: \n') 51 | sys.stdout.write(_delete_stack(theargs)) 52 | 53 | 54 | if __name__ == '__main__': # pragma: no cover 55 | sys.exit(main(sys.argv)) 56 | -------------------------------------------------------------------------------- /aws/describe_stack.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import boto3 7 | from ipify import get_ip 8 | 9 | 10 | def _parse_arguments(desc, theargs): 11 | """Parses command line arguments using argparse 12 | """ 13 | help_formatter = argparse.RawDescriptionHelpFormatter 14 | parser = argparse.ArgumentParser(description=desc, 15 | formatter_class=help_formatter) 16 | parser.add_argument('stackid', 17 | help='id of stack') 18 | parser.add_argument('--region', default='us-east-2', 19 | help="Region to use" + 20 | "(default us-east-2)") 21 | parser.add_argument('--profile', 22 | default=None, 23 | help='AWS profile to load from credentials. 
default none') 24 | 25 | return parser.parse_args(theargs) 26 | 27 | 28 | def _describe_stack(theargs): 29 | """Launches cloud formation 30 | """ 31 | if theargs.profile is not None: 32 | boto3.setup_default_session(profile_name=theargs.profile) 33 | 34 | cloudform = boto3.client('cloudformation', region_name=theargs.region) 35 | 36 | resp = cloudform.describe_stacks( 37 | StackName=theargs.stackid 38 | ) 39 | """ 40 | Example successful response: 41 | {'ResponseMetadata': {'HTTPHeaders': {'content-length': '2990', 42 | 'content-type': 'text/xml', 43 | 'date': 'Fri, 09 Mar 2018 22:39:15 GMT', 44 | 'vary': 'Accept-Encoding', 45 | 'x-amzn-requestid': 'b31139ea-23ea-11e8-8b15-e51fdee770f9'}, 46 | 'HTTPStatusCode': 200, 47 | 'RequestId': 'b31139ea-23ea-11e8-8b15-e51fdee770f9', 48 | 'RetryAttempts': 0}, 49 | u'Stacks': [{u'CreationTime': datetime.datetime(2018, 3, 9, 22, 13, 16, 339000, tzinfo=tzutc()), 50 | u'Description': 'AWS CloudFormation Deep3m template. Creates an EC2 ubuntu instance off of a base Amazon Deep Learning AMI and installs necessary software to run Deep3M image segmentation. This template provides ssh access to the machine created. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.', 51 | u'DisableRollback': False, 52 | u'EnableTerminationProtection': False, 53 | u'NotificationARNs': [], 54 | u'Outputs': [{u'Description': 'InstanceId of the newly created EC2 instance', 55 | u'OutputKey': 'InstanceId', 56 | u'OutputValue': 'i-09330dc3e3869d2f5'}, 57 | {u'Description': 'Public IP address of the newly created EC2 instance', 58 | u'OutputKey': 'PublicIP', 59 | u'OutputValue': '18.217.233.149'}, 60 | {u'Description': 'Availability Zone of the newly created EC2 instance', 61 | u'OutputKey': 'AZ', 62 | u'OutputValue': 'us-east-2c'}, 63 | {u'Description': 'Public DNSName of the newly created EC2 instance', 64 | u'OutputKey': 'PublicDNS', 65 | u'OutputValue': 'ec2-18-217-233-149.us-east-2.compute.amazonaws.com'}], 66 | u'Parameters': [{u'ParameterKey': 'KeyName', 67 | u'ParameterValue': 'id_rsa'}, 68 | {u'ParameterKey': 'SSHLocation', 69 | u'ParameterValue': '1.2.3.4/32'}, 70 | {u'ParameterKey': 'GPUInstanceType', 71 | u'ParameterValue': 'p2.xlarge'}, 72 | {u'ParameterKey': 'GPUDiskSize', 73 | u'ParameterValue': '100'}], 74 | u'RollbackConfiguration': {u'RollbackTriggers': []}, 75 | u'StackId': 'arn:aws:cloudformation:us-east-2:063349100599:stack/banana/10b8e390-23e7-11e8-af27-503f3157b0d1', 76 | u'StackName': 'banana', 77 | u'StackStatus': 'CREATE_COMPLETE', 78 | u'Tags': []}]} 79 | 80 | """ 81 | # import pprint 82 | # pp = pprint.PrettyPrinter() 83 | # pp.pprint(resp) 84 | str_res = '' 85 | 86 | for stack in resp['Stacks']: 87 | str_res = 'Name: ' + stack['StackName'] + '\n' 88 | str_res += 'Status: ' + stack['StackStatus'] + '\n' 89 | if 'CREATE_COMPLETE' in stack['StackStatus']: 90 | for output in stack['Outputs']: 91 | if output['OutputKey'] == 'PublicDNS': 92 | str_res += 'PublicDNS: ' + output['OutputValue'] + '\n' 93 | return '\n' + str_res + '\n' 94 | 95 | 96 | def main(arglist): 97 | desc = """ 98 | Describes CloudFormation Stack 99 | """ 100 | theargs = _parse_arguments(desc, sys.argv[1:]) 101 | sys.stdout.write('Contacting AWS: \n') 102 | sys.stdout.write(_describe_stack(theargs)) 103 | 104 | 105 | if __name__ == '__main__': # pragma: no cover 106 | sys.exit(main(sys.argv)) 107 | -------------------------------------------------------------------------------- 
/aws/find_running_cloudformations.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import boto3 7 | import pprint 8 | 9 | def _parse_arguments(desc, theargs): 10 | """Parses command line arguments using argparse 11 | """ 12 | help_formatter = argparse.RawDescriptionHelpFormatter 13 | parser = argparse.ArgumentParser(description=desc, 14 | formatter_class=help_formatter) 15 | parser.add_argument('--namefilter', 16 | default='Deep Learning AMI with Source Code v2.0', 17 | help='Find only AMI image with this string in name' + 18 | ' (default: Deep Learning AMI with Source ' + 19 | 'Code v2.0') 20 | parser.add_argument('--profile', 21 | default=None, 22 | help='AWS profile to load from credentials. default none') 23 | parser.add_argument('--region', default=None, 24 | help='Only search in this specific region') 25 | return parser.parse_args(theargs) 26 | 27 | def _get_running_cloudformations(theargs): 28 | """Finds all running cloudformations 29 | """ 30 | mapstr = '' 31 | if theargs.profile is not None: 32 | boto3.setup_default_session(profile_name=theargs.profile) 33 | ec2 = boto3.client('ec2', region_name='us-west-2') 34 | response = ec2.describe_regions() 35 | for region in response['Regions']: 36 | rname = region['RegionName'] 37 | if theargs.region is not None: 38 | if rname != theargs.region: 39 | continue 40 | sys.stdout.write('Running cloudformation query in region: ' + rname + '\n') 41 | ec2 = boto3.client('cloudformation', region_name=rname) 42 | mapstr += '\nRegion: ' + rname + '\n' 43 | respy = ec2.describe_stacks() 44 | # pp = pprint.PrettyPrinter(indent=4) 45 | 46 | for stack in respy['Stacks']: 47 | # pp.pprint(stack) 48 | mapstr += ('\n\t\tName: ' + stack['StackName'] + '\n' + 49 | '\t\tStackId: ' + stack['StackId'] + '\n' + 50 | '\t\tStackStatus: ' + stack['StackStatus'] + '\n' + 51 | '\t\tLaunch Date: ' + str(stack['CreationTime']) + '\n') 52 | if 'CREATE_COMPLETE' in stack['StackStatus']: 53 | for output in stack['Outputs']: 54 | if output['OutputKey'] == 'PublicDNS': 55 | mapstr += '\t\tPublicDNS: ' + output['OutputValue'] + '\n' 56 | 57 | """ 58 | for entry in reso['Instances']: 59 | mapstr += ('\t\t' + entry['PublicDnsName'] + '\n' + 60 | '\t\tLaunch Date: ' + str(entry['LaunchTime']) + 61 | '\n' + 62 | '\t\tId: ' + entry['InstanceId'] + '\n' + 63 | '\t\tType: ' + entry['InstanceType'] + '\n' + 64 | '\t\tState: ' + entry['State']['Name'] + '\n\n') 65 | """ 66 | sys.stdout.write('\nResults:\n\n') 67 | return mapstr 68 | 69 | 70 | def main(arglist): 71 | desc = """ 72 | This script uses AWS boto library to find running 73 | Cloudformations instances in any region. 
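    Example invocation (profile and region values are hypothetical):
        ./find_running_cloudformations.py --profile demo --region us-east-2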
74 | """ 75 | theargs = _parse_arguments(desc, sys.argv[1:]) 76 | sys.stdout.write('Querying AWS: \n') 77 | sys.stdout.write(_get_running_cloudformations(theargs)) 78 | 79 | 80 | if __name__ == '__main__': # pragma: no cover 81 | sys.exit(main(sys.argv)) 82 | -------------------------------------------------------------------------------- /aws/find_running_ec2_instances.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import boto3 7 | 8 | 9 | def _parse_arguments(desc, theargs): 10 | """Parses command line arguments using argparse 11 | """ 12 | help_formatter = argparse.RawDescriptionHelpFormatter 13 | parser = argparse.ArgumentParser(description=desc, 14 | formatter_class=help_formatter) 15 | parser.add_argument("--ownerid", default='898082745236', 16 | help="Owner id to pass to search " + 17 | "(default 898082745236)") 18 | parser.add_argument('--namefilter', 19 | default='Deep Learning AMI with Source Code v2.0', 20 | help='Find only AMI image with this string in name' + 21 | ' (default: Deep Learning AMI with Source ' + 22 | 'Code v2.0') 23 | parser.add_argument('--profile', 24 | default=None, 25 | help='AWS profile to load from credentials. default none') 26 | parser.add_argument('--region', default=None, 27 | help='Only search in this specific region') 28 | return parser.parse_args(theargs) 29 | 30 | def _get_running_ec2_instances(theargs): 31 | """Returns a string containing any ec2 instances 32 | """ 33 | mapstr = '' 34 | if theargs.profile is not None: 35 | boto3.setup_default_session(profile_name=theargs.profile) 36 | ec2 = boto3.client('ec2', region_name='us-west-2') 37 | 38 | response = ec2.describe_regions() 39 | for region in response['Regions']: 40 | rname = region['RegionName'] 41 | if theargs.region is not None: 42 | if rname != theargs.region: 43 | continue 44 | sys.stdout.write('Running ec2 query in region: ' + rname + '\n') 45 | ec2 = boto3.client('ec2', region_name=rname) 46 | mapstr += 'Region: ' + rname + '\n' 47 | respy = ec2.describe_instances() 48 | for reso in respy['Reservations']: 49 | for entry in reso['Instances']: 50 | namey = '' 51 | try: 52 | for keyval in entry['Tags']: 53 | if keyval['Key'] == 'Name': 54 | namey = keyval['Value'] 55 | break 56 | except KeyError: 57 | pass 58 | 59 | mapstr += ('\t\t' + entry['PublicDnsName'] + '\n' + 60 | '\t\tLaunch Date: ' + str(entry['LaunchTime']) + 61 | '\n' + 62 | '\t\tId: ' + entry['InstanceId'] + '\n' + 63 | '\t\tType: ' + entry['InstanceType'] + '\n' + 64 | '\t\tName: ' + namey + '\n' + 65 | '\t\tState: ' + entry['State']['Name'] + '\n\n') 66 | sys.stdout.write('\nResults:\n\n') 67 | return mapstr 68 | 69 | 70 | def main(arglist): 71 | desc = """ 72 | This script uses AWS boto library to find running 73 | EC2 instances in any region. 
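    Example invocation (profile and region values are hypothetical):
        ./find_running_ec2_instances.py --profile demo --region us-west-2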
74 | """ 75 | theargs = _parse_arguments(desc, sys.argv[1:]) 76 | sys.stdout.write('Querying AWS: \n') 77 | sys.stdout.write(_get_running_ec2_instances(theargs)) 78 | 79 | 80 | if __name__ == '__main__': # pragma: no cover 81 | sys.exit(main(sys.argv)) 82 | -------------------------------------------------------------------------------- /aws/get_iam_users.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import datetime 7 | from datetime import tzinfo, timedelta 8 | import json 9 | from dateutil.tz import tzutc 10 | import boto3 11 | from ipify import get_ip 12 | 13 | 14 | def _parse_arguments(desc, theargs): 15 | """Parses command line arguments using argparse 16 | """ 17 | help_formatter = argparse.RawDescriptionHelpFormatter 18 | parser = argparse.ArgumentParser(description=desc, 19 | formatter_class=help_formatter) 20 | parser.add_argument('--region', default='us-east-2', 21 | help="Region to use" + 22 | "(default us-east-2)") 23 | parser.add_argument('--profile', 24 | default=None, 25 | help='AWS profile to load from credentials. default none') 26 | 27 | return parser.parse_args(theargs) 28 | 29 | 30 | def _get_iam_users(theargs): 31 | """Launches cloud formation 32 | """ 33 | if theargs.profile is not None: 34 | boto3.setup_default_session(profile_name=theargs.profile) 35 | 36 | iam = boto3.client('iam', region_name=theargs.region) 37 | 38 | resp = iam.list_users() 39 | 40 | #import pprint 41 | #pp = pprint.PrettyPrinter() 42 | #pp.pprint(resp) 43 | result = 'User name,Groups,Policies,Last activity\n' 44 | for user in resp['Users']: 45 | pass_last_used = 'Never' 46 | try: 47 | if isinstance(user['PasswordLastUsed'], datetime.datetime): 48 | time_del = datetime.datetime.now(user['PasswordLastUsed'].tzinfo) - user['PasswordLastUsed'] 49 | pass_last_used_in_days = str(int(time_del.total_seconds() / 3600 / 24)) + ' days' 50 | except KeyError: 51 | pass 52 | 53 | user_groups = iam.list_groups_for_user(UserName=user['UserName']) 54 | grp_str = '' 55 | try: 56 | for group in user_groups['Groups']: 57 | grp_str += group['GroupName'] + ' ' 58 | except KeyError: 59 | grp_str = 'NA' 60 | 61 | user_policy = iam.list_attached_user_policies(UserName=user['UserName']) 62 | 63 | user_p_list = '' 64 | for policy in user_policy['AttachedPolicies']: 65 | user_p_list += policy['PolicyName'] + ' ' 66 | 67 | if len(grp_str) is 0: 68 | grp_str = 'None' 69 | 70 | result += user['UserName'] + ',' + grp_str + ',' + user_p_list + ',' + pass_last_used_in_days + '\n' 71 | 72 | """ 73 | Example successful response: 74 | 75 | """ 76 | 77 | return str(result) 78 | 79 | 80 | def main(arglist): 81 | desc = """ 82 | Gets list of users 83 | """ 84 | theargs = _parse_arguments(desc, sys.argv[1:]) 85 | sys.stdout.write('Contacting AWS: \n') 86 | sys.stdout.write(_get_iam_users(theargs)) 87 | 88 | 89 | if __name__ == '__main__': # pragma: no cover 90 | sys.exit(main(sys.argv)) 91 | -------------------------------------------------------------------------------- /aws/get_keypairs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import datetime 7 | from datetime import tzinfo, timedelta 8 | import json 9 | from dateutil.tz import tzutc 10 | import boto3 11 | from ipify import get_ip 12 | 13 | 14 | def _parse_arguments(desc, theargs): 15 | """Parses command line arguments using argparse 16 | 
""" 17 | help_formatter = argparse.RawDescriptionHelpFormatter 18 | parser = argparse.ArgumentParser(description=desc, 19 | formatter_class=help_formatter) 20 | parser.add_argument('--region', default='us-east-2', 21 | help="Region to use" + 22 | "(default us-east-2)") 23 | parser.add_argument('--profile', 24 | default=None, 25 | help='AWS profile to load from credentials. default none') 26 | 27 | return parser.parse_args(theargs) 28 | 29 | 30 | def _get_keypairs(theargs): 31 | """Get list of key pairs 32 | """ 33 | if theargs.profile is not None: 34 | boto3.setup_default_session(profile_name=theargs.profile) 35 | 36 | ec2 = boto3.client('ec2', region_name=theargs.region) 37 | 38 | resp = ec2.describe_key_pairs() 39 | kp_str = 'KeyPairName\n' 40 | for kp in resp['KeyPairs']: 41 | kp_str += kp['KeyName'] + '\n' 42 | 43 | return kp_str 44 | 45 | 46 | def main(arglist): 47 | desc = """ 48 | Gets list of users 49 | """ 50 | theargs = _parse_arguments(desc, sys.argv[1:]) 51 | sys.stdout.write('Contacting AWS: \n') 52 | sys.stdout.write(_get_keypairs(theargs)) 53 | 54 | 55 | if __name__ == '__main__': # pragma: no cover 56 | sys.exit(main(sys.argv)) 57 | -------------------------------------------------------------------------------- /aws/get_updated_ami_mappings.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import os 5 | import argparse 6 | import boto3 7 | 8 | 9 | def _parse_arguments(desc, theargs): 10 | """Parses command line arguments using argparse 11 | """ 12 | help_formatter = argparse.RawDescriptionHelpFormatter 13 | parser = argparse.ArgumentParser(description=desc, 14 | formatter_class=help_formatter) 15 | parser.add_argument("--ownerid", default='898082745236', 16 | help="Owner id to pass to search " + 17 | "(default 898082745236)") 18 | parser.add_argument('--namefilter', 19 | default='Deep Learning AMI with Source Code Ubuntu v5.0', 20 | help='Find only AMI image with this string in name' + 21 | ' (default: Deep Learning AMI with Source Code)' + 22 | ' Ubuntu v5.0') 23 | return parser.parse_args(theargs) 24 | 25 | def _get_ami_mapping(theargs): 26 | """Returns a string containing ami mapping 27 | """ 28 | mapstr = '' 29 | ec2 = boto3.client('ec2') 30 | response = ec2.describe_regions() 31 | for region in response['Regions']: 32 | rname = region['RegionName'] 33 | sys.stdout.write('Running query in region: ' + rname + '\n') 34 | ec2 = boto3.client('ec2', region_name=rname) 35 | resp = ec2.describe_images(Owners=[theargs.ownerid], 36 | Filters=[{'Name': 'name', 37 | 'Values': [theargs.namefilter]}]) 38 | for image in resp['Images']: 39 | mapstr += (' "' + rname + '" : {"AMI" : "' + 40 | image['ImageId'] + '"},\n') 41 | sys.stdout.write('\n\n Below is json fragment that can ' + 42 | 'go in "RegionMap"\n\n') 43 | return mapstr 44 | 45 | 46 | def main(arglist): 47 | desc = """ 48 | This script uses AWS boto library to query for AMI 49 | images that match the owner and name filter 50 | passed in via --ownerid and --namefilter flags for 51 | this tool. 
-------------------------------------------------------------------------------- /aws/get_updated_ami_mappings.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys
4 | import os
5 | import argparse
6 | import boto3
7 |
8 |
9 | def _parse_arguments(desc, theargs):
10 | """Parses command line arguments using argparse
11 | """
12 | help_formatter = argparse.RawDescriptionHelpFormatter
13 | parser = argparse.ArgumentParser(description=desc,
14 | formatter_class=help_formatter)
15 | parser.add_argument("--ownerid", default='898082745236',
16 | help="Owner id to pass to search " +
17 | "(default 898082745236)")
18 | parser.add_argument('--namefilter',
19 | default='Deep Learning AMI with Source Code Ubuntu v5.0',
20 | help='Find only AMI image with this string in name' +
21 | ' (default: Deep Learning AMI with Source Code' +
22 | ' Ubuntu v5.0)')
23 | return parser.parse_args(theargs)
24 |
25 | def _get_ami_mapping(theargs):
26 | """Returns a string containing ami mapping
27 | """
28 | mapstr = ''
29 | ec2 = boto3.client('ec2')
30 | response = ec2.describe_regions()
31 | for region in response['Regions']:
32 | rname = region['RegionName']
33 | sys.stdout.write('Running query in region: ' + rname + '\n')
34 | ec2 = boto3.client('ec2', region_name=rname)
35 | resp = ec2.describe_images(Owners=[theargs.ownerid],
36 | Filters=[{'Name': 'name',
37 | 'Values': [theargs.namefilter]}])
38 | for image in resp['Images']:
39 | mapstr += (' "' + rname + '" : {"AMI" : "' +
40 | image['ImageId'] + '"},\n')
41 | sys.stdout.write('\n\n Below is a json fragment that can ' +
42 | 'go in "RegionMap"\n\n')
43 | return mapstr
44 |
45 |
46 | def main(arglist):
47 | desc = """
48 | This script uses AWS boto library to query for AMI
49 | images that match the owner and name filter
50 | passed in via --ownerid and --namefilter flags for
51 | this tool. The output is a JSON fragment that can
52 | be put in the "Mappings" => "RegionMap" section
53 | """
54 | theargs = _parse_arguments(desc, sys.argv[1:])
55 | sys.stdout.write('Querying AWS: \n')
56 | sys.stdout.write(_get_ami_mapping(theargs))
57 |
58 |
59 | if __name__ == '__main__': # pragma: no cover
60 | sys.exit(main(sys.argv))
61 |
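A usage sketch (no flags are strictly required; the defaults above search owner 898082745236 for the Ubuntu v5.0 deep learning AMI in every region, which can take a minute or two):

    python aws/get_updated_ami_mappings.py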
-------------------------------------------------------------------------------- /aws/launch_cdeep3m.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys
4 | import os
5 | import argparse
6 | import boto3
7 | import time
8 | from ipify import get_ip
9 |
10 |
11 | def _parse_arguments(desc, theargs):
12 | """Parses command line arguments using argparse
13 | """
14 | help_formatter = argparse.RawDescriptionHelpFormatter
15 | parser = argparse.ArgumentParser(description=desc,
16 | formatter_class=help_formatter)
17 | parser.add_argument('--template', required=True,
18 | help='CloudFormation template file to use')
19 | parser.add_argument('--region', default='us-east-2',
20 | help="Region to use " +
21 | "(default us-east-2)")
22 | parser.add_argument('--name', default='USERNAMEstack',
23 | help='Stack name to use')
24 | parser.add_argument('--profile', default=None,
25 | help='AWS profile to load from credentials. default none')
26 | # parser.add_argument('--cdeep3mversion', default='0.15.2',
27 | #                     help='Version of CDeep3M to launch (default 0.15.2)')
28 | parser.add_argument('--keypairname', default='id_rsa',
29 | help='AWS EC2 KeyPair Name')
30 | parser.add_argument('--wait', action='store_true',
31 | help='If set wait for cloudformation to complete')
32 | parser.add_argument('--instancetype', default='p3.2xlarge',
33 | choices=['p2.xlarge', 'p3.2xlarge', 'p3.8xlarge', 'p3.16xlarge'],
34 | help='GPU Instance type to launch (default p3.2xlarge)')
35 | parser.add_argument('--disksize', default='100',
36 | help='GPU Disk Size in gigabytes (default 100)')
37 | parser.add_argument('--sshlocation', default='',
38 | help='ip4 CIDR to denote ip address(es) to allow '
39 | 'ssh access to GPU EC2 instance. (default is ip '
40 | 'address of machine running this script)')
41 | parser.add_argument('--dataseturl', default='',
42 | help='url of file to download during initialization of ec2 instance')
43 | return parser.parse_args(theargs)
44 |
45 |
46 | def _launch_cloudformation(theargs):
47 | """Launches cloud formation
48 | """
49 | if theargs.profile is not None:
50 | boto3.setup_default_session(profile_name=theargs.profile)
51 |
52 | cloudform = boto3.client('cloudformation', region_name=theargs.region)
53 | template = theargs.template
54 | with open(template, 'r') as f:
55 | template_data = f.read()
56 |
57 | if theargs.sshlocation is None or theargs.sshlocation == '':
58 | theargs.sshlocation = str(get_ip()) + '/32'
59 |
60 | params = [
61 | {
62 | 'ParameterKey': 'KeyName',
63 | 'ParameterValue': theargs.keypairname
64 | },
65 | {
66 | 'ParameterKey': 'GPUInstanceType',
67 | 'ParameterValue': theargs.instancetype
68 | },
69 | {
70 | 'ParameterKey': 'GPUDiskSize',
71 | 'ParameterValue': theargs.disksize
72 | },
73 | {
74 | 'ParameterKey': 'SSHLocation',
75 | 'ParameterValue': theargs.sshlocation
76 | }
77 | ]
78 |
79 | if theargs.dataseturl != '':
80 | params.append({
81 | 'ParameterKey': 'DatasetURL',
82 | 'ParameterValue': theargs.dataseturl
83 | })
84 |
85 |
86 | tags = [
87 | {
88 | 'Key': 'Name',
89 | 'Value': theargs.name
90 | }
91 | ]
92 |
93 | resp = cloudform.create_stack(
94 | StackName=theargs.name,
95 | TemplateBody=template_data,
96 | Parameters=params,
97 | TimeoutInMinutes=25,
98 | Tags=tags
99 | )
100 | """
101 | Example successful response:
102 | {u'StackId': 'arn:aws:cloudformation:us-east-2:063349100599:stack/chris-autolaunch/7d639570-20c3-11e8-80ea-50a68a270856',
103 | 'ResponseMetadata': {'RetryAttempts': 0,
104 | 'HTTPStatusCode': 200,
105 | 'RequestId': '7d5bcdb5-20c3-11e8-9b7f-cf77c73ccdc2',
106 | 'HTTPHeaders': {'x-amzn-requestid':
107 | '7d5bcdb5-20c3-11e8-9b7f-cf77c73ccdc2',
108 | 'date': 'Mon, 05 Mar 2018 22:21:02 GMT',
109 | 'content-length': '386',
110 | 'content-type': 'text/xml'}
111 | }
112 | }
113 | """
114 | return resp
115 |
116 |
117 | def _wait_for_stack(stackid, theargs):
118 | cloudform = boto3.client('cloudformation', region_name=theargs.region)
119 | dns = _is_stack_complete(stackid, cloudform)
120 | while dns is None:
121 | sys.stdout.write('.')
122 | sys.stdout.flush()
123 | time.sleep(30)
124 | dns = _is_stack_complete(stackid, cloudform)
125 | sys.stdout.write('\n')
126 | time.sleep(30)
127 | return dns
128 |
129 |
130 | def _is_stack_complete(stackid, cloudform):
131 | """Waits for stack to launch"""
132 |
133 | resp = cloudform.describe_stacks(StackName=stackid)
134 | for stack in resp['Stacks']:
135 | if 'CREATE_COMPLETE' in stack['StackStatus']:
136 | for output in stack['Outputs']:
137 | if output['OutputKey'] == 'PublicDNS':
138 | return output['OutputValue']
139 | if 'CREATE_IN_PROGRESS' not in stack['StackStatus']:
140 | return 'Error, stack status: ' + str(stack['StackStatus'])
141 | return None
142 |
143 |
144 | def main(arglist):
145 | desc = """
146 | Launches CloudFormation template
147 | """
148 | theargs = _parse_arguments(desc, sys.argv[1:])
149 | sys.stdout.write('Contacting AWS: \n')
150 | res = _launch_cloudformation(theargs)
151 | sys.stdout.write(str(res))
152 | sys.stdout.flush()
153 | if theargs.wait is True:
154 | dns = _wait_for_stack(res['StackId'], theargs)
155 | sys.stdout.write('\nStack created, DNS => ' + str(dns) + '\n')
156 |
157 |
158 | if __name__ == '__main__': # pragma: no cover
159 | sys.exit(main(sys.argv))
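A usage sketch (stack and key pair names are hypothetical; the template path refers to aws/basic_cloudformation.json from this repository, and --wait blocks until the stack reports CREATE_COMPLETE):

    python aws/launch_cdeep3m.py --template aws/basic_cloudformation.json \
        --keypairname mykeypair --name mystack --wait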
160 |
-------------------------------------------------------------------------------- /aws/motd: --------------------------------------------------------------------------------
1 |
2 |
3 |
4 | =======================================
5 | CDeep3M - @@VERSION@@
6 | =======================================
7 |
8 | CDeep3M - Plug-and-Play cloud-based deep learning for image segmentation of light, electron and X-ray microscopy
9 |
10 | If you are using CDeep3M for research please reference:
11 | https://doi.org/10.1101/353425
12 |
13 |
14 | =======================================
15 | Quick instructions:
16 |
17 | # 1) Training:
18 |
19 | cd ~
20 | PreprocessTrainingData.m ~/cdeep3m/mito_testsample/training/images/ ~/cdeep3m/mito_testsample/training/labels/ ~/mito_testaugtrain
21 | runtraining.sh --numiterations 50000 ~/mito_testaugtrain ~/train_out
22 |
23 | # 2) Prediction
24 | runprediction.sh --augspeed 10 --models 5fm ~/train_out/ ~/cdeep3m/mito_testsample/testset/ ~/predictout
25 | # results are in ~/predictout/ensembled
26 | ls ~/predictout/ensembled
27 |
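# 3) Optional: plot training vs validation loss
#    (a sketch following the example in runvalidation.sh; assumes the
#    training run above wrote its caffe log to ~/train_out/1fm/log/out.log)
runvalidation.sh ~/train_out/1fm/log/out.log ~/train_out/1fm/log/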
28 |
29 | =======================================
30 |
31 | For further info go to:
32 | https://github.com/CRBS/cdeep3m
33 |
34 |
-------------------------------------------------------------------------------- /caffetrain.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | script_dir=`dirname "$0"`
4 | script_name=`basename $0`
5 | version="???"
6 |
7 | if [ -f "$script_dir/VERSION" ] ; then
8 | version=`cat $script_dir/VERSION`
9 | fi
10 |
11 | source "${script_dir}/commonfunctions.sh"
12 |
13 | numiterations="30000"
14 | gpu="all"
15 | base_lr="1e-02"
16 | power="0.8"
17 | momentum="0.9"
18 | weight_decay="0.0005"
19 | average_loss="16"
20 | lr_policy="poly"
21 | iter_size="8"
22 | snapshot_interval="2000"
23 |
24 | function usage()
25 | {
26 | echo "usage: $script_name [-h] [--numiterations NUMITERATIONS] [--gpu GPU]
27 | [--base_learn BASE_LR] [--power POWER]
28 | [--momentum MOMENTUM]
29 | [--weight_decay WEIGHT_DECAY]
30 | [--average_loss AVERAGE_LOSS]
31 | [--lr_policy POLICY] [--iter_size ITER_SIZE]
32 | [--snapshot_interval SNAPSHOT_INTERVAL]
33 | model trainoutdir
34 |
35 | Version: $version
36 |
37 | Runs caffe on CDeep3M model specified by model argument
38 | to perform training. The trained model will be stored in
39 | <trainoutdir>/<model>/trainedmodel directory
40 | Output from caffe will be redirected to <trainoutdir>/<model>/log/out.log
41 |
42 | For further information about parameters below please see:
43 | https://github.com/BVLC/caffe/wiki/Solver-Prototxt
44 |
45 |
46 | positional arguments:
47 | model The model to train, should be one of the following:
48 | 1fm, 3fm, 5fm
49 | trainoutdir Directory created by runtraining.sh containing
50 | output of training.
51 |
52 | optional arguments:
53 | -h, --help show this help message and exit
54 | --gpu Which GPU to use, can be a number ie 0 or 1 or
55 | all to use all GPUs (default $gpu)
56 | --base_learn Base learning rate (default $base_lr)
57 | --power Used in poly and sigmoid lr_policies. (default $power)
58 | --momentum Indicates how much of the previous weight will be
59 | retained in the new calculation. (default $momentum)
60 | --weight_decay Regularization factor penalizing large
61 | weights (default $weight_decay)
62 | --average_loss Number of iterations to use to average loss
63 | (default $average_loss)
64 | --lr_policy Learning rate policy (default $lr_policy)
65 | --iter_size Accumulate gradients across batches through the
66 | iter_size solver field. (default $iter_size)
67 | --snapshot_interval How often caffe should output a model and solverstate.
68 | (default $snapshot_interval)
69 | --numiterations Number of training iterations to run (default $numiterations)
70 |
71 | " 1>&2;
72 | exit 1;
73 | }
74 |
75 | TEMP=`getopt -o h --long "gpu:,numiterations:,base_learn:,power:,momentum:,weight_decay:,average_loss:,lr_policy:,iter_size:,snapshot_interval:" -n '$0' -- "$@"`
76 | eval set -- "$TEMP"
77 |
78 | while true ; do
79 | case "$1" in
80 | -h ) usage ;;
81 | --gpu ) gpu=$2 ; shift 2 ;;
82 | --numiterations ) numiterations=$2 ; shift 2 ;;
83 | --base_learn ) base_lr=$2 ; shift 2 ;;
84 | --power ) power=$2 ; shift 2 ;;
85 | --momentum ) momentum=$2 ; shift 2 ;;
86 | --weight_decay ) weight_decay=$2 ; shift 2 ;;
87 | --average_loss ) average_loss=$2 ; shift 2 ;;
88 | --lr_policy ) lr_policy=$2 ; shift 2 ;;
89 | --iter_size ) iter_size=$2 ; shift 2 ;;
90 | --snapshot_interval ) snapshot_interval=$2 ; shift 2 ;;
91 | --) shift ; break ;;
92 | esac
93 | done
94 |
95 |
96 | if [ $# -ne 2 ] ; then
97 | usage
98 | fi
99 |
100 | model=$1
101 |
102 | base_dir=$2
103 | model_dir="$base_dir/$model"
104 | log_dir="$model_dir/log"
105 |
106 |
107 | # update the solver.prototxt with numiterations value
108 | sed -i "s/^max_iter:.*/max_iter: $numiterations/g" "${model_dir}/solver.prototxt"
109 |
110 | if [ $? != 0 ] ; then
111 | echo "ERROR trying to update max_iter in $model_dir/solver.prototxt"
112 | exit 2
113 | fi
114 |
115 | # update solver.prototxt with base_lr value
116 | sed -i "s/^base_lr:.*/base_lr: $base_lr/g" "${model_dir}/solver.prototxt"
117 |
118 | # update solver.prototxt with power value
119 | sed -i "s/^power:.*/power: $power/g" "${model_dir}/solver.prototxt"
120 |
121 | # update solver.prototxt with momentum value
122 | sed -i "s/^momentum:.*/momentum: $momentum/g" "${model_dir}/solver.prototxt"
123 |
124 | # update solver.prototxt with weight_decay value
125 | sed -i "s/^weight_decay:.*/weight_decay: $weight_decay/g" "${model_dir}/solver.prototxt"
126 |
127 | # update solver.prototxt with average loss value
128 | sed -i "s/^average_loss:.*/average_loss: $average_loss/g" "${model_dir}/solver.prototxt"
129 |
130 | # update solver.prototxt with lr_policy value
131 | sed -i "s/^lr_policy:.*/lr_policy: \"$lr_policy\"/g" "${model_dir}/solver.prototxt"
132 |
133 | # update solver.prototxt with iter_size value
134 | sed -i "s/^iter_size:.*/iter_size: $iter_size/g" "${model_dir}/solver.prototxt"
135 |
136 | # update solver.prototxt with snapshot interval value
137 | sed -i "s/^snapshot:.*/snapshot: $snapshot_interval/g" "${model_dir}/solver.prototxt"
138 |
139 | if [ ! -d "$log_dir" ] ; then
140 | mkdir -p "$log_dir"
141 | if [ $? != 0 ] ; then
142 | echo "ERROR unable to make $log_dir directory"
143 | exit 3
144 | fi
145 | fi
146 |
147 | if [ ! -d "$model_dir/trainedmodel" ] ; then
148 | mkdir -p "$model_dir/trainedmodel"
149 | if [ $?
!= 0 ] ; then 150 | echo "ERROR unable to make $model_dir/trainedmodel directory" 151 | exit 4 152 | fi 153 | fi 154 | 155 | latest_iteration=$(get_latest_iteration "$model_dir/trainedmodel") 156 | 157 | snapshot_opts="" 158 | # we got a completed iteration lets start from that 159 | if [ ! "$latest_iteration" == "" ] ; then 160 | snap_file=`find "$model_dir/trainedmodel" -name "*${latest_iteration}.solverstate" -type f` 161 | snapshot_opts="--snapshot=$snap_file" 162 | echo "Resuming run from snapshot file: $snap_file" 163 | fi 164 | 165 | pushd "$model_dir" > /dev/null 166 | GLOG_log_dir=$log_dir caffe.bin train --solver=$model_dir/solver.prototxt --gpu $gpu $snapshot_opts > "${model_dir}/log/out.log" 2>&1 167 | exitcode=$? 168 | popd > /dev/null 169 | 170 | if [ $exitcode != 0 ] ; then 171 | echo "ERROR: caffe had a non zero exit code: $exitcode" 172 | fi 173 | 174 | exit $exitcode 175 | -------------------------------------------------------------------------------- /mito_testsample/testset/images.081.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/testset/images.081.png -------------------------------------------------------------------------------- /mito_testsample/testset/images.082.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/testset/images.082.png -------------------------------------------------------------------------------- /mito_testsample/testset/images.083.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/testset/images.083.png -------------------------------------------------------------------------------- /mito_testsample/testset/images.084.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/testset/images.084.png -------------------------------------------------------------------------------- /mito_testsample/testset/images.085.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/testset/images.085.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.010.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.011.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.012.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.012.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.013.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.014.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.015.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.016.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.016.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.017.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.018.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.018.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.019.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.020.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.020.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.021.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.022.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.022.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.023.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.023.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.024.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.024.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.025.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.025.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.026.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.026.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.027.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.027.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.028.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.028.png -------------------------------------------------------------------------------- /mito_testsample/training/images/images.029.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/images/images.029.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.010.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.011.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.012.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.012.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.013.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.014.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.015.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.016.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.016.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.017.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.018.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.018.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.019.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.020.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.020.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.021.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.022.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.022.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.023.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.023.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.024.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.024.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.025.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.025.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.026.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.026.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.027.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.027.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.028.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.028.png -------------------------------------------------------------------------------- /mito_testsample/training/labels/mitos_3D.029.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/training/labels/mitos_3D.029.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.050.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.050.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.051.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.051.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.052.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.052.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.053.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.053.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.054.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.054.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.055.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.055.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.056.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.056.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.057.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.057.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.058.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.058.png -------------------------------------------------------------------------------- /mito_testsample/validation/images/images.059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/images/images.059.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.050.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.050.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.051.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.051.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.052.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.052.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.053.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.053.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.054.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.054.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.055.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.055.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.056.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.056.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.057.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.057.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.058.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.058.png -------------------------------------------------------------------------------- /mito_testsample/validation/labels/mitos_3D.059.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/mito_testsample/validation/labels/mitos_3D.059.png -------------------------------------------------------------------------------- /model/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 DiveLab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_1fm/label_class_selection.prototxt: -------------------------------------------------------------------------------- 1 | ignore_rest_of_label:true 2 | rest_of_label_mapping:false; 3 | rest_of_label_mapping_label: 1; 4 | rest_of_label_prob : 0.5; 5 | label_prob_mapping_info{ 6 | label: 0 7 | prob: 1 8 | } 9 | label_prob_mapping_info{ 10 | label: 1 11 | prob: 0.3 12 | } 13 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_1fm/solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "train_val.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 20 10 | test_iter: 20 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 1e-02 13 | power: 0.8 14 | momentum: 0.9 15 | weight_decay: 0.0005 16 | average_loss: 16 17 | # The learning rate policy 18 | lr_policy: "poly" 19 | #stepsize: 2500 20 | # Display every 100 iterations 21 | display: 3 22 | # The maximum number of iterations 23 | max_iter: 50000 24 | iter_size: 8 25 | # snapshot intermediate results 26 | snapshot: 2000 27 | #snapshot_format: HDF5 28 | snapshot_format: BINARYPROTO 29 | snapshot_prefix: "trained_weights/inception_fcn_mscal_classifier" 30 | # solver mode: CPU or GPU 31 | solver_mode: GPU 32 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/label_class_selection.prototxt: -------------------------------------------------------------------------------- 1 | ignore_rest_of_label:true 2 | rest_of_label_mapping:false; 3 | rest_of_label_mapping_label: 1; 4 | rest_of_label_prob : 0.5; 5 | label_prob_mapping_info{ 6 | label: 0 7 | prob: 1 8 | } 9 | label_prob_mapping_info{ 10 | label: 1 11 | prob: 0.3 12 | } 13 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_3fm/solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "train_val.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | # Carry out testing every 500 training iterations. 
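# Note: the example comments above come from the stock Caffe MNIST solver
# and do not describe the values below. caffetrain.sh rewrites max_iter,
# base_lr, power, momentum, weight_decay, average_loss, lr_policy,
# iter_size and snapshot in this file via sed before each training run,
# so its command-line flags override the defaults checked in here.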
9 | test_interval: 10 10 | test_iter: 20 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 1e-02 13 | power: 0.8 14 | momentum: 0.9 15 | weight_decay: 0.0005 16 | average_loss: 16 17 | # The learning rate policy 18 | lr_policy: "poly" 19 | #stepsize: 2500 20 | # Display every 100 iterations 21 | display: 3 22 | # The maximum number of iterations 23 | max_iter: 50000 24 | iter_size: 8 25 | # snapshot intermediate results 26 | snapshot: 2000 27 | #snapshot_format: HDF5 28 | snapshot_format: BINARYPROTO 29 | snapshot_prefix: "trained_weights/inception_fcn_mscal_classifier_fullstacks_train" 30 | # solver mode: CPU or GPU 31 | solver_mode: GPU 32 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_5fm/label_class_selection.prototxt: -------------------------------------------------------------------------------- 1 | ignore_rest_of_label:true 2 | rest_of_label_mapping:false; 3 | rest_of_label_mapping_label: 1; 4 | rest_of_label_prob : 0.5; 5 | label_prob_mapping_info{ 6 | label: 0 7 | prob: 1 8 | } 9 | label_prob_mapping_info{ 10 | label: 1 11 | prob: 0.3 12 | } 13 | -------------------------------------------------------------------------------- /model/inception_residual_train_prediction_5fm/solver.prototxt: -------------------------------------------------------------------------------- 1 | # reduce the learning rate after 8 epochs (4000 iters) by a factor of 10 2 | 3 | # The train/test net protocol buffer definition 4 | net: "train_val.prototxt" 5 | # test_iter specifies how many forward passes the test should carry out. 6 | # In the case of MNIST, we have test batch size 100 and 100 test iterations, 7 | # covering the full 10,000 testing images. 8 | # Carry out testing every 500 training iterations. 9 | test_interval: 10 10 | test_iter: 20 11 | # The base learning rate, momentum and the weight decay of the network. 12 | base_lr: 1e-02 13 | power: 0.8 14 | momentum: 0.9 15 | weight_decay: 0.0005 16 | average_loss: 16 17 | # The learning rate policy 18 | lr_policy: "poly" 19 | #stepsize: 2500 20 | # Display every 100 iterations 21 | display: 3 22 | # The maximum number of iterations 23 | max_iter: 50000 24 | iter_size: 8 25 | # snapshot intermediate results 26 | snapshot: 2000 27 | #snapshot_format: HDF5 28 | snapshot_format: BINARYPROTO 29 | snapshot_prefix: "trained_weights/inception_fcn_mscal_classifier_fullstacks_train" 30 | # solver mode: CPU or GPU 31 | solver_mode: GPU 32 | -------------------------------------------------------------------------------- /postprocessworker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | script_name=`basename $0` 4 | script_dir=`dirname $0` 5 | 6 | source "${script_dir}/commonfunctions.sh" 7 | 8 | version="???" 9 | waitinterval="1" 10 | 11 | if [ -f "$script_dir/VERSION" ] ; then 12 | version=`cat $script_dir/VERSION` 13 | fi 14 | 15 | function usage() 16 | { 17 | echo "usage: $script_name [-h] 18 | predictdir 19 | 20 | Version: $version 21 | 22 | Runs StartPostprocessing.m and Merge_LargeData.m 23 | as packages become available to process. 
24 | This script uses predict.config 25 | file to obtain location of trained model 26 | and image data 27 | 28 | positional arguments: 29 | predictdir Predict directory generated by 30 | runprediction.sh 31 | 32 | optional arguments: 33 | -h, --help show this help message and exit 34 | 35 | --waitinterval Number of seconds to wait between checking 36 | for number of completed packages 37 | (default $waitinterval) 38 | 39 | " 1>&2; 40 | exit 1; 41 | } 42 | 43 | TEMP=`getopt -o h --long "help,waitinterval:" -n '$0' -- "$@"` 44 | eval set -- "$TEMP" 45 | 46 | while true ; do 47 | case "$1" in 48 | -h ) usage ;; 49 | --help ) usage ;; 50 | --waitinterval ) waitinterval=$2 ; shift 2 ;; 51 | --) shift ; break ;; 52 | esac 53 | done 54 | 55 | if [ $# -ne 1 ] ; then 56 | usage 57 | fi 58 | 59 | out_dir=$1 60 | 61 | echo "" 62 | 63 | predict_config="$out_dir/predict.config" 64 | 65 | parse_predict_config "$predict_config" 66 | 67 | if [ $? != 0 ] ; then 68 | fatal_error "$out_dir" "ERROR parsing $predict_config" 2 69 | fi 70 | 71 | echo "Running Postprocess" 72 | echo "" 73 | 74 | echo "Trained Model Dir: $trained_model_dir" 75 | echo "Image Dir: $img_dir" 76 | echo "Models: $model_list" 77 | echo "Speed: $aug_speed" 78 | echo "" 79 | 80 | package_proc_info="$out_dir/augimages/package_processing_info.txt" 81 | 82 | if [ ! -s "$package_proc_info" ] ; then 83 | fatal_error "$out_dir" "ERROR $package_proc_info not found" 7 84 | fi 85 | 86 | parse_package_processing_info "$package_proc_info" 87 | 88 | space_sep_models=$(get_models_as_space_separated_list "$model_list") 89 | 90 | for model_name in `echo $space_sep_models` ; do 91 | if [ -f "$out_dir/$model_name/DONE" ] ; then 92 | echo "Found $out_dir/$model_name/DONE Prediction on model completed. Skipping..." 93 | continue 94 | fi 95 | let cntr=1 96 | for CUR_PKG in `seq -w 001 $num_pkgs` ; do 97 | for CUR_Z in `seq -w 01 $num_zstacks` ; do 98 | package_name=$(get_package_name "$CUR_PKG" "$CUR_Z") 99 | Z="$out_dir/augimages/$model_name/$package_name" 100 | out_pkg="$out_dir/$model_name/$package_name" 101 | if [ -f "$out_pkg/DONE" ] ; then 102 | echo " Found $out_pkg/DONE Prediction completed. Skipping..." 103 | continue 104 | fi 105 | echo "For model $model_name postprocessing $package_name $cntr of $tot_pkgs" 106 | echo "Waiting for $out_pkg to finish processing" 107 | res=$(wait_for_predict_to_finish_on_package "$out_dir" "$out_pkg" "$waitinterval") 108 | if [ "$res" == "killed" ] ; then 109 | echo "KILL.REQUEST file found. Exiting" 110 | exit 1 111 | fi 112 | 113 | echo "Running StartPostprocessing.m on $out_pkg" 114 | /usr/bin/time -p StartPostprocessing.m "$out_pkg" 115 | ecode=$? 116 | if [ $ecode != 0 ] ; then 117 | fatal_error "$out_dir" "ERROR non-zero exit code ($ecode) from running StartPostprocessing.m" 7 118 | fi 119 | echo "0" > "$out_pkg/DONE" 120 | echo "Removing $Z" 121 | /bin/rm -rf "$Z" 122 | let cntr+=1 123 | done 124 | done 125 | /usr/bin/time -p Merge_LargeData.m "$out_dir/$model_name" 126 | ecode=$? 127 | if [ $ecode != 0 ] ; then 128 | fatal_error "$out_dir" "ERROR non-zero exit code ($ecode) from running Merge_LargeData.m" 8 129 | fi 130 | echo "Removing Pkg* folders" 131 | /bin/rm -rf $out_dir/$model_name/Pkg* 132 | done 133 | 134 | echo "" 135 | echo "Postprocessing has completed." 
136 | echo "" 137 | -------------------------------------------------------------------------------- /predictworker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | script_name=`basename $0` 4 | script_dir=`dirname $0` 5 | 6 | source "${script_dir}/commonfunctions.sh" 7 | 8 | version="???" 9 | waitinterval="1" 10 | 11 | if [ -f "$script_dir/VERSION" ] ; then 12 | version=`cat $script_dir/VERSION` 13 | fi 14 | 15 | gpu="all" 16 | 17 | function usage() 18 | { 19 | echo "usage: $script_name [-h] [--gpu GPU] [--waitinterval WAIT] 20 | predictdir 21 | 22 | Version: $version 23 | 24 | Runs caffepredict.sh in a serial fashion 25 | waiting for the next available package 26 | to process. This script uses predict.config 27 | file to obtain location of trained model 28 | and image data 29 | 30 | positional arguments: 31 | predictdir Predict directory generated by 32 | runprediction.sh 33 | 34 | optional arguments: 35 | -h, --help show this help message and exit 36 | --gpu Which GPU to use, can be a number ie 0 or 1 or 37 | all to use all GPUs (default $gpu) 38 | --waitinterval Number of seconds to wait between checking 39 | for number of completed packages 40 | (default $waitinterval) 41 | 42 | " 1>&2; 43 | exit 1; 44 | } 45 | 46 | TEMP=`getopt -o h --long "help,waitinterval:,gpu:" -n '$0' -- "$@"` 47 | eval set -- "$TEMP" 48 | 49 | while true ; do 50 | case "$1" in 51 | -h ) usage ;; 52 | --help ) usage ;; 53 | --gpu ) gpu=$2 ; shift 2 ;; 54 | --waitinterval ) waitinterval=$2 ; shift 2 ;; 55 | --) shift ; break ;; 56 | esac 57 | done 58 | 59 | if [ $# -ne 1 ] ; then 60 | usage 61 | fi 62 | 63 | out_dir=$1 64 | 65 | echo "" 66 | 67 | predict_config="$out_dir/predict.config" 68 | 69 | parse_predict_config "$predict_config" 70 | 71 | if [ $? != 0 ] ; then 72 | fatal_error "$out_dir" "ERROR parsing $predict_config" 2 73 | fi 74 | 75 | echo "Running Prediction" 76 | echo "" 77 | 78 | echo "Trained Model Dir: $trained_model_dir" 79 | echo "Image Dir: $img_dir" 80 | echo "Models: $model_list" 81 | echo "Speed: $aug_speed" 82 | echo "GPU: $gpu" 83 | echo "" 84 | 85 | package_proc_info="$out_dir/augimages/package_processing_info.txt" 86 | 87 | if [ ! -s "$package_proc_info" ] ; then 88 | fatal_error "$out_dir" "ERROR $package_proc_info not found" 7 89 | fi 90 | 91 | parse_package_processing_info "$package_proc_info" 92 | 93 | space_sep_models=$(get_models_as_space_separated_list "$model_list") 94 | 95 | for model_name in `echo $space_sep_models` ; do 96 | 97 | let cntr=1 98 | for CUR_PKG in `seq -w 001 $num_pkgs` ; do 99 | for CUR_Z in `seq -w 01 $num_zstacks` ; do 100 | package_name=$(get_package_name "$CUR_PKG" "$CUR_Z") 101 | Z="$out_dir/augimages/$model_name/$package_name" 102 | out_pkg="$out_dir/$model_name/$package_name" 103 | 104 | # caffepredict.sh creates PREDICTDONE when done 105 | # so check for that file before running 106 | if [ -f "$out_pkg/PREDICTDONE" ] ; then 107 | echo " Found $out_pkg/PREDICTDONE Prediction completed. Skipping..." 108 | continue 109 | fi 110 | 111 | # StartPostprocessing.m creates DONE when done 112 | # so check for that file before running 113 | if [ -f "$out_pkg/DONE" ] ; then 114 | echo " Found $out_pkg/DONE Postprocessing completed. Skipping..." 115 | continue 116 | fi 117 | echo "For model $model_name preprocessing $package_name $cntr of $tot_pkgs" 118 | res=$(wait_for_preprocess_to_finish_on_package "$out_dir" "$Z" "$waitinterval") 119 | if [ "$res" == "killed" ] ; then 120 | echo "KILL.REQUEST file found. 
Exiting" 121 | exit 1 122 | fi 123 | echo "Running prediction on $model_name $package_name" 124 | /usr/bin/time -p caffepredict.sh --gpu $gpu "$trained_model_dir/$model_name/trainedmodel" "$Z" "$out_pkg" 125 | ecode=$? 126 | if [ $ecode != 0 ] ; then 127 | fatal_error "$out_dir" "ERROR, a non-zero exit code ($ecode) was received from: caffepredict.sh" 4 128 | fi 129 | let cntr+=1 130 | done 131 | done 132 | done 133 | 134 | echo "" 135 | echo "Prediction has completed." 136 | echo "" 137 | -------------------------------------------------------------------------------- /preprocessworker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | script_name=`basename $0` 4 | script_dir=`dirname $0` 5 | 6 | source "${script_dir}/commonfunctions.sh" 7 | 8 | version="???" 9 | maxpackages="5" 10 | waitinterval="60" 11 | 12 | if [ -f "$script_dir/VERSION" ] ; then 13 | version=`cat $script_dir/VERSION` 14 | fi 15 | 16 | function usage() 17 | { 18 | echo "usage: $script_name [-h] 19 | predictdir 20 | 21 | Version: $version 22 | 23 | Runs PreprocessPackage.m in a serial fashion 24 | waiting if too many packages have yet to be 25 | processed. This script uses predict.config 26 | file to obtain location of trained model 27 | and image data 28 | 29 | positional arguments: 30 | predictdir Predict directory generated by 31 | runprediction.sh 32 | 33 | optional arguments: 34 | -h, --help show this help message and exit 35 | --maxpackages Number of completed packages allowed before 36 | script should wait to let prediction catch up. 37 | (default $maxpackages) 38 | 39 | --waitinterval Number of seconds to wait between checking 40 | for number of completed packages 41 | (default $waitinterval) 42 | 43 | " 1>&2; 44 | exit 1; 45 | } 46 | 47 | TEMP=`getopt -o h --long "help,maxpackages:,waitinterval:" -n '$0' -- "$@"` 48 | eval set -- "$TEMP" 49 | 50 | while true ; do 51 | case "$1" in 52 | -h ) usage ;; 53 | --help ) usage ;; 54 | --waitinterval ) waitinterval=$2 ; shift 2 ;; 55 | --maxpackages ) maxpackages=$2 ; shift 2 ;; 56 | --) shift ; break ;; 57 | esac 58 | done 59 | 60 | if [ $# -ne 1 ] ; then 61 | usage 62 | fi 63 | 64 | out_dir=$1 65 | 66 | echo "" 67 | 68 | predict_config="$out_dir/predict.config" 69 | 70 | parse_predict_config "$predict_config" 71 | 72 | if [ $? != 0 ] ; then 73 | fatal_error "$out_dir" "ERROR parsing $predict_config" 2 74 | fi 75 | 76 | echo "Running PreprocessPackage" 77 | echo "" 78 | 79 | echo "Trained Model Dir: $trained_model_dir" 80 | echo "Image Dir: $img_dir" 81 | echo "Models: $model_list" 82 | echo "Speed: $aug_speed" 83 | echo "" 84 | 85 | package_proc_info="$out_dir/augimages/package_processing_info.txt" 86 | 87 | if [ ! -s "$package_proc_info" ] ; then 88 | fatal_error "$out_dir" "ERROR $package_proc_info not found" 7 89 | fi 90 | 91 | parse_package_processing_info "$package_proc_info" 92 | 93 | space_sep_models=$(get_models_as_space_separated_list "$model_list") 94 | 95 | for model_name in `echo $space_sep_models` ; do 96 | 97 | let cntr=1 98 | for CUR_PKG in `seq -w 001 $num_pkgs` ; do 99 | for CUR_Z in `seq -w 01 $num_zstacks` ; do 100 | package_name=$(get_package_name "$CUR_PKG" "$CUR_Z") 101 | Z="$out_dir/augimages/$model_name/$package_name" 102 | out_pkg="$out_dir/$model_name/$package_name" 103 | if [ -f "$out_pkg/DONE" ] ; then 104 | echo " Found $out_pkg/DONE. Prediction completed. Skipping..." 
105 | continue 106 | fi 107 | echo "Preprocessing $package_name in model $model_name" 108 | augoutfile="$out_dir/augimages/preproc.${model_name}.${package_name}.log" 109 | 110 | /usr/bin/time -p PreprocessPackage.m "$img_dir" "$out_dir/augimages" $CUR_PKG $CUR_Z $model_name $aug_speed > "$augoutfile" 2>&1 111 | ecode=$? 112 | if [ $ecode != 0 ] ; then 113 | fatal_error "$out_dir" "ERROR, a non-zero exit code ($ecode) received from PreprocessPackage.m $CUR_PKG $CUR_Z $model_name $aug_speed" 8 114 | fi 115 | echo "Waiting for prediction to catch up" 116 | res=$(wait_for_prediction_to_catchup "$out_dir/augimages" $maxpackages $waitinterval) 117 | if [ "$res" == "killed" ] ; then 118 | echo "KILL.REQUEST file found. Exiting" 119 | exit 1 120 | fi 121 | let cntr+=1 122 | done 123 | done 124 | done 125 | 126 | echo "" 127 | echo "PreprocessPackaging has completed." 128 | echo "" 129 | -------------------------------------------------------------------------------- /runvalidation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | script_name=`basename $0` 4 | script_dir=`dirname $0` 5 | version="???" 6 | 7 | if [ -f "$script_dir/VERSION" ] ; then 8 | version=`cat $script_dir/VERSION` 9 | fi 10 | 11 | function usage() 12 | { 13 | echo "usage: $script_name log_file out_dir 14 | 15 | Script to plot training vs validation loss. 16 | Example: runvalidation.sh ~/cdeep3m/train_out/1fm/log/out.log ~/cdeep3m/train_out/1fm/log/ 17 | 18 | Version: $version 19 | 20 | positional arguments: 21 | log_file Log file from desired model. 22 | out_dir Directory for output files 23 | 24 | 25 | " 1>&2; 26 | exit 1; 27 | } 28 | 29 | if [ $# -ne 2 ] ; then 30 | usage 31 | fi 32 | 33 | 34 | log_dir=$(dirname $1) 35 | 36 | 37 | python $CAFFE_PATH/tools/extra/parse_log.py $1 $log_dir 38 | 39 | 40 | PlotValidation.m $1.train $1.test $2 41 | -------------------------------------------------------------------------------- /scripts/Histmatch.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | % Histmatch 3 | % Match Histogram of 1 Dataset to another Dataset 4 | % E.g. 
to use a model that has already been trained on another dataset 5 | % to this end the average histogram of the second image stack will be used 6 | % as a reference 7 | % 8 | % Syntax: Histmatch ~/Dataset1/ ~/Reference_Dataset2/ ~/Histomatched_Dataset1/ 9 | % Positional arguments: 10 | % - Input dataset 11 | % - Reference dataset 12 | % - Output folder 13 | % 14 | % Expected runtime 3min for 1024x1024x100 dataset 15 | % 16 | %------------------------------------------------------------------ 17 | %% CDeep3M -- NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 11/2017 18 | %------------------------------------------------------------------ 19 | 20 | arg_list = argv (); 21 | pkg load hdf5oct 22 | pkg load image 23 | 24 | if numel(arg_list) < 3 25 | error('Please specify an input image folder, a reference image folder and an output folder name'); 26 | return 27 | end 28 | tic 29 | disp('Starting Histogram matching'); 30 | 31 | %----------------------------------------- 32 | %% Input arguments / Loading data 33 | %----------------------------------------- 34 | fprintf('Starting to process %d datasets \n',floor(numel(arg_list)/2)); 35 | 36 | inputdir_raw = arg_list{1}; 37 | inputdir_ref = arg_list{2}; 38 | outputfolder = arg_list{3}; 39 | 40 | %----------------------------------------- 41 | %% Check with user before overwriting/deleting any files 42 | %----------------------------------------- 43 | 44 | if ~exist(outputfolder,'dir') 45 | mkdir(outputfolder); 46 | end 47 | 48 | %----------------------------------------- 49 | %% Loading data 50 | %----------------------------------------- 51 | 52 | raw_stack = imageimporter(inputdir_raw); 53 | ref_stack = imageimporter(inputdir_ref); 54 | 55 | ref_image = mean(ref_stack,3); 56 | 57 | %----------------------------------------- 58 | %% Histmatching and Saving 59 | %----------------------------------------- 60 | 61 | disp('Saving ...') 62 | for i=1:size(raw_stack,3) 63 | fprintf('.'); 64 | outputfile = fullfile(outputfolder, sprintf('Image_%04d.png',i)); 65 | hist_matched_imgs = imhistmatch(raw_stack(:,:,i),ref_image); 66 | imwrite(hist_matched_imgs,outputfile); 67 | end 68 | 69 | toc 70 | -------------------------------------------------------------------------------- /scripts/SNEMI3D_metrics.m: -------------------------------------------------------------------------------- 1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 | % SNEMI3D challenge: 3D segmentation of neurites in EM images 3 | % 4 | % Script to calculate the segmentation error between some 3D 5 | % original labels and their corresponding proposed labels. 6 | % 7 | % The evaluation metric is: 8 | % - Rand error: 1 - F-score of adapted Rand index 9 | % 10 | % author: Ignacio Arganda-Carreras (iarganda@mit.edu) 11 | % More information at http://brainiac.mit.edu/SNEMI3D 12 | % 13 | % This script is released under the terms of the General Public 14 | % License in its latest edition. 
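% (Worked sketch of the computation below, in the code's own notation:
%  precision prec = sumAB/sumB and recall rec = sumAB/sumA give
%  fScore = 2*prec*rec/(prec + rec), and the returned re = 1.0 - fScore,
%  so re = 0 for a perfect segmentation and approaches 1 as overlap degrades.)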
15 | % 16 | % Input: 17 | % segA - ground truth (16-bit labels, 0 = background) 18 | % segB - proposed labels (16-bit labels, 0 = background) 19 | % Output: 20 | % re - adapted Rand error (1.0 - F-score of adapted Rand index) 21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 22 | function [re] = SNEMI3D_metrics( segA, segB ) 23 | 24 | segA = double(segA)+1; 25 | segB = double(segB)+1; 26 | n = numel(segA); 27 | 28 | n_labels_A = max(segA(:)); 29 | n_labels_B = max(segB(:)); 30 | 31 | % compute overlap matrix 32 | p_ij = sparse(segA(:),segB(:),1,n_labels_A,n_labels_B); 33 | 34 | % a_i 35 | a_i = sum(p_ij(2:end,:), 2); 36 | 37 | % b_j 38 | b_j = sum(p_ij(2:end,2:end), 1); 39 | 40 | p_i0 = p_ij(2:end,1); % pixels marked as BG in segB which are not BG in segA 41 | p_ij = p_ij(2:end,2:end); 42 | 43 | sumA = sum(a_i.*a_i); 44 | sumB = sum(b_j.*b_j) + sum(p_i0)/n; 45 | sumAB = sum(sum(p_ij.^2)) + sum(p_i0)/n; 46 | 47 | % Rand index 48 | %ri = full(1 - (sumA + sumB - 2*sumAB)/ n^2); 49 | 50 | % precision 51 | prec = sumAB / sumB; 52 | 53 | % recall 54 | rec = sumAB / sumA; 55 | 56 | % F-score 57 | fScore = 2.0 * prec * rec / (prec + rec); 58 | 59 | re = 1.0 - fScore; 60 | 61 | end 62 | 63 | -------------------------------------------------------------------------------- /scripts/augment_data.m: -------------------------------------------------------------------------------- 1 | function [img_out,lb_out]=augment_data(img_in,lbl_in,i) 2 | fprintf('\nCreate variation %s and %s\n',num2str(i),num2str(i+8)); 3 | switch(i) 4 | case 1 5 | img_out = img_in; 6 | lb_out = lbl_in; 7 | case 2 8 | img_out = flipdim(img_in,1); 9 | lb_out = flipdim(lbl_in,1); 10 | case 3 11 | img_out = flipdim(img_in,2); 12 | lb_out = flipdim(lbl_in,2); 13 | case 4 14 | img_out = rot90(img_in); 15 | lb_out = rot90(lbl_in); 16 | case 5 17 | img_out = rot90(img_in, -1); 18 | lb_out = rot90(lbl_in, -1); 19 | case 6 20 | img_out = rot90(flipdim(img_in, 1)); 21 | lb_out = rot90(flipdim(lbl_in, 1)); 22 | case 7 23 | img_out = rot90(flipdim(img_in,2)); 24 | lb_out = rot90(flipdim(lbl_in,2)); 25 | case 8 26 | img_out = rot90(img_in, 2); 27 | lb_out = rot90(lbl_in, 2); 28 | end 29 | 30 | end 31 | -------------------------------------------------------------------------------- /scripts/functions/add_z_padding.m: -------------------------------------------------------------------------------- 1 | function [im_stack] = add_z_padding(im_stack) 2 | %adds 2 planes at the beginning and end of the image stack 3 | im_stack = cat(3,im_stack(:,:,3),im_stack(:,:,2),im_stack,im_stack(:,:,end-1),im_stack(:,:,end-2)); 4 | end 5 | 6 | -------------------------------------------------------------------------------- /scripts/functions/augment_image_data_only.m: -------------------------------------------------------------------------------- 1 | function [D]=augment_image_data_only(raw_images, outsubdir)%,y) 2 | % Update: inserted saving here 3 | % removed memory-limiting steps to speed up processing 4 | 5 | idx=0; 6 | %% V1-8: 7 | create8Variation(raw_images,idx,outsubdir); 8 | 9 | %% V9-16: sweep Z dimension 10 | disp('Sweep Z dimension') 11 | raw_images = flip(raw_images,3); 12 | 13 | idx=8; 14 | create8Variation(raw_images,idx,outsubdir); 15 | end 16 | 17 | function [D]=create8Variation(original,idx,outsubdir) % without any label, just data 18 | for j = 1:8 19 | variation=j+idx; 20 | fprintf('Create Hd5 file Variation %s\n',num2str(variation)); 21 | switch(j) 22 | case 1 23 | stack = original; 24 | case 2 25 | stack = 
flipdim(original,1); 26 | 27 | case 3 28 | stack = flipdim(original,2); 29 | 30 | case 4 31 | stack = rot90(original); 32 | 33 | case 5 34 | stack = rot90(original, -1); 35 | 36 | case 6 37 | stack = rot90(flipdim(original, 1)); 38 | 39 | case 7 40 | stack = rot90(flipdim(original,2)); 41 | 42 | case 8 43 | stack = rot90(original, 2); 44 | end 45 | 46 | stack=permute(stack,[3 1 2]); %from tiff to h5 /xyz to z*x*y 47 | d_details = '/data'; 48 | filename = fullfile(outsubdir, sprintf('test_data_full_stacks_v%s.h5', num2str(variation))); 49 | %fprintf('Saving: %s\n',filename) 50 | %h5create(filename,d_details,size(i_stack)); %necessary for Matlab, not for Octave 51 | h5write(filename,d_details,stack); 52 | clear -v stack 53 | 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /scripts/functions/augment_package.m: -------------------------------------------------------------------------------- 1 | function augment_package(original, outsubdir,fmnumber,speed); 2 | % Updates: 3 | % removed memory limiting steps 4 | % All vectorized 5 | % Optimized for reducing time of processing 6 | % 7 | % 8 | %------------------------------------------------ 9 | %% augment_package for CDeep3M -- NCMIR/NBCR, UCSD 10 | %------------------------------------------------ 11 | 12 | allowed_speed = [1,2,4,10]; 13 | if ~ismember(speed,allowed_speed) 14 | [~,I] = min(abs(allowed_speed-speed)); 15 | speed = allowed_speed(I) 16 | end 17 | 18 | if speed == 10 19 | switch(fmnumber) 20 | case 1 21 | do_var = [1]; 22 | case 3 23 | do_var = [1]; 24 | case 5 25 | do_var = [1]; 26 | endswitch 27 | 28 | elseif speed == 1 29 | switch(fmnumber) 30 | case 1 31 | do_var = [1:8]; 32 | case 3 33 | do_var = [1:16]; 34 | case 5 35 | do_var = [1:16]; 36 | endswitch 37 | 38 | elseif speed == 2 39 | switch(fmnumber) 40 | case 1 41 | do_var = [1, 6, 11, 15]; 42 | case 3 43 | do_var = [1:4, 13:16]; 44 | case 5 45 | do_var = [5:12]; 46 | endswitch 47 | 48 | elseif speed == 4 49 | switch(fmnumber) 50 | case 1 51 | do_var = [2, 3, 6, 7 ]; 52 | case 3 53 | do_var = [7, 8, 10, 12]; 54 | case 5 55 | do_var = [1, 6, 11, 15]; 56 | endswitch 57 | end 58 | 59 | 60 | for i = do_var(do_var<9) 61 | 62 | fprintf('Create Hd5 file Variation %s\n',num2str(i)); 63 | switch(i) 64 | case 1 65 | stack = original; 66 | case 2 67 | stack = flipdim(original,1); 68 | 69 | case 3 70 | stack = flipdim(original,2); 71 | 72 | case 4 73 | stack = rot90(original); 74 | 75 | case 5 76 | stack = rot90(original, -1); 77 | 78 | case 6 79 | stack = rot90(flipdim(original, 1)); 80 | 81 | case 7 82 | stack = rot90(flipdim(original,2)); 83 | 84 | case 8 85 | stack = rot90(original, 2); 86 | end 87 | 88 | stack_out=permute(stack,[3 1 2]); %from tiff to h5 /xyz to z*x*y 89 | d_details = '/data'; 90 | filename = fullfile(outsubdir, sprintf('image_stacks_v%s.h5', num2str(i))); 91 | fprintf('Saving: %s\n',filename) 92 | %h5create(filename,d_details,size(i_stack)); %necessary for Matlab, not for Octave 93 | h5write(filename,d_details,stack_out); 94 | %clear -v stack 95 | end 96 | 97 | if max(do_var)>8 98 | %% V9-16: sweep Z dimension 99 | disp('Sweep Z dimension') 100 | original = flip(original,3); 101 | 102 | 103 | for i = do_var(do_var>8) 104 | fprintf('Create Hd5 file Variation %s\n',num2str(i)); 105 | switch(i) 106 | case 9 107 | stack = original; 108 | case 10 109 | stack = flipdim(original,1); 110 | 111 | case 11 112 | stack = flipdim(original,2); 113 | 114 | case 12 115 | stack = rot90(original); 116 | 117 | case 13 118 | stack 
= rot90(original, -1); 119 | 120 | case 14 121 | stack = rot90(flipdim(original, 1)); 122 | 123 | case 15 124 | stack = rot90(flipdim(original,2)); 125 | 126 | case 16 127 | stack = rot90(original, 2); 128 | end 129 | 130 | stack_out=permute(stack,[3 1 2]); %from tiff to h5 /xyz to z*x*y 131 | d_details = '/data'; 132 | filename = fullfile(outsubdir, sprintf('image_stacks_v%s.h5', num2str(i))); 133 | fprintf('Saving: %s\n',filename) 134 | %h5create(filename,d_details,size(i_stack)); %necessary for Matlab, not for Octave 135 | h5write(filename,d_details,stack_out); 136 | %clear -v stack 137 | 138 | end 139 | endif 140 | end 141 | -------------------------------------------------------------------------------- /scripts/functions/break_large_img.m: -------------------------------------------------------------------------------- 1 | function [packages, z_blocks] = break_large_img(imagesize) 2 | %Defines how to read large images 3 | %Note: Packages define X/Y direction only; 4 | % z_blocks define splitting z direction 5 | 6 | %= 1: round(imagesize(1)/1024) 7 | 8 | %% Z-direction splitting 9 | if imagesize(3) >100 10 | z_blocks = [1:100:imagesize(3)]; 11 | if z_blocks(end)<imagesize(3) 12 | z_blocks = [z_blocks, imagesize(3)]; 13 | end 14 | else 15 | z_blocks = [1, imagesize(3)]; 16 | end 17 | 18 | %% X/Y-direction splitting 19 | if imagesize(1) > 1024 20 | x_breaks = [0:1000:(imagesize(1))]; 21 | if x_breaks(end)<imagesize(1) 22 | x_breaks = [x_breaks, imagesize(1)]; end 23 | else 24 | x_breaks = [0, imagesize(1)]; end 25 | if imagesize(2) > 1024 26 | y_breaks = [0:1000:(imagesize(2))]; 27 | if y_breaks(end)<imagesize(2) 28 | y_breaks = [y_breaks, imagesize(2)]; end 29 | else 30 | y_breaks = [0, imagesize(2)]; end 31 | 32 | %% Define packages in X/Y 33 | if (numel(x_breaks)-1)*(numel(y_breaks)-1) >1 34 | counter = 0; 35 | for xx = 1:(numel(x_breaks)-1) 36 | if xx==1 37 | xstart = x_breaks(xx)+1; 38 | else 39 | xstart = x_breaks(xx)-11; 40 | end 41 | 42 | if xx==numel(x_breaks)-1 43 | xend = x_breaks(xx+1); 44 | else 45 | xend = x_breaks(xx+1)+12; 46 | end 47 | for yy = 1:(numel(y_breaks)-1) 48 | counter = counter+1; 49 | if yy==1 50 | ystart = y_breaks(yy)+1; 51 | else 52 | ystart = y_breaks(yy)-11; 53 | end 54 | 55 | if yy==numel(y_breaks)-1 56 | yend = y_breaks(yy+1); 57 | else 58 | yend = y_breaks(yy+1)+12; 59 | end 60 | 61 | packages{counter} = [xstart, xend, ystart, yend]; 62 | 63 | end 64 | 65 | 66 | 67 | 68 | end 69 | else 70 | packages{1} = [1, imagesize(1), 1, imagesize(2)]; 71 | end 72 | 73 | 74 | end 75 | -------------------------------------------------------------------------------- /scripts/functions/check_image_size.m: -------------------------------------------------------------------------------- 1 | function [imagesize] = check_image_size(img_path) 2 | %check_image_size: to see how to break large image data 3 | % 4 | % 5 | % 6 | %----------------------------------------------------------------------------- 7 | %% CDeep3M -- NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 11/2017 8 | %----------------------------------------------------------------------------- 9 | warning ("off") 10 | disp('Check image size of: '); 11 | disp(img_path); 12 | % check if a folder of png/tif files or a single stack to load 13 | [Dir,name,ext] = fileparts(img_path); 14 | if ~isempty(ext) 15 | if ~isempty(strfind(ext,'h5')) 16 | disp('Reading H5 image file'); 17 | %hinfo = h5info(img_path); 18 | %try 19 | %imagesize = hinfo.Datasets.Dataspace.MaxSize; 20 | %catch 21 | %imagesize = hinfo.Datasets.ChunkSize; 22 | %end 23 | %hinfo.Datasets.ChunkSize; 24 | temp = load(img_path); 25 | cont = cell2mat(fieldnames(temp)); 26 | imagesize = size(temp.(cont)); 27 | imagesize=[imagesize(2:3),imagesize(1)]; 28 | clear temp 29 | elseif ~isempty(strfind(ext,'tif')) 30 | info = imfinfo(img_path); 31 | fprintf('Reading image stack with %d images\n',size(info,1)); 32 | imagesize = [info(1).Height, info(1).Width, size(info,1)]; 33 | 34 | end 35 | 36 | elseif isdir(img_path) 37 | file_list = 
read_files_in_folder(img_path); 38 | png_list = filter_files(file_list,'.png'); 39 | tif_list = filter_files(file_list,'.tif'); 40 | if size(tif_list,1)+size(png_list,1) == 0, disp('No Tifs or PNGs found in the directory');return; 41 | else 42 | [~, type] = max([size(tif_list,1),size(png_list,1)]); %only read tif or pngs if ambiguous 43 | if type==1, file_list = tif_list; elseif type==2, file_list = png_list; end 44 | for idx =1 45 | filename = fullfile(img_path,file_list(idx).name); 46 | fprintf('Reading file: %s\n', filename); 47 | info = imfinfo(filename); 48 | imagesize = [info(1).Height, info(1).Width, size(file_list,1)]; 49 | end 50 | end 51 | 52 | else 53 | error('No images found'); 54 | return 55 | end 56 | 57 | 58 | 59 | end 60 | 61 | -------------------------------------------------------------------------------- /scripts/functions/check_img_dims.m: -------------------------------------------------------------------------------- 1 | function [imgstack, lblstack] = check_img_dims(imgstack, lblstack, minsize) 2 | % 3 | % Check Canvas Size of training images and training labels 4 | % to match same size and to fulfill min canvas size 5 | % 6 | %---------------------------------------------------------------------------------------- 7 | %% CDeep3M -- NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 03/2018 8 | %---------------------------------------------------------------------------------------- 9 | % 10 | 11 | disp('Checking image dimensions'); 12 | if ~(size(imgstack,1) == size(lblstack,1)) | ~(size(imgstack,2) == size(lblstack,2)) 13 | error('Image dimension mismatch in x/y between images and labels'); 14 | return 15 | end 16 | if ~(size(imgstack,3) == size(lblstack,3)) 17 | error('Image dimension mismatch in z between images and labels'); 18 | return 19 | end 20 | 21 | x1 = size(imgstack,1); 22 | y1 = size(imgstack,2); 23 | 24 | if x1 < minsize || y1 < minsize 25 | temp_img = zeros(minsize,minsize,size(imgstack,3)); 26 | temp_img(1:x1,1:y1,:) = imgstack; 27 | imgstack = temp_img; 28 | temp_lbl = zeros(minsize,minsize,size(lblstack,3)); 29 | temp_lbl(1:x1,1:y1,:) = lblstack; 30 | lblstack = temp_lbl; 31 | end 32 | 33 | end 34 | 35 | -------------------------------------------------------------------------------- /scripts/functions/checkpoint_isbinary.m: -------------------------------------------------------------------------------- 1 | function [] = checkpoint_isbinary(lblstack) 2 | %checkpoint_isbinary 3 | % Make sure the labels provided are binary 4 | if numel(unique(lblstack(:))) > 2 5 | disp('Your labels do not seem to be binary files'); 6 | reply = input('Type S to stop label augmentation here? Otherwise label augmentation will proceed now','s'); 7 | if regexpi(reply ,'S') 8 | disp('Augmentation cancelled') 9 | return 10 | end 11 | end 12 | 13 | end 14 | 15 | -------------------------------------------------------------------------------- /scripts/functions/checkpoint_nobinary.m: -------------------------------------------------------------------------------- 1 | function [] = checkpoint_nobinary(imagestack) 2 | %checkpoint_nobinary 3 | % Make sure user didn't by mistake input binaries where there should be images 4 | if numel(unique(imagestack(:))) < 3 5 | disp('Images are not 8 or 16bit'); 6 | disp('Please be sure you did not use binary labels by mistake here'); 7 | reply = input('Type S to stop image augmentation? 
Otherwise images will be augmented now','s'); 8 | if regexpi(reply ,'S') 9 | disp('Augmentation cancelled') 10 | return 11 | end 12 | end 13 | 14 | end 15 | 16 | -------------------------------------------------------------------------------- /scripts/functions/convert_training_data_to_h5stack.m: -------------------------------------------------------------------------------- 1 | ## Usage [status,errmsg] = convert_training_data_to_h5stack(training_images_path, training_labels_path, out_dir) 2 | ## 3 | ## Makes augmented hdf5 datafiles from raw and label images 4 | ## 5 | ## Runtime ~20min for 1024x1024x100 dataset 6 | ## 7 | 8 | function [status,errmsg] = convert_training_data_to_h5stack(training_images_path, 9 | training_labels_path, 10 | out_dir) 11 | tic 12 | 13 | % --------------------------------------------------------------------------- 14 | %% Load train data 15 | % --------------------------------------------------------------------------- 16 | status = 0; 17 | errmsg = ''; 18 | 19 | disp('Loading:'); 20 | disp(training_labels_path); 21 | [lblstack] = imageimporter(training_labels_path); 22 | checkpoint_isbinary(lblstack); 23 | 24 | % --------------------------------------------------------------------------- 25 | %% Load training images 26 | % --------------------------------------------------------------------------- 27 | 28 | disp('Loading:'); 29 | disp(training_images_path); 30 | [imgstack] = imageimporter(training_images_path); 31 | checkpoint_nobinary(imgstack); 32 | 33 | % --------------------------------------------------------------------------- 34 | %% Augment the data, generating 16 versions and save 35 | % --------------------------------------------------------------------------- 36 | 37 | disp('Augmenting ...'); 38 | data_arr=permute(imgstack,[3 1 2]); %from tiff to h5 /100*1000*1000 39 | labels_arr=permute(lblstack,[3 1 2]); %from tiff to h5 /100*1000*1000 40 | 41 | d_tr =single(data_arr); 42 | l_tr =single(labels_arr); 43 | [data,labels]=augment_data(d_tr,l_tr); 44 | 45 | d_details = '/data'; 46 | l_details = '/label'; 47 | if ~exist(out_dir,'dir'); 48 | mkdir(out_dir); 49 | endif 50 | ext = '.h5'; 51 | 52 | disp('Saving ...'); 53 | for i=1:length(data) 54 | d_tr=data{i}; 55 | l_tr=labels{i}; 56 | filename = fullfile(out_dir, sprintf('training_full_stacks_v%s%s', 57 | num2str(i), ext)); 58 | disp(filename); 59 | h5write(filename,d_details,d_tr); 60 | h5write(filename,l_details,l_tr); 61 | endfor 62 | endfunction 63 | % ----------------------------------------------------------------------------- 64 | %% Completed 65 | % ----------------------------------------------------------------------------- 66 | 67 | toc 68 | -------------------------------------------------------------------------------- /scripts/functions/copy_model.m: -------------------------------------------------------------------------------- 1 | ## Usage copy_model( base_dir, the_model, dest_dir ) 2 | ## 3 | ## Starting from base_dir directory this function 4 | ## copies *txt files from model/inception_residual_train_prediction_<the_model> 5 | ## to directory specified by dest_dir argument. If copy fails 6 | ## error() is invoked describing the issue 7 | ## 8 | 9 | function copy_model(base_dir, the_model, dest_dir) 10 | % Copies *txt files from the model/inception_residual_train_prediction_<the_model> 11 | % directory to the directory specified by dest_dir argument. 
If copy fails error() is 12 | % invoked describing 13 | % the issue 14 | src_files = strcat(base_dir,filesep(),'model',filesep(), 15 | 'inception_residual_train_prediction_',the_model, 16 | filesep(),'*txt'); 17 | res = copyfile(src_files,dest_dir); 18 | if res(1) == 0; 19 | errmsg = sprintf('Error copying model %s : %s\n',the_model,res(2)); 20 | error(errmsg); 21 | endif 22 | endfunction 23 | 24 | %!error copy_model(); 25 | 26 | %!error copy_model('.','weroijef','.'); 27 | 28 | %!test 29 | %! test_fname = tempname(); 30 | %! mkdir(test_fname); 31 | %! copy_model('.','1fm',test_fname); 32 | %! test_dir = strcat(test_fname,filesep()); 33 | %! assert(exist(strcat(test_dir,'deploy.prototxt'),'file'), 2); 34 | %! assert(exist(strcat(test_dir,'solver.prototxt'),'file'), 2); 35 | %! rmdir(test_fname,'s'); 36 | -------------------------------------------------------------------------------- /scripts/functions/copy_over_allmodels.m: -------------------------------------------------------------------------------- 1 | ## Usage [onefm_dest, threefm_dest, fivefm_dest] = copy_over_allmodels( base_dir, outdir ) 2 | ## 3 | ## Create outdir directory and copy over model files for 4 | ## 1fm, 3fm, and 5fm models. It is assumed that 5 | ## base_dir directory contains the deep3m source tree 6 | ## and there exist model/inception_residual_train_prediction_<the_model> 7 | ## directories 8 | ## 9 | ## Upon success three directory paths are returned, 10 | ## one for each model 11 | 12 | function [onefm_dest, threefm_dest, fivefm_dest] = copy_over_allmodels(base_dir, outdir) 13 | % ---------------------------------------------------------------------------- 14 | % Create output directory and copy over model files and 15 | % adjust configuration files 16 | % ---------------------------------------------------------------------------- 17 | 18 | create_dir(outdir); 19 | 20 | % copy over 1fm, 3fm, and 5fm model data to separate directories 21 | onefm_dest = strcat(outdir,filesep(),'1fm'); 22 | create_dir(onefm_dest); 23 | copy_model(base_dir,'1fm',onefm_dest); 24 | 25 | threefm_dest = strcat(outdir,filesep(),'3fm'); 26 | create_dir(threefm_dest); 27 | copy_model(base_dir,'3fm',threefm_dest); 28 | 29 | fivefm_dest = strcat(outdir,filesep(),'5fm'); 30 | create_dir(fivefm_dest); 31 | copy_model(base_dir,'5fm',fivefm_dest); 32 | endfunction 33 | 34 | %!error copy_over_allmodels(); 35 | 36 | %!test 37 | %! test_fname = tempname(); 38 | %! mkdir(test_fname); 39 | %! [one, three, five] = copy_over_allmodels('.', test_fname); 40 | %! one_dir = strcat(one,filesep()); 41 | %! three_dir = strcat(three,filesep()); 42 | %! five_dir = strcat(five,filesep()); 43 | %! assert(exist(strcat(one_dir,'deploy.prototxt'),'file'), 2); 44 | %! assert(exist(strcat(three_dir,'solver.prototxt'),'file'), 2); 45 | %! assert(exist(strcat(five_dir,'solver.prototxt'),'file'), 2); 46 | %! rmdir(test_fname,'s'); 47 | 48 | -------------------------------------------------------------------------------- /scripts/functions/copy_version.m: -------------------------------------------------------------------------------- 1 | ## Usage [errmsg] = copy_version( base_dir, dest_dir ) 2 | ## 3 | ## Starting from base_dir directory this function 4 | ## copies the VERSION file from base_dir to the directory specified 5 | ## by dest_dir argument. 
If copy fails errmsg is set 6 | ## to a string describing the error, otherwise it is an empty string 7 | ## 8 | 9 | function [errmsg] = copy_version(base_dir, dest_dir) 10 | % Copies VERSION from base_dir/ directory to dest_dir directory 11 | % If copy fails errmsg set to string describing 12 | % the issue, otherwise an empty string is returned. 13 | 14 | errmsg = ''; 15 | src_file = strcat(base_dir, filesep(), 'VERSION'); 16 | res = copyfile(src_file, dest_dir); 17 | if res(1) == 0; 18 | errmsg = sprintf('Error copying VERSION %s : %s\n', src_file, res(2)); 19 | endif 20 | endfunction 21 | 22 | %!error copy_version(); 23 | 24 | %!test 25 | %! test_fname = tempname(); 26 | %! mkdir(test_fname); 27 | %! copy_version('.',test_fname); 28 | %! test_dir = strcat(test_fname,filesep()); 29 | %! rmdir(test_fname,'s'); 30 | -------------------------------------------------------------------------------- /scripts/functions/create_dir.m: -------------------------------------------------------------------------------- 1 | ## Usage create_dir( thedir ) 2 | ## 3 | ## Creates directory thedir if it does not already exist. 4 | ## If there is an error creating directory then error() is 5 | ## invoked with message 6 | 7 | function create_dir(thedir) 8 | if isdir(thedir) == 0; 9 | [status,msg,msgid] = mkdir(thedir); 10 | if status == 0; 11 | errmsg = sprintf('Error making directory: %d : %s\n', status, 12 | msg); 13 | error(errmsg); 14 | endif 15 | endif 16 | endfunction 17 | 18 | %!test 19 | %! test_fname = tempname(); 20 | %! create_dir(test_fname); 21 | %! assert(isdir(test_fname),'expected a directory to be created'); 22 | %! rmdir(test_fname); 23 | 24 | -------------------------------------------------------------------------------- /scripts/functions/create_predict_outdir.m: -------------------------------------------------------------------------------- 1 | ## Usage create_predict_outdir (pkgdirs,models,outdir) 2 | ## 3 | ## Creates predict out directory 4 | ## If there is an error creating directory then error() is 5 | ## invoked with message 6 | 7 | function create_predict_outdir(pkgdirs,models,outdir) 8 | create_dir(outdir); 9 | 10 | for i = 1:rows(models) 11 | model_dir = strcat(outdir,filesep(),char(models(i))); 12 | create_dir(model_dir); 13 | for j = 1:rows(pkgdirs) 14 | [d,n,e] = fileparts(char(pkgdirs(j))); 15 | pkgname = strcat(n,e); 16 | dir_to_make = strcat(model_dir,filesep(),pkgname); 17 | create_dir(dir_to_make); 18 | endfor 19 | endfor 20 | endfunction 21 | 22 | %!test 23 | %! test_fname = tempname(); 24 | %! models = cell(3,1); 25 | %! models(1) = ['1fm']; 26 | %! models(2) = ['3fm']; 27 | %! models(3) = ['5fm']; 28 | %! pkgdirs = cell(2,1); 29 | %! pkgdirs(1) = ['/foo/Pkg001']; 30 | %! pkgdirs(2) = ['/foo/Pkg002']; 31 | %! pkgnames = cell(2,1); 32 | %! pkgnames(1) = ['Pkg001']; 33 | %! pkgnames(2) = ['Pkg002']; 34 | %! create_predict_outdir(pkgdirs,models,test_fname); 35 | %! for i = 1:rows(models) 36 | %! model_dir = strcat(test_fname,filesep(),char(models(i))); 37 | %! for j = 1:rows(pkgnames) 38 | %! pkg_dir = strcat(model_dir,filesep(),char(pkgnames(j))); 39 | %! assert(isdir(pkg_dir) == 1) 40 | %! endfor 41 | %! endfor 42 | %! 
rmdir(test_fname,'s'); 43 | 44 | -------------------------------------------------------------------------------- /scripts/functions/crop_png.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Crop image frame for CDeep3M 5 | 6 | 7 | NCMIR/NBCR, UCSD -- Authors: M Haberl / C Churas -- Date: 5/2018 8 | 9 | """ 10 | import sys 11 | import os 12 | import argparse 13 | import cv2 14 | import requests 15 | from joblib import Parallel, delayed 16 | from multiprocessing import Pool, TimeoutError 17 | import time 18 | 19 | INSTANCE_TYPE_URL = 'http://169.254.169.254/latest/meta-data/instance-type' 20 | 21 | def _get_number_of_tasks_to_run_based_on_instance_type(theargs): 22 | """Gets instance type and returns number of parallel 23 | tasks to run based on that value. If none are found then 24 | default value of 2 is used. 25 | """ 26 | try: 27 | r = requests.get(theargs.instancetypeurl, 28 | timeout=theargs.instancetypeurltimeout) 29 | if r.status_code == 200: 30 | if 'p3.2xlarge' in r.text: 31 | return 6 32 | if 'p3.8xlarge' in r.text: 33 | return 12 34 | if 'p3.16xlarge' in r.text: 35 | return 20 36 | except Exception as e: 37 | sys.stderr.write('Got exception checking instance type: ' + 38 | str(e) + '\n') 39 | return 2 40 | 41 | 42 | def _parse_arguments(desc, theargs): 43 | """Parses command line arguments using argparse 44 | """ 45 | help_formatter = argparse.RawDescriptionHelpFormatter 46 | parser = argparse.ArgumentParser(description=desc, 47 | formatter_class=help_formatter) 48 | parser.add_argument('inputlistfile', 49 | help='File containing list of input image paths') 50 | parser.add_argument('outputlistfile', 51 | help='File containing list of output image paths') 52 | parser.add_argument('leftxcoord', type=int, help='Left x pixel coordinate') 53 | parser.add_argument('rightxcoord', type=int, 54 | help='Right x pixel coordinate') 55 | parser.add_argument('topycoord', type=int, help='Top y pixel coordinate') 56 | parser.add_argument('bottomycoord', type=int, 57 | help='Bottom y pixel coordinate') 58 | parser.add_argument('--instancetypeurl', default=INSTANCE_TYPE_URL, 59 | help='URL to query for meta data instance type ' + 60 | '(default ' + INSTANCE_TYPE_URL + ')') 61 | parser.add_argument('--instancetypeurltimeout', default=1.0, type=float, 62 | help='Timeout in seconds for checking instancetypeurl' + 63 | ' default 1.0') 64 | return parser.parse_args(theargs) 65 | 66 | 67 | desc = """ 68 | Given a file with a list of images (inputlistfile 1st arg), 69 | this program will extract a subimage at coordinates specified 70 | on the command line and save that subimage to the file on the 71 | same line in the file with list of output images 72 | (outputlistfile 2nd arg) 73 | """ 74 | 75 | # Parse arguments 76 | theargs = _parse_arguments(desc, sys.argv[1:]) 77 | 78 | in1 = theargs.leftxcoord 79 | in2 = theargs.rightxcoord 80 | in3 = theargs.topycoord 81 | in4 = theargs.bottomycoord 82 | 83 | sys.stdout.write(str(in1) + '\n') 84 | sys.stdout.write(str(in2) + '\n') 85 | sys.stdout.write(str(in3) + '\n') 86 | sys.stdout.write(str(in4) + '\n') 87 | 88 | file = open(theargs.inputlistfile, "r") 89 | lines = [line.rstrip('\n') for line in file] 90 | file.close() 91 | 92 | file = open(theargs.outputlistfile, "r") 93 | outfiles = [line.rstrip('\n') for line in file] 94 | file.close() 95 | 96 | 97 | def processInput(x): 98 | sys.stdout.write('Loading: ' + str(lines[x]) + '\n') 99 | img = cv2.imread(lines[x], 
cv2.IMREAD_UNCHANGED) 100 | cropped = img[in3:in4, in1:in2]  # numpy indexing is [row, col] = [y, x] 101 | sys.stdout.write('Saving: ' + str(outfiles[x]) + '\n') 102 | cv2.imwrite(outfiles[x], cropped) 103 | return 104 | 105 | 106 | p_tasks = _get_number_of_tasks_to_run_based_on_instance_type(theargs) 107 | sys.stdout.write('Running ' + str(p_tasks) + ' parallel tasks\n') 108 | results = Parallel(n_jobs=p_tasks)(delayed(processInput)(i) for i in range(0, len(lines))) 109 | -------------------------------------------------------------------------------- /scripts/functions/filter_files.m: -------------------------------------------------------------------------------- 1 | function list = filter_files(list,fileformat) 2 | %filter_files 3 | % Keep only the entries in 'list' whose extension matches 'fileformat' 4 | for i=1:length(list) 5 | clear -v ext 6 | [~,~,ext] = fileparts(list(i).name); 7 | list(i).fileformat = strcmpi(ext,fileformat); 8 | if isempty(list(i).fileformat) 9 | list(i).fileformat = 0; 10 | end 11 | fileformats(i) = list(i).fileformat; 12 | end 13 | fileformats_in_folder = find(fileformats); 14 | list = list(fileformats_in_folder); 15 | end 16 | -------------------------------------------------------------------------------- /scripts/functions/full_fill.m: -------------------------------------------------------------------------------- 1 | function x=full_fill(x) 2 | count =0; 3 | %while(length(find(x==0)) > 0) 4 | while(sum(x(:)==0) > 0) 5 | %x = ReplacePixelsWithModeNew(x, find(x==0)); 6 | x = ReplacePixelsWithModeNew(x, x==0); 7 | count =count+1; 8 | %disp(['count loop = ' num2str(count)]) 9 | %imshow(label2rgb(x)) 10 | end 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /scripts/functions/get_pkg_folders.m: -------------------------------------------------------------------------------- 1 | ## Usage [pkgfolders] = get_pkg_folders ( thedir ) 2 | ## 3 | ## Returns list of package folders in thedir passed in. 4 | ## If thedir is not a directory then error() is 5 | ## invoked with a message 6 | 7 | function [pkgfolders] = get_pkg_folders(thedir) 8 | if isdir(thedir) == 0; 9 | errmsg = sprintf('%s is not a directory',thedir); 10 | error(errmsg); 11 | endif 12 | 13 | pkgfolders = glob(strcat(thedir,filesep(),'Pkg*')); 14 | 15 | % examine all entries and remove any that are NOT directories 16 | for i = 1:rows(pkgfolders) 17 | if isdir(char(pkgfolders(i))) == 0; 18 | pkgfolders(strcmp(pkgfolders,pkgfolders(i))) = []; 19 | endif 20 | endfor 21 | endfunction 22 | 23 | %!error get_pkg_folders(''); 24 | 25 | %!error get_pkg_folders(program_invocation_name()); 26 | 27 | %!test 28 | %! test_fname = tempname(); 29 | %! mkdir(test_fname); 30 | %! [res] = get_pkg_folders(test_fname); 31 | %! assert(columns(res) == 0); 32 | %! one_pkg = strcat(test_fname,filesep(),'Pkg001'); 33 | %! mkdir(one_pkg); 34 | %! [res] = get_pkg_folders(test_fname); 35 | %! assert(rows(res) == 1); 36 | %! assert(char(res(1)) == one_pkg); 37 | %! two_pkg = strcat(test_fname,filesep(),'Pkg002'); 38 | %! mkdir(two_pkg); 39 | %! [res] = get_pkg_folders(test_fname); 40 | %! assert(rows(res) == 2); 41 | %! de_aug_file = strcat(test_fname,filesep(),'de_augmentation_info.mat'); 42 | %! f_out = fopen(de_aug_file,'w'); 43 | %! fprintf(f_out,'hi\n'); 44 | %! fclose(f_out); 45 | %! [res] = get_pkg_folders(test_fname); 46 | %! assert(rows(res) == 2); 47 | %! pkg_file = strcat(test_fname,filesep(),'Pkg003'); 48 | %! f_out = fopen(pkg_file,'w'); 49 | %! fprintf(f_out,'hi\n'); 50 | %! 
fclose(f_out); 51 | %! [res] = get_pkg_folders(test_fname); 52 | %! assert(rows(res) == 2); 53 | %! rmdir(test_fname,'s'); 54 | 55 | -------------------------------------------------------------------------------- /scripts/functions/get_train_basemodel_names.m: -------------------------------------------------------------------------------- 1 | ## Usage [pkgfolders] = get_train_basemodel_names ( thedir ) 2 | ## 3 | ## Returns list of *fm directories in thedir passed in. 4 | ## If thedir is not a directory then error() is 5 | ## invoked with a message 6 | 7 | function [trainfolders] = get_train_basemodel_names(thedir) 8 | if isdir(thedir) == 0; 9 | errmsg = sprintf('%s is not a directory',thedir); 10 | error(errmsg); 11 | endif 12 | 13 | trainfolders = glob(strcat(thedir,filesep(),'*fm')); 14 | 15 | % examine all entries and remove any that are NOT directories 16 | for i = 1:rows(trainfolders) 17 | if isdir(char(trainfolders(i))) == 0; 18 | trainfolders(strcmp(trainfolders,trainfolders(i))) = []; 19 | else 20 | [dir,name,ext] = fileparts(char(trainfolders(i))); 21 | trainfolders(i) = [strcat(name,ext)]; 22 | endif 23 | endfor 24 | endfunction 25 | 26 | %!error get_train_basemodel_names(''); 27 | 28 | %!error get_train_basemodel_names(program_invocation_name()); 29 | 30 | %!test 31 | %! test_fname = tempname(); 32 | %! mkdir(test_fname); 33 | %! [res] = get_train_basemodel_names(test_fname); 34 | %! assert(columns(res) == 0); 35 | %! one_fm = strcat(test_fname,filesep(),'1fm'); 36 | %! mkdir(one_fm); 37 | %! [res] = get_train_basemodel_names(test_fname); 38 | %! assert(rows(res) == 1); 39 | %! assert(char(res(1)) == '1fm'); 40 | %! three_fm = strcat(test_fname,filesep(),'3fm'); 41 | %! mkdir(three_fm); 42 | %! [res] = get_train_basemodel_names(test_fname); 43 | %! assert(rows(res) == 2); 44 | %! de_aug_file = strcat(test_fname,filesep(),'de_augmentation_info.mat'); 45 | %! f_out = fopen(de_aug_file,'w'); 46 | %! fprintf(f_out,'hi\n'); 47 | %! fclose(f_out); 48 | %! [res] = get_train_basemodel_names(test_fname); 49 | %! assert(rows(res) == 2); 50 | %! pkg_file = strcat(test_fname,filesep(),'6fm'); 51 | %! f_out = fopen(pkg_file,'w'); 52 | %! fprintf(f_out,'hi\n'); 53 | %! fclose(f_out); 54 | %! [res] = get_train_basemodel_names(test_fname); 55 | %! assert(rows(res) == 2); 56 | %! rmdir(test_fname,'s'); 57 | 58 | -------------------------------------------------------------------------------- /scripts/functions/get_variation_folders.m: -------------------------------------------------------------------------------- 1 | ## Usage [vfolders] = get_variation_folders ( thedir ) 2 | ## 3 | ## Returns list of variation folders found within thedir 4 | ## variable passed in. The vfolders is a struct following 5 | ## same layout as that returned by dir. 
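## Usage sketch (the path below is illustrative only): for a folder
## containing v1/ ... v16/ variation subdirectories,
##   vfolders = get_variation_folders('/out/1fm/Pkg001');
## returns one struct entry per v* subdirectory, e.g. vfolders(1).name == 'v1'.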
6 | 7 | ## If thedir is not a directory, error() is invoked 8 | 9 | function [vfolders] = get_variation_folders(thedir) 10 | if isdir(thedir) == 0; 11 | errmsg = sprintf('%s is not a directory',thedir); 12 | error(errmsg); 13 | endif 14 | 15 | folder=fullfile(thedir); 16 | folderlist = dir(folder); 17 | vfolders = struct("name",{},"date",{},"bytes",{},"isdir",{},"datenum",{},"statinfo",{}); 18 | vcntr = 1; 19 | for i = 1:rows(folderlist) 20 | if folderlist(i).isdir == 1; 21 | if strcmp(folderlist(i).name(1:1),'v') == 1; 22 | vfolders(vcntr++) = folderlist(i); 23 | endif 24 | endif 25 | endfor 26 | endfunction 27 | 28 | %!error get_variation_folders(''); 29 | 30 | %!error get_variation_folders(program_invocation_name()); 31 | 32 | -------------------------------------------------------------------------------- /scripts/functions/hmtransf.m: -------------------------------------------------------------------------------- 1 | function img = hmtransf(img, t, connectiv) img = imcomplement(img); img = imreconstruct((img-double(t)), img, connectiv); img = imcomplement(img); end -------------------------------------------------------------------------------- /scripts/functions/imageimporter.m: -------------------------------------------------------------------------------- 1 | function [imgstack] = imageimporter(img_path) 2 | %imageimporter: loads image data from folder or from an individual image stack 3 | % 4 | % 5 | % 6 | %----------------------------------------------------------------------------- 7 | %% CDeep3M -- NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 10/2017 8 | %----------------------------------------------------------------------------- 9 | disp('Image importer loading ... '); 10 | disp(img_path); 11 | % check if a folder of png/tif files or a single stack to load 12 | [Dir,name,ext] = fileparts(img_path); 13 | if ~isempty(ext) 14 | if ~isempty(strfind(ext,'.h5')) 15 | fprintf('Reading H5 image file %s\n', img_path); 16 | hinfo = h5info(img_path); 17 | %h5read(img_path, hinfo.GroupHierarchy.Datasets.Name); 18 | imgstack = h5read(img_path, ['/', hinfo.Datasets.Name]); 19 | imgstack=permute(imgstack,[2 3 1]); %To match the same format as TIF or PNG images 20 | elseif ~isempty(strfind(ext,'.tif')) 21 | info = imfinfo(img_path); 22 | fprintf('Reading image stack with %d images\n',size(info,1)); 23 | for idx =1:size(info,1) 24 | imgstack(:,:,idx) = imread(img_path,'index',idx); 25 | end 26 | 27 | end 28 | 29 | elseif isdir(img_path) 30 | file_list = read_files_in_folder(img_path); 31 | png_list = filter_files(file_list,'.png'); 32 | tif_list = filter_files(file_list,'.tif'); 33 | if size(tif_list,1)+size(png_list,1) == 0, disp('No Tifs or PNGs found in training directory');return; 34 | else 35 | [~, type] = max([size(tif_list,1),size(png_list,1)]); %only read tif or pngs if ambiguous 36 | if type==1, file_list = tif_list; elseif type==2, file_list = png_list; end 37 | for idx =1:size(file_list,1) 38 | filename = fullfile(img_path,file_list(idx).name); 39 | fprintf('Reading file: %s\n', filename); 40 | imgstack(:,:,idx) = imread(filename); 41 | end 42 | end 43 | 44 | else 45 | error('No images found'); 46 | return 47 | end 48 | 49 | x_size=size(imgstack,1);y_size=size(imgstack,2); 50 | if x_size<325 || y_size<325 51 | temp_img = zeros(325,325,size(imgstack,3)); 52 | temp_img(1:size(imgstack,1),1:size(imgstack,2),:) = imgstack; 53 | imgstack = temp_img; 54 | end 55 | 56 | end 57 | 58 | -------------------------------------------------------------------------------- 
/scripts/functions/nanmean.m: -------------------------------------------------------------------------------- 1 | ##Copyright (C) 2001 Paul Kienzle 2 | ## 3 | ## This program is free software; you can redistribute it and/or modify 4 | ## it under the terms of the GNU General Public License as published by 5 | ## the Free Software Foundation; either version 2 of the License, or 6 | ## (at your option) any later version. 7 | ## 8 | ## This program is distributed in the hope that it will be useful, 9 | ## but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | ## GNU General Public License for more details. 12 | ## 13 | ## You should have received a copy of the GNU General Public License 14 | ## along with this program; if not, write to the Free Software 15 | ## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 | 17 | ## v = nanmean(X [, dim]); 18 | ## nanmean is identical to the mean function except that NaN values are 19 | ## ignored. If all values are NaN, the mean is returned as NaN. 20 | ## [Is this behaviour compatible?] 21 | ## 22 | ## See also: nanmin, nanmax, nansum, nanmedian 23 | function v = nanmean (X, varargin) 24 | if nargin < 1 25 | usage ("v = nanmean(X [, dim])"); 26 | else 27 | n = sum (!isnan(X), varargin{:}); 28 | n(n == 0) = NaN; 29 | X(isnan(X)) = 0; 30 | v = sum (X, varargin{:}) ./ n; 31 | endif 32 | endfunction 33 | 34 | -------------------------------------------------------------------------------- /scripts/functions/overlay.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Overlay image frame for CDeep3M 5 | NCMIR/NBCR, UCSD -- Authors: M Haberl -- Date: 2/2019 6 | 7 | 8 | Example: 9 | python overlay.py ~/rawimage/image_001.png ~/pediction/segmented_001.png ~/overlay_001.png 10 | 11 | 12 | """ 13 | 14 | import sys 15 | import cv2 16 | import numpy as np 17 | import argparse 18 | 19 | 20 | def _parse_arguments(desc, theargs): 21 | """Parses command line arguments using argparse 22 | """ 23 | help_formatter = argparse.RawDescriptionHelpFormatter 24 | parser = argparse.ArgumentParser(description=desc, 25 | formatter_class=help_formatter) 26 | parser.add_argument('inputrawimage_name', 27 | help='File containing list of input raw image name') 28 | parser.add_argument('inputsegmentedimage_name', 29 | help='File containing list of input segmented image name') 30 | parser.add_argument('outputoverlayimage_name', 31 | help='File containing list of output overlay image name') 32 | return parser.parse_args(theargs) 33 | 34 | desc = """ 35 | """ 36 | 37 | # Parse arguments 38 | theargs = _parse_arguments(desc, sys.argv[1:]) 39 | 40 | 41 | raw_image = cv2.imread(theargs.inputrawimage_name, cv2.IMREAD_UNCHANGED) 42 | #raw_stack = np.dstack((raw_image,raw_image,raw_image)) 43 | 44 | segmented = cv2.imread(theargs.inputsegmentedimage_name, cv2.IMREAD_UNCHANGED) 45 | 46 | # overlay 47 | alpha = 0.45 48 | overlayed = cv2.addWeighted(segmented, alpha , raw_image, 1 - alpha, 0) 49 | 50 | sys.stdout.write('Saving: ' + str(theargs.outputoverlayimage_name) + '\n') 51 | cv2.imwrite(theargs.outputoverlayimage_name, overlayed) 52 | 53 | 54 | -------------------------------------------------------------------------------- /scripts/functions/read_files_in_folder.m: -------------------------------------------------------------------------------- 1 | function [fileList, file_list_length] = 
read_files_in_folder(input_directory) 2 | %Read_files_in_folder 3 | % Get the complete list of files, excluding hidden files and any subfolders, in the input folder 4 | % 5 | % 6 | % INPUT FORMAT 7 | % -------------------------- 8 | % (InputDirectory) 9 | % 10 | % OUTPUT FORMAT 11 | % -------------------------- 12 | % [fileList, file_list_length] 13 | % 14 | % 15 | % -------------------------- 16 | % -- National Center for Microscopy and Imaging Research, NCMIR 17 | % -- Matthias Haberl -- San Diego, 02/2016 18 | 19 | fileList = dir(input_directory); 20 | 21 | %# remove all folders 22 | isBadFile = cat(1,fileList.isdir); %# all directories are bad 23 | 24 | %# loop to identify hidden files 25 | for iFile = find(~isBadFile)' %'# loop only non-dirs 26 | %# on OSX, hidden files start with a dot 27 | isBadFile(iFile) = strcmp(fileList(iFile).name(1),'.'); 28 | if ~isBadFile(iFile) && ispc 29 | %# check for hidden Windows files - only works on Windows 30 | [~,stats] = fileattrib(fullfile(input_directory,fileList(iFile).name)); 31 | if stats.hidden 32 | isBadFile(iFile) = true; 33 | end 34 | end 35 | end 36 | 37 | %# remove bad files 38 | fileList(isBadFile) = []; 39 | 40 | file_list_length = length(fileList); 41 | 42 | 43 | end 44 | 45 | -------------------------------------------------------------------------------- /scripts/functions/run_train.m: -------------------------------------------------------------------------------- 1 | ## Usage run_train(arg_list) 2 | ## 3 | ## Sets up directory and scripts to run training on CDeep3M model by caffe. 4 | ## arg_list should contain a two or three element cell array: the first value 5 | ## set to the path to the augmented training data, the second the 6 | ## destination output directory, and an optional third the validation data 7 | ## 8 | ## Example: arg_list = 9 | ## { 10 | ## [1,1] = /foo/traindata 11 | ## [2,1] = /foo/output 12 | ## [3,1] = /foo/validationdata 13 | ## } 14 | ## 15 | 16 | function run_train(arg_list) 17 | % Runs CDeep3M train using caffe. 18 | % Usage runtrain(cell array of strings) 19 | % by first verifying first argument is path to training data and 20 | % then copying over models under model/ directory to output directory 21 | % suffix for hdf5 files 22 | H_FIVE_SUFFIX='.h5'; 23 | prog_name = program_name(); 24 | base_dir = fileparts(make_absolute_filename(program_invocation_name())); 25 | 26 | if numel(arg_list) < 2; 27 | fprintf('\n'); 28 | msg = sprintf('%s expects at least two command line arguments\n\n', prog_name); 29 | msg = strcat(msg, 30 | sprintf('Usage: %s <train data dir> <output dir> [<validation data dir>] (validation data is optional)\n', 31 | prog_name)); 32 | error(msg); 33 | return; 34 | endif 35 | 36 | in_img_path = make_absolute_filename(arg_list{1}); 37 | 38 | if isdir(in_img_path) == 0; 39 | error('First argument is not a directory and it''s supposed to be'); 40 | endif 41 | 42 | outdir = make_absolute_filename(arg_list{2}); 43 | 44 | if numel(arg_list) > 2; validation_img_path = make_absolute_filename(arg_list{3}); else; validation_img_path = ''; endif 45 | 46 | if !isempty(validation_img_path) && isdir(validation_img_path) == 0; 47 | error('Third argument is not a directory and it''s supposed to be'); 48 | endif 49 | % --------------------------------------------------------------------------- 50 | % Examine input training data and generate list of h5 files 51 | % --------------------------------------------------------------------------- 52 | fprintf(stdout(), 'Verifying input training data is valid ... 
'); 53 | [status, errmsg, train_file, valid_file] = verify_and_create_train_file(in_img_path, outdir, validation_img_path); 54 | 55 | if status != 0; 56 | error(errmsg); 57 | endif 58 | 59 | fprintf(stdout(),'success\n'); 60 | 61 | % ---------------------------------------------------------------------------- 62 | % Create output directory and copy over model files and 63 | % adjust configuration files 64 | % ---------------------------------------------------------------------------- 65 | fprintf(stdout(),'Copying over model files and creating run scripts ... '); 66 | 67 | [onefm_dest,threefm_dest,fivefm_dest] = copy_over_allmodels(base_dir,outdir); 68 | max_iterations = 10000; 69 | update_solverproto_txt_file(outdir,'1fm'); 70 | update_solverproto_txt_file(outdir,'3fm'); 71 | update_solverproto_txt_file(outdir,'5fm'); 72 | 73 | update_train_val_prototxt(outdir,'1fm',train_file,valid_file); 74 | update_train_val_prototxt(outdir,'3fm',train_file,valid_file); 75 | update_train_val_prototxt(outdir,'5fm',train_file,valid_file); 76 | 77 | copy_version(base_dir, outdir); 78 | write_train_readme(outdir); 79 | fprintf(stdout(),'success\n\n'); 80 | 81 | fprintf(stdout(),'A new directory has been created: %s\n', outdir); 82 | fprintf(stdout(),'In this directory are 3 directories 1fm,3fm,5fm which\n'); 83 | fprintf(stdout(),'correspond to 3 caffe models that need to be trained\n'); 84 | 85 | endfunction 86 | 87 | -------------------------------------------------------------------------------- /scripts/functions/update_solverproto_txt_file.m: -------------------------------------------------------------------------------- 1 | ## Usage [train_model_dest] = update_solverproto_txt_file(outdir,model) 2 | ## 3 | ## Updates the solver.prototxt file in <outdir>/<model> by adjusting the 4 | ## snapshot_prefix path. The new path is set to 5 | ## <outdir>/<model>/trainedmodel/<model>_classifer 6 | ## 7 | ## Function also creates a trainedmodel directory under the <outdir>/<model> directory 8 | ## like so: 9 | ## <outdir>/<model>/trainedmodel 10 | ## 11 | ## The <outdir>/<model>/trainedmodel/<model>_classifer path is returned 12 | ## via the train_model_dest variable 13 | ## 14 | 15 | function [train_model_dest] = update_solverproto_txt_file(outdir,model) 16 | solver_prototxt = strcat(outdir,filesep(),model,filesep(), 'solver.prototxt'); 17 | s_data = fileread(solver_prototxt); 18 | solver_out = fopen(solver_prototxt,"w"); 19 | lines = strsplit(s_data,'\n'); 20 | model_dir = strcat(outdir,filesep(),model,filesep(),'trainedmodel'); 21 | create_dir(model_dir); 22 | train_model_dest = strcat(model_dir, 23 | filesep(),model,'_classifer'); 24 | for j = 1:columns(lines) 25 | if index(char(lines(j)),'snapshot_prefix:') == 1; 26 | fprintf(solver_out,'snapshot_prefix: "%s"\n',train_model_dest); 27 | else 28 | fprintf(solver_out,'%s\n',char(lines(j))); 29 | endif 30 | endfor 31 | fclose(solver_out); 32 | endfunction 33 | 34 | %!error update_solverproto_txt_file(); 35 | 36 | # test with valid solver.prototxt 37 | %!test 38 | %! test_fname = tempname(); 39 | %! create_dir(test_fname); 40 | %! model = '1fm'; 41 | %! create_dir(strcat(test_fname,filesep(),model)); 42 | %! solver = strcat(test_fname,filesep(),model,filesep(),'solver.prototxt'); 43 | %! f_out = fopen(solver,'w'); 44 | %! fprintf(f_out,'#hi\n#The\ntest_interval: 20\nsnapshot_prefix: "hi"\n'); 45 | %! fprintf(f_out,'#solver\nsolver mdoe: GPU\n'); 46 | %! fclose(f_out); 47 | %! tmd = update_solverproto_txt_file(test_fname,model); 48 | %! tdir = strcat(test_fname,filesep(),model,filesep(),'trainedmodel'); 49 | %! assert(isdir(tdir)); 50 | %! s_data = fileread(solver); 51 | %! 
lines = strsplit(s_data,'\n'); 52 | %! assert(char(lines(4)) == strcat('snapshot_prefix: "',tmd,'"')); 53 | %! rmdir(test_fname,'s'); 54 | -------------------------------------------------------------------------------- /scripts/functions/update_train_val_prototxt.m: -------------------------------------------------------------------------------- 1 | ## Usage update_train_val_prototxt(outdir, model, train_file, valid_file) 2 | ## 3 | ## Updates the <outdir>/<model>/train_val.prototxt file 4 | ## replacing lines with 'train_file.txt' with value of train_file parameter 5 | ## like so: 6 | ## data_source: "train_file" 7 | ## and replacing lines with 'valid_file.txt' with value of valid_file parameter 8 | ## like so: 9 | ## data_source: "valid_file" 10 | ## 11 | 12 | function update_train_val_prototxt(outdir,model,train_file,valid_file) 13 | % updates data_source in train_val.prototxt file 14 | train_val_prototxt = strcat(outdir,filesep(),model,filesep(), 15 | 'train_val.prototxt'); 16 | t_data = fileread(train_val_prototxt); 17 | lines = strsplit(t_data,'\n'); 18 | train_out = fopen(train_val_prototxt,"w"); 19 | for j = 1:columns(lines) 20 | if index(char(lines(j)),'train_file.txt') >= 1; 21 | fprintf(train_out,' data_source: "%s"\n',train_file); 22 | elseif index(char(lines(j)), 'valid_file.txt') >= 1; 23 | fprintf(train_out,' data_source: "%s"\n', valid_file); 24 | else 25 | fprintf(train_out,'%s\n',char(lines(j))); 26 | endif 27 | endfor 28 | fclose(train_out); 29 | endfunction 30 | 31 | %!error update_train_val_prototxt(); 32 | 33 | # test with valid train_val.prototxt 34 | %!test 35 | %! test_fname = tempname(); 36 | %! create_dir(test_fname); 37 | %! model = '1fm'; 38 | %! create_dir(strcat(test_fname,filesep(),model)); 39 | %! trainv = strcat(test_fname,filesep(),model,filesep(),'train_val.prototxt'); 40 | %! f_out = fopen(trainv,'w'); 41 | %! fprintf(f_out,'hi\nThe\ntest_interval: 20\n data_source: "train_file.txt"\n data_source: "valid_file.txt"\n'); 42 | %! fclose(f_out); 43 | %! update_train_val_prototxt(test_fname,model,'boo', 'haa'); 44 | %! t_data = fileread(trainv); 45 | %! lines = strsplit(t_data,'\n'); 46 | %! assert(char(lines(4)) == ' data_source: "boo"') 47 | %! assert(char(lines(5)) == ' data_source: "haa"') 48 | %! assert(char(lines(2)) == 'The'); 49 | %! rmdir(test_fname,'s'); 50 | 51 | -------------------------------------------------------------------------------- /scripts/functions/verify_and_create_train_file.m: -------------------------------------------------------------------------------- 1 | ## Usage [status, errmsg, train_file, valid_file] = verify_and_create_train_file ( train_input, outdir, valid_input="" ) 2 | ## 3 | ## First looks for files ending with .h5 in the train_input directory 4 | ## and verifies there are 16 of them. Second, it creates a 5 | ## train_file.txt in the outdir directory that has a list 6 | ## of full paths to these h5 files. 7 | ## 8 | ## Upon success train_file will have the path to the train_file.txt created 9 | ## by this function. 10 | ## 11 | ## If there is an error status will be set to a non-zero numeric 12 | ## value and errmsg will explain the issue. 
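##
## Usage sketch (paths are illustrative only):
##   [status, errmsg, tf, vf] = verify_and_create_train_file('/d/augtrain', '/d/out');
##   ## on success status == 0 and tf == '/d/out/train_file.txt' listing the
##   ## 16 .h5 files; with no validation directory given, vf falls back to tf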
13 | 14 | function [status,errmsg, train_file, valid_file] = verify_and_create_train_file (train_input, outdir, valid_input="") 15 | errmsg = ''; 16 | train_file = ''; 17 | valid_file = ''; 18 | status = 0; 19 | H_FIVE_SUFFIX='.h5'; 20 | 21 | if isdir(train_input) == 0; 22 | errmsg = sprintf('%s is not a directory', train_input); 23 | status = 1; 24 | return; 25 | endif 26 | 27 | train_files = glob(strcat(train_input, filesep(),'*', H_FIVE_SUFFIX)); 28 | 29 | if rows(train_files) != 16; 30 | errmsg = sprintf('Expecting 16 .h5 files, but got: %d', rows(train_files)); 31 | status = 3; 32 | return; 33 | endif 34 | 35 | create_dir(outdir); 36 | 37 | train_file = strcat(outdir, filesep(),'train_file.txt'); 38 | train_out = fopen(train_file, "w"); 39 | for i = 1:rows(train_files) 40 | fprintf(train_out,'%s\n',char(train_files(i))); 41 | endfor 42 | fclose(train_out); 43 | 44 | % If user specified validation file 45 | if !isempty(valid_input); 46 | 47 | if isdir(valid_input) == 0; 48 | errmsg = sprintf('%s is not a directory', valid_input); 49 | status = 1; 50 | return; 51 | endif 52 | 53 | valid_files = glob(strcat(valid_input, filesep(),'*', H_FIVE_SUFFIX)); 54 | 55 | valid_file = strcat(outdir, filesep(),'valid_file.txt'); 56 | valid_out = fopen(valid_file, "w"); 57 | for i = 1:rows(valid_files) 58 | fprintf(valid_out,'%s\n',char(valid_files(i))); 59 | endfor 60 | fclose(valid_out); 61 | 62 | else 63 | valid_file = train_file; 64 | endif 65 | endfunction 66 | 67 | %!test 68 | %! [status,errmsg, tf] = verify_and_create_train_file('',''); 69 | %! assert(status, 1); 70 | 71 | %!test 72 | %! test_fname = tempname(); 73 | %! mkdir(test_fname); 74 | %! dest_dir = strcat(test_fname,filesep(),'out'); 75 | %! mkdir(dest_dir); 76 | %! for i = 1:16 77 | %! hfile = sprintf('%s%sfoo_v%d.h5',test_fname,filesep(),i); 78 | %! fout = fopen(hfile, "w"); 79 | %! fprintf(fout,"hi\n"); 80 | %! fclose(fout); 81 | %! endfor 82 | %! [status,errmsg, tf] = verify_and_create_train_file(test_fname,dest_dir); 83 | %! assert(status, 0); 84 | %! assert(errmsg, ''); 85 | %! assert(tf, strcat(dest_dir,filesep(),"train_file.txt")); 86 | %! rmdir(test_fname, 's'); 87 | -------------------------------------------------------------------------------- /scripts/functions/write_train_readme.m: -------------------------------------------------------------------------------- 1 | ## Usage write_train_readme(outdir) 2 | ## 3 | ## Writes out a <outdir>/readme.txt file with text 4 | ## describing the contents of this train folder 5 | ## 6 | 7 | function write_train_readme(outdir) 8 | readme = strcat(outdir,filesep(), 'readme.txt'); 9 | out = fopen(readme, "w"); 10 | fprintf(out, "\nThis directory contains files and directories needed to\n"); 11 | fprintf(out, "run CDeep3M training using caffe. 
Below is a description\n"); 12 | fprintf(out, "of the key files and directories:\n\n"); 13 | fprintf(out, "1fm/,3fm/,5fm/ -- Model directories that contain results from training via caffe.\n"); 14 | fprintf(out, "<model>/trainedmodel -- Contains .caffemodel files that are the actual trained models\n"); 15 | fprintf(out, "parallel.jobs -- Input file to GNU parallel to run caffe training jobs in parallel\n"); 16 | fprintf(out, "VERSION -- Version of Cdeep3M used\n"); 17 | fprintf(out, "train_file.txt -- Paths of augmented training data, used by caffe\n"); 18 | fprintf(out, "valid_file.txt -- Paths of augmented validation data, used by caffe\n"); 19 | fprintf(out, "\n"); 20 | fclose(out); 21 | endfunction 22 | 23 | %!error write_train_config(); 24 | 25 | # test with valid directory 26 | %!test 27 | %! test_fname = tempname(); 28 | %! create_dir(test_fname); 29 | %! write_train_readme(test_fname); 30 | %! readme_file = strcat(test_fname,filesep(),'readme.txt'); 31 | %! assert(exist(readme_file, "file"),2); 32 | %! rmdir(test_fname,'s'); 33 | -------------------------------------------------------------------------------- /scripts/generate_16_average_probs.m: -------------------------------------------------------------------------------- 1 | function average=generate_16_average_probs(folder) 2 | %addpath('../../script') 3 | for i=1:16 4 | folder_name=[folder filesep 'v' num2str(i)]; 5 | prob=combinePredicctionSlice(folder_name); 6 | data{i}=prob; 7 | end 8 | 9 | average=de_augment_data(data); 10 | tiff_file_save=[folder filesep 'ave_16.tiff']; 11 | if exist(tiff_file_save, 'file'),delete(tiff_file_save); end 12 | 13 | mx_im=max(average(:)); 14 | for i=1:size(average,3) 15 | b=average(:,:,i); 16 | im=255-uint8(b*(255/mx_im)); 17 | imwrite(im,tiff_file_save,'WriteMode','append'); 18 | disp(['write #' num2str(i) ' image ... ' tiff_file_save]); 19 | end 20 | end -------------------------------------------------------------------------------- /scripts/label2rgb3d.m: -------------------------------------------------------------------------------- 1 | function rgb3d=label2rgb3d(varargin) 2 | [label,map,zerocolor,order,fcnflag] = parse_inputs(varargin{:}); 3 | %label= 3d image with labels 4 | % map= specified color map 5 | %================================== 6 | numregion = double(max(label(:))); 7 | 8 | % If MAP is a function, evaluate it. Make sure that the evaluated function 9 | % returns a valid colormap. 10 | if fcnflag == 1 11 | cmap = feval(map, numregion); 12 | if ~isreal(cmap) || any(cmap(:) > 1) || any(cmap(:) < 0) || ... 13 | ~isequal(size(cmap,2),3) || size(cmap,1) < 1 14 | eid = sprintf('Images:%s:functionReturnsInvalidColormap',mfilename); 15 | msg = 'function handle MAP must return a n x 3 colormap array'; 16 | error(eid,'%s',msg); 17 | end 18 | else 19 | cmap = map; 20 | end 21 | 22 | % If ORDER is set to 'shuffle', save original state. The SHUFFLE keyword 23 | % uses the same state every time it is called. After shuffling is completed, 24 | % go back to original state. 25 | if isequal(order,'shuffle') 26 | S = rand('state'); 27 | rand('state', 0); 28 | index = randperm(numregion); 29 | cmap = cmap(index,:,:); 30 | rand('state', S); 31 | end 32 | 33 | % Issue a warning if the zerocolor (boundary color) matches the color of one 34 | % of the regions. 
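% (For instance, with the default zerocolor = [1 1 1], any colormap row that
% is also pure white would make that region indistinguishable from the
% background; that collision is what the loop below flags.)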
35 | for i=1:numregion 36 | if isequal(zerocolor,cmap(i,:)) 37 | wid= sprintf('Images:%s:zerocolorSameAsRegionColor',mfilename); 38 | msg= sprintf('Region number %d has the same color as the ZEROCOLOR.',i); 39 | warning(wid,'%s',msg); 40 | end 41 | end 42 | cmap = [zerocolor;cmap]; 43 | 44 | % if label is of type double, need to pass 'label + 1' into IND2RGB. 45 | % IND2RGB does not like double arrays containing zero values. 46 | if isa(label, 'double') 47 | rgb3d = ind2rgb3d(label + 1, cmap); 48 | else 49 | rgb3d = ind2rgb3d(label, cmap); 50 | end 51 | 52 | %====================================================================== 53 | %====================================================================== 54 | function [L, Map, Zerocolor, Order, Fcnflag] = parse_inputs(varargin) 55 | % L label 3d matrix: matrix containing non-negative values. 56 | % Map colormap: name of standard colormap, user-defined map, function 57 | % handle. 58 | % Zerocolor RGB triple or Colorspec 59 | % Order keyword if specified: 'shuffle' or 'noshuffle' 60 | % Fcnflag flag indicating that Map is a function 61 | 62 | valid_order = {'shuffle', 'noshuffle'}; 63 | iptchecknargin(1,4,nargin,mfilename); 64 | 65 | % set defaults 66 | L = varargin{1}; 67 | Map = 'jet'; 68 | Zerocolor = [1 1 1]; 69 | Order = 'noshuffle'; 70 | Fcnflag = 0; 71 | 72 | % parse inputs 73 | if nargin > 1 74 | Map = varargin{2}; 75 | end 76 | if nargin > 2 77 | Zerocolor = varargin{3}; 78 | end 79 | if nargin > 3 80 | Order = varargin{4}; 81 | end 82 | 83 | % error checking for L 84 | iptcheckinput(L,{'numeric' 'logical'}, ... 85 | {'real' 'nonsparse' 'finite' 'nonnegative' 'integer'}, ... 86 | mfilename,'L',1); 87 | 88 | % error checking for Map 89 | [fcn, fcnchk_msg] = fcnchk(Map); 90 | if isempty(fcnchk_msg) 91 | Map = fcn; 92 | Fcnflag = 1; 93 | else 94 | if isnumeric(Map) 95 | if ~isreal(Map) || any(Map(:) > 1) || any(Map(:) < 0) || ... 96 | ~isequal(size(Map,2), 3) || size(Map,1) < 1 97 | eid = sprintf('Images:%s:invalidColormap',mfilename); 98 | msg = 'Invalid entry for MAP.'; 99 | error(eid,'%s',msg); 100 | end 101 | else 102 | eid = sprintf('Images:%s:invalidFunctionforMAP',mfilename); 103 | error(eid,'%s',fcnchk_msg); 104 | end 105 | end 106 | 107 | % error checking for Zerocolor 108 | if ~ischar(Zerocolor) 109 | % check if Zerocolor is a RGB triple 110 | if ~isreal(Zerocolor) || ~isequal(size(Zerocolor),[1 3]) || ... 111 | any(Zerocolor> 1) || any(Zerocolor < 0) 112 | eid = sprintf('Images:%s:invalidZerocolor',mfilename); 113 | msg = 'Invalid RGB triple entry for ZEROCOLOR.'; 114 | error(eid,'%s',msg); 115 | end 116 | else 117 | [cspec, msg] = cspecchk(Zerocolor); 118 | if ~isempty(msg) 119 | eid = sprintf('Images:%s:notInColorspec',mfilename); 120 | error(eid,'%s',msg); 121 | else 122 | Zerocolor = cspec; 123 | end 124 | end 125 | 126 | % error checking for Order 127 | idx = strmatch(lower(Order), valid_order); 128 | eid = sprintf('Images:%s:invalidEntryForOrder',mfilename); 129 | if isempty(idx) 130 | msg = 'Valid entries for ORDER are ''shuffle'' or ''noshuffle''.'; 131 | error(eid,'%s',msg); 132 | elseif length(idx) > 1 133 | msg = sprintf('Ambiguous string for ORDER: %s.', Order); 134 | error(eid,'%s',msg); 135 | else 136 | Order = valid_order{idx}; 137 | end 138 | 139 | 140 | %================================================================ 141 | %================================================================= 142 | function [rout,g,b] = ind2rgb3d(a,cm) 143 | %IND2RGB Convert indexed image to RGB image. 
144 | % RGB = IND2RGB(X,MAP) converts the 3d matrix X and corresponding 145 | % colormap MAP to RGB (truecolor) format. 146 | % 147 | % Class Support 148 | % ------------- 149 | % X can be of class uint8, uint16, or double. RGB is an 150 | % M-by-N-by-3 array of class double. 151 | % 152 | % See also IND2GRAY, RGB2IND (in the Image Processing Toolbox). 153 | 154 | 155 | if ~isa(a, 'double') 156 | a = double(a)+1; % Switch to one based indexing 157 | end 158 | 159 | error(nargchk(2,2,nargin)); 160 | 161 | % Make sure A is in the range from 1 to size(cm,1) 162 | a = max(1,min(a,size(cm,1))); 163 | 164 | % Extract r,g,b components 165 | r = zeros(size(a)); r(:) = cm(a,1); 166 | g = zeros(size(a)); g(:) = cm(a,2); 167 | b = zeros(size(a)); b(:) = cm(a,3); 168 | 169 | if nargout==3, 170 | rout = r; 171 | else 172 | rout = zeros([size(r),3]); 173 | rout(:,:,:,1) = r; 174 | rout(:,:,:,2) = g; 175 | rout(:,:,:,3) = b; 176 | end 177 | 178 | 179 | 180 | 181 | -------------------------------------------------------------------------------- /scripts/post_processing/Apply3DWatershed.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | %% Apply3DWatershed 3 | % 4 | % 5 | % Syntax : Apply3DWatershed /example/rawimage/image.tiff predict/ensemble.tiff hm3d:0.15 6 | % make sure StartPostprocessing and EnsemblePredictions ran first which 7 | % created the 'ensemble.tiff' 8 | % 9 | % -> Argument 1: raw image file (either as a tiff stack or as a h5 file) 10 | % -> Argument 2: location of the 'ensemble.tiff' 11 | % -> optional input: hm3d:0.15 12 | % 13 | % Output is written into the folder of the raw image input, as one 14 | % segmented file, and the subsequently colorized segmented file. 15 | % 16 | % ----------------------------------------------------------------------------- 17 | %% CDeep3M -- NCMIR, UCSD -- Author: M Haberl -- Date: 10/2017 18 | % ----------------------------------------------------------------------------- 19 | % 20 | 21 | %% Initialize 22 | pkg load hdf5oct 23 | pkg load image 24 | tic %start the timer so the elapsed-time report at the end has a reference 25 | 26 | if exist('readvars','var') %ran StartPostprocessing before, using output and variables from here 27 | for i = 1:floor(numel(arg_list)/2) 28 | filelist{i} = arg_list{(2*i)+1}; 29 | raw_img_missing =1; 30 | end 31 | 32 | else %% Regular call of this script 33 | arg_list = argv (); 34 | raw_image_full_path = arg_list{1}; %first is the raw img file, then all folders are input directories 35 | outfolder = fileparts(raw_image_full_path); %use the directory of the rawimage for the output 36 | merged_file_saved = arg_list{2}; 37 | %{ 38 | for i = 2:(numel(arg_list)) 39 | if isdir(arg_list{i}) 40 | to_process{i-1} = arg_list{i}; 41 | end 42 | end 43 | %} 44 | 45 | end 46 | 47 | % ------------------------------------------------------------------------- 48 | %% ---------------- Input parameters ------------------------------------- 49 | % ------------------------------------------------------------------------- 50 | 51 | h= fspecial('Gaussian', [6 6], 0.105); 52 | ishm3dparam = regexpi(arg_list,'hm3d'); 53 | if isempty([ishm3dparam{:}]) %flatten the cell of match indices; empty means no hm3d argument given 54 | hm3d=0.15; %standard setting, if no user input 55 | else 56 | for rr = 1:numel(arg_list) 57 | found_ahm = strfind(arg_list{rr},'hm3d'); 58 | if ~isempty(found_ahm) 59 | found_hm = arg_list{rr}; 60 | m = regexp(arg_list{rr}, ':') ; 61 | hm3d = str2num(found_hm(m+1:end)); %use everything after the colon as input for hminima transf value 62 | end 63 | 64 | end 65 | end 66 | 67 | % 
------------------------------------------------------------------------- 68 | %% ---------------- load ensemble model ----------------------------------- 69 | % ------------------------------------------------------------------------- 70 | 71 | %merged_file_saved=fullfile(folder, 'ensemble.tiff'); 72 | fprintf('Loading: %s ...\n', merged_file_saved); 73 | if exist(merged_file_saved, 'file'), 74 | for z = 1:numel(imfinfo(merged_file_saved)) %loop over the planes of the ensemble tiff stack 75 | ensMod(:,:,z) = imread(merged_file_saved,z); 76 | end 77 | else 78 | error('%s not found',merged_file_saved); 79 | end 80 | 81 | 82 | % ------------------------------------------------------------------------- 83 | %% ---------------- perform 3D watershed --------------------------------- 84 | % ------------------------------------------------------------------------- 85 | 86 | %the Gaussian kernel h and the hm3d threshold were already set in the input 87 | %parameter section above, so hm3d is not re-defined here (doing so would silently discard a user-supplied hm3d: value) %imhm_th_3d=0.19; 88 | prob_mask_th=0.8; 89 | disp('Applying 3D watershed ...') 90 | L = watershed(hmtransf(imfilter(ensMod, h), hm3d),6); %applying first a gaussian filter then a H-minima transform 91 | disp('Saving 3D watershed ...') 92 | segmented_filename = fullfile(outfolder,sprintf('Deep3M_segmented_out_hm3D%s.tif',num2str(hm3d))); 93 | if exist(segmented_filename, 'file'),delete(segmented_filename); end 94 | for i=1:size(L,3) 95 | imwrite(L(:,:,i),segmented_filename,'WriteMode','append') 96 | end 97 | 98 | %{ 99 | L_fill_merge=L; 100 | %L_fill_merge(deconv_prob_test>=prob_mask_th) = 0; 101 | L_fill_merge=double(L_fill_merge); 102 | parfor i=1:size(L_fill_merge,3) 103 | disp(['disp ' num2str(i)]) 104 | f = full_fill(L_fill_merge(:,:,i)); 105 | out_map(:,:,i)=f; 106 | end 107 | %} 108 | 109 | % ------------------------------------------------------------------------- 110 | %% ---------------- Load raw data for overlay ----------------------------- 111 | % ------------------------------------------------------------------------- 112 | if exist('raw_img_missing','var') %only set when launched via StartPostprocessing 113 | raw_image_full_path = input('\nPlease insert raw image file location:','s'); 114 | end 115 | 116 | if strcmpi('.h5',raw_image_full_path(end-2:end)) 117 | Raw_img=h5read(raw_image_full_path,'/data'); 118 | Raw_img=permute(Raw_img,[2 3 1]); 119 | elseif strcmpi('.tif',raw_image_full_path(end-3:end)) || strcmpi('.tiff',raw_image_full_path(end-4:end)) 120 | info = imfinfo(raw_image_full_path); 121 | fprintf('Reading image stack with %d images\n',size(info,1)); 122 | for idx =1:size(info,1) 123 | Raw_img(:,:,idx)=imread(raw_image_full_path,'index',idx); 124 | end 125 | else 126 | errordlg('Cannot open formats other than h5 or tiff'); 127 | end 128 | 129 | 130 | helpdlg(sprintf('Elapsed time is %06d seconds.', round(toc))) 131 | 132 | %% ============================== Make submission Files =============================== 133 | %make_submit_tiff(out_map,'iter_50000_1fm3fm5fm_correctByFull_outmap_th019_slice1') 134 | fprintf('Colorizing segmented images \n'); 135 | overlay = fullfile(outfolder,sprintf('Deep3M_colored_hm3D%s.tif',num2str(hm3d))); 136 | write_label2rgb_image(L,Raw_img,overlay) %colorize the watershed labels; out_map only exists in the commented-out fill step above 137 | 138 | %make_submit_tiff(out_map,'Best_iter_50000_1fm3fm5fm_correctByFull_outmap_th020_rindx0064745019') 139 | %write_label2rgb_image(out_map,Raw_img,'Best_segmentation_test_iter_50000_1fm3fm5fm_correctByFull_outmap_th020_rindx0064745019'); 140 | % write_label2rgb_image(L); 141 | 142 | %% END 143 | -------------------------------------------------------------------------------- /scripts/post_processing/merge_16_probs_v2.m: --------------------------------------------------------------------------------
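%% merge_16_probs_v2
% Usage sketch (assumes folder holds the v1..v16 variation subfolders of
% per-plane .h5 probability maps written by the prediction workers):
%
%   merge_16_probs_v2('/path/to/predictout/1fm');
%
% The function flips/rotates the 16 augmentation variants back into the
% original orientation, averages them plane by plane into one .png per
% plane, and deletes the intermediate v1..v16 subfolders when done.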
1 | function output_folder_name = merge_16_probs_v2(folder) 2 | output_folder_name=fullfile(folder); 3 | mkdir(output_folder_name); 4 | %% Processes plane by plane and saves each plane as .png 5 | folder_name=fullfile(folder, 'v1'); 6 | all_files = read_files_in_folder(folder_name); 7 | first_file = all_files(1).name; 8 | [~,NAME,ext] = fileparts(first_file); 9 | %digits = regexpi(NAME, '\d'); 10 | filebasename = NAME(1:end-1); %drop the last character, which is the plane digit 11 | 12 | for fff = 2: (numel(all_files)-3) %predictions start with 0; Ignore 0&1 and last two, since they are z-padding 13 | 14 | loadfile = [filebasename,num2str(fff),'.h5']; 15 | fprintf('Merging 16 variations of file %s ... number %s of %s\n', filebasename, num2str(fff-1), num2str(numel(all_files)-3)); 16 | for i=1:8 %File 1:8 are 1:100 17 | folder_name=[folder filesep 'v' num2str(i)]; 18 | filename = fullfile(folder_name,loadfile); 19 | %fileinfo = h5info(filename); 20 | load_im = h5read(filename, '/data'); 21 | disp('H5 Dimensions:');disp(size(load_im)); 22 | %scale = max(max(load_im(:,:,2))); 23 | b{i} = load_im(:,:,2); 24 | %prob=combinePredicctionSlice_v2(folder_name); 25 | %data{i}=prob; 26 | end 27 | eight_vars=recover8Variation(b); %Now rotate first 8 back into normal space 28 | %Variations 9-16 are inverse organized 29 | loadfile_revert = [filebasename,num2str(numel(all_files) - (fff+1)),'.h5']; 30 | for i=1:8 %File 9:16 are 100:1 31 | var = i+8; 32 | folder_name=[folder filesep 'v' num2str(var)]; 33 | filename = fullfile(folder_name,loadfile_revert); 34 | load_im = h5read(filename, '/data'); 35 | %scale = max(max(load_im(:,:,2))); 36 | b{i} = load_im(:,:,2); 37 | end 38 | next_vars=recover8Variation(b); %Now rotate next 8 back into normal space 39 | sixteen_vars = cat(3,eight_vars, next_vars); 40 | 41 | %{ 42 | %To check if 16 variations are good uncomment here 43 | output_filename=fullfile(output_folder_name , sprintf('%s_%04d.tiff', filebasename,(fff+1))); 44 | for z = 1:16 45 | imwrite(sixteen_vars(:,:,z),output_filename,'WriteMode','append'); 46 | fprintf('Saving: %s ... Image #%s \n', output_filename, num2str(z)); 47 | end 48 | %} 49 | image = mean(sixteen_vars,3); 50 | 51 | %image_stack=de_augment_data(b); 52 | output_filename=fullfile(output_folder_name , sprintf('%s_%04d.png', filebasename,(fff-2))); 53 | %delete(filename); 54 | disp(['write: ' output_filename]); 55 | imwrite(image,output_filename); 56 | %tiff_file_save=[folder filesep 'ave_16.tiff']; 57 | 58 | %%tried different weighting of 16v predictions, using mode instead of average -> need to test if better but currently slow 59 | %{ 60 | image2 = mode(sixteen_vars,3); 61 | outdir2=fullfile(folder,'de_augmented_mode_weighting'); 62 | mkdir(outdir2); 63 | output_filename2=fullfile(outdir2, sprintf('%s_%04d.png', filebasename,(fff+1))); 64 | %delete(filename); 65 | disp(['write: ' output_filename2]); 66 | imwrite(image2,output_filename2); 67 | %} 68 | end 69 | %{ 70 | if exist(tiff_file_save, 'file'),delete(tiff_file_save); end 71 | mx_im=max(average(:)); 72 | for i=1:size(average,3) 73 | b=average(:,:,i); 74 | im=uint8(b*(255/mx_im)); %removed 255- inverted image values 75 | imwrite(im,tiff_file_save,'WriteMode','append'); 76 | disp(['write #' num2str(i) ' image ... 
' tiff_file_save]); 77 | end 78 | %} 79 | disp('Deleting intermediate .h5 files'); 80 | for i = 1:16 81 | removefolders=[folder,filesep,'v',num2str(i)]; 82 | fprintf('Deleting %s\n', removefolders); 83 | rmdir(removefolders, 's'); 84 | end 85 | 86 | end 87 | 88 | function eight_vars=recover8Variation(x) 89 | prob=x{1}; 90 | average=zeros([size(prob),8]); 91 | for j = 1:8 92 | prob = x{j}; 93 | %for j = 1:size(prob,3) 94 | %p=squeeze(prob(:,:,j)); 95 | switch(j) 96 | case 1 97 | average(:,:,j) = average(:,:,j)+ prob; 98 | case 2 99 | average(:,:,j) = average(:,:,j) + flipdim(prob,1); 100 | case 3 101 | average(:,:,j) = average(:,:,j) + flipdim(prob,2); 102 | case 4 103 | average(:,:,j) = average(:,:,j) + rot90(prob, -1); 104 | case 5 105 | average(:,:,j) = average(:,:,j) + rot90(prob); 106 | case 6 107 | average(:,:,j) = average(:,:,j) + flipdim(rot90(prob,-1), 1); 108 | case 7 109 | average(:,:,j) = average(:,:,j) + flipdim(rot90(prob,-1), 2); 110 | case 8 111 | average(:,:,j) = average(:,:,j) + rot90(prob,2); 112 | end 113 | %end 114 | eight_vars=average; 115 | end 116 | end 117 | -------------------------------------------------------------------------------- /scripts/post_processing/merge_16_probs_v3.m: -------------------------------------------------------------------------------- 1 | function folder = merge_16_probs_v3(folder) 2 | 3 | % get variation directories aka directories that start with v 4 | vfolderlist = get_variation_folders(folder); 5 | 6 | folder_name = fullfile(folder, vfolderlist(min(find([vfolderlist.isdir]))).name); 7 | all_files = read_files_in_folder(folder_name); 8 | first_file = all_files(1).name; 9 | [~,NAME,ext] = fileparts(first_file); 10 | %digits = regexpi(NAME, '\d'); 11 | filebasename = NAME(1:end-1); %drop the last character, which is the plane digit 12 | 13 | for fff = 2: (numel(all_files)-3) %predictions start with 0; Ignore 0&1 and last two, since they are z-padding 14 | 15 | loadfile = [filebasename,num2str(fff),'.h5']; 16 | fprintf('Merging 16 variations of file %s ... 
number %s of %s\n', filebasename, num2str(fff-1), num2str(numel(all_files)-3)); 17 | image = []; 18 | for i=1:8 %File 1:8 are 1:100 19 | folder_name=[folder filesep 'v' num2str(i)]; 20 | 21 | if exist(folder_name,'dir')==7 22 | filename = fullfile(folder_name,loadfile); 23 | %fileinfo = h5info(filename); 24 | load_im = h5read(filename, '/data'); 25 | fprintf('H5 Dimensions: %s \n' ,num2str(size(load_im))); 26 | %scale = max(max(load_im(:,:,2))); 27 | inputim = load_im(:,:,2); 28 | switch(i) 29 | case 1 30 | inputim = inputim; 31 | case 2 32 | inputim = flipdim(inputim,1); 33 | case 3 34 | inputim = flipdim(inputim,2); 35 | case 4 36 | inputim = rot90(inputim, -1); 37 | case 5 38 | inputim = rot90(inputim); 39 | case 6 40 | inputim = flipdim(rot90(inputim,-1), 1); 41 | case 7 42 | inputim = flipdim(rot90(inputim,-1), 2); 43 | case 8 44 | inputim = rot90(inputim,2); 45 | end 46 | image = cat(3,image,inputim); 47 | end 48 | %prob=combinePredicctionSlice_v2(folder_name); 49 | %data{i}=prob; 50 | end 51 | 52 | %Variations 9-16 are inverse organized 53 | loadfile_revert = [filebasename,num2str(numel(all_files) - (fff+1)),'.h5']; 54 | for i=1:8 %File 9:16 are 100:1 55 | var = i+8; 56 | folder_name=[folder filesep 'v' num2str(var)]; 57 | if exist(folder_name,'dir')==7 58 | filename = fullfile(folder_name,loadfile_revert); 59 | load_im = h5read(filename, '/data'); 60 | %scale = max(max(load_im(:,:,2))); 61 | inputim = load_im(:,:,2); 62 | switch(i) 63 | case 1 64 | inputim = inputim; 65 | case 2 66 | inputim = flipdim(inputim,1); 67 | case 3 68 | inputim = flipdim(inputim,2); 69 | case 4 70 | inputim = rot90(inputim, -1); 71 | case 5 72 | inputim = rot90(inputim); 73 | case 6 74 | inputim = flipdim(rot90(inputim,-1), 1); 75 | case 7 76 | inputim = flipdim(rot90(inputim,-1), 2); 77 | case 8 78 | inputim = rot90(inputim,2); 79 | end 80 | image = cat(3,image,inputim) ; 81 | end 82 | end 83 | 84 | %{ 85 | %To check if 16 variations are good uncomment here 86 | output_filename=fullfile(folder , sprintf('%s_%04d.tiff', filebasename,(fff+1))); 87 | for z = 1:16 88 | imwrite(sixteen_vars(:,:,z),output_filename,'WriteMode','append'); 89 | fprintf('Saving: %s ... Image #%s \n', output_filename, num2str(z)); 90 | end 91 | %} 92 | if size(image,3)>1 93 | image = mean(image,3); 94 | end 95 | 96 | %image_stack=de_augment_data(b); 97 | output_filename=fullfile(folder , sprintf('%s_%04d.png', filebasename,(fff-2))); 98 | %delete(filename); 99 | disp(['write: ' output_filename]); 100 | imwrite(image,output_filename); 101 | %tiff_file_save=[folder filesep 'ave_16.tiff']; 102 | 103 | %%tried different weighting of 16v predictions, using mode instead of average -> need to test if better but currently slow 104 | %{ 105 | image2 = mode(sixteen_vars,3); 106 | outdir2=fullfile(folder,'de_augmented_mode_weighting'); 107 | mkdir(outdir2); 108 | output_filename2=fullfile(outdir2, sprintf('%s_%04d.png', filebasename,(fff+1))); 109 | %delete(filename); 110 | disp(['write: ' output_filename2]); 111 | imwrite(image2,output_filename2); 112 | %} 113 | end 114 | %{ 115 | if exist(tiff_file_save, 'file'),delete(tiff_file_save); end 116 | mx_im=max(average(:)); 117 | for i=1:size(average,3) 118 | b=average(:,:,i); 119 | im=uint8(b*(255/mx_im)); %removed 255- inverted image values 120 | imwrite(im,tiff_file_save,'WriteMode','append'); 121 | disp(['write #' num2str(i) ' image ... 
' tiff_file_save]); 122 | end 123 | %} 124 | disp('Deleting intermediate .h5 files'); 125 | for i = 1:16 126 | removefolders=[folder,filesep,'v',num2str(i)]; 127 | fprintf('Deleting %s\n', removefolders); 128 | rmdir(removefolders, 's'); 129 | end 130 | 131 | end 132 | 133 | 134 | function eight_vars=recover8Variation(x) 135 | prob=x{1}; 136 | average=zeros([size(prob),8]); 137 | for j = 1:8 138 | prob = x{j}; 139 | %for j = 1:size(prob,3) 140 | %p=squeeze(prob(:,:,j)); 141 | switch(j) 142 | case 1 143 | average(:,:,j) = average(:,:,j)+ prob; 144 | case 2 145 | average(:,:,j) = average(:,:,j) + flipdim(prob,1); 146 | case 3 147 | average(:,:,j) = average(:,:,j) + flipdim(prob,2); 148 | case 4 149 | average(:,:,j) = average(:,:,j) + rot90(prob, -1); 150 | case 5 151 | average(:,:,j) = average(:,:,j) + rot90(prob); 152 | case 6 153 | average(:,:,j) = average(:,:,j) + flipdim(rot90(prob,-1), 1); 154 | case 7 155 | average(:,:,j) = average(:,:,j) + flipdim(rot90(prob,-1), 2); 156 | case 8 157 | average(:,:,j) = average(:,:,j) + rot90(prob,2); 158 | end 159 | %end 160 | eight_vars=average; 161 | end 162 | end 163 | 164 | -------------------------------------------------------------------------------- /scripts/write_label2rgb_image.m: -------------------------------------------------------------------------------- 1 | function write_label2rgb_image(L,fuse_im,file_prefix) 2 | label_rgb = label2rgb3d(L,'hsv',[1,1,1],'Shuffle'); 3 | for K=1:length(L(1, 1, :)) 4 | %outputFileName = sprintf('img_%d.tif',K); 5 | %imwrite(label2rgb(L_test(:, :, K)), outputFileName); 6 | 7 | %imwrite(squeeze(lable_rgb(:,:,K,:),outoutFiileName)); 8 | im=squeeze(label_rgb(:,:,K,:)); 9 | if nargin >1 10 | f_im=squeeze(fuse_im(:,:,K,:)); 11 | im = imfuse(im,f_im,'blend','Scaling','joint'); 12 | end 13 | if nargin >2 14 | file=[file_prefix '.tiff']; 15 | else 16 | file='img.tiff'; 17 | end 18 | imwrite(im,file,'WriteMode','append'); 19 | end 20 | -------------------------------------------------------------------------------- /singularity/ubuntu-xenial64-sdsc-comet/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean singularity 2 | 3 | help: 4 | @echo "clean - remove all build and test artifacts" 5 | @echo "singularity - Creates singularity 2.3.2 image" 6 | 7 | clean: 8 | sudo rm -fr /vagrant/build/ 9 | sudo rm -f /tmp/*.deb 10 | sudo rm -f /tmp/Mini* 11 | 12 | 13 | singularity: clean 14 | @echo 'Creating Singularity v2.3.2 image' 15 | mkdir -p /vagrant/build 16 | imgfile='/vagrant/build/cdeep3m.img' ; \ 17 | sudo /usr/local/bin/singularity create -s 12096 $$imgfile ; \ 18 | sudo /usr/local/bin/singularity bootstrap $$imgfile ubuntu-cuda.def; \ 19 | echo "Singularity image created $$imgfile" 20 | -------------------------------------------------------------------------------- /singularity/ubuntu-xenial64-sdsc-comet/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 
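# This box is used to build the CDeep3M Singularity image for SDSC Comet:
# bootstrap.sh (below) installs Singularity 2.3.2, and running `make
# singularity` inside the VM performs the image build via the Makefile above.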
8 | Vagrant.configure(2) do |config| 9 | config.vm.box = "ubuntu/xenial64" 10 | config.vm.provision :shell, path: "bootstrap.sh" 11 | config.ssh.forward_x11 = true 12 | 13 | config.vm.provider "virtualbox" do |vb| 14 | vb.memory = "4092" 15 | vb.cpus = "2" 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /singularity/ubuntu-xenial64-sdsc-comet/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Installing base packages" 4 | apt-get -y update 5 | apt-get -y upgrade 6 | apt-get -y install build-essential python debootstrap 7 | 8 | wget https://github.com/singularityware/singularity/releases/download/2.3.2/singularity-2.3.2.tar.gz 9 | tar xvf singularity-2.3.2.tar.gz 10 | cd singularity-2.3.2 11 | ./configure --prefix=/usr/local 12 | make 13 | make install 14 | cd .. && /bin/rm -rf singularity* # step out of the source dir so the tarball and tree are actually removed 15 | /bin/rm -f /tmp/*.deb 16 | -------------------------------------------------------------------------------- /tests/RunUnitTests.m: -------------------------------------------------------------------------------- 1 | #!/usr/bin/octave -qf 2 | 3 | 4 | script_dir = fileparts(make_absolute_filename(program_invocation_name())); 5 | 6 | dist_dir=make_absolute_filename(strcat(script_dir,filesep(),'..')); 7 | 8 | old_dir = cd(dist_dir); 9 | 10 | addpath(genpath(dist_dir)); 11 | addpath(genpath(strcat(dist_dir,filesep(),'scripts',filesep()))); 12 | addpath(genpath(strcat(dist_dir,filesep(),'scripts',filesep(),'functions'))); 13 | 14 | test_files = vertcat(glob('*.m'), glob('*/*.m'),glob('*/*/*.m')); 15 | numfailed=0; 16 | for x = 1:rows(test_files) 17 | t_file = char(test_files(x)); 18 | t_file_data = fileread(t_file); 19 | lines = strsplit(t_file_data,'\n'); 20 | run_test = 0; 21 | for j = 1:columns(lines) 22 | if index(char(lines(j)),'%!') == 1; 23 | run_test = 1; 24 | break; 25 | endif 26 | endfor 27 | if run_test == 1; 28 | success = test(t_file,"quiet",stdout()); 29 | if success == 0; 30 | numfailed+=1; 31 | endif 32 | endif 33 | endfor 34 | 35 | cd(old_dir); 36 | 37 | if numfailed > 0; 38 | error(sprintf('%d tests failed', numfailed)); 39 | endif 40 | -------------------------------------------------------------------------------- /tests/caffetrain.sh.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | 4 | setup() { 5 | export CAFFE_TRAIN_SH="${BATS_TEST_DIRNAME}/../caffetrain.sh" 6 | export TEST_TMP_DIR="${BATS_TMPDIR}/"`uuidgen` 7 | /bin/mkdir -p "$TEST_TMP_DIR" 8 | } 9 | 10 | teardown() { 11 | if [ -d "$TEST_TMP_DIR" ] ; then 12 | /bin/rm -rf "$TEST_TMP_DIR" 13 | fi 14 | } 15 | 16 | @test "caffetrain.sh no args empty dir" { 17 | run $CAFFE_TRAIN_SH 18 | echo "$status $output" 1>&2 19 | [ "$status" -eq 1 ] 20 | [ "${lines[0]}" = "usage: caffetrain.sh [-h] [--numiterations NUMITERATIONS] [--gpu GPU]" ] 21 | } 22 | 23 | @test "caffetrain.sh 1fm no 1fm/solver.prototxt file" { 24 | run $CAFFE_TRAIN_SH 1fm "$TEST_TMP_DIR" 25 | echo "$status $output" 1>&2 26 | [ "$status" -eq 2 ] 27 | [ "${lines[1]}" = "ERROR trying to update max_iter in $TEST_TMP_DIR/1fm/solver.prototxt" ] 28 | } 29 | 30 | @test "caffetrain.sh 1fm verify correctly updated solver.prototxt file no args set" { 31 | mkdir -p $TEST_TMP_DIR/1fm 32 | echo "#blah" > $TEST_TMP_DIR/1fm/solver.prototxt 33 | echo "base_lr: 1" >> $TEST_TMP_DIR/1fm/solver.prototxt 34 | echo "power: 2" >> $TEST_TMP_DIR/1fm/solver.prototxt 35 | echo "momentum: 3.5" >> 
$TEST_TMP_DIR/1fm/solver.prototxt 36 | echo "weight_decay: 4.4" >> $TEST_TMP_DIR/1fm/solver.prototxt 37 | echo "average_loss: 1" >> $TEST_TMP_DIR/1fm/solver.prototxt 38 | echo "lr_policy: \"foo\"" >> $TEST_TMP_DIR/1fm/solver.prototxt 39 | echo "iter_size: 23" >> $TEST_TMP_DIR/1fm/solver.prototxt 40 | echo "snapshot: 3" >> $TEST_TMP_DIR/1fm/solver.prototxt 41 | echo "max_iter: 4" >> $TEST_TMP_DIR/1fm/solver.prototxt 42 | echo "solver mode: GPU" >> $TEST_TMP_DIR/1fm/solver.prototxt 43 | 44 | touch $TEST_TMP_DIR/1fm/log 45 | run $CAFFE_TRAIN_SH 1fm "$TEST_TMP_DIR" 46 | [ "$status" -eq 3 ] 47 | [ "${lines[1]}" = "ERROR unable to make $TEST_TMP_DIR/1fm/log directory" ] 48 | run cat $TEST_TMP_DIR/1fm/solver.prototxt 49 | 50 | [ "${lines[0]}" = "#blah" ] 51 | [ "${lines[1]}" = "base_lr: 1e-02" ] 52 | [ "${lines[2]}" = "power: 0.8" ] 53 | [ "${lines[3]}" = "momentum: 0.9" ] 54 | [ "${lines[4]}" = "weight_decay: 0.0005" ] 55 | [ "${lines[5]}" = "average_loss: 16" ] 56 | [ "${lines[6]}" = "lr_policy: \"poly\"" ] 57 | [ "${lines[7]}" = "iter_size: 8" ] 58 | [ "${lines[8]}" = "snapshot: 2000" ] 59 | [ "${lines[9]}" = "max_iter: 30000" ] 60 | [ "${lines[10]}" = "solver mode: GPU" ] 61 | } 62 | 63 | @test "caffetrain.sh 1fm verify correctly updated solver.prototxt file with args set" { 64 | mkdir -p $TEST_TMP_DIR/1fm 65 | echo "#blah" > $TEST_TMP_DIR/1fm/solver.prototxt 66 | echo "base_lr: 1" >> $TEST_TMP_DIR/1fm/solver.prototxt 67 | echo "power: 2" >> $TEST_TMP_DIR/1fm/solver.prototxt 68 | echo "momentum: 3.5" >> $TEST_TMP_DIR/1fm/solver.prototxt 69 | echo "weight_decay: 4.4" >> $TEST_TMP_DIR/1fm/solver.prototxt 70 | echo "average_loss: 1" >> $TEST_TMP_DIR/1fm/solver.prototxt 71 | echo "lr_policy: \"foo\"" >> $TEST_TMP_DIR/1fm/solver.prototxt 72 | echo "iter_size: 23" >> $TEST_TMP_DIR/1fm/solver.prototxt 73 | echo "snapshot: 3" >> $TEST_TMP_DIR/1fm/solver.prototxt 74 | echo "max_iter: 4" >> $TEST_TMP_DIR/1fm/solver.prototxt 75 | echo "solver mode: GPU" >> $TEST_TMP_DIR/1fm/solver.prototxt 76 | 77 | touch $TEST_TMP_DIR/1fm/log 78 | run $CAFFE_TRAIN_SH --base_learn "1e-04" --power 0.2 --momentum 0.3 --weight_decay 0.4 --average_loss 0.5 --lr_policy yo --iter_size 7 --snapshot_interval 8 --numiterations 9 1fm "$TEST_TMP_DIR" 79 | [ "$status" -eq 3 ] 80 | [ "${lines[1]}" = "ERROR unable to make $TEST_TMP_DIR/1fm/log directory" ] 81 | run cat $TEST_TMP_DIR/1fm/solver.prototxt 82 | 83 | [ "${lines[0]}" = "#blah" ] 84 | [ "${lines[1]}" = "base_lr: 1e-04" ] 85 | [ "${lines[2]}" = "power: 0.2" ] 86 | [ "${lines[3]}" = "momentum: 0.3" ] 87 | [ "${lines[4]}" = "weight_decay: 0.4" ] 88 | [ "${lines[5]}" = "average_loss: 0.5" ] 89 | [ "${lines[6]}" = "lr_policy: \"yo\"" ] 90 | [ "${lines[7]}" = "iter_size: 7" ] 91 | [ "${lines[8]}" = "snapshot: 8" ] 92 | [ "${lines[9]}" = "max_iter: 9" ] 93 | [ "${lines[10]}" = "solver mode: GPU" ] 94 | } 95 | 96 | 97 | @test "caffetrain.sh 1fm unable to create log directory" { 98 | mkdir -p $TEST_TMP_DIR/1fm 99 | touch $TEST_TMP_DIR/1fm/solver.prototxt 100 | touch $TEST_TMP_DIR/1fm/log 101 | run $CAFFE_TRAIN_SH 1fm "$TEST_TMP_DIR" 102 | echo "$status $output" 1>&2 103 | [ "$status" -eq 3 ] 104 | [ "${lines[1]}" = "ERROR unable to make $TEST_TMP_DIR/1fm/log directory" ] 105 | } 106 | 107 | @test "caffetrain.sh 1fm unable to create trainedmodel directory" { 108 | mkdir -p $TEST_TMP_DIR/1fm 109 | touch $TEST_TMP_DIR/1fm/solver.prototxt 110 | touch $TEST_TMP_DIR/1fm/trainedmodel 111 | run $CAFFE_TRAIN_SH 1fm "$TEST_TMP_DIR" 112 | echo "$status $output" 1>&2 113 | [ "$status" -eq 4 ] 114 
| [ "${lines[1]}" = "ERROR unable to make $TEST_TMP_DIR/1fm/trainedmodel directory" ] 115 | } 116 | 117 | @test "caffetrain.sh 1fm success" { 118 | mkdir -p $TEST_TMP_DIR/1fm 119 | touch $TEST_TMP_DIR/1fm/solver.prototxt 120 | ln -s /bin/echo $TEST_TMP_DIR/caffe.bin 121 | export A_TEMP_PATH=$PATH 122 | export PATH=$TEST_TMP_DIR:$PATH 123 | 124 | run $CAFFE_TRAIN_SH 1fm "$TEST_TMP_DIR" 125 | [ "$status" -eq 0 ] 126 | echo "$status $output" 1>&2 127 | [ "${lines[0]}" = "" ] 128 | 129 | run cat "$TEST_TMP_DIR/1fm/log/out.log" 130 | echo "$status $output" 1>&2 131 | [ "${lines[0]}" = "train --solver=${TEST_TMP_DIR}/1fm/solver.prototxt --gpu all" ] 132 | 133 | export PATH=$A_TEMP_PATH 134 | } 135 | 136 | # TODO add tests to verify fining last completed iteration works 137 | 138 | 139 | 140 | -------------------------------------------------------------------------------- /tests/octavetests.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | @test "test octave scripts" { 4 | run $BATS_TEST_DIRNAME/RunUnitTests.m 5 | echo "$output" 1>&2 6 | [ "$status" -eq 0 ] 7 | } 8 | 9 | -------------------------------------------------------------------------------- /tests/preprocessworker.sh.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | 4 | setup() { 5 | export PREPROCESS_WORKER_SH="${BATS_TEST_DIRNAME}/../preprocessworker.sh" 6 | export TEST_TMP_DIR="${BATS_TMPDIR}/"`uuidgen` 7 | /bin/mkdir -p "$TEST_TMP_DIR" 8 | } 9 | 10 | teardown() { 11 | if [ -d "$TEST_TMP_DIR" ] ; then 12 | /bin/rm -rf "$TEST_TMP_DIR" 13 | fi 14 | } 15 | 16 | @test "preprocessworker.sh no args" { 17 | run $PREPROCESS_WORKER_SH 18 | echo "$status $output" 1>&2 19 | [ "$status" -eq 1 ] 20 | [ "${lines[0]}" = "usage: preprocessworker.sh [-h]" ] 21 | } 22 | 23 | @test "preprocessworker.sh empty dir" { 24 | run $PREPROCESS_WORKER_SH "$TEST_TMP_DIR" 25 | echo "$status $output" 1>&2 26 | [ "$status" -eq 2 ] 27 | [ "${lines[0]}" == "ERROR no $TEST_TMP_DIR/predict.config file found" ] 28 | run cat "$TEST_TMP_DIR/ERROR" 29 | [ "$status" -eq 0 ] 30 | [ "${lines[0]}" == "ERROR parsing $TEST_TMP_DIR/predict.config" ] 31 | } 32 | 33 | @test "preprocessworker.sh no package_processing_info.txt" { 34 | pconfig="$TEST_TMP_DIR/predict.config" 35 | echo "trainedmodeldir=tmodel" >> "$pconfig" 36 | echo "imagedir=imagey" >> "$pconfig" 37 | echo "models=1fm" >> "$pconfig" 38 | echo "augspeed=1" >> "$pconfig" 39 | run $PREPROCESS_WORKER_SH "$TEST_TMP_DIR" 40 | echo "$status $output" 1>&2 41 | [ "$status" -eq 7 ] 42 | [ "${lines[5]}" == "ERROR $TEST_TMP_DIR/augimages/package_processing_info.txt not found" ] 43 | } 44 | 45 | @test "preprocessworker.sh PreprocessPackage.m fails" { 46 | ln -s /bin/false "$TEST_TMP_DIR/PreprocessPackage.m" 47 | pconfig="$TEST_TMP_DIR/predict.config" 48 | echo "trainedmodeldir=tmodel" >> "$pconfig" 49 | echo "imagedir=imagey" >> "$pconfig" 50 | echo "models=1fm" >> "$pconfig" 51 | echo "augspeed=1" >> "$pconfig" 52 | 53 | mkdir -p "$TEST_TMP_DIR/augimages" 54 | p_info="$TEST_TMP_DIR/augimages/package_processing_info.txt" 55 | echo "" > "$p_info" 56 | echo "Number of XY Packages" >> "$p_info" 57 | echo "4" >> "$p_info" 58 | echo "Number of z-blocks" >> "$p_info" 59 | echo "6" >> "$p_info" 60 | 61 | export A_TEMP_PATH=$PATH 62 | export PATH=$TEST_TMP_DIR:$PATH 63 | 64 | run $PREPROCESS_WORKER_SH "$TEST_TMP_DIR" 65 | echo "$status $output" 1>&2 66 | [ "$status" -eq 8 ] 67 | run cat 
"$TEST_TMP_DIR/ERROR" 68 | echo "$status $output" 1>&2 69 | [ "$status" -eq 0 ] 70 | [ "${lines[0]}" == "ERROR, a non-zero exit code (1) received from PreprocessPackage.m 001 01 1fm 1" ] 71 | export PATH=$A_TEMP_PATH 72 | } 73 | 74 | @test "preprocessworker.sh success" { 75 | ln -s /bin/echo "$TEST_TMP_DIR/PreprocessPackage.m" 76 | pconfig="$TEST_TMP_DIR/predict.config" 77 | echo "trainedmodeldir=tmodel" >> "$pconfig" 78 | echo "imagedir=imagey" >> "$pconfig" 79 | echo "models=1fm" >> "$pconfig" 80 | echo "augspeed=1" >> "$pconfig" 81 | 82 | mkdir -p "$TEST_TMP_DIR/augimages" 83 | p_info="$TEST_TMP_DIR/augimages/package_processing_info.txt" 84 | echo "" > "$p_info" 85 | echo "Number of XY Packages" >> "$p_info" 86 | echo "1" >> "$p_info" 87 | echo "Number of z-blocks" >> "$p_info" 88 | echo "1" >> "$p_info" 89 | 90 | export A_TEMP_PATH=$PATH 91 | export PATH=$TEST_TMP_DIR:$PATH 92 | 93 | run $PREPROCESS_WORKER_SH --maxpackages 20 --waitinterval 0 "$TEST_TMP_DIR" 94 | echo "$status $output" 1>&2 95 | [ "$status" -eq 0 ] 96 | [ "${lines[0]}" == "Running PreprocessPackage" ] 97 | [ "${lines[5]}" == "Preprocessing Pkg001_Z01 in model 1fm" ] 98 | [ "${lines[6]}" == "Waiting for prediction to catch up" ] 99 | [ "${lines[7]}" == "PreprocessPackaging has completed." ] 100 | [ ! -f "$TEST_TMP_DIR/ERROR" ] 101 | 102 | run cat "$TEST_TMP_DIR/augimages/preproc.1fm.Pkg001_Z01.log" 103 | echo "$status $output" 1>&2 104 | [ "$status" -eq 0 ] 105 | [ "${lines[0]}" == "imagey $TEST_TMP_DIR/augimages 001 01 1fm 1" ] 106 | export PATH=$A_TEMP_PATH 107 | } 108 | -------------------------------------------------------------------------------- /tests/system/1fmonlydemo2.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | setup() { 4 | export RUNTRAINING_SH="${BATS_TEST_DIRNAME}/../../runtraining.sh" 5 | export RUNPREPROC_SH="${BATS_TEST_DIRNAME}/../../PreprocessTrainingData.m" 6 | export RUNPREDICTION_SH="${BATS_TEST_DIRNAME}/../../runprediction.sh" 7 | export TEST_TMP_DIR="${BATS_TMPDIR}/"`uuidgen` 8 | /bin/mkdir -p "$TEST_TMP_DIR" 9 | } 10 | 11 | teardown() { 12 | if [ -d "$TEST_TMP_DIR" ] ; then 13 | /bin/rm -rf "$TEST_TMP_DIR" 14 | fi 15 | } 16 | 17 | @test "Runs demo 2, train and predict with --1fmonly flag, est time 5 min" { 18 | run $RUNPREPROC_SH ~/cdeep3m/mito_testsample/training/images ~/cdeep3m/mito_testsample/training/labels "$TEST_TMP_DIR/mito_testaugtrain" 19 | echo "$status $output" 1>&2 20 | [ "$status" -eq 0 ] 21 | run $RUNTRAINING_SH --numiterations 100 --1fmonly "$TEST_TMP_DIR/mito_testaugtrain" "$TEST_TMP_DIR/train_out" 22 | echo "$status $output" 1>&2 23 | [ "$status" -eq 0 ] 24 | 25 | run $RUNPREDICTION_SH "$TEST_TMP_DIR/train_out" --models 1fm ~/cdeep3m/mito_testsample/testset/ "$TEST_TMP_DIR/predictout" 26 | echo "$status $output" 1>&2 27 | [ "$status" -eq 0 ] 28 | 29 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0001.png" ] 30 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0002.png" ] 31 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0003.png" ] 32 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0004.png" ] 33 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0005.png" ] 34 | 35 | } 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /tests/system/checkstitch.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | setup() { 4 | export 
RUNPREDICTION_SH="${BATS_TEST_DIRNAME}/../../runprediction.sh" 5 | export TEST_TMP_DIR="${BATS_TMPDIR}/"`uuidgen` 6 | /bin/mkdir -p "$TEST_TMP_DIR" 7 | export TEST_DATA_DIR="${BATS_TEST_DIRNAME}/testdata" 8 | } 9 | 10 | teardown() { 11 | if [ -d "$TEST_TMP_DIR" ] ; then 12 | /bin/rm -rf "$TEST_TMP_DIR" 13 | fi 14 | } 15 | 16 | @test "Runs demo 1, predict on pretrained model against 5 2k x 2k images 30min" { 17 | 18 | imgdir="$TEST_TMP_DIR/myimages" 19 | mkdir -p "$imgdir" 20 | for Y in `seq 1 5` ; do 21 | cp "$TEST_DATA_DIR/2kimage/images.081.mirrored.png" "$imgdir/$Y.png" 22 | done 23 | run $RUNPREDICTION_SH ~/sbem/mitochrondria/xy5.9nm40nmz/30000iterations_train_out "$imgdir" "$TEST_TMP_DIR/job" 24 | echo "$status $output" 1>&2 25 | [ "$status" -eq 0 ] 26 | [ -s "$TEST_TMP_DIR/job/ensembled/Segmented_0001.png" ] 27 | [ -s "$TEST_TMP_DIR/job/ensembled/Segmented_0002.png" ] 28 | [ -s "$TEST_TMP_DIR/job/ensembled/Segmented_0003.png" ] 29 | [ -s "$TEST_TMP_DIR/job/ensembled/Segmented_0004.png" ] 30 | [ -s "$TEST_TMP_DIR/job/ensembled/Segmented_0005.png" ] 31 | } 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /tests/system/demo1.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | setup() { 4 | export RUNPREDICTION_SH="${BATS_TEST_DIRNAME}/../../runprediction.sh" 5 | export TEST_TMP_DIR="${BATS_TMPDIR}/"`uuidgen` 6 | /bin/mkdir -p "$TEST_TMP_DIR" 7 | } 8 | 9 | teardown() { 10 | if [ -d "$TEST_TMP_DIR" ] ; then 11 | /bin/rm -rf "$TEST_TMP_DIR" 12 | fi 13 | } 14 | 15 | @test "Runs demo 1, predict on pretrained model, est time 15 min" { 16 | run $RUNPREDICTION_SH ~/sbem/mitochrondria/xy5.9nm40nmz/30000iterations_train_out ~/cdeep3m/mito_testsample/testset/ "$TEST_TMP_DIR" 17 | echo "$status $output" 1>&2 18 | [ "$status" -eq 0 ] 19 | [ -s "$TEST_TMP_DIR/ensembled/Segmented_0001.png" ] 20 | [ -s "$TEST_TMP_DIR/ensembled/Segmented_0002.png" ] 21 | [ -s "$TEST_TMP_DIR/ensembled/Segmented_0003.png" ] 22 | [ -s "$TEST_TMP_DIR/ensembled/Segmented_0004.png" ] 23 | [ -s "$TEST_TMP_DIR/ensembled/Segmented_0005.png" ] 24 | } 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /tests/system/demo2.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | setup() { 4 | export RUNTRAINING_SH="${BATS_TEST_DIRNAME}/../../runtraining.sh" 5 | export RUNPREPROC_SH="${BATS_TEST_DIRNAME}/../../PreprocessTrainingData.m" 6 | export RUNPREDICTION_SH="${BATS_TEST_DIRNAME}/../../runprediction.sh" 7 | export TEST_TMP_DIR="${BATS_TMPDIR}/"`uuidgen` 8 | /bin/mkdir -p "$TEST_TMP_DIR" 9 | } 10 | 11 | teardown() { 12 | if [ -d "$TEST_TMP_DIR" ] ; then 13 | /bin/rm -rf "$TEST_TMP_DIR" 14 | fi 15 | } 16 | 17 | @test "Runs demo 2, train and predict, est time 15 min" { 18 | run $RUNPREPROC_SH ~/cdeep3m/mito_testsample/training/images ~/cdeep3m/mito_testsample/training/labels "$TEST_TMP_DIR/mito_testaugtrain" 19 | echo "$status $output" 1>&2 20 | [ "$status" -eq 0 ] 21 | run $RUNTRAINING_SH --numiterations 100 "$TEST_TMP_DIR/mito_testaugtrain" "$TEST_TMP_DIR/train_out" 22 | echo "$status $output" 1>&2 23 | [ "$status" -eq 0 ] 24 | 25 | run $RUNPREDICTION_SH "$TEST_TMP_DIR/train_out" ~/cdeep3m/mito_testsample/testset/ "$TEST_TMP_DIR/predictout" 26 | echo "$status $output" 1>&2 27 | [ "$status" -eq 0 ] 28 | 29 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0001.png" ] 30 | [ -s 
"$TEST_TMP_DIR/predictout/ensembled/Segmented_0002.png" ] 31 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0003.png" ] 32 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0004.png" ] 33 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0005.png" ] 34 | 35 | } 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /tests/system/retraindemo.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | setup() { 4 | export RUNTRAINING_SH="${BATS_TEST_DIRNAME}/../../runtraining.sh" 5 | export RUNPREPROC_SH="${BATS_TEST_DIRNAME}/../../PreprocessTrainingData.m" 6 | export RUNPREDICTION_SH="${BATS_TEST_DIRNAME}/../../runprediction.sh" 7 | export TEST_TMP_DIR="${BATS_TMPDIR}/"`uuidgen` 8 | /bin/mkdir -p "$TEST_TMP_DIR" 9 | } 10 | 11 | teardown() { 12 | if [ -d "$TEST_TMP_DIR" ] ; then 13 | /bin/rm -rf "$TEST_TMP_DIR" 14 | fi 15 | } 16 | 17 | @test "Runs a retrain and then prediction" { 18 | run $RUNPREPROC_SH ~/cdeep3m/mito_testsample/training/images ~/cdeep3m/mito_testsample/training/labels "$TEST_TMP_DIR/mito_testaugtrain" 19 | echo "$status $output" 1>&2 20 | [ "$status" -eq 0 ] 21 | run $RUNTRAINING_SH --retrain ~/sbem/mitochrondria/xy5.9nm40nmz/30000iterations_train_out --additerations 50 "$TEST_TMP_DIR/mito_testaugtrain" "$TEST_TMP_DIR/train_out" 22 | echo "$status $output" 1>&2 23 | [ "$status" -eq 0 ] 24 | 25 | run $RUNPREDICTION_SH "$TEST_TMP_DIR/train_out" ~/cdeep3m/mito_testsample/testset/ "$TEST_TMP_DIR/predictout" 26 | echo "$status $output" 1>&2 27 | [ "$status" -eq 0 ] 28 | 29 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0001.png" ] 30 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0002.png" ] 31 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0003.png" ] 32 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0004.png" ] 33 | [ -s "$TEST_TMP_DIR/predictout/ensembled/Segmented_0005.png" ] 34 | 35 | } 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /tests/system/testdata/2kimage/images.081.mirrored.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/tests/system/testdata/2kimage/images.081.mirrored.png -------------------------------------------------------------------------------- /trainworker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | script_name=`basename $0` 4 | script_dir=`dirname $0` 5 | version="???" 6 | 7 | if [ -f "$script_dir/VERSION" ] ; then 8 | version=`cat $script_dir/VERSION` 9 | fi 10 | 11 | numiterations="30000" 12 | gpu="all" 13 | one_fmonly=false 14 | base_lr="1e-02" 15 | power="0.8" 16 | momentum="0.9" 17 | weight_decay="0.0005" 18 | average_loss="16" 19 | lr_policy="poly" 20 | iter_size="8" 21 | snapshot_interval="2000" 22 | model_list="1fm,3fm,5fm" 23 | 24 | 25 | function usage() 26 | { 27 | echo "usage: $script_name [-h] [--models MODELS] 28 | [--numiterations NUMITERATIONS] 29 | [--gpu GPU] [--base_lr BASE_LR] [--power POWER] 30 | [--momentum MOMENTUM] 31 | [--weight_decay WEIGHT_DECAY] 32 | [--average_loss AVERAGE_LOSS] 33 | [--lr_policy POLICY] [--iter_size ITER_SIZE] 34 | [--snapshot_interval SNAPSHOT_INTERVAL] 35 | trainoutdir 36 | Version: $version 37 | 38 | Runs caffe training on CDeep3M model in 39 | directory. 
40 | 41 | For further information about parameters below please see: 42 | https://github.com/BVLC/caffe/wiki/Solver-Prototxt 43 | 44 | 45 | optional arguments: 46 | -h, --help show this help message and exit 47 | --models Only train on models specified in comma 48 | delimited list. (default 1fm,3fm,5fm) 49 | --gpu Which GPU to use, can be a number ie 0 or 1 or 50 | all to use all GPUs (default $gpu) 51 | --base_learn Base learning rate (default $base_lr) 52 | --power Used in poly and sigmoid lr_policies. (default $power) 53 | --momentum Indicates how much of the previous weight will be 54 | retained in the new calculation. (default $momentum) 55 | --weight_decay Factor of (regularization) penalization of large 56 | weights (default $weight_decay) 57 | --average_loss Number of iterations to use to average loss 58 | (default $average_loss) 59 | --lr_policy Learning rate policy (default $lr_policy) 60 | --iter_size Accumulate gradients across batches through the 61 | iter_size solver field. (default $iter_size) 62 | --snapshot_interval How often caffe should output a model and solverstate. 63 | (default $snapshot_interval) 64 | --numiterations Number of training iterations to run (default $numiterations) 65 | " 1>&2; 66 | exit 1; 67 | } 68 | 69 | TEMP=`getopt -o h --long "models:,gpu:,numiterations:,base_learn:,power:,momentum:,weight_decay:,average_loss:,lr_policy:,iter_size:,snapshot_interval:" -n '$0' -- "$@"` 70 | eval set -- "$TEMP" 71 | 72 | while true ; do 73 | case "$1" in 74 | -h ) usage ;; 75 | --models ) model_list=$2 ; shift 2 ;; 76 | --numiterations ) numiterations=$2 ; shift 2 ;; 77 | --gpu ) gpu=$2 ; shift 2 ;; 78 | --base_learn ) base_lr=$2 ; shift 2 ;; 79 | --power ) power=$2 ; shift 2 ;; 80 | --momentum ) momentum=$2 ; shift 2 ;; 81 | --weight_decay ) weight_decay=$2 ; shift 2 ;; 82 | --average_loss ) average_loss=$2 ; shift 2 ;; 83 | --lr_policy ) lr_policy=$2 ; shift 2 ;; 84 | --iter_size ) iter_size=$2 ; shift 2 ;; 85 | --snapshot_interval ) snapshot_interval=$2 ; shift 2 ;; 86 | --) shift ; break ;; 87 | esac 88 | done 89 | 90 | if [ $# -ne 1 ] ; then 91 | usage 92 | fi 93 | 94 | trainoutdir=$1 95 | 96 | echo "" 97 | 98 | let maxgpuindex=0 99 | gpucount=`nvidia-smi -L | wc -l` 100 | if [ "$gpucount" -eq 0 ] ; then 101 | echo "ERROR unable to get count of GPU(s). Is nvidia-smi working?" 102 | exit 4 103 | fi 104 | 105 | let maxgpuindex=$gpucount-1 106 | 107 | if [ $maxgpuindex -gt 0 ] ; then 108 | echo -n "Detected $gpucount GPU(s)." 109 | if [ "$gpu" == "all" ] ; then 110 | echo " Will run in parallel." 111 | else 112 | echo " Using only GPU $gpu" 113 | fi 114 | else 115 | echo "Single GPU detected." 116 | fi 117 | 118 | if [ "$gpu" == "all" ] ; then 119 | let cntr=0 120 | else 121 | let cntr=$gpu 122 | let gpucount=1 123 | fi 124 | 125 | parallel_job_file="$trainoutdir/parallel.jobs" 126 | 127 | for model_name in `echo "$model_list" | sed "s/,/ /g"` ; do 128 | if [ ! -d "$trainoutdir/$model_name" ] ; then 129 | echo "ERROR, no $trainoutdir/$model_name directory found." 
130 | exit 2 131 | fi 132 | echo -e "$numiterations\n$cntr\n$base_lr\n$power\n$momentum\n$weight_decay\n$average_loss\n$lr_policy\n$iter_size\n$snapshot_interval\n$model_name\n$trainoutdir" >> $parallel_job_file 133 | if [ "$gpu" == "all" ] ; then 134 | let cntr++ 135 | if [ $cntr -gt $maxgpuindex ] ; then 136 | let cntr=0 137 | fi 138 | fi 139 | done 140 | 141 | # the --delay 2 is to add a 2 second delay between starting jobs 142 | # without this, jobs would fail on the GPU with an out-of-memory error 143 | # 144 | 145 | cat $parallel_job_file | parallel --no-notice --delay 2 -N 12 -j $gpucount caffetrain.sh --numiterations {1} --gpu {2} --base_learn {3} --power {4} --momentum {5} --weight_decay {6} --average_loss {7} --lr_policy {8} --iter_size {9} --snapshot_interval {10} {11} {12} 146 | if [ $? != 0 ] ; then 147 | echo "Non-zero exit code from caffe during model training. Exiting." 148 | exit 1 149 | fi 150 | 151 | echo "" 152 | echo "Training has completed. Have a nice day!" 153 | echo "" 154 | -------------------------------------------------------------------------------- /vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure(2) do |config| 9 | config.vm.box = "ubuntu/xenial64" 10 | config.vm.provision :shell, path: "bootstrap.sh" 11 | config.ssh.forward_x11 = true 12 | 13 | config.vm.provider "virtualbox" do |vb| 14 | vb.memory = "2048" 15 | vb.cpus = "2" 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /vagrant/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | apt-get -y update 3 | apt-get -y upgrade 4 | apt-get -y install octave octave-image octave-pkg-dev git python-pip unzip 5 | 6 | cd ~ 7 | wget https://github.com/stegro/hdf5oct/archive/b047e6e611e874b02740e7465f5d139e74f9765f.zip 8 | unzip b047e6e611e874b02740e7465f5d139e74f9765f.zip 9 | cd hdf5oct-* 10 | make 11 | make install 12 | cd ~ 13 | wget https://github.com/bats-core/bats-core/archive/v0.4.0.tar.gz 14 | tar -zxf v0.4.0.tar.gz 15 | cd bats-core-0.4.0 16 | ./install.sh /usr/local 17 | cd ~ 18 | -------------------------------------------------------------------------------- /vagrant/cdeep3m_logo-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CRBS/cdeep3m/abf995802608073532f324ce57425fa1b7cf9d19/vagrant/cdeep3m_logo-01.png --------------------------------------------------------------------------------