├── .gitattributes ├── .gitignore ├── .travis.yml ├── Jenkinsfile ├── LICENCE ├── MANIFEST.in ├── README.md ├── __init__.py ├── docs ├── .nojekyll ├── Makefile ├── build │ └── html │ │ ├── .nojekyll │ │ ├── _modules │ │ ├── index.html │ │ └── tensorgraph │ │ │ ├── cost.html │ │ │ ├── data_iterator.html │ │ │ ├── dataset │ │ │ ├── cifar10.html │ │ │ ├── cifar100.html │ │ │ ├── mnist.html │ │ │ └── preprocess.html │ │ │ ├── graph.html │ │ │ ├── layers │ │ │ ├── activation.html │ │ │ ├── backbones.html │ │ │ ├── cast.html │ │ │ ├── conv.html │ │ │ ├── linear.html │ │ │ ├── merge.html │ │ │ ├── misc.html │ │ │ ├── noise.html │ │ │ ├── normalization.html │ │ │ ├── recurrent.html │ │ │ ├── sampling.html │ │ │ └── template.html │ │ │ ├── models_zoo │ │ │ ├── airnet │ │ │ │ ├── model.html │ │ │ │ └── train.html │ │ │ ├── attention_unet │ │ │ │ ├── model.html │ │ │ │ └── train.html │ │ │ ├── densenet │ │ │ │ ├── model.html │ │ │ │ └── train.html │ │ │ ├── hed_modified │ │ │ │ ├── model.html │ │ │ │ └── train.html │ │ │ └── heteronet │ │ │ │ ├── layers.html │ │ │ │ ├── model.html │ │ │ │ └── train.html │ │ │ ├── node.html │ │ │ ├── progbar.html │ │ │ ├── sequential.html │ │ │ ├── stopper.html │ │ │ ├── trainobject.html │ │ │ └── utils.html │ │ ├── _sources │ │ ├── index.rst.txt │ │ └── modules │ │ │ ├── modules.rst.txt │ │ │ ├── tensorgraph.dataset.rst.txt │ │ │ ├── tensorgraph.layers.rst.txt │ │ │ ├── tensorgraph.models_zoo.airnet.rst.txt │ │ │ ├── tensorgraph.models_zoo.attention_unet.rst.txt │ │ │ ├── tensorgraph.models_zoo.densenet.rst.txt │ │ │ ├── tensorgraph.models_zoo.hed_modified.rst.txt │ │ │ ├── tensorgraph.models_zoo.heteronet.rst.txt │ │ │ ├── tensorgraph.models_zoo.rst.txt │ │ │ ├── tensorgraph.models_zoo.unet.rst.txt │ │ │ └── tensorgraph.rst.txt │ │ ├── _static │ │ ├── ajax-loader.gif │ │ ├── basic.css │ │ ├── comment-bright.png │ │ ├── comment-close.png │ │ ├── comment.png │ │ ├── css │ │ │ ├── badge_only.css │ │ │ └── theme.css │ │ ├── doctools.js │ │ ├── 
documentation_options.js │ │ ├── down-pressed.png │ │ ├── down.png │ │ ├── file.png │ │ ├── fonts │ │ │ ├── Inconsolata-Bold.ttf │ │ │ ├── Inconsolata-Regular.ttf │ │ │ ├── Lato-Bold.ttf │ │ │ ├── Lato-BoldItalic.ttf │ │ │ ├── Lato-Italic.ttf │ │ │ ├── Lato-Regular.ttf │ │ │ ├── RobotoSlab-Bold.ttf │ │ │ ├── RobotoSlab-Regular.ttf │ │ │ ├── fontawesome-webfont.eot │ │ │ ├── fontawesome-webfont.svg │ │ │ ├── fontawesome-webfont.ttf │ │ │ ├── fontawesome-webfont.woff │ │ │ └── fontawesome-webfont.woff2 │ │ ├── jquery-3.2.1.js │ │ ├── jquery.js │ │ ├── js │ │ │ ├── modernizr.min.js │ │ │ └── theme.js │ │ ├── minus.png │ │ ├── plus.png │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── underscore-1.3.1.js │ │ ├── underscore.js │ │ ├── up-pressed.png │ │ ├── up.png │ │ └── websupport.js │ │ ├── genindex.html │ │ ├── index.html │ │ ├── modules │ │ ├── modules.html │ │ ├── tensorgraph.dataset.html │ │ ├── tensorgraph.html │ │ ├── tensorgraph.layers.html │ │ ├── tensorgraph.models_zoo.airnet.html │ │ ├── tensorgraph.models_zoo.attention_unet.html │ │ ├── tensorgraph.models_zoo.densenet.html │ │ ├── tensorgraph.models_zoo.hed_modified.html │ │ ├── tensorgraph.models_zoo.heteronet.html │ │ ├── tensorgraph.models_zoo.html │ │ └── tensorgraph.models_zoo.unet.html │ │ ├── objects.inv │ │ ├── py-modindex.html │ │ ├── search.html │ │ └── searchindex.js ├── conf.py ├── index.html ├── index.md ├── index.rst ├── make.bat ├── modules │ ├── modules.rst │ ├── tensorgraph.dataset.rst │ ├── tensorgraph.layers.rst │ ├── tensorgraph.models_zoo.airnet.rst │ ├── tensorgraph.models_zoo.attention_unet.rst │ ├── tensorgraph.models_zoo.densenet.rst │ ├── tensorgraph.models_zoo.hed_modified.rst │ ├── tensorgraph.models_zoo.heteronet.rst │ ├── tensorgraph.models_zoo.rst │ ├── tensorgraph.models_zoo.unet.rst │ └── tensorgraph.rst └── readme.md ├── draw ├── graph.png ├── hsoftmax.png └── transferlearn.png ├── examples ├── __init__.py ├── charcnn_text_classifier.py ├── cifar10_allcnn.py ├── 
example.py ├── hierachical_softmax.py ├── mnist_cnn.py ├── multi_gpus_horovod.py └── tweets_large.csv ├── exclude.txt ├── pipupdate.sh ├── setup.cfg ├── setup.py ├── tensorgraph ├── __init__.py ├── cost.py ├── data_iterator.py ├── dataset │ ├── __init__.py │ ├── cifar10.py │ ├── cifar100.py │ ├── mnist.py │ └── preprocess.py ├── graph.py ├── layers │ ├── __init__.py │ ├── activation.py │ ├── backbones.py │ ├── cast.py │ ├── conv.py │ ├── linear.py │ ├── merge.py │ ├── misc.py │ ├── noise.py │ ├── normalization.py │ ├── recurrent.py │ ├── sampling.py │ └── template.py ├── models_zoo │ ├── __init__.py │ ├── aibraintumormodel │ │ ├── .gitignore │ │ ├── README.md │ │ ├── __init__.py │ │ ├── main_train.py │ │ ├── model_C3 │ │ │ └── train_model_C3.ini │ │ ├── model_C4 │ │ │ └── train_model_C4.ini │ │ ├── model_C4R │ │ │ └── train_model_C4R.ini │ │ ├── model_C5 │ │ │ └── train_model_C5.ini │ │ ├── model_C5XS │ │ │ └── train_model_C5XS.ini │ │ ├── model_CR │ │ │ └── train_model_CR.ini │ │ ├── nn │ │ │ ├── __init__.py │ │ │ ├── data │ │ │ │ ├── __init__.py │ │ │ │ └── data_train_hvd.py │ │ │ ├── model │ │ │ │ ├── CommonBlocks.py │ │ │ │ ├── __init__.py │ │ │ │ ├── model_C3.py │ │ │ │ ├── model_C4.py │ │ │ │ ├── model_C4R.py │ │ │ │ ├── model_C5.py │ │ │ │ ├── model_C5XS.py │ │ │ │ └── model_CR.py │ │ │ └── run │ │ │ │ ├── __init__.py │ │ │ │ ├── configReader.py │ │ │ │ ├── costfunction.py │ │ │ │ ├── radiomicsFeatures.py │ │ │ │ └── train.py │ │ ├── run_mpi.sh │ │ └── run_nonmpi.sh │ ├── airnet │ │ ├── __init__.py │ │ ├── model.py │ │ └── train.py │ ├── attention_unet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── img │ │ │ └── attention_unet.jpg │ │ ├── model.py │ │ └── train.py │ ├── densenet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── img │ │ │ ├── level1.png │ │ │ ├── level2.png │ │ │ └── level3.png │ │ ├── model.py │ │ └── train.py │ ├── echocardiac │ │ └── dilated_unet │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── dilated_unet.png │ │ │ ├── model.py │ │ │ └── 
train.py │ ├── hed_modified │ │ ├── __init__.py │ │ ├── img │ │ │ ├── level1.png │ │ │ └── level2.png │ │ ├── model.py │ │ └── train.py │ ├── heteronet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── img │ │ │ ├── level1.png │ │ │ ├── level2.png │ │ │ ├── level3_1.png │ │ │ └── level3_2.png │ │ ├── layers.py │ │ ├── model.py │ │ └── train.py │ ├── image_search │ │ ├── __init__.py │ │ ├── model.py │ │ ├── train.py │ │ └── triplet_or_hist_loss.py │ └── unet │ │ ├── README.md │ │ ├── __init__.py │ │ ├── model.py │ │ └── train.py ├── node.py ├── preprocessing.py ├── progbar.py ├── sequential.py ├── stopper.py ├── trainobject.py └── utils.py └── test ├── __init__.py ├── cost_test.py ├── data_iterator_test.py ├── layer_backbones_test.py ├── layer_conv_test.py ├── layer_linear_test.py ├── layer_merge_test.py ├── layer_misc_test.py ├── layer_noise_test.py ├── models_zoo ├── aibraintumormodel │ ├── model_C3_test.py │ ├── model_C4R_test.py │ ├── model_C5XS_test.py │ ├── model_CR_test.py │ └── resources │ │ ├── __init__.py │ │ ├── generateDummyData.py │ │ ├── run_mpi.sh │ │ ├── test_train.ini │ │ ├── test_train_ref.txt │ │ └── train_mpi_test.py.not_used ├── airnet_test.py ├── attention_unet_test.py ├── densenet_test.py ├── dilated_unet_test.py ├── hed_modified_test.py ├── heteronet_test.py ├── image_search_test.py └── unet_test.py ├── tensorgraph_test.py └── utils_test.py /.gitattributes: -------------------------------------------------------------------------------- 1 | *.tfrecords filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *pyc 2 | .DS_Store 3 | doctrees/ 4 | .buildinfo 5 | .remote-sync.json 6 | *tensorboard* 7 | .coverage.* 8 | __pycache__/ 9 | -------------------------------------------------------------------------------- /.travis.yml: 
-------------------------------------------------------------------------------- 1 | dist: trusty 2 | language: python 3 | python: 4 | - "2.7" 5 | - "3.5" 6 | cache: pip 7 | install: 8 | 9 | - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then 10 | pip install --only-binary=numpy,scipy numpy nose scipy pytest sklearn; 11 | pip install tensorflow; 12 | pip install git+https://github.com/hycis/TensorGraph.git@master; 13 | fi 14 | 15 | - if [[ "$TRAVIS_PYTHON_VERSION" == "3.5" ]]; then 16 | pip3 install --only-binary=numpy,scipy numpy nose scipy pytest sklearn; 17 | pip3 install tensorflow; 18 | pip3 install git+https://github.com/hycis/TensorGraph.git@master; 19 | fi 20 | 21 | script: 22 | - echo "TensorGraph Testing.." 23 | - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then 24 | python -m pytest test; 25 | fi 26 | - if [[ "$TRAVIS_PYTHON_VERSION" == "3.5" ]]; then 27 | python3 -m pytest test; 28 | fi 29 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md LICENCE 2 | recursive-include tensorgraph *.py 3 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/__init__.py -------------------------------------------------------------------------------- /docs/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/.nojekyll -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these 
variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = TensorGraph 8 | SOURCEDIR = . 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/build/html/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/.nojekyll -------------------------------------------------------------------------------- /docs/build/html/_sources/index.rst.txt: -------------------------------------------------------------------------------- 1 | .. TensorGraph documentation master file, created by 2 | sphinx-quickstart on Sat Jun 2 17:01:26 2018. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to TensorGraph's documentation! 7 | ======================================= 8 | 9 | .. toctree:: 10 | :maxdepth: 10 11 | 12 | modules/tensorgraph 13 | 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`modindex` 20 | * :ref:`search` 21 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/modules.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph 2 | =========== 3 | 4 | .. 
toctree:: 5 | :maxdepth: 4 6 | 7 | tensorgraph 8 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.dataset.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph.dataset package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.dataset.cifar10 module 8 | ---------------------------------- 9 | 10 | .. automodule:: tensorgraph.dataset.cifar10 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.dataset.cifar100 module 16 | ----------------------------------- 17 | 18 | .. automodule:: tensorgraph.dataset.cifar100 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | tensorgraph.dataset.mnist module 24 | -------------------------------- 25 | 26 | .. automodule:: tensorgraph.dataset.mnist 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | tensorgraph.dataset.preprocess module 32 | ------------------------------------- 33 | 34 | .. automodule:: tensorgraph.dataset.preprocess 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. automodule:: tensorgraph.dataset 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.layers.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph.layers package 2 | ========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.layers.activation module 8 | ------------------------------------ 9 | 10 | .. automodule:: tensorgraph.layers.activation 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.layers.backbones module 16 | ----------------------------------- 17 | 18 | .. 
automodule:: tensorgraph.layers.backbones 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | tensorgraph.layers.cast module 24 | ------------------------------ 25 | 26 | .. automodule:: tensorgraph.layers.cast 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | tensorgraph.layers.conv module 32 | ------------------------------ 33 | 34 | .. automodule:: tensorgraph.layers.conv 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | tensorgraph.layers.linear module 40 | -------------------------------- 41 | 42 | .. automodule:: tensorgraph.layers.linear 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | tensorgraph.layers.merge module 48 | ------------------------------- 49 | 50 | .. automodule:: tensorgraph.layers.merge 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | tensorgraph.layers.misc module 56 | ------------------------------ 57 | 58 | .. automodule:: tensorgraph.layers.misc 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | tensorgraph.layers.noise module 64 | ------------------------------- 65 | 66 | .. automodule:: tensorgraph.layers.noise 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | tensorgraph.layers.normalization module 72 | --------------------------------------- 73 | 74 | .. automodule:: tensorgraph.layers.normalization 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | tensorgraph.layers.recurrent module 80 | ----------------------------------- 81 | 82 | .. automodule:: tensorgraph.layers.recurrent 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | tensorgraph.layers.sampling module 88 | ---------------------------------- 89 | 90 | .. automodule:: tensorgraph.layers.sampling 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | tensorgraph.layers.template module 96 | ---------------------------------- 97 | 98 | .. 
automodule:: tensorgraph.layers.template 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | 103 | 104 | Module contents 105 | --------------- 106 | 107 | .. automodule:: tensorgraph.layers 108 | :members: 109 | :undoc-members: 110 | :show-inheritance: 111 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.models_zoo.airnet.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.airnet package 2 | ====================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.airnet.model module 8 | ------------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.airnet.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.airnet.train module 16 | ------------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.airnet.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: tensorgraph.models_zoo.airnet 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.models_zoo.attention_unet.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.attention\_unet package 2 | =============================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.attention\_unet.model module 8 | ---------------------------------------------------- 9 | 10 | .. 
automodule:: tensorgraph.models_zoo.attention_unet.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.attention\_unet.train module 16 | ---------------------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.attention_unet.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: tensorgraph.models_zoo.attention_unet 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.models_zoo.densenet.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.densenet package 2 | ======================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.densenet.model module 8 | --------------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.densenet.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.densenet.train module 16 | --------------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.densenet.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. 
automodule:: tensorgraph.models_zoo.densenet 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.models_zoo.hed_modified.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.hed\_modified package 2 | ============================================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.hed\_modified.model module 8 | -------------------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.hed_modified.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.hed\_modified.train module 16 | -------------------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.hed_modified.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: tensorgraph.models_zoo.hed_modified 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.models_zoo.heteronet.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.heteronet package 2 | ========================================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.heteronet.layers module 8 | ----------------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.heteronet.layers 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.heteronet.model module 16 | ---------------------------------------------- 17 | 18 | .. 
automodule:: tensorgraph.models_zoo.heteronet.model 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | tensorgraph.models\_zoo.heteronet.train module 24 | ---------------------------------------------- 25 | 26 | .. automodule:: tensorgraph.models_zoo.heteronet.train 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: tensorgraph.models_zoo.heteronet 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.models_zoo.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo package 2 | =============================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | tensorgraph.models_zoo.airnet 10 | tensorgraph.models_zoo.attention_unet 11 | tensorgraph.models_zoo.densenet 12 | tensorgraph.models_zoo.hed_modified 13 | tensorgraph.models_zoo.heteronet 14 | tensorgraph.models_zoo.unet 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. automodule:: tensorgraph.models_zoo 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.models_zoo.unet.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.unet package 2 | ==================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.unet.model module 8 | ----------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.unet.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.unet.train module 16 | ----------------------------------------- 17 | 18 | .. 
automodule:: tensorgraph.models_zoo.unet.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: tensorgraph.models_zoo.unet 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/build/html/_sources/modules/tensorgraph.rst.txt: -------------------------------------------------------------------------------- 1 | tensorgraph package 2 | =================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | tensorgraph.dataset 10 | tensorgraph.layers 11 | tensorgraph.models_zoo 12 | 13 | Submodules 14 | ---------- 15 | 16 | tensorgraph.cost module 17 | ----------------------- 18 | 19 | .. automodule:: tensorgraph.cost 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | 24 | tensorgraph.data\_iterator module 25 | --------------------------------- 26 | 27 | .. automodule:: tensorgraph.data_iterator 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | 32 | tensorgraph.graph module 33 | ------------------------ 34 | 35 | .. automodule:: tensorgraph.graph 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | 40 | tensorgraph.node module 41 | ----------------------- 42 | 43 | .. automodule:: tensorgraph.node 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | 48 | tensorgraph.progbar module 49 | -------------------------- 50 | 51 | .. automodule:: tensorgraph.progbar 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | 56 | tensorgraph.sequential module 57 | ----------------------------- 58 | 59 | .. automodule:: tensorgraph.sequential 60 | :members: 61 | :undoc-members: 62 | :show-inheritance: 63 | 64 | tensorgraph.stopper module 65 | -------------------------- 66 | 67 | .. 
automodule:: tensorgraph.stopper 68 | :members: 69 | :undoc-members: 70 | :show-inheritance: 71 | 72 | tensorgraph.trainobject module 73 | ------------------------------ 74 | 75 | .. automodule:: tensorgraph.trainobject 76 | :members: 77 | :undoc-members: 78 | :show-inheritance: 79 | 80 | tensorgraph.utils module 81 | ------------------------ 82 | 83 | .. automodule:: tensorgraph.utils 84 | :members: 85 | :undoc-members: 86 | :show-inheritance: 87 | 88 | 89 | Module contents 90 | --------------- 91 | 92 | .. automodule:: tensorgraph 93 | :members: 94 | :undoc-members: 95 | :show-inheritance: 96 | -------------------------------------------------------------------------------- /docs/build/html/_static/ajax-loader.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/ajax-loader.gif -------------------------------------------------------------------------------- /docs/build/html/_static/comment-bright.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/comment-bright.png -------------------------------------------------------------------------------- /docs/build/html/_static/comment-close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/comment-close.png -------------------------------------------------------------------------------- /docs/build/html/_static/comment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/comment.png 
-------------------------------------------------------------------------------- /docs/build/html/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;overflow-y:scroll;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions 
.rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{max-height:100%}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} 2 | -------------------------------------------------------------------------------- /docs/build/html/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: '', 3 | VERSION: '', 4 | LANGUAGE: 'None', 5 | COLLAPSE_INDEX: false, 6 | FILE_SUFFIX: '.html', 7 | HAS_SOURCE: true, 8 | SOURCELINK_SUFFIX: '.txt' 9 | }; 
-------------------------------------------------------------------------------- /docs/build/html/_static/down-pressed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/down-pressed.png -------------------------------------------------------------------------------- /docs/build/html/_static/down.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/down.png -------------------------------------------------------------------------------- /docs/build/html/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/file.png -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Inconsolata-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/Inconsolata-Bold.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Inconsolata-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/Inconsolata-Regular.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato-Bold.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/Lato-Bold.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato-BoldItalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/Lato-BoldItalic.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato-Italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/Lato-Italic.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/Lato-Regular.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/RobotoSlab-Bold.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/RobotoSlab-Regular.ttf -------------------------------------------------------------------------------- 
/docs/build/html/_static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/docs/build/html/_static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | require=function r(s,a,l){function c(i,n){if(!a[i]){if(!s[i]){var e="function"==typeof require&&require;if(!n&&e)return e(i,!0);if(u)return u(i,!0);var t=new Error("Cannot find module '"+i+"'");throw t.code="MODULE_NOT_FOUND",t}var o=a[i]={exports:{}};s[i][0].call(o.exports,function(n){var e=s[i][1][n];return c(e||n)},o,o.exports,r,s,a,l)}return a[i].exports}for(var u="function"==typeof 
require&&require,n=0;n"),i("table.docutils.footnote").wrap("
"),i("table.docutils.citation").wrap("
"),i(".wy-menu-vertical ul").not(".simple").siblings("a").each(function(){var e=i(this);expand=i(''),expand.on("click",function(n){return t.toggleCurrent(e),n.stopPropagation(),!1}),e.prepend(expand)})},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),i=e.find('[href="'+n+'"]');if(0===i.length){var t=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(i=e.find('[href="#'+t.attr("id")+'"]')).length&&(i=e.find('[href="#"]'))}0this.docHeight||(this.navBar.scrollTop(i),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",function(){this.linkScroll=!1})},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current"),e.siblings().find("li.current").removeClass("current"),e.find("> ul li.current").removeClass("current"),e.toggleClass("current")}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:e.exports.ThemeNav}),function(){for(var r=0,n=["ms","moz","webkit","o"],e=0;e 2 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. TensorGraph documentation master file, created by 2 | sphinx-quickstart on Sat Jun 2 17:01:26 2018. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to TensorGraph's documentation! 7 | ======================================= 8 | 9 | .. 
toctree:: 10 | :maxdepth: 10 11 | 12 | modules/tensorgraph 13 | 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`modindex` 20 | * :ref:`search` 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=TensorGraph 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/modules/modules.rst: -------------------------------------------------------------------------------- 1 | tensorgraph 2 | =========== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | tensorgraph 8 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.dataset.rst: -------------------------------------------------------------------------------- 1 | tensorgraph.dataset package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.dataset.cifar10 module 8 | ---------------------------------- 9 | 10 | .. 
automodule:: tensorgraph.dataset.cifar10 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.dataset.cifar100 module 16 | ----------------------------------- 17 | 18 | .. automodule:: tensorgraph.dataset.cifar100 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | tensorgraph.dataset.mnist module 24 | -------------------------------- 25 | 26 | .. automodule:: tensorgraph.dataset.mnist 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | tensorgraph.dataset.preprocess module 32 | ------------------------------------- 33 | 34 | .. automodule:: tensorgraph.dataset.preprocess 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. automodule:: tensorgraph.dataset 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.layers.rst: -------------------------------------------------------------------------------- 1 | tensorgraph.layers package 2 | ========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.layers.activation module 8 | ------------------------------------ 9 | 10 | .. automodule:: tensorgraph.layers.activation 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.layers.backbones module 16 | ----------------------------------- 17 | 18 | .. automodule:: tensorgraph.layers.backbones 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | tensorgraph.layers.cast module 24 | ------------------------------ 25 | 26 | .. automodule:: tensorgraph.layers.cast 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | tensorgraph.layers.conv module 32 | ------------------------------ 33 | 34 | .. 
automodule:: tensorgraph.layers.conv 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | tensorgraph.layers.linear module 40 | -------------------------------- 41 | 42 | .. automodule:: tensorgraph.layers.linear 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | tensorgraph.layers.merge module 48 | ------------------------------- 49 | 50 | .. automodule:: tensorgraph.layers.merge 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | tensorgraph.layers.misc module 56 | ------------------------------ 57 | 58 | .. automodule:: tensorgraph.layers.misc 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | tensorgraph.layers.noise module 64 | ------------------------------- 65 | 66 | .. automodule:: tensorgraph.layers.noise 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | tensorgraph.layers.normalization module 72 | --------------------------------------- 73 | 74 | .. automodule:: tensorgraph.layers.normalization 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | tensorgraph.layers.recurrent module 80 | ----------------------------------- 81 | 82 | .. automodule:: tensorgraph.layers.recurrent 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | tensorgraph.layers.sampling module 88 | ---------------------------------- 89 | 90 | .. automodule:: tensorgraph.layers.sampling 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | tensorgraph.layers.template module 96 | ---------------------------------- 97 | 98 | .. automodule:: tensorgraph.layers.template 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | 103 | 104 | Module contents 105 | --------------- 106 | 107 | .. 
automodule:: tensorgraph.layers 108 | :members: 109 | :undoc-members: 110 | :show-inheritance: 111 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.models_zoo.airnet.rst: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.airnet package 2 | ====================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.airnet.model module 8 | ------------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.airnet.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.airnet.train module 16 | ------------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.airnet.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: tensorgraph.models_zoo.airnet 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.models_zoo.attention_unet.rst: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.attention\_unet package 2 | =============================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.attention\_unet.model module 8 | ---------------------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.attention_unet.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.attention\_unet.train module 16 | ---------------------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.attention_unet.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. 
automodule:: tensorgraph.models_zoo.attention_unet 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.models_zoo.densenet.rst: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.densenet package 2 | ======================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.densenet.model module 8 | --------------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.densenet.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.densenet.train module 16 | --------------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.densenet.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: tensorgraph.models_zoo.densenet 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.models_zoo.hed_modified.rst: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.hed\_modified package 2 | ============================================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.hed\_modified.model module 8 | -------------------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.hed_modified.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.hed\_modified.train module 16 | -------------------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.hed_modified.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. 
automodule:: tensorgraph.models_zoo.hed_modified 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.models_zoo.heteronet.rst: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.heteronet package 2 | ========================================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.heteronet.layers module 8 | ----------------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.heteronet.layers 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.heteronet.model module 16 | ---------------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.heteronet.model 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | tensorgraph.models\_zoo.heteronet.train module 24 | ---------------------------------------------- 25 | 26 | .. automodule:: tensorgraph.models_zoo.heteronet.train 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: tensorgraph.models_zoo.heteronet 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.models_zoo.rst: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo package 2 | =============================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | tensorgraph.models_zoo.airnet 10 | tensorgraph.models_zoo.attention_unet 11 | tensorgraph.models_zoo.densenet 12 | tensorgraph.models_zoo.hed_modified 13 | tensorgraph.models_zoo.heteronet 14 | tensorgraph.models_zoo.unet 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. 
automodule:: tensorgraph.models_zoo 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.models_zoo.unet.rst: -------------------------------------------------------------------------------- 1 | tensorgraph.models\_zoo.unet package 2 | ==================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | tensorgraph.models\_zoo.unet.model module 8 | ----------------------------------------- 9 | 10 | .. automodule:: tensorgraph.models_zoo.unet.model 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | tensorgraph.models\_zoo.unet.train module 16 | ----------------------------------------- 17 | 18 | .. automodule:: tensorgraph.models_zoo.unet.train 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | Module contents 25 | --------------- 26 | 27 | .. automodule:: tensorgraph.models_zoo.unet 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | -------------------------------------------------------------------------------- /docs/modules/tensorgraph.rst: -------------------------------------------------------------------------------- 1 | tensorgraph package 2 | =================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | tensorgraph.dataset 10 | tensorgraph.layers 11 | tensorgraph.models_zoo 12 | 13 | Submodules 14 | ---------- 15 | 16 | tensorgraph.cost module 17 | ----------------------- 18 | 19 | .. automodule:: tensorgraph.cost 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | 24 | tensorgraph.data\_iterator module 25 | --------------------------------- 26 | 27 | .. automodule:: tensorgraph.data_iterator 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | 32 | tensorgraph.graph module 33 | ------------------------ 34 | 35 | .. 
automodule:: tensorgraph.graph 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | 40 | tensorgraph.node module 41 | ----------------------- 42 | 43 | .. automodule:: tensorgraph.node 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | 48 | tensorgraph.progbar module 49 | -------------------------- 50 | 51 | .. automodule:: tensorgraph.progbar 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | 56 | tensorgraph.sequential module 57 | ----------------------------- 58 | 59 | .. automodule:: tensorgraph.sequential 60 | :members: 61 | :undoc-members: 62 | :show-inheritance: 63 | 64 | tensorgraph.stopper module 65 | -------------------------- 66 | 67 | .. automodule:: tensorgraph.stopper 68 | :members: 69 | :undoc-members: 70 | :show-inheritance: 71 | 72 | tensorgraph.trainobject module 73 | ------------------------------ 74 | 75 | .. automodule:: tensorgraph.trainobject 76 | :members: 77 | :undoc-members: 78 | :show-inheritance: 79 | 80 | tensorgraph.utils module 81 | ------------------------ 82 | 83 | .. automodule:: tensorgraph.utils 84 | :members: 85 | :undoc-members: 86 | :show-inheritance: 87 | 88 | 89 | Module contents 90 | --------------- 91 | 92 | .. 
automodule:: tensorgraph 93 | :members: 94 | :undoc-members: 95 | :show-inheritance: 96 | -------------------------------------------------------------------------------- /docs/readme.md: -------------------------------------------------------------------------------- 1 | #### Build HTML docs: 2 | run 3 | ```bash 4 | sphinx-apidoc -o modules ../tensorgraph 5 | make html 6 | ``` 7 | -------------------------------------------------------------------------------- /draw/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/draw/graph.png -------------------------------------------------------------------------------- /draw/hsoftmax.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/draw/hsoftmax.png -------------------------------------------------------------------------------- /draw/transferlearn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/draw/transferlearn.png -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/examples/__init__.py -------------------------------------------------------------------------------- /examples/charcnn_text_classifier.py: -------------------------------------------------------------------------------- 1 | 2 | import tensorflow as tf 3 | import tensorgraph as tg 4 | from tensorgraph.layers import Reshape, Embedding, Conv2D, RELU, Linear, Flatten, ReduceSum, Softmax 5 | from nltk.tokenize import RegexpTokenizer 6 | from nlpbox 
import CharNumberEncoder, CatNumberEncoder 7 | from tensorgraph.utils import valid, split_df, make_one_hot 8 | from tensorgraph.cost import entropy, accuracy 9 | import pandas 10 | import numpy as np 11 | 12 | # character CNN 13 | def model(word_len, sent_len, nclass): 14 | unicode_size = 1000 15 | ch_embed_dim = 20 16 | X_ph = tf.placeholder('int32', [None, sent_len, word_len]) 17 | input_sn = tg.StartNode(input_vars=[X_ph]) 18 | charcnn_hn = tg.HiddenNode(prev=[input_sn], 19 | layers=[Reshape(shape=(-1, word_len)), 20 | Embedding(cat_dim=unicode_size, 21 | encode_dim=ch_embed_dim, 22 | zero_pad=True), 23 | Reshape(shape=(-1, ch_embed_dim, word_len, 1)), 24 | Conv2D(num_filters=20, padding='VALID', 25 | kernel_size=(ch_embed_dim,5), stride=(1,1)), 26 | RELU(), 27 | Conv2D(num_filters=40, padding='VALID', 28 | kernel_size=(1,5), stride=(1,1)), 29 | RELU(), 30 | Conv2D(num_filters=60, padding='VALID', 31 | kernel_size=(1,5), stride=(1,2)), 32 | RELU(), 33 | Flatten(), 34 | Linear(nclass), 35 | Reshape((-1, sent_len, nclass)), 36 | ReduceSum(1), 37 | Softmax() 38 | ]) 39 | 40 | output_en = tg.EndNode(prev=[charcnn_hn]) 41 | graph = tg.Graph(start=[input_sn], end=[output_en]) 42 | y_train_sb = graph.train_fprop()[0] 43 | y_test_sb = graph.test_fprop()[0] 44 | 45 | return X_ph, y_train_sb, y_test_sb 46 | 47 | 48 | def tweets(word_len, sent_len, train_valid_ratio=[5,1]): 49 | df = pandas.read_csv('tweets_large.csv') 50 | field = 'text' 51 | label = 'label' 52 | tokenizer = RegexpTokenizer(r'\w+') 53 | 54 | # encode characters into numbers 55 | encoder = CharNumberEncoder(df[field].values, tokenizer=tokenizer, 56 | word_len=word_len, sent_len=sent_len) 57 | encoder.build_char_map() 58 | encode_X = encoder.make_char_embed() 59 | 60 | # encode categories into one hot array 61 | cat_encoder = CatNumberEncoder(df[label]) 62 | cat_encoder.build_cat_map() 63 | 64 | encode_y = cat_encoder.make_cat_embed() 65 | nclass = len(np.unique(encode_y)) 66 | encode_y = 
make_one_hot(encode_y, nclass) 67 | 68 | return encode_X, encode_y, nclass 69 | 70 | 71 | def train(): 72 | from tensorgraph.trainobject import train as mytrain 73 | with tf.Session() as sess: 74 | word_len = 20 75 | sent_len = 50 76 | 77 | # load data 78 | X_train, y_train, nclass = tweets(word_len, sent_len) 79 | 80 | # build model 81 | X_ph, y_train_sb, y_test_sb = model(word_len, sent_len, nclass) 82 | y_ph = tf.placeholder('float32', [None, nclass]) 83 | 84 | # set cost and optimizer 85 | train_cost_sb = entropy(y_ph, y_train_sb) 86 | optimizer = tf.train.AdamOptimizer(0.001) 87 | test_accu_sb = accuracy(y_ph, y_test_sb) 88 | 89 | # train model 90 | mytrain(session=sess, 91 | feed_dict={X_ph:X_train, y_ph:y_train}, 92 | train_cost_sb=train_cost_sb, 93 | valid_cost_sb=-test_accu_sb, 94 | optimizer=optimizer, 95 | epoch_look_back=5, max_epoch=100, 96 | percent_decrease=0, train_valid_ratio=[5,1], 97 | batchsize=64, randomize_split=False) 98 | 99 | 100 | if __name__ == '__main__': 101 | train() 102 | -------------------------------------------------------------------------------- /examples/example.py: -------------------------------------------------------------------------------- 1 | 2 | import tensorflow as tf 3 | import numpy as np 4 | from tensorgraph import Graph, StartNode, HiddenNode, EndNode 5 | from tensorgraph.layers import Linear, RELU, Concat, Mean, Sum 6 | from tensorgraph import ProgressBar, SequentialIterator 7 | 8 | 9 | def model(): 10 | y1_dim = 50 11 | y2_dim = 100 12 | 13 | learning_rate = 0.01 14 | 15 | y1 = tf.placeholder('float32', [None, y1_dim]) 16 | y2 = tf.placeholder('float32', [None, y2_dim]) 17 | start1 = StartNode(input_vars=[y1]) 18 | start2 = StartNode(input_vars=[y2]) 19 | 20 | h1 = HiddenNode(prev=[start1, start2], 21 | input_merge_mode=Concat(), 22 | layers=[Linear(y2_dim), RELU()]) 23 | h2 = HiddenNode(prev=[start2], 24 | layers=[Linear(y2_dim), RELU()]) 25 | h3 = HiddenNode(prev=[h1, h2], 26 | input_merge_mode=Sum(), 27 | 
layers=[Linear(y1_dim), RELU()]) 28 | e1 = EndNode(prev=[h3]) 29 | e2 = EndNode(prev=[h2]) 30 | 31 | 32 | graph = Graph(start=[start1, start2], end=[e1, e2]) 33 | o1, o2 = graph.train_fprop() 34 | 35 | o1_mse = tf.reduce_mean((y1 - o1)**2) 36 | o2_mse = tf.reduce_mean((y2 - o2)**2) 37 | mse = o1_mse + o2_mse 38 | optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse) 39 | return y1, y2, o1, o2, optimizer 40 | 41 | 42 | def train(): 43 | batchsize = 32 44 | y1, y2, o1, o2, optimizer = model() 45 | Y1 = np.random.rand(100, 50) 46 | Y2 = np.random.rand(100, 100) 47 | data = SequentialIterator(Y1, Y2, batchsize=batchsize) 48 | 49 | init = tf.global_variables_initializer() 50 | with tf.Session() as sess: 51 | sess.run(init) 52 | saver = tf.train.Saver() 53 | for i in range(10): 54 | pbar = ProgressBar(target=len(data)) 55 | n_exp = 0 56 | for y1_batch, y2_batch in data: 57 | sess.run([o1, o2], feed_dict={y1:y1_batch, y2:y2_batch}) 58 | sess.run(optimizer, feed_dict={y1:y1_batch, y2:y2_batch}) 59 | n_exp += len(y1_batch) 60 | pbar.update(n_exp) 61 | print('end') 62 | # saver.save(sess, 'test.tf') 63 | 64 | 65 | if __name__ == '__main__': 66 | train() 67 | -------------------------------------------------------------------------------- /examples/hierachical_softmax.py: -------------------------------------------------------------------------------- 1 | 2 | from tensorgraph.node import StartNode, HiddenNode, EndNode 3 | import tensorflow as tf 4 | from tensorgraph.layers.linear import Linear 5 | from tensorgraph.layers.activation import RELU, Softmax 6 | from tensorgraph.layers.merge import Concat, Mean, Sum 7 | from tensorgraph.graph import Graph 8 | import numpy as np 9 | from tensorgraph.data_iterator import SequentialIterator 10 | 11 | 12 | ## params 13 | x_dim = 50 14 | component_dim = 100 15 | batchsize = 32 16 | learning_rate = 0.01 17 | 18 | 19 | x_ph = tf.placeholder('float32', [None, x_dim]) 20 | # the three components 21 | y1_ph = 
# Trains the cifar10 all-convolutional model on multiple GPUs with horovod:
# one process per GPU, gradients averaged with hvd.DistributedOptimizer.
import tensorgraph as tg
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
import cifar10_allcnn
from tensorflow.python.framework import ops

hvd.init()


def cifar10(create_tfrecords=True, batch_size=32):
    """Serialize cifar10 to tfrecords (optionally) and build batch readers.

    Args:
        create_tfrecords (bool): when True, download cifar10 and write the
            train/test tfrecord files before building the readers.
        batch_size (int): number of examples per dequeued batch.

    Returns:
        (train_records, n_train, test_records, n_test) where each records
        value is a dict mapping 'X' and 'y' to batched tensors and each count
        is the number of examples in that split.
    """
    tfrecords = tg.utils.MakeTFRecords()
    tfpath_train = './cifar10_train.tfrecords'
    tfpath_test = './cifar10_test.tfrecords'
    if create_tfrecords:
        X_train, y_train, X_test, y_test = tg.dataset.Cifar10()
        tfrecords.make_tfrecords_from_arrs(data_records={'X':X_train, 'y':y_train}, save_path=tfpath_train)
        tfrecords.make_tfrecords_from_arrs(data_records={'X':X_test, 'y':y_test}, save_path=tfpath_test)

    nr_train = tfrecords.read_and_decode(tfrecords_filename_list=[tfpath_train],
                                         data_shapes={'X':[32,32,3], 'y':[10]},
                                         batch_size=batch_size)
    nr_test = tfrecords.read_and_decode(tfrecords_filename_list=[tfpath_test],
                                        data_shapes={'X':[32,32,3], 'y':[10]},
                                        batch_size=batch_size)

    # Count records by scanning the files once; needed to size the epochs.
    n_train = sum(1 for _ in tf.python_io.tf_record_iterator(tfpath_train))
    n_test = sum(1 for _ in tf.python_io.tf_record_iterator(tfpath_test))
    return dict(nr_train), n_train, dict(nr_test), n_test


def train():
    """Build the model graph and run the distributed training loop.

    Side effects: writes tfrecord files (via cifar10) and prints per-epoch
    train loss and test accuracy on every rank.
    """
    graph = tf.Graph()
    with graph.as_default():
        batch_size = 100
        nr_train, n_train, nr_test, n_test = cifar10(create_tfrecords=True, batch_size=batch_size)
        seq = cifar10_allcnn.model(nclass=10, h=32, w=32, c=3)

        y_train_sb = seq.train_fprop(nr_train['X'])
        y_test_sb = seq.test_fprop(nr_test['X'])

        loss_train_sb = tg.cost.mse(y_train_sb, nr_train['y'])
        accu_train_sb = tg.cost.accuracy(y_train_sb, nr_train['y'])
        accu_test_sb = tg.cost.accuracy(y_test_sb, nr_test['y'])

        opt = tf.train.RMSPropOptimizer(0.001)
        opt = hvd.DistributedOptimizer(opt)

        # required for BatchNormalization layer
        update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
        with ops.control_dependencies(update_ops):
            train_op = opt.minimize(loss_train_sb)

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        bcast = hvd.broadcast_global_variables(0)

        # Pin GPU to be used to process local rank (one GPU per process)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = str(hvd.local_rank())

        with tf.Session(graph=graph, config=config) as sess:
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            sess.run(init_op)
            bcast.run()

            for epoch in range(100):
                pbar = tg.ProgressBar(n_train)
                ttl_train_loss = 0
                for i in range(0, n_train, batch_size):
                    pbar.update(i)
                    _, loss_train = sess.run([train_op, loss_train_sb])
                    ttl_train_loss += loss_train * batch_size
                pbar.update(n_train)
                ttl_train_loss /= n_train
                print('epoch {}, train loss {}'.format(epoch, ttl_train_loss))

                # BUGFIX(naming): this loop accumulates accuracy (accu_test_sb),
                # not a loss; the variables were previously named loss_test /
                # ttl_test_loss which contradicted the printed message.
                pbar = tg.ProgressBar(n_test)
                ttl_test_accu = 0
                for i in range(0, n_test, batch_size):
                    pbar.update(i)
                    accu_test = sess.run(accu_test_sb)
                    ttl_test_accu += accu_test * batch_size
                pbar.update(n_test)
                ttl_test_accu /= n_test
                print('epoch {}, test accuracy {}'.format(epoch, ttl_test_accu))

            coord.request_stop()
            coord.join(threads)

if __name__ == '__main__':
    train()
# Packaging script for tensorgraph.
# NOTE: distutils is deprecated (PEP 632); use setuptools.setup directly
# instead of mixing distutils.core.setup with setuptools.find_packages.
from setuptools import setup, find_packages

from tensorgraph import __version__

setup(
    name='tensorgraph',
    version=__version__,
    author='Joe Wu',
    author_email='hiceen@gmail.com',
    url='https://github.com/hycis/TensorGraph',
    download_url='https://github.com/hycis/TensorGraph/tarball/{}'.format(__version__),
    license='Apache 2.0, see LICENCE',
    description='A high level tensorflow library for building deep learning models',
    long_description=open('README.md').read(),
    packages=find_packages(),
    install_requires=['numpy>=1.7.1',
                      'six>=1.9.0',
                      'scikit-learn>=0.17',
                      'pandas>=0.17'],
    include_package_data=True,
    zip_safe=False
)
import numpy as np
from tensorgraph.utils import make_one_hot
import struct
import numpy
import gzip

import tarfile, inspect, os, sys
from six.moves.urllib.request import urlretrieve
from ..progbar import ProgressBar
from ..utils import get_file_from_url
from .preprocess import global_contrast_normalize, zca_whiten


def Cifar10(flatten=False, onehot=True, contrast_normalize=False, whiten=False, datadir='./cifar10/'):
    '''Download (if necessary) and load the CIFAR-10 dataset.

    Args:
        flatten (bool): keep images as flat 3072-d vectors instead of
            reshaping to (32, 32, 3)
        onehot (bool): return labels as one-hot vectors instead of int indices
        contrast_normalize (bool): apply global contrast normalization
        whiten (bool): apply ZCA whitening (result cached on disk)
        datadir (str): directory the archive is downloaded/extracted into

    Returns:
        (X_train, y_train, X_test, y_test) numpy arrays, pixels in [0, 1]
    '''
    url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    save_path = '{}/cifar-10-python.tar.gz'.format(datadir)
    datadir = get_file_from_url(save_path=save_path, origin=url, untar=True)
    sav_dir = datadir + '/cifar-10-batches-py'

    # Choose the unpickler once instead of re-checking the interpreter
    # version and re-importing inside the per-batch loop.
    if sys.version_info.major == 2:
        import cPickle
        load = cPickle.load
    elif sys.version_info.major == 3:
        import pickle
        from functools import partial
        # the batches were pickled under python2, hence encoding='bytes'
        load = partial(pickle.load, encoding='bytes')
    else:
        raise Exception('python version not 2 or 3')

    def make_data(batchnames):
        # Concatenate the raw pickled batch files into (X, y) arrays.
        X = []
        y = []
        for data_batch in batchnames:
            fp = sav_dir + '/' + data_batch
            with open(fp, 'rb') as fin:
                tbl = load(fin)
            X.append(tbl[b'data'])
            y.append(tbl[b'labels'])
        X = np.concatenate(X, axis=0).astype('f4')
        y = np.concatenate(y, axis=0).astype('int')
        X /= 255.0  # scale pixel values to [0, 1]
        return X, y

    X_train, y_train = make_data(['data_batch_1', 'data_batch_2', 'data_batch_3',
                                  'data_batch_4', 'data_batch_5'])
    X_test, y_test = make_data(['test_batch'])

    if contrast_normalize:
        norm_scale = 55.0  # Goodfellow
        X_train = global_contrast_normalize(X_train, scale=norm_scale)
        X_test = global_contrast_normalize(X_test, scale=norm_scale)

    if whiten:
        zca_cache = os.path.join(datadir, 'cifar-10-zca-cache.pkl')
        X_train, X_test = zca_whiten(X_train, X_test, cache=zca_cache)

    if onehot:
        y_train = make_one_hot(y_train, 10)
        y_test = make_one_hot(y_test, 10)

    if not flatten:
        # rows are stored channel-major (3, 32, 32); swap to (32, 32, 3)
        X_train = X_train.reshape((-1, 3, 32, 32)).swapaxes(1, 3)
        X_test = X_test.reshape((-1, 3, 32, 32)).swapaxes(1, 3)

    return X_train, y_train, X_test, y_test


if __name__ == '__main__':
    X_train, y_train, X_test, y_test = Cifar10(flatten=False, onehot=False)
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; this demo
    # only works with older SciPy — confirm pinned version or port to imageio.
    from scipy.misc import imshow, imsave
    imsave('img.png', X_train[0])
    print('X_train:', X_train.shape)
    print('y_train:', y_train.shape)
    print('X_test:', X_test.shape)
    print('y_test:', y_test.shape)
def Cifar100(flatten=False, onehot=True, datadir='./cifar100/', fine_label=True):
    '''Download (if necessary) and load the CIFAR-100 dataset.

    Args:
        flatten (bool): keep images as flat 3072-d vectors instead of
            reshaping to (32, 32, 3)
        onehot (bool): return labels as one-hot vectors instead of int indices
        datadir (str): directory the archive is downloaded/extracted into
        fine_label (bool): True (100 classes) False (20 classes)

    Returns:
        (X_train, y_train, X_test, y_test) numpy arrays, pixels in [0, 1]
    '''
    url = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
    save_path = '{}/cifar-100-python.tar.gz'.format(datadir)
    datadir = get_file_from_url(save_path=save_path, origin=url, untar=True)
    print('untar dir', datadir)
    sav_dir = datadir + '/cifar-100-python'

    # Choose the unpickler once instead of re-checking the interpreter
    # version and re-importing inside the per-batch loop.
    if sys.version_info.major == 2:
        import cPickle
        load = cPickle.load
    elif sys.version_info.major == 3:
        import pickle
        from functools import partial
        # the batches were pickled under python2, hence encoding='bytes'
        load = partial(pickle.load, encoding='bytes')
    else:
        raise Exception('python version not 2 or 3')

    # BUGFIX: the label key and class count depend only on fine_label;
    # previously they were recomputed on every loop iteration and a dead
    # outer `nclass = None` was left shadowed.
    label_key = b'fine_labels' if fine_label else b'coarse_labels'
    nclass = 100 if fine_label else 20

    def make_data(batchnames):
        # Concatenate the raw pickled batch files into (X, y) arrays.
        X = []
        y = []
        for data_batch in batchnames:
            fp = sav_dir + '/' + data_batch
            with open(fp, 'rb') as fin:
                tbl = load(fin)
            X.append(tbl[b'data'])
            y.append(tbl[label_key])
        X = np.concatenate(X, axis=0).astype('f4')
        y = np.concatenate(y, axis=0).astype('int')
        X /= 255.0  # scale pixel values to [0, 1]
        return X, y

    X_train, y_train = make_data(['train'])
    X_test, y_test = make_data(['test'])
    if onehot:
        y_train = make_one_hot(y_train, nclass)
        y_test = make_one_hot(y_test, nclass)

    if not flatten:
        # rows are stored channel-major (3, 32, 32); swap to (32, 32, 3)
        X_train = X_train.reshape((-1, 3, 32, 32)).swapaxes(1, 3)
        X_test = X_test.reshape((-1, 3, 32, 32)).swapaxes(1, 3)

    return X_train, y_train, X_test, y_test


if __name__ == '__main__':
    X_train, y_train, X_test, y_test = Cifar100(flatten=False, onehot=True)
    print('X_train:', X_train.shape)
    print('y_train:', y_train.shape)
    print('X_test:', X_test.shape)
    print('y_test:', y_test.shape)
all the nodes together 11 | 12 | Args: 13 | start (list): list of start nodes 14 | end (list): list of end nodes 15 | ''' 16 | assert isinstance(start, list) 17 | assert isinstance(end, list) 18 | self.start = start 19 | self.end = end 20 | for node in self.start: 21 | assert node.__class__.__name__ == 'StartNode' 22 | for node in self.end: 23 | assert node.__class__.__name__ == 'EndNode' 24 | # nodes visited during train or test fprop 25 | self.visited_train = {} 26 | self.visited_test = {} 27 | 28 | 29 | def _output(self, node, mode): 30 | assert node.__class__.__name__ in ['StartNode', 'HiddenNode', 'EndNode'] 31 | if node.__class__.__name__ == 'StartNode': 32 | if node in self.start: 33 | return node.input_vars 34 | else: 35 | return [] 36 | input_vars = [] 37 | for pnode in node.prev: 38 | if mode == Graph.TRAIN_FPROP: 39 | # check if the train mode of hidden node has been visited 40 | if pnode not in self.visited_train: 41 | output = self._output(pnode, mode) 42 | input_vars += output 43 | self.visited_train[pnode] = output 44 | else: 45 | input_vars += self.visited_train[pnode] 46 | 47 | elif mode == Graph.TEST_FPROP: 48 | # check if the test mode of hidden node has been visited 49 | if pnode not in self.visited_test: 50 | output = self._output(pnode, mode) 51 | input_vars += output 52 | self.visited_test[pnode] = output 53 | else: 54 | input_vars += self.visited_test[pnode] 55 | else: 56 | raise Exception('unknown mode: {}'.format(mode)) 57 | 58 | node.input_vars = input_vars 59 | return getattr(node, mode)() 60 | 61 | 62 | def train_fprop(self): 63 | """ 64 | forward propagation for train mode that builds the final tensorflow graph 65 | """ 66 | outs = [] 67 | for node in self.end: 68 | outs += self._output(node, Graph.TRAIN_FPROP) 69 | return outs 70 | 71 | 72 | def test_fprop(self): 73 | """ 74 | forward propagation for test mode that builds the final tensorflow graph 75 | """ 76 | outs = [] 77 | for node in self.end: 78 | outs += self._output(node, 
import tensorflow as tf
from .template import BaseLayer


class RELU(BaseLayer):
    """Rectified linear unit: max(x, 0)."""
    def _train_fprop(self, state_below):
        return tf.nn.relu(state_below)


class RELU6(BaseLayer):
    """Rectified linear capped at 6: min(max(x, 0), 6)."""
    def _train_fprop(self, state_below):
        return tf.nn.relu6(state_below)


class LeakyRELU(BaseLayer):
    """RELU with a small negative slope instead of a hard zero."""

    @BaseLayer.init_name_scope
    def __init__(self, leak=0.2):
        """
        Args:
            leak (float): slope applied to negative inputs
        """
        self.leak = leak

    def _train_fprop(self, state_below):
        return tf.maximum(state_below, self.leak*state_below)


class ELU(BaseLayer):
    """Exponential linear unit."""
    def _train_fprop(self, state_below):
        return tf.nn.elu(state_below)


class Softplus(BaseLayer):
    """Smooth approximation of RELU: log(1 + e^x)."""
    def _train_fprop(self, state_below):
        return tf.nn.softplus(state_below)


class Softsign(BaseLayer):
    """x / (1 + |x|)."""
    def _train_fprop(self, state_below):
        return tf.nn.softsign(state_below)


# BUGFIX: Tanh used to be defined twice in this module; the second identical
# definition silently shadowed the first. Kept a single definition.
class Tanh(BaseLayer):
    """Hyperbolic tangent activation."""
    def _train_fprop(self, state_below):
        return tf.nn.tanh(state_below)


class Sigmoid(BaseLayer):
    """Logistic sigmoid: 1 / (1 + e^-x)."""
    def _train_fprop(self, state_below):
        return tf.nn.sigmoid(state_below)


class Softmax(BaseLayer):
    """Softmax over the last dimension."""
    def _train_fprop(self, state_below):
        return tf.nn.softmax(state_below)
class ToFloat(BaseLayer):
    """Casts the incoming tensor to float32."""
    def _train_fprop(self, state_below):
        return tf.to_float(state_below, name='ToFloat')


class ToInt32(BaseLayer):
    """Casts the incoming tensor to int32."""
    def _train_fprop(self, state_below):
        return tf.to_int32(state_below, name='ToInt32')


class Linear(BaseLayer):
    """Fully connected layer computing xW + b."""

    @BaseLayer.init_name_scope
    def __init__(self, this_dim=None, W=None, b=None, stddev=0.1):
        """
        Args:
            this_dim (int): dimension of this layer
            W (tensor variable): optional externally supplied weight matrix
            b (tensor variable): optional externally supplied bias vector
            stddev (float): stddev of the random-normal weight initialiser
        """
        self.this_dim = this_dim
        self.stddev = stddev
        self.W = W
        self.b = b

    @BaseLayer.init_name_scope
    def __init_var__(self, state_below):
        # W/b are created lazily here because the input width is only known
        # once the first input tensor arrives.
        in_dim = int(state_below.shape[1])
        if self.W is None:
            init = tf.random_normal([in_dim, self.this_dim], stddev=self.stddev)
            self.W = tf.Variable(init, name=self.__class__.__name__ + '_W')
        if self.b is None:
            self.b = tf.Variable(tf.zeros([self.this_dim]),
                                 name=self.__class__.__name__ + '_b')

    def _train_fprop(self, state_below):
        return tf.matmul(state_below, self.W) + self.b

    @property
    def _variables(self):
        return [self.W, self.b]
class LinearMasked(BaseLayer):
    """Fully connected layer whose output is element-wise masked for
    partial connectivity."""

    @BaseLayer.init_name_scope
    def __init__(self, this_dim=None, W=None, b=None, mask=None, stddev=0.1):
        """
        Args:
            this_dim (int): dimension of this layer
            W (tensor variable): optional weight matrix (2D tensor)
            b (tensor variable): optional bias vector
            mask (numpy.ndarray or tensorflow placeholder): mask multiplied
                into the output for partial connection
            stddev (float): stddev of the random-normal weight initialiser
        """
        self.this_dim = this_dim
        self.mask = mask
        self.stddev = stddev
        self.W = W
        self.b = b

    @BaseLayer.init_name_scope
    def __init_var__(self, state_below):
        # W/b are created lazily once the input width is known.
        in_dim = int(state_below.shape[1])
        if self.W is None:
            init = tf.random_normal([in_dim, self.this_dim], stddev=self.stddev)
            self.W = tf.Variable(init, name=self.__class__.__name__ + '_W')
        if self.b is None:
            self.b = tf.Variable(tf.zeros([self.this_dim]),
                                 name=self.__class__.__name__ + '_b')

    def _train_fprop(self, state_below):
        # dense affine transform, then zero out the masked connections
        return tf.multiply(tf.matmul(state_below, self.W) + self.b, self.mask)

    @property
    def _variables(self):
        return [self.W, self.b]
class SparseLinear(BaseLayer):
    """Fully connected layer over sparse input supplied as a pair of
    tensors: indices of shape [N, prev_dim] and values of shape [N]."""

    @BaseLayer.init_name_scope
    def __init__(self, prev_dim=None, this_dim=None, W=None, b=None,
                 batchsize=None, stddev=0.1):
        """
        Args:
            prev_dim (int): dimension of previous layer
            this_dim (int): dimension of this layer
            W (tensor variable): optional weight matrix (2D tensor)
            b (tensor variable): optional bias vector
            batchsize (int): batch size, needed for the dense shape of the
                reconstructed SparseTensor
            stddev (float): stddev of the random-normal weight initialiser
        """
        self.prev_dim = prev_dim
        self.this_dim = this_dim
        self.batchsize = batchsize
        self.stddev = stddev
        self.W = W
        self.b = b

        # Unlike Linear, the input width is known up front, so W/b are
        # created eagerly in the constructor.
        if self.W is None:
            init = tf.random_normal([self.prev_dim, self.this_dim], stddev=self.stddev)
            self.W = tf.Variable(init, name=self.__class__.__name__ + '_W')
        if self.b is None:
            self.b = tf.Variable(tf.zeros([self.this_dim]),
                                 name=self.__class__.__name__ + '_b')

    def _train_fprop(self, state_below):
        idx, val = state_below
        sparse_x = tf.SparseTensor(tf.cast(idx, 'int64'), val,
                                   dense_shape=[self.batchsize, self.prev_dim])
        # canonical row-major ordering is required before the matmul
        sparse_x = tf.sparse_reorder(sparse_x)
        projected = tf.sparse_tensor_dense_matmul(sparse_x, self.W,
                                                  adjoint_a=False, adjoint_b=False)
        return tf.add(projected, self.b)

    @property
    def _variables(self):
        return [self.W, self.b]
class Dropout(BaseLayer):

    @BaseLayer.init_name_scope
    def __init__(self, dropout_below=0.5, noise_shape=None):
        '''
        Args:
            dropout_below(float): probability of the inputs from the layer below
                been masked out
            noise_shape (list or tuple): shape of the noise: example [-1, 2, -1] which applies
                noise to the second dimension only; -1 or None entries are
                resolved to the input's dynamic size at fprop time
        '''
        self.dropout_below = dropout_below
        self.noise_shape = noise_shape

    def _test_fprop(self, state_below):
        """
        Description:
            Since input is already scaled up during training, therefore during
            testing, we don't need to scale the inputs again
        """
        return state_below

    def _train_fprop(self, state_below):
        """
        Description:
            Applies dropout to the layer during training, keeping each unit
            with probability 1 - dropout_below and scaling kept units up by
            1 / keep_prob
        """
        # BUGFIX: resolve -1/None placeholders into a LOCAL list instead of
        # overwriting self.noise_shape. The previous code stored graph-specific
        # tf.shape(...) tensors on the layer, so a Dropout instance could not
        # be reused or rebuilt in another graph.
        noise_shape = self.noise_shape
        if noise_shape is not None:
            assert len(state_below.get_shape()) == len(noise_shape)
            resolved = []
            for i, v in enumerate(noise_shape):
                if v == -1 or v is None:
                    resolved.append(tf.shape(state_below)[i])
                else:
                    resolved.append(v)
            noise_shape = resolved

        return tf.nn.dropout(state_below, keep_prob=1-self.dropout_below,
                             noise_shape=noise_shape)


class OneSample(BaseLayer):

    @BaseLayer.init_name_scope
    def __init__(self, dim):
        '''
        Description:
            multinomial sample one output from the softmax probability

        Args:
            dim (int): layer dimension
        '''
        # identity matrix used to turn sampled indices into one-hot rows
        self.diag = tf.diag(tf.ones(dim))

    def _train_fprop(self, state_below):
        samples = tf.multinomial(state_below, num_samples=1)
        samples = tf.squeeze(samples)
        return tf.gather(self.diag, samples)
class ScopeDeco(object):
    """Decorators that wrap layer construction and fprop calls in
    tensorflow name scopes."""

    @classmethod
    def init_name_scope(cls, func):
        @wraps(func)
        def decorated(self, *args, **kwargs):
            if not hasattr(self, 'scope'):
                # first call: create and remember the layer's name scope
                with tf.name_scope(self.__class__.__name__) as self.scope:
                    return func(self, *args, **kwargs)
            if not hasattr(self, '__func_visited_by_fprop__'):
                # second call (typically __init_var__): re-enter the scope once
                self.__func_visited_by_fprop__ = True
                with tf.name_scope(self.scope):
                    return func(self, *args, **kwargs)
            # already initialised twice: skip re-running the wrapped function
            return None
        return decorated

    @classmethod
    def fprop_name_scope(cls, func):
        @wraps(func)
        def decorated(self, *args, **kwargs):
            if not hasattr(self, 'scope'):
                print('{}: scope not initiated for {}'.format(func.__name__, self.__class__.__name__))
                return func(self, *args, **kwargs)
            with tf.name_scope(self.scope + func.__name__):
                return func(self, *args, **kwargs)
        return decorated


class Template(ScopeDeco):
    """Base contract for all layers: subclasses implement _train_fprop and
    optionally _test_fprop, __init_var__ and _variables."""

    @ScopeDeco.init_name_scope
    def __init__(self, *args, **kwargs):
        pass

    @ScopeDeco.init_name_scope
    def __init_var__(self, state_below):
        '''Define variables which requires input information from state_below,
        this is called during forward propagation
        '''
        pass

    def _train_fprop(self, state_below):
        raise NotImplementedError()

    def _test_fprop(self, state_below):
        '''Defines the forward propogation through the layer during testing,
        defaults to the same as train forward propogation
        '''
        return self._train_fprop(state_below)

    @ScopeDeco.fprop_name_scope
    def train_fprop(self, state_below):
        return self._train_fprop(state_below)

    @ScopeDeco.fprop_name_scope
    def test_fprop(self, state_below):
        return self._test_fprop(state_below)

    @property
    def _variables(self):
        '''Defines the trainable parameters in the layer
        Returns: list of Variables
        '''
        return []


class BaseLayer(Template):
    '''renaming of Template to BaseLayer'''
    pass


class BaseModel(Template):
    """A layer composed of an internal startnode -> endnode subgraph."""

    @staticmethod
    def check_y(y):
        '''Unwrap a single-element output list; longer lists pass through
        unchanged; empty lists raise.
        '''
        if len(y) > 1:
            return y
        if len(y) == 1:
            return y[0]
        raise Exception('{} is empty or not a list'.format(y))

    def _train_fprop(self, *state_belows):
        self.startnode.input_vars = state_belows
        y = Graph(start=[self.startnode], end=[self.endnode]).train_fprop()
        return BaseModel.check_y(y)

    def _test_fprop(self, *state_belows):
        self.startnode.input_vars = state_belows
        y = Graph(start=[self.startnode], end=[self.endnode]).test_fprop()
        return BaseModel.check_y(y)

    def train_fprop(self, *state_below):
        return self._train_fprop(*state_below)

    def test_fprop(self, *state_below):
        return self._test_fprop(*state_below)
these files are written by a python script from a template 45 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 46 | *.manifest 47 | *.spec 48 | 49 | # Installer logs 50 | pip-log.txt 51 | pip-delete-this-directory.txt 52 | 53 | # Unit test / coverage reports 54 | htmlcov/ 55 | .tox/ 56 | .coverage 57 | .coverage.* 58 | .cache 59 | nosetests.xml 60 | coverage.xml 61 | *.cover 62 | .hypothesis/ 63 | 64 | # Translations 65 | *.mo 66 | *.pot 67 | 68 | # Django stuff: 69 | *.log 70 | local_settings.py 71 | 72 | # Flask stuff: 73 | instance/ 74 | .webassets-cache 75 | 76 | # Scrapy stuff: 77 | .scrapy 78 | 79 | # Sphinx documentation 80 | docs/_build/ 81 | 82 | # PyBuilder 83 | target/ 84 | 85 | # Jupyter Notebook 86 | .ipynb_checkpoints 87 | 88 | # pyenv 89 | .python-version 90 | 91 | # celery beat schedule file 92 | celerybeat-schedule 93 | 94 | # SageMath parsed files 95 | *.sage.py 96 | 97 | # dotenv 98 | .env 99 | 100 | # virtualenv 101 | .venv 102 | venv/ 103 | ENV/ 104 | 105 | # Spyder project settings 106 | .spyderproject 107 | .spyproject 108 | 109 | # Rope project settings 110 | .ropeproject 111 | 112 | # mkdocs documentation 113 | /site 114 | 115 | # mypy 116 | .mypy_cache/ 117 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/README.md: -------------------------------------------------------------------------------- 1 | # AI-Brain-Tumor Model 5 2 | Brain tumor classification & segmentation using ensemble models 3 | 4 | ---- 5 | 6 | Six models trained to simultaneously produce segmentation maps & classification 7 | on 24 distinct tumor types, grouped into 19 classes for final output in the 8 | ensemble. This repository contains the scripts to train each model individually. 9 | 10 | Output of the training procedure are TF checkpoints for 6 variations of a 11 | joint semgentation-classification model which can then be ensembled in a 12 | separate script. 
13 | 14 | Folder content: 15 | 1. main_train.py - Main training launcher 16 | 2. run_mpi.sh; run_nonmpi.sh - Script to run MPI/non-MPI processes on da Vinci 17 | 3. nn/ - Folder containing model, data, and actual training scripts 18 | 4. model_C3/; model_C4/; model_C4R/; model_C5/; model_C5XS/; model_CR/ - Folders with training configuration INIs for each model 19 | 20 | To train, go to each model folder and run ../run_mpi.sh ../main_train.py " 21 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/aibraintumormodel/__init__.py -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/main_train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | ''' 3 | #------------------------------------------------------------------------------ 4 | # Main script to read config file, set up & run training 5 | # WLRRDDC 6 | #------------------------------------------------------------------------------ 7 | # W - Water segmentation 8 | # L - Cyclical learning rate 9 | # R - Radiomics 10 | # R - Batch renormalization 11 | # D - Series dropout (from dataset) 12 | # D - Distributed batch normalization 13 | # C - New costAccumulator (fixed some summation bugs & cleanup) 14 | #------------------------------------------------------------------------------ 15 | ''' 16 | # Python2 compatibility 17 | from __future__ import print_function 18 | 19 | import argparse 20 | import sys 21 | import os 22 | import traceback 23 | 24 | # Horovod 25 | import horovod.tensorflow as hvd 26 | hvd.init() 27 | if hvd.rank() == 0: 28 | print("WLRRDDC") 29 | print("Horovod initialized with %3d nodes" % 
hvd.size()) 30 | 31 | script_dir = os.path.dirname(os.path.realpath(__file__)) 32 | sys.path.insert(0, script_dir) 33 | 34 | import nn.run.train as train 35 | import nn.run.configReader as configReader 36 | 37 | # MPI 38 | import mpi4py.rc 39 | mpi4py.rc.initialize = False # Do not initialize MPI 40 | mpi4py.rc.finalize = False 41 | import mpi4py.MPI as MPI 42 | 43 | if __name__ == '__main__': 44 | ''' 45 | Usage: 46 | CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES= 47 | mpirun -np <# CPU cores, 1 for each GPU> -bind-to none -map-by slot 48 | main_train.py 49 | ''' 50 | try: 51 | # Print MPI environment 52 | if MPI.COMM_WORLD.Get_rank() == 0: 53 | print("# MPI Processes: " + str(MPI.COMM_WORLD.Get_size())) 54 | 55 | # Parse arguments 56 | flags = None 57 | if MPI.COMM_WORLD.Get_rank() == 0: 58 | # Parse arguments 59 | parser = argparse.ArgumentParser(description='Trains NN model') 60 | parser.add_argument('config', type=str, help='config file for training') 61 | parser.add_argument('--data_dir', type=str, help='abs path of data dir (overwrites INI)', default=None) 62 | args = parser.parse_args() 63 | 64 | # Read config file for training 65 | flags = configReader.parameters(args.config) 66 | if args.data_dir is not None: 67 | if flags.data_dir is not None: 68 | print("WARNING: Overwriting data_dir with cmd argument: ", flags.data_dir) 69 | flags.data_dir = args.data_dir 70 | 71 | assert flags.model_module is not None, "ERROR: No model specified" 72 | assert flags.data_module is not None, "ERROR: No data specified" 73 | 74 | print("TRAINING CONFIGURATION") 75 | flags.listFlags() 76 | print("") 77 | flags = MPI.COMM_WORLD.bcast(flags, root=0) 78 | 79 | # Load model & set up training routines 80 | if flags.batch_renorm: 81 | batch_renorm_rmax_dmax = (flags.renorm_rmax[0], flags.renorm_dmax[0]) 82 | else: 83 | batch_renorm_rmax_dmax = None 84 | train_model = train.trainModel(flags.data_module, flags.model_module, \ 85 | flags.model_scope, flags.batchsize, 
flags.l2_weight_decay, \ 86 | batch_renorm_rmax_dmax, flags.anatomies, flags.biopsy_only, \ 87 | flags.mask_only, flags.water_mask, flags.series_kprob, \ 88 | flags.train_valid_seed, flags.validation_list, flags.radiomics, \ 89 | flags.clsmatcoeff, flags.distributed_batchnorm, \ 90 | flags.data_dir, flags.testing) 91 | 92 | train_model.configureTraining(flags, \ 93 | flags.save_path, flags.restore_path, flags.log_path) 94 | 95 | # Train model according to config file parameters 96 | train_model.train() 97 | 98 | # Finalize train_model object 99 | train_model.finalize() 100 | 101 | if flags.testing: 102 | print("TEST MAINTRAIN DONE RANK ", hvd.rank()) 103 | # Force terminate after this point (evyerthing works, MPI just refuses to return) 104 | MPI.COMM_WORLD.Abort(0) 105 | 106 | except Exception as e: 107 | print(traceback.format_exc()) 108 | print(e) 109 | print("NODE ", hvd.rank(), ": ERROR running script") 110 | MPI.COMM_WORLD.Abort(1) 111 | sys.exit(1) 112 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/model_C3/train_model_C3.ini: -------------------------------------------------------------------------------- 1 | # Config file for training. 
All params must be in the [train] section 2 | # Model C3 3 | # - No radiomics 4 | # - BG/water/tumor segmentation mask 5 | # - Small & big classes 6 | [train] 7 | # Model 8 | model_module = "nn.model.model_C3" 9 | model_scope = "model_C3" 10 | testing = True 11 | 12 | # Data 13 | data_module = "nn.data.data_train_hvd" 14 | anatomies = [] 15 | biopsy_only = False 16 | mask_only = True 17 | water_mask = True 18 | series_kprob = [1.0, 1.0, 1.0, 0] 19 | series_val = [1, 1, 1, 0] 20 | radiomics = False 21 | clsmatcoeff = (0.0, 1.0) 22 | train_valid_seed = 1111 23 | #validation_list = "../valid.dat" 24 | 25 | # Continuation params 26 | continuation = False 27 | restore = False 28 | restore_seg = True 29 | restore_cls = True 30 | save = True 31 | 32 | # Model training parameters 33 | batchsize = 4 34 | keep_prob = 1.0 35 | keep_prob_fcn_D1 = 0.7 36 | keep_prob_fcn_D2 = 0.7 37 | 38 | # Training parameters 39 | optimizer = "adam" 40 | max_epochs = 256 41 | train_seg = True 42 | train_cls = True 43 | big_cls = True 44 | small_cls = True 45 | min_learning_rate = 0.00005 46 | max_learning_rate = 0.0003 47 | learning_rate_decay_step = 25000 48 | learning_rate_decay_rate = 0.995 49 | learning_rate_epochsize = 8 50 | learning_range_decay = True 51 | learning_range_decay_rate = 0.8 52 | seg_loss_coefficient = 4.0 53 | cls_loss_coefficient = 1.0 54 | l2_regularizer = True 55 | l2_weight_decay = 0.0002 56 | distributed_batchnorm = True 57 | batch_renorm = False 58 | renorm_rmax = (1.0, 3.0, 10000, 100000) 59 | renorm_dmax = (0.0, 5.0, 10000, 60000) 60 | 61 | # Reporting parameters 62 | report_every_nsteps = 500 63 | save_every_nsteps = 1000 64 | validate_every_nepoch = 2 65 | save_out_every_nepoch = 10 66 | sel_threshold = (0, 1.0) 67 | out_res_frac = 0.1 68 | 69 | # I/O paths 70 | save_path = "../ckpt/ckpt_model_C3_tg/tfgraph.ckpt" 71 | restore_path = "../ckpt/ckpt_model_C3_tg/tfgraph.ckpt" 72 | log_path = "../exclude/train_model_C3_tg/tflog" 73 | out_res_path = 
"../exclude/train_model_C3_tg/validres.pckl" 74 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/model_C4/train_model_C4.ini: -------------------------------------------------------------------------------- 1 | # Config file for training. All params must be in the [train] section 2 | # Model C4 3 | # - No radiomics 4 | # - BG/water/tumor segmentation mask 5 | # - Small & big classes 6 | [train] 7 | # Model 8 | model_module = "nn.model.model_C4" 9 | model_scope = "model_C4" 10 | testing = True 11 | 12 | # Data 13 | data_module = "nn.data.data_train_hvd" 14 | anatomies = [] 15 | biopsy_only = False 16 | mask_only = True 17 | water_mask = True 18 | series_kprob = [1.0, 1.0, 1.0, 0] 19 | series_val = [1, 1, 1, 0] 20 | radiomics = False 21 | clsmatcoeff = (0.0, 1.0) 22 | train_valid_seed = 1111 23 | #validation_list = "../valid.dat" 24 | 25 | # Continuation params 26 | continuation = False 27 | restore = False 28 | restore_seg = True 29 | restore_cls = True 30 | save = True 31 | 32 | # Model training parameters 33 | batchsize = 4 34 | keep_prob = 1.0 35 | keep_prob_fcn_D1 = 0.7 36 | keep_prob_fcn_D2 = 0.7 37 | 38 | # Training parameters 39 | optimizer = "adam" 40 | max_epochs = 256 41 | train_seg = True 42 | train_cls = True 43 | big_cls = True 44 | small_cls = True 45 | min_learning_rate = 0.00005 46 | max_learning_rate = 0.0003 47 | learning_rate_decay_step = 25000 48 | learning_rate_decay_rate = 0.995 49 | learning_rate_epochsize = 8 50 | learning_range_decay = True 51 | learning_range_decay_rate = 0.8 52 | seg_loss_coefficient = 4.0 53 | cls_loss_coefficient = 1.0 54 | l2_regularizer = True 55 | l2_weight_decay = 0.0002 56 | distributed_batchnorm = True 57 | batch_renorm = False 58 | renorm_rmax = (1.0, 3.0, 10000, 100000) 59 | renorm_dmax = (0.0, 5.0, 10000, 60000) 60 | 61 | # Reporting parameters 62 | report_every_nsteps = 500 63 | save_every_nsteps = 1000 64 | validate_every_nepoch = 2 65 
| save_out_every_nepoch = 10 66 | sel_threshold = (0, 1.0) 67 | out_res_frac = 0.1 68 | 69 | # I/O paths 70 | save_path = "../ckpt/ckpt_model_C4_tg/tfgraph.ckpt" 71 | restore_path = "../ckpt/ckpt_model_C4_tg/tfgraph.ckpt" 72 | log_path = "../exclude/train_model_C4_tg/tflog" 73 | out_res_path = "../exclude/train_model_C4_tg/validres.pckl" 74 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/model_C4R/train_model_C4R.ini: -------------------------------------------------------------------------------- 1 | # Config file for training. All params must be in the [train] section 2 | # Model C4R 3 | # - Radiomics 4 | # - BG/water/tumor segmentation mask 5 | # - Small & big classes 6 | [train] 7 | # Model 8 | model_module = "nn.model.model_C4R" 9 | model_scope = "model_C4R" 10 | testing = True 11 | 12 | # Data 13 | data_module = "nn.data.data_train_hvd" 14 | anatomies = [] 15 | biopsy_only = False 16 | mask_only = True 17 | water_mask = True 18 | series_kprob = [1.0, 1.0, 1.0, 0] 19 | series_val = [1, 1, 1, 0] 20 | radiomics = True 21 | clsmatcoeff = (-1.0, 1.0) 22 | train_valid_seed = 1111 23 | #validation_list = "../valid.dat" 24 | 25 | # Continuation params 26 | continuation = False 27 | restore = False 28 | restore_seg = True 29 | restore_cls = True 30 | save = True 31 | 32 | # Model training parameters 33 | batchsize = 3 34 | keep_prob = 1.0 35 | keep_prob_fcn_D1 = 0.7 36 | keep_prob_fcn_D2 = 0.7 37 | 38 | # Training parameters 39 | optimizer = "adam" 40 | max_epochs = 256 41 | train_seg = True 42 | train_cls = True 43 | big_cls = True 44 | small_cls = True 45 | min_learning_rate = 0.00005 46 | max_learning_rate = 0.0005 47 | learning_rate_decay_step = 25000 48 | learning_rate_decay_rate = 0.995 49 | learning_rate_epochsize = 8 50 | learning_range_decay = True 51 | learning_range_decay_rate = 0.8 52 | seg_loss_coefficient = 4.0 53 | cls_loss_coefficient = 1.0 54 | l2_regularizer = True 55 | 
l2_weight_decay = 0.0002 56 | distributed_batchnorm = True 57 | batch_renorm = False 58 | renorm_rmax = (1.0, 3.0, 10000, 100000) 59 | renorm_dmax = (0.0, 5.0, 10000, 60000) 60 | 61 | # Reporting parameters 62 | report_every_nsteps = 500 63 | save_every_nsteps = 1000 64 | validate_every_nepoch = 2 65 | save_out_every_nepoch = 10 66 | sel_threshold = (0, 1.0) 67 | out_res_frac = 0.1 68 | 69 | # I/O paths 70 | save_path = "../ckpt/ckpt_model_C4R_tg/tfgraph.ckpt" 71 | restore_path = "../ckpt/ckpt_model_C4R_tg/tfgraph.ckpt" 72 | log_path = "../exclude/ckpt_model_C4R_tg/tflog" 73 | out_res_path = "../exclude/ckpt_model_C4R_tg/validres.pckl" 74 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/model_C5/train_model_C5.ini: -------------------------------------------------------------------------------- 1 | # Config file for training. All params must be in the [train] section 2 | # Model C5 3 | # - No radiomics 4 | # - BG/water/tumor segmentation mask 5 | # - Small & big classes 6 | [train] 7 | # Model 8 | model_module = "nn.model.model_C5" 9 | model_scope = "model_C5" 10 | testing = True 11 | 12 | # Data 13 | data_module = "nn.data.data_train_hvd" 14 | anatomies = [] 15 | biopsy_only = False 16 | mask_only = True 17 | water_mask = True 18 | series_kprob = [1.0, 1.0, 1.0, 0] 19 | series_val = [1, 1, 1, 0] 20 | radiomics = False 21 | clsmatcoeff = (-0.5, 1.0) 22 | train_valid_seed = 1111 23 | #validation_list = "../valid.dat" 24 | 25 | # Continuation params 26 | continuation = False 27 | restore = False 28 | restore_seg = True 29 | restore_cls = True 30 | save = True 31 | 32 | # Model training parameters 33 | batchsize = 4 34 | keep_prob = 1.0 35 | keep_prob_fcn_D1 = 0.7 36 | keep_prob_fcn_D2 = 0.7 37 | 38 | # Training parameters 39 | optimizer = "adam" 40 | max_epochs = 256 41 | train_seg = True 42 | train_cseg = True 43 | train_cls = True 44 | big_cls = True 45 | small_cls = True 46 | 
min_learning_rate = 0.00005 47 | max_learning_rate = 0.0003 48 | learning_rate_decay_step = 25000 49 | learning_rate_decay_rate = 0.995 50 | learning_rate_epochsize = 8 51 | learning_range_decay = True 52 | learning_range_decay_rate = 0.8 53 | seg_loss_coefficient = 4.0 54 | cls_loss_coefficient = 1.0 55 | l2_regularizer = False 56 | l2_weight_decay = 0.0002 57 | distributed_batchnorm = True 58 | batch_renorm = False 59 | renorm_rmax = (1.0, 3.0, 10000, 100000) 60 | renorm_dmax = (0.0, 5.0, 10000, 60000) 61 | 62 | # Reporting parameters 63 | report_every_nsteps = 500 64 | save_every_nsteps = 1000 65 | validate_every_nepoch = 2 66 | save_out_every_nepoch = 10 67 | sel_threshold = (0, 1.0) 68 | out_res_frac = 0.1 69 | 70 | # I/O paths 71 | save_path = "../ckpt/ckpt_model_C5_tg/tfgraph.ckpt" 72 | restore_path = "../ckpt/ckpt_model_C5_tg/tfgraph.ckpt" 73 | log_path = "../exclude/train_model_C5_tg/tflog" 74 | out_res_path = "../exclude/train_model_C5_tg/validres.pckl" 75 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/model_C5XS/train_model_C5XS.ini: -------------------------------------------------------------------------------- 1 | # Config file for training. 
All params must be in the [train] section 2 | # Model C5XS 3 | # - No radiomics 4 | # - BG/water/tumor segmentation mask 5 | # - Small & big classes 6 | [train] 7 | # Model 8 | model_module = "nn.model.model_C5XS" 9 | model_scope = "model_C5XS" 10 | testing = True 11 | 12 | # Data 13 | data_module = "nn.data.data_train_hvd" 14 | anatomies = [] 15 | biopsy_only = False 16 | mask_only = True 17 | water_mask = True 18 | series_kprob = [1.0, 1.0, 1.0, 0] 19 | series_val = [1, 1, 1, 0] 20 | radiomics = False 21 | clsmatcoeff = (-0.5, 1.0) 22 | train_valid_seed = 1111 23 | #validation_list = "../valid.dat" 24 | 25 | # Continuation params 26 | continuation = False 27 | restore = False 28 | restore_seg = True 29 | restore_cls = True 30 | save = True 31 | 32 | # Model training parameters 33 | batchsize = 5 34 | keep_prob = 1.0 35 | keep_prob_fcn_D1 = 0.7 36 | keep_prob_fcn_D2 = 0.7 37 | 38 | # Training parameters 39 | optimizer = "adam" 40 | max_epochs = 128 41 | train_seg = True 42 | train_cls = True 43 | big_cls = True 44 | small_cls = True 45 | min_learning_rate = 0.00005 46 | max_learning_rate = 0.0003 47 | learning_rate_decay_step = 25000 48 | learning_rate_decay_rate = 0.995 49 | learning_rate_epochsize = 8 50 | learning_range_decay = True 51 | learning_range_decay_rate = 0.8 52 | seg_loss_coefficient = 4.0 53 | cls_loss_coefficient = 1.0 54 | l2_regularizer = True 55 | l2_weight_decay = 0.0002 56 | distributed_batchnorm = True 57 | batch_renorm = False 58 | renorm_rmax = (1.0, 3.0, 10000, 100000) 59 | renorm_dmax = (0.0, 5.0, 10000, 60000) 60 | 61 | # Reporting parameters 62 | report_every_nsteps = 500 63 | save_every_nsteps = 1000 64 | validate_every_nepoch = 2 65 | save_out_every_nepoch = 10 66 | sel_threshold = (0, 1.0) 67 | out_res_frac = 0.1 68 | 69 | # I/O paths 70 | save_path = "../ckpt/ckpt_model_C5XS_tg/tfgraph.ckpt" 71 | restore_path = "../ckpt/ckpt_model_C5XS_tg/tfgraph.ckpt" 72 | log_path = "../exclude/train_model_C5XS_tg/tflog" 73 | out_res_path = 
"../exclude/train_model_C5XS_tg/validres.pckl" 74 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/model_CR/train_model_CR.ini: -------------------------------------------------------------------------------- 1 | # Config file for training. All params must be in the [train] section 2 | # Model CR 3 | # - Radiomics 4 | # - BG/tumor segmentation only (water mask combined with tumor) 5 | # - Small classes only 6 | [train] 7 | # Model 8 | model_module = "nn.model.model_CR" 9 | model_scope = "model_CR" 10 | testing = True 11 | 12 | # Data 13 | data_module = "nn.data.data_train_hvd" 14 | anatomies = [] 15 | biopsy_only = False 16 | mask_only = True 17 | water_mask = False 18 | series_kprob = [1.0, 1.0, 1.0, 0] 19 | series_val = [1, 1, 1, 0] 20 | radiomics = True 21 | clsmatcoeff = (0.0, 1.0) 22 | train_valid_seed = 1111 23 | #validation_list = "../valid.dat" 24 | 25 | # Continuation params 26 | continuation = False 27 | restore = False 28 | restore_seg = True 29 | restore_cls = True 30 | save = True 31 | 32 | # Model training parameters 33 | batchsize = 3 34 | keep_prob = 1.0 35 | keep_prob_fcn_D1 = 0.7 36 | keep_prob_fcn_D2 = 0.7 37 | 38 | # Training parameters 39 | optimizer = "adam" 40 | max_epochs = 256 41 | train_seg = True 42 | train_cls = True 43 | big_cls = False 44 | small_cls = True 45 | min_learning_rate = 0.00005 46 | max_learning_rate = 0.0003 47 | learning_rate_decay_step = 25000 48 | learning_rate_decay_rate = 0.995 49 | learning_rate_epochsize = 8 50 | learning_range_decay = True 51 | learning_range_decay_rate = 0.8 52 | seg_loss_coefficient = 4.0 53 | cls_loss_coefficient = 1.0 54 | l2_regularizer = True 55 | l2_weight_decay = 0.0002 56 | distributed_batchnorm = True 57 | batch_renorm = False 58 | renorm_rmax = (1.0, 3.0, 10000, 100000) 59 | renorm_dmax = (0.0, 5.0, 10000, 60000) 60 | 61 | # Reporting parameters 62 | report_every_nsteps = 500 63 | save_every_nsteps = 1000 64 | 
validate_every_nepoch = 2 65 | save_out_every_nepoch = 10 66 | sel_threshold = (0, 1.0) 67 | out_res_frac = 0.1 68 | 69 | # I/O paths 70 | save_path = "../ckpt/ckpt_model_CR_tg/tfgraph.ckpt" 71 | restore_path = "../ckpt/ckpt_model_CR_tg/tfgraph.ckpt" 72 | log_path = "../exclude/train_model_CR_tg/tflog" 73 | out_res_path = "../exclude/train_model_CR_tg/validres.pckl" 74 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/nn/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/aibraintumormodel/nn/__init__.py -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/nn/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/aibraintumormodel/nn/data/__init__.py -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/nn/model/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/aibraintumormodel/nn/model/__init__.py -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/nn/run/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/aibraintumormodel/nn/run/__init__.py -------------------------------------------------------------------------------- 
/tensorgraph/models_zoo/aibraintumormodel/nn/run/radiomicsFeatures.py: -------------------------------------------------------------------------------- 1 | ''' 2 | #------------------------------------------------------------------------------ 3 | PyRadiomics functions 4 | Routines to generate radiomics LoG and Wavelet transforms from inputs 5 | Louis Lee 11-10-2018 6 | #------------------------------------------------------------------------------- 7 | API details: 8 | Version: ML_BRAIN_TUMOR_v2.0.0 9 | Internal identifier: Model5 10 | Script details: 11 | Version: v1.0.0 12 | Internal identifier: PyRadiomics.py 13 | #------------------------------------------------------------------------------- 14 | ''' 15 | # Python2 compatibility 16 | from __future__ import print_function 17 | 18 | import numpy as np 19 | import SimpleITK as sitk 20 | import radiomics.imageoperations 21 | 22 | # Fixed list of LoG and wavelets to generate 23 | #sigma_list = [1.0, 2.0, 3.0, 4.0, 5.0] 24 | #n_wavelets = 8 25 | sigma_list = [1.0, 3.0, 5.0] 26 | n_wavelets = 4 27 | 28 | def getNumFeatures(imgsize_in, nchannels=1): 29 | ''' 30 | Accessory function to return # feature maps 31 | Input: 3D image size + channels tuple (z,y,x,# channels) 32 | Output: 3D image size + channels tuple (z,y,x,# radiomics channels) 33 | ''' 34 | return imgsize_in[:-1] + (nchannels*(len(sigma_list) + n_wavelets),) 35 | 36 | def radiomicsLoGWavelet(np_img): 37 | ''' 38 | Function to calculate & return radiomics transformations 39 | Input: FP NumPy image of dimension (z,y,x) 40 | Output: Tuple of (LoG, Wavelet) FP NumPy image of dimension 41 | (z,y,x, # radiomics channels). E.g. 
LoG has dim (z,y,x,#LoG channels) 42 | ''' 43 | sitk_img = sitk.GetImageFromArray(np_img.astype(np.float32)) 44 | sitk_msk = sitk.GetImageFromArray(np.ones(np_img.shape, np.float32)) 45 | 46 | LoG = radiomics.imageoperations.getLoGImage( \ 47 | sitk_img ,sitk_msk, sigma=sigma_list) 48 | wavelet = radiomics.imageoperations.getWaveletImage(sitk_img, sitk_msk, \ 49 | force2D=True, force2Ddimension=0) 50 | 51 | LoG_np = [] 52 | for isample in range(len(sigma_list)): 53 | LoG_sitk,_,_ = next(LoG) 54 | LoG_img = sitk.GetArrayFromImage(LoG_sitk) 55 | LoG_np.append(np.expand_dims(LoG_img, axis=-1)) 56 | LoG_np = np.concatenate(LoG_np, axis=-1) 57 | 58 | wavelet_np = [] 59 | i = 0 60 | for isample in range(n_wavelets): 61 | wavelet_sitk,_,_ = next(wavelet) 62 | wavelet_img = sitk.GetArrayFromImage(wavelet_sitk) 63 | wavelet_np.append(np.expand_dims(wavelet_img, axis=-1)) 64 | wavelet_np = np.concatenate(wavelet_np, axis=-1) 65 | 66 | return LoG_np, wavelet_np 67 | 68 | def getFeatures(batch_imgchannels): 69 | ''' 70 | Function to transform batch of input images into batch of radiomics transforms 71 | Input: NumPy images of dimension (batchsize,z,y,x,# channels) 72 | Output: NumPy images of dimension (batchsize,z,y,x,# output channels) where 73 | # output channels = # radiomics channels per input channel x # input channels 74 | ''' 75 | batchsize = batch_imgchannels.shape[0] 76 | nchannels = batch_imgchannels.shape[-1] 77 | 78 | # Iterate over each sample in batch 79 | output = [] 80 | for ibatch in range(batchsize): 81 | imgs = batch_imgchannels[ibatch,:,:,:,:] 82 | # Iterate over each channel in sample 83 | features = [] 84 | for ichannel in range(nchannels): 85 | LoG, wavelet = radiomicsLoGWavelet(imgs[:,:,:,ichannel]) 86 | features.append(np.concatenate([LoG, wavelet], axis=-1)) 87 | # Stack all radiomics output from each channel to last dim 88 | output.append(np.concatenate(features, axis=-1)) 89 | # Stack all batch outputs to 1st dim 90 | output = np.stack(output, 
axis=0) 91 | 92 | return output 93 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/run_mpi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | module purge 3 | module load shared 4 | module load python/3.6.6 5 | module load ml-python3deps/1.0.0 6 | module load tensorflow/1.8.0-ref 7 | module load horovod/0.14.1 8 | 9 | python=python3 10 | 11 | devices=$1 12 | 13 | num=$(echo $1 | awk -F',' '{print NF}') 14 | script=$2 15 | args="${@:3}" 16 | 17 | echo "HOST : "$(hostname) 18 | echo "DEVICES : "$devices 19 | echo "NPROC : "$num 20 | CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES=$devices \ 21 | mpirun -np $num -H localhost:$num -bind-to none -map-by slot \ 22 | -mca pml ob1 -mca btl ^openib \ 23 | -mca orte_base_help_aggregate 0 \ 24 | -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \ 25 | $python $script $args 26 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/aibraintumormodel/run_nonmpi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | module purge 3 | module load shared 4 | module load python/3.6.6 5 | module load ml-python3deps/1.0.0 6 | module load tensorflow/1.8.0-ref 7 | module load horovod/0.14.1 8 | 9 | python=python3 10 | 11 | devices=$1 12 | 13 | num=$(echo $1 | awk -F',' '{print NF}') 14 | script=$2 15 | args="${@:3}" 16 | 17 | echo "HOST : "$(hostname) 18 | echo "DEVICES : "$devices 19 | echo "NPROC : "$num 20 | CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES=$devices $python $script $args 21 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/airnet/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/airnet/__init__.py -------------------------------------------------------------------------------- /tensorgraph/models_zoo/airnet/train.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import tensorflow as tf 4 | import os 5 | from ...trainobject import train as mytrain 6 | from ...cost import mse 7 | 8 | 9 | def train(seq, X1_ph, X2_ph, y_ph, X1_train, X2_train, y_train): 10 | y_train_sb = seq.train_fprop(X1_ph, X2_ph) 11 | y_test_sb = seq.test_fprop(X1_ph, X2_ph) 12 | train_cost_sb = mse(y_ph, y_train_sb) 13 | test_accu_sb = mse(y_ph, y_test_sb) 14 | optimizer = tf.train.AdamOptimizer(0.001) 15 | with tf.Session() as sess: 16 | this_dir = os.path.dirname(os.path.realpath(__file__)) 17 | writer = tf.summary.FileWriter(this_dir + '/tensorboard', sess.graph) 18 | mytrain(session=sess, 19 | feed_dict={X1_ph:X1_train, X2_ph:X2_train, y_ph:y_train}, 20 | train_cost_sb=train_cost_sb, 21 | valid_cost_sb=test_accu_sb, 22 | optimizer=optimizer, 23 | epoch_look_back=5, max_epoch=1, 24 | percent_decrease=0, train_valid_ratio=[5,1], 25 | batchsize=1, randomize_split=False) 26 | writer.close() 27 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/attention_unet/README.md: -------------------------------------------------------------------------------- 1 | ## Attention Unet 2 | 3 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/attention_unet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/attention_unet/__init__.py -------------------------------------------------------------------------------- 
/tensorgraph/models_zoo/attention_unet/img/attention_unet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/attention_unet/img/attention_unet.jpg -------------------------------------------------------------------------------- /tensorgraph/models_zoo/attention_unet/train.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | import os 4 | from ...trainobject import train as mytrain 5 | from ...cost import entropy, accuracy 6 | 7 | 8 | def train(seq, X_ph, y_ph, X_train, y_train): 9 | y_train_sb = seq.train_fprop(X_ph) 10 | y_test_sb = seq.test_fprop(X_ph) 11 | train_cost_sb = entropy(y_ph, y_train_sb) 12 | optimizer = tf.train.AdamOptimizer(0.0001) 13 | test_accu_sb = accuracy(y_ph, y_test_sb) 14 | with tf.Session() as sess: 15 | this_dir = os.path.dirname(os.path.realpath(__file__)) 16 | writer = tf.summary.FileWriter(this_dir + '/tensorboard', sess.graph) 17 | mytrain(session=sess, 18 | feed_dict={X_ph:X_train, y_ph:y_train}, 19 | train_cost_sb=train_cost_sb, 20 | valid_cost_sb=-test_accu_sb, 21 | optimizer=optimizer, 22 | epoch_look_back=5, max_epoch=1, 23 | percent_decrease=0, train_valid_ratio=[5,1], 24 | batchsize=1, randomize_split=False) 25 | writer.close() 26 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/densenet/README.md: -------------------------------------------------------------------------------- 1 | ## Level 1 2 | 3 | 4 | ## Level 2 5 | 6 | 7 | ## Level 3 8 | 9 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/densenet/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/densenet/__init__.py -------------------------------------------------------------------------------- /tensorgraph/models_zoo/densenet/img/level1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/densenet/img/level1.png -------------------------------------------------------------------------------- /tensorgraph/models_zoo/densenet/img/level2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/densenet/img/level2.png -------------------------------------------------------------------------------- /tensorgraph/models_zoo/densenet/img/level3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/densenet/img/level3.png -------------------------------------------------------------------------------- /tensorgraph/models_zoo/densenet/model.py: -------------------------------------------------------------------------------- 1 | 2 | from ...node import StartNode, HiddenNode, EndNode 3 | from ...layers import BaseModel, DenseNet, MaxPooling, Flatten, Linear, Softmax 4 | 5 | 6 | class MyDenseNet(BaseModel): 7 | 8 | @BaseModel.init_name_scope 9 | def __init__(self, nclass): 10 | layers = [] 11 | layers.append(DenseNet(ndense=1, growth_rate=1, nlayer1blk=1)) 12 | layers.append(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID')) 13 | layers.append(Flatten()) 14 | layers.append(Linear(this_dim=nclass)) 15 | layers.append(Softmax()) 16 | self.startnode = StartNode(input_vars=[None]) 17 | out_hn = 
HiddenNode(prev=[self.startnode], layers=layers) 18 | self.endnode = EndNode(prev=[out_hn]) 19 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/densenet/train.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import tensorflow as tf 4 | import os 5 | from ...trainobject import train as mytrain 6 | from ...cost import entropy, accuracy 7 | 8 | 9 | def train(seq, X_ph, y_ph, X_train, y_train): 10 | y_train_sb = seq.train_fprop(X_ph) 11 | y_test_sb = seq.test_fprop(X_ph) 12 | train_cost_sb = entropy(y_ph, y_train_sb) 13 | optimizer = tf.train.AdamOptimizer(0.0001) 14 | test_accu_sb = accuracy(y_ph, y_test_sb) 15 | with tf.Session() as sess: 16 | this_dir = os.path.dirname(os.path.realpath(__file__)) 17 | writer = tf.summary.FileWriter(this_dir + '/tensorboard', sess.graph) 18 | mytrain(session=sess, 19 | feed_dict={X_ph:X_train, y_ph:y_train}, 20 | train_cost_sb=train_cost_sb, 21 | valid_cost_sb=-test_accu_sb, 22 | optimizer=optimizer, 23 | epoch_look_back=5, max_epoch=1, 24 | percent_decrease=0, train_valid_ratio=[5,1], 25 | batchsize=1, randomize_split=False) 26 | writer.close() 27 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/echocardiac/dilated_unet/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/echocardiac/dilated_unet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/echocardiac/dilated_unet/__init__.py -------------------------------------------------------------------------------- /tensorgraph/models_zoo/echocardiac/dilated_unet/dilated_unet.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/echocardiac/dilated_unet/dilated_unet.png -------------------------------------------------------------------------------- /tensorgraph/models_zoo/echocardiac/dilated_unet/model.py: -------------------------------------------------------------------------------- 1 | from ....graph import Graph 2 | from ....node import StartNode, HiddenNode, EndNode 3 | from ....layers import MaxPooling, ELU, BatchNormalization, Concat, BaseModel, Sigmoid 4 | from ....layers.conv import Conv2D_Transpose, Atrous_Conv2D, Conv2D 5 | import numpy as np 6 | class Dilated_Unet(BaseModel): 7 | @BaseModel.init_name_scope 8 | def __init__(self, nclasses=1): 9 | def downsampling_block(in_hn, filters): 10 | blk = [] 11 | blk.append(Atrous_Conv2D(rate=1, num_filters=filters, kernel_size=(3,3), padding='SAME')) 12 | blk.append(BatchNormalization()) 13 | blk.append(ELU()) 14 | blk.append(Atrous_Conv2D(rate=2, num_filters=filters, kernel_size=(3,3), padding='SAME')) 15 | blk.append(BatchNormalization()) 16 | blk.append(ELU()) 17 | out_skip = HiddenNode(prev=[in_hn], layers=blk) 18 | out_hn = HiddenNode(prev=[out_skip], layers=[MaxPooling(stride=(2,2))]) 19 | return out_hn, out_skip 20 | 21 | def upsampling_bolock(in_hn, in_skip, filters): 22 | blk = [] 23 | blk.append(Conv2D(num_filters=filters, kernel_size=(3,3), stride=(1,1), padding='SAME')) 24 | blk.append(BatchNormalization()) 25 | blk.append(ELU()) 26 | blk.append(Conv2D(num_filters=filters, kernel_size=(3,3), stride=(1,1), padding='SAME')) 27 | blk.append(BatchNormalization()) 28 | blk.append(ELU()) 29 | out_hn = HiddenNode(prev=[in_hn, in_skip], input_merge_mode=Concat(axis=-1), layers=blk) 30 | return out_hn 31 | 32 | def dilation_block(in_hn, filters, dilation_rate): 33 | blk = [] 34 | blk.append(Atrous_Conv2D(rate=dilation_rate, 
num_filters=filters, kernel_size=(3,3), padding='SAME')) 35 | blk.append(BatchNormalization()) 36 | blk.append(ELU()) 37 | out_hn = HiddenNode(prev=[in_hn], layers=blk) 38 | return out_hn 39 | 40 | self.startnode = StartNode(input_vars=[None]) 41 | # encoding layers 42 | blk1_hn, blk1_skip = downsampling_block(self.startnode, 64) 43 | blk2_hn, blk2_skip = downsampling_block(blk1_hn, 128) 44 | blk3_hn, blk3_skip = downsampling_block(blk2_hn, 256) 45 | blk4_hn, blk4_skip = downsampling_block(blk3_hn, 512) 46 | # dilation layers 47 | dilation_rate = [1, 2, 4, 8, 16] 48 | dl1_hn = dilation_block(blk4_hn, 512, dilation_rate[0]) 49 | dl2_hn = dilation_block(dl1_hn, 512, dilation_rate[1]) 50 | dl3_hn = dilation_block(dl2_hn, 512, dilation_rate[2]) 51 | dl4_hn = dilation_block(dl3_hn, 512, dilation_rate[3]) 52 | dl5_hn = dilation_block(dl4_hn, 512, dilation_rate[4]) 53 | # decoding layers 54 | pre4 = HiddenNode(prev=[dl5_hn], layers=[Conv2D_Transpose(kernel_size=(3,3), num_filters=512, stride=(2,2), padding='SAME')]) 55 | deblk4_hn = upsampling_bolock(pre4, blk4_skip, 512) 56 | pre3 = HiddenNode(prev=[deblk4_hn], layers=[Conv2D_Transpose(kernel_size=(3,3), num_filters=256, stride=(2,2), padding='SAME')]) 57 | deblk3_hn = upsampling_bolock(pre3, blk3_skip, 256) 58 | pre2 = HiddenNode(prev=[deblk3_hn], layers=[Conv2D_Transpose(kernel_size=(3,3), num_filters=128, stride=(2,2), padding='SAME')]) 59 | deblk2_hn = upsampling_bolock(pre2, blk2_skip, 128) 60 | pre1 = HiddenNode(prev=[deblk2_hn], layers=[Conv2D_Transpose(kernel_size=(3,3), num_filters=64, stride=(2,2), padding='SAME')]) 61 | deblk1_hn = upsampling_bolock(pre1, blk1_skip, 64) 62 | out_hn = HiddenNode(prev=[deblk1_hn], layers=[Conv2D(num_filters=nclasses, kernel_size=(1,1)), Sigmoid()]) 63 | self.endnode = EndNode(prev=[out_hn]) 64 | 65 | 66 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/echocardiac/dilated_unet/train.py: 
# tensorgraph/models_zoo/echocardiac/dilated_unet/train.py
import numpy as np
import tensorflow as tf
import os
from ....trainobject import train as mytrain
from ....cost import entropy, accuracy


def train(seq, X_ph, y_ph, X_train, y_train):
    """Train ``seq`` on (X_train, y_train) with entropy loss and Adam.

    The validation objective is the negated test accuracy, so the generic
    trainer (which minimizes the validation cost) effectively tracks
    accuracy. Writes a TensorBoard graph next to this file.
    """
    train_out = seq.train_fprop(X_ph)
    test_out = seq.test_fprop(X_ph)
    cost_sb = entropy(y_ph, train_out)
    accu_sb = accuracy(y_ph, test_out)
    opt = tf.train.AdamOptimizer(0.0001)
    with tf.Session() as sess:
        here = os.path.dirname(os.path.realpath(__file__))
        writer = tf.summary.FileWriter(here + '/tensorboard', sess.graph)
        mytrain(session=sess,
                feed_dict={X_ph: X_train, y_ph: y_train},
                train_cost_sb=cost_sb,
                valid_cost_sb=-accu_sb,
                optimizer=opt,
                epoch_look_back=5, max_epoch=1,
                percent_decrease=0, train_valid_ratio=[5, 1],
                batchsize=1, randomize_split=False)
        writer.close()
# tensorgraph/models_zoo/hed_modified/train.py
import tensorflow as tf
import numpy as np
from ...utils import split_arr
from ...data_iterator import SequentialIterator
from ...cost import mean_dice, inv_dice


# Toy data for a smoke-test run: 6 random volumes of shape (D, H, W, 1).
# NOTE: created at import time as a module-level side effect.
D, H, W = 16, 64, 64
X_train = np.random.rand(6, D, H, W, 1)
y_train = np.random.rand(6, D, H, W, 1)


def train(seq, X_ph, y_ph, X_train, y_train):
    """Train ``seq`` with a dice-based loss and cosine-decay-restart Adam.

    Args:
        seq: model exposing ``train_fprop`` / ``test_fprop``.
        X_ph, y_ph: placeholders for inputs and targets.
        X_train, y_train: arrays, split 5:1 into train/validation.

    Fixes over the previous version: the per-epoch averages no longer
    raise ZeroDivisionError when an iterator yields no batches, and the
    session is always closed.
    """
    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)

    train_dice_tf = mean_dice(y_ph, y_train_sb)
    valid_dice_tf = mean_dice(y_ph, y_test_sb)
    train_invLoss_tf = inv_dice(y_ph, y_train_sb)
    valid_invLoss_tf = inv_dice(y_ph, y_test_sb)

    # inv_dice currently has zero weight; kept so its value can be monitored.
    train_cost_tf = 1.0 * train_dice_tf + 0.0 * train_invLoss_tf
    valid_cost_tf = 1.0 * valid_dice_tf + 0.0 * valid_invLoss_tf
    reg_loss_tf = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    train_cost_reg_tf = tf.add_n([train_cost_tf] + reg_loss_tf)
    valid_cost_reg_tf = tf.add_n([valid_cost_tf] + reg_loss_tf)

    total_epochs = 1
    print_period = 1
    batchsize = 1
    lr = 1e-3
    decay_steps = 380
    t_mul = 1.027
    m_mul = 0.987
    min_ratio = 0.081

    global_step_tf = tf.Variable(0, trainable=False)
    decayed_lr_tf = tf.train.cosine_decay_restarts(lr,
                                                   global_step_tf,
                                                   decay_steps,
                                                   t_mul, m_mul, min_ratio)
    optimizer = tf.train.AdamOptimizer(learning_rate=decayed_lr_tf,
                                       epsilon=10**-6)
    with tf.variable_scope('AdamOptimizer'):
        # run batch-norm moving-average updates before each optimizer step
        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(extra_update_ops):
            train_op = optimizer.minimize(train_cost_reg_tf, global_step_tf)

    train_arrs = []
    valid_arrs = []
    phs = []
    feed_dict = {X_ph: X_train, y_ph: y_train}
    for ph, arr in feed_dict.items():
        train_arr, valid_arr = split_arr(arr, [5, 1], randomize=False)
        phs.append(ph)
        train_arrs.append(train_arr)
        valid_arrs.append(valid_arr)

    iter_train = SequentialIterator(*train_arrs, batchsize=batchsize)
    iter_valid = SequentialIterator(*valid_arrs, batchsize=batchsize)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    try:
        sess.run(tf.global_variables_initializer())

        Holder_trainloss = [0] * total_epochs
        Holder_validloss = [0] * total_epochs

        for i in range(total_epochs):
            j = 0
            for batches in iter_train:
                j += 1
                fd = dict(zip(phs, batches))
                _, loss, invloss = sess.run(
                    [train_op, train_cost_reg_tf, train_invLoss_tf],
                    feed_dict=fd)
                Holder_trainloss[i] += loss
                if j % print_period == 0:
                    print("Epoch i: %d, j: %d, Training loss: %.3f" % (i, j, loss))

            # guard against an empty training iterator (was ZeroDivisionError)
            Holder_trainloss[i] /= max(j, 1)
            print("Completed training all batches in epoch %d. Performing validation..." % i)

            k = 0
            for batches in iter_valid:
                k += 1
                fd = dict(zip(phs, batches))
                valid_loss, valid_invloss = sess.run(
                    [valid_cost_reg_tf, valid_invLoss_tf],
                    feed_dict=fd)
                Holder_validloss[i] += valid_loss
                print("Validation loss is %.3f" % (valid_loss))

            # guard against an empty validation iterator (was ZeroDivisionError)
            Holder_validloss[i] /= max(k, 1)

            print("Avg train loss for epoch %d: %.3f" % (i, Holder_trainloss[i]))
            print("Avg valid loss for epoch %d: %.3f" % (i, Holder_validloss[i]))
    finally:
        # release the session's resources (previously never closed)
        sess.close()
6 | Single_encoder serves as the image feature extractor for an individual MRI series.
7 | It is a single encoder that processes all input modalities (T1, T2, T1+C) in a cropped ROI region of 4x320x320.
8 | Merged_encoder merges the features from all inputs for further classification.
9 | 10 | 11 | ## Level 3 - Single Encoder 12 | 13 | 14 | ## Level 3 = Merged Encoder 15 | -------------------------------------------------------------------------------- /tensorgraph/models_zoo/heteronet/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/heteronet/__init__.py -------------------------------------------------------------------------------- /tensorgraph/models_zoo/heteronet/img/level1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/heteronet/img/level1.png -------------------------------------------------------------------------------- /tensorgraph/models_zoo/heteronet/img/level2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/heteronet/img/level2.png -------------------------------------------------------------------------------- /tensorgraph/models_zoo/heteronet/img/level3_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/heteronet/img/level3_1.png -------------------------------------------------------------------------------- /tensorgraph/models_zoo/heteronet/img/level3_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/tensorgraph/models_zoo/heteronet/img/level3_2.png -------------------------------------------------------------------------------- /tensorgraph/models_zoo/heteronet/layers.py: 
# --- tensorgraph/models_zoo/heteronet/layers.py ---
import tensorflow as tf
from ...layers import BaseLayer


class Conv3Dx(BaseLayer):
    """3D convolution with a selectable weight initializer.

    Mirrors tensorgraph.layers.conv.Conv3D, adding an ``initializer``
    switch between 'xavier' and 'normal'.
    """

    @BaseLayer.init_name_scope
    def __init__(self, num_filters=None, kernel_size=(3,3,3), stride=(1,1,1),
                 filters=None, b=None, padding='VALID', initializer='xavier',
                 stddev=0.1):
        """
        Args:
            num_filters (int): number of output feature maps.
            kernel_size (tuple): filter size (kd, kh, kw).
            stride (tuple): convolution stride (sd, sh, sw).
            filters: optional externally-created filter Variable.
            b: optional externally-created bias Variable.
            padding (str): 'VALID' or 'SAME'.
            initializer (str): 'xavier' or 'normal'.
            stddev (float): std-dev used by the 'normal' initializer.
        """
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.stddev = stddev
        self.filter = filters
        self.b = b
        self.initializer = initializer

    @BaseLayer.init_name_scope
    def __init_var__(self, state_below):
        # state_below: (batch, depth, height, width, channels)
        in_channels = int(state_below.shape[-1])
        self.filter_shape = self.kernel_size + (in_channels, self.num_filters)
        name_prefix = self.__class__.__name__
        if self.initializer == 'xavier':
            xavier = tf.contrib.layers.xavier_initializer()
            if self.filter is None:
                self.filter = tf.Variable(xavier(self.filter_shape),
                                          name=name_prefix + '_filter')
            if self.b is None:
                self.b = tf.Variable(xavier([self.num_filters]),
                                     name=name_prefix + '_b')
        elif self.initializer == 'normal':
            if self.filter is None:
                self.filter = tf.Variable(
                    tf.random_normal(self.filter_shape, stddev=self.stddev),
                    name=name_prefix + '_filter')
            if self.b is None:
                self.b = tf.Variable(tf.zeros([self.num_filters]),
                                     name=name_prefix + '_b')
        else:
            raise Exception('current initializer supports only xavier, and normal')

    def _train_fprop(self, state_below):
        """Apply the 3D convolution to state_below of shape (b, d, h, w, c)."""
        conv_out = tf.nn.conv3d(state_below, self.filter,
                                strides=(1,) + tuple(self.stride) + (1,),
                                padding=self.padding)
        return tf.nn.bias_add(conv_out, self.b)

    @property
    def _variables(self):
        return [self.filter, self.b]


# --- tensorgraph/models_zoo/heteronet/model.py ---
from ...node import StartNode, HiddenNode, EndNode
from ...layers import BaseModel, Softmax, Graph, BatchNormalization
from ...layers import MaxPooling3D, RELU, Sum, Concat, Reshape
from .layers import Conv3Dx


class Conv3DBlock(BaseModel):
    """conv-relu-conv-bn-relu block built from Conv3Dx layers."""

    @BaseModel.init_name_scope
    def __init__(self, filters=32, kernel=(3,3,3)):
        self.startnode = StartNode([None])
        stack = [
            Conv3Dx(num_filters=filters, kernel_size=kernel, stride=(1,1,1), padding='SAME'),
            RELU(),
            Conv3Dx(num_filters=filters, kernel_size=kernel, stride=(1,1,1), padding='SAME'),
            BatchNormalization(),
            RELU(),
        ]
        out_hn = HiddenNode(prev=[self.startnode], input_merge_mode=Sum(), layers=stack)
        self.endnode = EndNode(prev=[out_hn])


class SingleEncoder(BaseModel):
    """Per-modality feature extractor applied to one MRI series."""

    @BaseModel.init_name_scope
    def __init__(self):
        self.startnode = StartNode([None])
        stack = [
            Conv3DBlock(filters=32, kernel=(1,5,5)),
            MaxPooling3D(poolsize=(1,2,2), stride=(1,2,2), padding='SAME'),
            Conv3DBlock(filters=64, kernel=(1,5,5)),
        ]
        out_hn = HiddenNode(prev=[self.startnode], input_merge_mode=Sum(), layers=stack)
        self.endnode = EndNode(prev=[out_hn])


class MergeEncoder(BaseModel):
    """Classification head over the concatenated per-modality features."""

    @BaseModel.init_name_scope
    def __init__(self):
        self.startnode = StartNode([None])
        stack = []
        # squeeze the concatenated channels back down with a 1x1x1 conv
        stack.append(Conv3Dx(num_filters=64, kernel_size=(1,1,1), stride=(1,1,1), padding='SAME'))
        stack.append(MaxPooling3D(poolsize=(1,2,2), stride=(1,2,2), padding='SAME'))
        stack.append(Conv3DBlock(filters=96, kernel=(2,3,3)))
        stack.append(MaxPooling3D(poolsize=(1,2,2), stride=(1,2,2), padding='SAME'))
        stack.append(Conv3DBlock(filters=128, kernel=(3,3,3)))
        stack.append(MaxPooling3D(poolsize=(1,2,2), stride=(1,2,2), padding='SAME'))
        # fully-connected stages implemented as convolutions;
        # current shape here is (5, 6, 20, 20, 128) per the original author
        stack.append(Conv3Dx(num_filters=500, kernel_size=(1,20,20), stride=(1,20,20), padding='SAME'))
        stack.append(RELU())
        stack.append(Conv3Dx(num_filters=125, kernel_size=(1,1,1), stride=(1,1,1), padding='SAME'))
        stack.append(RELU())
        stack.append(Conv3Dx(num_filters=75, kernel_size=(4,1,1), stride=(4,1,1), padding='SAME'))
        stack.append(RELU())
        stack.append(Conv3Dx(num_filters=30, kernel_size=(1,1,1), stride=(1,1,1), padding='SAME'))
        stack.append(Reshape([-1, 30]))
        stack.append(Softmax())
        out_hn = HiddenNode(prev=[self.startnode], input_merge_mode=Sum(), layers=stack)
        self.endnode = EndNode(prev=[out_hn])


class HeteroNet(BaseModel):
    """Three-input network: one SingleEncoder instance applied to the T1,
    T2 and T1+C volumes, whose outputs are concatenated on the channel
    axis and classified by a MergeEncoder."""

    @BaseModel.init_name_scope
    def __init__(self):
        self.startnode1 = StartNode([None])
        self.startnode2 = StartNode([None])
        self.startnode3 = StartNode([None])
        # the SAME SingleEncoder instance is reused by all three branches,
        # so its layer objects (and their variables, which are created only
        # once) are shared across modalities
        encoder = SingleEncoder()
        head = MergeEncoder()
        t1_hn = HiddenNode(prev=[self.startnode1], input_merge_mode=Sum(), layers=[encoder])
        t2_hn = HiddenNode(prev=[self.startnode2], input_merge_mode=Sum(), layers=[encoder])
        tc_hn = HiddenNode(prev=[self.startnode3], input_merge_mode=Sum(), layers=[encoder])
        out_hn = HiddenNode(prev=[t1_hn, t2_hn, tc_hn],
                            input_merge_mode=Concat(-1), layers=[head])
        self.endnode = EndNode(prev=[out_hn])

    def _train_fprop(self, start1, start2, start3):
        """Bind the three inputs and run the graph in training mode."""
        self.startnode1.input_vars = [start1]
        self.startnode2.input_vars = [start2]
        self.startnode3.input_vars = [start3]
        graph = Graph(start=[self.startnode1, self.startnode2, self.startnode3],
                      end=[self.endnode])
        return graph.train_fprop()

    def _test_fprop(self, start1, start2, start3):
        """Bind the three inputs and run the graph in inference mode."""
        self.startnode1.input_vars = [start1]
        self.startnode2.input_vars = [start2]
        self.startnode3.input_vars = [start3]
        graph = Graph(start=[self.startnode1, self.startnode2, self.startnode3],
                      end=[self.endnode])
        return graph.test_fprop()


if __name__ == '__main__':
    import os
    tf.reset_default_graph()
    X_ph = tf.placeholder(tf.float32, [5, 4, 320, 320, 1])
    net = HeteroNet()
    out = net.train_fprop(X_ph, X_ph, X_ph)[0]
    print(tf.global_variables())
    with tf.Session() as sess:
        this_dir = os.path.dirname(os.path.realpath(__file__))
        writer = tf.summary.FileWriter(this_dir + '/tensorboard', sess.graph)
        sess.close()


# --- tensorgraph/models_zoo/heteronet/train.py ---
import tensorflow as tf
from ...cost import entropy


class HeteroTrain():
    """Builds the training graph for a HeteroNet and runs SGD steps."""

    def __init__(self, model, t1_ph, t2_ph, tc_ph, y_ph):
        """
        Args:
            model: a HeteroNet instance.
            t1_ph, t2_ph, tc_ph: placeholders for the three modalities.
            y_ph: placeholder for the labels.
        """
        y_train_sb = model.train_fprop(t1_ph, t2_ph, tc_ph)[0]
        train_cost_sb = entropy(y_ph, y_train_sb)
        opt_step = tf.train.AdamOptimizer(0.0001).minimize(train_cost_sb)
        self.sess = tf.Session()
        print('initialize global_variables')
        self.sess.run(tf.global_variables_initializer())
        self.model = model
        self.train_cost_sb = train_cost_sb
        self.optimizer = opt_step

    def train(self, feed_dict):
        """Run one optimization step and print the batch cost."""
        cost, _ = self.sess.run([self.train_cost_sb, self.optimizer],
                                feed_dict=feed_dict)
        print(cost)
# --- tensorgraph/models_zoo/image_search/model.py ---
"""Image-search model: a 3D inception network producing embeddings."""
import tensorflow as tf
import numpy as np
import os

from ...node import StartNode, HiddenNode, EndNode
from ...layers import BaseModel, Conv3D, RELU, Flatten, Linear, MaxPooling3D, Concat, BatchNormalization, Dropout


class Convbnrelu(BaseModel):
    """Conv3D + BatchNorm + RELU block."""

    @BaseModel.init_name_scope
    def __init__(self, nfilters, kernel_size, stride):
        """
        Args:
            nfilters (int): number of convolution filters.
            kernel_size (tuple): 3D kernel size.
            stride (tuple): 3D stride.
        """
        stack = [
            Conv3D(num_filters=nfilters, kernel_size=kernel_size, stride=stride,
                   padding='SAME', stddev=0.1),
            BatchNormalization(),
            RELU(),
        ]
        self.startnode = StartNode(input_vars=[None])
        hidden = HiddenNode(prev=[self.startnode], layers=stack)
        self.endnode = EndNode(prev=[hidden])


class Inception(BaseModel):
    """3D inception module with 1x1, 3x3, 5x5 and pooled branches.

    When ``channel_1`` is 0, the plain 1x1 branch is omitted from the
    final concatenation.
    """

    @BaseModel.init_name_scope
    def __init__(self, ks1, ks2, channel_1, channel_2_1x1, channel_2_3x3,
                 channel_3_1x1, channel_3_5x5, poolsize_4_max, channel_4_1x1,
                 stride_4_max):
        """
        Args:
            ks1: stride of the 3x3 conv branch.
            ks2: stride of the 5x5 conv branch.
            channel_1 (int): filters of the direct 1x1 branch (0 disables it).
            channel_2_1x1 / channel_2_3x3: filters of the 3x3 branch stages.
            channel_3_1x1 / channel_3_5x5: filters of the 5x5 branch stages.
            poolsize_4_max / stride_4_max: pooling-branch geometry.
            channel_4_1x1: filters of the conv applied after pooling.
        """
        self.startnode = StartNode(input_vars=[None])

        def conv_bn_relu(nfilters, kernel_size, stride):
            return [Conv3D(num_filters=nfilters, kernel_size=kernel_size,
                           stride=stride, padding='SAME', stddev=0.1),
                    BatchNormalization(),
                    RELU()]

        if channel_1 > 0:
            branch1 = HiddenNode(prev=[self.startnode],
                                 layers=conv_bn_relu(channel_1, (1, 1, 1), (1, 1, 1)))

        reduce3 = HiddenNode(prev=[self.startnode],
                             layers=conv_bn_relu(channel_2_1x1, (1, 1, 1), (1, 1, 1)))
        branch3 = HiddenNode(prev=[reduce3],
                             layers=conv_bn_relu(channel_2_3x3, (3, 3, 3), ks1))

        reduce5 = HiddenNode(prev=[self.startnode],
                             layers=conv_bn_relu(channel_3_1x1, (1, 1, 1), (1, 1, 1)))
        branch5 = HiddenNode(prev=[reduce5],
                             layers=conv_bn_relu(channel_3_5x5, (5, 5, 5), ks2))

        pooled = HiddenNode(prev=[self.startnode],
                            layers=[MaxPooling3D(poolsize_4_max, stride_4_max, 'SAME')])
        branch_pool = HiddenNode(prev=[pooled],
                                 layers=conv_bn_relu(channel_4_1x1, (1, 1, 1), (1, 1, 1)))

        if channel_1 > 0:
            self.endnode = EndNode(prev=[branch1, branch3, branch5, branch_pool],
                                   input_merge_mode=Concat(axis=-1))
        else:
            self.endnode = EndNode(prev=[branch3, branch5, branch_pool],
                                   input_merge_mode=Concat(axis=-1))


class Image_Search_Model(BaseModel):
    """Inception-based inference network producing fixed-size embeddings."""

    @BaseModel.init_name_scope
    def __init__(self, keep_probability=0.5, bottleneck_layer_size=128):
        """
        Args:
            keep_probability (float): dropout keep probability.
            bottleneck_layer_size (int): dimension of the output embeddings.
        """
        net = [
            Convbnrelu(64, (7, 7, 7), (1, 2, 2)),
            MaxPooling3D((3, 3, 3), (1, 2, 2), 'SAME'),
            Convbnrelu(64, (1, 1, 1), (1, 1, 1)),
            Convbnrelu(192, (3, 3, 3), (1, 1, 1)),
            MaxPooling3D((3, 3, 3), (1, 2, 2), 'SAME'),
            Inception((1,1,1), (1,1,1), 64, 96, 128, 16, 32, (3,3,3), 32, (1,1,1)),
            Dropout(keep_probability),
            Inception((2,2,2), (2,2,2), 0, 128, 256, 32, 64, (3,3,3), 256, (2,2,2)),
            Dropout(keep_probability),
            Inception((1,1,1), (1,1,1), 256, 96, 192, 32, 64, (3,3,3), 128, (1,1,1)),
            Dropout(keep_probability),
            Inception((2,2,2), (2,2,2), 0, 80, 128, 32, 64, (3,3,3), 320, (2,2,2)),
            Dropout(keep_probability),
            Inception((1,1,1), (1,1,1), 384, 192, 384, 48, 128, (3,3,3), 128, (1,1,1)),
            MaxPooling3D((5, 5, 5), (1, 1, 1), 'VALID'),
            Flatten(),
            Linear(this_dim=bottleneck_layer_size),
            Dropout(keep_probability),
        ]
        self.startnode = StartNode(input_vars=[None])
        hidden = HiddenNode(prev=[self.startnode], layers=net)
        self.endnode = EndNode(prev=[hidden])


# --- tensorgraph/models_zoo/image_search/train.py ---
"""Training entry point for the image-search model."""
from ...trainobject import train as mytrain
from ...cost import entropy, accuracy
from . import triplet_or_hist_loss as th_loss


def train(seq, X_ph, y_ph, X_train, y_train):
    """Train ``seq`` with a triplet (or histogram) loss over the embeddings."""
    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)

    # Set the target for triplet loss.
    target_list = list([1, 2])  # suppose that the first two disease is the target.
    target = tf.constant(target_list, dtype=tf.int32)
    target_size = len(target_list)

    loss_choose = 'triplet'
    if loss_choose == 'triplet':
        train_cost_sb, fraction_positive_triplets = th_loss.triplet_loss(
            y_ph, y_train_sb, alpha=0.3,
            target=target_list, labels_size=1,
            target_size=target_size, penalize_ratio=0.2)
    elif loss_choose == 'histogram':
        train_cost_sb = th_loss.histogram_loss(
            y_ph, y_train_sb, target=target_list, labels_size=1,
            target_size=target_size, penalize_ratio=0.2)

    optimizer = tf.train.AdamOptimizer(0.0001)
    test_accu_sb = accuracy(y_ph, y_test_sb)
    with tf.Session() as sess:
        this_dir = os.path.dirname(os.path.realpath(__file__))
        writer = tf.summary.FileWriter(this_dir + '/tensorboard', sess.graph)
        mytrain(session=sess,
                feed_dict={X_ph: X_train, y_ph: y_train},
                train_cost_sb=train_cost_sb,
                valid_cost_sb=train_cost_sb,
                optimizer=optimizer,
                epoch_look_back=5, max_epoch=1,
                percent_decrease=0, train_valid_ratio=[1, 1],
                batchsize=4, randomize_split=False)
        writer.close()
# --- tensorgraph/models_zoo/unet/model.py ---
import tensorflow as tf
from ...node import StartNode, HiddenNode, EndNode
from ...layers import BaseModel, BatchNormalization, RELU, MaxPooling, \
    Conv2D, Conv2D_Transpose, Dropout, Concat


class Convbnrelu(BaseModel):
    """Conv2D + BatchNorm + RELU, with optional light dropout."""

    @BaseModel.init_name_scope
    def __init__(self, kernel_size, stride, nfilters, drop=True):
        """
        Args:
            kernel_size (tuple): kernel size.
            stride (tuple): stride size.
            nfilters (int): number of filters.
            drop (bool): append a Dropout(0.02) layer when True.
        """
        stack = [
            Conv2D(num_filters=nfilters, kernel_size=kernel_size,
                   stride=stride, padding='SAME'),
            BatchNormalization(),
            RELU(),
        ]
        if drop:
            stack.append(Dropout(0.02))
        self.startnode = StartNode(input_vars=[None])
        hidden = HiddenNode(prev=[self.startnode], layers=stack)
        self.endnode = EndNode(prev=[hidden])


class Unet(BaseModel):
    """U-Net (https://arxiv.org/abs/1505.04597) with three pooling levels.

    Contracting path of 32/64/128 filters down to a 256-filter bottom,
    then an expanding path with transpose convolutions and skip
    concatenations, ending in a 1x1 conv over ``nclass`` channels.
    """

    @BaseModel.init_name_scope
    def __init__(self, nclass):
        """
        Args:
            nclass (int): number of classes of the output mask.
        """
        ksize = (3, 3)
        stride = (1, 1)
        filters = [32, 64, 128, 256]
        poolsize = (2, 2)
        poolstride = (2, 2)

        def double_conv(nfilters, extra=None):
            # two Convbnrelu blocks (dropout on the first only), plus an
            # optional trailing layer
            blk = [Convbnrelu(ksize, stride, nfilters),
                   Convbnrelu(ksize, stride, nfilters, False)]
            if extra is not None:
                blk.append(extra)
            return blk

        def pool():
            return [MaxPooling(poolsize=poolsize, stride=poolstride, padding='SAME')]

        def up(nfilters):
            return [Conv2D_Transpose(nfilters, kernel_size=ksize,
                                     stride=poolstride, padding='SAME')]

        self.startnode = StartNode(input_vars=[None])

        # contracting path
        blk1_hn = HiddenNode(prev=[self.startnode], layers=double_conv(filters[0]))
        pool1_hn = HiddenNode(prev=[blk1_hn], layers=pool())
        blk2_hn = HiddenNode(prev=[pool1_hn], layers=double_conv(filters[1]))
        pool2_hn = HiddenNode(prev=[blk2_hn], layers=pool())
        blk3_hn = HiddenNode(prev=[pool2_hn], layers=double_conv(filters[2]))
        pool3_hn = HiddenNode(prev=[blk3_hn], layers=pool())
        blk4_hn = HiddenNode(prev=[pool3_hn], layers=double_conv(filters[3]))

        # expanding path with skip connections
        t1_hn = HiddenNode(prev=[blk4_hn], layers=up(filters[2]))
        up1_hn = HiddenNode(prev=[t1_hn, blk3_hn],
                            input_merge_mode=Concat(axis=-1),
                            layers=double_conv(filters[2]))
        t2_hn = HiddenNode(prev=[up1_hn], layers=up(filters[1]))
        up2_hn = HiddenNode(prev=[t2_hn, blk2_hn],
                            input_merge_mode=Concat(axis=-1),
                            layers=double_conv(filters[1]))
        t3_hn = HiddenNode(prev=[up2_hn], layers=up(filters[0]))
        final_conv = Conv2D(num_filters=nclass, kernel_size=(1, 1),
                            stride=stride, padding='SAME')
        up3_hn = HiddenNode(prev=[t3_hn, blk1_hn],
                            input_merge_mode=Concat(axis=-1),
                            layers=double_conv(filters[0], final_conv))
        self.endnode = EndNode(prev=[up3_hn])


# --- tensorgraph/models_zoo/unet/train.py ---
import numpy as np
import os
from ...trainobject import train as mytrain
from ...cost import entropy, accuracy


def train(seq, X_ph, y_ph, X_train, y_train):
    """Train the U-Net with entropy loss; the validation objective is the
    negated test accuracy so the generic trainer tracks accuracy."""
    train_out = seq.train_fprop(X_ph)
    test_out = seq.test_fprop(X_ph)
    cost_sb = entropy(y_ph, train_out)
    accu_sb = accuracy(y_ph, test_out)
    opt = tf.train.AdamOptimizer(0.0001)
    with tf.Session() as sess:
        here = os.path.dirname(os.path.realpath(__file__))
        writer = tf.summary.FileWriter(here + '/tensorboard', sess.graph)
        mytrain(session=sess,
                feed_dict={X_ph: X_train, y_ph: y_train},
                train_cost_sb=cost_sb,
                valid_cost_sb=-accu_sb,
                optimizer=opt,
                epoch_look_back=5, max_epoch=1,
                percent_decrease=0, train_valid_ratio=[5, 1],
                batchsize=1, randomize_split=False)
        writer.close()


# --- tensorgraph/node.py ---
import tensorflow as tf


class Sum(object):
    """Merge mode that element-wise sums all incoming states."""

    def train_fprop(self, state_list):
        return tf.add_n(state_list)

    def test_fprop(self, state_list):
        return self.train_fprop(state_list)


class NoChangeState(object):
    """Merge mode that passes states through unchanged."""

    @staticmethod
    def check_y(y):
        '''Return the single element of a one-element list, the whole list
        if it has more than one element, and raise otherwise.'''
        if len(y) == 1:
            return y[0]
        elif len(y) > 1:
            return y
        else:
            raise Exception('{} is empty or not a list'.format(y))

    def train_fprop(self, state_list):
        return NoChangeState.check_y(state_list)

    def test_fprop(self, state_list):
        return self.train_fprop(state_list)


class StartNode(object):
    def __init__(self, input_vars):
        '''
        StartNode defines the input to the graph.

        Args:
            input_vars (list of tensors): the input tensors to the graph,
                which can be a placeholder, the output of another graph,
                or a tensor.
        '''
        assert isinstance(input_vars, list)
        self.input_vars = input_vars
41 | ''' 42 | assert isinstance(input_vars, list) 43 | self.input_vars = input_vars 44 | 45 | 46 | class HiddenNode(object): 47 | def __init__(self, prev, input_merge_mode=Sum(), layers=[]): 48 | ''' 49 | HiddenNode encapsulates a list of layers, it can be connected to a StartNode 50 | or another HiddenNode 51 | 52 | Args: 53 | input_merge_mode(tensorgraph.layers.Merge): ``Merge`` Layer for merging 54 | the multiple inputs coming into this hidden node 55 | layers(list): the sequential layers within the node 56 | prev(list): list of previous nodes to link to 57 | ''' 58 | assert isinstance(prev, list) 59 | assert isinstance(layers, list) 60 | self.input_merge_mode = input_merge_mode 61 | self.prev = prev 62 | self.layers = layers 63 | self.input_vars = [] 64 | 65 | 66 | def train_fprop(self): 67 | if len(self.input_vars) == 0: 68 | return [] 69 | state = self.input_merge_mode.train_fprop(self.input_vars) 70 | for layer in self.layers: 71 | layer.__init_var__(state) 72 | state = layer.train_fprop(state) 73 | return [state] 74 | 75 | 76 | def test_fprop(self): 77 | if len(self.input_vars) == 0: 78 | return [] 79 | state = self.input_merge_mode.test_fprop(self.input_vars) 80 | for layer in self.layers: 81 | layer.__init_var__(state) 82 | state = layer.test_fprop(state) 83 | return [state] 84 | 85 | 86 | @property 87 | def variables(self): 88 | var = [] 89 | for layer in self.layers: 90 | var += layer._variables 91 | return var 92 | 93 | 94 | class EndNode(object): 95 | def __init__(self, prev, input_merge_mode=NoChangeState()): 96 | ''' 97 | EndNode is where we want to get the output from the graph. It can be 98 | connected to a HiddenNode or a StartNode. 
import numpy as np
import time
import sys


class ProgressBar(object):
    def __init__(self, target, width=30, verbose=1):
        '''
        Args:
            target(int): total number of steps expected
            width(int): character width of the drawn bar
            verbose(int): 1 = live-updating bar, 2 = one summary line at
                completion, any other value = silent (state is still tracked)
        '''
        self.width = width
        self.target = target
        # maps metric name -> [weighted sum, total step count] for averaging
        self.sum_values = {}
        self.unique_values = []   # insertion-ordered metric names for display
        self.start = time.time()
        self.total_width = 0
        self.seen_so_far = 0
        self.verbose = verbose

    def update(self, current, values=None):
        '''
        Args:
            current (int): index of current step
            values (list of tuples): (name, value_for_last_step).
                The progress bar will display averages for these values.
        '''
        # BUGFIX: ``values=[]`` was a mutable default argument; use a sentinel.
        if values is None:
            values = []
        for k, v in values:
            # weight each reported value by the number of steps it covers
            step = current - self.seen_so_far
            if k not in self.sum_values:
                self.sum_values[k] = [v * step, step]
                self.unique_values.append(k)
            else:
                self.sum_values[k][0] += v * step
                self.sum_values[k][1] += step
        self.seen_so_far = current

        now = time.time()
        if self.verbose == 1:
            prev_total_width = self.total_width
            sys.stdout.write("\b" * prev_total_width)
            sys.stdout.write("\r")

            # BUGFIX: guard against target == 0, where log10 returns -inf and
            # the progress division raises ZeroDivisionError.
            numdigits = int(np.floor(np.log10(max(self.target, 1)))) + 1
            barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
            bar = barstr % (current, self.target)
            prog = float(current) / self.target if self.target else 1.0
            if current > self.target:
                prog = 1
            prog_width = int(self.width * prog)
            if prog_width > 0:
                bar += ('=' * (prog_width - 1))
                if current < self.target:
                    bar += '>'
            bar += ('.' * (self.width - prog_width - 1))
            bar += ']'
            sys.stdout.write(bar)
            self.total_width = len(bar)

            if current:
                time_per_unit = (now - self.start) / current
            else:
                time_per_unit = 0
            eta = time_per_unit * (self.target - current)
            info = ''
            if current < self.target:
                info += ' - ETA: %ds' % eta
            else:
                info += ' - %ds' % (now - self.start)
            for k in self.unique_values:
                # max(1, count) avoids division by zero on a zero-step update
                info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))

            self.total_width += len(info)
            # pad with spaces so a shorter line fully overwrites the previous one
            if prev_total_width > self.total_width:
                info += ((prev_total_width - self.total_width) * " ")

            sys.stdout.write(info)
            sys.stdout.flush()

        if self.verbose == 2:
            if current >= self.target:
                info = '%ds' % (now - self.start)
                for k in self.unique_values:
                    info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
                sys.stdout.write(info + "\n")


    def add(self, n, values=None):
        '''Advance the bar by ``n`` steps (relative counterpart of update).'''
        self.update(self.seen_so_far + n, values)
-------------------------------------------------------------------------------- /tensorgraph/sequential.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class Sequential(object): 4 | 5 | def __init__(self): 6 | self.layers = [] 7 | 8 | def add(self, layer): 9 | self.layers.append(layer) 10 | 11 | def pop(self, index): 12 | return self.layers.pop(index) 13 | 14 | def train_fprop(self, input_state, layers=None): 15 | if layers is None: 16 | layers = range(len(self.layers)) 17 | for i in layers: 18 | self.layers[i].__init_var__(input_state) 19 | layer_output = self.layers[i].train_fprop(input_state) 20 | input_state = layer_output 21 | return input_state 22 | 23 | def test_fprop(self, input_state, layers=None): 24 | if layers is None: 25 | layers = range(len(self.layers)) 26 | for i in layers: 27 | self.layers[i].__init_var__(input_state) 28 | layer_output = self.layers[i].test_fprop(input_state) 29 | input_state = layer_output 30 | return input_state 31 | 32 | @property 33 | def variables(self): 34 | var = [] 35 | for layer in self.layers: 36 | var += layer._variables 37 | return list(set(var)) 38 | 39 | def total_num_parameters(self): 40 | count = 0 41 | for var in self.variables: 42 | count += int(np.prod(var.get_shape())) 43 | return count 44 | -------------------------------------------------------------------------------- /tensorgraph/stopper.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | 4 | class EarlyStopper(object): 5 | 6 | def __init__(self, max_epoch=100, epoch_look_back=None, percent_decrease=None): 7 | """ 8 | Use for doing early stopping during training 9 | 10 | Args: 11 | max_epoch (int): if the training reach the ``max_epoch``, it stops 12 | epoch_look_back (int): the number of epoch to look to check the percent 13 | decrease in validation loss. 
import sys
import numpy as np

class EarlyStopper(object):
    '''Tracks validation error across epochs and decides when training
    should stop.'''

    def __init__(self, max_epoch=100, epoch_look_back=None, percent_decrease=None):
        """
        Use for doing early stopping during training

        Args:
            max_epoch (int): if the training reach the ``max_epoch``, it stops
            epoch_look_back (int): the number of epoch to look to check the percent
                decrease in validation loss. If the percent decrease is smaller than
                the desired value within the number of epoch look back then it stops
            percent_decrease (float): between ``0 to 1.0``, if within the ``epoch_look_back``
                and the decrease in validation error is smaller than this percentage then it stops.
        """
        self.max_epoch = max_epoch
        self.epoch_look_back = epoch_look_back
        # treat an unspecified threshold as "any improvement counts"
        self.percent_decrease = 0 if percent_decrease is None else percent_decrease
        self.reset()

    def reset(self):
        '''Restore the stopper to its freshly-constructed state.'''
        self.best_valid_error = float(sys.maxsize)
        self.best_epoch_last_update = 0
        self.best_valid_last_update = float(sys.maxsize)
        self.epoch = 0

    def continue_learning(self, valid_error, epoch=None):
        '''
        check if should continue learning, by default first epoch starts with 1.

        Args:
            valid_error (float): validation error to be keep track by early stopper,
                smaller is better
            epoch (int): the training epoch, if not specified, the stopper will auto
                keep track
        '''
        # auto-increment when caller does not (or passes a falsy) epoch
        self.epoch = epoch if epoch else self.epoch + 1
        if valid_error < self.best_valid_error:
            self.best_valid_error = valid_error
        error_dcr = 0
        if valid_error < self.best_valid_last_update:
            error_dcr = self.best_valid_last_update - valid_error

        # hard stop at the epoch budget
        if self.epoch >= self.max_epoch:
            return False
        # a big enough relative improvement resets the look-back window
        if abs(float(error_dcr) / self.best_valid_last_update) > self.percent_decrease:
            self.best_valid_last_update = self.best_valid_error
            self.best_epoch_last_update = self.epoch
            return True
        # no look-back configured: keep going until max_epoch
        if self.epoch_look_back is None:
            return True
        # otherwise stop once the window since the last improvement is exceeded
        return self.epoch - self.best_epoch_last_update <= self.epoch_look_back
from .stopper import EarlyStopper
from .progbar import ProgressBar
from .utils import split_arr
from .data_iterator import SequentialIterator
from tensorflow.python.framework import ops
import tensorflow as tf
import logging
logging.basicConfig(format='%(module)s.%(funcName)s %(lineno)d:%(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)


def train(session, feed_dict, train_cost_sb, valid_cost_sb, optimizer, epoch_look_back=5,
          max_epoch=100, percent_decrease=0, train_valid_ratio=None, batchsize=64,
          randomize_split=False):
    """
    Example training object for training a dataset

    Args:
        session: an active tf session to run the graph in
        feed_dict (dict): maps each input placeholder to its full numpy array;
            every array is split into train/valid portions internally
        train_cost_sb: scalar cost tensor minimized during training
        valid_cost_sb: scalar cost tensor evaluated on the validation split
        optimizer: tf optimizer whose ``minimize`` is applied to train_cost_sb
        epoch_look_back (int): early-stopper look-back window
        max_epoch (int): hard limit on the number of epochs
        percent_decrease (float): minimum relative improvement for the stopper
        train_valid_ratio (list): ``[train, valid]`` split proportions,
            defaults to ``[5, 1]``
        batchsize (int): minibatch size for both iterators
        randomize_split (bool): shuffle before splitting train/valid
    """
    # BUGFIX: ``train_valid_ratio=[5,1]`` was a mutable default argument;
    # use a None sentinel so each call gets its own list.
    if train_valid_ratio is None:
        train_valid_ratio = [5, 1]

    train_arrs = []
    valid_arrs = []
    phs = []
    for ph, arr in feed_dict.items():
        train_arr, valid_arr = split_arr(arr, train_valid_ratio, randomize=randomize_split)
        phs.append(ph)
        train_arrs.append(train_arr)
        valid_arrs.append(valid_arr)

    iter_train = SequentialIterator(*train_arrs, batchsize=batchsize)
    iter_valid = SequentialIterator(*valid_arrs, batchsize=batchsize)

    es = EarlyStopper(max_epoch, epoch_look_back, percent_decrease)

    # required for BatchNormalization layer: run the collected update ops
    # (moving mean/variance) before every optimizer step
    update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
    with ops.control_dependencies(update_ops):
        train_op = optimizer.minimize(train_cost_sb)

    init = tf.global_variables_initializer()
    session.run(init)

    epoch = 0
    while True:
        epoch += 1
        ##############################[ Training ]##############################
        print('\n')
        logger.info('<<<<<[ epoch: {} ]>>>>>'.format(epoch))
        logger.info('..training')
        pbar = ProgressBar(len(iter_train))
        ttl_exp = 0
        mean_train_cost = 0
        for batches in iter_train:
            fd = dict(zip(phs, batches))
            train_cost, _ = session.run([train_cost_sb, train_op], feed_dict=fd)
            # weight each batch cost by its size so the mean is exact even
            # when the last batch is smaller
            mean_train_cost += train_cost * len(batches[0])
            ttl_exp += len(batches[0])
            pbar.update(ttl_exp)

        print('')
        mean_train_cost /= ttl_exp
        logger.info('..average train cost: {}'.format(mean_train_cost))

        ##############################[ Validating ]############################
        logger.info('..validating')
        pbar = ProgressBar(len(iter_valid))
        ttl_exp = 0
        mean_valid_cost = 0
        for batches in iter_valid:
            fd = dict(zip(phs, batches))
            valid_cost = session.run(valid_cost_sb, feed_dict=fd)
            mean_valid_cost += valid_cost * len(batches[0])
            ttl_exp += len(batches[0])
            pbar.update(ttl_exp)

        print('')
        mean_valid_cost /= ttl_exp
        logger.info('..average valid cost: {}'.format(mean_valid_cost))

        if es.continue_learning(mean_valid_cost, epoch=epoch):
            logger.info('best epoch last update: {}'.format(es.best_epoch_last_update))
            logger.info('best valid last update: {}'.format(es.best_valid_last_update))
        else:
            logger.info('training done!')
            break
import tensorgraph as tg
import tensorflow as tf
import numpy as np
from tensorgraph.utils import make_one_hot
from sklearn.metrics import f1_score

def test_binary_f1():
    '''binary_f1 should agree with sklearn's f1_score on random binary labels.'''
    ph1 = tf.placeholder('int32', [None, 2])
    ph2 = tf.placeholder('int32', [None, 2])

    f1_sb = tg.cost.binary_f1(ph1, ph2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        y1 = np.random.randint(0, 2, 100)
        y2 = np.random.randint(0, 2, 100)
        sc1 = f1_score(y1, y2)

        y1_oh = make_one_hot(y1, 2)
        y2_oh = make_one_hot(y2, 2)
        sc2 = sess.run(f1_sb, feed_dict={ph1:y1_oh, ph2:y2_oh})
        # float tolerance instead of exact equality
        assert (sc1 - sc2)**2 < 1e-6
        # assert sc1 == sc2


def test_image_f1():
    '''Smoke test: image_f1 runs on random binary 4-D tensors.'''
    ph1 = tf.placeholder('int32', [None, 3, 4, 5])
    ph2 = tf.placeholder('int32', [None, 3, 4, 5])

    f1_sb = tg.cost.image_f1(ph1, ph2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # BUGFIX: np.random.random_integers is deprecated (and removed from
        # modern numpy); randint(0, 2) draws the same inclusive {0, 1} range.
        y1 = np.random.randint(0, 2, [10, 3, 4, 5])
        y2 = np.random.randint(0, 2, [10, 3, 4, 5])
        print(sess.run(f1_sb, feed_dict={ph1:y1, ph2:y2}))


# if __name__ == '__main__':
#     test_image_f1()
import tensorgraph as tg
import numpy as np
import time


def test_SimpleBlocks():
    '''Exercise SimpleBlocks with and without preloading, and with paired
    data files; also compares wall-clock time of the two preload modes.'''
    # write a random array to disk so the block iterator has a real file
    X = np.random.rand(100, 200)
    with open('X.npy', 'wb') as f:
        np.save(f, X)

    db = tg.SimpleBlocks(['X.npy']*10, batchsize=32, allow_preload=True)
    t1 = time.time()
    count = 1
    for blk in db:
        print(count)
        count += 1
        for batch in blk:
            # time.sleep returns None; this is just a delay per batch
            print(time.sleep(0.1))
            pass
    print('with preload time:', time.time() - t1)

    db = tg.SimpleBlocks(['X.npy']*10, batchsize=32, allow_preload=False)
    t1 = time.time()
    count = 1
    for blk in db:
        print(count)
        count += 1
        for batch in blk:
            print(time.sleep(0.1))
            pass
    print('without preload time:', time.time() - t1)


    # tuple entries yield multiple aligned batches per block
    db = tg.SimpleBlocks([('X.npy', 'X.npy'), ('X.npy', 'X.npy')], batchsize=32, allow_preload=False)
    for blk in db:
        print(blk)
        for batch in blk:
            print('len batch:', len(batch))
            print('batch1 size:', batch[0].shape)
            print('batch2 size:', batch[1].shape)


def test_DataBlocks():
    '''Exercise DataBlocks, which yields (train, valid) block pairs, while
    driving a ProgressBar with the number of examples seen so far.'''
    X = np.random.rand(100, 200)
    with open('X.npy', 'wb') as f:
        np.save(f, X)

    db = tg.DataBlocks(['X.npy']*10, batchsize=32, allow_preload=False)
    for train_blk, valid_blk in db:
        n_exp = 0
        pbar = tg.ProgressBar(len(train_blk))
        for batch in train_blk:
            n_exp += len(batch[0])
            time.sleep(0.05)
            pbar.update(n_exp)
        print()
        pbar = tg.ProgressBar(len(valid_blk))
        n_exp = 0
        for batch in valid_blk:
            n_exp += len(batch[0])
            time.sleep(0.05)
            pbar.update(n_exp)
        print()

if __name__ == '__main__':
    test_DataBlocks()
    test_SimpleBlocks()
import tensorgraph as tg
from tensorgraph.layers.backbones import *
from tensorgraph.layers import Softmax, Flatten, Linear, MaxPooling, BaseModel, Concat, Select, NoChange
import tensorflow as tf
import os
from tensorgraph.trainobject import train as mytrain
# force CPU so the tests run on machines without a GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'


# shared tiny random dataset and placeholders used by every backbone test
# NOTE(review): ``np`` is not imported here explicitly — it appears to come
# from the star import of tensorgraph.layers.backbones; confirm.
X_train = np.random.rand(10, 32, 32, 1)
y_train = np.random.rand(10, 1)
_, h, w, c = X_train.shape
_, nclass = y_train.shape
X_ph = tf.placeholder('float32', [None, h, w, c])
y_ph = tf.placeholder('float32', [None, nclass])


def train(seq):
    '''Run one training epoch of the given Sequential model on the shared
    random dataset (smoke test: checks the graph builds and trains).'''
    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)
    train_cost_sb = tg.cost.entropy(y_ph, y_train_sb)
    optimizer = tf.train.AdamOptimizer(0.0001)
    test_accu_sb = tg.cost.accuracy(y_ph, y_test_sb)
    with tf.Session() as sess:
        this_dir = os.path.dirname(os.path.realpath(__file__))
        writer = tf.summary.FileWriter(this_dir + '/tensorboard', sess.graph)
        # negative accuracy as validation cost: lower is better for the stopper
        mytrain(session=sess,
                feed_dict={X_ph:X_train, y_ph:y_train},
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5, max_epoch=1,
                percent_decrease=0, train_valid_ratio=[5,1],
                batchsize=1, randomize_split=False)
        writer.close()


def test_VGG16():
    '''Smoke-train a VGG16 backbone with a small classifier head.'''
    seq = tg.Sequential()
    seq.add(VGG16())
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)


def test_VGG19():
    '''Smoke-train a VGG19 backbone with a small classifier head.'''
    seq = tg.Sequential()
    seq.add(VGG19())
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)


def test_ResNetSmall():
    '''Smoke-train a minimal ResNetSmall (one unit per stage).'''
    seq = tg.Sequential()
    seq.add(ResNetSmall(config=[1,1]))
    seq.add(MaxPooling(poolsize=(1,1), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)


def test_ResNetBase():
    '''Smoke-train a minimal ResNetBase (one unit per stage).'''
    seq = tg.Sequential()
    seq.add(ResNetBase(config=[1,1,1,1]))
    seq.add(MaxPooling(poolsize=(1,1), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)


def test_DenseNet():
    '''Smoke-train a minimal DenseNet.'''
    seq = tg.Sequential()
    seq.add(DenseNet(ndense=1, growth_rate=1, nlayer1blk=1))
    seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)


def test_UNet():
    '''Smoke-train a UNet backbone on the shared input shape.'''
    seq = tg.Sequential()
    seq.add(UNet(input_shape=(h, w)))
    seq.add(MaxPooling(poolsize=(3,3), stride=(1,1), padding='VALID'))
    seq.add(Flatten())
    seq.add(Linear(this_dim=nclass))
    seq.add(Softmax())
    train(seq)


class XModel(BaseModel):
    '''Two-input / two-output test model: each input goes through its own
    Linear branch, the branches are concatenated, and two heads (20-dim and
    30-dim) are produced from the merged state.'''

    @BaseModel.init_name_scope
    def __init__(self):

        self.startnode = tg.StartNode(input_vars=[None])
        layers1 = []
        layers1.append(Linear(5))
        # Select(0)/Select(1) pick the corresponding input from the start node
        hn1 = tg.HiddenNode(prev=[self.startnode], input_merge_mode=Select(0), layers=layers1)

        layers2 = []
        layers2.append(Linear(8))
        hn2 = tg.HiddenNode(prev=[self.startnode], input_merge_mode=Select(1), layers=layers2)

        merge = tg.HiddenNode(prev=[hn1, hn2], input_merge_mode=Concat(axis=1))

        layers1a = []
        layers1a.append(Linear(20))
        hn1a = tg.HiddenNode(prev=[merge], layers=layers1a)

        layers2a = []
        layers2a.append(Linear(30))
        hn2a = tg.HiddenNode(prev=[merge], layers=layers2a)
        self.endnode = tg.EndNode(prev=[hn1a, hn2a])


def test_BaseModel():
    '''Check that XModel maps (10,3)+(10,5) inputs to (10,20)+(10,30) outputs.'''
    X1 = np.random.rand(10, 3).astype('float32')
    X2 = np.random.rand(10, 5).astype('float32')
    xmodel = XModel()
    y1, y2 = xmodel.train_fprop(X1, X2)
    assert y1.shape == (10, 20)
    assert y2.shape == (10, 30)


if __name__ == '__main__':
    # print('runtime test')
    # test_VGG16()
    # print('..VGG16 running test done')
    # test_VGG19()
    # print('..VGG19 running test done')
    # test_ResNetSmall()
    # print('..ResNetSmall running test done')
    # test_ResNetBase()
    # print('..ResNetBase running test done')
    # test_DenseNet()
    # print('..DenseNet running test done')
    # test_UNet()
    # print('..UNet running test done')
    test_BaseModel()

import tensorflow as tf
import tensorgraph as tg
from tensorgraph.layers import Depthwise_Conv2D, Atrous_Conv2D, Conv2D, Conv3D
import numpy as np

def test_Depthwise_Conv2D():
    '''Smoke test: depthwise conv builds and runs on a random batch.'''
    seq = tg.Sequential()
    seq.add(Depthwise_Conv2D(num_filters=2, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))

    X_ph = tf.placeholder('float32', [None, 100, 100, 5])

    y_sb = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={X_ph:np.random.rand(32,100,100,5)})
        print(out.shape)


def test_Conv2D():
    '''Smoke test: 2-D conv builds and runs in both train and test mode.'''
    seq = tg.Sequential()
    seq.add(Conv2D(num_filters=2, kernel_size=(3, 3), stride=(1, 1), padding='SAME'))

    X_ph = tf.placeholder('float32', [None, 100, 100, 5])
    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_train_sb, feed_dict={X_ph:np.random.rand(32,100,100,5)})
        print(out.shape)


def test_Atrous_Conv2D():
    '''Check atrous conv output sizes for SAME and VALID padding.'''
    seq = tg.Sequential()
    seq.add(Atrous_Conv2D(num_filters=2, kernel_size=(3, 3), rate=3, padding='SAME'))

    h, w, c = 100, 300, 5
    X_ph = tf.placeholder('float32', [None, h, w, c])

    y_sb = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={X_ph:np.random.rand(32, h, w, c)})
        print(out.shape)
        # SAME padding preserves spatial dims
        assert out.shape[1] == h and out.shape[2] == w
    seq = tg.Sequential()
    r = 2
    k = 5
    seq.add(Atrous_Conv2D(num_filters=2, kernel_size=(k, k), rate=r, padding='VALID'))

    h, w, c = 100, 300, 5
    X_ph = tf.placeholder('float32', [None, h, w, c])

    y_sb = seq.train_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_sb, feed_dict={X_ph:np.random.rand(32, h, w, c)})
        print(out.shape)
        # BUGFIX: the original single assert used a comma
        #   assert out.shape[1] == ..., out.shape[2] == ...
        # which made the second comparison the assert *message* (never
        # evaluated as a check), and that message formula used ``w`` where
        # the kernel size ``k`` was intended. Assert both dims explicitly
        # against the effective dilated kernel size.
        effective_k = k + (k - 1) * (r - 1)
        assert out.shape[1] == h - 2 * int(effective_k / 2)
        assert out.shape[2] == w - 2 * int(effective_k / 2)


def test_Conv3D():
    '''Smoke test: 3-D conv builds and runs in both train and test mode.'''
    seq = tg.Sequential()
    seq.add(Conv3D(num_filters=2, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding='SAME'))
    X_ph = tf.placeholder('float32', [None, 10, 10, 10, 5])
    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        out = sess.run(y_train_sb, feed_dict={X_ph:np.random.rand(32,10,10,10,5)})
        print(out.shape)


if __name__ == '__main__':
    test_Conv2D()
    test_Depthwise_Conv2D()
    test_Atrous_Conv2D()
    test_Conv3D()

import tensorflow as tf
import tensorgraph as tg
from tensorgraph.layers import SequenceMask, MaskSoftmax, SelectedMaskSoftmax
import numpy as np

def test_SequenceMask():
    '''SequenceMask should zero out positions at or beyond each example's
    sequence length while leaving earlier positions non-zero.'''
    X_ph = tf.placeholder('float32', [None, 5, 6, 7])
    seq_ph = tf.placeholder('int32', [None])

    X_sn = tg.StartNode(input_vars=[X_ph])
    seq_sn = tg.StartNode(input_vars=[seq_ph])

    merge_hn = tg.HiddenNode(prev=[X_sn, seq_sn], input_merge_mode=SequenceMask(maxlen=5))

    out_en = tg.EndNode(prev=[merge_hn])

    graph = tg.Graph(start=[X_sn, seq_sn], end=[out_en])

    y_train_sb = graph.train_fprop()
    y_test_sb = graph.test_fprop()

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        feed_dict = {X_ph:np.random.rand(3,5,6,7), seq_ph:[2,3,4]}
        y_train = sess.run(y_train_sb, feed_dict=feed_dict)[0]
        y_test = sess.run(y_test_sb, feed_dict=feed_dict)[0]
        # masking is deterministic, so train and test outputs agree
        assert y_train.sum() == y_test.sum()
        assert y_train[0, :2].sum() > 0 and y_train[0, 2:].sum() == 0
        assert y_train[1, :3].sum() > 0 and y_train[1, 3:].sum() == 0
        assert y_train[2, :4].sum() > 0 and y_train[2, 4:].sum() == 0
        print('test passed!')


def test_MaskSoftmax():
    '''MaskSoftmax: probabilities inside each sequence length sum to 1,
    positions beyond it get probability 0 (an all-masked row sums to 0).'''
    X_ph = tf.placeholder('float32', [None, 20])
    seq_ph = tf.placeholder('int32', [None])

    X_sn = tg.StartNode(input_vars=[X_ph])
    seq_sn = tg.StartNode(input_vars=[seq_ph])

    merge_hn = tg.HiddenNode(prev=[X_sn, seq_sn], input_merge_mode=MaskSoftmax())

    y_en = tg.EndNode(prev=[merge_hn])

    graph = tg.Graph(start=[X_sn, seq_sn], end=[y_en])
    y_sb, = graph.train_fprop()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed_dict = {X_ph:np.random.rand(3, 20),
                     seq_ph:[5, 8, 0]}
        out = sess.run(y_sb, feed_dict=feed_dict)
        # squared-difference comparisons act as float tolerance checks
        assert (out[0][5:].sum() - 0)**2 < 1e-6
        assert (out[0][:5].sum() - 1)**2 < 1e-6
        assert (out[1][8:].sum() - 0)**2 < 1e-6
        assert (out[1][:8].sum() - 1)**2 < 1e-6
        assert (out[2].sum() - 0)**2 < 1e-6
        print('test passed!')


def test_SelectedMaskSoftmax():
    '''SelectedMaskSoftmax: softmax restricted to the positions selected by
    a shared binary mask vector.'''
    X_ph = tf.placeholder('float32', [None, 20])
    mask_ph = tf.placeholder('float32', [20])

    X_sn = tg.StartNode(input_vars=[X_ph])
    mask_sn = tg.StartNode(input_vars=[mask_ph])

    merge_hn = tg.HiddenNode(prev=[X_sn, mask_sn], input_merge_mode=SelectedMaskSoftmax())

    y_en = tg.EndNode(prev=[merge_hn])

    graph = tg.Graph(start=[X_sn, mask_sn], end=[y_en])
    y_sb, = graph.train_fprop()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        mask_arr = np.zeros(20)
        mask_arr[[2,3,4]] = 1
        # import pdb; pdb.set_trace()
        feed_dict = {X_ph:np.random.rand(3, 20),
                     mask_ph:mask_arr}
        out = sess.run(y_sb, feed_dict=feed_dict)
        assert (out.sum(1) == 1).any()
        print(out)
        print('test passed!')



if __name__ == '__main__':
    test_SequenceMask()
    test_MaskSoftmax()
    test_SelectedMaskSoftmax()

import tensorgraph as tg
import tensorflow as tf
from tensorgraph.layers import OneHot
import numpy as np

def test_OneHot():
    '''OneHot should append a one-hot axis of size 3 to 3-D and 4-D int
    inputs (smoke test printing the resulting shapes).'''
    X1 = tf.placeholder('int32', [5, 6, 7])
    X2 = tf.placeholder('int32', [5, 6, 7, 8])
    seq = tg.Sequential()
    seq.add(OneHot(onehot_size=3))

    y1 = seq.train_fprop(X1)
    y2 = seq.train_fprop(X2)

    with tf.Session() as sess:
        # BUGFIX: np.random.random_integers is deprecated (and removed from
        # modern numpy); randint(0, 3) draws the same inclusive 0..2 range,
        # which matches onehot_size=3.
        print(sess.run(y1, feed_dict={X1:np.random.randint(0, 3, [5,6,7])}).shape)
        print(sess.run(y2, feed_dict={X2:np.random.randint(0, 3, [5,6,7,8])}).shape)

if __name__ == '__main__':

    test_OneHot()
32)}) 19 | print(out) 20 | print(out.shape) 21 | 22 | def test_dropout(): 23 | X_ph = tf.placeholder('float', [None, 5, 10]) 24 | out = tf.nn.dropout(X_ph, keep_prob=0.5, noise_shape=[tf.shape(X_ph)[0], 5, 1]) 25 | with tf.Session() as sess: 26 | sess.run(tf.global_variables_initializer()) 27 | out = sess.run(out, feed_dict={X_ph:np.random.rand(3, 5, 10)}) 28 | print(out) 29 | print(out.shape) 30 | 31 | 32 | if __name__ == '__main__': 33 | test_Dropout() 34 | # test_dropout() 35 | -------------------------------------------------------------------------------- /test/models_zoo/aibraintumormodel/model_C3_test.py: -------------------------------------------------------------------------------- 1 | # Python2 compatibility 2 | from __future__ import print_function 3 | 4 | import numpy as np 5 | import warnings 6 | import tensorflow as tf 7 | 8 | import sys 9 | try: 10 | from tensorgraph.models_zoo.aibraintumormodel.nn.model \ 11 | import model_C3 as model_module 12 | except: 13 | warnings.warn("WARNING: Unable to locate model_C3 in TensorGraph package. 
Attempting relative path import") 14 | model_path = "../../../tensorgraph/models_zoo/" 15 | sys.path.append(model_path) 16 | from aibraintumormodel.nn.model import model_C3 as model_module 17 | 18 | def test_model(): 19 | with tf.Graph().as_default(): 20 | image_size = (24, 128, 128, 1) 21 | batchsize = 4 22 | nclass0, nclass1 = (4,8) 23 | nseg = 2 24 | nclass_mat = np.zeros((nclass1, nclass0), dtype=np.int32) 25 | for i1 in range(nclass1): 26 | nclass_mat[i1, i1//2] = 1 27 | print("NCLASS_MAT:\n", nclass_mat) 28 | 29 | # Switches for training/testing 30 | training = tf.placeholder(tf.bool) # T/F for batchnorm 31 | kprob = tf.placeholder(tf.float32) # Dropout prob 32 | kprob_fcn_D1 = tf.placeholder(tf.float32) 33 | kprob_fcn_D2 = tf.placeholder(tf.float32) 34 | 35 | # Input placeholders 36 | t1_x = tf.placeholder(tf.float32, shape=(None,) + image_size, name='T1_ph') 37 | t2_x = tf.placeholder(tf.float32, shape=(None,) + image_size, name='T2_ph') 38 | tc_x = tf.placeholder(tf.float32, shape=(None,) + image_size, name='TC_ph') 39 | radiomics_x = None 40 | 41 | # Model declaration 42 | model = model_module.Model(nseg_class=nseg, \ 43 | nclass0=nclass0, nclass1=nclass1, nclass_mat=nclass_mat, \ 44 | kprob=kprob, kprob_fcn_D1=kprob_fcn_D1, kprob_fcn_D2=kprob_fcn_D1) 45 | 46 | # Train 47 | yseg_train, ycls0_train, ycls1_train = model.train_fprop( \ 48 | t1_x, t2_x, tc_x, radiomics_x) 49 | # Inference 50 | yseg_test, ycls0_test, ycls1_test = model.test_fprop( \ 51 | t1_x, t2_x, tc_x, radiomics_x) 52 | 53 | y_pseg, y0_pred, y1_pred = tf.cond(training, \ 54 | lambda: (yseg_train, ycls0_train, ycls1_train), \ 55 | lambda: (yseg_test , ycls0_test , ycls1_test)) 56 | 57 | # Initialize session 58 | config_proto = tf.ConfigProto(allow_soft_placement = True) 59 | config_proto.gpu_options.allow_growth = True 60 | #config_proto.gpu_options.per_process_gpu_memory_fraction = 0.95 61 | config_proto.log_device_placement = False 62 | sess = tf.Session(config = config_proto) 63 | 
sess.run(tf.global_variables_initializer()) 64 | sess.run(tf.local_variables_initializer()) 65 | 66 | # Train 67 | 68 | t1_sb = np.random.rand(*((batchsize,) + image_size)) 69 | t2_sb = np.random.rand(*((batchsize,) + image_size)) 70 | tc_sb = np.random.rand(*((batchsize,) + image_size)) 71 | 72 | feedDictt = {training: True, kprob: 1.0, kprob_fcn_D1: 0.8, \ 73 | kprob_fcn_D1: 0.8, t1_x: t1_sb, t2_x: t2_sb, tc_x: tc_sb} 74 | feedDictv = {training: False, kprob: 1.0, kprob_fcn_D1: 1.0, \ 75 | kprob_fcn_D1: 1.0, t1_x: t1_sb, t2_x: t2_sb, tc_x: tc_sb} 76 | 77 | # Training graph 78 | y_pseg_sb, y0_pred_sb, y1_pred_sb = sess.run( \ 79 | [y_pseg, y0_pred, y1_pred], feed_dict=feedDictt) 80 | print("TRAINING:") 81 | print("\tY_SEG SHAPE: ", y_pseg_sb.shape) 82 | print("\tY0_CLS SHAPE: ", y0_pred_sb.shape) 83 | print("\tY1_CLS SHAPE: ", y1_pred_sb.shape) 84 | sys.stdout.flush() 85 | assert y_pseg_sb.shape == (batchsize,) + image_size[:-1] + (nseg,), \ 86 | "ERROR: Wrong Y_SEG shape in output" 87 | assert y0_pred_sb.shape == (batchsize, nclass0), \ 88 | "ERROR: Wrong Y0_CLS shape in output" 89 | assert y1_pred_sb.shape == (batchsize, nclass1), \ 90 | "ERROR: Wrong Y1_CLS shape in output" 91 | 92 | # Inference graph 93 | y_pseg_sb, y0_pred_sb, y1_pred_sb = sess.run( \ 94 | [y_pseg, y0_pred, y1_pred], feed_dict=feedDictv) 95 | print("INFERENCE:") 96 | print("\tY_SEG SHAPE: ", y_pseg_sb.shape) 97 | print("\tY0_CLS SHAPE: ", y0_pred_sb.shape) 98 | print("\tY1_CLS SHAPE: ", y1_pred_sb.shape) 99 | sys.stdout.flush() 100 | assert y_pseg_sb.shape == (batchsize,) + image_size[:-1] + (nseg,), \ 101 | "ERROR: Wrong Y_SEG shape in output" 102 | assert y0_pred_sb.shape == (batchsize, nclass0), \ 103 | "ERROR: Wrong Y0_CLS shape in output" 104 | assert y1_pred_sb.shape == (batchsize, nclass1), \ 105 | "ERROR: Wrong Y1_CLS shape in output" 106 | 107 | if __name__ == '__main__': 108 | test_model() 109 | -------------------------------------------------------------------------------- 
/test/models_zoo/aibraintumormodel/model_C5XS_test.py: -------------------------------------------------------------------------------- 1 | # Python2 compatibility 2 | from __future__ import print_function 3 | 4 | import numpy as np 5 | import warnings 6 | import tensorflow as tf 7 | 8 | import sys 9 | try: 10 | from tensorgraph.models_zoo.aibraintumormodel.nn.model \ 11 | import model_C5XS as model_module 12 | except: 13 | warnings.warn("WARNING: Unable to locate model_C5XS in TensorGraph package. Attempting relative path import") 14 | model_path = "../../../tensorgraph/models_zoo/" 15 | sys.path.append(model_path) 16 | from aibraintumormodel.nn.model import model_C5XS as model_module 17 | 18 | def test_model(): 19 | with tf.Graph().as_default(): 20 | image_size = (24, 128, 128, 1) 21 | batchsize = 4 22 | nclass0, nclass1 = (4,8) 23 | nseg = 2 24 | nclass_mat = np.zeros((nclass1, nclass0), dtype=np.int32) 25 | for i1 in range(nclass1): 26 | nclass_mat[i1, i1//2] = 1 27 | print("NCLASS_MAT:\n", nclass_mat) 28 | 29 | # Switches for training/testing 30 | training = tf.placeholder(tf.bool) # T/F for batchnorm 31 | kprob = tf.placeholder(tf.float32) # Dropout prob 32 | kprob_fcn_D1 = tf.placeholder(tf.float32) 33 | kprob_fcn_D2 = tf.placeholder(tf.float32) 34 | 35 | # Input placeholders 36 | t1_x = tf.placeholder(tf.float32, shape=(None,) + image_size, name='T1_ph') 37 | t2_x = tf.placeholder(tf.float32, shape=(None,) + image_size, name='T2_ph') 38 | tc_x = tf.placeholder(tf.float32, shape=(None,) + image_size, name='TC_ph') 39 | radiomics_x = None 40 | 41 | # Model declaration 42 | model = model_module.Model(nseg_class=nseg, \ 43 | nclass0=nclass0, nclass1=nclass1, nclass_mat=nclass_mat, \ 44 | kprob=kprob, kprob_fcn_D1=kprob_fcn_D1, kprob_fcn_D2=kprob_fcn_D1) 45 | 46 | # Train 47 | yseg_train, ycls0_train, ycls1_train = model.train_fprop( \ 48 | t1_x, t2_x, tc_x, radiomics_x) 49 | # Inference 50 | yseg_test, ycls0_test, ycls1_test = model.test_fprop( \ 51 | t1_x, 
t2_x, tc_x, radiomics_x) 52 | 53 | y_pseg, y0_pred, y1_pred = tf.cond(training, \ 54 | lambda: (yseg_train, ycls0_train, ycls1_train), \ 55 | lambda: (yseg_test , ycls0_test , ycls1_test)) 56 | 57 | # Initialize session 58 | config_proto = tf.ConfigProto(allow_soft_placement = True) 59 | config_proto.gpu_options.allow_growth = True 60 | #config_proto.gpu_options.per_process_gpu_memory_fraction = 0.95 61 | config_proto.log_device_placement = False 62 | sess = tf.Session(config = config_proto) 63 | sess.run(tf.global_variables_initializer()) 64 | sess.run(tf.local_variables_initializer()) 65 | 66 | # Train 67 | 68 | t1_sb = np.random.rand(*((batchsize,) + image_size)) 69 | t2_sb = np.random.rand(*((batchsize,) + image_size)) 70 | tc_sb = np.random.rand(*((batchsize,) + image_size)) 71 | 72 | feedDictt = {training: True, kprob: 1.0, kprob_fcn_D1: 0.8, \ 73 | kprob_fcn_D1: 0.8, t1_x: t1_sb, t2_x: t2_sb, tc_x: tc_sb} 74 | feedDictv = {training: False, kprob: 1.0, kprob_fcn_D1: 1.0, \ 75 | kprob_fcn_D1: 1.0, t1_x: t1_sb, t2_x: t2_sb, tc_x: tc_sb} 76 | 77 | # Training graph 78 | y_pseg_sb, y0_pred_sb, y1_pred_sb = sess.run( \ 79 | [y_pseg, y0_pred, y1_pred], feed_dict=feedDictt) 80 | print("TRAINING:") 81 | print("\tY_SEG SHAPE: ", y_pseg_sb.shape) 82 | print("\tY0_CLS SHAPE: ", y0_pred_sb.shape) 83 | print("\tY1_CLS SHAPE: ", y1_pred_sb.shape) 84 | sys.stdout.flush() 85 | assert y_pseg_sb.shape == (batchsize,) + image_size[:-1] + (nseg,), \ 86 | "ERROR: Wrong Y_SEG shape in output" 87 | assert y0_pred_sb.shape == (batchsize, nclass0), \ 88 | "ERROR: Wrong Y0_CLS shape in output" 89 | assert y1_pred_sb.shape == (batchsize, nclass1), \ 90 | "ERROR: Wrong Y1_CLS shape in output" 91 | 92 | # Inference graph 93 | y_pseg_sb, y0_pred_sb, y1_pred_sb = sess.run( \ 94 | [y_pseg, y0_pred, y1_pred], feed_dict=feedDictv) 95 | print("INFERENCE:") 96 | print("\tY_SEG SHAPE: ", y_pseg_sb.shape) 97 | print("\tY0_CLS SHAPE: ", y0_pred_sb.shape) 98 | print("\tY1_CLS SHAPE: ", 
y1_pred_sb.shape) 99 | sys.stdout.flush() 100 | assert y_pseg_sb.shape == (batchsize,) + image_size[:-1] + (nseg,), \ 101 | "ERROR: Wrong Y_SEG shape in output" 102 | assert y0_pred_sb.shape == (batchsize, nclass0), \ 103 | "ERROR: Wrong Y0_CLS shape in output" 104 | assert y1_pred_sb.shape == (batchsize, nclass1), \ 105 | "ERROR: Wrong Y1_CLS shape in output" 106 | 107 | if __name__ == '__main__': 108 | test_model() 109 | -------------------------------------------------------------------------------- /test/models_zoo/aibraintumormodel/model_CR_test.py: -------------------------------------------------------------------------------- 1 | # Python2 compatibility 2 | from __future__ import print_function 3 | 4 | import numpy as np 5 | import warnings 6 | import tensorflow as tf 7 | 8 | import sys 9 | try: 10 | from tensorgraph.models_zoo.aibraintumormodel.nn.model \ 11 | import model_CR as model_module 12 | except: 13 | warnings.warn("WARNING: Unable to locate model_CR in TensorGraph package. 
Attempting relative path import") 14 | model_path = "../../../tensorgraph/models_zoo/" 15 | sys.path.append(model_path) 16 | from aibraintumormodel.nn.model import model_CR as model_module 17 | 18 | def test_model(): 19 | with tf.Graph().as_default(): 20 | image_size = (24, 128, 128, 1) 21 | rmics_size = (24, 128, 128, 8) 22 | batchsize = 4 23 | nclass0, nclass1 = (4,8) 24 | nseg = 2 25 | nclass_mat = np.zeros((nclass1, nclass0), dtype=np.float32) 26 | for i1 in range(nclass1): 27 | nclass_mat[i1, i1//2] = 1 28 | print("NCLASS_MAT:\n", nclass_mat) 29 | 30 | # Switches for training/testing 31 | training = tf.placeholder(tf.bool) # T/F for batchnorm 32 | kprob = tf.placeholder(tf.float32) # Dropout prob 33 | kprob_fcn_D1 = tf.placeholder(tf.float32) 34 | kprob_fcn_D2 = tf.placeholder(tf.float32) 35 | 36 | # Input placeholders 37 | t1_x = tf.placeholder(tf.float32, shape=(None,) + image_size, name='T1_ph') 38 | t2_x = tf.placeholder(tf.float32, shape=(None,) + image_size, name='T2_ph') 39 | tc_x = tf.placeholder(tf.float32, shape=(None,) + image_size, name='TC_ph') 40 | radiomics_x = tf.placeholder(tf.float32, shape=(None,) + rmics_size, \ 41 | name='RADIOMICS_ph') 42 | 43 | # Model declaration 44 | model = model_module.Model(nseg_class=nseg, \ 45 | nclass0=nclass0, nclass1=nclass1, nclass_mat=nclass_mat, \ 46 | kprob=kprob, kprob_fcn_D1=kprob_fcn_D1, kprob_fcn_D2=kprob_fcn_D1) 47 | 48 | # Train 49 | yseg_train, ycls0_train, ycls1_train = model.train_fprop( \ 50 | t1_x, t2_x, tc_x, radiomics_x) 51 | # Inference 52 | yseg_test, ycls0_test, ycls1_test = model.test_fprop( \ 53 | t1_x, t2_x, tc_x, radiomics_x) 54 | 55 | y_pseg, y0_pred, y1_pred = tf.cond(training, \ 56 | lambda: (yseg_train, ycls0_train, ycls1_train), \ 57 | lambda: (yseg_test , ycls0_test , ycls1_test)) 58 | 59 | # Initialize session 60 | config_proto = tf.ConfigProto(allow_soft_placement = True) 61 | config_proto.gpu_options.allow_growth = True 62 | 
#config_proto.gpu_options.per_process_gpu_memory_fraction = 0.95 63 | config_proto.log_device_placement = False 64 | sess = tf.Session(config = config_proto) 65 | sess.run(tf.global_variables_initializer()) 66 | sess.run(tf.local_variables_initializer()) 67 | 68 | # Train 69 | 70 | t1_sb = np.random.rand(*((batchsize,) + image_size)) 71 | t2_sb = np.random.rand(*((batchsize,) + image_size)) 72 | tc_sb = np.random.rand(*((batchsize,) + image_size)) 73 | rmics_sb = np.random.rand(*((batchsize,) + rmics_size)) 74 | 75 | feedDictt = {training: True, kprob: 1.0, kprob_fcn_D1: 0.8, \ 76 | kprob_fcn_D1: 0.8, t1_x: t1_sb, t2_x: t2_sb, tc_x: tc_sb, \ 77 | radiomics_x: rmics_sb} 78 | feedDictv = {training: False, kprob: 1.0, kprob_fcn_D1: 1.0, \ 79 | kprob_fcn_D1: 1.0, t1_x: t1_sb, t2_x: t2_sb, tc_x: tc_sb, \ 80 | radiomics_x: rmics_sb} 81 | 82 | # Training graph 83 | y_pseg_sb, y0_pred_sb, y1_pred_sb = sess.run( \ 84 | [y_pseg, y0_pred, y1_pred], feed_dict=feedDictt) 85 | print("TRAINING:") 86 | print("\tY_SEG SHAPE: ", y_pseg_sb.shape) 87 | print("\tY0_CLS SHAPE: ", y0_pred_sb.shape) 88 | print("\tY1_CLS SHAPE: ", y1_pred_sb.shape) 89 | sys.stdout.flush() 90 | assert y_pseg_sb.shape == (batchsize,) + image_size[:-1] + (nseg,), \ 91 | "ERROR: Wrong Y_SEG shape in output" 92 | assert y0_pred_sb.shape == (batchsize, nclass0), \ 93 | "ERROR: Wrong Y0_CLS shape in output" 94 | assert y1_pred_sb.shape == (batchsize, nclass1), \ 95 | "ERROR: Wrong Y1_CLS shape in output" 96 | 97 | # Inference graph 98 | y_pseg_sb, y0_pred_sb, y1_pred_sb = sess.run( \ 99 | [y_pseg, y0_pred, y1_pred], feed_dict=feedDictv) 100 | print("INFERENCE:") 101 | print("\tY_SEG SHAPE: ", y_pseg_sb.shape) 102 | print("\tY0_CLS SHAPE: ", y0_pred_sb.shape) 103 | print("\tY1_CLS SHAPE: ", y1_pred_sb.shape) 104 | sys.stdout.flush() 105 | assert y_pseg_sb.shape == (batchsize,) + image_size[:-1] + (nseg,), \ 106 | "ERROR: Wrong Y_SEG shape in output" 107 | assert y0_pred_sb.shape == (batchsize, nclass0), \ 108 | 
"ERROR: Wrong Y0_CLS shape in output" 109 | assert y1_pred_sb.shape == (batchsize, nclass1), \ 110 | "ERROR: Wrong Y1_CLS shape in output" 111 | 112 | if __name__ == '__main__': 113 | test_model() 114 | -------------------------------------------------------------------------------- /test/models_zoo/aibraintumormodel/resources/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hycis/TensorGraph/78efa6ced08c456e67bac910cdda605741b610ad/test/models_zoo/aibraintumormodel/resources/__init__.py -------------------------------------------------------------------------------- /test/models_zoo/aibraintumormodel/resources/run_mpi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | devices=$1 3 | 4 | num=$(echo $1 | awk -F',' '{print NF}') 5 | python=$2 6 | script=$3 7 | args="${@:4}" 8 | 9 | echo "HOST : "$(hostname) 10 | echo "DEVICES : "$devices 11 | echo "NPROC : "$num 12 | CUDA_DEVICE_ORDER=PCI_BUS_ID CUDA_VISIBLE_DEVICES=$devices \ 13 | mpirun -np $num -H localhost:$num -bind-to none -map-by slot \ 14 | -mca pml ob1 -mca btl ^openib \ 15 | -mca orte_base_help_aggregate 0 \ 16 | -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \ 17 | $python $script $args 18 | -------------------------------------------------------------------------------- /test/models_zoo/aibraintumormodel/resources/test_train.ini: -------------------------------------------------------------------------------- 1 | # Config file for training. 
All params must be in the [train] section 2 | # Model C5XS 3 | # - No radiomics 4 | # - BG/water/tumor segmentation mask 5 | # - Small & big classes 6 | [train] 7 | testing = True 8 | 9 | # Model 10 | model_module = "nn.model.model_C5XS" 11 | model_scope = "model_C5XS" 12 | 13 | # Data 14 | data_module = "nn.data.data_train_hvd" 15 | anatomies = [] 16 | data_test = True 17 | biopsy_only = True 18 | mask_only = True 19 | water_mask = True 20 | series_kprob = [1.0, 1.0, 1.0, 0] 21 | series_val = [1, 1, 1, 0] 22 | radiomics = False 23 | clsmatcoeff = (-0.5, 1.0) 24 | train_valid_seed = 1111 25 | 26 | # Continuation params 27 | continuation = False 28 | restore = False 29 | restore_seg = True 30 | restore_cls = True 31 | save = False 32 | 33 | # Model training parameters 34 | batchsize = 2 35 | keep_prob = 1.0 36 | keep_prob_fcn_D1 = 0.7 37 | keep_prob_fcn_D2 = 0.7 38 | 39 | # Training parameters 40 | optimizer = "adam" 41 | max_epochs = 1 42 | train_seg = True 43 | train_cls = True 44 | big_cls = True 45 | small_cls = True 46 | min_learning_rate = 0.00005 47 | max_learning_rate = 0.0003 48 | learning_rate_decay_step = 25000 49 | learning_rate_decay_rate = 0.995 50 | learning_rate_epochsize = 8 51 | learning_range_decay = True 52 | learning_range_decay_rate = 0.8 53 | seg_loss_coefficient = 4.0 54 | cls_loss_coefficient = 1.0 55 | l2_regularizer = True 56 | l2_weight_decay = 0.0002 57 | distributed_batchnorm = True 58 | batch_renorm = False 59 | renorm_rmax = (1.0, 3.0, 10000, 100000) 60 | renorm_dmax = (0.0, 5.0, 10000, 60000) 61 | 62 | # Reporting parameters 63 | report_every_nsteps = 500 64 | save_every_nsteps = 1000 65 | validate_every_nepoch = -1 66 | save_out_every_nepoch = 10 67 | sel_threshold = (0, 1.0) 68 | out_res_frac = 0.1 69 | 70 | # I/O paths 71 | save_path = "" 72 | restore_path = "" 73 | log_path = "" 74 | out_res_path = "" 75 | -------------------------------------------------------------------------------- 
/test/models_zoo/aibraintumormodel/resources/test_train_ref.txt: -------------------------------------------------------------------------------- 1 | TEST RUNMODEL INITIALIZATION RANK 0 2 | TEST RUNMODEL INITIALIZATION RANK 1 3 | TEST TRAINONEEPOCH RANK 0 4 | TEST TRAINONEEPOCH TOTAL 4 5 | TEST TRAINONEEPOCH RANK 1 6 | TEST TRAINONEEPOCH CLS0 (2, 19) 7 | TEST TRAINONEEPOCH SEGP (2, 24, 128, 128, 3) 8 | TEST VALIDONEEPOCH RANK 0 9 | TEST VALIDONEEPOCH RANK 1 10 | TEST VALIDONEEPOCH TOTAL 2 11 | TEST VALIDONEEPOCH CLS0 (1, 19) 12 | TEST VALIDONEEPOCH SEGP (1, 24, 128, 128, 3) 13 | TEST MAINTRAIN DONE RANK 1 14 | TEST MAINTRAIN DONE RANK 0 15 | -------------------------------------------------------------------------------- /test/models_zoo/airnet_test.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import tensorflow as tf 4 | from tensorgraph.models_zoo.airnet.model import AIRNet 5 | from tensorgraph.models_zoo.airnet.train import train 6 | 7 | 8 | def test_model(): 9 | X1_train = np.random.rand(5, 8, 160, 160, 1) 10 | X2_train = np.random.rand(5, 8, 160, 160, 1) 11 | y_train = np.random.rand(5, 12) 12 | _, d, h, w, c = X1_train.shape 13 | _, n = y_train.shape 14 | 15 | with tf.Graph().as_default(): 16 | X1_ph = tf.placeholder('float32', [None, d, h, w, c]) 17 | X2_ph = tf.placeholder('float32', [None, d, h, w, c]) 18 | y_ph = tf.placeholder('float32', [None, n]) 19 | 20 | seq = AIRNet() 21 | train(seq, X1_ph, X2_ph, y_ph, X1_train, X2_train, y_train) 22 | 23 | 24 | if __name__ == '__main__': 25 | test_model() 26 | -------------------------------------------------------------------------------- /test/models_zoo/attention_unet_test.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import tensorflow as tf 4 | from tensorgraph.models_zoo.attention_unet.model import Attention_UNet 5 | from tensorgraph.models_zoo.attention_unet.train import 
train 6 | 7 | 8 | def test_model(): 9 | X_train = np.random.rand(5, 128, 128, 1) 10 | y_train = np.random.rand(5, 128, 128, 1) 11 | _, h, w, c = X_train.shape 12 | 13 | with tf.Graph().as_default(): 14 | X_ph = tf.placeholder('float32', [None, h, w, c]) 15 | y_ph = tf.placeholder('float32', [None, h, w, c]) 16 | 17 | seq = Attention_UNet(input_shape=(h, w, c)) 18 | train(seq, X_ph, y_ph, X_train, y_train) 19 | 20 | if __name__ == '__main__': 21 | test_model() 22 | -------------------------------------------------------------------------------- /test/models_zoo/densenet_test.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import tensorflow as tf 4 | from tensorgraph.models_zoo.densenet.model import MyDenseNet 5 | from tensorgraph.models_zoo.densenet.train import train 6 | 7 | 8 | def test_model(): 9 | X_train = np.random.rand(5, 32, 32, 1) 10 | y_train = np.random.rand(5, 1) 11 | _, h, w, c = X_train.shape 12 | _, nclass = y_train.shape 13 | 14 | with tf.Graph().as_default(): 15 | X_ph = tf.placeholder('float32', [None, h, w, c]) 16 | y_ph = tf.placeholder('float32', [None, nclass]) 17 | 18 | seq = MyDenseNet(nclass) 19 | train(seq, X_ph, y_ph, X_train, y_train) 20 | 21 | 22 | if __name__ == '__main__': 23 | test_model() 24 | -------------------------------------------------------------------------------- /test/models_zoo/dilated_unet_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | dirt = sys.path[0] 4 | dirt = os.path.abspath(os.path.join(dirt, '../..')) 5 | sys.path.insert(0, dirt) 6 | import numpy as np 7 | import tensorflow as tf 8 | from tensorgraph.models_zoo.echocardiac.dilated_unet.model import Dilated_Unet 9 | from tensorgraph.models_zoo.echocardiac.dilated_unet.train import train 10 | 11 | 12 | def test_model(): 13 | X_train = np.random.rand(5, 128, 128, 1) 14 | y_train = np.random.rand(5, 128, 128, 1) 15 | _, h, w, 
c = X_train.shape 16 | 17 | with tf.Graph().as_default(): 18 | X_ph = tf.placeholder('float32', [None, h, w, c]) 19 | y_ph = tf.placeholder('float32', [None, h, w, c]) 20 | seq = Dilated_Unet() 21 | train(seq, X_ph, y_ph, X_train, y_train) 22 | 23 | if __name__ == '__main__': 24 | test_model() 25 | -------------------------------------------------------------------------------- /test/models_zoo/hed_modified_test.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import tensorflow as tf 4 | from tensorgraph.models_zoo.hed_modified.model import HED_Modified 5 | from tensorgraph.models_zoo.hed_modified.train import train 6 | 7 | 8 | def test_model(): 9 | D, H, W = 16, 64, 64 10 | X_train = np.random.rand(6, D, H, W, 1) 11 | y_train = np.random.rand(6, D, H, W, 1) 12 | _, d, h, w, c = X_train.shape 13 | 14 | with tf.Graph().as_default(): 15 | X_ph = tf.placeholder('float32', [None, d, h, w, c]) 16 | y_ph = tf.placeholder('float32', [None, d, h, w, c]) 17 | 18 | seq = HED_Modified(channels=1, side_features=1, output_shape=(D,H,W), output_channels=1, droprate=0.03) 19 | train(seq, X_ph, y_ph, X_train, y_train) 20 | 21 | 22 | if __name__ == '__main__': 23 | test_model() 24 | -------------------------------------------------------------------------------- /test/models_zoo/heteronet_test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | import tensorflow as tf 4 | from tensorgraph.models_zoo.heteronet.model import HeteroNet 5 | from tensorgraph.models_zoo.heteronet.train import HeteroTrain 6 | 7 | def test_heteromodel(): 8 | tf.reset_default_graph() 9 | model = HeteroNet() 10 | X_ph = tf.placeholder(tf.float32, [1, 4, 320,320,1]) 11 | y_ph = tf.placeholder(tf.float32, [1, 30]) 12 | 13 | Model = HeteroTrain(model, X_ph, X_ph, X_ph, y_ph) 14 | print('training') 15 | for i in range(15): 16 | X_train = np.random.rand(1, 4, 320,320,1) 17 | 
y_train = np.random.randint(0,2,[1,30]) 18 | Model.train(feed_dict={X_ph:X_train, y_ph:y_train}) 19 | 20 | if __name__ == '__main__': 21 | test_heteromodel() 22 | -------------------------------------------------------------------------------- /test/models_zoo/image_search_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorgraph.models_zoo.image_search.model import Image_Search_Model 4 | from tensorgraph.models_zoo.image_search.train import train 5 | 6 | 7 | def test_model(): 8 | X_train = np.random.rand(8, 24, 160, 160, 5) #the total number is 8, should be very big. 5 is the channels number(for t1,t2,dwi,etc) 9 | y_train = np.random.randint(low=0, high=5, size=8) #the labels: 0,1,2,3,4 (means totally 5 kinds of diseases) 10 | _, d, h, w, c = X_train.shape 11 | with tf.Graph().as_default(): 12 | X_ph = tf.placeholder('float32', [None, d, h, w, c]) 13 | y_ph = tf.placeholder('float32', [None]) 14 | seq = Image_Search_Model() 15 | train(seq, X_ph, y_ph, X_train, y_train) 16 | 17 | 18 | if __name__ == '__main__': 19 | test_model() 20 | 21 | 22 | -------------------------------------------------------------------------------- /test/models_zoo/unet_test.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import tensorflow as tf 4 | from tensorgraph.models_zoo.unet.model import Unet 5 | from tensorgraph.models_zoo.unet.train import train 6 | 7 | 8 | def test_model(): 9 | X_train = np.random.rand(5, 512, 512, 1) 10 | y_train = np.random.rand(5, 512, 512, 1) 11 | nclass = 1 12 | _, h, w, c = X_train.shape 13 | with tf.Graph().as_default(): 14 | X_ph = tf.placeholder('float32', [None, h, w, c]) 15 | y_ph = tf.placeholder('float32', [None, h, w, c]) 16 | seq = Unet(nclass=nclass) 17 | train(seq, X_ph, y_ph, X_train, y_train) 18 | 19 | 20 | if __name__ == '__main__': 21 | test_model() 22 | 
-------------------------------------------------------------------------------- /test/utils_test.py: -------------------------------------------------------------------------------- 1 | from tensorgraph.utils import MakeTFRecords,MakeTFRecords_tfdata 2 | import numpy as np 3 | import tensorflow as tf 4 | 5 | 6 | def test_make_tfrecords(): 7 | tfrecords = MakeTFRecords() 8 | data_records = {'X':np.random.rand(100,50,30), 'y':np.random.rand(100,10)} 9 | save_path = './arr.tfrecords' 10 | tfrecords.make_tfrecords_from_arrs(data_records, save_path) 11 | arrs = tfrecords.read_arrs_from_tfrecords(save_path, data_shapes={'X':[50,30], 'y':[10]}) 12 | for records in arrs: 13 | for record in records: 14 | print(record.shape) 15 | print('\n') 16 | 17 | 18 | def test_fetch_queue_tfrecords(): 19 | tfrecords = MakeTFRecords() 20 | tfrecords_filename = './arr.tfrecords' 21 | names_records = tfrecords.read_and_decode([tfrecords_filename], 22 | batch_size=1, 23 | data_shapes={'X':[50,30], 'y':[10]}) 24 | init_op = tf.group(tf.global_variables_initializer(), 25 | tf.local_variables_initializer()) 26 | with tf.Session() as sess: 27 | sess.run(init_op) 28 | coord = tf.train.Coordinator() 29 | threads = tf.train.start_queue_runners(coord=coord) 30 | for i in range(3): 31 | for name, record in names_records: 32 | arr = sess.run(record) 33 | print(name) 34 | print(arr.shape) 35 | print('\n') 36 | coord.request_stop() 37 | coord.join(threads) 38 | 39 | def test_make_tfrecords_tfdata(): 40 | tfrecords = MakeTFRecords_tfdata() 41 | data_records = {'X':np.random.rand(100,50,30), 'y':np.random.rand(100,10),'name':['a']*20+['b']*20+['c']*20+['d']*40} 42 | save_path = './arr.tfrecords' 43 | tfrecords.make_tfrecords_from_arrs(data_records, save_path,[np.float32,np.float32,str]) 44 | print('successfully created tfrecords.') 45 | 46 | def test_fetch_queue_tfrecords_tfdata(): 47 | tfrecords = MakeTFRecords_tfdata() 48 | tfrecords_filename = './arr.tfrecords' 49 | 50 | data_records = 
{'X':np.random.rand(100,50,30), 'y':np.random.rand(100,10),'name':['a']*20+['b']*20+['c']*20+['d']*40} 51 | tfrecords.make_tfrecords_from_arrs(data_records, tfrecords_filename,[np.float32,np.float32,str]) 52 | 53 | num_epochs = 3 54 | batch_size = 10 55 | n_train = sum(1 for _ in tf.python_io.tf_record_iterator(tfrecords_filename)) 56 | it,element = tfrecords.read_tensor_from_tfrecords(n_train, 57 | tfrecords_filenames=[tfrecords_filename], 58 | data_shape={'X':[50,30], 'y':[10],'name':None}, 59 | dtypes=[tf.float32,tf.float32,str], 60 | batch_size=batch_size,num_epochs=num_epochs,phase='training') 61 | with tf.Session() as sess: 62 | sess.run(it.initializer) 63 | batches = 100/batch_size 64 | keys = element.keys() 65 | for i in range(num_epochs): 66 | for j in range(int(batches)): 67 | arrs =sess.run([element[k] for k in keys]) 68 | for index, key in enumerate(keys): 69 | print (key, arrs[index].shape) 70 | print('\n') 71 | 72 | 73 | if __name__ == '__main__': 74 | test_make_tfrecords() 75 | test_fetch_queue_tfrecords() 76 | test_make_tfrecords_tfdata() 77 | test_fetch_queue_tfrecords_tfdata() 78 | --------------------------------------------------------------------------------