├── .dockerignore ├── .gitignore ├── .gitmodules ├── 3rd-party └── dcm2niix ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── kaggle.png ├── process.sh ├── src-old ├── adsb3.py ├── apss.h ├── apss.tpp ├── balltree.cpp ├── balltree.h ├── dcm2niix ├── gallery.py ├── implicits.h ├── mesh.h ├── mesh.py ├── mlssurface.h ├── mlssurface.tpp ├── models │ ├── ft │ │ ├── 200000.data-00000-of-00001 │ │ ├── 200000.index │ │ └── 200000.meta │ ├── nodule │ │ └── axial │ │ │ ├── 200000.data-00000-of-00001 │ │ │ ├── 200000.index │ │ │ └── 200000.meta │ └── score ├── papaya.py ├── process.py ├── pyadsb3.cpp ├── run.sh ├── setup.py ├── static │ ├── Detector.js │ ├── OBJLoader.js │ ├── PLYLoader.js │ ├── TransformControls.js │ ├── papaya.css │ ├── papaya.js │ ├── three.js │ └── three_index.html ├── templates │ ├── case.html │ ├── gallery.html │ ├── index.html │ ├── papaya_case.html │ ├── papaya_index.html │ ├── papaya_lymph_case.html │ └── plumo.html └── three.py └── src ├── README ├── adsb3.py ├── adsb3_cache_all_ft.sh ├── adsb3_cache_ft.py ├── adsb3_cache_mask.py ├── adsb3_eval.py ├── data └── adsb3 │ ├── stage1_labels.csv │ ├── stage1_public.csv │ ├── stage2_private.csv │ └── stage2_public.csv ├── dump_fts.py ├── mesh.py ├── plumo.cpp ├── plumo.py └── setup.py /.dockerignore: -------------------------------------------------------------------------------- 1 | test1 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python 
script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "vcglib"] 2 | path = vcglib 3 | url = https://github.com/cnr-isti-vclab/vcglib.git 4 | -------------------------------------------------------------------------------- /3rd-party/dcm2niix: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/3rd-party/dcm2niix -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | from tensorflow/tensorflow:0.12.1-gpu 2 | RUN apt-get update && apt-get install -y libboost-all-dev libopencv-dev libgoogle-glog-dev git python-opencv 3 | RUN pip install tqdm scikit_image==0.12.3 scikit-learn pydicom Pillow 
SimpleITK 4 | RUN pip install xgboost 5 | RUN rm -rf /usr/local/include/Eigen 6 | ADD 3rd-party/dcm2niix /usr/local/bin/dcm2niix 7 | ADD vcglib/vcg /usr/local/include/vcg 8 | ADD vcglib/wrap /usr/local/include/wrap 9 | ADD vcglib/eigenlib/Eigen /usr/local/include/Eigen 10 | ADD vcglib/eigenlib /usr/local/include/eigenlib 11 | ADD src-old /adsb3 12 | RUN cd /adsb3 && python setup.py build && sudo python setup.py install 13 | 14 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2017, Wei Dong 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | git submodule init 3 | git submodule update 4 | nvidia-docker build -t aaalgo/plumo . 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Plumo: Lung Cancer Detector for Data Science Bowl 2017 2 | 3 | Author: Wei Dong (wdong@wdong.org), Yuanfang Guan (yuanfang.guan@gmail.com) 4 | 5 | https://www.kaggle.com/c/data-science-bowl-2017/leaderboard 6 | 7 | ![image](kaggle.png) 8 | 9 | This repository contains the dockerized version of my best 10 | single (non-ensembled) lung cancer model for Data Science Bowl 2017 11 | (cross validation loss around 0.425). 12 | 13 | [Sample output](https://www.aaalgo.com/demos/plumo/plumo-example/) 14 | 15 | You need nvidia-docker to run the program. 16 | 17 | ``` 18 | Usage: 19 | 20 | ./process.sh input output 21 | 22 | input: an input directory containing dicom files (input/*.dcm). 23 | output: output directory where results (HTML) are written to. 24 | ``` 25 | Output format is designed to be viewed with a web browser on the local 26 | machine. 
Because the output contains all dicom files and a 3D lung mesh 27 | model and is quite large, it is not suitable to be served on a web 28 | server. 29 | 30 | More models and training code are upcoming. 31 | 32 | ![](http://www.aaalgo.com/demos/plumo/plumo1.png) 33 | ![](http://www.aaalgo.com/demos/plumo/plumo2.png) 34 | ![](http://www.aaalgo.com/demos/plumo/plumo3.png) 35 | 36 | (Images and samples from the SPIE dataset.) 37 | -------------------------------------------------------------------------------- /kaggle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/kaggle.png -------------------------------------------------------------------------------- /process.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if which nvidia-docker > /dev/null 4 | then 5 | true 6 | else 7 | echo nvidia-docker not installed 8 | echo https://github.com/NVIDIA/nvidia-docker 9 | exit 10 | fi 11 | 12 | if nvidia-docker inspect aaalgo/plumo >& /dev/null 13 | then 14 | true 15 | else 16 | echo "Downloading aaalgo/plumo docker image, please be patient...." 17 | nvidia-docker pull aaalgo/plumo 18 | fi 19 | 20 | INPUT=$1 21 | OUTPUT=$2 22 | 23 | if [ -z "$OUTPUT" ] 24 | then 25 | echo Usage: 26 | echo " process.sh INPUT OUTPUT" 27 | echo 28 | echo "INPUT: directory contain dcm files" 29 | echo "OUTPUT: output directory" 30 | exit 31 | fi 32 | 33 | if [ ! -d "$INPUT" ] 34 | then 35 | echo Input directory $INPUT does not exist. 
36 | exit 37 | fi 38 | 39 | 40 | #if [ -e "$OUTPUT" ] 41 | #then 42 | # echo Output directory $OUTPUT exists, not overwriting 43 | # exit 44 | #fi 45 | 46 | 47 | INPUT=`readlink -e $INPUT` 48 | 49 | if echo "$OUTPUT" | grep -v '^/' 50 | then 51 | OUTPUT=$PWD/$OUTPUT 52 | fi 53 | 54 | MY_UID=`id -u` 55 | MY_GID=`id -g` 56 | 57 | mkdir -p $OUTPUT 58 | nvidia-docker run -u=$MY_UID:$MY_GID -v $INPUT:/input -v $OUTPUT:/output aaalgo/plumo /adsb3/run.sh 59 | 60 | 61 | -------------------------------------------------------------------------------- /src-old/apss.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | * MeshLab o o * 3 | * A versatile mesh processing toolbox o o * 4 | * _ O _ * 5 | * Copyright(C) 2005 \/)\/ * 6 | * Visual Computing Lab /\/| * 7 | * ISTI - Italian National Research Council | * 8 | * \ * 9 | * All rights reserved. * 10 | * * 11 | * This program is free software; you can redistribute it and/or modify * 12 | * it under the terms of the GNU General Public License as published by * 13 | * the Free Software Foundation; either version 2 of the License, or * 14 | * (at your option) any later version. * 15 | * * 16 | * This program is distributed in the hope that it will be useful, * 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of * 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * 19 | * GNU General Public License (http://www.gnu.org/licenses/gpl.txt) * 20 | * for more details. 
* 21 | * * 22 | ****************************************************************************/ 23 | 24 | #ifndef APSS_H 25 | #define APSS_H 26 | 27 | #include "mlssurface.h" 28 | 29 | namespace GaelMls { 30 | 31 | template 32 | class APSS : public MlsSurface<_MeshType> 33 | { 34 | typedef MlsSurface<_MeshType> Base; 35 | 36 | typedef typename Base::Scalar Scalar; 37 | typedef typename Base::VectorType VectorType; 38 | typedef typename Base::MatrixType MatrixType; 39 | typedef _MeshType MeshType; 40 | using Base::mCachedQueryPointIsOK; 41 | using Base::mCachedQueryPoint; 42 | using Base::mNeighborhood; 43 | using Base::mCachedWeights; 44 | using Base::mCachedWeightDerivatives; 45 | using Base::mCachedWeightGradients; 46 | using Base::mCachedWeightSecondDerivatives; 47 | using Base::mBallTree; 48 | using Base::mPoints; 49 | using Base::mFilterScale; 50 | using Base::mMaxNofProjectionIterations; 51 | using Base::mAveragePointSpacing; 52 | using Base::mProjectionAccuracy; 53 | using Base::mGradientHint; 54 | 55 | enum Status {ASS_SPHERE, ASS_PLANE, ASS_UNDETERMINED}; 56 | 57 | public: 58 | 59 | APSS(const MeshType& m) 60 | : Base(m) 61 | { 62 | mSphericalParameter = 1; 63 | } 64 | 65 | virtual Scalar potential(const VectorType& x, int* errorMask = 0) const; 66 | virtual VectorType gradient(const VectorType& x, int* errorMask = 0) const; 67 | virtual MatrixType hessian(const VectorType& x, int* errorMask) const; 68 | virtual VectorType project(const VectorType& x, VectorType* pNormal = 0, int* errorMask = 0) const; 69 | 70 | /** \returns the approximation of the mean curvature obtained from the radius of the fitted sphere */ 71 | virtual Scalar approxMeanCurvature(const VectorType& x, int* errorMask = 0) const; 72 | 73 | void setSphericalParameter(Scalar v); 74 | 75 | protected: 76 | bool fit(const VectorType& x) const; 77 | bool mlsGradient(const VectorType& x, VectorType& grad) const; 78 | bool mlsHessian(const VectorType& x, MatrixType& hessian) const; 79 | 80 | 
protected: 81 | Scalar mSphericalParameter; 82 | 83 | // use double precision anyway 84 | typedef double LScalar; 85 | typedef vcg::Point3 LVector; 86 | 87 | // cached algebraic sphere coefficients 88 | mutable LScalar uConstant; 89 | mutable LVector uLinear; 90 | mutable LScalar uQuad; 91 | 92 | mutable LVector mCenter; 93 | mutable LScalar mRadius; 94 | mutable Status mStatus; 95 | 96 | mutable LVector mCachedSumP; 97 | mutable LVector mCachedSumN; 98 | mutable LScalar mCachedSumDotPP; 99 | mutable LScalar mCachedSumDotPN; 100 | mutable LScalar mCachedSumW; 101 | 102 | mutable LVector mCachedGradSumP[3]; 103 | mutable LVector mCachedGradSumN[3]; 104 | mutable LScalar mCachedGradSumDotPN[3]; 105 | mutable LScalar mCachedGradSumDotPP[3]; 106 | mutable LScalar mCachedGradSumW[3]; 107 | 108 | mutable LScalar mCachedGradNume[3]; 109 | mutable LScalar mCachedGradDeno[3]; 110 | 111 | mutable LScalar mCachedGradUConstant[3]; 112 | mutable LVector mCachedGradULinear[3]; 113 | mutable LScalar mCachedGradUQuad[3]; 114 | }; 115 | 116 | } 117 | 118 | //#include "apss.tpp" 119 | 120 | #endif 121 | -------------------------------------------------------------------------------- /src-old/balltree.cpp: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | * MeshLab o o * 3 | * A versatile mesh processing toolbox o o * 4 | * _ O _ * 5 | * Copyright(C) 2005 \/)\/ * 6 | * Visual Computing Lab /\/| * 7 | * ISTI - Italian National Research Council | * 8 | * \ * 9 | * All rights reserved. * 10 | * * 11 | * This program is free software; you can redistribute it and/or modify * 12 | * it under the terms of the GNU General Public License as published by * 13 | * the Free Software Foundation; either version 2 of the License, or * 14 | * (at your option) any later version. 
* 15 | * * 16 | * This program is distributed in the hope that it will be useful, * 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of * 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * 19 | * GNU General Public License (http://www.gnu.org/licenses/gpl.txt) * 20 | * for more details. * 21 | * * 22 | ****************************************************************************/ 23 | #include 24 | #include "balltree.h" 25 | 26 | namespace GaelMls { 27 | 28 | template 29 | BallTree<_Scalar>::BallTree(const vcg::ConstDataWrapper& points, const vcg::ConstDataWrapper& radii) 30 | : mPoints(points), mRadii(radii), mRadiusScale(1.), mTreeIsUptodate(false) 31 | { 32 | mRootNode = 0; 33 | mMaxTreeDepth = 12; 34 | mTargetCellSize = 24; 35 | } 36 | 37 | template 38 | void BallTree<_Scalar>::computeNeighbors(const VectorType& x, Neighborhood* pNei) const 39 | { 40 | if (!mTreeIsUptodate) 41 | const_cast(this)->rebuild(); 42 | 43 | pNei->clear(); 44 | mQueryPosition = x; 45 | queryNode(*mRootNode, pNei); 46 | } 47 | 48 | template 49 | void BallTree<_Scalar>::queryNode(Node& node, Neighborhood* pNei) const 50 | { 51 | if (node.leaf) 52 | { 53 | for (unsigned int i=0 ; iinsert(id, d2); 60 | } 61 | } 62 | else 63 | { 64 | if (mQueryPosition[node.dim] - node.splitValue < 0) 65 | queryNode(*node.children[0], pNei); 66 | else 67 | queryNode(*node.children[1], pNei); 68 | } 69 | } 70 | 71 | template 72 | inline vcg::Point3 CwiseAdd(vcg::Point3 const & p1, Scalar s) 73 | { 74 | return vcg::Point3(p1.X() + s, p1.Y() + s, p1.Z() + s); 75 | } 76 | 77 | template 78 | void BallTree<_Scalar>::rebuild(void) 79 | { 80 | delete mRootNode; 81 | 82 | mRootNode = new Node(); 83 | IndexArray indices(mPoints.size()); 84 | AxisAlignedBoxType aabb; 85 | aabb.Set(mPoints[0]); 86 | for (unsigned int i=0 ; i 99 | void BallTree<_Scalar>::split(const IndexArray& indices, const AxisAlignedBoxType& aabbLeft, const AxisAlignedBoxType& aabbRight, IndexArray& iLeft, 
IndexArray& iRight) 100 | { 101 | for (std::vector::const_iterator it=indices.begin(), end=indices.end() ; it!=end ; ++it) 102 | { 103 | unsigned int i = *it; 104 | if (vcg::PointFilledBoxDistance(mPoints[i], aabbLeft) < mRadii[i]*mRadiusScale) 105 | iLeft.push_back(i); 106 | 107 | if (vcg::PointFilledBoxDistance(mPoints[i], aabbRight) < mRadii[i]*mRadiusScale) 108 | iRight.push_back(i); 109 | } 110 | } 111 | 112 | template 113 | void BallTree<_Scalar>::buildNode(Node& node, std::vector& indices, AxisAlignedBoxType aabb, int level) 114 | { 115 | Scalar avgradius = 0.; 116 | for (std::vector::const_iterator it=indices.begin(), end=indices.end() ; it!=end ; ++it) 117 | avgradius += mRadii[*it]; 118 | avgradius = mRadiusScale * avgradius / Scalar(indices.size()); 119 | VectorType diag = aabb.max - aabb.min; 120 | if (int(indices.size()) std::max(std::max(diag.X(), diag.Y()), diag.Z()) 122 | || int(level)>=mMaxTreeDepth) 123 | { 124 | node.leaf = true; 125 | node.size = indices.size(); 126 | node.indices = new unsigned int[node.size]; 127 | for (unsigned int i=0 ; i iLeft, iRight; 142 | split(indices, aabbLeft, aabbRight, iLeft,iRight); 143 | 144 | // we don't need the index list anymore 145 | indices.clear(); 146 | 147 | { 148 | // left child 149 | //mNodes.resize(mNodes.size()+1); 150 | Node* pChild = new Node(); 151 | node.children[0] = pChild; 152 | buildNode(*pChild, iLeft, aabbLeft, level+1); 153 | } 154 | 155 | { 156 | // right child 157 | //mNodes.resize(mNodes.size()+1); 158 | Node* pChild = new Node(); 159 | node.children[1] = pChild; 160 | buildNode(*pChild, iRight, aabbRight, level+1); 161 | } 162 | } 163 | 164 | template class BallTree; 165 | template class BallTree; 166 | 167 | } 168 | -------------------------------------------------------------------------------- /src-old/balltree.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | * 
MeshLab o o * 3 | * A versatile mesh processing toolbox o o * 4 | * _ O _ * 5 | * Copyright(C) 2005 \/)\/ * 6 | * Visual Computing Lab /\/| * 7 | * ISTI - Italian National Research Council | * 8 | * \ * 9 | * All rights reserved. * 10 | * * 11 | * This program is free software; you can redistribute it and/or modify * 12 | * it under the terms of the GNU General Public License as published by * 13 | * the Free Software Foundation; either version 2 of the License, or * 14 | * (at your option) any later version. * 15 | * * 16 | * This program is distributed in the hope that it will be useful, * 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of * 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * 19 | * GNU General Public License (http://www.gnu.org/licenses/gpl.txt) * 20 | * for more details. * 21 | * * 22 | ****************************************************************************/ 23 | 24 | #ifndef BALLTREE_H 25 | #define BALLTREE_H 26 | 27 | #include 28 | #include 29 | #include 30 | 31 | namespace GaelMls { 32 | 33 | template 34 | class Neighborhood 35 | { 36 | public: 37 | typedef _Scalar Scalar; 38 | 39 | int index(int i) const { return mIndices.at(i); } 40 | Scalar squaredDistance(int i) const { return mSqDists.at(i); } 41 | 42 | void clear() { mIndices.clear(); mSqDists.clear(); } 43 | void resize(int size) { mIndices.resize(size); mSqDists.resize(size); } 44 | void reserve(int size) { mIndices.reserve(size); mSqDists.reserve(size); } 45 | int size() { return mIndices.size(); } 46 | 47 | void insert(int id, Scalar d2) { mIndices.push_back(id); mSqDists.push_back(d2); } 48 | 49 | protected: 50 | std::vector mIndices; 51 | std::vector mSqDists; 52 | }; 53 | 54 | template 55 | class BallTree 56 | { 57 | public: 58 | typedef _Scalar Scalar; 59 | typedef vcg::Point3 VectorType; 60 | 61 | BallTree(const vcg::ConstDataWrapper& points, const vcg::ConstDataWrapper& radii); 62 | 63 | void computeNeighbors(const VectorType& x, 
Neighborhood* pNei) const; 64 | 65 | void setRadiusScale(Scalar v) { mRadiusScale = v; mTreeIsUptodate = false; } 66 | 67 | protected: 68 | 69 | struct Node 70 | { 71 | ~Node() 72 | { 73 | if (!leaf) 74 | { 75 | delete children[0]; 76 | delete children[1]; 77 | } 78 | else 79 | { 80 | delete[] indices; 81 | } 82 | } 83 | Scalar splitValue; 84 | unsigned char dim:2; 85 | unsigned char leaf:1; 86 | union { 87 | Node* children[2]; 88 | struct { 89 | unsigned int* indices; 90 | unsigned int size; 91 | }; 92 | }; 93 | }; 94 | 95 | typedef std::vector IndexArray; 96 | typedef vcg::Box3 AxisAlignedBoxType; 97 | 98 | void rebuild(); 99 | void split(const IndexArray& indices, const AxisAlignedBoxType& aabbLeft, const AxisAlignedBoxType& aabbRight, 100 | IndexArray& iLeft, IndexArray& iRight); 101 | void buildNode(Node& node, std::vector& indices, AxisAlignedBoxType aabb, int level); 102 | void queryNode(Node& node, Neighborhood* pNei) const; 103 | 104 | protected: 105 | vcg::ConstDataWrapper mPoints; 106 | vcg::ConstDataWrapper mRadii; 107 | Scalar mRadiusScale; 108 | 109 | int mMaxTreeDepth; 110 | int mTargetCellSize; 111 | mutable bool mTreeIsUptodate; 112 | mutable VectorType mQueryPosition; 113 | 114 | Node* mRootNode; 115 | }; 116 | 117 | } 118 | 119 | #endif 120 | -------------------------------------------------------------------------------- /src-old/dcm2niix: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/src-old/dcm2niix -------------------------------------------------------------------------------- /src-old/gallery.py: -------------------------------------------------------------------------------- 1 | import os 2 | from jinja2 import Environment, FileSystemLoader 3 | 4 | TMPL_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 5 | './templates') 6 | env = Environment(loader=FileSystemLoader(searchpath=TMPL_DIR)) 7 | tmpl = 
env.get_template('gallery.html') 8 | 9 | class Gallery: 10 | def __init__ (self, path, cols = 1, header = None, ext = '.png'): 11 | self.next_id = 0 12 | self.path = path 13 | self.cols = cols 14 | self.header = header 15 | self.ext = ext 16 | self.images = [] 17 | try: 18 | os.makedirs(path) 19 | except: 20 | pass 21 | pass 22 | 23 | def text (self, tt, br = False): 24 | self.images.append({ 25 | 'text': tt}) 26 | if br: 27 | for i in range(1, self.cols): 28 | self.images.append({ 29 | 'text': ''}) 30 | pass 31 | 32 | def next (self, text=None, link=None): 33 | path = '%03d%s' % (self.next_id, self.ext) 34 | self.images.append({ 35 | 'image': path, 36 | 'text': text, 37 | 'link': link}) 38 | self.next_id += 1 39 | return os.path.join(self.path, path) 40 | 41 | def flush (self, temp=None, extra={}): 42 | if temp is None: 43 | temp = tmpl 44 | else: 45 | temp = env.get_template(temp) 46 | with open(os.path.join(self.path, 'index.html'), 'w') as f: 47 | images = [self.images[i:i+self.cols] for i in range(0, len(self.images), self.cols)] 48 | f.write(temp.render(images=images, header=self.header, extra=extra)) 49 | pass 50 | pass 51 | 52 | -------------------------------------------------------------------------------- /src-old/implicits.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | * VCGLib o o * 3 | * Visual and Computer Graphics Library o o * 4 | * _ O _ * 5 | * Copyright(C) 2004 \/)\/ * 6 | * Visual Computing Lab /\/| * 7 | * ISTI - Italian National Research Council | * 8 | * \ * 9 | * All rights reserved. * 10 | * * 11 | * This program is free software; you can redistribute it and/or modify * 12 | * it under the terms of the GNU General Public License as published by * 13 | * the Free Software Foundation; either version 2 of the License, or * 14 | * (at your option) any later version. 
* 15 | * * 16 | * This program is distributed in the hope that it will be useful, * 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of * 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * 19 | * GNU General Public License (http://www.gnu.org/licenses/gpl.txt) * 20 | * for more details. * 21 | * * 22 | ****************************************************************************/ 23 | 24 | #ifndef __IMPLICITS 25 | #define __IMPLICITS 26 | 27 | #include 28 | #include 29 | #include 30 | #include 31 | 32 | namespace vcg 33 | { 34 | namespace implicits 35 | { 36 | 37 | /** \returns the Gauss curvature directly from the gradient and hessian */ 38 | template 39 | Scalar GaussCurvature(const Point3& gradient, const Matrix33& hessian) 40 | { 41 | Scalar l2 = gradient.SquaredNorm(); 42 | Matrix33 adjugate; 43 | adjugate[0][0] = hessian[1][1]*hessian[2][2] - hessian[1][2]*hessian[2][1]; 44 | adjugate[1][0] = hessian[0][2]*hessian[2][1] - hessian[0][1]*hessian[2][2]; 45 | adjugate[2][0] = hessian[0][1]*hessian[1][2] - hessian[0][2]*hessian[1][1]; 46 | 47 | adjugate[0][1] = hessian[1][2]*hessian[2][0] - hessian[1][0]*hessian[2][2]; 48 | adjugate[1][1] = hessian[0][0]*hessian[2][2] - hessian[0][2]*hessian[2][0]; 49 | adjugate[2][1] = hessian[1][0]*hessian[0][2] - hessian[0][0]*hessian[1][2]; 50 | 51 | adjugate[0][2] = hessian[1][0]*hessian[2][1] - hessian[1][1]*hessian[2][0]; 52 | adjugate[1][2] = hessian[0][1]*hessian[2][0] - hessian[0][0]*hessian[2][1]; 53 | adjugate[2][2] = hessian[0][0]*hessian[1][1] - hessian[0][1]*hessian[1][0]; 54 | return ((gradient* ( adjugate * gradient))) / (l2*l2); 55 | } 56 | 57 | /** \returns the mean curvature directly from the gradient and hessian */ 58 | template 59 | Scalar MeanCurvature(const Point3& gradient, const Matrix33& hessian) 60 | { 61 | Scalar l = gradient.Norm(); 62 | return (l*l*hessian.Trace() - (gradient* (hessian * gradient))) / (2.*l*l*l); 63 | } 64 | 65 | /** This class computes the 
Weingarten map of a scalar field and provides 66 | * methods to extract curvatures from it. 67 | * 68 | * The Weingarten map is equal to the gradient of the normal vector: 69 | * \f$ W = \nabla \mathbf(n) 70 | * = \nabla \frac{\mathbf{g}}{\Vert \mathbf{g} \Vert} 71 | * = \frac{(I - n n^T) H}{\Vert \mathbf{g} \Vert} \f$ 72 | * This matrix can also be seen as the projection of the hessian 73 | * matrix onto the tangent plane of normal n. 74 | */ 75 | template class WeingartenMap 76 | { 77 | public: 78 | typedef Point3 VectorType; 79 | typedef Matrix33 MatrixType; 80 | 81 | /** Default constructor computing the Weingarten map from the 82 | * first and second derivatives, namely the gradient vector 83 | * and hessian matrix of the scalar field. 84 | */ 85 | WeingartenMap(const VectorType& grad, const MatrixType& hess) 86 | { 87 | Scalar invL = 1.0/grad.Norm(); 88 | assert(grad.Norm()>1e-8); 89 | m_normal = grad * invL; 90 | assert(!math::IsNAN(invL) && "gradient should not be zero!"); 91 | 92 | Matrix33 I; I.SetIdentity(); 93 | m_nnT.ExternalProduct(m_normal,m_normal); 94 | 95 | m_w = (I-m_nnT) * hess * invL; 96 | 97 | m_kgIsDirty = true; 98 | m_kmIsDirty = true; 99 | m_kpAreDirty = true; 100 | m_kdirAreDirty = true; 101 | } 102 | 103 | /** \returns the Weingarten map matrix */ 104 | const MatrixType& W() const { return m_w; } 105 | 106 | /** \returns the Gauss curvature = k1 * k2 */ 107 | Scalar GaussCurvature() const 108 | { 109 | if (m_kgIsDirty) 110 | { 111 | // we add nn^T to W such that the third eigenvalue becomes 1 112 | // then det(W) = k1 * k2 * 1 = Gauss curvature ! 113 | m_kg = (m_w + m_nnT).Determinant(); 114 | m_kgIsDirty = false; 115 | } 116 | return m_kg; 117 | } 118 | 119 | /** \returns the mean curvature = (k1 + k2)/2 */ 120 | Scalar MeanCurvature() const 121 | { 122 | if (m_kmIsDirty) 123 | { 124 | // the third eigenvalue of W is 0, then tr(W) = k1 + k2 + 0 = 2 k mean ! 
125 | m_km = m_w.Trace(); 126 | m_kmIsDirty = false; 127 | } 128 | return m_km; 129 | } 130 | 131 | /** \returns the first principal curvature */ 132 | Scalar K1() const { updateKp(); return m_k1; } 133 | 134 | /** \returns the second principal curvature */ 135 | Scalar K2() const { updateKp(); return m_k2; } 136 | 137 | /** \returns the direction of the first principal curvature */ 138 | const VectorType& K1Dir() const { extractEigenvectors(); return m_k1dir; } 139 | 140 | /** \returns the direction of the second principal curvature */ 141 | const VectorType& K2Dir() const { extractEigenvectors(); return m_k2dir; } 142 | 143 | protected: 144 | 145 | // direct computation of principal curvatures if needed 146 | inline void updateKp() const 147 | { 148 | if (m_kpAreDirty) 149 | { 150 | Scalar delta = sqrt(MeanCurvature()*m_km - 4.0*GaussCurvature()); 151 | m_k1 = 0.5*(m_km + delta); 152 | m_k2 = 0.5*(m_km - delta); 153 | if (fabs(m_k1) copy; 164 | m_w.ToEigenMatrix(copy); 165 | // MatrixType copy = m_w; 166 | // int mrot = 0; 167 | // VectorType evals; 168 | // MatrixType evecs; 169 | 170 | Eigen::SelfAdjointEigenSolver > eig(copy); 171 | Eigen::Matrix eval = eig.eigenvalues(); 172 | Eigen::Matrix evec = eig.eigenvectors(); 173 | eval = eval.cwiseAbs(); 174 | int ind0,ind1,ind2; 175 | eval.minCoeff(&ind0); 176 | ind1=(ind0+1)%3; 177 | ind2=(ind0+2)%3; 178 | if(eval[ind1]>eval[ind2]) std::swap(ind1,ind2); 179 | 180 | // Jacobi(copy, evals, evecs, mrot); 181 | // VectorType evalsAbs(fabs(evals[0]),fabs(evals[0]),fabs(evals[0])); 182 | // SortEigenvaluesAndEigenvectors(evals,evecs,true); 183 | m_k1 = eval[ind1]; 184 | m_k2 = eval[ind2]; 185 | m_k1dir.FromEigenVector(evec.col(ind1)); 186 | m_k2dir.FromEigenVector(evec.col(ind2)); 187 | m_kdirAreDirty = false; 188 | } 189 | } 190 | 191 | protected: 192 | VectorType m_normal; 193 | MatrixType m_nnT; 194 | MatrixType m_w; 195 | mutable VectorType m_k1dir, m_k2dir; 196 | mutable Scalar m_kg, m_km, m_k1, m_k2; 197 | mutable 
bool m_kgIsDirty, m_kmIsDirty, m_kpAreDirty, m_kdirAreDirty; 198 | }; 199 | 200 | } // namespace implicits 201 | } // namespace vcg 202 | 203 | #endif //__IMPLICITS 204 | -------------------------------------------------------------------------------- /src-old/mesh.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include "mlssurface.h" 17 | #include "apss.h" 18 | #include "implicits.h" 19 | #include "apss.tpp" 20 | 21 | class Vertex; class Edge; class Face; 22 | 23 | struct UsedTypes: public vcg::UsedTypes::AsVertexType, 24 | vcg::Use<::Edge>::AsEdgeType, 25 | vcg::Use<::Face>::AsFaceType>{}; 26 | 27 | class Vertex: public vcg::Vertex<::UsedTypes, 28 | vcg::vertex::Coord3f, 29 | vcg::vertex::Normal3f, 30 | vcg::vertex::VFAdj, 31 | vcg::vertex::Mark, 32 | vcg::vertex::Radiusf, 33 | vcg::vertex::Curvaturef, 34 | vcg::vertex::CurvatureDirf, 35 | vcg::vertex::Qualityf, 36 | vcg::vertex::Color4b, 37 | vcg::vertex::BitFlags> { 38 | vcg::math::Quadric _qd; 39 | public: 40 | Vertex () { 41 | _qd.SetZero(); 42 | } 43 | vcg::math::Quadric &Qd () { 44 | return _qd; 45 | } 46 | }; 47 | 48 | class Face: public vcg::Face<::UsedTypes, 49 | vcg::face::VertexRef, 50 | vcg::face::Mark, 51 | vcg::face::VFAdj, 52 | vcg::face::FFAdj, 53 | vcg::face::BitFlags> {}; 54 | class Edge: public vcg::Edge<::UsedTypes> {}; 55 | class Mesh: public vcg::tri::TriMesh, std::vector<::Face>, std::vector<::Edge>> {}; 56 | typedef vcg::Point3 Point3m; 57 | typedef vcg::Matrix33 Matrix33m; 58 | 59 | class TriEdgeCollapse: public vcg::tri::TriEdgeCollapseQuadric<::Mesh, vcg::tri::BasicVertexPair, ::TriEdgeCollapse> { 60 | public: 61 | class Params : public vcg::BaseParameterClass 62 | { 63 | public: 64 | double BoundaryQuadricWeight = 0.5; 65 | bool 
FastPreserveBoundary = false; 66 | bool AreaCheck = false; 67 | bool HardQualityCheck = false; 68 | double HardQualityThr = 0.1; 69 | bool HardNormalCheck = false; 70 | bool NormalCheck = false; 71 | double NormalThrRad = M_PI/2.0; 72 | double CosineThr = 0 ; 73 | bool OptimalPlacement =true; 74 | bool SVDPlacement = false; 75 | bool PreserveTopology =false; 76 | bool PreserveBoundary = false; 77 | double QuadricEpsilon = 1e-15; 78 | bool QualityCheck =true; 79 | double QualityThr =.3; // Collapsed that generate faces with quality LOWER than this value are penalized. So higher the value -> better the quality of the accepted triangles 80 | bool QualityQuadric =false; // During the initialization manage all the edges as border edges adding a set of additional quadrics that are useful mostly for keeping face aspect ratio good. 81 | double QualityQuadricWeight = 0.001f; // During the initialization manage all the edges as border edges adding a set of additional quadrics that are useful mostly for keeping face aspect ratio good. 
82 | bool QualityWeight=false; 83 | double QualityWeightFactor=100.0; 84 | double ScaleFactor=1.0; 85 | bool ScaleIndependent=true; 86 | bool UseArea =true; 87 | bool UseVertexWeight=false; 88 | }; 89 | using TriEdgeCollapseQuadric::TriEdgeCollapseQuadric; 90 | }; 91 | 92 | struct MeshModelParams { 93 | int smooth; 94 | float sample; 95 | MeshModelParams (): smooth(10), sample(10) { 96 | } 97 | }; 98 | 99 | class MeshModel: public MeshModelParams { 100 | 101 | void colorize (Mesh &m) { 102 | GaelMls::APSS mls(m); 103 | LOG(INFO) << "Colorizing"; 104 | mls.setFilterScale(3); 105 | mls.setMaxProjectionIters(15); 106 | mls.setProjectionAccuracy(0.0001); 107 | mls.setSphericalParameter(1); 108 | mls.computeVertexRaddi(16); 109 | 110 | // pass 1: computes curvatures and statistics 111 | for (auto &vert: m.vert) { 112 | 113 | Point3m p = mls.project(vert.P()); 114 | float c = 0; 115 | 116 | int errorMask; 117 | Point3m grad = mls.gradient(p, &errorMask); 118 | if (errorMask == GaelMls::MLS_OK && grad.Norm() > 1e-8) 119 | { 120 | Matrix33m hess = mls.hessian(p, &errorMask); 121 | vcg::implicits::WeingartenMap W(grad,hess); 122 | 123 | vert.PD1() = W.K1Dir(); 124 | vert.PD2() = W.K2Dir(); 125 | vert.K1() = W.K1(); 126 | vert.K2() = W.K2(); 127 | 128 | c = W.MeanCurvature(); 129 | /* 130 | switch(ct) 131 | { 132 | case CT_MEAN: c = W.MeanCurvature(); break; 133 | case CT_GAUSS: c = W.GaussCurvature(); break; 134 | case CT_K1: c = W.K1(); break; 135 | case CT_K2: c = W.K2(); break; 136 | default: assert(0 && "invalid curvature type"); 137 | } 138 | */ 139 | } 140 | vert.Q() = c; 141 | } 142 | vcg::Histogramf H; 143 | vcg::tri::Stat::ComputePerVertexQualityHistogram(m,H); 144 | vcg::tri::UpdateColor::PerVertexQualityRamp(m,H.Percentile(0.01f),H.Percentile(0.99f)); 145 | } 146 | public: 147 | MeshModel (MeshModelParams const ¶ms): MeshModelParams(params) { 148 | } 149 | 150 | void apply (Mesh &m) { 151 | /* 152 | vcg::tri::RequirePerVertexNormal(m); 153 | 
vcg::tri::RequirePerVertexMark(m); 154 | 155 | vcg::tri::UpdateNormal::PerVertexNormalized(m); 156 | vcg::tri::UpdateTopology::VertexFace(m); 157 | */ 158 | LOG(INFO) << "Smoothing"; 159 | vcg::tri::Smooth::VertexCoordLaplacian(m, smooth, false, true); // smoothselected, cotangentWeight 160 | LOG(INFO) << "Simplifying"; 161 | auto ovn = m.VN(); 162 | auto ofn = m.FN(); 163 | ::TriEdgeCollapse::Params params; 164 | vcg::LocalOptimization DeciSession(m, ¶ms); 165 | DeciSession.Init<::TriEdgeCollapse>(); 166 | int target = int(m.FN() / sample)+1; 167 | DeciSession.SetTargetSimplices(target); 168 | DeciSession.SetTimeBudget(0.5f); 169 | while( DeciSession.DoOptimization() && m.fn>target) { 170 | LOG(INFO)<< "Simplifying..."; 171 | } 172 | DeciSession.Finalize<::TriEdgeCollapse >(); 173 | int r = vcg::tri::Clean::RemoveDuplicateVertex(m); 174 | LOG(INFO) << "Removed " << r << " duplicated vertices"; 175 | r = vcg::tri::Clean::RemoveUnreferencedVertex(m); 176 | LOG(INFO) << "Removed " << r << " unreferenced vertices"; 177 | vcg::tri::Allocator::CompactEveryVector(m); 178 | LOG(INFO) << "Simplified, V: " << ovn << "->" << m.VN() << " F: " << ofn << "->" << m.FN(); 179 | vcg::tri::Clean::FlipNormalOutside(m); 180 | /* 181 | vcg::tri::UpdateTopology::FaceFace(m); 182 | vcg::tri::UpdateCurvature::MeanAndGaussian(m); 183 | vcg::Histogramf H; 184 | vcg::tri::Stat::ComputePerVertexQualityHistogram(m,H); 185 | vcg::tri::UpdateColor::PerVertexQualityRamp(m,H.Percentile(0.01f),H.Percentile(0.99f)); 186 | */ 187 | vcg::tri::UpdateNormal::PerVertexNormalized(m); 188 | vcg::tri::UpdateTopology::VertexFace(m); 189 | colorize(m); 190 | } 191 | }; 192 | 193 | 194 | namespace vcg { namespace tri { namespace io { 195 | int constexpr SAVE_MASK = Mask::IOM_VERTCOLOR | Mask::IOM_VERTQUALITY | Mask::IOM_VERTRADIUS | vcg::tri::io::Mask::IOM_VERTNORMAL; 196 | }}} 197 | 198 | #include 199 | #include "balltree.cpp" 200 | 
-------------------------------------------------------------------------------- /src-old/mesh.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | import time 4 | import numpy as np 5 | import cv2 6 | import skimage 7 | from skimage import measure 8 | from scipy.ndimage.morphology import grey_dilation, binary_dilation, binary_fill_holes 9 | #from skimage import regionprops 10 | from adsb3 import * 11 | import scipy 12 | import pyadsb3 13 | 14 | def pad (images, padding=2, dtype=None): 15 | Z, Y, X = images.shape 16 | if dtype is None: 17 | dtype = images.dtype 18 | out = np.zeros((Z+padding*2, Y+padding*2, X+padding*2), dtype=dtype) 19 | out[padding:(Z+padding),padding:(Y+padding),padding:(X+padding)] = images 20 | return out 21 | 22 | def segment_body (image, smooth=1, th=-300): 23 | blur = scipy.ndimage.filters.gaussian_filter(image, smooth, mode='constant') 24 | binary = np.array(blur < th, dtype=np.uint8) 25 | 26 | # body is a rough region covering human body 27 | body = np.zeros_like(binary) 28 | for i, sl in enumerate(binary): 29 | #H, W = sl.shape 30 | ll = measure.label(sl, background=1) # connected components 31 | # biggest CC should be body 32 | pp = measure.regionprops(ll) 33 | boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0] # label 0 is air 34 | boxes = sorted(boxes, key = lambda x: -x[0]) 35 | if len(boxes) == 0: 36 | continue 37 | y0, x0, y1, x1 = boxes[0][1] 38 | body[i,y0:y1,x0:x1] = boxes[0][2] 39 | pass 40 | return body, None 41 | 42 | def fill_convex (image): 43 | H, W = image.shape 44 | padded = np.zeros((H+20, W+20), dtype=np.uint8) 45 | padded[10:(10+H),10:(10+W)] = image 46 | 47 | contours = measure.find_contours(padded, 0.5) 48 | if len(contours) == 0: 49 | return image 50 | if len(contours) == 1: 51 | contour = contours[0] 52 | else: 53 | contour = np.vstack(contours) 54 | cc = np.zeros_like(contour, dtype=np.int32) 55 | cc[:,0] = contour[:, 
1] 56 | cc[:,1] = contour[:, 0] 57 | hull = cv2.convexHull(cc) 58 | contour = hull.reshape((1, -1, 2)) 59 | cv2.fillPoly(padded, contour, 1) 60 | return padded[10:(10+H),10:(10+W)] 61 | 62 | def segment_lung (image, smooth=1, th=-300): 63 | 64 | padding_value = np.min(image) 65 | if padding_value < -1010: 66 | padding = [image == padding_value] 67 | else: 68 | padding = None 69 | 70 | imagex = image 71 | if padding: 72 | imagex = np.copy(image) 73 | imagex[padding] = 0 74 | blur = scipy.ndimage.filters.gaussian_filter(imagex, smooth, mode='constant') 75 | if padding: 76 | blur[padding] = padding_value 77 | 78 | binary = np.array(blur < th, dtype=np.uint8) 79 | 80 | 81 | # body is a rough region covering human body 82 | body = np.zeros_like(binary) 83 | 84 | for i, sl in enumerate(binary): 85 | #H, W = sl.shape 86 | ll = measure.label(sl, background=1) # connected components 87 | # biggest CC should be body 88 | pp = measure.regionprops(ll) 89 | boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0] # label 0 is air 90 | 91 | boxes = sorted(boxes, key = lambda x: -x[0]) 92 | if len(boxes) == 0: 93 | print 'no body detected' 94 | continue 95 | y0, x0, y1, x1 = boxes[0][1] 96 | body[i,y0:y1,x0:x1] = fill_convex(boxes[0][2]) 97 | pass 98 | 99 | binary *= body 100 | 101 | if False: 102 | padding = np.min(image) 103 | if padding < -1010: 104 | binary[image == padding] = 0 105 | 106 | # 0: body 107 | # 1: air & lung 108 | 109 | labels = measure.label(binary, background=1) 110 | 111 | # set air (same cc as corners) -> body 112 | bg_labels = set() 113 | # 8 corders of the image 114 | for z in [0, -1]: 115 | for y in [0, -1]: 116 | for x in [0, -1]: 117 | bg_labels.add(labels[z, y, x]) 118 | print bg_labels 119 | bg_labels = list(bg_labels) 120 | for bg_label in bg_labels: 121 | binary[bg_label == labels] = 0 122 | pass 123 | 124 | # now binary: 125 | # 0: non-lung & body tissue in lung & air 126 | # 1: lung & holes in body 127 | #inside = np.copy(binary) 128 
| 129 | 130 | # now binary: 131 | # 0: non-lung & body tissue in lung 132 | # 1: lung & holes in body 133 | binary = np.swapaxes(binary, 0, 1) 134 | for i, sl in enumerate(binary): 135 | #H, W = sl.shape 136 | ll = measure.label(sl, background=1) # connected components 137 | # biggest CC should be body 138 | vv, cc = np.unique(ll, return_counts=True) 139 | cc[0] = 0 140 | assert len(vv) > 0 141 | body_ll = vv[np.argmax(cc)] 142 | binary[i][ll != body_ll] = 1 143 | pass 144 | binary = np.swapaxes(binary, 0, 1) 145 | if padding: 146 | binary[padding] = 0 147 | binary *= body 148 | 149 | # binary 0: body 150 | # 1: - anything inside lung 151 | # - holes in body 152 | # - possibly image corners 153 | # 154 | 155 | # inside 0: non-lung & air 156 | # body tissue in lung 157 | # 1: lung 158 | 159 | # set corner again 160 | labels = measure.label(binary, background=0) 161 | bg_labels = set([0]) 162 | for z in [0, -1]: 163 | for y in [0, -1]: 164 | for x in [0, -1]: 165 | bg_labels.add(labels[z, y, x]) 166 | 167 | #print 'bg', bg_labels 168 | val_counts = zip(*np.unique(labels, return_counts=True)) 169 | val_counts = [x for x in val_counts if (not x[0] in bg_labels) and (x[1] >= 10)] 170 | val_counts = sorted(val_counts, key=lambda x:-x[1])[:100] # sort by size 171 | body_counts = [c for _, c in val_counts] 172 | print val_counts 173 | binary = np.zeros_like(binary, dtype=np.uint8) 174 | print val_counts[0][0] 175 | binary[labels == val_counts[0][0]] = 1 176 | #for v, _ in val_counts[0:5]: 177 | # binary[labels == v] = 1 178 | if len(val_counts) > 1: 179 | if val_counts[1][1] * 3 > val_counts[0][1]: 180 | #binary[labels == val_counts[1][0]] = 1 181 | #if val_counts[1][1] * 4 > val_counts[0][1]: 182 | logging.warn('more than 2 lungs parts detected') 183 | 184 | # remove upper part of qiguan 185 | last = binary.shape[0] - 1 186 | for ri in range(binary.shape[0]): 187 | #H, W = sl.shape 188 | i = last - ri 189 | ll = measure.label(binary[i], background=0) # connected 
components 190 | nl = np.unique(ll) 191 | if len(nl) <= 2: 192 | binary[i,:,:] = 0 193 | else: 194 | print 'removed %d slices' % ri 195 | break 196 | pass 197 | 198 | return binary, body_counts #, inside 199 | 200 | def convex_hull (binary): 201 | swap_sequence = [(0, 1), # 102 202 | (0, 2), # 201 203 | (0, 2)] # 102 204 | 205 | output = np.ndarray(binary.shape, dtype=binary.dtype) 206 | for swp1, swp2 in swap_sequence: 207 | N = binary.shape[0] 208 | print 'shape', binary.shape 209 | for i in range(N): 210 | contours = measure.find_contours(binary[i], 0.5) 211 | if len(contours) == 0: 212 | continue 213 | if len(contours) == 1: 214 | contour = contours[0] 215 | else: 216 | contour = np.vstack(contours) 217 | cc = np.zeros_like(contour, dtype=np.int32) 218 | cc[:,0] = contour[:, 1] 219 | cc[:,1] = contour[:, 0] 220 | hull = cv2.convexHull(cc) 221 | contour = hull.reshape((1, -1, 2)) 222 | cv2.fillPoly(binary[i], contour, 1) 223 | #binary[i] = skimage.morphology.convex_hull_image(binary[i]) 224 | pass 225 | print 'swap', swp1, swp2 226 | nb = np.swapaxes(binary, swp1, swp2) 227 | binary = np.ndarray(nb.shape, dtype=nb.dtype) 228 | binary[:,:] = nb[:,:] 229 | pass 230 | binary = np.swapaxes(binary, 0, 1) 231 | output[:,:] = binary[:,:] 232 | return output; 233 | #binary = binary_dilation(output, iterations=dilate) 234 | #return binary 235 | 236 | def segment_lung_internal (image, smooth=1, th=-300): 237 | 238 | padding_value = np.min(image) 239 | if padding_value < -1010: 240 | padding = [image == padding_value] 241 | else: 242 | padding = None 243 | 244 | imagex = image 245 | if padding: 246 | imagex = np.copy(image) 247 | imagex[padding] = 0 248 | blur = scipy.ndimage.filters.gaussian_filter(imagex, smooth, mode='constant') 249 | if padding: 250 | blur[padding] = padding_value 251 | 252 | binary = np.array(blur < th, dtype=np.uint8) 253 | 254 | #not_slid = np.array(blur < th, dtype=np.uint8) 255 | not_solid = np.copy(binary) 256 | 257 | 258 | # body is a rough 
region covering human body 259 | body = np.zeros_like(binary) 260 | 261 | for i, sl in enumerate(binary): 262 | #H, W = sl.shape 263 | ll = measure.label(sl, background=1) # connected components 264 | # biggest CC should be body 265 | pp = measure.regionprops(ll) 266 | boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0] # label 0 is air 267 | 268 | boxes = sorted(boxes, key = lambda x: -x[0]) 269 | if len(boxes) == 0: 270 | print 'no body detected' 271 | continue 272 | y0, x0, y1, x1 = boxes[0][1] 273 | body[i,y0:y1,x0:x1] = fill_convex(boxes[0][2]) 274 | pass 275 | 276 | binary *= body 277 | 278 | if False: 279 | padding = np.min(image) 280 | if padding < -1010: 281 | binary[image == padding] = 0 282 | 283 | # 0: body 284 | # 1: air & lung 285 | 286 | labels = measure.label(binary, background=1) 287 | 288 | # set air (same cc as corners) -> body 289 | bg_labels = set() 290 | # 8 corders of the image 291 | for z in [0, -1]: 292 | for y in [0, -1]: 293 | for x in [0, -1]: 294 | bg_labels.add(labels[z, y, x]) 295 | print bg_labels 296 | bg_labels = list(bg_labels) 297 | for bg_label in bg_labels: 298 | binary[bg_label == labels] = 0 299 | pass 300 | 301 | # now binary: 302 | # 0: non-lung & body tissue in lung & air 303 | # 1: lung & holes in body 304 | #inside = np.copy(binary) 305 | 306 | 307 | # now binary: 308 | # 0: non-lung & body tissue in lung 309 | # 1: lung & holes in body 310 | binary = np.swapaxes(binary, 0, 1) 311 | for i, sl in enumerate(binary): 312 | #H, W = sl.shape 313 | ll = measure.label(sl, background=1) # connected components 314 | # biggest CC should be body 315 | vv, cc = np.unique(ll, return_counts=True) 316 | cc[0] = 0 317 | assert len(vv) > 0 318 | body_ll = vv[np.argmax(cc)] 319 | binary[i][ll != body_ll] = 1 320 | pass 321 | binary = np.swapaxes(binary, 0, 1) 322 | if padding: 323 | binary[padding] = 0 324 | binary *= body 325 | 326 | # binary 0: body 327 | # 1: - anything inside lung 328 | # - holes in body 329 | # - 
possibly image corners 330 | # 331 | 332 | # inside 0: non-lung & air 333 | # body tissue in lung 334 | # 1: lung 335 | 336 | # set corner again 337 | labels = measure.label(binary, background=0) 338 | bg_labels = set([0]) 339 | for z in [0, -1]: 340 | for y in [0, -1]: 341 | for x in [0, -1]: 342 | bg_labels.add(labels[z, y, x]) 343 | 344 | #print 'bg', bg_labels 345 | val_counts = zip(*np.unique(labels, return_counts=True)) 346 | val_counts = [x for x in val_counts if (not x[0] in bg_labels) and (x[1] >= 10)] 347 | val_counts = sorted(val_counts, key=lambda x:-x[1])[:100] # sort by size 348 | body_counts = [c for _, c in val_counts] 349 | print val_counts 350 | binary = np.zeros_like(binary, dtype=np.uint8) 351 | print val_counts[0][0] 352 | binary[labels == val_counts[0][0]] = 1 353 | #for v, _ in val_counts[0:5]: 354 | # binary[labels == v] = 1 355 | if len(val_counts) > 1: 356 | if val_counts[1][1] * 3 > val_counts[0][1]: 357 | #binary[labels == val_counts[1][0]] = 1 358 | #if val_counts[1][1] * 4 > val_counts[0][1]: 359 | logging.warn('more than 2 lungs parts detected') 360 | 361 | # remove upper part of qiguan 362 | last = binary.shape[0] - 1 363 | for ri in range(binary.shape[0]): 364 | #H, W = sl.shape 365 | i = last - ri 366 | ll = measure.label(binary[i], background=0) # connected components 367 | nl = np.unique(ll) 368 | if len(nl) <= 2: 369 | binary[i,:,:] = 0 370 | else: 371 | print 'removed %d slices' % ri 372 | break 373 | pass 374 | 375 | #not_solid = np.logical_and(not_solid, binary) # solid within lung 376 | return np.logical_and(not_solid, binary), body_counts #, inside 377 | 378 | -------------------------------------------------------------------------------- /src-old/mlssurface.h: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | * MeshLab o o * 3 | * A versatile mesh processing toolbox o o * 4 | * _ O _ * 5 | * Copyright(C) 
2005 \/)\/ * 6 | * Visual Computing Lab /\/| * 7 | * ISTI - Italian National Research Council | * 8 | * \ * 9 | * All rights reserved. * 10 | * * 11 | * This program is free software; you can redistribute it and/or modify * 12 | * it under the terms of the GNU General Public License as published by * 13 | * the Free Software Foundation; either version 2 of the License, or * 14 | * (at your option) any later version. * 15 | * * 16 | * This program is distributed in the hope that it will be useful, * 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of * 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * 19 | * GNU General Public License (http://www.gnu.org/licenses/gpl.txt) * 20 | * for more details. * 21 | * * 22 | ****************************************************************************/ 23 | 24 | #ifndef MLSSURFACE_H 25 | #define MLSSURFACE_H 26 | 27 | #include "balltree.h" 28 | #include 29 | #include 30 | #include 31 | #include 32 | 33 | namespace GaelMls { 34 | 35 | enum { 36 | MLS_OK, 37 | MLS_TOO_FAR, 38 | MLS_TOO_MANY_ITERS, 39 | MLS_NOT_SUPPORTED, 40 | 41 | MLS_DERIVATIVE_ACCURATE, 42 | MLS_DERIVATIVE_APPROX, 43 | MLS_DERIVATIVE_FINITEDIFF 44 | }; 45 | 46 | template 47 | class MlsSurface 48 | { 49 | public: 50 | typedef typename MeshType::ScalarType Scalar; 51 | typedef vcg::Point3 VectorType; 52 | typedef vcg::Matrix33 MatrixType; 53 | typedef typename MeshType::VertContainer PointsType; 54 | 55 | MlsSurface(const MeshType& mesh) 56 | : mMesh(mesh), mPoints(mesh.vert) 57 | { 58 | mCachedQueryPointIsOK = false; 59 | 60 | mAABB = mesh.bbox; 61 | 62 | // compute radii using a basic meshless density estimator 63 | /* 64 | if (!mPoints.RadiusEnabled) 65 | { 66 | const_cast(mPoints).EnableRadius(); 67 | */ 68 | //computeVertexRaddi(); 69 | 70 | 71 | mFilterScale = 4.0; 72 | mMaxNofProjectionIterations = 20; 73 | mProjectionAccuracy = (Scalar)1e-4; 74 | mBallTree = 0; 75 | mGradientHint = MLS_DERIVATIVE_ACCURATE; 76 | 
mHessianHint = MLS_DERIVATIVE_ACCURATE; 77 | 78 | mDomainMinNofNeighbors = 4; 79 | mDomainRadiusScale = 2.; 80 | mDomainNormalScale = 1.; 81 | } 82 | 83 | /** \returns the value of the reconstructed scalar field at point \a x */ 84 | virtual Scalar potential(const VectorType& x, int* errorMask = 0) const = 0; 85 | 86 | /** \returns the gradient of the reconstructed scalar field at point \a x 87 | * 88 | * The method used to compute the gradient can be controlled with setGradientHint(). 89 | */ 90 | virtual VectorType gradient(const VectorType& x, int* errorMask = 0) const = 0; 91 | 92 | /** \returns the hessian matrix of the reconstructed scalar field at point \a x 93 | * 94 | * The method used to compute the hessian matrix can be controlled with setHessianHint(). 95 | */ 96 | virtual MatrixType hessian(const VectorType& x, int* errorMask = 0) const 97 | { if (errorMask) *errorMask = MLS_NOT_SUPPORTED; return MatrixType(); } 98 | 99 | /** \returns the projection of point x onto the MLS surface, and optionnaly returns the normal in \a pNormal */ 100 | virtual VectorType project(const VectorType& x, VectorType* pNormal = 0, int* errorMask = 0) const = 0; 101 | 102 | /** \returns whether \a x is inside the restricted surface definition domain */ 103 | virtual bool isInDomain(const VectorType& x) const; 104 | 105 | /** \returns the mean curvature from the gradient vector and Hessian matrix. 
106 | */ 107 | Scalar meanCurvature(const VectorType& gradient, const MatrixType& hessian) const; 108 | 109 | /** set the scale of the spatial filter */ 110 | void setFilterScale(Scalar v); 111 | /** set the maximum number of iterations during the projection */ 112 | void setMaxProjectionIters(int n); 113 | /** set the threshold factor to detect convergence of the iterations */ 114 | void setProjectionAccuracy(Scalar v); 115 | 116 | /** set a hint on how to compute the gradient 117 | * 118 | * Possible values are MLS_DERIVATIVE_ACCURATE, MLS_DERIVATIVE_APPROX, MLS_DERIVATIVE_FINITEDIFF 119 | */ 120 | void setGradientHint(int h); 121 | 122 | /** set a hint on how to compute the hessian matrix 123 | * 124 | * Possible values are MLS_DERIVATIVE_ACCURATE, MLS_DERIVATIVE_APPROX, MLS_DERIVATIVE_FINITEDIFF 125 | */ 126 | void setHessianHint(int h); 127 | 128 | inline const MeshType& mesh() const { return mMesh; } 129 | /** a shortcut for mesh().vert */ 130 | inline const PointsType& points() const { return mPoints; } 131 | 132 | inline vcg::ConstDataWrapper positions() const 133 | { 134 | return vcg::ConstDataWrapper(&mPoints[0].P(), mPoints.size(), 135 | size_t(mPoints[1].P().V()) - size_t(mPoints[0].P().V())); 136 | } 137 | inline vcg::ConstDataWrapper normals() const 138 | { 139 | return vcg::ConstDataWrapper(&mPoints[0].N(), mPoints.size(), 140 | size_t(mPoints[1].N().V()) - size_t(mPoints[0].N().V())); 141 | } 142 | inline vcg::ConstDataWrapper radii() const 143 | { 144 | return vcg::ConstDataWrapper(&mPoints[0].R(), mPoints.size(), 145 | size_t(&mPoints[1].R()) - size_t(&mPoints[0].R())); 146 | } 147 | const vcg::Box3& boundingBox() const { return mAABB; } 148 | 149 | static const Scalar InvalidValue() { return Scalar(12345679810.11121314151617); } 150 | 151 | void computeVertexRaddi(const int nbNeighbors = 16); 152 | protected: 153 | void computeNeighborhood(const VectorType& x, bool computeDerivatives) const; 154 | void requestSecondDerivatives() const; 155 | 156 
| struct PointToPointSqDist 157 | { 158 | inline bool operator()(const VectorType &a, const VectorType &b, Scalar& refD2, VectorType &q) const 159 | { 160 | // std::cout << a.X() << a.Y() << a.Z() << " - " << b.X() << b.Y() << b.Z() << 161 | // " => " << vcg::Distance(a, b) << " < " << refD2 << "\n"; 162 | Scalar d2 = vcg::SquaredDistance(a, b); 163 | if (d2>refD2) 164 | return false; 165 | 166 | refD2 = d2; 167 | q = a; 168 | return true; 169 | } 170 | }; 171 | 172 | class DummyObjectMarker {}; 173 | 174 | protected: 175 | const MeshType& mMesh; 176 | const PointsType& mPoints; 177 | vcg::Box3 mAABB; 178 | int mGradientHint; 179 | int mHessianHint; 180 | 181 | BallTree* mBallTree; 182 | 183 | int mMaxNofProjectionIterations; 184 | Scalar mFilterScale; 185 | Scalar mAveragePointSpacing; 186 | Scalar mProjectionAccuracy; 187 | 188 | int mDomainMinNofNeighbors; 189 | float mDomainRadiusScale; 190 | float mDomainNormalScale; 191 | 192 | // cached values: 193 | mutable bool mCachedQueryPointIsOK; 194 | mutable VectorType mCachedQueryPoint; 195 | mutable Neighborhood mNeighborhood; 196 | mutable std::vector mCachedWeights; 197 | mutable std::vector mCachedWeightDerivatives; 198 | mutable std::vector mCachedWeightGradients; 199 | mutable std::vector mCachedWeightSecondDerivatives; 200 | }; 201 | 202 | } // namespace 203 | 204 | #include "mlssurface.tpp" 205 | 206 | #endif // MLSSURFACE_H 207 | -------------------------------------------------------------------------------- /src-old/mlssurface.tpp: -------------------------------------------------------------------------------- 1 | /**************************************************************************** 2 | * MeshLab o o * 3 | * A versatile mesh processing toolbox o o * 4 | * _ O _ * 5 | * Copyright(C) 2005 \/)\/ * 6 | * Visual Computing Lab /\/| * 7 | * ISTI - Italian National Research Council | * 8 | * \ * 9 | * All rights reserved. 
* 10 | * * 11 | * This program is free software; you can redistribute it and/or modify * 12 | * it under the terms of the GNU General Public License as published by * 13 | * the Free Software Foundation; either version 2 of the License, or * 14 | * (at your option) any later version. * 15 | * * 16 | * This program is distributed in the hope that it will be useful, * 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of * 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * 19 | * GNU General Public License (http://www.gnu.org/licenses/gpl.txt) * 20 | * for more details. * 21 | * * 22 | ****************************************************************************/ 23 | 24 | #include "mlssurface.h" 25 | #include 26 | #include 27 | #include 28 | #include 29 | 30 | namespace GaelMls { 31 | 32 | // template 33 | // MlsSurface<_Scalar>::MlsSurface(const MeshModel& m) 34 | // : mMesh(m) 35 | // { 36 | // mCachedQueryPointIsOK = false; 37 | // 38 | // mPoints.resize(m.cm.vert.size()); 39 | // mNormals.resize(m.cm.vert.size()); 40 | // mRadii.resize(m.cm.vert.size()); 41 | // mAABB.Set(mMesh.cm.vert[0].cP()); 42 | // for (uint i = 0; i< m.cm.vert.size(); i++) 43 | // { 44 | // mPoints[i] = /*vcg::vector_cast*/(mMesh.cm.vert[i].cP()); 45 | // mNormals[i] = /*vcg::vector_cast*/(mMesh.cm.vert[i].cN()); 46 | // mAABB.Add(mMesh.cm.vert[i].cP()); 47 | // } 48 | // 49 | // // compute radii using a basic meshless density estimator 50 | // computeVertexRaddi(); 51 | // 52 | // mFilterScale = 4.0; 53 | // mMaxNofProjectionIterations = 20; 54 | // mProjectionAccuracy = 1e-4; 55 | // mBallTree = 0; 56 | // } 57 | 58 | template 59 | void MlsSurface<_MeshType>::setFilterScale(Scalar v) 60 | { 61 | mFilterScale = v; 62 | mCachedQueryPointIsOK = false; 63 | if (mBallTree) 64 | mBallTree->setRadiusScale(mFilterScale); 65 | } 66 | 67 | template 68 | void MlsSurface<_MeshType>::setMaxProjectionIters(int n) 69 | { 70 | mMaxNofProjectionIterations = n; 71 | 
mCachedQueryPointIsOK = false; 72 | } 73 | 74 | template 75 | void MlsSurface<_MeshType>::setProjectionAccuracy(Scalar v) 76 | { 77 | mProjectionAccuracy = v; 78 | mCachedQueryPointIsOK = false; 79 | } 80 | 81 | template 82 | void MlsSurface<_MeshType>::setGradientHint(int h) 83 | { 84 | mGradientHint = h; 85 | mCachedQueryPointIsOK = false; 86 | } 87 | 88 | template 89 | void MlsSurface<_MeshType>::setHessianHint(int h) 90 | { 91 | mHessianHint = h; 92 | mCachedQueryPointIsOK = false; 93 | } 94 | 95 | template 96 | void MlsSurface<_MeshType>::computeVertexRaddi(const int nbNeighbors) 97 | { 98 | #if 0 99 | int nbNeighbors = 16; 100 | vcg::Octree knn; 101 | knn.Set(mPoints.begin(), mPoints.end()); 102 | std::vector nearest_objects; 103 | std::vector nearest_points; 104 | std::vector sqDistances; 105 | mAveragePointSpacing = 0; 106 | for (uint i = 0; i< mPoints.size(); i++) 107 | { 108 | DummyObjectMarker dom; 109 | PointToPointSqDist dfunc; 110 | Scalar max_dist2 = 1e9;//std::numeric_limits::max(); 111 | knn.GetKClosest(dfunc, dom, nbNeighbors, mPoints[i], 112 | max_dist2, nearest_objects, sqDistances, nearest_points); 113 | // for (int j=0; i=2); 126 | vcg::KdTree knn(positions()); 127 | typename vcg::KdTree::PriorityQueue pq; 128 | // knn.setMaxNofNeighbors(nbNeighbors); 129 | mAveragePointSpacing = 0; 130 | for (size_t i = 0; i< mPoints.size(); i++) 131 | { 132 | knn.doQueryK(mPoints[i].cP(),nbNeighbors,pq); 133 | const_cast(mPoints)[i].R() = 2. 
* sqrt(pq.getTopWeight()/Scalar(pq.getNofElements())); 134 | mAveragePointSpacing += mPoints[i].cR(); 135 | } 136 | mAveragePointSpacing /= Scalar(mPoints.size()); 137 | 138 | #endif 139 | } 140 | 141 | template 142 | void MlsSurface<_MeshType>::computeNeighborhood(const VectorType& x, bool computeDerivatives) const 143 | { 144 | if (!mBallTree) 145 | { 146 | const_cast*&>(mBallTree) = new BallTree(positions(), radii()); 147 | const_cast*>(mBallTree)->setRadiusScale(mFilterScale); 148 | } 149 | mBallTree->computeNeighbors(x, &mNeighborhood); 150 | size_t nofSamples = mNeighborhood.size(); 151 | 152 | // compute spatial weights and partial derivatives 153 | mCachedWeights.resize(nofSamples); 154 | if (computeDerivatives) 155 | { 156 | mCachedWeightDerivatives.resize(nofSamples); 157 | mCachedWeightGradients.resize(nofSamples); 158 | } 159 | else 160 | mCachedWeightGradients.clear(); 161 | 162 | for (size_t i=0; i 184 | void MlsSurface<_MeshType>::requestSecondDerivatives() const 185 | { 186 | //if (!mSecondDerivativeUptodate) 187 | { 188 | size_t nofSamples = mNeighborhood.size(); 189 | if (nofSamples>mCachedWeightSecondDerivatives.size()) 190 | mCachedWeightSecondDerivatives.resize(nofSamples+10); 191 | 192 | { 193 | for (size_t i=0 ; i 210 | typename MlsSurface<_MeshType>::Scalar 211 | MlsSurface<_MeshType>::meanCurvature(const VectorType& gradient, const MatrixType& hessian) const 212 | { 213 | Scalar gl = gradient.Norm(); 214 | // return (gl*gl*hessian.Trace() - vcg::Dot(gradient, VectorType(hessian * gradient))) / (2.*gl*gl*gl); 215 | return (gl*gl*hessian.Trace() - (gradient * VectorType(hessian * gradient))) / (2.*gl*gl*gl); 216 | } 217 | 218 | template 219 | bool MlsSurface<_MeshType>::isInDomain(const VectorType& x) const 220 | { 221 | if ((!mCachedQueryPointIsOK) || mCachedQueryPoint!=x) 222 | { 223 | computeNeighborhood(x, false); 224 | } 225 | int nb = mNeighborhood.size(); 226 | if (nb rs2; 240 | ++i; 241 | } 242 | } 243 | else 244 | { 245 | Scalar s = 
1./(mDomainNormalScale*mDomainNormalScale) - 1.f; 246 | while (out && i rs2; 253 | ++i; 254 | } 255 | } 256 | return !out; 257 | } 258 | 259 | // template class MlsSurface; 260 | // template class MlsSurface; 261 | 262 | } 263 | -------------------------------------------------------------------------------- /src-old/models/ft/200000.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/src-old/models/ft/200000.data-00000-of-00001 -------------------------------------------------------------------------------- /src-old/models/ft/200000.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/src-old/models/ft/200000.index -------------------------------------------------------------------------------- /src-old/models/ft/200000.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/src-old/models/ft/200000.meta -------------------------------------------------------------------------------- /src-old/models/nodule/axial/200000.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/src-old/models/nodule/axial/200000.data-00000-of-00001 -------------------------------------------------------------------------------- /src-old/models/nodule/axial/200000.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/src-old/models/nodule/axial/200000.index -------------------------------------------------------------------------------- 
/src-old/models/nodule/axial/200000.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/src-old/models/nodule/axial/200000.meta -------------------------------------------------------------------------------- /src-old/models/score: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aaalgo/plumo/c2442307ba31f781df3b68ca604ec5c8f0a9fa57/src-old/models/score -------------------------------------------------------------------------------- /src-old/papaya.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | import sys 4 | import shutil 5 | import subprocess 6 | from glob import glob 7 | from jinja2 import Environment, FileSystemLoader 8 | from adsb3 import DATA_DIR 9 | 10 | TMPL_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 11 | './templates') 12 | STATIC_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 13 | './static') 14 | env = Environment(loader=FileSystemLoader(searchpath=TMPL_DIR)) 15 | case_tmpl = env.get_template('papaya_case.html') 16 | index_tmpl = env.get_template('papaya_index.html') 17 | 18 | class Annotations: 19 | def __init__ (self): 20 | self.annos = [] 21 | pass 22 | 23 | def add (self, box, hint=None): 24 | self.annos.append({'box': box, 'hint': hint}) 25 | pass 26 | 27 | def Papaya (path, case, annotations=Annotations(), images = None, text = ''): 28 | try: 29 | os.makedirs(path) 30 | except: 31 | pass 32 | try: 33 | subprocess.check_call('rm -rf %s/dcm' % path, shell=True) 34 | except: 35 | pass 36 | #try: 37 | # data = os.path.abspath(DATA_DIR) 38 | # os.symlink(data, os.path.join(path, 'data')) 39 | #except: 40 | # pass 41 | for f in ['papaya.css', 'papaya.js']: 42 | shutil.copyfile(os.path.join(STATIC_DIR, f), os.path.join(path, f)) 43 | pass 44 | pass 45 | 
os.mkdir(os.path.join(path, 'dcm')) 46 | subprocess.check_call('rm -rf %s/case.nii.gz' % (path, ), shell=True) 47 | subprocess.check_call('dcm2niix -z i -o %s -f case %s' % (path, case.path), shell=True) 48 | boxes = [] 49 | centers = [] 50 | for anno in annotations.annos: 51 | box = case.papaya_box(anno['box']) 52 | boxes.append(box) 53 | z1, y1, x1, z2, y2, x2 = box 54 | hint = anno.get('hint', None) 55 | center = ((z1+z2)/2, (y1+y2)/2, (x1+x2)/2,hint) 56 | centers.append(center) 57 | with open(os.path.join(path, 'index.html'), 'w') as f: 58 | f.write(case_tmpl.render(boxes=boxes, centers=centers)) 59 | pass 60 | pass 61 | 62 | if __name__ == '__main__': 63 | from adsb3 import Case 64 | case = Case('008464bb8521d09a42985dd8add3d0d2') 65 | papaya = Papaya('/home/wdong/public_html/papaya_test') 66 | boxes =[[38, 359, 393, 42, 367, 404], [63, 189, 138, 64, 201, 156], [64, 208, 82, 66, 218, 90], [126, 227, 343, 128, 237, 351], [138, 385, 180, 139, 391, 186]] 67 | papaya.next(case, boxes=boxes) 68 | papaya.flush() 69 | -------------------------------------------------------------------------------- /src-old/process.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import division 4 | from __future__ import print_function 5 | import os 6 | import sys 7 | import argparse 8 | import math 9 | import datetime 10 | import numpy as np 11 | import tensorflow as tf 12 | from tensorflow.python.framework import meta_graph 13 | from scipy.ndimage.morphology import grey_dilation, binary_dilation 14 | from scipy.ndimage.filters import gaussian_filter 15 | from skimage import measure 16 | from adsb3 import * 17 | import pyadsb3 18 | import mesh 19 | from papaya import Papaya, Annotations 20 | from three import Three 21 | from gallery import Gallery 22 | 23 | BATCH = 32 24 | MIN_NODULE_SIZE=30 25 | PARTITIONS = [(1,1,1),(1,1,2),(3,1,1),(1,1,3)] 26 | TOTAL_PARTS 
= 0 27 | for x, y, z in PARTITIONS: 28 | TOTAL_PARTS += x * y * z 29 | pass 30 | 31 | def extract_nodules (prob, fts, th=0.05, ext=2): 32 | if not fts is None: 33 | prob4 = np.reshape(prob, prob.shape + (1,)) 34 | assert prob4.base is prob 35 | fts = np.clip(fts, 0, 6) 36 | fts *= prob4 37 | binary = prob > th 38 | k = int(round(ext / SPACING)) 39 | binary = binary_dilation(binary, iterations=k) 40 | labels = measure.label(binary, background=0) 41 | boxes = measure.regionprops(labels) 42 | 43 | nodules = [] 44 | dim = 2 45 | if not fts is None: 46 | dim += fts.shape[3] 47 | 48 | Z, Y, X = prob.shape 49 | Z = 1.0 * Z 50 | Y = 1.0 * Y 51 | X = 1.0 * X 52 | for box in boxes: 53 | #print prob.shape, fts.shape 54 | z0, y0, x0, z1, y1, x1 = box.bbox 55 | #ft.append((z1-z0)*(y1-y0)*(x1-x0)) 56 | prob_roi = prob[z0:z1,y0:y1,x0:x1] 57 | za, ya, xa, zz, zy, zx, yy, yx, xx = pyadsb3.norm3d(prob_roi) 58 | zc = za + z0 59 | yc = ya + y0 60 | xc = xa + x0 61 | 62 | cov = np.array([[zz, zy, zx], 63 | [zy, yy, yx], 64 | [zx, yx, xx]], dtype=np.float32) 65 | eig, _ = np.linalg.eig(cov) 66 | #print zc, yc, xc, '------', (z0+z1)/2.0, (y0+y1)/2.0, (x0+x1)/2.0 67 | 68 | weight_sum = np.sum(prob_roi) 69 | UNIT = SPACING * SPACING * SPACING 70 | prob_sum = weight_sum * UNIT 71 | 72 | eig = sorted(list(eig), reverse=True) 73 | 74 | pos = (zc/Z, yc/Y, xc/X) 75 | #box = (z0/Z, y0/Y, x0/X, z1/Z, y1/Y, x1/X) 76 | 77 | one = [prob_sum, math.atan2(eig[0], eig[2])] 78 | if not fts is None: 79 | fts_roi = fts[z0:z1,y0:y1,x0:x1,:] 80 | fts_sum = np.sum(fts_roi, axis=(0,1,2)) 81 | one.extend(list(fts_sum/weight_sum)) 82 | nodules.append((prob_sum, pos, one, box.bbox)) 83 | pass 84 | nodules = sorted(nodules, key=lambda x: -x[0]) 85 | return dim, nodules 86 | 87 | def logits2prob (v, scope='logits2prob'): 88 | with tf.name_scope(scope): 89 | shape = tf.shape(v) # (?, ?, ?, 2) 90 | # softmax 91 | v = tf.reshape(v, (-1, 2)) 92 | v = tf.nn.softmax(v) 93 | v = tf.reshape(v, shape) 94 | # keep prob of 1 
only 95 | v = tf.slice(v, [0, 0, 0, 1], [-1, -1, -1, -1]) 96 | # remove trailing dimension of 1 97 | v = tf.squeeze(v, axis=3) 98 | return v 99 | 100 | class ViewModel: 101 | def __init__ (self, X, KEEP, view, name, dir_path, node='logits:0', softmax=True): 102 | self.name = name 103 | self.view = view 104 | paths = glob(os.path.join(dir_path, '*.meta')) 105 | assert len(paths) == 1 106 | path = os.path.splitext(paths[0])[0] 107 | mg = meta_graph.read_meta_graph_file(path + '.meta') 108 | if KEEP is None: 109 | fts, = tf.import_graph_def(mg.graph_def, name=name, 110 | input_map={'images:0':X}, 111 | return_elements=[node]) 112 | else: 113 | fts, = tf.import_graph_def(mg.graph_def, name=name, 114 | input_map={'images:0':X, 'keep:0':KEEP}, 115 | return_elements=[node]) 116 | if softmax: 117 | fts = logits2prob(fts) 118 | self.fts = fts 119 | self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name) 120 | self.loader = lambda sess: self.saver.restore(sess, path) 121 | pass 122 | 123 | MODE_AXIAL = 1 124 | MODE_SAGITTAL = 2 125 | MODE_CORONAL = 3 126 | MODE_MIN = 4 127 | 128 | class Model: 129 | def __init__ (self, prob_model, prob_mode, fts_model, channels = 3, prob_dropout=False, fts_dropout=True): 130 | if channels == 1: 131 | self.X = tf.placeholder(tf.float32, shape=(None, None, None)) 132 | X4 = tf.expand_dims(self.X, axis=3) 133 | elif channels == 3: 134 | self.X = tf.placeholder(tf.float32, shape=(None, None, None, channels)) 135 | X4 = self.X 136 | else: 137 | assert False 138 | self.KEEP = tf.placeholder(tf.float32, shape=()) 139 | PROB_KEEP = None 140 | FTS_KEEP = None 141 | if prob_dropout: 142 | PROB_KEEP = self.KEEP 143 | if fts_dropout: 144 | FTS_KEEP = self.KEEP 145 | 146 | models = [] 147 | if fts_model is None: 148 | models.append(None) 149 | else: 150 | models.append(ViewModel(X4, FTS_KEEP, AXIAL, 'fts', 'models/%s' % fts_model, node='fts:0', softmax=False)) 151 | 152 | if prob_mode == MODE_AXIAL: 153 | models.append(ViewModel(X4, PROB_KEEP, 
AXIAL, 'axial', 'models/%s/axial' % prob_model)) 154 | elif prob_mode == MODE_SAGITTAL: 155 | models.append(ViewModel(X4, PROB_KEEP, SAGITTAL, 'sagittal', 'models/%s/sagittal' % prob_model)) 156 | elif prob_mode == MODE_CORONAL: 157 | models.append(ViewModel(X4, PROB_KEEP, CORONAL, 'coronal', 'models/%s/coronal' % prob_model)) 158 | else: 159 | models.append(ViewModel(X4, PROB_KEEP, AXIAL, 'axial', 'models/%s/axial' % prob_model)) 160 | models.append(ViewModel(X4, PROB_KEEP, SAGITTAL, 'sagittal', 'models/%s/sagittal' % prob_model)) 161 | models.append(ViewModel(X4, PROB_KEEP, CORONAL, 'coronal', 'models/%s/coronal' % prob_model)) 162 | self.channels = channels 163 | self.models = models 164 | self.mode = prob_mode 165 | pass 166 | 167 | def load (self, sess): 168 | for m in self.models: 169 | if m: 170 | m.loader(sess) 171 | pass 172 | 173 | def apply (self, sess, views, mask): 174 | r = [] 175 | for m in self.models: 176 | if m is None: 177 | r.append(None) 178 | continue 179 | cc = views[m.view] 180 | images = cc.images 181 | N, H, W = images.shape 182 | 183 | fts = None #np.zeros_like(images, dtype=np.float32) 184 | margin = 0 185 | if self.channels == 3: 186 | margin = GAP 187 | 188 | fts = None 189 | off = margin 190 | while off < N-margin: 191 | nxt = min(off + BATCH, N-margin) 192 | x = np.zeros((nxt-off, H, W, FLAGS.channels), dtype=np.float32) 193 | i = 0 194 | for j in range(off, nxt): 195 | if self.channels == 1: 196 | x[i] = images[j] 197 | elif self.channels == 3: 198 | x[i,:,:,0] = images[j-GAP] 199 | x[i,:,:,1] = images[j] 200 | x[i,:,:,2] = images[j+GAP] 201 | else: 202 | assert False 203 | i += 1 204 | pass 205 | assert i == x.shape[0] 206 | y, = sess.run([m.fts], feed_dict={self.X:x, self.KEEP:1.0}) 207 | if fts is None: 208 | fts = np.zeros((N,) + y.shape[1:], dtype=np.float32) 209 | fts[off:nxt] = y 210 | off = nxt 211 | pass 212 | assert off == N - margin 213 | if m.view != AXIAL: 214 | fts = cc.transpose_array(AXIAL, fts) 215 | r.append(fts) 
216 | pass 217 | if len(r) == 2: 218 | prob = r[1] 219 | elif len(r) == 4: 220 | prob = r[1] 221 | np.minimum(prob, r[2], prob) 222 | np.minimum(prob, r[3], prob) 223 | else: 224 | assert False 225 | 226 | if not mask is None: 227 | pre_sum = np.sum(prob) 228 | prob *= mask 229 | post_sum = np.sum(prob) 230 | logging.info('mask reduction %f' % ((pre_sum-post_sum)/pre_sum)) 231 | prob = np.ascontiguousarray(prob) 232 | return extract_nodules(prob, r[0]) 233 | pass 234 | 235 | 236 | def combine (dim, nodules): 237 | if len(nodules) == 0 or nodules[0][0] < MIN_NODULE_SIZE: 238 | return [0] * dim 239 | else: 240 | return nodules[0][2] 241 | 242 | def pyramid (dim, nodules): 243 | parts = [] 244 | for _ in range(TOTAL_PARTS): 245 | parts.append([]) 246 | for w, pos, ft, box in nodules: 247 | z, y, x = pos 248 | off = 0 249 | for LZ, LY, LX in PARTITIONS: 250 | zi = min(int(math.floor(z * LZ)), LZ-1) 251 | yi = min(int(math.floor(y * LY)), LY-1) 252 | xi = min(int(math.floor(x * LX)), LX-1) 253 | pi = off + (zi * LY + yi) * LX + xi 254 | off += LZ * LY * LX 255 | assert pi < off 256 | parts[pi].append((w, pos, ft)) 257 | pass 258 | assert off == TOTAL_PARTS 259 | pass 260 | ft = [] 261 | for nodules in parts: 262 | ft.extend(combine(dim, nodules)) 263 | pass 264 | return ft 265 | 266 | flags = tf.app.flags 267 | FLAGS = flags.FLAGS 268 | flags.DEFINE_string('prob', 'nodule', 'prob model') # prob model 269 | #original default is luna.ns.3c 270 | flags.DEFINE_string('fts', 'ft', 'fts model') # ft model 271 | flags.DEFINE_string('score', 'score', 'score model') # ft model 272 | #flags.DEFINE_string('mask', None, 'mask') 273 | flags.DEFINE_integer('mode', MODE_AXIAL, '') # use axial instead of min of 3 views 274 | flags.DEFINE_integer('channels', 3, '') 275 | flags.DEFINE_integer('bits', 8, '') 276 | flags.DEFINE_integer('stride', 16, '') 277 | flags.DEFINE_integer('dilate', 10, '') 278 | flags.DEFINE_bool('prob_dropout', False, '') 279 | flags.DEFINE_bool('fts_dropout', 
True, '') 280 | flags.DEFINE_string('input', None, '') 281 | flags.DEFINE_string('output', None, '') 282 | 283 | def pred_wrap (Xin): 284 | Yout = model.predict_proba(Xin)[:,1] 285 | return Yout 286 | 287 | def save_mesh (binary, path): 288 | binary = mesh.pad(binary, dtype=np.float) 289 | binary = gaussian_filter(binary, 2, mode='constant') 290 | verts, faces = measure.marching_cubes(binary, 0.5) 291 | Three(path, verts, faces) 292 | 293 | def box_center (box, view): 294 | z0, y0, x0, z1, y1, x1 = box 295 | if view == AXIAL: 296 | return (z0+z1)//2, (y0, x0, y1, x1) 297 | elif view == SAGITTAL: 298 | return (x0+x1)//2, (y0, z0, y1, z1) 299 | elif view == CORONAL: 300 | return (y0+y1)//2, (z0, x0, z1, x1) 301 | else: 302 | assert False 303 | pass 304 | 305 | 306 | def main (argv): 307 | nodule_model = Model(FLAGS.prob, FLAGS.mode, FLAGS.fts, FLAGS.channels, FLAGS.prob_dropout, FLAGS.fts_dropout) 308 | with open(os.path.join('models', FLAGS.score), 'rb') as f: 309 | score_model = pickle.load(f) 310 | 311 | case = FsCase(FLAGS.input) 312 | 313 | case.normalizeHU() 314 | case = case.rescale3D(SPACING) 315 | lung, _ = mesh.segment_lung(case.images) 316 | save_mesh(lung, os.path.join(FLAGS.output, 'lung')) 317 | mask = mesh.convex_hull(lung) 318 | #body, _ = mesh.segment_body(case.images) 319 | #save_mesh(body, os.path.join(FLAGS.output, 'body')) 320 | case.standardize_color() 321 | 322 | case.round_stride(FLAGS.stride) 323 | 324 | mask = case.copy_replace_images(mask) 325 | mask.round_stride(FLAGS.stride) 326 | mask = mask.images 327 | 328 | views = [case.transpose(AXIAL), 329 | case.transpose(SAGITTAL), 330 | case.transpose(CORONAL)] 331 | 332 | if FLAGS.dilate > 0: 333 | ksize = FLAGS.dilate * 2 + 1 334 | mask = grey_dilation(mask, size=(ksize, ksize, ksize), mode='constant') 335 | pass 336 | if True: 337 | with tf.Session() as sess: 338 | tf.global_variables_initializer().run() 339 | nodule_model.load(sess) 340 | dim, nodules = nodule_model.apply(sess, views, mask) 
341 | pass 342 | pass 343 | else: 344 | dim = 11 345 | nodules = [] 346 | 347 | fts = [] 348 | pos = [] 349 | #print(nodules) 350 | fts.append(pyramid(dim, nodules)) # global 351 | pos.append(None) # global 352 | for nodule in nodules: 353 | fts.append(pyramid(dim, [nodule])) 354 | pos.append(nodule[3]) 355 | pass 356 | Nt = np.array(fts, dtype=np.float32) 357 | Ny = score_model.predict_proba(Nt)[:,1] 358 | global_score = float(Ny[0]) 359 | #print('GLOBAL SCORE:', global_score) 360 | pw = sorted(zip(pos, list(Ny)), key=lambda x:x[1], reverse=True) 361 | 362 | gal = Gallery(FLAGS.output, cols=5, header=['nodule','score','axial','sagittal','coronal']) 363 | anno = Annotations() 364 | C = 1 365 | for box, score in pw: 366 | if box is None: 367 | continue 368 | if score < 0.1: 369 | break 370 | anno.add(box, str(score)) 371 | gal.text('%d' % C) 372 | gal.text('%.4f' % score) 373 | for v in VIEWS: 374 | cc, (y0, x0, y1, x1) = box_center(box, v) 375 | view = views[v] 376 | image = get3c(view.images, cc) 377 | cv2.rectangle(image, (x0,y0), (x1,y1), (0,255,255)) 378 | if v == AXIAL: 379 | image = cv2.flip(image, 1) 380 | elif v == SAGITTAL: 381 | image = cv2.transpose(image) 382 | image = cv2.flip(image, 0) 383 | elif v == CORONAL: 384 | image = cv2.flip(image, -1) 385 | cv2.imwrite(gal.next(), image) 386 | pass 387 | C += 1 388 | pass 389 | gal.flush('plumo.html', extra={'score':global_score}) 390 | Papaya(os.path.join(FLAGS.output, 'papaya'), case, annotations=anno) 391 | pass 392 | 393 | if __name__ == '__main__': 394 | logging.basicConfig(level=logging.INFO) 395 | tf.app.run() 396 | 397 | -------------------------------------------------------------------------------- /src-old/pyadsb3.cpp: -------------------------------------------------------------------------------- 1 | #ifdef _OPENMP 2 | #include 3 | #endif 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include "mesh.h" 12 | 13 | using namespace std; 14 | namespace 
python = boost::python; 15 | 16 | namespace { 17 | 18 | void shrink (PyArrayObject *array, float ratio) { 19 | Py_INCREF(array); 20 | if (array->nd != 2) throw runtime_error("not 2d array"); 21 | if (array->descr->type_num != NPY_BOOL) throw runtime_error("not bool array"); 22 | //auto rows = array->dimensions[0]; 23 | //auto cols = array->dimensions[1]; 24 | //cout << "XX " << rows << "x" << cols << endl; 25 | Py_DECREF(array); 26 | } 27 | 28 | void hull (PyObject *_array, PyObject *_dest) { 29 | PyArrayObject *array((PyArrayObject *)_array); 30 | PyArrayObject *dest((PyArrayObject *)_dest); 31 | if (array->nd != 3) throw runtime_error("not 3d array"); 32 | if (dest->nd != 3) throw runtime_error("not 3d array"); 33 | if (array->descr->type_num != NPY_UINT8) throw runtime_error("not uint8 array"); 34 | if (dest->descr->type_num != NPY_UINT8) throw runtime_error("not uint8 array"); 35 | static unsigned orders[][4] = { 36 | {1,2,0, 0}, 37 | {2,0,1, 0}, 38 | {0,1,2, 0} 39 | }; 40 | for (unsigned no = 0; no < 3; ++no) { 41 | unsigned d1 = orders[no][0]; 42 | unsigned d2 = orders[no][1]; 43 | unsigned d3 = orders[no][2]; 44 | unsigned fill0 = orders[no][3]; 45 | auto Z = array->dimensions[d1]; 46 | auto Y = array->dimensions[d2]; 47 | auto X = array->dimensions[d3]; 48 | auto from_sZ = array->strides[d1]; 49 | auto from_sY = array->strides[d2]; 50 | auto from_sX = array->strides[d3]; 51 | auto to_sZ = dest->strides[d1]; 52 | auto to_sY = dest->strides[d2]; 53 | auto to_sX = dest->strides[d3]; 54 | //cout << Z << ':' << from_sZ << ' ' << Y << ':' << from_sY << ' ' << X << ':' << from_sX << endl; 55 | auto from = reinterpret_cast(array->data); 56 | auto to = reinterpret_cast(dest->data); 57 | for (auto z = 0; z < Z; ++z) { 58 | auto from_y = from; 59 | auto to_y = to; 60 | for (auto y = 0; y < Y; ++y) { 61 | auto from_x = from_y; 62 | auto to_x = to_y; 63 | 64 | long int lb = 0; 65 | for (long int x = 0, o1 = 0, o2 = 0; 66 | x < X; 67 | ++x, o1 += from_sX, o2 += to_sX) { 
// forward 68 | if (from_x[o1]) { 69 | lb = o2; 70 | to_x[o2] = 1; 71 | break; 72 | } 73 | } 74 | long int rb = (X-1) * to_sX; 75 | for (long int x = 0, o1 = (X-1) * from_sX, o2 = (X-1) * to_sX; 76 | x < X; 77 | ++x, o1 -= from_sX, o2 -= to_sX) { // backward 78 | if (from_x[o1]) { 79 | rb = o2; 80 | to_x[o2] = 1; 81 | break; 82 | } 83 | } 84 | if (fill0) for (lb += to_sX; lb < rb; lb += to_sX) { 85 | to_x[lb] = 0; 86 | } 87 | from_y += from_sY; 88 | to_y += to_sY; 89 | } 90 | from += from_sZ; 91 | to += to_sZ; 92 | } 93 | } 94 | } 95 | 96 | void color_mesh (PyObject *_v, PyObject *_f, string const &path) { 97 | PyArrayObject *verts((PyArrayObject *)_v); 98 | PyArrayObject *faces((PyArrayObject *)_f); 99 | 100 | Mesh m; 101 | 102 | unsigned nv = verts->dimensions[0]; 103 | unsigned vs = verts->strides[0]; 104 | CHECK(verts->nd == 2); 105 | CHECK(verts->descr->type_num == NPY_FLOAT64); 106 | CHECK(verts->dimensions[1] == 3); 107 | Mesh::VertexIterator vi = vcg::tri::Allocator::AddVertices(m, nv); 108 | Mesh::VertexIterator v0 = vi; 109 | char const *pv = verts->data; 110 | for (unsigned i = 0; i < nv; ++i, pv += vs, ++vi) { 111 | double const *pp = reinterpret_cast(pv); 112 | vi->P() = Mesh::CoordType(pp[2], pp[1], pp[0]); 113 | } 114 | 115 | unsigned nf = faces->dimensions[0]; 116 | unsigned fs = faces->strides[0]; 117 | CHECK(faces->nd == 2); 118 | CHECK(faces->descr->type_num == NPY_INT64); 119 | CHECK(faces->dimensions[1] == 3); 120 | Mesh::FaceIterator fi = vcg::tri::Allocator<::Mesh>::AddFaces(m, nf); 121 | char const *pf = faces->data; 122 | for (unsigned i = 0; i < nf; ++i, pf += fs, ++fi) { 123 | int64_t const *pp = reinterpret_cast(pf); 124 | fi->V(0) = &(*(v0 + pp[0])); 125 | fi->V(1) = &(*(v0 + pp[1])); 126 | fi->V(2) = &(*(v0 + pp[2])); 127 | } 128 | //vcg::tri::io::ExporterPLY::Save(m, path.c_str(), vcg::tri::io::SAVE_MASK); 129 | MeshModelParams params; 130 | MeshModel model(params); 131 | model.apply(m); 132 | vcg::tri::io::ExporterPLY::Save(m, 
path.c_str(), vcg::tri::io::SAVE_MASK ); 133 | } 134 | 135 | void save_mesh (PyObject *_v, PyObject *_f, string const &path) { 136 | PyArrayObject *verts((PyArrayObject *)_v); 137 | PyArrayObject *faces((PyArrayObject *)_f); 138 | 139 | Mesh m; 140 | 141 | unsigned nv = verts->dimensions[0]; 142 | unsigned vs = verts->strides[0]; 143 | CHECK(verts->nd == 2); 144 | CHECK(verts->descr->type_num == NPY_FLOAT64); 145 | CHECK(verts->dimensions[1] == 3); 146 | Mesh::VertexIterator vi = vcg::tri::Allocator::AddVertices(m, nv); 147 | Mesh::VertexIterator v0 = vi; 148 | char const *pv = verts->data; 149 | for (unsigned i = 0; i < nv; ++i, pv += vs, ++vi) { 150 | double const *pp = reinterpret_cast(pv); 151 | vi->P() = Mesh::CoordType(pp[2], pp[1], pp[0]); 152 | } 153 | 154 | unsigned nf = faces->dimensions[0]; 155 | unsigned fs = faces->strides[0]; 156 | CHECK(faces->nd == 2); 157 | CHECK(faces->descr->type_num == NPY_INT64); 158 | CHECK(faces->dimensions[1] == 3); 159 | Mesh::FaceIterator fi = vcg::tri::Allocator<::Mesh>::AddFaces(m, nf); 160 | char const *pf = faces->data; 161 | for (unsigned i = 0; i < nf; ++i, pf += fs, ++fi) { 162 | int64_t const *pp = reinterpret_cast(pf); 163 | fi->V(0) = &(*(v0 + pp[0])); 164 | fi->V(1) = &(*(v0 + pp[1])); 165 | fi->V(2) = &(*(v0 + pp[2])); 166 | } 167 | vcg::tri::io::ExporterPLY::Save(m, path.c_str(), vcg::tri::io::SAVE_MASK ); 168 | } 169 | 170 | void decode3 (uint8_t from, float *to) { 171 | to[2] = from % 6; 172 | from /= 6; 173 | to[1] = from % 6; 174 | from /= 6; 175 | to[0] = from; 176 | } 177 | 178 | void decode_cell (float const *from, uint8_t *label, float *ft, vector *cache) { 179 | if (from[0] == 0) { 180 | label[0] = 0; 181 | ft[0] = ft[1] = ft[2] = ft[3] = ft[4] = ft[5] = ft[6] = ft[7] = ft[8] = 0; 182 | } 183 | else { 184 | if (cache->empty()) { 185 | cache->resize(9); 186 | float *cp = &cache->at(0); 187 | decode3(uint8_t(from[0]), cp); 188 | cp += 3; 189 | decode3(uint8_t(from[1]), cp); 190 | cp += 3; 191 | 
decode3(uint8_t(from[2]), cp); 192 | //*cp = uint8_t(from[2]); 193 | } 194 | float const *cp = &cache->at(0); 195 | label[0] = 1; 196 | ft[0] = cp[0]; 197 | ft[1] = cp[1]; 198 | ft[2] = cp[2]; 199 | ft[3] = cp[3]; 200 | ft[4] = cp[4]; 201 | ft[5] = cp[5]; 202 | ft[6] = cp[6]; 203 | ft[7] = cp[7]; 204 | ft[8] = cp[8]; 205 | } 206 | } 207 | 208 | void decode_cell_old (float const *from, uint8_t *label, float *ft, vector *cache) { 209 | if (from[0] == 0) { 210 | label[0] = 0; 211 | ft[0] = ft[1] = ft[2] = ft[3] = ft[4] = ft[5] = ft[6] = 0; 212 | } 213 | else { 214 | if (cache->empty()) { 215 | cache->resize(7); 216 | float *cp = &cache->at(0); 217 | decode3(uint8_t(from[0]), cp); 218 | cp += 3; 219 | decode3(uint8_t(from[1]), cp); 220 | cp += 3; 221 | *cp = uint8_t(from[2]); 222 | } 223 | float const *cp = &cache->at(0); 224 | label[0] = 1; 225 | ft[0] = cp[0]; 226 | ft[1] = cp[1]; 227 | ft[2] = cp[2]; 228 | ft[3] = cp[3]; 229 | ft[4] = cp[4]; 230 | ft[5] = cp[5]; 231 | ft[6] = cp[6]; 232 | } 233 | } 234 | 235 | template 236 | T *walk (T *p, int stride) { 237 | return (T *)((char *)p + stride); 238 | } 239 | 240 | template 241 | T const *walk (T const *p, int stride) { 242 | return (T *)((char const *)p + stride); 243 | } 244 | 245 | python::tuple decode_labels (PyObject *_array) { 246 | PyArrayObject *array((PyArrayObject *)_array); 247 | if (array->nd != 4) throw runtime_error("not 4d array"); 248 | if (array->descr->type_num != NPY_FLOAT32) throw runtime_error("not float32 array"); 249 | if (array->dimensions[0] != 1) throw runtime_error("not rgb image"); 250 | if (array->dimensions[3] != 3) throw runtime_error("not rgb image"); 251 | vector label_dims{1, 252 | array->dimensions[1], 253 | array->dimensions[2], 254 | 1}; 255 | vector ft_dims{1, 256 | array->dimensions[1], 257 | array->dimensions[2], 258 | 9}; 259 | PyArrayObject *label = (PyArrayObject*)PyArray_SimpleNew(label_dims.size(), &label_dims[0], NPY_UINT8); 260 | PyArrayObject *ft = (PyArrayObject 
*)PyArray_SimpleNew(ft_dims.size(), &ft_dims[0], NPY_FLOAT32); 261 | 262 | auto from_row = reinterpret_cast(array->data); 263 | auto to_lb_row = reinterpret_cast(label->data); 264 | auto to_ft_row = reinterpret_cast(ft->data); 265 | vector cache; 266 | for (unsigned y = 0; y < label_dims[1]; ++y) { 267 | auto from = from_row; 268 | auto to_lb = to_lb_row; 269 | auto to_ft = to_ft_row; 270 | for (unsigned x = 0; x < label_dims[2]; ++x) { 271 | decode_cell(from, to_lb, to_ft, &cache); 272 | from = walk(from, array->strides[2]); 273 | to_lb = walk(to_lb, label->strides[2]); 274 | to_ft = walk(to_ft, ft->strides[2]); 275 | } 276 | from_row = walk(from_row, array->strides[1]); 277 | to_lb_row = walk(to_lb_row, label->strides[1]); 278 | to_ft_row = walk(to_ft_row, ft->strides[1]); 279 | } 280 | PyArrayObject *cache_ft; 281 | { 282 | npy_intp dim[] = {cache.size()}; 283 | cache_ft = (PyArrayObject *)PyArray_SimpleNew(1, dim, NPY_FLOAT32); 284 | std::copy(cache.begin(), cache.end(), (float *)cache_ft->data); 285 | } 286 | 287 | return python::make_tuple(python::object(boost::python::handle<>((PyObject*)label)), 288 | python::object(boost::python::handle<>((PyObject*)ft)), 289 | python::object(boost::python::handle<>((PyObject*)cache_ft)) 290 | ); 291 | } 292 | 293 | python::tuple decode_labels_old (PyObject *_array) { 294 | PyArrayObject *array((PyArrayObject *)_array); 295 | if (array->nd != 4) throw runtime_error("not 4d array"); 296 | if (array->descr->type_num != NPY_FLOAT32) throw runtime_error("not float32 array"); 297 | if (array->dimensions[0] != 1) throw runtime_error("not rgb image"); 298 | if (array->dimensions[3] != 3) throw runtime_error("not rgb image"); 299 | vector label_dims{1, 300 | array->dimensions[1], 301 | array->dimensions[2], 302 | 1}; 303 | vector ft_dims{1, 304 | array->dimensions[1], 305 | array->dimensions[2], 306 | 7}; 307 | PyArrayObject *label = (PyArrayObject*)PyArray_SimpleNew(label_dims.size(), &label_dims[0], NPY_UINT8); 308 | 
PyArrayObject *ft = (PyArrayObject *)PyArray_SimpleNew(ft_dims.size(), &ft_dims[0], NPY_FLOAT32); 309 | 310 | auto from_row = reinterpret_cast(array->data); 311 | auto to_lb_row = reinterpret_cast(label->data); 312 | auto to_ft_row = reinterpret_cast(ft->data); 313 | vector cache; 314 | for (unsigned y = 0; y < label_dims[1]; ++y) { 315 | auto from = from_row; 316 | auto to_lb = to_lb_row; 317 | auto to_ft = to_ft_row; 318 | for (unsigned x = 0; x < label_dims[2]; ++x) { 319 | decode_cell_old(from, to_lb, to_ft, &cache); 320 | from = walk(from, array->strides[2]); 321 | to_lb = walk(to_lb, label->strides[2]); 322 | to_ft = walk(to_ft, ft->strides[2]); 323 | } 324 | from_row = walk(from_row, array->strides[1]); 325 | to_lb_row = walk(to_lb_row, label->strides[1]); 326 | to_ft_row = walk(to_ft_row, ft->strides[1]); 327 | } 328 | PyArrayObject *cache_ft; 329 | { 330 | npy_intp dim[] = {cache.size()}; 331 | cache_ft = (PyArrayObject *)PyArray_SimpleNew(1, dim, NPY_FLOAT32); 332 | std::copy(cache.begin(), cache.end(), (float *)cache_ft->data); 333 | } 334 | 335 | return python::make_tuple(python::object(boost::python::handle<>((PyObject*)label)), 336 | python::object(boost::python::handle<>((PyObject*)ft)), 337 | python::object(boost::python::handle<>((PyObject*)cache_ft)) 338 | ); 339 | } 340 | 341 | python::tuple norm3d (PyObject *_array) { 342 | PyArrayObject *array((PyArrayObject *)_array); 343 | if (array->nd != 3) throw runtime_error("not 3d array"); 344 | if (array->descr->type_num != NPY_FLOAT32) throw runtime_error("not float32 array"); 345 | auto Z = array->dimensions[0]; 346 | auto Y = array->dimensions[1]; 347 | auto X = array->dimensions[2]; 348 | auto sZ = array->strides[0]; 349 | auto sY = array->strides[1]; 350 | auto sX = array->strides[2]; 351 | //cout << Z << ':' << from_sZ << ' ' << Y << ':' << from_sY << ' ' << X << ':' << from_sX << endl; 352 | float sum_Z = 0, sum_Y = 0, sum_X = 0, sum = 0; 353 | auto from_z = reinterpret_cast(array->data); 354 | 
for (auto z = 0; z < Z; ++z, from_z += sZ) { 355 | auto from_y = from_z; 356 | for (auto y = 0; y < Y; ++y, from_y += sY) { 357 | auto from_x = from_y; 358 | for (auto x = 0; x < X; ++x, from_x += sX) { 359 | float v = *reinterpret_cast(from_x); 360 | sum += v; 361 | sum_Z += v * z; 362 | sum_Y += v * y; 363 | sum_X += v * x; 364 | } 365 | } 366 | } 367 | float avg_Z = sum_Z / sum, 368 | avg_Y = sum_Y / sum, 369 | avg_X = sum_X / sum; 370 | float sum_ZZ = 0, sum_YY = 0, sum_XX = 0; 371 | float sum_ZY = 0, sum_ZX = 0, sum_YX = 0; 372 | from_z = reinterpret_cast(array->data); 373 | float sum2 = 0; 374 | for (auto z = 0; z < Z; ++z, from_z += sZ) { 375 | auto from_y = from_z; 376 | float zz = z - avg_Z; 377 | for (auto y = 0; y < Y; ++y, from_y += sY) { 378 | auto from_x = from_y; 379 | float yy = y - avg_Y; 380 | for (auto x = 0; x < X; ++x, from_x += sX) { 381 | float v = *reinterpret_cast(from_x); 382 | float xx = x - avg_X; 383 | sum_ZZ += v * zz * zz; 384 | sum_ZY += v * yy * zz; 385 | sum_ZX += v * xx * zz; 386 | sum_YY += v * yy * yy; 387 | sum_YX += v * xx * yy; 388 | sum_XX += v * xx * xx; 389 | sum2 += v; 390 | } 391 | } 392 | } 393 | CHECK(sum == sum2); 394 | return python::make_tuple(avg_Z, avg_Y, avg_X, 395 | sum_ZZ/sum, sum_ZY/sum, sum_ZX/sum, 396 | sum_YY/sum, sum_YX/sum, 397 | sum_XX/sum); 398 | } 399 | } 400 | 401 | BOOST_PYTHON_MODULE(pyadsb3) 402 | { 403 | import_array(); 404 | python::numeric::array::set_module_and_type("numpy", "ndarray"); 405 | python::def("shrink", ::shrink); 406 | python::def("hull", ::hull); 407 | python::def("norm3d", ::norm3d); 408 | python::def("save_mesh", ::save_mesh); 409 | python::def("color_mesh", ::color_mesh); 410 | python::def("decode_labels", ::decode_labels); 411 | python::def("decode_labels_old", ::decode_labels_old); 412 | } 413 | 414 | 415 | -------------------------------------------------------------------------------- /src-old/run.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /adsb3 4 | ./process.py --input /input --output /output 5 | 6 | -------------------------------------------------------------------------------- /src-old/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup, Extension 2 | 3 | pyadsb3 = Extension('pyadsb3', 4 | language = 'c++', 5 | extra_compile_args = ['-O3', '-std=c++1y'], 6 | libraries = ['boost_python', 'glog'], 7 | include_dirs = ['/usr/local/include'], 8 | library_dirs = ['/usr/local/lib'], 9 | sources = ['pyadsb3.cpp'] 10 | ) 11 | 12 | setup (name = 'pyadsb3', 13 | version = '0.0.1', 14 | author = 'Wei Dong and Yuanfang Guan', 15 | author_email = 'wdong@wdong.org', 16 | license = 'MIT', 17 | description = 'This is a demo package', 18 | ext_modules = [pyadsb3], 19 | ) 20 | -------------------------------------------------------------------------------- /src-old/static/Detector.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @author alteredq / http://alteredqualia.com/ 3 | * @author mr.doob / http://mrdoob.com/ 4 | */ 5 | 6 | var Detector = { 7 | 8 | canvas: !! window.CanvasRenderingContext2D, 9 | webgl: ( function () { 10 | 11 | try { 12 | 13 | var canvas = document.createElement( 'canvas' ); return !! ( window.WebGLRenderingContext && ( canvas.getContext( 'webgl' ) || canvas.getContext( 'experimental-webgl' ) ) ); 14 | 15 | } catch ( e ) { 16 | 17 | return false; 18 | 19 | } 20 | 21 | } )(), 22 | workers: !! 
window.Worker, 23 | fileapi: window.File && window.FileReader && window.FileList && window.Blob, 24 | 25 | getWebGLErrorMessage: function () { 26 | 27 | var element = document.createElement( 'div' ); 28 | element.id = 'webgl-error-message'; 29 | element.style.fontFamily = 'monospace'; 30 | element.style.fontSize = '13px'; 31 | element.style.fontWeight = 'normal'; 32 | element.style.textAlign = 'center'; 33 | element.style.background = '#fff'; 34 | element.style.color = '#000'; 35 | element.style.padding = '1.5em'; 36 | element.style.width = '400px'; 37 | element.style.margin = '5em auto 0'; 38 | 39 | if ( ! this.webgl ) { 40 | 41 | element.innerHTML = window.WebGLRenderingContext ? [ 42 | 'Your graphics card does not seem to support WebGL.
', 43 | 'Find out how to get it here.' 44 | ].join( '\n' ) : [ 45 | 'Your browser does not seem to support WebGL.
', 46 | 'Find out how to get it here.' 47 | ].join( '\n' ); 48 | 49 | } 50 | 51 | return element; 52 | 53 | }, 54 | 55 | addGetWebGLMessage: function ( parameters ) { 56 | 57 | var parent, id, element; 58 | 59 | parameters = parameters || {}; 60 | 61 | parent = parameters.parent !== undefined ? parameters.parent : document.body; 62 | id = parameters.id !== undefined ? parameters.id : 'oldie'; 63 | 64 | element = Detector.getWebGLErrorMessage(); 65 | element.id = id; 66 | 67 | parent.appendChild( element ); 68 | 69 | } 70 | 71 | }; 72 | 73 | // browserify support 74 | if ( typeof module === 'object' ) { 75 | 76 | module.exports = Detector; 77 | 78 | } 79 | -------------------------------------------------------------------------------- /src-old/static/OBJLoader.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @author mrdoob / http://mrdoob.com/ 3 | */ 4 | 5 | THREE.OBJLoader = function ( manager ) { 6 | 7 | this.manager = ( manager !== undefined ) ? 
manager : THREE.DefaultLoadingManager; 8 | 9 | this.materials = null; 10 | 11 | this.regexp = { 12 | // v float float float 13 | vertex_pattern : /^v\s+([\d|\.|\+|\-|e|E]+)\s+([\d|\.|\+|\-|e|E]+)\s+([\d|\.|\+|\-|e|E]+)/, 14 | // vn float float float 15 | normal_pattern : /^vn\s+([\d|\.|\+|\-|e|E]+)\s+([\d|\.|\+|\-|e|E]+)\s+([\d|\.|\+|\-|e|E]+)/, 16 | // vt float float 17 | uv_pattern : /^vt\s+([\d|\.|\+|\-|e|E]+)\s+([\d|\.|\+|\-|e|E]+)/, 18 | // f vertex vertex vertex 19 | face_vertex : /^f\s+(-?\d+)\s+(-?\d+)\s+(-?\d+)(?:\s+(-?\d+))?/, 20 | // f vertex/uv vertex/uv vertex/uv 21 | face_vertex_uv : /^f\s+(-?\d+)\/(-?\d+)\s+(-?\d+)\/(-?\d+)\s+(-?\d+)\/(-?\d+)(?:\s+(-?\d+)\/(-?\d+))?/, 22 | // f vertex/uv/normal vertex/uv/normal vertex/uv/normal 23 | face_vertex_uv_normal : /^f\s+(-?\d+)\/(-?\d+)\/(-?\d+)\s+(-?\d+)\/(-?\d+)\/(-?\d+)\s+(-?\d+)\/(-?\d+)\/(-?\d+)(?:\s+(-?\d+)\/(-?\d+)\/(-?\d+))?/, 24 | // f vertex//normal vertex//normal vertex//normal 25 | face_vertex_normal : /^f\s+(-?\d+)\/\/(-?\d+)\s+(-?\d+)\/\/(-?\d+)\s+(-?\d+)\/\/(-?\d+)(?:\s+(-?\d+)\/\/(-?\d+))?/, 26 | // o object_name | g group_name 27 | object_pattern : /^[og]\s*(.+)?/, 28 | // s boolean 29 | smoothing_pattern : /^s\s+(\d+|on|off)/, 30 | // mtllib file_reference 31 | material_library_pattern : /^mtllib /, 32 | // usemtl material_name 33 | material_use_pattern : /^usemtl / 34 | }; 35 | 36 | }; 37 | 38 | THREE.OBJLoader.prototype = { 39 | 40 | constructor: THREE.OBJLoader, 41 | 42 | load: function ( url, onLoad, onProgress, onError ) { 43 | 44 | var scope = this; 45 | 46 | var loader = new THREE.XHRLoader( scope.manager ); 47 | loader.setPath( this.path ); 48 | loader.load( url, function ( text ) { 49 | 50 | onLoad( scope.parse( text ) ); 51 | 52 | }, onProgress, onError ); 53 | 54 | }, 55 | 56 | setPath: function ( value ) { 57 | 58 | this.path = value; 59 | 60 | }, 61 | 62 | setMaterials: function ( materials ) { 63 | 64 | this.materials = materials; 65 | 66 | }, 67 | 68 | _createParserState : 
function () { 69 | 70 | var state = { 71 | objects : [], 72 | object : {}, 73 | 74 | vertices : [], 75 | normals : [], 76 | uvs : [], 77 | 78 | materialLibraries : [], 79 | 80 | startObject: function ( name, fromDeclaration ) { 81 | 82 | // If the current object (initial from reset) is not from a g/o declaration in the parsed 83 | // file. We need to use it for the first parsed g/o to keep things in sync. 84 | if ( this.object && this.object.fromDeclaration === false ) { 85 | 86 | this.object.name = name; 87 | this.object.fromDeclaration = ( fromDeclaration !== false ); 88 | return; 89 | 90 | } 91 | 92 | this.object = { 93 | name : name || '', 94 | geometry : { 95 | vertices : [], 96 | normals : [], 97 | uvs : [] 98 | }, 99 | material : { 100 | name : '', 101 | smooth : true 102 | }, 103 | fromDeclaration : ( fromDeclaration !== false ) 104 | }; 105 | 106 | this.objects.push( this.object ); 107 | 108 | }, 109 | 110 | parseVertexIndex: function ( value, len ) { 111 | 112 | var index = parseInt( value, 10 ); 113 | return ( index >= 0 ? index - 1 : index + len / 3 ) * 3; 114 | 115 | }, 116 | 117 | parseNormalIndex: function ( value, len ) { 118 | 119 | var index = parseInt( value, 10 ); 120 | return ( index >= 0 ? index - 1 : index + len / 3 ) * 3; 121 | 122 | }, 123 | 124 | parseUVIndex: function ( value, len ) { 125 | 126 | var index = parseInt( value, 10 ); 127 | return ( index >= 0 ? 
index - 1 : index + len / 2 ) * 2; 128 | 129 | }, 130 | 131 | addVertex: function ( a, b, c ) { 132 | 133 | var src = this.vertices; 134 | var dst = this.object.geometry.vertices; 135 | 136 | dst.push( src[ a + 0 ] ); 137 | dst.push( src[ a + 1 ] ); 138 | dst.push( src[ a + 2 ] ); 139 | dst.push( src[ b + 0 ] ); 140 | dst.push( src[ b + 1 ] ); 141 | dst.push( src[ b + 2 ] ); 142 | dst.push( src[ c + 0 ] ); 143 | dst.push( src[ c + 1 ] ); 144 | dst.push( src[ c + 2 ] ); 145 | 146 | }, 147 | 148 | addVertexLine: function ( a ) { 149 | 150 | var src = this.vertices; 151 | var dst = this.object.geometry.vertices; 152 | 153 | dst.push( src[ a + 0 ] ); 154 | dst.push( src[ a + 1 ] ); 155 | dst.push( src[ a + 2 ] ); 156 | 157 | }, 158 | 159 | addNormal : function ( a, b, c ) { 160 | 161 | var src = this.normals; 162 | var dst = this.object.geometry.normals; 163 | 164 | dst.push( src[ a + 0 ] ); 165 | dst.push( src[ a + 1 ] ); 166 | dst.push( src[ a + 2 ] ); 167 | dst.push( src[ b + 0 ] ); 168 | dst.push( src[ b + 1 ] ); 169 | dst.push( src[ b + 2 ] ); 170 | dst.push( src[ c + 0 ] ); 171 | dst.push( src[ c + 1 ] ); 172 | dst.push( src[ c + 2 ] ); 173 | 174 | }, 175 | 176 | addUV: function ( a, b, c ) { 177 | 178 | var src = this.uvs; 179 | var dst = this.object.geometry.uvs; 180 | 181 | dst.push( src[ a + 0 ] ); 182 | dst.push( src[ a + 1 ] ); 183 | dst.push( src[ b + 0 ] ); 184 | dst.push( src[ b + 1 ] ); 185 | dst.push( src[ c + 0 ] ); 186 | dst.push( src[ c + 1 ] ); 187 | 188 | }, 189 | 190 | addUVLine: function ( a ) { 191 | 192 | var src = this.uvs; 193 | var dst = this.object.geometry.uvs; 194 | 195 | dst.push( src[ a + 0 ] ); 196 | dst.push( src[ a + 1 ] ); 197 | 198 | }, 199 | 200 | addFace: function ( a, b, c, d, ua, ub, uc, ud, na, nb, nc, nd ) { 201 | 202 | var vLen = this.vertices.length; 203 | 204 | var ia = this.parseVertexIndex( a, vLen ); 205 | var ib = this.parseVertexIndex( b, vLen ); 206 | var ic = this.parseVertexIndex( c, vLen ); 207 | var id; 208 | 
209 | if ( d === undefined ) { 210 | 211 | this.addVertex( ia, ib, ic ); 212 | 213 | } else { 214 | 215 | id = this.parseVertexIndex( d, vLen ); 216 | 217 | this.addVertex( ia, ib, id ); 218 | this.addVertex( ib, ic, id ); 219 | 220 | } 221 | 222 | if ( ua !== undefined ) { 223 | 224 | var uvLen = this.uvs.length; 225 | 226 | ia = this.parseUVIndex( ua, uvLen ); 227 | ib = this.parseUVIndex( ub, uvLen ); 228 | ic = this.parseUVIndex( uc, uvLen ); 229 | 230 | if ( d === undefined ) { 231 | 232 | this.addUV( ia, ib, ic ); 233 | 234 | } else { 235 | 236 | id = this.parseUVIndex( ud, uvLen ); 237 | 238 | this.addUV( ia, ib, id ); 239 | this.addUV( ib, ic, id ); 240 | 241 | } 242 | 243 | } 244 | 245 | if ( na !== undefined ) { 246 | 247 | // Normals are many times the same. If so, skip function call and parseInt. 248 | var nLen = this.normals.length; 249 | ia = this.parseNormalIndex( na, nLen ); 250 | 251 | ib = na === nb ? ia : this.parseNormalIndex( nb, nLen ); 252 | ic = na === nc ? ia : this.parseNormalIndex( nc, nLen ); 253 | 254 | if ( d === undefined ) { 255 | 256 | this.addNormal( ia, ib, ic ); 257 | 258 | } else { 259 | 260 | id = this.parseNormalIndex( nd, nLen ); 261 | 262 | this.addNormal( ia, ib, id ); 263 | this.addNormal( ib, ic, id ); 264 | 265 | } 266 | 267 | } 268 | 269 | }, 270 | 271 | addLineGeometry: function ( vertices, uvs ) { 272 | 273 | this.object.geometry.type = 'Line'; 274 | 275 | var vLen = this.vertices.length; 276 | var uvLen = this.uvs.length; 277 | 278 | for ( var vi = 0, l = vertices.length; vi < l; vi ++ ) { 279 | 280 | this.addVertexLine( this.parseVertexIndex( vertices[ vi ], vLen ) ); 281 | 282 | } 283 | 284 | for ( var uvi = 0, l = uvs.length; uvi < l; uvi ++ ) { 285 | 286 | this.addUVLine( this.parseUVIndex( uvs[ uvi ], uvLen ) ); 287 | 288 | } 289 | 290 | } 291 | 292 | }; 293 | 294 | state.startObject( '', false ); 295 | 296 | return state; 297 | 298 | }, 299 | 300 | parse: function ( text ) { 301 | 302 | console.time( 
'OBJLoader' ); 303 | 304 | var state = this._createParserState(); 305 | 306 | if ( text.indexOf( '\r\n' ) !== - 1 ) { 307 | 308 | // This is faster than String.split with regex that splits on both 309 | text = text.replace( '\r\n', '\n' ); 310 | 311 | } 312 | 313 | var lines = text.split( '\n' ); 314 | var line = '', lineFirstChar = '', lineSecondChar = ''; 315 | var lineLength = 0; 316 | var result = []; 317 | 318 | // Faster to just trim left side of the line. Use if available. 319 | var trimLeft = ( typeof ''.trimLeft === 'function' ); 320 | 321 | for ( var i = 0, l = lines.length; i < l; i ++ ) { 322 | 323 | line = lines[ i ]; 324 | 325 | line = trimLeft ? line.trimLeft() : line.trim(); 326 | 327 | lineLength = line.length; 328 | 329 | if ( lineLength === 0 ) continue; 330 | 331 | lineFirstChar = line.charAt( 0 ); 332 | 333 | // @todo invoke passed in handler if any 334 | if ( lineFirstChar === '#' ) continue; 335 | 336 | if ( lineFirstChar === 'v' ) { 337 | 338 | lineSecondChar = line.charAt( 1 ); 339 | 340 | if ( lineSecondChar === ' ' && ( result = this.regexp.vertex_pattern.exec( line ) ) !== null ) { 341 | 342 | // 0 1 2 3 343 | // ["v 1.0 2.0 3.0", "1.0", "2.0", "3.0"] 344 | 345 | state.vertices.push( 346 | parseFloat( result[ 1 ] ), 347 | parseFloat( result[ 2 ] ), 348 | parseFloat( result[ 3 ] ) 349 | ); 350 | 351 | } else if ( lineSecondChar === 'n' && ( result = this.regexp.normal_pattern.exec( line ) ) !== null ) { 352 | 353 | // 0 1 2 3 354 | // ["vn 1.0 2.0 3.0", "1.0", "2.0", "3.0"] 355 | 356 | state.normals.push( 357 | parseFloat( result[ 1 ] ), 358 | parseFloat( result[ 2 ] ), 359 | parseFloat( result[ 3 ] ) 360 | ); 361 | 362 | } else if ( lineSecondChar === 't' && ( result = this.regexp.uv_pattern.exec( line ) ) !== null ) { 363 | 364 | // 0 1 2 365 | // ["vt 0.1 0.2", "0.1", "0.2"] 366 | 367 | state.uvs.push( 368 | parseFloat( result[ 1 ] ), 369 | parseFloat( result[ 2 ] ) 370 | ); 371 | 372 | } else { 373 | 374 | throw new Error( "Unexpected 
vertex/normal/uv line: '" + line + "'" ); 375 | 376 | } 377 | 378 | } else if ( lineFirstChar === "f" ) { 379 | 380 | if ( ( result = this.regexp.face_vertex_uv_normal.exec( line ) ) !== null ) { 381 | 382 | // f vertex/uv/normal vertex/uv/normal vertex/uv/normal 383 | // 0 1 2 3 4 5 6 7 8 9 10 11 12 384 | // ["f 1/1/1 2/2/2 3/3/3", "1", "1", "1", "2", "2", "2", "3", "3", "3", undefined, undefined, undefined] 385 | 386 | state.addFace( 387 | result[ 1 ], result[ 4 ], result[ 7 ], result[ 10 ], 388 | result[ 2 ], result[ 5 ], result[ 8 ], result[ 11 ], 389 | result[ 3 ], result[ 6 ], result[ 9 ], result[ 12 ] 390 | ); 391 | 392 | } else if ( ( result = this.regexp.face_vertex_uv.exec( line ) ) !== null ) { 393 | 394 | // f vertex/uv vertex/uv vertex/uv 395 | // 0 1 2 3 4 5 6 7 8 396 | // ["f 1/1 2/2 3/3", "1", "1", "2", "2", "3", "3", undefined, undefined] 397 | 398 | state.addFace( 399 | result[ 1 ], result[ 3 ], result[ 5 ], result[ 7 ], 400 | result[ 2 ], result[ 4 ], result[ 6 ], result[ 8 ] 401 | ); 402 | 403 | } else if ( ( result = this.regexp.face_vertex_normal.exec( line ) ) !== null ) { 404 | 405 | // f vertex//normal vertex//normal vertex//normal 406 | // 0 1 2 3 4 5 6 7 8 407 | // ["f 1//1 2//2 3//3", "1", "1", "2", "2", "3", "3", undefined, undefined] 408 | 409 | state.addFace( 410 | result[ 1 ], result[ 3 ], result[ 5 ], result[ 7 ], 411 | undefined, undefined, undefined, undefined, 412 | result[ 2 ], result[ 4 ], result[ 6 ], result[ 8 ] 413 | ); 414 | 415 | } else if ( ( result = this.regexp.face_vertex.exec( line ) ) !== null ) { 416 | 417 | // f vertex vertex vertex 418 | // 0 1 2 3 4 419 | // ["f 1 2 3", "1", "2", "3", undefined] 420 | 421 | state.addFace( 422 | result[ 1 ], result[ 2 ], result[ 3 ], result[ 4 ] 423 | ); 424 | 425 | } else { 426 | 427 | throw new Error( "Unexpected face line: '" + line + "'" ); 428 | 429 | } 430 | 431 | } else if ( lineFirstChar === "l" ) { 432 | 433 | var lineParts = line.substring( 1 ).trim().split( " " ); 434 | 
var lineVertices = [], lineUVs = []; 435 | 436 | if ( line.indexOf( "/" ) === - 1 ) { 437 | 438 | lineVertices = lineParts; 439 | 440 | } else { 441 | 442 | for ( var li = 0, llen = lineParts.length; li < llen; li ++ ) { 443 | 444 | var parts = lineParts[ li ].split( "/" ); 445 | 446 | if ( parts[ 0 ] !== "" ) lineVertices.push( parts[ 0 ] ); 447 | if ( parts[ 1 ] !== "" ) lineUVs.push( parts[ 1 ] ); 448 | 449 | } 450 | 451 | } 452 | state.addLineGeometry( lineVertices, lineUVs ); 453 | 454 | } else if ( ( result = this.regexp.object_pattern.exec( line ) ) !== null ) { 455 | 456 | // o object_name 457 | // or 458 | // g group_name 459 | 460 | var name = result[ 0 ].substr( 1 ).trim(); 461 | state.startObject( name ); 462 | 463 | } else if ( this.regexp.material_use_pattern.test( line ) ) { 464 | 465 | // material 466 | 467 | state.object.material.name = line.substring( 7 ).trim(); 468 | 469 | } else if ( this.regexp.material_library_pattern.test( line ) ) { 470 | 471 | // mtl file 472 | 473 | state.materialLibraries.push( line.substring( 7 ).trim() ); 474 | 475 | } else if ( ( result = this.regexp.smoothing_pattern.exec( line ) ) !== null ) { 476 | 477 | // smooth shading 478 | 479 | var value = result[ 1 ].trim().toLowerCase(); 480 | state.object.material.smooth = ( value === '1' || value === 'on' ); 481 | 482 | } else { 483 | 484 | // Handle null terminated files without exception 485 | if ( line === '\0' ) continue; 486 | 487 | throw new Error( "Unexpected line: '" + line + "'" ); 488 | 489 | } 490 | 491 | } 492 | 493 | var container = new THREE.Group(); 494 | container.materialLibraries = [].concat( state.materialLibraries ); 495 | 496 | for ( var i = 0, l = state.objects.length; i < l; i ++ ) { 497 | 498 | var object = state.objects[ i ]; 499 | var geometry = object.geometry; 500 | var isLine = ( geometry.type === 'Line' ); 501 | 502 | // Skip o/g line declarations that did not follow with any faces 503 | if ( geometry.vertices.length === 0 ) continue; 504 | 
505 | var buffergeometry = new THREE.BufferGeometry(); 506 | 507 | buffergeometry.addAttribute( 'position', new THREE.BufferAttribute( new Float32Array( geometry.vertices ), 3 ) ); 508 | 509 | if ( geometry.normals.length > 0 ) { 510 | 511 | buffergeometry.addAttribute( 'normal', new THREE.BufferAttribute( new Float32Array( geometry.normals ), 3 ) ); 512 | 513 | } else { 514 | 515 | buffergeometry.computeVertexNormals(); 516 | 517 | } 518 | 519 | if ( geometry.uvs.length > 0 ) { 520 | 521 | buffergeometry.addAttribute( 'uv', new THREE.BufferAttribute( new Float32Array( geometry.uvs ), 2 ) ); 522 | 523 | } 524 | 525 | var material; 526 | 527 | if ( this.materials !== null ) { 528 | 529 | material = this.materials.create( object.material.name ); 530 | 531 | // mtl etc. loaders probably can't create line materials correctly, copy properties to a line material. 532 | if ( isLine && material && ! ( material instanceof THREE.LineBasicMaterial ) ) { 533 | 534 | var materialLine = new THREE.LineBasicMaterial(); 535 | materialLine.copy( material ); 536 | material = materialLine; 537 | 538 | } 539 | 540 | } 541 | 542 | if ( ! material ) { 543 | 544 | material = ( ! isLine ? new THREE.MeshPhongMaterial() : new THREE.LineBasicMaterial() ); 545 | material.name = object.material.name; 546 | 547 | } 548 | 549 | material.shading = object.material.smooth ? THREE.SmoothShading : THREE.FlatShading; 550 | 551 | var mesh = ( ! isLine ? 
new THREE.Mesh( buffergeometry, material ) : new THREE.Line( buffergeometry, material ) ); 552 | mesh.name = object.name; 553 | 554 | container.add( mesh ); 555 | 556 | } 557 | 558 | console.timeEnd( 'OBJLoader' ); 559 | 560 | return container; 561 | 562 | } 563 | 564 | }; 565 | -------------------------------------------------------------------------------- /src-old/static/PLYLoader.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @author Wei Meng / http://about.me/menway 3 | * 4 | * Description: A THREE loader for PLY ASCII files (known as the Polygon 5 | * File Format or the Stanford Triangle Format). 6 | * 7 | * Limitations: ASCII decoding assumes file is UTF-8. 8 | * 9 | * Usage: 10 | * var loader = new THREE.PLYLoader(); 11 | * loader.load('./models/ply/ascii/dolphins.ply', function (geometry) { 12 | * 13 | * scene.add( new THREE.Mesh( geometry ) ); 14 | * 15 | * } ); 16 | * 17 | * If the PLY file uses non standard property names, they can be mapped while 18 | * loading. For example, the following maps the properties 19 | * “diffuse_(red|green|blue)” in the file to standard color names. 20 | * 21 | * loader.setPropertyNameMapping( { 22 | * diffuse_red: 'red', 23 | * diffuse_green: 'green', 24 | * diffuse_blue: 'blue' 25 | * } ); 26 | * 27 | */ 28 | 29 | 30 | THREE.PLYLoader = function ( manager ) { 31 | 32 | this.manager = ( manager !== undefined ) ? 
manager : THREE.DefaultLoadingManager; 33 | 34 | this.propertyNameMapping = {}; 35 | 36 | }; 37 | 38 | THREE.PLYLoader.prototype = { 39 | 40 | constructor: THREE.PLYLoader, 41 | 42 | load: function ( url, onLoad, onProgress, onError ) { 43 | 44 | var scope = this; 45 | 46 | var loader = new THREE.FileLoader( this.manager ); 47 | loader.setResponseType( 'arraybuffer' ); 48 | loader.load( url, function ( text ) { 49 | 50 | onLoad( scope.parse( text ) ); 51 | 52 | }, onProgress, onError ); 53 | 54 | }, 55 | 56 | setPropertyNameMapping: function ( mapping ) { 57 | 58 | this.propertyNameMapping = mapping; 59 | 60 | }, 61 | 62 | parse: function ( data ) { 63 | 64 | function isASCII( data ) { 65 | 66 | var header = parseHeader( bin2str( data ) ); 67 | return header.format === 'ascii'; 68 | 69 | } 70 | 71 | function bin2str( buf ) { 72 | 73 | var array_buffer = new Uint8Array( buf ); 74 | var str = ''; 75 | 76 | for ( var i = 0; i < buf.byteLength; i ++ ) { 77 | 78 | str += String.fromCharCode( array_buffer[ i ] ); // implicitly assumes little-endian 79 | 80 | } 81 | 82 | return str; 83 | 84 | } 85 | 86 | function parseHeader( data ) { 87 | 88 | var patternHeader = /ply([\s\S]*)end_header\s/; 89 | var headerText = ''; 90 | var headerLength = 0; 91 | var result = patternHeader.exec( data ); 92 | 93 | if ( result !== null ) { 94 | 95 | headerText = result [ 1 ]; 96 | headerLength = result[ 0 ].length; 97 | 98 | } 99 | 100 | var header = { 101 | comments: [], 102 | elements: [], 103 | headerLength: headerLength 104 | }; 105 | 106 | var lines = headerText.split( '\n' ); 107 | var currentElement; 108 | var lineType, lineValues; 109 | 110 | function make_ply_element_property( propertValues, propertyNameMapping ) { 111 | 112 | var property = { type: propertValues[ 0 ] }; 113 | 114 | if ( property.type === 'list' ) { 115 | 116 | property.name = propertValues[ 3 ]; 117 | property.countType = propertValues[ 1 ]; 118 | property.itemType = propertValues[ 2 ]; 119 | 120 | } else { 121 
| 122 | property.name = propertValues[ 1 ]; 123 | 124 | } 125 | 126 | if ( property.name in propertyNameMapping ) { 127 | 128 | property.name = propertyNameMapping[ property.name ]; 129 | 130 | } 131 | 132 | return property; 133 | 134 | } 135 | 136 | for ( var i = 0; i < lines.length; i ++ ) { 137 | 138 | var line = lines[ i ]; 139 | line = line.trim(); 140 | 141 | if ( line === '' ) continue; 142 | 143 | lineValues = line.split( /\s+/ ); 144 | lineType = lineValues.shift(); 145 | line = lineValues.join( ' ' ); 146 | 147 | switch ( lineType ) { 148 | 149 | case 'format': 150 | 151 | header.format = lineValues[ 0 ]; 152 | header.version = lineValues[ 1 ]; 153 | 154 | break; 155 | 156 | case 'comment': 157 | 158 | header.comments.push( line ); 159 | 160 | break; 161 | 162 | case 'element': 163 | 164 | if ( currentElement !== undefined ) { 165 | 166 | header.elements.push( currentElement ); 167 | 168 | } 169 | 170 | currentElement = {}; 171 | currentElement.name = lineValues[ 0 ]; 172 | currentElement.count = parseInt( lineValues[ 1 ] ); 173 | currentElement.properties = []; 174 | 175 | break; 176 | 177 | case 'property': 178 | 179 | currentElement.properties.push( make_ply_element_property( lineValues, scope.propertyNameMapping ) ); 180 | 181 | break; 182 | 183 | 184 | default: 185 | 186 | console.log( 'unhandled', lineType, lineValues ); 187 | 188 | } 189 | 190 | } 191 | 192 | if ( currentElement !== undefined ) { 193 | 194 | header.elements.push( currentElement ); 195 | 196 | } 197 | 198 | return header; 199 | 200 | } 201 | 202 | function parseASCIINumber( n, type ) { 203 | 204 | switch ( type ) { 205 | 206 | case 'char': case 'uchar': case 'short': case 'ushort': case 'int': case 'uint': 207 | case 'int8': case 'uint8': case 'int16': case 'uint16': case 'int32': case 'uint32': 208 | 209 | return parseInt( n ); 210 | 211 | case 'float': case 'double': case 'float32': case 'float64': 212 | 213 | return parseFloat( n ); 214 | 215 | } 216 | 217 | } 218 | 219 | 
function parseASCIIElement( properties, line ) { 220 | 221 | var values = line.split( /\s+/ ); 222 | 223 | var element = {}; 224 | 225 | for ( var i = 0; i < properties.length; i ++ ) { 226 | 227 | if ( properties[ i ].type === 'list' ) { 228 | 229 | var list = []; 230 | var n = parseASCIINumber( values.shift(), properties[ i ].countType ); 231 | 232 | for ( var j = 0; j < n; j ++ ) { 233 | 234 | list.push( parseASCIINumber( values.shift(), properties[ i ].itemType ) ); 235 | 236 | } 237 | 238 | element[ properties[ i ].name ] = list; 239 | 240 | } else { 241 | 242 | element[ properties[ i ].name ] = parseASCIINumber( values.shift(), properties[ i ].type ); 243 | 244 | } 245 | 246 | } 247 | 248 | return element; 249 | 250 | } 251 | 252 | function parseASCII( data ) { 253 | 254 | // PLY ascii format specification, as per http://en.wikipedia.org/wiki/PLY_(file_format) 255 | 256 | var buffer = { 257 | indices : [], 258 | vertices : [], 259 | normals : [], 260 | uvs : [], 261 | colors : [] 262 | }; 263 | 264 | var result; 265 | 266 | var header = parseHeader( data ); 267 | 268 | var patternBody = /end_header\s([\s\S]*)$/; 269 | var body = ''; 270 | if ( ( result = patternBody.exec( data ) ) !== null ) { 271 | 272 | body = result [ 1 ]; 273 | 274 | } 275 | 276 | var lines = body.split( '\n' ); 277 | var currentElement = 0; 278 | var currentElementCount = 0; 279 | 280 | for ( var i = 0; i < lines.length; i ++ ) { 281 | 282 | var line = lines[ i ]; 283 | line = line.trim(); 284 | if ( line === '' ) { 285 | 286 | continue; 287 | 288 | } 289 | 290 | if ( currentElementCount >= header.elements[ currentElement ].count ) { 291 | 292 | currentElement ++; 293 | currentElementCount = 0; 294 | 295 | } 296 | 297 | var element = parseASCIIElement( header.elements[ currentElement ].properties, line ); 298 | 299 | handleElement( buffer, header.elements[ currentElement ].name, element ); 300 | 301 | currentElementCount ++; 302 | 303 | } 304 | 305 | return postProcess( buffer ); 306 | 
307 | } 308 | 309 | function postProcess( buffer ) { 310 | 311 | var geometry = new THREE.BufferGeometry(); 312 | 313 | // mandatory buffer data 314 | 315 | if ( buffer.indices.length > 0 ) { 316 | 317 | geometry.setIndex( buffer.indices ); 318 | 319 | } 320 | 321 | geometry.addAttribute( 'position', new THREE.Float32BufferAttribute( buffer.vertices, 3 ) ); 322 | 323 | // optional buffer data 324 | 325 | if ( buffer.normals.length > 0 ) { 326 | 327 | geometry.addAttribute( 'normal', new THREE.Float32BufferAttribute( buffer.normals, 3 ) ); 328 | 329 | } 330 | 331 | if ( buffer.uvs.length > 0 ) { 332 | 333 | geometry.addAttribute( 'uv', new THREE.Float32BufferAttribute( buffer.uvs, 2 ) ); 334 | 335 | } 336 | 337 | if ( buffer.colors.length > 0 ) { 338 | 339 | geometry.addAttribute( 'color', new THREE.Float32BufferAttribute( buffer.colors, 3 ) ); 340 | 341 | } 342 | 343 | geometry.computeBoundingSphere(); 344 | 345 | return geometry; 346 | 347 | } 348 | 349 | function handleElement( buffer, elementName, element ) { 350 | 351 | if ( elementName === 'vertex' ) { 352 | 353 | buffer.vertices.push( element.x, element.y, element.z ); 354 | 355 | if ( 'nx' in element && 'ny' in element && 'nz' in element ) { 356 | 357 | buffer.normals.push( element.nx, element.ny, element.nz ); 358 | 359 | } 360 | 361 | if ( 's' in element && 't' in element ) { 362 | 363 | buffer.uvs.push( element.s, element.t ); 364 | 365 | } 366 | 367 | if ( 'red' in element && 'green' in element && 'blue' in element ) { 368 | 369 | buffer.colors.push( element.red / 255.0, element.green / 255.0, element.blue / 255.0 ); 370 | 371 | } 372 | 373 | } else if ( elementName === 'face' ) { 374 | 375 | var vertex_indices = element.vertex_indices || element.vertex_index; // issue #9338 376 | 377 | if ( vertex_indices.length === 3 ) { 378 | 379 | buffer.indices.push( vertex_indices[ 0 ], vertex_indices[ 1 ], vertex_indices[ 2 ] ); 380 | 381 | } else if ( vertex_indices.length === 4 ) { 382 | 383 | 
buffer.indices.push( vertex_indices[ 0 ], vertex_indices[ 1 ], vertex_indices[ 3 ] ); 384 | buffer.indices.push( vertex_indices[ 1 ], vertex_indices[ 2 ], vertex_indices[ 3 ] ); 385 | 386 | } 387 | 388 | } 389 | 390 | } 391 | 392 | function binaryRead( dataview, at, type, little_endian ) { 393 | 394 | switch ( type ) { 395 | 396 | // corespondences for non-specific length types here match rply: 397 | case 'int8': case 'char': return [ dataview.getInt8( at ), 1 ]; 398 | case 'uint8': case 'uchar': return [ dataview.getUint8( at ), 1 ]; 399 | case 'int16': case 'short': return [ dataview.getInt16( at, little_endian ), 2 ]; 400 | case 'uint16': case 'ushort': return [ dataview.getUint16( at, little_endian ), 2 ]; 401 | case 'int32': case 'int': return [ dataview.getInt32( at, little_endian ), 4 ]; 402 | case 'uint32': case 'uint': return [ dataview.getUint32( at, little_endian ), 4 ]; 403 | case 'float32': case 'float': return [ dataview.getFloat32( at, little_endian ), 4 ]; 404 | case 'float64': case 'double': return [ dataview.getFloat64( at, little_endian ), 8 ]; 405 | 406 | } 407 | 408 | } 409 | 410 | function binaryReadElement( dataview, at, properties, little_endian ) { 411 | 412 | var element = {}; 413 | var result, read = 0; 414 | 415 | for ( var i = 0; i < properties.length; i ++ ) { 416 | 417 | if ( properties[ i ].type === 'list' ) { 418 | 419 | var list = []; 420 | 421 | result = binaryRead( dataview, at + read, properties[ i ].countType, little_endian ); 422 | var n = result[ 0 ]; 423 | read += result[ 1 ]; 424 | 425 | for ( var j = 0; j < n; j ++ ) { 426 | 427 | result = binaryRead( dataview, at + read, properties[ i ].itemType, little_endian ); 428 | list.push( result[ 0 ] ); 429 | read += result[ 1 ]; 430 | 431 | } 432 | 433 | element[ properties[ i ].name ] = list; 434 | 435 | } else { 436 | 437 | result = binaryRead( dataview, at + read, properties[ i ].type, little_endian ); 438 | element[ properties[ i ].name ] = result[ 0 ]; 439 | read += result[ 
1 ]; 440 | 441 | } 442 | 443 | } 444 | 445 | return [ element, read ]; 446 | 447 | } 448 | 449 | function parseBinary( data ) { 450 | 451 | var buffer = { 452 | indices : [], 453 | vertices : [], 454 | normals : [], 455 | uvs : [], 456 | colors : [] 457 | }; 458 | 459 | var header = parseHeader( bin2str( data ) ); 460 | var little_endian = ( header.format === 'binary_little_endian' ); 461 | var body = new DataView( data, header.headerLength ); 462 | var result, loc = 0; 463 | 464 | for ( var currentElement = 0; currentElement < header.elements.length; currentElement ++ ) { 465 | 466 | for ( var currentElementCount = 0; currentElementCount < header.elements[ currentElement ].count; currentElementCount ++ ) { 467 | 468 | result = binaryReadElement( body, loc, header.elements[ currentElement ].properties, little_endian ); 469 | loc += result[ 1 ]; 470 | var element = result[ 0 ]; 471 | 472 | handleElement( buffer, header.elements[ currentElement ].name, element ); 473 | 474 | } 475 | 476 | } 477 | 478 | return postProcess( buffer ); 479 | 480 | } 481 | 482 | // 483 | 484 | var geometry; 485 | var scope = this; 486 | 487 | if ( data instanceof ArrayBuffer ) { 488 | 489 | geometry = isASCII( data ) ? 
parseASCII( bin2str( data ) ) : parseBinary( data ); 490 | 491 | } else { 492 | 493 | geometry = parseASCII( data ); 494 | 495 | } 496 | 497 | return geometry; 498 | 499 | } 500 | 501 | }; 502 | -------------------------------------------------------------------------------- /src-old/static/papaya.css: -------------------------------------------------------------------------------- 1 | .papaya{width:90%;height:90%;margin:25px auto;background-color:black;font-family:sans-serif}.papaya:before{position:relative;content:"Papaya requires JavaScript...";display:block;top:45%;color:red;margin:0 auto;font-size:18px;font-family:sans-serif}.papaya-fullscreen{height:100%}.papaya-toolbar{text-align:left;box-sizing:content-box}.papaya-toolbar ul{margin:0;list-style:none}.papaya-toolbar input[type=file]{text-align:right;display:none}.papaya-kiosk-controls{margin:5px auto;list-style:none;-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.papaya-kiosk-controls ul{list-style:none}.papaya-kiosk-controls button{-webkit-appearance:none;border-radius:0;-webkit-border-radius:0;font-size:14px;height:25px;background-color:lightgray}.papaya-control-increment{-webkit-appearance:none;border-radius:0;-webkit-border-radius:0;font-size:14px;height:25px;width:25px;text-align:center;vertical-align:middle;padding:0;margin-left:auto;margin-right:auto;line-height:16px;box-sizing:border-box;font-family:"Courier New",Courier,monospace}.papaya-main-increment{-webkit-appearance:none;border-radius:0;-webkit-border-radius:0;font-size:14px;height:25px;width:25px;text-align:center;font-family:"Courier New",Courier,monospace;background-color:lightgray;vertical-align:middle;padding:0;margin-left:auto;margin-right:auto;box-sizing:border-box;outline:0}.papaya-main-decrement{-webkit-appearance:none;border-radius:0;-webkit-border-radius:0;font-size:14px;height:25px;width:25px;text-align:center;font-family:"Courier 
New",Courier,monospace;background-color:lightgray;vertical-align:middle;padding:0;margin-left:auto;margin-right:auto;box-sizing:border-box;outline:0}.papaya-main-swap{-webkit-appearance:none;border-radius:0;-webkit-border-radius:0;font-size:14px;height:25px;background-color:lightgray;outline:0}.papaya-main-goto-center{-webkit-appearance:none;border-radius:0;-webkit-border-radius:0;font-size:14px;height:25px;background-color:lightgray;outline:0}.papaya-main-goto-origin{-webkit-appearance:none;border-radius:0;-webkit-border-radius:0;font-size:14px;height:25px;background-color:lightgray;outline:0}.papaya-slider-slice{padding:0 5px;display:inline}.papaya-slider-slice span{font-size:14px;font-family:sans-serif;vertical-align:middle}.papaya-slider-slice button{-webkit-appearance:none;border-radius:0;-webkit-border-radius:0;vertical-align:middle;font-size:14px;height:25px;background-color:lightgray}.papaya-controlbar-label{color:#000}.papaya-menu{width:220px;background:#222;z-index:100;position:absolute;border:solid 2px darkgray;padding:4px;margin:0}.papaya-menu li{font-size:12px;font-family:sans-serif;padding:4px 2px;color:#b5cbd3;cursor:pointer;list-style-type:none}.papaya-menu-label{font-size:14px;font-family:sans-serif;font-weight:bold;padding:2px 8px;cursor:pointer;vertical-align:text-bottom}.papaya-menu-titlebar{font-size:16px;font-family:sans-serif;padding:3px 8px 0 8px;cursor:default;vertical-align:text-bottom;color:white}.papaya-menu-icon{margin-left:5px}.papaya-menu-icon img{box-sizing:content-box}.papaya-menu-hovering{background-color:#444}.papaya-menu-spacer{height:8px}.papaya-menu-unselectable{-moz-user-select:-moz-none;-khtml-user-select:none;-webkit-user-select:none;-ms-user-select:none;user-select:none;-webkit-user-drag:none;user-drag:none}.papaya-menu-button-hovering{background-color:#DDD}.papaya-menu-filechooser{cursor:pointer;width:200px;display:inline-block;font-weight:normal}.papaya-menu-input{width:38px;margin-right:5px;color:black}li 
.papaya-menu-slider{vertical-align:middle;text-align:center;display:inline;width:120px;padding:0;margin:0}.papaya-dialog{min-width:400px;max-width:500px;height:500px;background:#222;position:absolute;z-index:100;border:solid 2px darkgray;padding:6px;font-size:14px;font-family:sans-serif;color:#b5cbd3;box-sizing:content-box;line-height:1.45}.papaya-dialog-content{margin:20px;height:415px;color:#dedede;overflow:auto;-ms-overflow-style:-ms-autohiding-scrollbar}.papaya-dialog-content-nowrap{white-space:nowrap}.papaya-dialog-content table{margin:0 auto}.papaya-dialog-content-label{text-align:right;padding:5px;color:#b5cbd3}.papaya-dialog-content-control{text-align:left;padding:5px}.papaya-dialog-content-help{text-align:right;padding:5px;color:lightgray;font-size:12px}.papaya-dialog-title{color:#b5cbd3;font-weight:bold;font-size:16px}.papaya-dialog-button{text-align:right;box-sizing:content-box;height:22px}.papaya-dialog-button button{box-sizing:content-box;color:black;font-size:11px}.papaya-dialog-background{position:fixed;top:0;left:0;background-color:#fff;width:100%;height:100%;opacity:.5}.papaya-dialog-stopscroll{height:100%;overflow:hidden}.checkForJS{width:90%;height:90%;margin:25px auto;background-color:black}.checkForJS:before{position:relative;content:"Papaya requires JavaScript...";display:block;top:45%;color:red;margin:0 auto;font-size:18px;font-family:sans-serif;text-align:center}.papaya-utils-unsupported{width:90%;height:90%;margin:25px auto;background-color:black}.papaya-utils-unsupported-message{position:relative;display:block;top:45%;color:red;margin:0 auto;font-size:18px;font-family:sans-serif;text-align:center}.papaya-viewer{line-height:1;font-family:sans-serif}.papaya-viewer div,.papaya-viewer canvas{margin:0;padding:0;border:0;font:inherit;font-size:100%;vertical-align:baseline;font-family:sans-serif}.papaya-viewer canvas{cursor:crosshair} -------------------------------------------------------------------------------- 
/src-old/static/three_index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | AAALGO Model Viewer 5 | 6 | 7 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 180 | 181 | 182 | -------------------------------------------------------------------------------- /src-old/templates/case.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 9 | 10 | 11 | 12 | GIF
13 | {% for dcm in dcms %} 14 | 15 | 16 | 17 | 18 | {% endfor %} 19 |
{{dcm.location}}
20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /src-old/templates/gallery.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 9 | 10 | 11 | 12 | {% if header %} 13 | 14 | {% for col in header %} 15 | 16 | {% endfor %} 17 | 18 | {% endif %} 19 | {% for row in images %} 20 | 21 | {% for col in row %} 22 | 37 | {% endfor %} 38 | 39 | {% endfor %} 40 |
{{col}}
23 | {% if col.image %} 24 | {% if col.link %} 25 | 26 | {% else %} 27 | 28 | {% endif %} 29 | {% if col.text %} 30 |
31 | {% endif %} 32 | {% endif %} 33 | {% if col.text %} 34 | {{col.text}} 35 | {% endif %} 36 |
41 | 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /src-old/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | {% for case in cases %} 18 | 19 | 20 | 21 | 22 | 23 | {% endfor %} 24 |
uidsliceslength
{{case.uid}}{{case.slices}}{{case.length}}
25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /src-old/templates/papaya_case.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | AAALGO Lung Cancer Detector 16 | 17 | 107 | 108 | 109 | 110 | 111 | 112 |
113 | 114 |
115 | 116 | 117 | {% for z, y, x, hint in centers %} 118 | 119 | 124 | 125 | {% endfor %} 126 |
Nodules
{{hint}}
127 |
128 | 129 | 130 | -------------------------------------------------------------------------------- /src-old/templates/papaya_index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | {% for case in cases %} 10 | 11 | {% endfor %} 12 |
{{case[0]}}
{{case[1]}}
13 | 14 | 15 | -------------------------------------------------------------------------------- /src-old/templates/papaya_lymph_case.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | Papaya Viewer 16 | 17 | 64 | 65 | 66 | 67 | 68 | 69 |
70 | 71 | 72 | -------------------------------------------------------------------------------- /src-old/templates/plumo.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 9 | 10 | 11 |

Score: {{extra.score}}

12 |

Lung Segmentation (slow loading over network)

13 |

DICOM Viewer (slow loading over network)

14 | 15 | {% if header %} 16 | 17 | {% for col in header %} 18 | 19 | {% endfor %} 20 | 21 | {% endif %} 22 | {% for row in images %} 23 | 24 | {% for col in row %} 25 | 40 | {% endfor %} 41 | 42 | {% endfor %} 43 |
{{col}}
26 | {% if col.image %} 27 | {% if col.link %} 28 | 29 | {% else %} 30 | 31 | {% endif %} 32 | {% if col.text %} 33 |
34 | {% endif %} 35 | {% endif %} 36 | {% if col.text %} 37 | {{col.text}} 38 | {% endif %} 39 |
44 | 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /src-old/three.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | import sys 4 | import shutil 5 | import subprocess 6 | from glob import glob 7 | from jinja2 import Environment, FileSystemLoader 8 | from adsb3 import DATA_DIR 9 | import pyadsb3 10 | 11 | #TMPL_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 12 | # './templates') 13 | STATIC_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 14 | './static') 15 | #env = Environment(loader=FileSystemLoader(searchpath=TMPL_DIR)) 16 | #case_tmpl = env.get_template('three_case.html') 17 | #$index_tmpl = env.get_template('papaya_index.html') 18 | 19 | def Three (path, verts, faces): 20 | try: 21 | os.makedirs(path) 22 | except: 23 | pass 24 | try: 25 | subprocess.check_call('rm -rf %s/*' % path, shell=True) 26 | except: 27 | pass 28 | pyadsb3.save_mesh(verts, faces, os.path.join(path, 'model.ply')) 29 | #try: 30 | # data = os.path.abspath(DATA_DIR) 31 | # os.symlink(data, os.path.join(path, 'data')) 32 | #except: 33 | # pass 34 | for f in ['three.js', 'PLYLoader.js', 'Detector.js', 'TransformControls.js']: 35 | shutil.copyfile(os.path.join(STATIC_DIR, f), os.path.join(path, f)) 36 | shutil.copyfile(os.path.join(STATIC_DIR, 'three_index.html'), os.path.join(path, 'index.html')) 37 | pass 38 | 39 | -------------------------------------------------------------------------------- /src/README: -------------------------------------------------------------------------------- 1 | Software Setup: 2 | 3 | - Install https://github.com/aaalgo/picpac 4 | (commit b4a0ab776d0c7bf3bac6190ed6c0ddcc364ce233) 5 | - Run python setup.py build; sudo python setup.py install 6 | - Tensorflow version is r1.1, with cuda-8.0 7 | and libcudnn.so.5.1.5. 
8 | 9 | cd src; python setup.py build 10 | 11 | 12 | Data Preparation 13 | 14 | - Create a directory/link src/data/adsb3/dicom what contains all dicom data, e.g. 15 | src/data/adsb3/dicom/ac4056071f3cc98489b9db3aebfe2b6a/804855d11f3347756b809e3ddff74f72.dcm 16 | 17 | - Create a directory/link src/cache 18 | 19 | - wget http://www.aaalgo.com/plumo/plumo-adsb3-models.tar.bz2 20 | and extract under src, so you have src/models/{ft, ft1, ...}. 21 | 22 | Both dicom and cache data should be backed by SSD storage for fast access. 23 | 24 | Reproducing Kaggle Results, (cd src) 25 | 26 | - ./adsb3_cache_mask.py -- This will compute all lung masks and convex hulls. 27 | - ./adsb3_cache_all_ft.sh 28 | - ./adsb3_eval.py --reproduce -- This will evaluate all the above feature data extracted and produce a score of 0.46482. 29 | -------------------------------------------------------------------------------- /src/adsb3.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import division 4 | from __future__ import print_function 5 | import os 6 | import sys 7 | sys.path.append('build/lib.linux-x86_64-2.7') 8 | import shutil 9 | import math 10 | from glob import glob 11 | import cv2 12 | import csv 13 | import random 14 | from PIL import Image, ImageDraw 15 | import dicom 16 | import copy 17 | import numpy as np 18 | import SimpleITK as itk 19 | from skimage import measure 20 | import logging 21 | import cPickle as pickle 22 | import plumo 23 | 24 | # configuration options 25 | 26 | DICOM_STRICT = False 27 | SPACING = 0.8 28 | GAP = 5 29 | FAST = 400 30 | 31 | if 'SPACING' in os.environ: 32 | SPACING = float(os.environ['SPACING']) 33 | print('OVERRIDING SPACING = %f' % SPACING) 34 | 35 | if 'GAP' in os.environ: 36 | GAP = int(os.environ['GAP']) 37 | print('OVERRIDING GAP = %d' % GAP) 38 | 39 | 40 | ROOT = os.path.abspath(os.path.dirname(__file__)) 41 | DATA_DIR = 
os.path.join(ROOT, 'data', 'adsb3') 42 | 43 | # parse label file, return a list of (id, label) 44 | # if gs is True, labels are int 45 | # otherwise, labels are float 46 | def load_meta (path, gs=True): 47 | all = [] 48 | with open(path, 'r') as f: 49 | header = f.next() # skip one line 50 | assert header.strip() == 'id,cancer' 51 | for l in f: 52 | ID, label = l.strip().split(',') 53 | if gs: 54 | label = int(label) 55 | else: 56 | label = float(label) 57 | all.append((ID, label)) 58 | pass 59 | pass 60 | return all 61 | 62 | # write meta for verification 63 | def dump_meta (path, meta): 64 | with open(path, 'w') as f: 65 | f.write('id,cancer\n') 66 | for ID, label in meta: 67 | f.write('%s,%s\n' % (ID, str(label))) 68 | pass 69 | pass 70 | 71 | 72 | STAGE1_TRAIN = load_meta(os.path.join(DATA_DIR, 'stage1_labels.csv')) 73 | STAGE1_PUBLIC = load_meta(os.path.join(DATA_DIR, 'stage1_public.csv')) 74 | STAGE2_PUBLIC = load_meta(os.path.join(DATA_DIR, 'stage2_public.csv')) 75 | STAGE2_PRIVATE = load_meta(os.path.join(DATA_DIR, 'stage2_private.csv')) 76 | 77 | ALL_CASES = STAGE1_TRAIN + STAGE1_PUBLIC + STAGE2_PUBLIC + STAGE2_PRIVATE 78 | 79 | 80 | # All DiCOMs of a UID, organized 81 | class Case (plumo.DicomVolume): 82 | def __init__ (self, uid, regroup = True): 83 | path = os.path.join(DATA_DIR, 'dicom', uid) 84 | plumo.DicomVolume.__init__(self, path) 85 | self.uid = uid 86 | self.path = path 87 | pass 88 | pass 89 | 90 | 91 | def save_mask (path, mask): 92 | shape = np.array(list(mask.shape), dtype=np.uint32) 93 | total = mask.size 94 | totalx = (total +7 )// 8 * 8 95 | if totalx == total: 96 | padded = mask 97 | else: 98 | padded = np.zeros((totalx,), dtype=np.uint8) 99 | padded[:total] = np.reshape(mask, (total,)) 100 | pass 101 | padded = np.reshape(padded, (totalx//8, 8)) 102 | #print padded.shape 103 | packed = np.packbits(padded) 104 | #print packed.shape 105 | np.savez_compressed(path, shape, packed) 106 | pass 107 | 108 | def load_mask (path): 109 | import 
sys 110 | saved = np.load(path) 111 | shape = saved['arr_0'] 112 | D, H, W = shape 113 | size = D * H * W 114 | packed = saved['arr_1'] 115 | padded = np.unpackbits(packed) 116 | binary = padded[:size] 117 | return np.reshape(binary, [D, H, W]) 118 | 119 | def load_8bit_lungs_noseg (uid): 120 | case = Case(uid) 121 | case.normalize_8bit() 122 | return case 123 | 124 | def load_16bit_lungs_noseg (uid): 125 | case = Case(uid) 126 | case.normalize_16bit() 127 | return case 128 | 129 | def load_lungs_mask (uid): 130 | cache = os.path.join('maskcache/mask-v2/%s.npz' % case.uid) 131 | binary = None 132 | if os.path.exists(cache) and os.path.getsize(cache) > 0: 133 | # load cache 134 | binary = load_mask(cache) 135 | assert not binary is None 136 | if binary is None: 137 | case = load_case(uid) 138 | case.normalizeHU() 139 | binary = segment_lung_axial_v2(case.images) #, th=200.85) 140 | save_mask(cache, binary) 141 | pass 142 | return binary 143 | 144 | def load_fts (path): 145 | with open(path, 'rb') as f: 146 | return pickle.load(f) 147 | pass 148 | 149 | 150 | def patch_clip_range (x, tx, wx, X): 151 | if x < 0: # 152 | wx += x 153 | tx -= x 154 | x = 0 155 | if x + wx > X: 156 | d = x + wx - X 157 | wx -= d 158 | pass 159 | return x, tx, wx 160 | 161 | def extract_patch_3c (images, z, y, x, size): 162 | assert len(images.shape) == 3 163 | _, Y, X = images.shape 164 | z = int(round(z)) 165 | y = int(round(y)) 166 | x = int(round(x)) 167 | image = get3c(images, z) 168 | if image is None: 169 | return None 170 | ty = 0 171 | tx = 0 172 | y -= size//2 173 | x -= size//2 174 | wy = size 175 | wx = size 176 | #print y, ty, wy, x, tx, wx 177 | y, ty, wy = patch_clip_range(y, ty, wy, Y) 178 | x, tx, wx = patch_clip_range(x, tx, wx, X) 179 | # now do overlap 180 | patch = np.zeros((size, size, 3), dtype=image.dtype) 181 | #print y, ty, wy, x, tx, wx 182 | patch[ty:(ty+wy),tx:(tx+wx),:] = image[y:(y+wy),x:(x+wx),:] 183 | return patch 184 | 185 | def try_mkdir (path): 186 | 
try: 187 | os.makedirs(path) 188 | except: 189 | pass 190 | 191 | def try_remove (path): 192 | try: 193 | os.remove(path) 194 | except: 195 | shutil.rmtree(path, ignore_errors=True) 196 | pass 197 | 198 | if __name__ == '__main__': 199 | #dump_meta('a', STAGE1.train) 200 | #dump_meta('b', STAGE1.test) 201 | pass 202 | -------------------------------------------------------------------------------- /src/adsb3_cache_all_ft.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This program generates feature vector of various 4 | # combination of NODULE model and FEATURE model. 5 | # The generated feature files are cached in the feature 6 | # directory. 7 | # only the feature data actually used by 8 | # predict1 and predict2 have to be generated. 9 | 10 | 11 | # Feature directory naming scheme 12 | # comp1_comp2_[m?]_b8[_hull][_old] 13 | # comp1 is the nodule model name 14 | # comp2 is the feature model name 15 | # both nodule and feature models are in models directory, 16 | # if the nodule is detected under sagittal or coronal view, 17 | # m2 or m3 is added. If the nodule detection is applied 18 | # with a segmentation mask/hull, _mask or _hull is added. 19 | # so the feature directory name fully determines how 20 | # to invoke adsb3_cache_ft.py. 21 | 22 | # _old means to use adsb3_cache_ft_old.py instead of adsb3_cache_ft.py for extraction. 23 | 24 | # _b8 and --bits 8: we only use 8-bit image here. 25 | # --fts_dropout. for feature model with dropout this have to 26 | # be specified. A model either works with --fts_dropout, or not, 27 | # but not both. 28 | 29 | # the exit command in this file are for partial processing, 30 | # must be modified according to actual usage of feature data. 
31 | 32 | ./adsb3_cache_ft.py --bits 8 --prob unet_k 33 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 2 34 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 3 35 | ./adsb3_cache_ft.py --bits 8 --prob tiny --fts ft1 --fts_dropout 36 | ./adsb3_cache_ft.py --bits 8 --prob tiny --mode 4 37 | 38 | # the above are needed to reproduce kaggle submission 39 | 40 | exit 41 | 42 | if false 43 | then 44 | ./adsb3_cache_ft.py --bits 8 --prob tiny --fts ft1 --fts_dropout 45 | ./adsb3_cache_ft.py --bits 8 --prob tiny --mode 2 46 | ./adsb3_cache_ft.py --bits 8 --prob tiny --mode 3 47 | ./adsb3_cache_ft.py --bits 8 --prob unet_k --fts ft1 --fts_dropout 48 | ./adsb3_cache_ft.py --bits 8 --prob tiny --mode 4 49 | ./adsb3_cache_ft.py --bits 8 --prob tiny --fts ft --fts_dropout 50 | fi 51 | 52 | 53 | ./adsb3_cache_ft.py --bits 8 --prob tiny --mask hull 54 | exit 55 | ./adsb3_cache_ft.py --bits 8 --prob tiny 56 | ./adsb3_cache_ft.py --bits 8 --prob tiny --mode 2 57 | ./adsb3_cache_ft.py --bits 8 --prob tiny --mode 3 58 | ./adsb3_cache_ft.py --bits 8 --prob tiny --mode 3 --fts ft --fts_dropout 59 | 60 | exit 61 | ./adsb3_cache_ft.py --bits 8 --prob unet_k --mask hull 62 | ./adsb3_cache_ft.py --bits 8 --prob unet --fts ft --fts_dropout --mask hull 63 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 4 --mask hull 64 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 3 --mask hull 65 | exit 66 | 67 | #./adsb3_cache_ft.py --bits 8 --prob unet --mask hull 68 | ./adsb3_cache_ft.py --bits 8 --prob unet_k --fts ft --fts_dropout --mask hull 69 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 2 --fts ft --fts_dropout --mask hull 70 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 2 --mask hull 71 | exit 72 | 73 | ./adsb3_cache_ft.py --bits 8 --prob unet_k --fts ft --fts_dropout 74 | ./adsb3_cache_ft.py --bits 8 --prob unet_k 75 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 2 --fts ft --fts_dropout 76 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 2 77 | ./adsb3_cache_ft.py --bits 8 
--prob unet 78 | ./adsb3_cache_ft.py --bits 8 --prob unet --fts ft --fts_dropout 79 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 4 80 | ./adsb3_cache_ft.py --bits 8 --prob unet --mode 3 81 | 82 | exit 83 | 84 | exit 85 | ./adsb3_cache_ft.py --bits 8 --prob small --mode 4 --fts smallft 86 | ./adsb3_cache_ft.py --bits 8 --prob small --mode 3 --fts smallft 87 | ./adsb3_cache_ft.py --bits 8 --prob small --mode 2 --fts smallft 88 | exit 89 | 90 | ./adsb3_cache_ft.py --bits 8 --prob small --mode 3 91 | ./adsb3_cache_ft.py --bits 8 --prob small --mode 2 92 | ./adsb3_cache_ft.py --bits 8 --prob small --mode 4 93 | #./adsb3_cache_ft.py --bits 8 --prob small --mode 1 94 | exit 95 | ./adsb3_cache_ft.py --bits 8 --prob small --mode 1 --fts ft2 --fts_dropout 96 | exit 97 | ./adsb3_cache_ft.py --bits 8 --prob small_4k --mode 2 98 | exit 99 | ./adsb3_cache_ft.py --bits 8 --prob nnc --mode 1 100 | exit 101 | ./adsb3_cache_ft.py --bits 8 --prob small_4k --mode 3 102 | exit 103 | ./adsb3_cache_ft.py --bits 8 --prob small_4k --mode 1 104 | exit 105 | ./adsb3_cache_ft.py --bits 8 --prob small --mode 1 --fts ft --fts_dropout 106 | exit 107 | ./adsb3_cache_ft.py --bits 8 --prob small --mode 1 108 | exit 109 | ./adsb3_cache_ft.py --bits 8 --prob tiny2 --mode 1 --fts tcia --fts_dropout 110 | exit 111 | ./adsb3_cache_ft.py --bits 8 --prob tiny2 --mode 1 112 | exit 113 | ./adsb3_cache_ft.py --bits 8 --prob unet5 --fts tcia --fts_dropout 114 | exit 115 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 3 116 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 1 --fts ft --fts_dropout 117 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 2 --fts ft --fts_dropout 118 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 3 --fts ft --fts_dropout 119 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 4 --fts ft --fts_dropout 120 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 1 121 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 2 122 | 
./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 4 123 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 1 --fts ft_k --fts_dropout 124 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 2 --fts ft_k --fts_dropout 125 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 3 --fts ft_k --fts_dropout 126 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet --mode 4 --fts ft_k --fts_dropout 127 | exit 128 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet_l #--mask hull 129 | exit 130 | ./adsb3_cache_ft_calibc.py --bits 8 --prob unet_k #--mask hull 131 | exit 132 | 133 | ./adsb3_cache_ft_batch.py --bits 8 --fts ft_k --fts_dropout --prob unet 134 | ./adsb3_cache_ft_batch.py --bits 8 --fts ft_k --fts_dropout --prob unet --mode 3 135 | 136 | exit 137 | ./adsb3_cache_ft_batch.py --bits 8 --prob unet --mode 4 138 | ./adsb3_cache_ft_batch.py --bits 8 --fts ft_z --fts_dropout --prob unet 139 | 140 | exit 141 | ./adsb3_cache_ft_batch.py --bits 8 --prob unet_k --mask hull 142 | 143 | exit 144 | 145 | ./adsb3_cache_ft_batch.py --bits 8 --fts ft_k --fts_dropout --prob unet_k --mask hull 146 | exit 147 | ./adsb3_cache_ft_batch.py --bits 8 --prob unet_k --mask hull 148 | exit 149 | 150 | ./adsb3_cache_ft_batch.py --bits 8 --prob none_k 151 | ./adsb3_cache_ft_batch.py --bits 8 --prob small_k 152 | exit 153 | ./adsb3_cache_ft_batch.py --bits 8 --prob vnet 154 | exit 155 | #./adsb3_cache_ft_batch.py --bits 8 --fts ft_k --fts_dropout 156 | ./adsb3_cache_ft_batch.py --bits 8 --prob tiny_k 157 | exit 158 | SPACING=0.6 ./adsb3_cache_ft_batch.py --bits 8 --prob unet_k 159 | exit 160 | ./adsb3_cache_ft_batch.py --bits 8 --prob unet_k 161 | ./adsb3_cache_ft_batch.py --bits 8 --prob unet_l 162 | exit 163 | ./adsb3_cache_ft_batch.py --bits 8 --prob lymph_unet 164 | SPACING=0.6 ./adsb3_cache_ft_batch.py --bits 8 --prob unet 165 | exit 166 | ./adsb3_cache_ft_batch.py --bits 8 --prob unet 167 | 168 | SPACING=1.2 ./adsb3_cache_ft.py --bits 8 --prob luna_tiny 169 | exit 170 | 
SPACING=1.2 ./adsb3_cache_ft.py --bits 8 --prob luna 171 | exit 172 | SPACING=1.2 ./adsb3_cache_ft.py --bits 8 --prob luna_dilate 173 | ./adsb3_cache_ft.py --bits 8 174 | ./adsb3_cache_ft.py --bits 8 --mode 3 175 | ./adsb3_cache_ft.py --bits 8 --fts tcia.ft 176 | ./adsb3_cache_ft.py --bits 8 --prob luna.ns --channels 1 177 | ./adsb3_cache_ft.py --bits 8 --prob tcia.none 178 | ./adsb3_cache_ft.py --bits 8 --prob tcia.small 179 | ./adsb3_cache_ft.py --bits 8 --prob nnc 180 | #./adsb3_cache_ft.py --prob lymph3c16 181 | ./adsb3_cache_ft.py --bits 8 --fts ac --stride 32 182 | 183 | exit 184 | ./adsb3_cache_ft.py --bits 8 --fts tcia.ft.X 185 | exit 186 | -------------------------------------------------------------------------------- /src/adsb3_cache_ft.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | sys.path.append('build/lib.linux-x86_64-2.7') 4 | import math 5 | import time 6 | import traceback 7 | import subprocess 8 | import numpy as np 9 | import tensorflow as tf 10 | from tensorflow.python.framework import meta_graph 11 | from scipy.ndimage.morphology import grey_dilation, binary_dilation 12 | from skimage import measure 13 | import plumo 14 | from adsb3 import * 15 | 16 | BATCH = 32 17 | 18 | def setGpuConfig (config): 19 | mem_total = subprocess.check_output('nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits', shell=True) 20 | mem_total = float(mem_total) 21 | frac = 5000.0/mem_total 22 | print("setting GPU memory usage to %f" % frac) 23 | if frac < 0.5: 24 | config.gpu_options.per_process_gpu_memory_fraction = frac 25 | pass 26 | 27 | def extract (prob, fts, th=0.05, ext=2): 28 | if not fts is None: 29 | prob4 = np.reshape(prob, prob.shape + (1,)) 30 | assert prob4.base is prob 31 | fts = np.clip(fts, 0, 6) 32 | fts *= prob4 33 | binary = prob > th 34 | k = int(round(ext / SPACING)) 35 | binary = binary_dilation(binary, iterations=k) 36 | labels = 
measure.label(binary, background=0) 37 | boxes = measure.regionprops(labels) 38 | 39 | nodules = [] 40 | dim = 2 41 | if not fts is None: 42 | dim = fts.shape[3] 43 | 44 | Z, Y, X = prob.shape 45 | for box in boxes: 46 | #print prob.shape, fts.shape 47 | z0, y0, x0, z1, y1, x1 = box.bbox 48 | #ft.append((z1-z0)*(y1-y0)*(x1-x0)) 49 | prob_roi = prob[z0:z1,y0:y1,x0:x1] 50 | za, ya, xa, zz, zy, zx, yy, yx, xx = plumo.norm3d(prob_roi) 51 | zc = za + z0 52 | yc = ya + y0 53 | xc = xa + x0 54 | 55 | cov = np.array([[zz, zy, zx], 56 | [zy, yy, yx], 57 | [zx, yx, xx]], dtype=np.float32) 58 | eig, _ = np.linalg.eig(cov) 59 | #print zc, yc, xc, '------', (z0+z1)/2.0, (y0+y1)/2.0, (x0+x1)/2.0 60 | 61 | weight_sum = np.sum(prob_roi) 62 | UNIT = SPACING * SPACING * SPACING 63 | prob_sum = weight_sum * UNIT 64 | 65 | eig = sorted(list(eig), reverse=True) 66 | #print eig 67 | #print weight_sum, np.linalg.det(cov), eig 68 | #print za, ya, xa 69 | #print cov 70 | 71 | pos = (zc/Z, yc/Y, xc/X) 72 | 73 | if fts is None: 74 | one = [prob_sum, math.atan2(eig[0], eig[2])] 75 | else: 76 | fts_roi = fts[z0:z1,y0:y1,x0:x1,:] 77 | fts_sum = np.sum(fts_roi, axis=(0,1,2)) 78 | one = list(fts_sum/weight_sum) 79 | nodules.append((prob_sum, pos, one)) 80 | pass 81 | nodules = sorted(nodules, key=lambda x: -x[0]) 82 | return dim, nodules 83 | 84 | 85 | 86 | def logits2prob (v, scope='logits2prob'): 87 | with tf.name_scope(scope): 88 | shape = tf.shape(v) # (?, ?, ?, 2) 89 | # softmax 90 | v = tf.reshape(v, (-1, 2)) 91 | v = tf.nn.softmax(v) 92 | v = tf.reshape(v, shape) 93 | # keep prob of 1 only 94 | v = tf.slice(v, [0, 0, 0, 1], [-1, -1, -1, -1]) 95 | # remove trailing dimension of 1 96 | v = tf.squeeze(v, axis=3) 97 | return v 98 | 99 | class ViewModel: 100 | def __init__ (self, X, KEEP, view, name, dir_path, node='logits:0', softmax=True): 101 | self.name = name 102 | self.view = view 103 | print dir_path 104 | paths = glob(os.path.join(dir_path, '*.meta')) 105 | print paths 106 | assert 
len(paths) == 1 107 | path = os.path.splitext(paths[0])[0] 108 | mg = meta_graph.read_meta_graph_file(path + '.meta') 109 | if KEEP is None: 110 | fts, = tf.import_graph_def(mg.graph_def, name=name, 111 | input_map={'images:0':X}, 112 | return_elements=[node]) 113 | else: 114 | fts, = tf.import_graph_def(mg.graph_def, name=name, 115 | input_map={'images:0':X, 'keep:0':KEEP}, 116 | return_elements=[node]) 117 | if softmax: 118 | fts = logits2prob(fts) 119 | self.fts = fts 120 | self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name) 121 | self.loader = lambda sess: self.saver.restore(sess, path) 122 | pass 123 | 124 | MODE_AXIAL = 1 125 | MODE_SAGITTAL = 2 126 | MODE_CORONAL = 3 127 | MODE_MIN = 4 128 | 129 | class Model: 130 | def __init__ (self, prob_model, prob_mode, fts_model, channels = 3, prob_dropout=False, fts_dropout=True): 131 | if channels == 1: 132 | self.X = tf.placeholder(tf.float32, shape=(None, None, None)) 133 | X4 = tf.expand_dims(self.X, axis=3) 134 | elif channels == 3: 135 | self.X = tf.placeholder(tf.float32, shape=(None, None, None, channels)) 136 | X4 = self.X 137 | else: 138 | assert False 139 | self.KEEP = tf.placeholder(tf.float32, shape=()) 140 | PROB_KEEP = None 141 | FTS_KEEP = None 142 | if prob_dropout: 143 | PROB_KEEP = self.KEEP 144 | if fts_dropout: 145 | FTS_KEEP = self.KEEP 146 | 147 | pp = '%d' % FLAGS.bits 148 | if SPACING != 0.8: 149 | pp += '_%.1f' % SPACING 150 | if GAP != 5: 151 | pp += '_%d' % GAP 152 | print "PP:", pp 153 | 154 | models = [] 155 | if fts_model is None: 156 | models.append(None) 157 | else: 158 | models.append(ViewModel(X4, FTS_KEEP, plumo.AXIAL, 'fts', 'models/%s' % fts_model, node='fts:0', softmax=False)) 159 | 160 | if prob_mode == MODE_AXIAL: 161 | models.append(ViewModel(X4, PROB_KEEP, plumo.AXIAL, 'axial', 'models/%s/axial' % prob_model)) 162 | elif prob_mode == MODE_SAGITTAL: 163 | models.append(ViewModel(X4, PROB_KEEP, plumo.SAGITTAL, 'sagittal', 'models/%s/sagittal' % prob_model)) 164 | 
elif prob_mode == MODE_CORONAL: 165 | models.append(ViewModel(X4, PROB_KEEP, plumo.CORONAL, 'coronal', 'models/%s/coronal' % prob_model)) 166 | else: 167 | models.append(ViewModel(X4, PROB_KEEP, plumo.AXIAL, 'axial', 'models/%s/axial' % prob_model)) 168 | models.append(ViewModel(X4, PROB_KEEP, plumo.SAGITTAL, 'sagittal', 'models/%s/sagittal' % prob_model)) 169 | models.append(ViewModel(X4, PROB_KEEP, plumo.CORONAL, 'coronal', 'models/%s/coronal' % prob_model)) 170 | self.channels = channels 171 | self.models = models 172 | self.mode = prob_mode 173 | pass 174 | 175 | def load (self, sess): 176 | for m in self.models: 177 | if m: 178 | m.loader(sess) 179 | pass 180 | 181 | def apply (self, sess, case, mask): 182 | r = [] 183 | #comb = np.ones_like(case.images, dtype=np.float32) 184 | views = [case.transpose(plumo.AXIAL)] 185 | if self.mode > MODE_AXIAL: 186 | views.append(case.transpose(plumo.SAGITTAL)) 187 | views.append(case.transpose(plumo.CORONAL)) 188 | for m in self.models: 189 | if m is None: 190 | r.append(None) 191 | continue 192 | cc = views[m.view] 193 | images = cc.images 194 | N, H, W = images.shape 195 | 196 | fts = None #np.zeros_like(images, dtype=np.float32) 197 | margin = 0 198 | if self.channels == 3: 199 | margin = GAP 200 | 201 | fts = None 202 | off = margin 203 | while off < N-margin: 204 | nxt = min(off + BATCH, N-margin) 205 | x = np.zeros((nxt-off, H, W, FLAGS.channels), dtype=np.float32) 206 | i = 0 207 | for j in range(off, nxt): 208 | if self.channels == 1: 209 | x[i] = images[j] 210 | elif self.channels == 3: 211 | x[i,:,:,0] = images[j-GAP] 212 | x[i,:,:,1] = images[j] 213 | x[i,:,:,2] = images[j+GAP] 214 | else: 215 | assert False 216 | i += 1 217 | pass 218 | assert i == x.shape[0] 219 | y, = sess.run([m.fts], feed_dict={self.X:x, self.KEEP:1.0}) 220 | if fts is None: 221 | fts = np.zeros((N,) + y.shape[1:], dtype=np.float32) 222 | fts[off:nxt] = y 223 | off = nxt 224 | pass 225 | assert off == N - margin 226 | if m.view != 
plumo.AXIAL: 227 | fts = cc.transpose_array(plumo.AXIAL, fts) 228 | r.append(fts) 229 | pass 230 | if len(r) == 2: 231 | prob = r[1] 232 | elif len(r) == 4: 233 | prob = r[1] 234 | np.minimum(prob, r[2], prob) 235 | np.minimum(prob, r[3], prob) 236 | else: 237 | assert False 238 | 239 | if mask: 240 | pre_sum = np.sum(prob) 241 | prob *= mask.images 242 | post_sum = np.sum(prob) 243 | logging.info('mask reduction %f' % ((pre_sum-post_sum)/pre_sum)) 244 | prob = np.ascontiguousarray(prob) 245 | return extract(prob, r[0]) 246 | pass 247 | 248 | 249 | flags = tf.app.flags 250 | FLAGS = flags.FLAGS 251 | flags.DEFINE_string('prob', 'luna.ns.3c', 'prob model') # prob model 252 | #original default is luna.ns.3c 253 | flags.DEFINE_string('fts', None, 'fts model') # ft model 254 | flags.DEFINE_string('mask', None, 'mask') 255 | flags.DEFINE_integer('mode', MODE_AXIAL, '') # use axial instead of min of 3 views 256 | flags.DEFINE_integer('channels', 3, '') 257 | flags.DEFINE_integer('bits', 16, '') 258 | flags.DEFINE_integer('stride', 16, '') 259 | flags.DEFINE_integer('dilate', 10, '') 260 | flags.DEFINE_bool('prob_dropout', False, '') 261 | flags.DEFINE_bool('fts_dropout', False, '') 262 | flags.DEFINE_bool('fast', False, '') 263 | 264 | 265 | def main (argv): 266 | model = Model(FLAGS.prob, FLAGS.mode, FLAGS.fts, FLAGS.channels, FLAGS.prob_dropout, FLAGS.fts_dropout) 267 | name = FLAGS.prob 268 | if FLAGS.fts: 269 | name += '_' + FLAGS.fts 270 | if FLAGS.mode != MODE_AXIAL: 271 | name += '_m' + str(FLAGS.mode) 272 | if FLAGS.channels != 3: 273 | name += '_c' + str(FLAGS.channels) 274 | if FLAGS.bits != 16: 275 | name += '_b' + str(FLAGS.bits) 276 | if not FLAGS.mask is None: 277 | name += '_' + FLAGS.mask 278 | if FLAGS.dilate != 10: 279 | name += '_d' + str(FLAGS.dilate) 280 | if SPACING != 0.8: 281 | name += '_s%.1f' % SPACING 282 | if GAP != 5: 283 | name += '_g%d' % GAP 284 | 285 | #name = '%s_%s_%d_%d' % (FLAGS.prob, FLAGS.fts, FLAGS.mode, FLAGS.channels) 286 | ROOT 
= os.path.join('cache', name) 287 | try_mkdir(ROOT) 288 | 289 | config = tf.ConfigProto() 290 | setGpuConfig(config) 291 | with tf.Session(config=config) as sess: 292 | tf.global_variables_initializer().run() 293 | model.load(sess) 294 | for uid, _ in ALL_CASES: 295 | cache = os.path.join(ROOT, uid + '.pkl') 296 | if os.path.exists(cache): 297 | continue 298 | with open(cache, 'wb') as f: 299 | pass 300 | start_time = time.time() 301 | if FLAGS.bits == 8: 302 | case = load_8bit_lungs_noseg(uid) 303 | elif FLAGS.bits == 16: 304 | case = load_16bit_lungs_noseg(uid) 305 | else: 306 | assert False 307 | load_time = time.time() 308 | mask = None 309 | if not FLAGS.mask is None: 310 | try: 311 | mask_path = 'cache/%s/%s.npz' % (FLAGS.mask, uid) 312 | mask = load_mask(mask_path) 313 | mask = case.copy_replace_images(mask.astype(dtype=np.float32)) 314 | mask = mask.rescale_spacing(SPACING) 315 | mask.trunc_size(FLAGS.stride) 316 | if FLAGS.dilate > 0: 317 | #print 'dilate', FLAGS.dilate 318 | ksize = FLAGS.dilate * 2 + 1 319 | mask.images = grey_dilation(mask.images, size=(ksize, ksize, ksize), mode = 'constant') 320 | except: 321 | traceback.print_exc() 322 | logging.error('failed to load mask %s' % mask_path) 323 | mask = None 324 | 325 | case = case.rescale_spacing(SPACING) 326 | case.trunc_size(FLAGS.stride) 327 | dim, nodules = model.apply(sess, case, mask) 328 | predict_time = time.time() 329 | with open(cache, 'wb') as f: 330 | pickle.dump((dim, nodules), f) 331 | pass 332 | print uid, (load_time - start_time), (predict_time - load_time) 333 | pass 334 | 335 | if __name__ == '__main__': 336 | logging.basicConfig(level=logging.INFO) 337 | tf.app.run() 338 | 339 | -------------------------------------------------------------------------------- /src/adsb3_cache_mask.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | import time 4 | import subprocess 5 | import numpy as np 6 | import mesh 7 | 
from adsb3 import * 8 | 9 | try_mkdir('cache/mask') 10 | try_mkdir('cache/hull') 11 | for uid, _ in ALL_CASES: 12 | cache = os.path.join('cache/mask', uid + '.npz') 13 | cacheh = os.path.join('cache/hull', uid + '.npz') 14 | if os.path.exists(cache) and os.path.exists(cacheh): 15 | continue 16 | with open(cache, 'wb') as f: 17 | pass 18 | start_time = time.time() 19 | case = Case(uid) 20 | case.normalizeHU() 21 | spacing = case.spacing 22 | UNIT = spacing[0] * spacing[1] * spacing[2] 23 | binary, body_counts = mesh.segment_lung(case.images) #, smooth=20) 24 | ft = (1, [(x * UNIT, [x]) for x in body_counts]) 25 | save_mask(cache, binary) 26 | binary = mesh.convex_hull(binary) 27 | save_mask(cacheh, binary) 28 | load_time = time.time() 29 | print uid, (load_time - start_time) 30 | pass 31 | 32 | -------------------------------------------------------------------------------- /src/adsb3_eval.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import division 4 | from __future__ import print_function 5 | import os 6 | import sys 7 | import argparse 8 | import math 9 | import datetime 10 | import numpy as np 11 | import logging 12 | from sklearn.model_selection import KFold 13 | from sklearn.metrics import classification_report 14 | from sklearn.metrics import log_loss 15 | import xgboost as xgb 16 | from adsb3 import * 17 | 18 | parser = argparse.ArgumentParser(description='') 19 | parser.add_argument('--valid', action='store_true') 20 | parser.add_argument('--cv', action='store_true') 21 | parser.add_argument('--stage1', action='store_true') 22 | parser.add_argument('--reproduce', action='store_true') 23 | parser.add_argument('models', nargs='*') 24 | args = parser.parse_args() 25 | 26 | models = args.models 27 | 28 | if len(models) == 0: 29 | models = ['unet_k_b8', 30 | 'unet_m2_b8', 31 | 'unet_m3_b8', 32 | #'unet_k_ft_b8', 33 | #'unet_k_b8_hull', 
34 | 'tiny_ft1_b8', 35 | 'tiny_m4_b8' 36 | ] 37 | logging.warn('No models are specified, using default: ' + str(models)) 38 | 39 | # 0.43522 40 | SPLIT=3 41 | PARTITIONS = [(1,1,1),(1,1,2),(3,1,1),(1,1,3)] 42 | MIN_NODULE_SIZE=1 #30 43 | MAX_IT = 1200 44 | 45 | if args.reproduce: 46 | models = ['unet_k_b8', 47 | 'unet_m2_b8', 48 | 'unet_m3_b8', 49 | #'unet_k_ft_b8', 50 | #'unet_k_b8_hull', 51 | 'tiny_ft1_b8', 52 | 'tiny_m4_b8' 53 | ] 54 | MIN_NODULE_SIZE=30 55 | MAX_IT = 1200 56 | logging.warn('Reproducing best submission 0.46482') 57 | pass 58 | 59 | 60 | model = xgb.XGBClassifier(n_estimators=MAX_IT, 61 | learning_rate=0.01, 62 | max_depth=2,seed=2016, 63 | subsample=0.9, 64 | colsample_bytree=0.4) #, reg_lambda=0.5) 65 | 66 | xgb_param = {'max_depth':2, 67 | 'eta':0.01, 68 | 'silent':1, 69 | 'objective':'binary:logistic', 70 | 'subsample': 0.90, 71 | 'colsample_bytree': 0.40, 72 | } 73 | 74 | TRAIN = STAGE1_TRAIN 75 | 76 | if args.stage1: 77 | TRAIN = STAGE1_TRAIN 78 | TEST = STAGE1_PUBLIC 79 | else: 80 | TRAIN = STAGE1_TRAIN + STAGE1_PUBLIC 81 | TEST = STAGE2_PRIVATE 82 | pass 83 | 84 | failed = [] 85 | missing = [] 86 | 87 | pyramid = plumo.Pyramid(PARTITIONS) 88 | print("TOTAL_PARTS: ", pyramid.parts) 89 | 90 | def load_uid_features (uid): 91 | ft = [] 92 | for model in models: 93 | cached = os.path.join('cache', model, uid + '.pkl') 94 | if not os.path.exists(cached): 95 | logging.warn('missing ' + cached) 96 | missing.append(cached) 97 | ft.append(None) 98 | continue 99 | if True: 100 | dim, nodules = load_fts(cached) 101 | nodules = [n for n in nodules if n[0] >= MIN_NODULE_SIZE] 102 | ft.append(pyramid.apply(dim, nodules)) 103 | continue 104 | try: 105 | dim, nodules = load_fts(cached) 106 | nodules = [n for n in nodules if n[0] >= MIN_NODULE_SIZE] 107 | ft.append(pyramid.apply(dim, nodules)) 108 | except: 109 | failed.append(cached) 110 | ft.append(None) 111 | pass 112 | pass 113 | return ft 114 | 115 | def merge_features (fts, dims): 116 | v = [] 117 | 
    # Concatenate one vector per model; a missing (None) vector is
    # zero-filled to its model's dimension and flagged via `fixed`.
    fixed = False
    for ft, dim in zip(fts, dims):
        if ft is None:
            fixed = True
            v.extend([0] * dim)
        else:
            assert len(ft) == dim
            v.extend(ft)
        pass
    pass
    return v, fixed

def load_features (dataset, dims = None):
    """Build the design matrix for `dataset` (list of (uid, label)).

    Returns (uids, X, Y, dims).  When `dims` is None it is inferred from
    the first case that has a non-None vector for each model, so the
    training call computes it and the test call reuses it.
    """
    cases = []
    for uid, label in dataset:
        fts = load_uid_features(uid)
        # fts, list of vectors: [ [...], [...], [...] ]
        cases.append((uid, label, fts))
    if dims is None: # calculate dimensions of each model
        dims = [None] * len(models)
        # fix None features
        for i in range(len(models)):
            dims[i] = 0
            for _, _, fts in cases:
                if not fts[i] is None:
                    dims[i] = len(fts[i])
                    break
                pass
            pass
        pass
    U = []
    X = []
    Y = []
    for uid, label, fts in cases:
        v, fixed = merge_features(fts, dims)
        # --valid: evaluate only on cases with complete features
        if args.valid and fixed:
            continue
        U.append(uid)
        Y.append(label)
        X.append(v)
    return U, np.array(X, dtype=np.float32), np.array(Y, dtype=np.float32), dims


_, X_train, Y_train, dims = load_features(TRAIN)
_, X_test, Y_test, _ = load_features(TEST, dims)

print(X_train.shape)
print(Y_train.shape)
print(X_test.shape)
print(Y_test.shape)

kf = KFold(n_splits=SPLIT, shuffle=True, random_state=88)
# Out-of-fold predictions/probabilities over the training set.
Y_train_pred = Y_train * 0
Y_train_prob = Y_train * 0

# K-fold cross validation
for train, val in kf.split(X_train):
    #X_train, X_test, y_train = X[train,:], X[test,:], Y[train]
    model.fit(X_train[train,:], Y_train[train])
    Y_train_pred[val] = model.predict(X_train[val, :])
    Y_train_prob[val] = model.predict_proba(X_train[val, :])[:, 1]
    pass

# Refit on the full training set before scoring the test set.
model.fit(X_train, Y_train)
Y_test_pred = model.predict(X_test)
Y_test_prob = model.predict_proba(X_test)[:, 1]

if args.cv:
    xgb.cv(xgb_param, xgb.DMatrix(X_train, label=Y_train),
(MAX_IT+1000), 186 | nfold=10, 187 | stratified=True, 188 | metrics={'logloss'}, 189 | callbacks=[xgb.callback.print_evaluation(show_stdv=False)]) 190 | 191 | print(classification_report(Y_train, Y_train_pred, target_names=["0", "1"])) 192 | print("validation logloss",log_loss(Y_train, Y_train_prob)) 193 | 194 | print(classification_report(Y_test, Y_test_pred, target_names=["0", "1"])) 195 | print("test logloss",log_loss(Y_test, Y_test_prob)) 196 | 197 | print("%d corrupt" % len(failed)) 198 | for uid in failed: 199 | print(uid) 200 | pass 201 | pass 202 | 203 | print("%d missing" % len(missing)) 204 | -------------------------------------------------------------------------------- /src/data/adsb3/stage1_public.csv: -------------------------------------------------------------------------------- 1 | id,cancer 2 | 026470d51482c93efc18b9803159c960,1 3 | 031b7ec4fe96a3b035a8196264a8c8c3,1 4 | 03bd22ed5858039af223c04993e9eb22,0 5 | 06a90409e4fcea3e634748b967993531,0 6 | 07b1defcfae5873ee1f03c90255eb170,1 7 | 0b20184e0cd497028bdd155d9fb42dc9,0 8 | 12db1ea8336eafaf7f9e3eda2b4e4fef,0 9 | 159bc8821a2dc39a1e770cb3559e098d,0 10 | 174c5f7c33ca31443208ef873b9477e5,0 11 | 1753250dab5fc81bab8280df13309733,0 12 | 1cf8e778167d20bf769669b4be96592b,0 13 | 1e62be2c3b6430b78ce31a8f023531ac,1 14 | 1f6333bc3599f683403d6f0884aefe00,0 15 | 1fdbc07019192de4a114e090389c8330,1 16 | 2004b3f761c3f5dffb02204f1247b211,0 17 | 202898fa97c5949fbdc07ae7ff1cd9f0,0 18 | 21b73c938fd7d346ee77a60bd60aaeac,0 19 | 243038f7bb7787497c59bc17f04c6ed9,0 20 | 26142353f46d20c9fdded93f01e2bff4,0 21 | 263a1c3bfa43556623e75ed901e3fd8f,1 22 | 2703df8c469906a06a45c0d7ff501199,1 23 | 2a3e6ecf9499607ef4fd14b436136b0c,0 24 | 2d596b6ead89ab35577fe625a9a17cbb,1 25 | 2eb92d17ca91b393765e8acf069763a6,1 26 | 2f77fd993fbd858dec3c085b9ff1a3a2,1 27 | 3295cec04482210dc6f78c2b4a1d287b,0 28 | 33387bea2cacf6127035cc7033036a02,0 29 | 34037914ceeec5605fc890159dd425c5,1 30 | 38bf066bba822584e14c0af65d4bb5e9,0 31 | 
3ee1fd6a0f3f108c3558e6699fb011f2,0 32 | 42b2161e43b4dd0ea94604485976c59c,0 33 | 4434e19303b62ebaecef2596583ff351,0 34 | 4575fe61bf3f536ce6cfeb26fcc2893c,1 35 | 48ab0b98fc7789304c21430978624f32,0 36 | 49433c1588cc078b825a0eff1dc2e816,0 37 | 49c88f7cc77341c9ae4e64243f9912fc,0 38 | 4b28f147cb82baba3edcdbd34ca19085,0 39 | 505405b3e70fb24b92e6a8a5b7ed339c,0 40 | 50cdacec399071cf70d8badd2511d0b3,0 41 | 519ad4ead3e61d2d71088ac8e46f25b6,0 42 | 52f6d741e674f62fbcf73e6ec4f6a472,0 43 | 538543b57d0c8fa0b2b6bb7c84df3f33,0 44 | 5451203688c930484ba1f3c7f1378847,0 45 | 55b06d60e7c0329787f81d1b7cbf9aa0,0 46 | 567547810a1795b9c8e11c15dfd32c34,1 47 | 5791c42d317f34592be9a933c50e68ad,0 48 | 580cffecce8d3d53cde1abb922adf21a,0 49 | 59af702c21840ec18073b6b56c95e7fe,1 50 | 5a42f0a0d1e060531c20d04ed23efc02,0 51 | 5ae9ab473d59cd29262c47a741177b6e,0 52 | 5ce91933688cc8400105bf640ac11535,0 53 | 5d16819bd78c74448ce852a93bf423ad,1 54 | 61017c23bbae6e17062ff582d1a237b3,0 55 | 616f98dab4db03edbad28c73d22468d2,0 56 | 63458b5875a0b223ec21555d17b52fd4,0 57 | 6379e4435f78a5e5c150c32146ece4d4,0 58 | 649fd56ef9809019b57261fcf9574d76,0 59 | 665c1913d8e90e57af3b745349d19537,1 60 | 68f4dff6dd1f135488e83b8a4ee6e20e,0 61 | 6993396b31078993e13cf9c0a6fd470b,0 62 | 6c71617e2cee498fd3dd20956bb90a3b,0 63 | 6d3b16f2e60c3a1a4246f340dba73676,0 64 | 6d3be6081d76d2365b080e599628d3bc,0 65 | 6d43fdb6eb1bec3a5f4febfd442e8c93,0 66 | 6e240f23afa2c1b4352cd0db5d4f357d,1 67 | 6f229187fe608c9eacc567eb74c1458c,1 68 | 7027c0b8c8f8dcc76c6e4ba923d60a2e,0 69 | 70671fa94231eb377e8ac7cba4650dfb,0 70 | 70f4eb8201e3155cc3e399f0ff09c5ef,1 71 | 7191c236cfcfc68cd21143e3a0faac51,0 72 | 763288341ee363a264fe45a28ea28c21,0 73 | 7869cc6bfc3678fec1a81e93b34648cf,0 74 | 7c2fd0d32df5a2780b4b10fdf2f2cdbe,0 75 | 7ce310b8431ace09a91ededcc03f7361,0 76 | 7cf1a65bb0f89323668034244a59e725,1 77 | 7daeb8ef7307849c715f7f6f3e2dd88e,0 78 | 7f096cdfbc2fe03ec7f779278416a78c,0 79 | 7fd5be8ec9c236c314f801384bd89c0c,1 80 | 80938b4f531fa2334c13d829339e1356,0 81 | 
80bda1afde73204abd74d1ebd2758382,0 82 | 81bd0c062bfa8e85616878bab90f2314,0 83 | 82b9fb9e238397b2f3bff98975577ff9,0 84 | 83728b6eed98845556bfc870b7567883,0 85 | 84ed26b5d79da321711ed869b3cad2ea,1 86 | 85ab88f093ca53a4fab5654e24c77ebe,0 87 | 85d6fb4a08853d370935a75de7495a27,0 88 | 86ad341b9ac27364f03981f6a775246c,0 89 | 88acee40bb9d8cb06898d1c5de01d3c8,1 90 | 89f003dbfbdbd18a5cdeb9b128cb075b,0 91 | 8a1e5830a16db34b580202f8b6dbbd3d,0 92 | 8b494d14d835dd5ae13dab19b9520a55,0 93 | 8b9a28375988de6ea0b143d48b4a8dc9,0 94 | 8bb7dd5fbfa5ecb95552d9c587f2fea5,1 95 | 8be7a7cc747365030bee8297221ab5bc,0 96 | 8e60f166f1f1dc0d72f997fe1c9e72b4,0 97 | 8e9002a485cbda2b47cd14014d6f1c36,0 98 | 8f517521a2ed576e853fab1907fa5ffd,1 99 | 8fde44df03fb80366c6604db53d3623f,0 100 | 901ed0a38aa16933c04ffd531b0aa2cf,1 101 | 9050cf3aa8371bd7088c4bdf967141d4,1 102 | 9065f2b133129c5747d42db18a424749,1 103 | 931253c408c440a8494dfaa74251efd3,0 104 | 94df6d1ae21c5bfaebe6f8daf8fcd85b,0 105 | 95a98df466d4f6c6689908ea9a8f324b,0 106 | 96042e205dd3dc055f084aaca245e550,0 107 | 96544665531e7f59bc2730e3c5f42e65,1 108 | 96cca9d8e5764daa4bcb6c0ba07735bc,0 109 | 993f1e68290d591f755669e97b49b4f4,0 110 | 995fc0581ed0e3ba0f97dbd7fe63db59,0 111 | 9a378249b799bbcefac2a7de46896c0a,1 112 | 9b871732b3935661e7639e84a6ab9747,1 113 | 9ca18e68b6b8d9c3112b4b69b7d6fad5,0 114 | 9cc74e673ec9807ee055973e1b185624,0 115 | 9de48cf43611478ffc1fef051b75dc8c,0 116 | a0e60d7a13f6bb4002cc4a08e60b0776,1 117 | a0fc609febe3eef5a4713a22996cf8e5,0 118 | a2558184e0f4a68e9fb13579d20cb244,1 119 | a2a4bc7708f6831470d757cd6f32bffe,1 120 | a334d15ac8d2d25bce76693b1b2a3ed7,0 121 | a5bb766ab3b1bc5a8023a50a956595f2,1 122 | a5d7909f14d43f01f44cdcaabed27b84,0 123 | a6c15206edadab0270898f03e770d730,0 124 | aa59b7a4aa4dfb2489feea527eda3e4d,1 125 | ab9c7bef62d1ad65b824414087b6f06b,0 126 | ac4056071f3cc98489b9db3aebfe2b6a,1 127 | ae2fdcd8daa3fede6ae23cc63a8d9a82,0 128 | ae4e9d8aab8f8f5ae975bcca923f468d,0 129 | ae61ec94b0b8de5439180f4776551e42,0 130 | 
aec5a58fea38b77b964007aa6975c049,0 131 | af1d0c2fcde369dd1b715460c2f704a2,1 132 | b0599ad2f33276e7cd065eaa8dcec8a2,0 133 | b17c07114dcf49ce71c8da4b43cf1192,1 134 | b4d5b618fdf3a5a1bcfb325a3715e99e,0 135 | b4db5b96c65a668a2e63f9a3ed36afe7,1 136 | b53d997901eb880c41fbfbc82847204c,0 137 | b6857d98b7b3dbe84f153617f4dfd14b,0 138 | b82efe72526c59a96257208d95e54baf,1 139 | b8793dbd40de88c0de0913abbaab0fe7,0 140 | bbf7a3e138f9353414f2d51f0c363561,0 141 | bdc2daa372a36f6f7c72abdc0b5639d1,0 142 | bdfb2c23a8c1dca5ea8c1cc3d89efee9,1 143 | be3e35bf8395366d235b8bcfc71a05ee,0 144 | be9a2df5a16434e581c6a0625c290591,0 145 | bf6a7a9ab4e18b18f43129c9e22fb448,0 146 | c0c5a155e6e59588783c2964975e7e1e,0 147 | c25876fb40d6f8dafd1ecb243193dd3f,1 148 | c2ef34cc347bc224b5a123426009d027,0 149 | c3a9046fbe2b0f0a4e43a669c321e472,0 150 | c46c3962c10e287f1c1e3af0d309a128,0 151 | c71d0db2086b7e2024ca9c11bd2ca504,1 152 | c7bdb83b7ca6269fac16ab7cff930a2e,0 153 | c87a713d17522698958de55c97654beb,1 154 | c95f2aa23e6d6702f5b16a3b35f89cf0,0 155 | cbb9bbd994c235b56fb77429291edf99,0 156 | cc1b7e34d9eba737c9fb91316463e8f7,0 157 | cc4805e3ebe8621bc94a621b1714fc84,0 158 | cd68d1a14cc504e3f7434d5cc324744d,0 159 | cd6be62834c72756738935f904ec9c2c,0 160 | cdb53f3be6d8cce07fa41c833488d8a5,0 161 | d03127f497cae40bcbd9996b4d1f5b90,0 162 | d032116d73789ff9c805f493357b4037,1 163 | d1131708024b32032ade1ef48d115915,0 164 | d1a20ef45bb03f93a407b492066f6d88,0 165 | d2ec8f0fc56a9168cda0c707e49974ab,0 166 | d3a8fb1da8f7a0dcbd5a8d65f3647757,0 167 | d42c998d037fb3003faba541e2cf649a,0 168 | d4a075768abe7fe43ad1caac92515256,1 169 | d5a0333be8795805fc39509f817780ee,0 170 | d654966fd2498de023552b830c07a659,0 171 | d753676c2c6c8ac6f97bd61ecab7554a,0 172 | d81852bffda09dc8033a45332397c495,1 173 | dbd9c8025907511e965e7abad955547d,0 174 | e0aa61b44c33e6a75940a8541c6894c9,0 175 | e314fd13809db0132443b924401d828b,1 176 | e33c25d0dbca5e54385f2100ce523467,0 177 | e3bc0a970a4af5d52826e06742f90e5b,0 178 | 
e42065c1145ccf734312cb9edbe5234b,0 179 | e60d99ea9648e1ce859eb0b386365e26,0 180 | e6160ed0ff2eb214abd4df9a3c336c1d,1 181 | e6d8ae8c3b0817df994a1ce3b37a7efb,1 182 | e9a27e2645e1fad9434ce765f678585f,0 183 | ea01deecde93cd9503a049d71d46e6d5,1 184 | ea3a771ef05e288409e0250ea893cf87,0 185 | eaeebb7a63edc8a329a7c5fbc583a507,0 186 | eb9db3f740f8e153e85f83c57bc4e522,0 187 | ebcdfabecf4b46b1e55e4a4c75a0afb0,1 188 | efcb6def7a2080243052b6046186ab24,1 189 | f0310ffc724faf9f7aef2c418127ee68,0 190 | f4d23e0272a2ce5bfc7f07033d4f2e7d,1 191 | f5ff7734997820b45dafa75dff60ece8,0 192 | f7c387290d7e3074501eac167c849000,1 193 | f89e3d0867e27be8e19d7ed50e1eb7e8,0 194 | fad57a1078ddbc685e517bd8f24aa8ac,1 195 | fb55849cee6473974612c17f094a38cd,0 196 | fb5874408966d7c6bebd3d84a5599e20,0 197 | fcfab3eddbdf0421c39f71d651cc5c56,0 198 | fdcd385b0d2d12341661e1abe845be0b,0 199 | ff8599dd7c1139be3bad5a0351ab749a,0 200 | -------------------------------------------------------------------------------- /src/data/adsb3/stage2_public.csv: -------------------------------------------------------------------------------- 1 | id,cancer 2 | 5a06ebc438b934a360a5e469a6874505,1 3 | 74645837e1bf20cb3b58d33d19201d09,0 4 | 769777492f8397ae6f3494288bd76635,0 5 | 97cdfe259a3bd124def9b444746b72bd,0 6 | 9fb26348e55b7b1b5247e59104beec5f,1 7 | a226ad784059cb82107a479bab21dedf,0 8 | -------------------------------------------------------------------------------- /src/dump_fts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import 3 | from __future__ import division 4 | from __future__ import print_function 5 | 6 | import sys 7 | from adsb3 import * 8 | 9 | dim, fts = load_fts(sys.argv[1]) 10 | 11 | print(dim) 12 | for w, pos, one in fts: 13 | print(w, pos, one) 14 | 15 | -------------------------------------------------------------------------------- /src/mesh.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | import time 4 | import numpy as np 5 | import cv2 6 | import skimage 7 | from skimage import measure 8 | from scipy.ndimage.morphology import grey_dilation, binary_dilation, binary_fill_holes 9 | #from skimage import regionprops 10 | import scipy 11 | from adsb3 import * 12 | 13 | def pad (images, padding=2, dtype=None): 14 | Z, Y, X = images.shape 15 | if dtype is None: 16 | dtype = images.dtype 17 | out = np.zeros((Z+padding*2, Y+padding*2, X+padding*2), dtype=dtype) 18 | out[padding:(Z+padding),padding:(Y+padding),padding:(X+padding)] = images 19 | return out 20 | 21 | def segment_body (image, smooth=1, th=-300): 22 | blur = scipy.ndimage.filters.gaussian_filter(image, smooth, mode='constant') 23 | binary = np.array(blur < th, dtype=np.uint8) 24 | 25 | # body is a rough region covering human body 26 | body = np.zeros_like(binary) 27 | for i, sl in enumerate(binary): 28 | #H, W = sl.shape 29 | ll = measure.label(sl, background=1) # connected components 30 | # biggest CC should be body 31 | pp = measure.regionprops(ll) 32 | boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0] # label 0 is air 33 | boxes = sorted(boxes, key = lambda x: -x[0]) 34 | if len(boxes) == 0: 35 | continue 36 | y0, x0, y1, x1 = boxes[0][1] 37 | body[i,y0:y1,x0:x1] = boxes[0][2] 38 | pass 39 | return body, None 40 | 41 | def fill_convex (image): 42 | H, W = image.shape 43 | padded = np.zeros((H+20, W+20), dtype=np.uint8) 44 | padded[10:(10+H),10:(10+W)] = image 45 | 46 | contours = measure.find_contours(padded, 0.5) 47 | if len(contours) == 0: 48 | return image 49 | if len(contours) == 1: 50 | contour = contours[0] 51 | else: 52 | contour = np.vstack(contours) 53 | cc = np.zeros_like(contour, dtype=np.int32) 54 | cc[:,0] = contour[:, 1] 55 | cc[:,1] = contour[:, 0] 56 | hull = cv2.convexHull(cc) 57 | contour = hull.reshape((1, -1, 2)) 58 | 
cv2.fillPoly(padded, contour, 1) 59 | return padded[10:(10+H),10:(10+W)] 60 | 61 | def segment_lung (image, smooth=1, th=-300): 62 | 63 | padding_value = np.min(image) 64 | if padding_value < -1010: 65 | padding = [image == padding_value] 66 | else: 67 | padding = None 68 | 69 | imagex = image 70 | if padding: 71 | imagex = np.copy(image) 72 | imagex[padding] = 0 73 | blur = scipy.ndimage.filters.gaussian_filter(imagex, smooth, mode='constant') 74 | if padding: 75 | blur[padding] = padding_value 76 | 77 | binary = np.array(blur < th, dtype=np.uint8) 78 | 79 | 80 | # body is a rough region covering human body 81 | body = np.zeros_like(binary) 82 | 83 | for i, sl in enumerate(binary): 84 | #H, W = sl.shape 85 | ll = measure.label(sl, background=1) # connected components 86 | # biggest CC should be body 87 | pp = measure.regionprops(ll) 88 | boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0] # label 0 is air 89 | 90 | boxes = sorted(boxes, key = lambda x: -x[0]) 91 | if len(boxes) == 0: 92 | print 'no body detected' 93 | continue 94 | y0, x0, y1, x1 = boxes[0][1] 95 | body[i,y0:y1,x0:x1] = fill_convex(boxes[0][2]) 96 | pass 97 | 98 | binary *= body 99 | 100 | if False: 101 | padding = np.min(image) 102 | if padding < -1010: 103 | binary[image == padding] = 0 104 | 105 | # 0: body 106 | # 1: air & lung 107 | 108 | labels = measure.label(binary, background=1) 109 | 110 | # set air (same cc as corners) -> body 111 | bg_labels = set() 112 | # 8 corders of the image 113 | for z in [0, -1]: 114 | for y in [0, -1]: 115 | for x in [0, -1]: 116 | bg_labels.add(labels[z, y, x]) 117 | print bg_labels 118 | bg_labels = list(bg_labels) 119 | for bg_label in bg_labels: 120 | binary[bg_label == labels] = 0 121 | pass 122 | 123 | # now binary: 124 | # 0: non-lung & body tissue in lung & air 125 | # 1: lung & holes in body 126 | #inside = np.copy(binary) 127 | 128 | 129 | # now binary: 130 | # 0: non-lung & body tissue in lung 131 | # 1: lung & holes in body 132 | 
binary = np.swapaxes(binary, 0, 1) 133 | for i, sl in enumerate(binary): 134 | #H, W = sl.shape 135 | ll = measure.label(sl, background=1) # connected components 136 | # biggest CC should be body 137 | vv, cc = np.unique(ll, return_counts=True) 138 | cc[0] = 0 139 | assert len(vv) > 0 140 | body_ll = vv[np.argmax(cc)] 141 | binary[i][ll != body_ll] = 1 142 | pass 143 | binary = np.swapaxes(binary, 0, 1) 144 | if padding: 145 | binary[padding] = 0 146 | binary *= body 147 | 148 | # binary 0: body 149 | # 1: - anything inside lung 150 | # - holes in body 151 | # - possibly image corners 152 | # 153 | 154 | # inside 0: non-lung & air 155 | # body tissue in lung 156 | # 1: lung 157 | 158 | # set corner again 159 | labels = measure.label(binary, background=0) 160 | bg_labels = set([0]) 161 | for z in [0, -1]: 162 | for y in [0, -1]: 163 | for x in [0, -1]: 164 | bg_labels.add(labels[z, y, x]) 165 | 166 | #print 'bg', bg_labels 167 | val_counts = zip(*np.unique(labels, return_counts=True)) 168 | val_counts = [x for x in val_counts if (not x[0] in bg_labels) and (x[1] >= 10)] 169 | val_counts = sorted(val_counts, key=lambda x:-x[1])[:100] # sort by size 170 | body_counts = [c for _, c in val_counts] 171 | print val_counts 172 | binary = np.zeros_like(binary, dtype=np.uint8) 173 | print val_counts[0][0] 174 | binary[labels == val_counts[0][0]] = 1 175 | #for v, _ in val_counts[0:5]: 176 | # binary[labels == v] = 1 177 | if len(val_counts) > 1: 178 | if val_counts[1][1] * 3 > val_counts[0][1]: 179 | #binary[labels == val_counts[1][0]] = 1 180 | #if val_counts[1][1] * 4 > val_counts[0][1]: 181 | logging.warn('more than 2 lungs parts detected') 182 | 183 | # remove upper part of qiguan 184 | last = binary.shape[0] - 1 185 | for ri in range(binary.shape[0]): 186 | #H, W = sl.shape 187 | i = last - ri 188 | ll = measure.label(binary[i], background=0) # connected components 189 | nl = np.unique(ll) 190 | if len(nl) <= 2: 191 | binary[i,:,:] = 0 192 | else: 193 | print 'removed 
%d slices' % ri 194 | break 195 | pass 196 | 197 | return binary, body_counts #, inside 198 | 199 | def convex_hull (binary): 200 | swap_sequence = [(0, 1), # 102 201 | (0, 2), # 201 202 | (0, 2)] # 102 203 | 204 | output = np.ndarray(binary.shape, dtype=binary.dtype) 205 | for swp1, swp2 in swap_sequence: 206 | N = binary.shape[0] 207 | print 'shape', binary.shape 208 | for i in range(N): 209 | contours = measure.find_contours(binary[i], 0.5) 210 | if len(contours) == 0: 211 | continue 212 | if len(contours) == 1: 213 | contour = contours[0] 214 | else: 215 | contour = np.vstack(contours) 216 | cc = np.zeros_like(contour, dtype=np.int32) 217 | cc[:,0] = contour[:, 1] 218 | cc[:,1] = contour[:, 0] 219 | hull = cv2.convexHull(cc) 220 | contour = hull.reshape((1, -1, 2)) 221 | cv2.fillPoly(binary[i], contour, 1) 222 | #binary[i] = skimage.morphology.convex_hull_image(binary[i]) 223 | pass 224 | print 'swap', swp1, swp2 225 | nb = np.swapaxes(binary, swp1, swp2) 226 | binary = np.ndarray(nb.shape, dtype=nb.dtype) 227 | binary[:,:] = nb[:,:] 228 | pass 229 | binary = np.swapaxes(binary, 0, 1) 230 | output[:,:] = binary[:,:] 231 | return output; 232 | #binary = binary_dilation(output, iterations=dilate) 233 | #return binary 234 | 235 | def segment_lung_internal (image, smooth=1, th=-300): 236 | 237 | padding_value = np.min(image) 238 | if padding_value < -1010: 239 | padding = [image == padding_value] 240 | else: 241 | padding = None 242 | 243 | imagex = image 244 | if padding: 245 | imagex = np.copy(image) 246 | imagex[padding] = 0 247 | blur = scipy.ndimage.filters.gaussian_filter(imagex, smooth, mode='constant') 248 | if padding: 249 | blur[padding] = padding_value 250 | 251 | binary = np.array(blur < th, dtype=np.uint8) 252 | 253 | #not_slid = np.array(blur < th, dtype=np.uint8) 254 | not_solid = np.copy(binary) 255 | 256 | 257 | # body is a rough region covering human body 258 | body = np.zeros_like(binary) 259 | 260 | for i, sl in enumerate(binary): 261 | #H, W = 
sl.shape 262 | ll = measure.label(sl, background=1) # connected components 263 | # biggest CC should be body 264 | pp = measure.regionprops(ll) 265 | boxes = [(x.area, x.bbox, x.filled_image) for x in pp if x.label != 0] # label 0 is air 266 | 267 | boxes = sorted(boxes, key = lambda x: -x[0]) 268 | if len(boxes) == 0: 269 | print 'no body detected' 270 | continue 271 | y0, x0, y1, x1 = boxes[0][1] 272 | body[i,y0:y1,x0:x1] = fill_convex(boxes[0][2]) 273 | pass 274 | 275 | binary *= body 276 | 277 | if False: 278 | padding = np.min(image) 279 | if padding < -1010: 280 | binary[image == padding] = 0 281 | 282 | # 0: body 283 | # 1: air & lung 284 | 285 | labels = measure.label(binary, background=1) 286 | 287 | # set air (same cc as corners) -> body 288 | bg_labels = set() 289 | # 8 corders of the image 290 | for z in [0, -1]: 291 | for y in [0, -1]: 292 | for x in [0, -1]: 293 | bg_labels.add(labels[z, y, x]) 294 | print bg_labels 295 | bg_labels = list(bg_labels) 296 | for bg_label in bg_labels: 297 | binary[bg_label == labels] = 0 298 | pass 299 | 300 | # now binary: 301 | # 0: non-lung & body tissue in lung & air 302 | # 1: lung & holes in body 303 | #inside = np.copy(binary) 304 | 305 | 306 | # now binary: 307 | # 0: non-lung & body tissue in lung 308 | # 1: lung & holes in body 309 | binary = np.swapaxes(binary, 0, 1) 310 | for i, sl in enumerate(binary): 311 | #H, W = sl.shape 312 | ll = measure.label(sl, background=1) # connected components 313 | # biggest CC should be body 314 | vv, cc = np.unique(ll, return_counts=True) 315 | cc[0] = 0 316 | assert len(vv) > 0 317 | body_ll = vv[np.argmax(cc)] 318 | binary[i][ll != body_ll] = 1 319 | pass 320 | binary = np.swapaxes(binary, 0, 1) 321 | if padding: 322 | binary[padding] = 0 323 | binary *= body 324 | 325 | # binary 0: body 326 | # 1: - anything inside lung 327 | # - holes in body 328 | # - possibly image corners 329 | # 330 | 331 | # inside 0: non-lung & air 332 | # body tissue in lung 333 | # 1: lung 334 | 
335 | # set corner again 336 | labels = measure.label(binary, background=0) 337 | bg_labels = set([0]) 338 | for z in [0, -1]: 339 | for y in [0, -1]: 340 | for x in [0, -1]: 341 | bg_labels.add(labels[z, y, x]) 342 | 343 | #print 'bg', bg_labels 344 | val_counts = zip(*np.unique(labels, return_counts=True)) 345 | val_counts = [x for x in val_counts if (not x[0] in bg_labels) and (x[1] >= 10)] 346 | val_counts = sorted(val_counts, key=lambda x:-x[1])[:100] # sort by size 347 | body_counts = [c for _, c in val_counts] 348 | print val_counts 349 | binary = np.zeros_like(binary, dtype=np.uint8) 350 | print val_counts[0][0] 351 | binary[labels == val_counts[0][0]] = 1 352 | #for v, _ in val_counts[0:5]: 353 | # binary[labels == v] = 1 354 | if len(val_counts) > 1: 355 | if val_counts[1][1] * 3 > val_counts[0][1]: 356 | #binary[labels == val_counts[1][0]] = 1 357 | #if val_counts[1][1] * 4 > val_counts[0][1]: 358 | logging.warn('more than 2 lungs parts detected') 359 | 360 | # remove upper part of qiguan 361 | last = binary.shape[0] - 1 362 | for ri in range(binary.shape[0]): 363 | #H, W = sl.shape 364 | i = last - ri 365 | ll = measure.label(binary[i], background=0) # connected components 366 | nl = np.unique(ll) 367 | if len(nl) <= 2: 368 | binary[i,:,:] = 0 369 | else: 370 | print 'removed %d slices' % ri 371 | break 372 | pass 373 | 374 | #not_solid = np.logical_and(not_solid, binary) # solid within lung 375 | return np.logical_and(not_solid, binary), body_counts #, inside 376 | 377 | -------------------------------------------------------------------------------- /src/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup, Extension 2 | 3 | _plumo = Extension('_plumo', 4 | language = 'c++', 5 | extra_compile_args = ['-O3', '-std=c++1y'], 6 | libraries = ['boost_python', 'glog'], 7 | include_dirs = ['/usr/local/include'], 8 | library_dirs = ['/usr/local/lib'], 9 | sources = ['plumo.cpp'] 10 | ) 
11 | 12 | setup (name = 'plumo', 13 | version = '0.0.1', 14 | author = 'Wei Dong and Yuanfang Guan', 15 | author_email = 'wdong@wdong.org', 16 | license = 'MIT', 17 | description = 'This is a demo package', 18 | ext_modules = [_plumo], 19 | ) 20 | --------------------------------------------------------------------------------