├── oulukneeloc
│   ├── svm_model.npy
│   ├── __init__.py
│   ├── proposals.py
│   └── detector.py
├── MANIFEST.in
├── create_conda_env.sh
├── setup.py
├── LICENSE.txt
├── README.md
└── .gitignore
/oulukneeloc/svm_model.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/imedslab/KneeLocalizer/HEAD/oulukneeloc/svm_model.npy
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md LICENSE.txt MANIFEST.in
2 |
3 | recursive-include oulukneeloc *.npy
4 | include create_conda_env.sh
5 |
--------------------------------------------------------------------------------
/oulukneeloc/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 |
4 | SVM_MODEL_PATH = os.path.join(os.path.dirname(__file__),
5 | 'svm_model.npy')
6 |
--------------------------------------------------------------------------------
/create_conda_env.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | conda create -y -n knee_localizer python=3.6
4 | conda install -y -n knee_localizer numpy opencv scipy
5 | source activate knee_localizer
6 |
7 | pip install pip -U
8 | pip install pydicom
9 | pip install tqdm
10 |
11 | pip install -e .
12 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 |
4 | setup(
5 | name='oulu-knee-localizer',
6 | version='0.1',
7 | author='Aleksei Tiulpin',
8 | author_email='aleksei.tiulpin@oulu.fi',
9 | packages=find_packages(),
10 | include_package_data=True,
11 |     license='MIT',
12 | long_description=open('README.md').read(),
13 | )
14 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Aleksei Tiulpin
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Code for the paper from SCIA'17: A novel method for automatic localization of joint area on knee plain radiographs
2 |
3 | # Description
4 | This repository contains the code for automatic knee joint detection on plain radiographs. It can be used to process very large amounts of knee X-rays and generate bounding boxes (up to 6 000 000 images per day on a high-end computer).
5 |
6 | Our package processes the data in batch mode, in parallel over multiple processes (a simplified sketch of this loop is shown below). To run it on your machine, you have to install the conda environment; for that, simply execute `create_conda_env.sh`.
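
As a rough illustration of what `detector.py` does internally, here is a simplified, hypothetical sketch of the batch-processing loop (the input folder and output file names are made up; see `oulukneeloc/detector.py` for the actual implementation, which streams results with `imap` and `tqdm`):

```python
# Simplified sketch of the parallel batch loop in oulukneeloc/detector.py.
import os
from multiprocessing import Pool, cpu_count

from oulukneeloc.detector import KneeLocalizer, worker

if __name__ == '__main__':
    path_input = '/data/knee_xrays'   # hypothetical folder with DICOM files
    localizer = KneeLocalizer()       # loads the bundled svm_model.npy

    fnames = os.listdir(path_input)
    # One worker process per CPU core; each result line is
    # "<fname> <left bbox> <right bbox>" with -1s for failed images.
    with Pool(cpu_count()) as pool:
        results = pool.starmap(worker,
                               [(f, path_input, localizer) for f in fnames])

    with open('detection_results.txt', 'w') as f:
        f.write('\n'.join(results) + '\n')
```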
7 |
8 | # How to run
9 | Run the script as follows:
10 | ```
11 | cd oulukneeloc
12 | python detector.py --path_input <folder with the DICOM images> \
13 |                    --fname_output <file to store the detections>
14 | ```
15 |
16 | The script will produce a 120 mm × 120 mm bounding box for each knee and save the results to the specified file
17 | (by default, `../detection_results.txt`). Each output line contains the file name followed by the left and the right bounding boxes as `x1 y1 x2 y2`; images that could not be processed are reported with `-1` values.
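
The localizer can also be called from Python directly. Below is a minimal sketch (the DICOM file name is hypothetical, and real code should check that `read_dicom` did not return `None`):

```python
from oulukneeloc.detector import KneeLocalizer
from oulukneeloc.proposals import read_dicom, preprocess_xray

localizer = KneeLocalizer()                    # uses the bundled svm_model.npy
img, spacing = read_dicom('example_knee.dcm')  # hypothetical file name
bbox_left, bbox_right = localizer.predict(preprocess_xray(img), spacing)
print(bbox_left, bbox_right)                   # each is [x1, y1, x2, y2]
```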
18 |
19 | # How to cite
20 | If you use our package in your own research, please cite us:
21 |
22 | ```
23 | @inproceedings{tiulpin2017novel,
24 | title={A novel method for automatic localization of joint area on knee plain radiographs},
25 | author={Tiulpin, Aleksei and Thevenot, Jerome and Rahtu, Esa and Saarakkala, Simo},
26 | booktitle={Scandinavian Conference on Image Analysis},
27 | pages={290--301},
28 | year={2017},
29 | organization={Springer}
30 | }
31 | ```
32 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | .idea/*
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # SageMath parsed files
80 | *.sage.py
81 |
82 | # dotenv
83 | .env
84 |
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | # mkdocs documentation
98 | /site
99 |
100 | # mypy
101 | .mypy_cache/
102 |
--------------------------------------------------------------------------------
/oulukneeloc/proposals.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pydicom as dicom
3 |
4 |
5 | def read_dicom(filename):
6 |     """Read a DICOM file and return the raw uint16 image along with its pixel spacing.
7 |
8 | Parameters
9 | ----------
10 | filename: str
11 | Existing DICOM file filename.
12 | """
13 | try:
14 |         data = dicom.dcmread(filename)
15 | img = np.frombuffer(data.PixelData, dtype=np.uint16).copy()
16 |
17 | if data.PhotometricInterpretation == 'MONOCHROME1':
18 | img = img.max() - img
19 | img = img.reshape((data.Rows, data.Columns))
20 | return img, data.ImagerPixelSpacing[0]
21 |     except Exception:
22 | return None
23 |
24 |
25 | def preprocess_xray(img, cut_min=5., cut_max=99.):
26 | """Preprocess the X-ray image using histogram clipping and global contrast normalization.
27 |
28 | Parameters
29 | ----------
30 |     cut_min: float
31 |         Lowest percentile used to clip the image histogram.
32 |     cut_max: float
33 |         Highest percentile used to clip the image histogram.
34 | """
35 |
36 | img = img.astype(np.float64)
37 |
38 | lim1, lim2 = np.percentile(img, [cut_min, cut_max])
39 |
40 | img[img < lim1] = lim1
41 | img[img > lim2] = lim2
42 |
43 | img -= lim1
44 |
45 | img /= img.max()
46 | img *= 255
47 |
48 | return img.astype(np.uint8, casting='unsafe')
49 |
50 |
51 | def get_joint_y_proposals(img, av_points=11, margin=0.25):
52 | """Return Y-coordinates of the joint approximate locations."""
53 |
54 | R, C = img.shape
55 |
56 |     # Row-wise sum over the middle third of the columns (intensity profile along the Y-axis)
57 | segm_line = np.sum(img[int(R * margin):int(R * (1 - margin)),
58 | int(C / 3):int(C - C / 3)], axis=1)
59 |     # Differentiate the profile, smooth it, and take the absolute value
60 | segm_line = np.abs(np.convolve(
61 | np.diff(segm_line), np.ones((av_points, )) / av_points)[(av_points-1):])
62 |
63 | # Get top tau % of the peaks
64 | peaks = np.argsort(segm_line)[::-1][:int(0.1 * R * (1 - 2 * margin))]
65 | return peaks[::10] + int(R * margin)
66 |
--------------------------------------------------------------------------------
/oulukneeloc/detector.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import argparse
4 | from multiprocessing import Pool, cpu_count
5 |
6 | import numpy as np
7 | import cv2
8 | from tqdm import tqdm
9 |
10 | from oulukneeloc import SVM_MODEL_PATH
11 | from oulukneeloc.proposals import (read_dicom, get_joint_y_proposals,
12 | preprocess_xray)
13 |
14 |
15 | class KneeLocalizer:
16 | def __init__(self, svm_model_path=SVM_MODEL_PATH, size_mm=120):
17 | super().__init__()
18 | self.win_size = (64, 64)
19 | self.win_stride = (64, 64)
20 | self.block_size = (16, 16)
21 | self.block_stride = (8, 8)
22 | self.cell_size = (8, 8)
23 | self.padding = (0, 0)
24 | self.nbins = 9
25 | self.scales = [3.2, 3.3, 3.4, 3.6, 3.8]
26 | self.step = 95
27 |
28 | self.size_mm = size_mm
29 |         self.svm_w, self.svm_b = np.load(svm_model_path, allow_pickle=True, encoding='bytes')
30 |
31 | def predict(self, fileobj, spacing=None):
32 |         """Localize the left and the right knee joints in a PA X-ray image.
33 |
34 | Parameters
35 | ----------
36 | fileobj: str or ndarray
37 | Filename of the DICOM image, or already extracted uint16 ndarray.
38 | spacing: float or None
39 | Spacing extracted from the previously read DICOM.
40 |
41 | Returns
42 | -------
43 | detections: list of lists
44 | The first list has the bbox for the left knee joint.
45 | The second list has the bbox for the right knee joint.
46 | """
47 |
48 | if isinstance(fileobj, str):
49 | tmp = read_dicom(fileobj)
50 | if tmp is None:
51 | return None
52 | if len(tmp) != 2:
53 | return None
54 | img, spacing = tmp
55 | img = preprocess_xray(img)
56 | elif isinstance(fileobj, np.ndarray):
57 | img = fileobj
58 | if spacing is None:
59 |                 raise ValueError('spacing must be provided together with an ndarray image')
60 | else:
61 |             raise ValueError('fileobj must be either a DICOM filename or an image ndarray')
62 |
63 | R, C = img.shape
64 | split_point = C // 2
65 |
66 | right_leg = img[:, :split_point]
67 | left_leg = img[:, split_point:]
68 |
69 | sizepx = int(self.size_mm / spacing) # Proposal size
70 |
71 | # We will store the coordinates of the top left and
72 | # the bottom right corners of the bounding box
73 | hog = cv2.HOGDescriptor(self.win_size,
74 | self.block_size,
75 | self.block_stride,
76 | self.cell_size,
77 | self.nbins)
78 |
79 | # Make proposals for the right leg
80 | R, C = right_leg.shape
81 | displacements = range(-C // 4, 1 * C // 4 + 1, self.step)
82 | prop = get_joint_y_proposals(right_leg)
83 | best_score = -np.inf
84 |
85 | for y_coord in prop:
86 | for x_displ in displacements:
87 | for scale in self.scales:
88 | if C / 2 + x_displ - R / scale / 2 >= 0:
89 | # Candidate ROI
90 | roi = np.array([C / 2 + x_displ - R / scale / 2,
91 | y_coord - R / scale / 2,
92 |                                         R / scale, R / scale], dtype=int)
93 | x1, y1 = roi[0], roi[1]
94 | x2, y2 = roi[0] + roi[2], roi[1] + roi[3]
95 | patch = cv2.resize(img[y1:y2, x1:x2], (64, 64))
96 |
97 | hog_descr = hog.compute(patch, self.win_stride, self.padding)
98 | score = np.inner(self.svm_w, hog_descr.ravel()) + self.svm_b
99 |
100 | if score > best_score:
101 | jc = np.array([C / 2 + x_displ, y_coord])
102 | best_score = score
103 |
104 | roi_R = np.array([jc[0] - sizepx // 2,
105 | jc[1] - sizepx // 2,
106 | jc[0] + sizepx // 2,
107 |                           jc[1] + sizepx // 2]).round().astype(int)
108 |
109 | # Make proposals for the left leg
110 | R, C = left_leg.shape
111 | displacements = range(-C // 4, 1 * C // 4 + 1, self.step)
112 | prop = get_joint_y_proposals(left_leg)
113 | best_score = -np.inf
114 |
115 | for y_coord in prop:
116 | for x_displ in displacements:
117 | for scale in self.scales:
118 | if split_point + x_displ + R / scale / 2 < img.shape[1]:
119 | roi = np.array([split_point + C / 2 + x_displ - R / scale / 2,
120 | y_coord - R / scale / 2,
121 |                                         R / scale, R / scale], dtype=int)
122 | x1, y1 = roi[0], roi[1]
123 | x2, y2 = roi[0] + roi[2], roi[1] + roi[3]
124 | patch = np.fliplr(cv2.resize(img[y1:y2, x1:x2], (64, 64)))
125 |
126 | hog_descr = hog.compute(patch, self.win_stride, self.padding)
127 | score = np.inner(self.svm_w, hog_descr.ravel()) + self.svm_b
128 |
129 | if score > best_score:
130 | jc = np.array([split_point + C / 2 + x_displ, y_coord])
131 | best_score = score
132 |
133 | roi_L = np.array([jc[0] - sizepx // 2,
134 | jc[1] - sizepx // 2,
135 | jc[0] + sizepx // 2,
136 |                           jc[1] + sizepx // 2]).round().astype(int)
137 |
138 | return [roi_L.tolist(), roi_R.tolist()]
139 |
140 |
141 | def worker(fname, path_input, localizer):
142 | tmp = read_dicom(os.path.join(path_input, fname))
143 | if tmp is None:
144 | ret = [fname, ] + [-1, ] * 4 + [-1, ] * 4
145 | return ' '.join([str(e) for e in ret])
146 |
147 | img, spacing = tmp
148 | img = preprocess_xray(img)
149 | try:
150 | detections = localizer.predict(img, spacing)
151 |     except Exception:
152 |         print('Error finding the knee joints in {}'.format(fname))
153 | detections = [[-1]*4, [-1]*4]
154 |
155 | if detections is None:
156 | detections = [[-1]*4, [-1]*4]
157 | return ' '.join(map(str, [fname, ] + detections[0] + detections[1]))
158 |
159 |
160 | def parse_args():
161 | parser = argparse.ArgumentParser()
162 |     parser.add_argument('--path_input', '--dir', required=True)
163 |     parser.add_argument('--fname_output', '--output',
164 |                         default='../detection_results.txt')
165 |
166 | args = parser.parse_args()
167 | args.path_input = os.path.abspath(args.path_input)
168 | args.fname_output = os.path.abspath(args.fname_output)
169 | return args
170 |
171 |
172 | if __name__ == "__main__":
173 | args = parse_args()
174 |
175 | ts_start = time.time()
176 |
177 | localizer = KneeLocalizer()
178 |
179 | def worker_partial(fname):
180 | return worker(fname, args.path_input, localizer)
181 |
182 | fnames = os.listdir(args.path_input)
183 |
184 | with Pool(cpu_count()) as pool:
185 | res = list(tqdm(pool.imap(
186 | worker_partial, iter(fnames)), total=len(fnames)))
187 |
188 | with open(args.fname_output, 'w') as f:
189 | for entry in res:
190 | f.write(entry + '\n')
191 |
192 | ts_end = time.time() - ts_start
193 | print('Script execution took {} seconds'.format(ts_end))
194 |
--------------------------------------------------------------------------------