├── .gitignore
├── .idea
│   ├── compiler.xml
│   ├── encodings.xml
│   ├── inspectionProfiles
│   │   └── Project_Default.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── scala_settings.xml
│   └── vcs.xml
├── CarND-Vehicle-Detection.iml
├── README.md
├── camera_cal
│   ├── calibration1.jpg
│   ├── calibration10.jpg
│   ├── calibration11.jpg
│   ├── calibration12.jpg
│   ├── calibration13.jpg
│   ├── calibration14.jpg
│   ├── calibration15.jpg
│   ├── calibration16.jpg
│   ├── calibration17.jpg
│   ├── calibration18.jpg
│   ├── calibration19.jpg
│   ├── calibration2.jpg
│   ├── calibration20.jpg
│   ├── calibration3.jpg
│   ├── calibration4.jpg
│   ├── calibration5.jpg
│   ├── calibration6.jpg
│   ├── calibration7.jpg
│   ├── calibration8.jpg
│   └── calibration9.jpg
├── camera_calibrate.py
├── color_space_plot.py
├── data
│   ├── non-vehicles
│   │   └── .gitkeep
│   └── vehicles
│       └── .gitkeep
├── data_explorer.py
├── data_generators.py
├── data_load.py
├── environment.yml
├── examples
│   ├── .DS_Store
│   ├── HOG_example.jpg
│   ├── bboxes_and_heat.png
│   ├── car_not_car.png
│   ├── labels_map.png
│   ├── output_bboxes.png
│   ├── sliding_window.jpg
│   └── sliding_windows.jpg
├── features.py
├── lane_finder.py
├── main.py
├── main_detect_unet.py
├── model.py
├── object_detect_yolo.py
├── other_images
│   ├── Non-maximum-suppression.jpeg
│   ├── Person.png
│   ├── TrafficLightsAndVehicles.png
│   ├── YOLO_Demo.png
│   ├── YOLO_NN.png
│   ├── YOLO_Parameters.jpeg
│   ├── cnn-weights.jpeg
│   └── convnet-car.jpeg
├── output_images
│   ├── .DS_Store
│   ├── b+g+r_channels.png
│   ├── calibration1_undist.png
│   ├── camera_calibration_pickle.p
│   ├── car1_hog.png
│   ├── feature_normalize.png
│   ├── features_search
│   │   └── test1.png
│   ├── h_channel.png
│   ├── image_combined_4.jpg
│   ├── l_channel.png
│   ├── lane-smooth
│   │   ├── combine_1047.jpg.png
│   │   ├── combine_602.jpg.png
│   │   ├── combine_714.jpg.png
│   │   ├── combine_746.jpg.png
│   │   ├── combine_993.jpg.png
│   │   ├── combine_straight_lines1.jpg.png
│   │   ├── combine_straight_lines2.jpg.png
│   │   ├── combine_test1.jpg.png
│   │   ├── combine_test2.jpg.png
│   │   ├── combine_test3.jpg.png
│   │   ├── combine_test4.jpg.png
│   │   ├── combine_test5.jpg.png
│   │   └── combine_test6.jpg.png
│   ├── lane
│   │   ├── combine_1047.jpg.png
│   │   ├── combine_602.jpg.png
│   │   ├── combine_714.jpg.png
│   │   ├── combine_746.jpg.png
│   │   ├── combine_993.jpg.png
│   │   ├── combine_straight_lines1.jpg.png
│   │   ├── combine_straight_lines2.jpg.png
│   │   ├── combine_test1.jpg.png
│   │   ├── combine_test2.jpg.png
│   │   ├── combine_test3.jpg.png
│   │   ├── combine_test4.jpg.png
│   │   ├── combine_test5.jpg.png
│   │   └── combine_test6.jpg.png
│   ├── object-detect
│   │   ├── 1047.jpg
│   │   ├── 602.jpg
│   │   ├── 714.jpg
│   │   ├── 746.jpg
│   │   ├── 993.jpg
│   │   ├── car1.png
│   │   ├── straight_lines1.jpg
│   │   ├── straight_lines2.jpg
│   │   ├── test1.jpg
│   │   ├── test2.jpg
│   │   ├── test3.jpg
│   │   ├── test4.jpg
│   │   ├── test5.jpg
│   │   └── test6.jpg
│   ├── perspective
│   │   ├── 1047.jpg.png
│   │   ├── 602.jpg.png
│   │   ├── 616.jpg.png
│   │   ├── 714.jpg.png
│   │   ├── 746.jpg.png
│   │   ├── 993.jpg.png
│   │   ├── straight_lines1.jpg.png
│   │   ├── straight_lines2.jpg.png
│   │   ├── test1.jpg.png
│   │   ├── test2.jpg.png
│   │   ├── test3.jpg.png
│   │   ├── test4.jpg.png
│   │   ├── test5.jpg.png
│   │   └── test6.jpg.png
│   ├── s_channel.png
│   ├── save_output_here.txt
│   ├── slide_window.png
│   ├── slide_window
│   │   ├── 746.jpg.png
│   │   └── test1.jpg.png
│   ├── sobel_combined.png
│   ├── sobel_dir.png
│   ├── sobel_final_pipe_line.png
│   ├── sobel_l+dir.png
│   ├── sobel_mag.png
│   ├── sobel_s+x.png
│   ├── sobel_x.png
│   ├── sobel_y.png
│   ├── thresholding
│   │   ├── 1047.jpg.png
│   │   ├── 602.jpg.png
│   │   ├── 616.jpg.png
│   │   ├── 714.jpg.png
│   │   ├── 993.jpg.png
│   │   ├── straight_lines1.jpg.png
│   │   ├── straight_lines2.jpg.png
│   │   ├── test1.jpg.png
│   │   ├── test2.jpg.png
│   │   ├── test3.jpg.png
│   │   ├── test4.jpg.png
│   │   ├── test5.jpg.png
│   │   └── test6.jpg.png
│   ├── undistort
│   │   ├── 1047.jpg.png
│   │   ├── 602.jpg.png
│   │   ├── 714.jpg.png
│   │   ├── 993.jpg.png
│   │   ├── shadow.jpg.png
│   │   ├── straight_lines1.jpg.png
│   │   ├── straight_lines2.jpg.png
│   │   ├── test1.jpg.png
│   │   ├── test2.jpg.png
│   │   ├── test3.jpg.png
│   │   ├── test4.jpg.png
│   │   ├── test5.jpg.png
│   │   └── test6.jpg.png
│   └── vehicle
│       ├── 1047.jpg.png
│       ├── 602.jpg.png
│       ├── 714.jpg.png
│       ├── 746.jpg.png
│       ├── 993.jpg.png
│       ├── straight_lines1.jpg.png
│       ├── straight_lines2.jpg.png
│       ├── test1.jpg.png
│       ├── test2.jpg.png
│       ├── test3.jpg.png
│       ├── test4.jpg.png
│       ├── test5.jpg.png
│       └── test6.jpg.png
├── perspective_transform.py
├── project_video.mp4
├── search_windows.py
├── test
│   ├── __init__.py
│   ├── pipe_line_test.py
│   ├── test_features.py
│   ├── test_label_parser.py
│   └── test_window.py
├── test_images
│   ├── .DS_Store
│   ├── 1047.jpg
│   ├── 602.jpg
│   ├── 714.jpg
│   ├── 746.jpg
│   ├── 993.jpg
│   ├── car1.png
│   ├── straight_lines1.jpg
│   ├── straight_lines2.jpg
│   ├── test1.jpg
│   ├── test2.jpg
│   ├── test3.jpg
│   ├── test4.jpg
│   ├── test5.jpg
│   └── test6.jpg
├── test_video.mp4
├── thresholding.py
├── vehicle_detect.py
├── vehicle_detect_nn.py
├── writeup_template.md
└── yolo
    ├── __init__.py
    ├── cfg
    │   ├── darknet19_448.cfg
    │   ├── tiny-yolo-voc.cfg
    │   ├── yolo-voc.cfg
    │   └── yolo.cfg
    ├── font
    │   ├── FiraMono-Medium.otf
    │   └── SIL Open Font License.txt
    ├── model_data
    │   ├── coco_classes.txt
    │   ├── pascal_classes.txt
    │   ├── tiny-yolo-voc_anchors.txt
    │   ├── yolo-voc_anchors.txt
    │   └── yolo_anchors.txt
    ├── test_yolo.py
    ├── yad2k.py
    └── yad2k
        ├── __init__.py
        ├── models
        │   ├── __init__.py
        │   ├── keras_darknet19.py
        │   └── keras_yolo.py
        └── utils
            ├── __init__.py
            └── utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | data/
2 |
3 | # Created by .ignore support plugin (hsz.mobi)
4 | ### JetBrains template
5 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
6 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
7 |
8 | # User-specific stuff:
9 | .idea/workspace.xml
10 | .idea/tasks.xml
11 |
12 | # Sensitive or high-churn files:
13 | .idea/dataSources/
14 | .idea/dataSources.ids
15 | .idea/dataSources.xml
16 | .idea/dataSources.local.xml
17 | .idea/sqlDataSources.xml
18 | .idea/dynamic.xml
19 | .idea/uiDesigner.xml
20 |
21 | # Gradle:
22 | .idea/gradle.xml
23 | .idea/libraries
24 |
25 | # Mongo Explorer plugin:
26 | .idea/mongoSettings.xml
27 |
28 | ## File-based project format:
29 | *.iws
30 |
31 | ## Plugin-specific files:
32 |
33 | # IntelliJ
34 | /out/
35 |
36 | # mpeltonen/sbt-idea plugin
37 | .idea_modules/
38 |
39 | # JIRA plugin
40 | atlassian-ide-plugin.xml
41 |
42 | # Crashlytics plugin (for Android Studio and IntelliJ)
43 | com_crashlytics_export_strings.xml
44 | crashlytics.properties
45 | crashlytics-build.properties
46 | fabric.properties
47 | ### Python template
48 | # Byte-compiled / optimized / DLL files
49 | __pycache__/
50 | *.py[cod]
51 | *$py.class
52 |
53 | # C extensions
54 | *.so
55 |
56 | # Distribution / packaging
57 | .Python
58 | env/
59 | build/
60 | develop-eggs/
61 | dist/
62 | downloads/
63 | eggs/
64 | .eggs/
65 | lib/
66 | lib64/
67 | parts/
68 | sdist/
69 | var/
70 | *.egg-info/
71 | .installed.cfg
72 | *.egg
73 |
74 | # PyInstaller
75 | # Usually these files are written by a python script from a template
76 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
77 | *.manifest
78 | *.spec
79 |
80 | # Installer logs
81 | pip-log.txt
82 | pip-delete-this-directory.txt
83 |
84 | # Unit test / coverage reports
85 | htmlcov/
86 | .tox/
87 | .coverage
88 | .coverage.*
89 | .cache
90 | nosetests.xml
91 | coverage.xml
92 | *,cover
93 | .hypothesis/
94 |
95 | # Translations
96 | *.mo
97 | *.pot
98 |
99 | # Django stuff:
100 | *.log
101 | local_settings.py
102 |
103 | # Flask stuff:
104 | instance/
105 | .webassets-cache
106 |
107 | # Scrapy stuff:
108 | .scrapy
109 |
110 | # Sphinx documentation
111 | docs/_build/
112 |
113 | # PyBuilder
114 | target/
115 |
116 | # Jupyter Notebook
117 | .ipynb_checkpoints
118 |
119 | # pyenv
120 | .python-version
121 |
122 | # celery beat schedule file
123 | celerybeat-schedule
124 |
125 | # dotenv
126 | .env
127 |
128 | # virtualenv
129 | .venv/
130 | venv/
131 | ENV/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 |
136 | # Rope project settings
137 | .ropeproject
138 |
139 |
140 | video_images/
--------------------------------------------------------------------------------
/.idea/compiler.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/scala_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/CarND-Vehicle-Detection.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/camera_cal/calibration1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration1.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration10.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration11.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration12.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration13.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration14.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration15.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration15.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration16.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration17.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration17.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration18.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration18.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration19.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration19.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration2.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration20.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration20.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration3.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration4.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration5.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration6.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration7.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration8.jpg
--------------------------------------------------------------------------------
/camera_cal/calibration9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/camera_cal/calibration9.jpg
--------------------------------------------------------------------------------
/camera_calibrate.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import glob
4 | import matplotlib.pyplot as plt
5 | import pickle
6 |
7 |
8 | def show_image(image, cmap=None):
9 | plt.figure()
10 | plt.imshow(image, cmap)
11 |
12 |
13 | def calibrateCamera(img_size):
14 |     # prepare object points for the 9x6 board, like (0,0,0), (1,0,0), (2,0,0) ...., (8,5,0)
15 | objp = np.zeros((6 * 9, 3), np.float32)
16 | objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
17 |
18 | # Arrays to store object points and image points from all the images.
19 | objpoints = [] # 3d points in real world space
20 | imgpoints = [] # 2d points in image plane.
21 |
22 | # Make a list of calibration images
23 | images = glob.glob('./camera_cal/calibration*.jpg')
24 |
25 | # Step through the list and search for chessboard corners
26 | for fname in images:
27 | img = cv2.imread(fname)
28 | gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
29 |
30 | # Find the chessboard corners
31 | ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
32 |
33 | # If found, add object points, image points
34 | if ret == True:
35 | objpoints.append(objp)
36 | imgpoints.append(corners)
37 |
38 | # Draw and display the corners
39 | img = cv2.drawChessboardCorners(img, (9, 6), corners, ret)
40 | show_image(img)
41 |
42 | # Do camera calibration given object points and image points
43 | ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
44 | return mtx, dist
45 |
46 |
47 | def save_camera_calibration(mtx, dist):
48 | dist_pickle = {}
49 | dist_pickle["mtx"] = mtx
50 | dist_pickle["dist"] = dist
51 | pickle.dump(dist_pickle, open("./output_images/camera_calibration_pickle.p", "wb"))
52 |
53 |
54 | def load_camera_calibration(file_name):
55 | data = pickle.load(open(file_name, "rb"))
56 | return data["mtx"], data["dist"]
57 |
58 |
59 | def undistort(mtx, dist, img, dst_file_name):
60 | """
61 |     given the camera mtx, dist and an image, undistort the image and save the original and undistorted versions side by side
62 | """
63 | img_size = (img.shape[1], img.shape[0])
64 | dst = cv2.undistort(img, mtx, dist, None, mtx)
65 | # cv2.imwrite(dst_file_name, dst)
66 |
67 | # dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
68 | print("Visualize undistortion")
69 | f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
70 | ax1.imshow(img)
71 | ax1.set_title('Original Image', fontsize=30)
72 | ax2.imshow(dst)
73 | ax2.set_title('Undistorted Image', fontsize=30)
74 | plt.savefig(dst_file_name)
75 |
76 |
77 | if __name__ == "__main__":
78 | img = cv2.imread('./camera_cal/calibration1.jpg')
79 | img_size = (img.shape[1], img.shape[0])
80 | camera_matrix, distortion = calibrateCamera(img_size)
81 | save_camera_calibration(camera_matrix, distortion)
82 | undistort(camera_matrix, distortion, img, './output_images/calibration1_undist.png')
83 |
84 |
--------------------------------------------------------------------------------
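
Note: camera_calibrate.py persists the calibration to output_images/camera_calibration_pickle.p, so downstream scripts can undistort frames without re-running the chessboard search. A minimal sketch of that reuse (the test image path is only an example):

import cv2
from camera_calibrate import load_camera_calibration

# Load the matrix and distortion coefficients saved by save_camera_calibration() above
mtx, dist = load_camera_calibration("./output_images/camera_calibration_pickle.p")
frame = cv2.imread("./test_images/test1.jpg")              # any road image from test_images/
undistorted = cv2.undistort(frame, mtx, dist, None, mtx)   # same call used in undistort() above
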
/color_space_plot.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | from mpl_toolkits.mplot3d import Axes3D
5 |
6 |
7 | def plot3d(pixels, colors_rgb,
8 | axis_labels=list("RGB"), axis_limits=[(0, 255), (0, 255), (0, 255)]):
9 | """Plot pixels in 3D."""
10 |
11 | # Create figure and 3D axes
12 | fig = plt.figure(figsize=(8, 8))
13 | ax = Axes3D(fig)
14 |
15 | # Set axis limits
16 | ax.set_xlim(*axis_limits[0])
17 | ax.set_ylim(*axis_limits[1])
18 | ax.set_zlim(*axis_limits[2])
19 |
20 | # Set axis labels and sizes
21 | ax.tick_params(axis='both', which='major', labelsize=14, pad=8)
22 | ax.set_xlabel(axis_labels[0], fontsize=16, labelpad=16)
23 | ax.set_ylabel(axis_labels[1], fontsize=16, labelpad=16)
24 | ax.set_zlabel(axis_labels[2], fontsize=16, labelpad=16)
25 |
26 | # Plot pixel values with colors given in colors_rgb
27 | ax.scatter(
28 | pixels[:, :, 0].ravel(),
29 | pixels[:, :, 1].ravel(),
30 | pixels[:, :, 2].ravel(),
31 | c=colors_rgb.reshape((-1, 3)), edgecolors='none')
32 |
33 | return ax # return Axes3D object for further manipulation
34 |
35 |
36 | # Read a color image
37 | img = cv2.imread("test_images/602.jpg")
38 |
39 | # Select a small fraction of pixels to plot by subsampling it
40 | scale = max(img.shape[0], img.shape[1], 64) / 500  # downscale so the longer side is about 500 px for plotting
41 | img_small = cv2.resize(img, (np.int(img.shape[1] / scale), np.int(img.shape[0] / scale)),
42 | interpolation=cv2.INTER_NEAREST)
43 |
44 | # Convert subsampled image to desired color space(s)
45 | img_small_RGB = cv2.cvtColor(img_small, cv2.COLOR_BGR2RGB) # OpenCV uses BGR, matplotlib likes RGB
46 | img_small_HSV = cv2.cvtColor(img_small, cv2.COLOR_BGR2HSV)
47 | img_small_rgb = img_small_RGB / 255. # scaled to [0, 1], only for plotting
48 |
49 | # Plot and show
50 | plot3d(img_small_RGB, img_small_rgb)
51 | plt.show()
52 |
53 | plot3d(img_small_HSV, img_small_rgb, axis_labels=list("HSV"))
54 | plt.show()
--------------------------------------------------------------------------------
/data/non-vehicles/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/data/non-vehicles/.gitkeep
--------------------------------------------------------------------------------
/data/vehicles/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/data/vehicles/.gitkeep
--------------------------------------------------------------------------------
/data_explorer.py:
--------------------------------------------------------------------------------
1 | import matplotlib.image as mpimg
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import cv2
5 | import glob
6 |
7 | # from skimage.feature import hog
8 | # from skimage import color, exposure
9 | # images are divided up into vehicles and non-vehicles
10 |
11 | car_file_names = glob.glob('data/vehicles/*/*.png')
12 | notcar_file_names = glob.glob('data/non-vehicles/*/*.png')
13 |
14 |
15 | if __name__ == '__main__':
16 | # Define a function to return some characteristics of the dataset
17 | def data_look(car_list, notcar_list):
18 | data_dict = {}
19 | # Define a key in data_dict "n_cars" and store the number of car images
20 | data_dict["n_cars"] = len(car_list)
21 | # Define a key "n_notcars" and store the number of notcar images
22 | data_dict["n_notcars"] = len(notcar_list)
23 | # Read in a test image, either car or notcar
24 | example_img = mpimg.imread(car_list[0])
25 | # Define a key "image_shape" and store the test image shape 3-tuple
26 | data_dict["image_shape"] = example_img.shape
27 | # Define a key "data_type" and store the data type of the test image.
28 | data_dict["data_type"] = example_img.dtype
29 | # Return data_dict
30 | return data_dict
31 |
32 |
33 | data_info = data_look(car_file_names, notcar_file_names)
34 |
35 | print('Your function returned a count of',
36 | data_info["n_cars"], ' cars and',
37 | data_info["n_notcars"], ' non-cars')
38 | print('of size: ', data_info["image_shape"], ' and data type:',
39 | data_info["data_type"])
40 | # Just for fun choose random car / not-car indices and plot example images
41 | car_ind = np.random.randint(0, len(car_file_names))
42 | notcar_ind = np.random.randint(0, len(notcar_file_names))
43 |
44 | # Read in car / not-car images
45 | car_image = mpimg.imread(car_file_names[car_ind])
46 | notcar_image = mpimg.imread(notcar_file_names[notcar_ind])
47 |
48 | # Plot the examples
49 | fig = plt.figure()
50 | plt.subplot(121)
51 | plt.imshow(car_image)
52 | plt.title('Example Car Image')
53 | plt.subplot(122)
54 | plt.imshow(notcar_image)
55 | plt.title('Example Not-car Image')
56 |
--------------------------------------------------------------------------------
/data_generators.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import scipy.ndimage
4 | from data_load import FeedingData
5 |
6 |
7 | def image_itself(feeding_data):
8 | return feeding_data.image(), feeding_data.steering_angle
9 |
10 |
11 | def shift_image_generator(angle_offset_pre_pixel=0.003):
12 | def _generator(feeding_data):
13 | image, angle, _ = _shift_image(
14 | feeding_data.image(), feeding_data.steering_angle, 100, 20, angle_offset_pre_pixel=angle_offset_pre_pixel)
15 | return image, angle
16 |
17 | return _generator
18 |
19 |
20 | def brightness_image_generator(brightness_range=0.25):
21 | def _generator(feeding_data):
22 | img = feeding_data.image()
23 | # Convert the image to HSV
24 | temp = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
25 | # Compute a random brightness value and apply to the image
26 | brightness = brightness_range + np.random.uniform()
27 | temp[:, :, 2] = temp[:, :, 2] * brightness
28 |
29 | # Convert back to RGB and return
30 | return cv2.cvtColor(temp, cv2.COLOR_HSV2RGB), feeding_data.steering_angle
31 |
32 | return _generator
33 |
34 |
35 | def shadow_generator(feeding_data):
36 | image = feeding_data.image()
37 | top_y = image.shape[1] * np.random.uniform()
38 | top_x = 0
39 | bot_x = image.shape[0]
40 | bot_y = image.shape[1] * np.random.uniform()
41 | image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
42 | shadow_mask = 0 * image_hls[:, :, 1]
43 | X_m = np.mgrid[0:image.shape[0], 0:image.shape[1]][0]
44 | Y_m = np.mgrid[0:image.shape[0], 0:image.shape[1]][1]
45 |
46 | shadow_mask[((X_m - top_x) * (bot_y - top_y) - (bot_x - top_x) * (Y_m - top_y) >= 0)] = 1
47 | if np.random.randint(2) == 1:
48 | random_bright = .5
49 | cond1 = shadow_mask == 1
50 | cond0 = shadow_mask == 0
51 | if np.random.randint(2) == 1:
52 | image_hls[:, :, 1][cond1] = image_hls[:, :, 1][cond1] * random_bright
53 | else:
54 | image_hls[:, :, 1][cond0] = image_hls[:, :, 1][cond0] * random_bright
55 | image = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)
56 | return image, feeding_data.steering_angle
57 |
58 |
59 | def random_generators(*generators):
60 | def _generator(feeding_data):
61 | index = np.random.randint(0, len(generators))
62 | return generators[index](feeding_data)
63 |
64 | return _generator
65 |
66 |
67 | def flip_generator(feeding_data):
68 | image, angle = feeding_data.image(), feeding_data.steering_angle
69 | return cv2.flip(image, 1), -angle
70 |
71 |
72 | def pipe_line_generators(*generators):
73 | """
74 |     pipeline of generators: each generator runs in sequence, feeding its output to the next
75 | :param generators:
76 | :return:
77 | """
78 | def _generator(feeding_data):
79 | intermediary_feeding_data = feeding_data
80 | for generator in generators:
81 | image, angle = generator(intermediary_feeding_data)
82 | intermediary_feeding_data = FeedingData(image, angle)
83 | return intermediary_feeding_data.image(), intermediary_feeding_data.steering_angle
84 |
85 | return _generator
86 |
87 |
88 | def pipe_line_random_generators(*generators):
89 | def _generator(feeding_data):
90 | count = np.random.randint(0, len(generators)+1)
91 | intermediary_feeding_data = feeding_data
92 | for index in range(count):
93 | generator = generators[index]
94 | image, angle = generator(intermediary_feeding_data)
95 | intermediary_feeding_data = FeedingData(image, angle)
96 | return intermediary_feeding_data.image(), intermediary_feeding_data.steering_angle
97 |
98 | return _generator
99 |
100 |
101 | def filter_generator(generator, angle_threshold=0.1):
102 | def _generator(feeding_data):
103 | image, angle = None, None
104 | for index in range(20):
105 | if angle is None or angle <= angle_threshold:
106 | image, angle = generator(feeding_data)
107 | else:
108 | break
109 |
110 | return image, angle
111 |
112 | return _generator
113 |
114 |
115 | def _shift_image(image, steer, left_right_shift_range, top_bottom_shift_range, angle_offset_pre_pixel=0.003):
116 | shift_size = round(left_right_shift_range * np.random.uniform(-0.5, 0.5))
117 | steer_ang = steer + shift_size * angle_offset_pre_pixel
118 | top_bottom_shift_size = round(top_bottom_shift_range * np.random.uniform(-0.5, 0.5))
119 | if shift_size >= image.shape[1]:
120 | image_tr = image
121 |         # print("WARNING: image is smaller than shift size, original image returned")
122 | else:
123 | image_tr = scipy.ndimage.interpolation.shift(image, (top_bottom_shift_size, shift_size, 0))
124 | return image_tr, steer_ang, shift_size
125 |
126 |
127 |
--------------------------------------------------------------------------------
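
The generators above are meant to be composed: pipe_line_generators chains augmentations in order, random_generators picks one at random, and pipe_line_random_generators applies a random-length prefix of the chain. A small usage sketch, assuming a FeedingData sample as defined in data_load.py:

from data_load import FeedingData
from data_generators import (pipe_line_generators, random_generators,
                             shift_image_generator, brightness_image_generator, flip_generator)

# Shift first, then apply either a flip or a brightness change chosen at random.
augment = pipe_line_generators(
    shift_image_generator(angle_offset_pre_pixel=0.003),
    random_generators(flip_generator, brightness_image_generator(0.25)))

# sample = FeedingData(image, steering_angle)   # hypothetical sample built elsewhere
# new_image, new_angle = augment(sample)
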
/environment.yml:
--------------------------------------------------------------------------------
1 | name: ml
2 | channels:
3 | - conda-forge
4 | - defaults
5 | dependencies:
6 | - _nb_ext_conf=0.3.0=py35_0
7 | - anaconda-client=1.5.1=py35_0
8 | - appnope=0.1.0=py35_0
9 | - cffi=1.9.1=py35_0
10 | - click=6.6=py35_0
11 | - clyent=1.2.2=py35_0
12 | - colorama=0.3.7=py35_0
13 | - conda-forge::eventlet=0.19.0=py35_0
14 | - conda-forge::flask-socketio=2.7.1=py35_0
15 | - conda-forge::jasper=1.900.1=3
16 | - conda-forge::jpeg=9b=0
17 | - conda-forge::keras=1.0.7=py35_0
18 | - conda-forge::libpng=1.6.24=0
19 | - conda-forge::libtiff=4.0.6=7
20 | - conda-forge::opencv=3.1.0=np111py35_1
21 | - conda-forge::pillow=3.2.0=py35_1
22 | - conda-forge::protobuf=3.0.0=py35_0
23 | - conda-forge::python-engineio=1.0.0=py35_0
24 | - conda-forge::python-socketio=1.5.1=py35_0
25 | - conda-forge::sklearn-contrib-lightning=0.4.0=np111py35_0
26 | - conda-forge::tensorflow=0.11.0=py35_0
27 | - cryptography=1.7.1=py35_0
28 | - cycler=0.10.0=py35_0
29 | - decorator=4.0.10=py35_0
30 | - entrypoints=0.2.2=py35_0
31 | - flask=0.12=py35_0
32 | - freetype=2.5.5=1
33 | - greenlet=0.4.11=py35_0
34 | - h5py=2.6.0=np111py35_2
35 | - hdf5=1.8.17=1
36 | - icu=54.1=0
37 | - idna=2.1=py35_0
38 | - ipykernel=4.5.0=py35_0
39 | - ipython=5.1.0=py35_0
40 | - ipython_genutils=0.1.0=py35_0
41 | - ipywidgets=5.2.2=py35_0
42 | - itsdangerous=0.24=py35_0
43 | - jbig=2.1=0
44 | - jinja2=2.8=py35_1
45 | - jsonschema=2.5.1=py35_0
46 | - jupyter=1.0.0=py35_3
47 | - jupyter_client=4.4.0=py35_0
48 | - jupyter_console=5.0.0=py35_0
49 | - jupyter_core=4.1.1=py35_0
50 | - markupsafe=0.23=py35_2
51 | - matplotlib=1.5.3=np111py35_1
52 | - mistune=0.7.2=py35_1
53 | - mkl=11.3.3=0
54 | - mock=2.0.0=py35_0
55 | - nb_anacondacloud=1.2.0=py35_0
56 | - nb_conda=2.0.0=py35_0
57 | - nb_conda_kernels=2.0.0=py35_0
58 | - nbconvert=4.2.0=py35_0
59 | - nbformat=4.1.0=py35_0
60 | - nbpresent=3.0.2=py35_0
61 | - notebook=4.2.2=py35_0
62 | - numpy=1.11.1=py35_0
63 | - openssl=1.0.2h=2
64 | - pandas=0.19.1=np111py35_0
65 | - path.py=8.2.1=py35_0
66 | - pbr=1.10.0=py35_0
67 | - pexpect=4.0.1=py35_0
68 | - pickleshare=0.7.3=py35_0
69 | - pip=8.1.2=py35_0
70 | - prompt_toolkit=1.0.3=py35_0
71 | - ptyprocess=0.5.1=py35_0
72 | - pyasn1=0.1.9=py35_0
73 | - pycparser=2.17=py35_0
74 | - pygments=2.1.3=py35_0
75 | - pyopenssl=16.2.0=py35_0
76 | - pyparsing=2.1.4=py35_0
77 | - pyqt=5.6.0=py35_0
78 | - python=3.5.2=0
79 | - python-dateutil=2.5.3=py35_0
80 | - python.app=1.2=py35_4
81 | - pytz=2016.6.1=py35_0
82 | - pyyaml=3.11=py35_4
83 | - pyzmq=15.4.0=py35_0
84 | - qt=5.6.0=0
85 | - qtconsole=4.2.1=py35_1
86 | - readline=6.2=2
87 | - requests=2.11.1=py35_0
88 | - scikit-learn=0.18.1=np111py35_0
89 | - scipy=0.18.1=np111py35_0
90 | - seaborn=0.7.1=py35_0
91 | - setuptools=26.1.1=py35_0
92 | - simplegeneric=0.8.1=py35_1
93 | - sip=4.18=py35_0
94 | - six=1.10.0=py35_0
95 | - sqlite=3.13.0=0
96 | - terminado=0.6=py35_0
97 | - theano=0.8.2=py35_0
98 | - tk=8.5.18=0
99 | - tornado=4.4.1=py35_0
100 | - traitlets=4.2.2=py35_0
101 | - wcwidth=0.1.7=py35_0
102 | - werkzeug=0.11.13=py35_0
103 | - wheel=0.29.0=py35_0
104 | - widgetsnbextension=1.2.6=py35_0
105 | - xz=5.2.2=0
106 | - yaml=0.1.6=0
107 | - zlib=1.2.8=3
108 | - pip:
109 | - awscli==1.11.82
110 | - botocore==1.5.45
111 | - dask==0.13.0
112 | - docutils==0.13.1
113 | - eventlet==0.19.0
114 | - flask-socketio==2.7.1
115 | - imageio==1.6
116 | - ipython-genutils==0.1.0
117 | - jmespath==0.9.2
118 | - jupyter-client==4.4.0
119 | - jupyter-console==5.0.0
120 | - jupyter-core==4.1.1
121 | - keras==1.2.1
122 | - lasagne==0.2.dev1
123 | - moviepy==0.2.2.11
124 | - nb-anacondacloud==1.2.0
125 | - nb-conda==2.0.0
126 | - nb-conda-kernels==2.0.0
127 | - networkx==1.11
128 | - olefile==0.44
129 | - pillow==4.0.0
130 | - prompt-toolkit==1.0.3
131 | - protobuf==3.1.0.post1
132 | - python-engineio==1.0.0
133 | - python-socketio==1.5.1
134 | - rsa==3.4.2
135 | - s3transfer==0.1.10
136 | - scikit-image==0.12.3
137 | - sklearn-contrib-lightning==0.4.0
138 | - tensorflow==0.12.1
139 | - toolz==0.8.2
140 | - tqdm==4.10.0
141 | prefix: /Users/james/anaconda/envs/ml
142 |
143 |
--------------------------------------------------------------------------------
/examples/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/examples/.DS_Store
--------------------------------------------------------------------------------
/examples/HOG_example.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/examples/HOG_example.jpg
--------------------------------------------------------------------------------
/examples/bboxes_and_heat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/examples/bboxes_and_heat.png
--------------------------------------------------------------------------------
/examples/car_not_car.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/examples/car_not_car.png
--------------------------------------------------------------------------------
/examples/labels_map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/examples/labels_map.png
--------------------------------------------------------------------------------
/examples/output_bboxes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/examples/output_bboxes.png
--------------------------------------------------------------------------------
/examples/sliding_window.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/examples/sliding_window.jpg
--------------------------------------------------------------------------------
/examples/sliding_windows.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/examples/sliding_windows.jpg
--------------------------------------------------------------------------------
/features.py:
--------------------------------------------------------------------------------
1 | import matplotlib.image as mpimg
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import cv2
5 | import glob
6 | from skimage.feature import hog
7 | from sklearn.preprocessing import StandardScaler
8 |
9 | # Define a function to return HOG features and visualization
10 | def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):
11 | if vis == True:
12 | features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
13 | cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
14 | visualise=True, feature_vector=False)
15 | return features, hog_image
16 | else:
17 | features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
18 | cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
19 | visualise=False, feature_vector=feature_vec)
20 | return features
21 |
22 |
23 | # Define a function to compute binned color features
24 | def bin_spatial(img, size=(32, 32)):
25 | # Use cv2.resize().ravel() to create the feature vector
26 | features = cv2.resize(img, size).ravel()
27 | # Return the feature vector
28 | return features
29 |
30 |
31 | # Define a function to compute color histogram features
32 | def color_hist(img, nbins=32, bins_range=(0, 256)):
33 | # Compute the histogram of the color channels separately
34 | channel1_hist = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)
35 | channel2_hist = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)
36 | channel3_hist = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)
37 | # Concatenate the histograms into a single feature vector
38 | hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
39 | # Return the individual histograms, bin_centers and feature vector
40 | return hist_features
41 |
42 |
43 | # Define a function to extract features from a list of images
44 | # Have this function call bin_spatial() and color_hist()
45 | def extract_color_features(imgs, cspace='RGB', spatial_size=(32, 32),
46 | hist_bins=32, hist_range=(0, 256)):
47 | # Create a list to append feature vectors to
48 | features = []
49 | # Iterate through the list of images
50 | for file in imgs:
51 | # Read in each one by one
52 | image = mpimg.imread(file)
53 | # apply color conversion if other than 'RGB'
54 | if cspace != 'RGB':
55 | if cspace == 'HSV':
56 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
57 | elif cspace == 'LUV':
58 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
59 | elif cspace == 'HLS':
60 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
61 | elif cspace == 'YUV':
62 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
63 | else:
64 | feature_image = np.copy(image)
65 | # Apply bin_spatial() to get spatial color features
66 | spatial_features = bin_spatial(feature_image, size=spatial_size)
67 | # Apply color_hist() also with a color space option now
68 | hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range)
69 | # Append the new feature vector to the features list
70 | features.append(np.concatenate((spatial_features, hist_features)))
71 | # Return list of feature vectors
72 | return features
73 |
74 |
75 | # Define a function to extract features from a list of images
76 | # Have this function call bin_spatial() and color_hist()
77 | def extract_hog_features(imgs, cspace='RGB', orient=9,
78 | pix_per_cell=8, cell_per_block=2, hog_channel=0):
79 | # Create a list to append feature vectors to
80 | features = []
81 | # Iterate through the list of images
82 | for file in imgs:
83 | # Read in each one by one
84 | image = mpimg.imread(file)
85 | # apply color conversion if other than 'RGB'
86 | if cspace != 'RGB':
87 | if cspace == 'HSV':
88 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
89 | elif cspace == 'LUV':
90 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
91 | elif cspace == 'HLS':
92 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
93 | elif cspace == 'YUV':
94 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
95 | elif cspace == 'YCrCb':
96 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
97 | else:
98 | feature_image = np.copy(image)
99 |
100 | # Call get_hog_features() with vis=False, feature_vec=True
101 | if hog_channel == 'ALL':
102 | hog_features = []
103 | for channel in range(feature_image.shape[2]):
104 | hog_features.append(get_hog_features(feature_image[:, :, channel],
105 | orient, pix_per_cell, cell_per_block,
106 | vis=False, feature_vec=True))
107 | hog_features = np.ravel(hog_features)
108 | else:
109 | hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
110 | pix_per_cell, cell_per_block, vis=False, feature_vec=True)
111 | # Append the new feature vector to the features list
112 | features.append(hog_features)
113 | # Return list of feature vectors
114 | return features
115 |
116 |
117 | # Define a function to extract features from a single image window
118 | # This function is very similar to extract_color_features()
119 | # just for a single image rather than list of images
120 | def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
121 | hist_bins=32, orient=9,
122 | pix_per_cell=8, cell_per_block=2, hog_channel=0,
123 | spatial_feat=True, hist_feat=True, hog_feat=True):
124 | # 1) Define an empty list to receive features
125 | img_features = []
126 | # 2) Apply color conversion if other than 'RGB'
127 | if color_space != 'RGB':
128 | if color_space == 'HSV':
129 | feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
130 | elif color_space == 'LUV':
131 | feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
132 | elif color_space == 'HLS':
133 | feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
134 | elif color_space == 'YUV':
135 | feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
136 | elif color_space == 'YCrCb':
137 | feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
138 | else:
139 | feature_image = np.copy(img)
140 | # 3) Compute spatial features if flag is set
141 | if spatial_feat == True:
142 | spatial_features = bin_spatial(feature_image, size=spatial_size)
143 | # 4) Append features to list
144 | img_features.append(spatial_features)
145 | # 5) Compute histogram features if flag is set
146 | if hist_feat == True:
147 | hist_features = color_hist(feature_image, nbins=hist_bins)
148 | # 6) Append features to list
149 | img_features.append(hist_features)
150 | # 7) Compute HOG features if flag is set
151 | if hog_feat == True:
152 | if hog_channel == 'ALL':
153 | hog_features = []
154 | for channel in range(feature_image.shape[2]):
155 | hog_features.extend(get_hog_features(feature_image[:, :, channel],
156 | orient, pix_per_cell, cell_per_block,
157 | vis=False, feature_vec=True))
158 | else:
159 | hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
160 | pix_per_cell, cell_per_block, vis=False, feature_vec=True)
161 | # 8) Append features to list
162 | img_features.append(hog_features)
163 |
164 | # 9) Return concatenated array of features
165 | return np.concatenate(img_features)
166 |
167 |
168 | # Define a function to extract features from a list of images
169 | # Have this function call bin_spatial() and color_hist()
170 | def extract_features_all(imgs, color_space='RGB', spatial_size=(32, 32),
171 | hist_bins=32, orient=9,
172 | pix_per_cell=8, cell_per_block=2, hog_channel=0,
173 | spatial_feat=True, hist_feat=True, hog_feat=True):
174 | # Create a list to append feature vectors to
175 | features = []
176 | # Iterate through the list of images
177 | for file in imgs:
178 | file_features = []
179 | # Read in each one by one
180 | image = mpimg.imread(file)
181 | # apply color conversion if other than 'RGB'
182 | if color_space != 'RGB':
183 | if color_space == 'HSV':
184 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
185 | elif color_space == 'LUV':
186 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
187 | elif color_space == 'HLS':
188 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
189 | elif color_space == 'YUV':
190 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
191 | elif color_space == 'YCrCb':
192 | feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
193 | else:
194 | feature_image = np.copy(image)
195 |
196 | if spatial_feat == True:
197 | spatial_features = bin_spatial(feature_image, size=spatial_size)
198 | file_features.append(spatial_features)
199 | if hist_feat == True:
200 | # Apply color_hist()
201 | hist_features = color_hist(feature_image, nbins=hist_bins)
202 | file_features.append(hist_features)
203 | if hog_feat == True:
204 | # Call get_hog_features() with vis=False, feature_vec=True
205 | if hog_channel == 'ALL':
206 | hog_features = []
207 | for channel in range(feature_image.shape[2]):
208 | hog_features.append(get_hog_features(feature_image[:, :, channel],
209 | orient, pix_per_cell, cell_per_block,
210 | vis=False, feature_vec=True))
211 | hog_features = np.ravel(hog_features)
212 | else:
213 | hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
214 | pix_per_cell, cell_per_block, vis=False, feature_vec=True)
215 | # Append the new feature vector to the features list
216 | file_features.append(hog_features)
217 | features.append(np.concatenate(file_features))
218 | # Return list of feature vectors
219 | return features
--------------------------------------------------------------------------------
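
The extractors in features.py are normally stacked into a single feature matrix and scaled before training a classifier; StandardScaler is imported above for exactly that. A sketch of that step (the colour space, HOG channel and LinearSVC are illustrative choices, not necessarily the settings vehicle_detect.py ends up using):

import glob
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from features import extract_features_all

car_files = glob.glob('data/vehicles/*/*.png')          # same globs as data_explorer.py
notcar_files = glob.glob('data/non-vehicles/*/*.png')

car_features = extract_features_all(car_files, color_space='YCrCb', hog_channel='ALL')
notcar_features = extract_features_all(notcar_files, color_space='YCrCb', hog_channel='ALL')

X = np.vstack((car_features, notcar_features)).astype(np.float64)
scaled_X = StandardScaler().fit_transform(X)            # normalise each feature column
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))

clf = LinearSVC()
clf.fit(scaled_X, y)
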
/main.py:
--------------------------------------------------------------------------------
1 | from moviepy.editor import VideoFileClip
2 | from lane_finder import LaneFinder
3 | from object_detect_yolo import YoloDetector
4 |
5 |
6 | if __name__ == "__main__":
7 | def remove_mp4_extension(file_name):
8 | return file_name.replace(".mp4", "")
9 |
10 | yolo = YoloDetector()
11 | lane_finder = LaneFinder(save_original_images=True, object_detection_func=yolo.process_image_array)
12 | video_file = 'project_video.mp4'
13 | # video_file = 'challenge_video.mp4'
14 | # video_file = 'back_home.mov'
15 | # video_file = 'file01_2017322191247.mp4'
16 | clip = VideoFileClip(video_file, audio=False)
17 | t_start = 0
18 | t_end = 0
19 | if t_end > 0.0:
20 | clip = clip.subclip(t_start=t_start, t_end=t_end)
21 | else:
22 | clip = clip.subclip(t_start=t_start)
23 |
24 | clip = clip.fl_image(lane_finder.process_image)
25 | clip.write_videofile("{}_output.mp4".format(remove_mp4_extension(video_file)), audio=False)
26 | yolo.shutdown()
27 |
28 |
--------------------------------------------------------------------------------
/main_detect_unet.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import glob
5 | import os
6 | from camera_calibrate import load_camera_calibration
7 | from perspective_transform import *
8 | from thresholding import *
9 | from moviepy.editor import VideoFileClip
10 | from lane_finder import LaneFinder
11 | from vehicle_detect_nn import VehicleDetector
12 |
13 |
14 | if __name__ == "__main__":
15 |     def remove_mp4_extension(file_name):
16 |         return file_name.replace(".mp4", "")
17 |
18 |     detector = VehicleDetector(img_rows=640, img_cols=960, weights_file="model_segn_small_0p72.h5")
19 |     lane_finder = LaneFinder(save_original_images=False, object_detection_mask=detector.get_Unet_mask)
20 |     video_file = 'project_video.mp4'
21 |     # video_file = 'challenge_video.mp4'
22 |     clip = VideoFileClip(video_file, audio=False)
23 |     t_start = 0
24 |     t_end = 0
25 |     if t_end > 0.0:
26 |         clip = clip.subclip(t_start=t_start, t_end=t_end)
27 |     else:
28 |         clip = clip.subclip(t_start=t_start)
29 |
30 |     clip = clip.fl_image(lane_finder.process_image)
31 |     clip.write_videofile("{}_output.mp4".format(remove_mp4_extension(video_file)), audio=False)
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | tf.python.control_flow_ops = tf
3 | from keras.models import Sequential
4 | from keras.layers import Dense, Dropout, Activation, Flatten, Lambda, ELU
5 | from keras.layers import Convolution2D, MaxPooling2D
6 | from keras.regularizers import l2
7 |
8 | from keras import backend as K
9 | K.set_image_dim_ordering('tf')
10 |
11 | def nvidia(input_shape, dropout):
12 | model = Sequential()
13 | model.add(Lambda(lambda x: x / 127.5 - 1.,
14 | input_shape=input_shape))
15 | model.add(Convolution2D(24, 5, 5, name='conv_1', subsample=(2, 2)))
16 | model.add(ELU())
17 | model.add(Dropout(dropout))
18 | model.add(Convolution2D(36, 5, 5, name='conv_2', subsample=(2, 2)))
19 | model.add(ELU())
20 | model.add(Dropout(dropout))
21 | model.add(Convolution2D(48, 5, 5, name='conv_3', subsample=(2, 2)))
22 | model.add(ELU())
23 | model.add(Dropout(dropout))
24 | model.add(Convolution2D(64, 3, 3, name='conv_4', subsample=(1, 1)))
25 | model.add(ELU())
26 | model.add(Dropout(dropout))
27 | model.add(Convolution2D(64, 3, 3, name='conv_5', subsample=(1, 1)))
28 | model.add(ELU())
29 | model.add(Dropout(dropout))
30 |
31 | model.add(Flatten())
32 |
33 | model.add(Dense(100))
34 | model.add(ELU())
35 | model.add(Dropout(dropout))
36 | model.add(Dense(50))
37 | model.add(ELU())
38 | model.add(Dropout(dropout))
39 | model.add(Dense(10))
40 | model.add(ELU())
41 | model.add(Dropout(dropout))
42 |
43 | model.add(Dense(1))
44 |
45 | return model
46 |
47 |
48 | def nvidia_with_regularizer(input_shape, dropout):
49 | INIT = 'glorot_uniform'
50 | reg_val = 0.01
51 |
52 | model = Sequential()
53 | model.add(Lambda(lambda x: x / 127.5 - 1.,
54 | input_shape=input_shape))
55 | model.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode="valid", init=INIT, W_regularizer=l2(reg_val)))
56 | model.add(ELU())
57 | model.add(Dropout(dropout))
58 |
59 | model.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode="valid", init=INIT))
60 | model.add(ELU())
61 | model.add(Dropout(dropout))
62 |
63 | model.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode="valid", init=INIT))
64 | model.add(ELU())
65 | model.add(Dropout(dropout))
66 |
67 | model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode="valid", init=INIT))
68 | model.add(ELU())
69 | model.add(Dropout(dropout))
70 |
71 | model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode="valid", init=INIT))
72 | model.add(ELU())
73 | model.add(Dropout(dropout))
74 |
75 | model.add(Flatten())
76 |
77 | model.add(Dense(100))
78 | model.add(ELU())
79 | model.add(Dropout(dropout))
80 |
81 | model.add(Dense(50))
82 | model.add(ELU())
83 | model.add(Dropout(dropout))
84 |
85 | model.add(Dense(10))
86 | model.add(ELU())
87 |
88 | model.add(Dense(1))
89 |
90 | return model
--------------------------------------------------------------------------------
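
model.py only builds the architecture; compiling and training happen elsewhere. A minimal sketch under the Keras 1.x API pinned in environment.yml (the input shape, optimiser and the commented training call are illustrative assumptions):

from model import nvidia

net = nvidia(input_shape=(66, 200, 3), dropout=0.5)   # 66x200x3 is the NVIDIA paper's input size
net.compile(optimizer='adam', loss='mse')             # single-output regression (steering angle)
# net.fit_generator(train_generator, samples_per_epoch=20000, nb_epoch=5)   # Keras 1.x call
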
/object_detect_yolo.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import colorsys
3 | import imghdr
4 | import os
5 | import random
6 |
7 | import numpy as np
8 | from keras import backend as K
9 | from keras.models import load_model
10 | from PIL import Image, ImageDraw, ImageFont
11 | from moviepy.editor import VideoFileClip
12 |
13 | from yolo.yad2k.models.keras_yolo import yolo_eval, yolo_head
14 |
15 |
16 | class YoloDetector(object):
17 | def __init__(self,
18 | model_path="yolo/model_data/yolo.h5",
19 | anchors_path="yolo/model_data/yolo_anchors.txt",
20 | classes_path="yolo/model_data/coco_classes.txt",
21 | font_file_name="yolo/font/FiraMono-Medium.otf"):
22 | assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
23 |
24 | self.model_path = model_path
25 | self.class_names = self._read_class_names(classes_path)
26 | self.colors = self._generator_colors(self.class_names)
27 | self.font_file_name = font_file_name
28 |
29 | self.sess = K.get_session()
30 | self.yolo_model = load_model(model_path)
31 | self.yolo_model.summary()
32 |
33 | anchors = self._read_anchors(anchors_path)
34 | self._validate_model_and_data(self.class_names, anchors, self.yolo_model)
35 |
36 | print('{} model, anchors, and classes loaded.'.format(model_path))
37 |
38 | # Check if model is fully convolutional, assuming channel last order.
39 | self.model_image_size = self.yolo_model.layers[0].input_shape[1:3]
40 | self.is_fixed_size = self.model_image_size != (None, None)
41 |
42 | # Generate output tensor targets for filtered bounding boxes.
43 | yolo_outputs = yolo_head(self.yolo_model.output, anchors, len(self.class_names))
44 | self.input_image_shape = K.placeholder(shape=(2,))
45 | boxes, scores, classes = yolo_eval(
46 | yolo_outputs,
47 | self.input_image_shape,
48 | score_threshold=0.3,
49 | iou_threshold=0.3)
50 | self.boxes = boxes
51 | self.scores = scores
52 | self.classes = classes
53 |
54 | @staticmethod
55 | def _validate_model_and_data(class_names, anchors, yolo_model):
56 | num_classes = len(class_names)
57 | num_anchors = len(anchors)
58 | # TODO: Assumes dim ordering is channel last
59 | model_output_channels = yolo_model.layers[-1].output_shape[-1]
60 | assert model_output_channels == num_anchors * (num_classes + 5), \
61 | 'Mismatch between model and given anchor and class sizes. ' \
62 | 'Specify matching anchors and classes with --anchors_path and ' \
63 | '--classes_path flags.'
64 |
65 | @staticmethod
66 | def _generator_colors(class_names):
67 | # Generate colors for drawing bounding boxes.
68 | hsv_tuples = [(x / len(class_names), 1., 1.)
69 | for x in range(len(class_names))]
70 | colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
71 | colors = list(
72 | map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
73 | colors))
74 | random.seed(10101) # Fixed seed for consistent colors across runs.
75 | random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
76 | random.seed(None) # Reset seed to default.
77 | return colors
78 |
79 | @staticmethod
80 | def _read_class_names(classes_path):
81 | with open(classes_path) as f:
82 | class_names = f.readlines()
83 |
84 | class_names = [c.strip() for c in class_names]
85 | return class_names
86 |
87 | @staticmethod
88 | def _read_anchors(anchors_path):
89 | with open(anchors_path) as f:
90 | anchors = f.readline()
91 | anchors = [float(x) for x in anchors.split(',')]
92 | anchors = np.array(anchors).reshape(-1, 2)
93 | return anchors
94 |
95 | def predict(self, image_pil):
96 | if self.is_fixed_size:
97 | resized_image = image_pil.resize(
98 | tuple(reversed(self.model_image_size)), Image.BICUBIC)
99 | image_data = np.array(resized_image, dtype='float32')
100 | else:
101 | image_data = np.array(image_pil, dtype='float32')
102 |
103 | image_data /= 255.
104 | image_data = np.expand_dims(image_data, 0) # Add batch dimension.
105 |
106 | out_boxes, out_scores, out_classes = self.sess.run(
107 | [self.boxes, self.scores, self.classes],
108 | feed_dict={
109 | self.yolo_model.input: image_data,
110 | self.input_image_shape: [image_pil.size[1], image_pil.size[0]],
111 | K.learning_phase(): 0
112 | })
113 |
114 | return out_boxes, out_scores, out_classes
115 |
116 | def draw_border_boxes(self, image, out_boxes, out_scores, out_classes):
117 | font = ImageFont.truetype(
118 | font=self.font_file_name,
119 | size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
120 | thickness = (image.size[0] + image.size[1]) // 300
121 |
122 | for i, c in reversed(list(enumerate(out_classes))):
123 | predicted_class = self.class_names[c]
124 | box = out_boxes[i]
125 | score = out_scores[i]
126 |
127 | label = '{} {:.2f}'.format(predicted_class, score)
128 |
129 | draw = ImageDraw.Draw(image)
130 | label_size = draw.textsize(label, font)
131 |
132 | top, left, bottom, right = box
133 | top = max(0, np.floor(top + 0.5).astype('int32'))
134 | left = max(0, np.floor(left + 0.5).astype('int32'))
135 | bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
136 | right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
137 | print(label, (left, top), (right, bottom))
138 |
139 | if top - label_size[1] >= 0:
140 | text_origin = np.array([left, top - label_size[1]])
141 | else:
142 | text_origin = np.array([left, top + 1])
143 |
144 |             for t in range(thickness):
145 |                 draw.rectangle(
146 |                     [left + t, top + t, right - t, bottom - t],
147 | outline=self.colors[c])
148 | draw.rectangle(
149 | [tuple(text_origin), tuple(text_origin + label_size)],
150 | fill=self.colors[c])
151 | draw.text(text_origin, label, fill=(0, 0, 0), font=font)
152 | del draw
153 |
154 | def process_image_array(self, image_array):
155 | image = Image.fromarray(image_array)
156 | image = self.process_image_pil(image)
157 | return np.array(image)
158 |
159 | def process_image_pil(self, image_pil):
160 | out_boxes, out_scores, out_classes = self.predict(image_pil)
161 | self.draw_border_boxes(image_pil, out_boxes, out_scores, out_classes)
162 | return image_pil
163 |
164 | def process_image_file(self, input_image_file_name, output_image_filename):
165 | image = Image.open(input_image_file_name)
166 | image = self.process_image_pil(image)
167 | image.save(output_image_filename, quality=90)
168 |
169 | def process_folder(self, input_folder, output_folder):
170 | for image_file in os.listdir(input_folder):
171 | try:
172 | image_type = imghdr.what(os.path.join(input_folder, image_file))
173 | if not image_type:
174 | continue
175 | except IsADirectoryError:
176 | continue
177 |
178 | self.process_image_file(os.path.join(input_folder, image_file), os.path.join(output_folder, image_file))
179 |
180 | def shutdown(self):
181 | self.sess.close()
182 |
183 |
184 | if __name__ == "__main__":
185 |
186 | def remove_mp4_extension(file_name):
187 | return file_name.replace(".mp4", "")
188 |
189 | def process_folder(yolo):
190 | yolo.process_folder(input_folder="test_images", output_folder="output_images/object-detect")
191 |
192 | def process_video(yolo):
193 | video_file = 'back_home_fast.mp4'
194 | # video_file = 'project_video.mp4'
195 | # video_file = 'challenge_video.mp4'
196 | clip = VideoFileClip(video_file, audio=False)
197 | t_start = 0
198 | t_end = 0
199 | if t_end > 0.0:
200 | clip = clip.subclip(t_start=t_start, t_end=t_end)
201 | else:
202 | clip = clip.subclip(t_start=t_start)
203 |
204 | clip = clip.fl_image(yolo.process_image_array)
205 | clip.write_videofile("{}_output_detect.mp4".format(remove_mp4_extension(video_file)), audio=False)
206 |
207 | yolo = YoloDetector()
208 | process_folder(yolo)
209 | # process_video(yolo)
210 | yolo.shutdown()
211 |
212 |
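
Usage note (a sketch, not part of object_detect_yolo.py): besides the folder and video helpers in the __main__ block, the detector can be run on a single frame with the methods defined above. The file paths below are assumptions for illustration.

    detector = YoloDetector()
    detector.process_image_file("test_images/test1.jpg",
                                "output_images/object-detect/test1.jpg")
    detector.shutdown()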
--------------------------------------------------------------------------------
/other_images/Non-maximum-suppression.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/other_images/Non-maximum-suppression.jpeg
--------------------------------------------------------------------------------
/other_images/Person.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/other_images/Person.png
--------------------------------------------------------------------------------
/other_images/TrafficLightsAndVehicles.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/other_images/TrafficLightsAndVehicles.png
--------------------------------------------------------------------------------
/other_images/YOLO_Demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/other_images/YOLO_Demo.png
--------------------------------------------------------------------------------
/other_images/YOLO_NN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/other_images/YOLO_NN.png
--------------------------------------------------------------------------------
/other_images/YOLO_Parameters.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/other_images/YOLO_Parameters.jpeg
--------------------------------------------------------------------------------
/other_images/cnn-weights.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/other_images/cnn-weights.jpeg
--------------------------------------------------------------------------------
/other_images/convnet-car.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/other_images/convnet-car.jpeg
--------------------------------------------------------------------------------
/output_images/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/.DS_Store
--------------------------------------------------------------------------------
/output_images/b+g+r_channels.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/b+g+r_channels.png
--------------------------------------------------------------------------------
/output_images/calibration1_undist.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/calibration1_undist.png
--------------------------------------------------------------------------------
/output_images/camera_calibration_pickle.p:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/camera_calibration_pickle.p
--------------------------------------------------------------------------------
/output_images/car1_hog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/car1_hog.png
--------------------------------------------------------------------------------
/output_images/feature_normalize.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/feature_normalize.png
--------------------------------------------------------------------------------
/output_images/features_search/test1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/features_search/test1.png
--------------------------------------------------------------------------------
/output_images/h_channel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/h_channel.png
--------------------------------------------------------------------------------
/output_images/image_combined_4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/image_combined_4.jpg
--------------------------------------------------------------------------------
/output_images/l_channel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/l_channel.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_1047.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_1047.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_602.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_602.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_714.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_714.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_746.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_746.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_993.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_993.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_straight_lines1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_straight_lines1.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_straight_lines2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_straight_lines2.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_test1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_test1.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_test2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_test2.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_test3.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_test3.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_test4.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_test4.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_test5.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_test5.jpg.png
--------------------------------------------------------------------------------
/output_images/lane-smooth/combine_test6.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane-smooth/combine_test6.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_1047.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_1047.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_602.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_602.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_714.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_714.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_746.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_746.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_993.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_993.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_straight_lines1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_straight_lines1.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_straight_lines2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_straight_lines2.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_test1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_test1.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_test2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_test2.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_test3.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_test3.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_test4.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_test4.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_test5.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_test5.jpg.png
--------------------------------------------------------------------------------
/output_images/lane/combine_test6.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/lane/combine_test6.jpg.png
--------------------------------------------------------------------------------
/output_images/object-detect/1047.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/1047.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/602.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/602.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/714.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/714.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/746.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/746.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/993.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/993.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/car1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/car1.png
--------------------------------------------------------------------------------
/output_images/object-detect/straight_lines1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/straight_lines1.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/straight_lines2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/straight_lines2.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/test1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/test1.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/test2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/test2.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/test3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/test3.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/test4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/test4.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/test5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/test5.jpg
--------------------------------------------------------------------------------
/output_images/object-detect/test6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/object-detect/test6.jpg
--------------------------------------------------------------------------------
/output_images/perspective/1047.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/1047.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/602.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/602.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/616.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/616.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/714.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/714.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/746.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/746.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/993.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/993.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/straight_lines1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/straight_lines1.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/straight_lines2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/straight_lines2.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/test1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/test1.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/test2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/test2.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/test3.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/test3.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/test4.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/test4.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/test5.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/test5.jpg.png
--------------------------------------------------------------------------------
/output_images/perspective/test6.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/perspective/test6.jpg.png
--------------------------------------------------------------------------------
/output_images/s_channel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/s_channel.png
--------------------------------------------------------------------------------
/output_images/save_output_here.txt:
--------------------------------------------------------------------------------
1 | Please save your output images to this folder and include a description in your README of what each image shows.
--------------------------------------------------------------------------------
/output_images/slide_window.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/slide_window.png
--------------------------------------------------------------------------------
/output_images/slide_window/746.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/slide_window/746.jpg.png
--------------------------------------------------------------------------------
/output_images/slide_window/test1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/slide_window/test1.jpg.png
--------------------------------------------------------------------------------
/output_images/sobel_combined.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/sobel_combined.png
--------------------------------------------------------------------------------
/output_images/sobel_dir.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/sobel_dir.png
--------------------------------------------------------------------------------
/output_images/sobel_final_pipe_line.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/sobel_final_pipe_line.png
--------------------------------------------------------------------------------
/output_images/sobel_l+dir.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/sobel_l+dir.png
--------------------------------------------------------------------------------
/output_images/sobel_mag.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/sobel_mag.png
--------------------------------------------------------------------------------
/output_images/sobel_s+x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/sobel_s+x.png
--------------------------------------------------------------------------------
/output_images/sobel_x.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/sobel_x.png
--------------------------------------------------------------------------------
/output_images/sobel_y.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/sobel_y.png
--------------------------------------------------------------------------------
/output_images/thresholding/1047.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/1047.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/602.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/602.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/616.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/616.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/714.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/714.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/993.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/993.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/straight_lines1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/straight_lines1.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/straight_lines2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/straight_lines2.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/test1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/test1.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/test2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/test2.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/test3.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/test3.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/test4.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/test4.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/test5.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/test5.jpg.png
--------------------------------------------------------------------------------
/output_images/thresholding/test6.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/thresholding/test6.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/1047.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/1047.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/602.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/602.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/714.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/714.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/993.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/993.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/shadow.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/shadow.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/straight_lines1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/straight_lines1.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/straight_lines2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/straight_lines2.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/test1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/test1.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/test2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/test2.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/test3.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/test3.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/test4.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/test4.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/test5.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/test5.jpg.png
--------------------------------------------------------------------------------
/output_images/undistort/test6.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/undistort/test6.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/1047.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/1047.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/602.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/602.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/714.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/714.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/746.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/746.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/993.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/993.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/straight_lines1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/straight_lines1.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/straight_lines2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/straight_lines2.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/test1.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/test1.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/test2.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/test2.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/test3.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/test3.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/test4.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/test4.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/test5.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/test5.jpg.png
--------------------------------------------------------------------------------
/output_images/vehicle/test6.jpg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/output_images/vehicle/test6.jpg.png
--------------------------------------------------------------------------------
/perspective_transform.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 |
5 | def calculate_transform_matrices(image_width, image_height):
6 | bottomW = image_width
7 | topW = 249
8 | bottomH = image_height - 20
9 | topH = bottomH - 228
10 | region_vertices = np.array([[((image_width - bottomW) // 2, bottomH),
11 | ((image_width - topW) // 2, topH),
12 | ((image_width + topW) // 2, topH),
13 | ((image_width + bottomW) // 2, bottomH)]])
14 | offsetH = 10
15 | offsetW = 100
16 | dest_vertices = np.array([[(offsetW, image_height - offsetH),
17 | (offsetW, offsetH),
18 | (image_width - offsetW, offsetH),
19 | (image_width - offsetW, image_height - offsetH)]])
20 |
21 | perspective_transform_matrix = cv2.getPerspectiveTransform(
22 | np.float32(region_vertices), np.float32(dest_vertices))
23 | inversion_perspective_transform_matrix = cv2.getPerspectiveTransform(
24 | np.float32(dest_vertices), np.float32(region_vertices))
25 |
26 | return perspective_transform_matrix, inversion_perspective_transform_matrix
27 |
28 |
29 | def perspective_transform(img, perspective_transform_matrix):
30 | return cv2.warpPerspective(img, perspective_transform_matrix, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
31 |
32 |
33 | def inversion_perspective_transform(img, inversion_perspective_transform_matrix):
34 | return cv2.warpPerspective(img, inversion_perspective_transform_matrix, (img.shape[1], img.shape[0]),
35 | flags=cv2.INTER_LINEAR)
36 |
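
Usage note (a sketch, not part of perspective_transform.py): the two matrices are computed once per frame size and then reused to warp a road image to a top-down view and back. The file path below is an assumption for illustration.

    import cv2

    img = cv2.imread("test_images/straight_lines1.jpg")  # assumed road frame
    matrix, inv_matrix = calculate_transform_matrices(img.shape[1], img.shape[0])
    birds_eye = perspective_transform(img, matrix)
    restored = inversion_perspective_transform(birds_eye, inv_matrix)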
--------------------------------------------------------------------------------
/project_video.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/project_video.mp4
--------------------------------------------------------------------------------
/search_windows.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import matplotlib.pyplot as plt
4 | import matplotlib.image as mpimg
5 | from features import single_img_features
6 |
7 | # Draw bounding boxes (pairs of corner points) on a copy of the image
8 | def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
9 | # Make a copy of the image
10 | imcopy = np.copy(img)
11 | # Iterate through the bounding boxes
12 | for bbox in bboxes:
13 | # Draw a rectangle given bbox coordinates
14 | cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
15 | # Return the image copy with boxes drawn
16 | return imcopy
17 |
18 |
19 | # Define a function that takes an image,
20 | # start and stop positions in both x and y,
21 | # window size (x and y dimensions),
22 | # and overlap fraction (for both x and y)
23 | def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
24 | xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
25 | # If x and/or y start/stop positions not defined, set to image size
26 |     if x_start_stop[0] is None:
27 |         x_start_stop[0] = 0
28 |     if x_start_stop[1] is None:
29 |         x_start_stop[1] = img.shape[1]
30 |     if y_start_stop[0] is None:
31 |         y_start_stop[0] = 0
32 |     if y_start_stop[1] is None:
33 |         y_start_stop[1] = img.shape[0]
34 | # Compute the span of the region to be searched
35 | xspan = x_start_stop[1] - x_start_stop[0]
36 | yspan = y_start_stop[1] - y_start_stop[0]
37 | # Compute the number of pixels per step in x/y
38 | nx_pix_per_step = np.int(xy_window[0] * (1 - xy_overlap[0]))
39 | ny_pix_per_step = np.int(xy_window[1] * (1 - xy_overlap[1]))
40 | # Compute the number of windows in x/y
41 | nx_windows = np.int(xspan // nx_pix_per_step)
42 | ny_windows = np.int(yspan // ny_pix_per_step)
43 | # Initialize a list to append window positions to
44 | window_list = []
45 | # Loop through finding x and y window positions
46 | # Note: you could vectorize this step, but in practice
47 | # you'll be considering windows one by one with your
48 | # classifier, so looping makes sense
49 | for ys in range(ny_windows):
50 | for xs in range(nx_windows):
51 | # Calculate window position
52 | startx = xs * nx_pix_per_step + x_start_stop[0]
53 | endx = startx + xy_window[0]
54 | starty = ys * ny_pix_per_step + y_start_stop[0]
55 | endy = starty + xy_window[1]
56 | # Append window position to list
57 | window_list.append(((startx, starty), (endx, endy)))
58 | # Return the list of windows
59 | return window_list
60 |
61 |
62 | # Define a function you will pass an image
63 | # and the list of windows to be searched (output of slide_windows())
64 | def search_windows(img, windows, clf, scaler, color_space='RGB',
65 | spatial_size=(32, 32), hist_bins=32,
66 | hist_range=(0, 256), orient=9,
67 | pix_per_cell=8, cell_per_block=2,
68 | hog_channel=0, spatial_feat=True,
69 | hist_feat=True, hog_feat=True):
70 | # 1) Create an empty list to receive positive detection windows
71 | on_windows = []
72 | # 2) Iterate over all windows in the list
73 | for window in windows:
74 | # 3) Extract the test window from original image
75 | test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
76 | # 4) Extract features for that window using single_img_features()
77 | features = single_img_features(test_img, color_space=color_space,
78 | spatial_size=spatial_size, hist_bins=hist_bins,
79 | orient=orient, pix_per_cell=pix_per_cell,
80 | cell_per_block=cell_per_block,
81 | hog_channel=hog_channel, spatial_feat=spatial_feat,
82 | hist_feat=hist_feat, hog_feat=hog_feat)
83 | # 5) Scale extracted features to be fed to classifier
84 | test_features = scaler.transform(np.array(features).reshape(1, -1))
85 | # 6) Predict using your classifier
86 | prediction = clf.predict(test_features)
87 | # 7) If positive (prediction == 1) then save the window
88 | if prediction == 1:
89 | on_windows.append(window)
90 | # 8) Return windows for positive detections
91 | return on_windows
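
# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes a LinearSVC `svc` and StandardScaler `X_scaler` that were fitted on
# features extracted with the same parameters used by single_img_features().
def demo_search(image, svc, X_scaler):
    # Search the lower half of the frame with one window scale and draw any hits
    windows = slide_window(image, x_start_stop=[None, None],
                           y_start_stop=[image.shape[0] // 2, None],
                           xy_window=(96, 96), xy_overlap=(0.5, 0.5))
    hot_windows = search_windows(image, windows, svc, X_scaler,
                                 color_space='HSV', hog_channel='ALL')
    return draw_boxes(image, hot_windows, color=(0, 0, 255), thick=3)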
--------------------------------------------------------------------------------
/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test/__init__.py
--------------------------------------------------------------------------------
/test/pipe_line_test.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import cv2
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import glob
6 | import os
7 | from camera_calibrate import load_camera_calibration
8 | from perspective_transform import *
9 | from thresholding import *
10 | from main import LaneFinder
11 | from vehicle_detect_nn import VehicleDetector
12 | import matplotlib.image as mpimg
13 | from object_detect_yolo import YoloDetector
14 |
15 |
16 | def bin_to_rgb(bin_image):
17 | return cv2.cvtColor(bin_image * 255, cv2.COLOR_GRAY2BGR)
18 |
19 |
20 | def compose_images(dst_image, src_image, split_rows, split_columns, which_section):
21 | assert 0 < which_section <= split_rows * split_columns
22 |
23 | if split_rows > split_columns:
24 | newH = int(dst_image.shape[0] / split_rows)
25 | dim = (int(dst_image.shape[1] * newH / dst_image.shape[0]), newH)
26 | else:
27 | newW = int(dst_image.shape[1] / split_columns)
28 | dim = (newW, int(dst_image.shape[0] * newW / dst_image.shape[1]))
29 |
30 | if len(src_image.shape) == 2:
31 | srcN = bin_to_rgb(src_image)
32 | else:
33 | srcN = np.copy(src_image)
34 |
35 | img = cv2.resize(srcN, dim, interpolation=cv2.INTER_AREA)
36 | nr = (which_section - 1) // split_columns
37 | nc = (which_section - 1) % split_columns
38 | dst_image[nr * img.shape[0]:(nr + 1) * img.shape[0], nc * img.shape[1]:(nc + 1) * img.shape[1]] = img
39 | return dst_image
40 |
41 |
42 | def plot_to_image(plt):
43 | plt.savefig('tmp_plt.png')
44 | img = cv2.imread('tmp_plt.png')
45 | # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
46 | return img
47 |
48 |
49 | def show_image(image, cmap=None):
50 | plt.figure()
51 | plt.imshow(image, cmap)
52 |
53 |
54 | def save_image(image, file_name):
55 | plt.figure()
56 | plt.imshow(image)
57 | plt.savefig(file_name)
58 |
59 |
60 | def save_image_gray(image, file_name):
61 | plt.figure()
62 | plt.imshow(image, cmap='gray')
63 | plt.savefig(file_name)
64 |
65 |
66 | def plot_two_image(image1, image2):
67 | f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
68 | f.tight_layout()
69 |
70 | ax1.imshow(image1[0])
71 | ax1.set_title(image1[1], fontsize=40)
72 |
73 | ax2.imshow(image2[0])
74 | ax2.set_title(image2[1], fontsize=40)
75 | plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
76 |
77 | return plt, ax1, ax2
78 |
79 |
80 | class PipeLineTest(unittest.TestCase):
81 | def test_vehicle_detection(self):
82 | images = glob.glob('../test_images/*.jpg')
83 | for fname in images:
84 | detector = VehicleDetector(img_rows=640, img_cols=960, weights_file="../model_segn_small_0p72.h5")
85 | image = mpimg.imread(fname)
86 | result_pipe = image
87 | result_pipe = detector.get_BB_new_img(result_pipe)
88 | save_image(result_pipe, '../output_images/vehicle/{}.png'.format(os.path.basename(fname)))
89 |
90 | def test_perspective_transform(self):
91 | images = glob.glob('../test_images/*.jpg')
92 | for fname in images:
93 | img = cv2.imread(fname)
94 | matrix, invent_matrix = calculate_transform_matrices(img.shape[1], img.shape[0])
95 | perspective_img = perspective_transform(img, matrix)
96 | save_image(perspective_img, '../output_images/perspective/{}.png'.format(os.path.basename(fname)))
97 |
98 | def test_combine_with_or(self):
99 | a1 = np.array([[1, 0, 1]])
100 | a2 = np.array([[1, 1, 0]])
101 | a3 = combine_with_or(a1, a2)
102 | np.testing.assert_almost_equal([[1, 1, 1]], a3)
103 |
104 | def test_combine_with_and(self):
105 | a1 = np.array([[1, 0, 1]])
106 | a2 = np.array([[1, 1, 0]])
107 | result = np.array([[1, 0, 0]])
108 | a3 = combine_with_and(a1, a2)
109 | np.testing.assert_almost_equal(result, a3)
110 |
111 | def test_threshold(self):
112 | image = cv2.imread('../test_images/test1.jpg')
113 | # image = cv2.imread('../test_images/test1.jpg')
114 | hls_image = cv2.cvtColor(image, cv2.COLOR_BGR2HLS).astype(np.float)
115 | h_binary, l_binary, s_binary = hls_channel_threshold(
116 | hls_image, h_thresh=(170, 255), l_thresh=(190, 255),
117 | s_thresh=(170, 255))
118 | save_image_gray(h_binary, "../output_images/h_channel.png")
119 | save_image_gray(l_binary, "../output_images/l_channel.png")
120 | save_image_gray(s_binary, "../output_images/s_channel.png")
121 |
122 | gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
123 | ksize = 25
124 | save_image_gray(mag_thresh(gray, ksize, (50, 255)), "../output_images/sobel_mag.png")
125 | save_image_gray(dir_threshold(gray, sobel_kernel=31, thresh=(0.7, 1.2)), "../output_images/sobel_dir.png")
126 | save_image_gray(combine_threshold(gray), "../output_images/sobel_combined.png")
127 |
128 | save_image_gray(
129 | abs_sobel_thresh(gray, "x", sobel_kernel=ksize, thresh=(50, 150)), "../output_images/sobel_x.png")
130 | save_image_gray(
131 | abs_sobel_thresh(gray, "y", sobel_kernel=ksize, thresh=(30, 100)), "../output_images/sobel_y.png")
132 |
133 | save_image_gray(
134 | combine_with_and(
135 | hls_channel_threshold(hls_image, l_thresh=(190, 255))[1],
136 | dir_threshold(gray, sobel_kernel=31, thresh=(0.7, 1.2))
137 | ), "../output_images/sobel_l+dir.png")
138 | save_image_gray(
139 | combine_with_and(
140 | hls_channel_threshold(hls_image, s_thresh=(170, 255))[2],
141 | abs_sobel_thresh(gray, orient='x', sobel_kernel=5, thresh=(10, 100))
142 | ), "../output_images/sobel_s+x.png")
143 | save_image_gray(
144 | combine_with_or(
145 | *bgr_channel_threshold(image)
146 | ), "../output_images/b+g+r_channels.png"
147 | )
148 | save_image_gray(
149 | pipeline(image), "../output_images/sobel_final_pipe_line.png")
150 |
151 | def test_threshold_pipe_line(self):
152 | images = glob.glob('../test_images/*.jpg')
153 | for fname in images:
154 | image = cv2.imread(fname)
155 | combined = pipeline(image)
156 |
157 | f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
158 | f.tight_layout()
159 |
160 | ax1.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
161 | ax1.set_title('Original Image', fontsize=40)
162 |
163 | ax2.imshow(combined, cmap='gray')
164 | ax2.set_title('Pipeline Result', fontsize=40)
165 | plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
166 | plt.savefig("../output_images/thresholding/{}.png".format(os.path.basename(fname)))
167 |
168 | def test_lane_line(self):
169 | yolo = YoloDetector(model_path="../yolo/model_data/yolo.h5",
170 | anchors_path="../yolo/model_data/yolo_anchors.txt",
171 | classes_path="../yolo/model_data/coco_classes.txt",
172 | font_file_name="../yolo/font/FiraMono-Medium.otf")
173 | images = glob.glob('../test_images/*.jpg')
174 | for fname in images:
175 | lane_finder = LaneFinder(save_original_images=False, object_detection_func=yolo.process_image_array,
176 | camera_calibration_file="../output_images/camera_calibration_pickle.p")
177 | image = cv2.imread(fname)
178 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
179 | final_image = lane_finder.process_image(image)
180 | cv2.imwrite("../output_images/lane/combine_{}.png".format(os.path.basename(fname)),
181 | cv2.cvtColor(final_image, cv2.COLOR_RGB2BGR))
182 |
183 | def test_find_line_one_image(self):
184 | fname = "../test_images/602.jpg"
185 |
186 | detector = VehicleDetector(img_rows=640, img_cols=960, weights_file="../model_segn_small_0p72.h5")
187 |
188 | lane_finder = LaneFinder(save_original_images=False, object_detection_func=detector.get_Unet_mask,
189 | camera_calibration_file="../output_images/camera_calibration_pickle.p")
190 | image = cv2.imread(fname)
191 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
192 | final_image = lane_finder.process_image(image)
193 | cv2.imwrite("../output_images/lane/combine_{}.png".format(os.path.basename(fname)),
194 | cv2.cvtColor(final_image, cv2.COLOR_RGB2BGR))
195 |
196 |
197 | def test_find_line_one_image_yolo(self):
198 | fname = "../test_images/602.jpg"
199 |
200 | yolo = YoloDetector(model_path="../yolo/model_data/yolo.h5",
201 | anchors_path="../yolo/model_data/yolo_anchors.txt",
202 | classes_path="../yolo/model_data/coco_classes.txt",
203 | font_file_name="../yolo/font/FiraMono-Medium.otf")
204 |
205 | lane_finder = LaneFinder(save_original_images=False, object_detection_func=yolo.process_image_array,
206 | camera_calibration_file="../output_images/camera_calibration_pickle.p")
207 | image = cv2.imread(fname)
208 | image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
209 | final_image = lane_finder.process_image(image)
210 | cv2.imwrite("../output_images/lane/combine_{}.png".format(os.path.basename(fname)),
211 | cv2.cvtColor(final_image, cv2.COLOR_RGB2BGR))
212 | yolo.shutdown()
213 |
214 | def test_undistorted(self):
215 | camera_matrix, distortion = load_camera_calibration("../output_images/camera_calibration_pickle.p")
216 |
217 | images = glob.glob('../test_images/*.jpg')
218 | for fname in images:
219 | image = cv2.imread(fname)
220 | undistorted_image = cv2.undistort(image, camera_matrix, distortion, None, camera_matrix)
221 | plt, _, _ = plot_two_image(
222 | (cv2.cvtColor(image, cv2.COLOR_BGR2RGB), "original"),
223 | (cv2.cvtColor(undistorted_image, cv2.COLOR_BGR2RGB), "undistorted"))
224 | plt.savefig("../output_images/undistort/{}.png".format(os.path.basename(fname)))
225 |
226 | def test_line_search_base_position_should_find_middle_point_if_no_last_knowledge(self):
227 | histogram = np.array([1, 2, 1, 3, 4, 3])
228 | left, right = LaneFinder._line_search_base_position(histogram, None, None)
229 | self.assertEqual(left, 1)
230 | self.assertEqual(right, 4)
231 |
232 | def test_line_search_base_position_should_find_peak_point_near_last_know_position(self):
233 | histogram = np.array([1, 4, 1, 2, 1, 3, 4, 3, 5, 3])
234 | left, right = LaneFinder._line_search_base_position(histogram, None, None)
235 | self.assertEqual(left, 1)
236 | self.assertEqual(right, 8)
237 | left, right = LaneFinder._line_search_base_position(
238 | histogram, last_know_leftx_base=4, last_know_rightx_base=6, peak_detect_offset=1)
239 | self.assertEqual(left, 5)
240 | self.assertEqual(right, 6)
241 | left, right = LaneFinder._line_search_base_position(
242 | histogram, last_know_leftx_base=4, last_know_rightx_base=9, peak_detect_offset=2)
243 | self.assertEqual(left, 6)
244 | self.assertEqual(right, 8)
245 |
246 |
--------------------------------------------------------------------------------
/test/test_features.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import cv2
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import glob
6 | import os
7 | import time
8 | from features import get_hog_features, extract_color_features, extract_hog_features, extract_features_all
9 | from search_windows import search_windows, slide_window, draw_boxes
10 | import matplotlib.image as mpimg
11 | from sklearn.preprocessing import StandardScaler
12 | from sklearn.model_selection import train_test_split
13 | from sklearn.svm import LinearSVC
14 |
15 |
16 | def bin_to_rgb(bin_image):
17 | return cv2.cvtColor(bin_image * 255, cv2.COLOR_GRAY2BGR)
18 |
19 |
20 | def compose_images(dst_image, src_image, split_rows, split_columns, which_section):
21 | assert 0 < which_section <= split_rows * split_columns
22 |
23 | if split_rows > split_columns:
24 | newH = int(dst_image.shape[0] / split_rows)
25 | dim = (int(dst_image.shape[1] * newH / dst_image.shape[0]), newH)
26 | else:
27 | newW = int(dst_image.shape[1] / split_columns)
28 | dim = (newW, int(dst_image.shape[0] * newW / dst_image.shape[1]))
29 |
30 | if len(src_image.shape) == 2:
31 | srcN = bin_to_rgb(src_image)
32 | else:
33 | srcN = np.copy(src_image)
34 |
35 | img = cv2.resize(srcN, dim, interpolation=cv2.INTER_AREA)
36 | nr = (which_section - 1) // split_columns
37 | nc = (which_section - 1) % split_columns
38 | dst_image[nr * img.shape[0]:(nr + 1) * img.shape[0], nc * img.shape[1]:(nc + 1) * img.shape[1]] = img
39 | return dst_image
40 |
41 |
42 | def plot_to_image(plt):
43 | plt.savefig('tmp_plt.png')
44 | img = cv2.imread('tmp_plt.png')
45 | # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
46 | return img
47 |
48 |
49 | def show_image(image, cmap=None):
50 | plt.figure()
51 | plt.imshow(image, cmap)
52 |
53 |
54 | def save_image(image, file_name):
55 | plt.figure()
56 | plt.imshow(image)
57 | plt.savefig(file_name)
58 |
59 |
60 | def save_image_gray(image, file_name):
61 | plt.figure()
62 | plt.imshow(image, cmap='gray')
63 | plt.savefig(file_name)
64 |
65 |
66 | def plot_two_image(image1, image2):
67 | f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
68 | f.tight_layout()
69 |
70 | ax1.imshow(image1[0])
71 | ax1.set_title(image1[1], fontsize=40)
72 |
73 | ax2.imshow(image2[0])
74 | ax2.set_title(image2[1], fontsize=40)
75 | plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
76 |
77 | return plt, ax1, ax2
78 |
79 |
80 | class FeaturesTest(unittest.TestCase):
81 |
82 | def test_hog_rect(self):
83 | gray_image = np.array([
84 | [255, 255, 255, 255, 255, 255],
85 | [255, 0, 0, 255, 255, 255],
86 | [255, 0, 0, 255, 255, 255],
87 | [255, 255, 255, 255, 255, 255]
88 | ])
89 | features, hog_image = get_hog_features(gray_image, orient=9,
90 | pix_per_cell=2, cell_per_block=2,
91 | vis=True, feature_vec=False)
92 | self.assertTupleEqual(features.shape, (1, 2, 2, 2, 9))
93 | plt.figure()
94 | plt.subplot(121)
95 | plt.imshow(gray_image, cmap='gray')
96 | plt.title('Example Car Image')
97 | plt.subplot(122)
98 | plt.imshow(hog_image, cmap='gray')
99 | plt.title('HOG Visualization')
100 | plt.show()
101 |
102 | def test_hog_with_car(self):
103 | image = mpimg.imread("../test_images/car1.png")
104 | gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
105 | features, hog_image = get_hog_features(gray_image, orient=9,
106 | pix_per_cell=8, cell_per_block=2,
107 | vis=True, feature_vec=False)
108 | fig = plt.figure()
109 | plt.subplot(121)
110 | plt.imshow(gray_image, cmap='gray')
111 | plt.title('Example Car Image')
112 | plt.subplot(122)
113 | plt.imshow(hog_image, cmap='gray')
114 | plt.title('HOG Visualization')
115 | plt.savefig("../output_images/car1_hog.png")
116 | self.assertTupleEqual(gray_image.shape, (64, 64))
117 | self.assertTupleEqual(features.shape, (7, 7, 2, 2, 9))
118 |
119 | def test_feature_normalize_should_have_similar_max_min_range(self):
120 | car_file_names = glob.glob('../data/vehicles/*/*.png')
121 | notcar_file_names = glob.glob('../data/non-vehicles/*/*.png')
122 |
123 | car_features = extract_color_features(car_file_names, cspace='RGB')
124 | notcar_features = extract_color_features(notcar_file_names, cspace='RGB')
125 |
126 | X = np.vstack((car_features, notcar_features)).astype(np.float64)
127 | # Fit a per-column scaler
128 |
129 | X_scaler = StandardScaler().fit(X)
130 | # Apply the scaler to X
131 |
132 | scaled_X = X_scaler.transform(X)
133 | car_ind = np.random.randint(0, len(car_file_names))
134 | # Plot an example of raw and scaled features
135 | fig = plt.figure(figsize=(12, 4))
136 | plt.subplot(131)
137 | plt.imshow(mpimg.imread(car_file_names[car_ind]))
138 | plt.title('Original Image')
139 | plt.subplot(132)
140 | plt.plot(X[car_ind])
141 | plt.title('Raw Features')
142 | plt.subplot(133)
143 | plt.plot(scaled_X[car_ind])
144 | plt.title('Normalized Features')
145 | fig.tight_layout()
146 | plt.savefig("../output_images/feature_normalize.png")
147 |
148 |
149 | def test_svc_predict_should_have_accuracy_91(self):
150 | car_file_names = glob.glob('../data/vehicles/*/*.png')
151 | notcar_file_names = glob.glob('../data/non-vehicles/*/*.png')
152 |
153 | spatial = 32
154 | histbin = 32
155 | car_features = extract_color_features(car_file_names, cspace='RGB', spatial_size=(spatial, spatial),
156 | hist_bins=histbin)
157 | notcar_features = extract_color_features(notcar_file_names, cspace='RGB', spatial_size=(spatial, spatial),
158 | hist_bins=histbin)
159 | X = np.vstack((car_features, notcar_features)).astype(np.float64)
160 | # Fit a per-column scaler
161 |
162 | X_scaler = StandardScaler().fit(X)
163 | # Apply the scaler to X
164 |
165 | scaled_X = X_scaler.transform(X)
166 |
167 | # Define the labels vector
168 | y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
169 |
170 |
171 | # Split up data into randomized training and test sets
172 | rand_state = np.random.randint(0, 100)
173 | X_train, X_test, y_train, y_test = train_test_split(
174 | scaled_X, y, test_size=0.2, random_state=rand_state)
175 |
176 | print('Using spatial binning of:', spatial,
177 | 'and', histbin, 'histogram bins')
178 | print('Feature vector length:', len(X_train[0]))
179 | # Use a linear SVC
180 | svc = LinearSVC()
181 | # Check the training time for the SVC
182 | t = time.time()
183 | svc.fit(X_train, y_train)
184 | t2 = time.time()
185 | print(round(t2 - t, 2), 'Seconds to train SVC...')
186 | # Check the score of the SVC
187 | print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
188 | # Check the prediction time for a single sample
189 | t = time.time()
190 | n_predict = 10
191 | print('My SVC predicts: ', svc.predict(X_test[0:n_predict]))
192 | print('For these', n_predict, 'labels: ', y_test[0:n_predict])
193 | t2 = time.time()
194 | print(round(t2 - t, 5), 'Seconds to predict', n_predict, 'labels with SVC')
195 |
196 | def test_hog_should_give_98_accuracy(self):
197 | car_file_names = glob.glob('../data/vehicles/*/*.png')
198 | notcar_file_names = glob.glob('../data/non-vehicles/*/*.png')
199 |
200 | colorspace = 'HSV' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
201 |
202 | orient = 9
203 | pix_per_cell = 8
204 | cell_per_block = 2
205 | hog_channel = "ALL" # Can be 0, 1, 2, or "ALL"
206 |
207 | t = time.time()
208 | car_features = extract_hog_features(car_file_names, cspace=colorspace, orient=orient,
209 | pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
210 | hog_channel=hog_channel)
211 | notcar_features = extract_hog_features(notcar_file_names, cspace=colorspace, orient=orient,
212 | pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
213 | hog_channel=hog_channel)
214 | t2 = time.time()
215 | print(round(t2 - t, 2), 'Seconds to extract HOG features...')
216 | # Create an array stack of feature vectors
217 | X = np.vstack((car_features, notcar_features)).astype(np.float64)
218 | # Fit a per-column scaler
219 | X_scaler = StandardScaler().fit(X)
220 | # Apply the scaler to X
221 | scaled_X = X_scaler.transform(X)
222 |
223 | # Define the labels vector
224 | y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
225 |
226 | # Split up data into randomized training and test sets
227 | rand_state = np.random.randint(0, 100)
228 | X_train, X_test, y_train, y_test = train_test_split(
229 | scaled_X, y, test_size=0.2, random_state=rand_state)
230 |
231 | print('Using:', orient, 'orientations', pix_per_cell,
232 | 'pixels per cell and', cell_per_block, 'cells per block')
233 | print('Feature vector length:', len(X_train[0]))
234 | # Use a linear SVC
235 | svc = LinearSVC()
236 | # Check the training time for the SVC
237 | t = time.time()
238 | svc.fit(X_train, y_train)
239 | t2 = time.time()
240 | print(round(t2 - t, 2), 'Seconds to train SVC...')
241 | # Check the score of the SVC
242 | print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
243 | # Check the prediction time for a single sample
244 | t = time.time()
245 | n_predict = 10
246 | print('My SVC predicts: ', svc.predict(X_test[0:n_predict]))
247 | print('For these', n_predict, 'labels: ', y_test[0:n_predict])
248 | t2 = time.time()
249 | print(round(t2 - t, 5), 'Seconds to predict', n_predict, 'labels with SVC')
250 |
251 |
252 | def test_search_features(self):
253 | ### TODO: Tweak these parameters and see how the results change.
254 | car_file_names = glob.glob('../data/vehicles/*/*.png')
255 | notcar_file_names = glob.glob('../data/non-vehicles/*/*.png')
256 |
257 | color_space = 'HSV' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
258 | orient = 9 # HOG orientations
259 | pix_per_cell = 8 # HOG pixels per cell
260 | cell_per_block = 2 # HOG cells per block
261 | hog_channel = "ALL" # Can be 0, 1, 2, or "ALL"
262 | spatial_size = (16, 16) # Spatial binning dimensions
263 | hist_bins = 16 # Number of histogram bins
264 | spatial_feat = True # Spatial features on or off
265 | hist_feat = True # Histogram features on or off
266 | hog_feat = True # HOG features on or off
267 | y_start_stop = [None, None] # Min and max in y to search in slide_window()
268 |
269 | car_features = extract_features_all(car_file_names, color_space=color_space,
270 | spatial_size=spatial_size, hist_bins=hist_bins,
271 | orient=orient, pix_per_cell=pix_per_cell,
272 | cell_per_block=cell_per_block,
273 | hog_channel=hog_channel, spatial_feat=spatial_feat,
274 | hist_feat=hist_feat, hog_feat=hog_feat)
275 | notcar_features = extract_features_all(notcar_file_names, color_space=color_space,
276 | spatial_size=spatial_size, hist_bins=hist_bins,
277 | orient=orient, pix_per_cell=pix_per_cell,
278 | cell_per_block=cell_per_block,
279 | hog_channel=hog_channel, spatial_feat=spatial_feat,
280 | hist_feat=hist_feat, hog_feat=hog_feat)
281 |
282 | X = np.vstack((car_features, notcar_features)).astype(np.float64)
283 | # Fit a per-column scaler
284 | X_scaler = StandardScaler().fit(X)
285 | # Apply the scaler to X
286 | scaled_X = X_scaler.transform(X)
287 |
288 | # Define the labels vector
289 | y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
290 |
291 | # Split up data into randomized training and test sets
292 | rand_state = np.random.randint(0, 100)
293 | X_train, X_test, y_train, y_test = train_test_split(
294 | scaled_X, y, test_size=0.2, random_state=rand_state)
295 |
296 | print('Using:', orient, 'orientations', pix_per_cell,
297 | 'pixels per cell and', cell_per_block, 'cells per block')
298 | print('Feature vector length:', len(X_train[0]))
299 | # Use a linear SVC
300 | svc = LinearSVC()
301 | # Check the training time for the SVC
302 | t = time.time()
303 | svc.fit(X_train, y_train)
304 | t2 = time.time()
305 | print(round(t2 - t, 2), 'Seconds to train SVC...')
306 | # Check the score of the SVC
307 | print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
308 | # Check the prediction time for a single sample
309 | t = time.time()
310 |
311 | image = mpimg.imread("../test_images/test1.jpg")
312 | draw_image = np.copy(image)
313 |
314 | # Uncomment the following line if you extracted training
315 | # data from .png images (scaled 0 to 1 by mpimg) and the
316 | # image you are searching is a .jpg (scaled 0 to 255)
317 | # image = image.astype(np.float32)/255
318 |
319 | windows = slide_window(image, x_start_stop=[None, None], y_start_stop=y_start_stop,
320 | xy_window=(350, 200), xy_overlap=(0.1, 0.1))
321 |
322 | hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space,
323 | spatial_size=spatial_size, hist_bins=hist_bins,
324 | orient=orient, pix_per_cell=pix_per_cell,
325 | cell_per_block=cell_per_block,
326 | hog_channel=hog_channel, spatial_feat=spatial_feat,
327 | hist_feat=hist_feat, hog_feat=hog_feat)
328 |
329 | window_img = draw_boxes(draw_image, hot_windows, color=(0, 0, 255), thick=2)
330 |
331 | plt.imshow(window_img)
332 | plt.savefig("../output_images/features_search/test1.png")
333 |
334 |
--------------------------------------------------------------------------------
/test/test_label_parser.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import cv2
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import glob
6 | import os
7 | import time
8 | from features import get_hog_features, extract_color_features, extract_hog_features, extract_features_all
9 | from search_windows import search_windows, slide_window, draw_boxes
10 | import matplotlib.image as mpimg
11 | from sklearn.preprocessing import StandardScaler
12 | from sklearn.model_selection import train_test_split
13 | from sklearn.svm import LinearSVC
14 |
15 |
16 | import pandas as pd
17 |
18 | dir_label = ['../data/object-dataset',
19 | 'object-detection-crowdai']
20 |
21 | class LabelParseTest(unittest.TestCase):
22 |
23 | def test_parse_label_csv_file(self):
24 | df_files2 = pd.read_csv(dir_label[0] + '/labels.csv', header=None, sep=' ')
25 |
26 | df_files2.columns = ['Frame', 'xmin', 'xmax', 'ymin', 'ymax', 'ind', 'Label', 'RM']
27 | df_vehicles2 = df_files2[(df_files2['Label'] == 'car') | (df_files2['Label'] == 'truck')].reset_index()
28 | df_vehicles2 = df_vehicles2.drop('index', 1)
29 | df_vehicles2 = df_vehicles2.drop('RM', 1)
30 | df_vehicles2 = df_vehicles2.drop('ind', 1)
31 |
32 | df_vehicles2['File_Path'] = dir_label[0] + '/' + df_vehicles2['Frame']
33 |
34 | print(df_vehicles2.head())
35 |
36 |
--------------------------------------------------------------------------------
/test/test_window.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import cv2
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | import glob
6 | import os
7 | import time
8 | from features import get_hog_features, extract_color_features, extract_hog_features, extract_features_all
9 | from search_windows import search_windows, slide_window, draw_boxes
10 | import matplotlib.image as mpimg
11 | from sklearn.preprocessing import StandardScaler
12 | from sklearn.model_selection import train_test_split
13 | from sklearn.svm import LinearSVC
14 |
15 |
16 | class SlideWindowSettings(object):
17 | def __init__(self, name, y_start_stop, size, rect_color, xy_overlap, thick):
18 | self.name = name
19 | self.y_start_stop = y_start_stop
20 | self.size = size
21 | self.rect_color = rect_color
22 | self.xy_overlap = xy_overlap
23 | self.thick = thick
24 |
25 |
26 | def create_slide_window_settings(image_height, disable_overlap):
27 | def overlap(real_overlap):
28 | if disable_overlap:
29 | return (1., 1.)
30 | else:
31 | return real_overlap
32 |
33 | half = image_height // 2
34 | return [
35 | SlideWindowSettings("far", y_start_stop=(half, half + 130), size=(60, 60), rect_color=(0, 0, 200),
36 | xy_overlap=overlap((0.5, 0.5)), thick=1),
37 | SlideWindowSettings("far", y_start_stop=(half, half + 180), size=(130, 80), rect_color=(0, 0, 200),
38 | xy_overlap=overlap((0.5, 0.5)), thick=1),
39 | SlideWindowSettings("mid", y_start_stop=(half, half + 180), size=(170, 100), rect_color=(0, 0, 200),
40 | xy_overlap=overlap((0.5, 0.5)), thick=1),
41 | SlideWindowSettings("mid left far away", y_start_stop=(half, half + 180), size=(170, 170), rect_color=(0, 0, 200),
42 | xy_overlap=overlap((0.5, 0.5)), thick=1),
43 | SlideWindowSettings("mid", y_start_stop=(half, half + 180), size=(290, 140), rect_color=(0, 0, 200),
44 | xy_overlap=overlap((0.5, 0.5)), thick=1),
45 | SlideWindowSettings("mid", y_start_stop=(half + 50, half + 300), size=(350, 200), rect_color=(0, 0, 200),
46 | xy_overlap=(0.6, 0.6), thick=3)
47 | ]
48 |
49 |
50 |
51 | class SearchWindowTest(unittest.TestCase):
52 |
53 | @staticmethod
54 | def apply_slide_window_to_file(file_name, slide_window_settings):
55 | image = mpimg.imread("../test_images/{}".format(file_name))
56 | image_height = image.shape[0]
57 | half = image_height // 2
58 |
59 | draw_image = np.copy(image)
60 | all_windows = []
61 | windows = slide_window(image, x_start_stop=[None, None], y_start_stop=slide_window_settings.y_start_stop,
62 | xy_window=slide_window_settings.size, xy_overlap=slide_window_settings.xy_overlap)
63 |
64 | draw_image = draw_boxes(draw_image, windows,
65 | color=slide_window_settings.rect_color, thick=slide_window_settings.thick)
66 | all_windows += windows
67 |
68 | plt.imshow(draw_image)
69 | plt.savefig("../output_images/slide_window/{}.png".format(file_name))
70 |
71 |
72 | def test_slide_window_close(self):
73 | SearchWindowTest.apply_slide_window_to_file(
74 | "746.jpg",
75 | create_slide_window_settings(720, False)[5]
76 | )
77 |
78 | def test_slide_window_mid(self):
79 | SearchWindowTest.apply_slide_window_to_file(
80 | "test1.jpg",
81 | create_slide_window_settings(720, False)[4]
82 | )
83 |
84 | def test_search_window(self):
85 | image = mpimg.imread("../test_images/test1.jpg")
86 | height = image.shape[0]
87 | y_start_stop = (height // 2, height - 30)
88 |
89 | window_settings = [
90 | {"y_start_stop": (height // 2, height // 2 + 120), "size": (60, 60), "color": (0, 0, 200)},
91 | {"y_start_stop": (height // 2, height // 2 + 180), "size": (90, 90), "color": (0, 200, 200)},
92 | {"y_start_stop": (height // 2, height - 30), "size": (300, 160), "color": (0, 200, 0)},
93 | {"y_start_stop": (height // 2, height - 10), "size": (400, 180), "color": (200, 0, 0)},
94 | ]
95 |
96 | draw_image = np.copy(image)
97 | all_windows = []
98 | for settings in window_settings:
99 | windows = slide_window(image, x_start_stop=[None, None], y_start_stop=settings.get("y_start_stop"),
100 | xy_window=settings.get("size"), xy_overlap=(0.5, 0.5))
101 |
102 | draw_image = draw_boxes(draw_image, windows, color=settings.get("color"), thick=3)
103 | all_windows += windows
104 |
105 | plt.imshow(draw_image)
106 | plt.savefig("../output_images/slide_window.png")
107 |
--------------------------------------------------------------------------------
/test_images/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/.DS_Store
--------------------------------------------------------------------------------
/test_images/1047.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/1047.jpg
--------------------------------------------------------------------------------
/test_images/602.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/602.jpg
--------------------------------------------------------------------------------
/test_images/714.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/714.jpg
--------------------------------------------------------------------------------
/test_images/746.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/746.jpg
--------------------------------------------------------------------------------
/test_images/993.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/993.jpg
--------------------------------------------------------------------------------
/test_images/car1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/car1.png
--------------------------------------------------------------------------------
/test_images/straight_lines1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/straight_lines1.jpg
--------------------------------------------------------------------------------
/test_images/straight_lines2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/straight_lines2.jpg
--------------------------------------------------------------------------------
/test_images/test1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/test1.jpg
--------------------------------------------------------------------------------
/test_images/test2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/test2.jpg
--------------------------------------------------------------------------------
/test_images/test3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/test3.jpg
--------------------------------------------------------------------------------
/test_images/test4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/test4.jpg
--------------------------------------------------------------------------------
/test_images/test5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/test5.jpg
--------------------------------------------------------------------------------
/test_images/test6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_images/test6.jpg
--------------------------------------------------------------------------------
/test_video.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/test_video.mp4
--------------------------------------------------------------------------------
/thresholding.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 |
5 | # Define a function that takes an image, gradient orientation,
6 | # and threshold min / max values.
7 | def abs_sobel_thresh(gray, orient='x', sobel_kernel=3, thresh=(0, 255)):
8 | # Apply x or y gradient with the OpenCV Sobel() function
9 | # and take the absolute value
10 | if orient == 'x':
11 | abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
12 | if orient == 'y':
13 | abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
14 | # Rescale back to 8 bit integer
15 | scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
16 | # Create a copy and apply the threshold
17 | binary_output = np.zeros_like(scaled_sobel)
18 | # Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too
19 | binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
20 |
21 | # Return the result
22 | return binary_output
23 |
24 |
25 | # Define a function to return the magnitude of the gradient
26 | # for a given sobel kernel size and threshold values
27 | def mag_thresh(gray, sobel_kernel=3, mag_thresh=(0, 255)):
28 | # Take both Sobel x and y gradients
29 | sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
30 | sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
31 | # Calculate the gradient magnitude
32 | gradmag = np.sqrt(sobelx ** 2 + sobely ** 2)
33 | # Rescale to 8 bit
34 | scale_factor = np.max(gradmag) / 255
35 | gradmag = (gradmag / scale_factor).astype(np.uint8)
36 | # Create a binary image of ones where threshold is met, zeros otherwise
37 | binary_output = np.zeros_like(gradmag)
38 | binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
39 |
40 | # Return the binary image
41 | return binary_output
42 |
43 |
44 | # Define a function to threshold an image for a given range and Sobel kernel
45 | def dir_threshold(gray, sobel_kernel=3, thresh=(0, np.pi / 2)):
46 | # Calculate the x and y gradients
47 | sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
48 | sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
49 | # Take the absolute value of the gradient direction,
50 | # apply a threshold, and create a binary image result
51 | absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
52 | binary_output = np.zeros_like(absgraddir)
53 | binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
54 |
55 | # Return the binary image
56 | return binary_output
57 |
58 |
59 | def combine_threshold(gray_image):
60 | # Choose a Sobel kernel size
61 | ksize = 5 # Choose a larger odd number to smooth gradient measurements
62 |
63 | # Apply each of the thresholding functions
64 | gradx = abs_sobel_thresh(gray_image, orient='x', sobel_kernel=ksize, thresh=(10, 100))
65 | grady = abs_sobel_thresh(gray_image, orient='y', sobel_kernel=ksize, thresh=(20, 100))
66 | mag_binary = mag_thresh(gray_image, sobel_kernel=ksize, mag_thresh=(30, 100))
67 | dir_binary = dir_threshold(gray_image, sobel_kernel=ksize, thresh=(0.7, 1.3))
68 |
69 | combined = np.zeros_like(dir_binary)
70 |
71 | combined[((gradx == 1) & (grady == 1))
72 | | ((gradx == 1) & (dir_binary == 1))
73 | | ((mag_binary == 1) & (dir_binary == 1))] = 1
74 |
75 | return combined
76 |
77 |
78 | def hls_channel_threshold(hls_img, h_thresh=(170, 255), l_thresh=(170, 255), s_thresh=(170, 255)):
79 | hls_img = np.copy(hls_img)
80 | h_channel = hls_img[:, :, 0]
81 | l_channel = hls_img[:, :, 1]
82 | s_channel = hls_img[:, :, 2]
83 |
84 | h_binary = np.zeros_like(h_channel)
85 | h_binary[(h_channel >= h_thresh[0]) & (h_channel <= h_thresh[1])] = 1
86 |
87 | l_binary = np.zeros_like(l_channel)
88 | l_binary[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1
89 |
90 | s_binary = np.zeros_like(s_channel)
91 | s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
92 | return h_binary, l_binary, s_binary
93 |
94 |
95 | def bgr_channel_threshold(bgr_img, b_thresh=(250, 255), g_thresh=(250, 255), r_thresh=(250, 255)):
96 | bgr_img = np.copy(bgr_img)
97 | b_channel = bgr_img[:, :, 0]
98 | g_channel = bgr_img[:, :, 1]
99 | r_channel = bgr_img[:, :, 2]
100 |
101 | b_binary = np.zeros_like(b_channel)
102 | b_binary[(b_channel >= b_thresh[0]) & (b_channel <= b_thresh[1])] = 1
103 |
104 | g_binary = np.zeros_like(g_channel)
105 | g_binary[(g_channel >= g_thresh[0]) & (g_channel <= g_thresh[1])] = 1
106 |
107 | r_binary = np.zeros_like(r_channel)
108 | r_binary[(r_channel >= r_thresh[0]) & (r_channel <= r_thresh[1])] = 1
109 | return b_binary, g_binary, r_binary
110 |
111 |
112 | def combine_with_or(*channels):
113 | combined = None
114 | for channel in channels:
115 | if combined is None:
116 | combined = np.copy(channel)
117 | else:
118 | combined[(combined == 1) | (channel == 1)] = 1
119 |
120 | return combined
121 |
122 |
123 | def combine_with_and(*channels):
124 | combined = None
125 | for channel in channels:
126 | if combined is None:
127 | combined = np.copy(channel)
128 | else:
129 | new = np.zeros_like(combined)
130 | new[(combined == 1) & (channel == 1)] = 1
131 | combined = new
132 |
133 | return combined
134 |
135 |
136 | def pipeline(img):
137 | img = np.copy(img)
138 | gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
139 | hls_image = cv2.cvtColor(img, cv2.COLOR_BGR2HLS).astype(np.float)
140 |
141 | combined = combine_with_or(
142 | abs_sobel_thresh(gray_image, orient='x', sobel_kernel=25, thresh=(50, 150)),
143 | combine_with_or(
144 | *bgr_channel_threshold(img, b_thresh=(220, 255), g_thresh=(220, 255), r_thresh=(220, 255))
145 | ),
146 | combine_with_and(
147 | hls_channel_threshold(hls_image, s_thresh=(170, 255))[2],
148 | abs_sobel_thresh(gray_image, orient='x', sobel_kernel=5, thresh=(10, 100))
149 | )
150 | )
151 | return combined
152 |
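
# --- Usage sketch (illustration only; not part of the original module) ---
# Mirrors how test/pipe_line_test.py exercises pipeline(): feed it a BGR frame
# read with cv2.imread and save the resulting binary image as a gray figure.
def save_pipeline_preview(frame_path='test_images/test1.jpg',
                          out_path='output_images/sobel_final_pipe_line.png'):
    import matplotlib.pyplot as plt  # local import keeps the module's top-level deps unchanged
    frame = cv2.imread(frame_path)   # BGR, as pipeline() expects
    binary = pipeline(frame)
    plt.figure()
    plt.imshow(binary, cmap='gray')
    plt.savefig(out_path)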
--------------------------------------------------------------------------------
/vehicle_detect_nn.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | tf.python.control_flow_ops = tf  # compatibility shim for older Keras releases that look up control_flow_ops on tf.python
3 | import numpy as np
4 | import cv2
5 |
6 | from keras.models import Model
7 | from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, Lambda
8 | from keras.optimizers import Adam
9 | from keras import backend as K
10 | from scipy.ndimage.measurements import label
11 |
12 |
13 | class VehicleDetector(object):
14 | def __init__(self, img_rows, img_cols, weights_file="model_segn_small_0p72.h5"):
15 | self.smooth = 1.0
16 | self.img_rows = img_rows
17 | self.img_cols = img_cols
18 | self.model = self.get_small_unet(img_rows, img_cols)
19 |
20 | self.model.compile(optimizer=Adam(lr=1e-4),
21 | loss=self.IOU_calc_loss, metrics=[self.IOU_calc])
22 | self.model.load_weights(weights_file)
23 |
24 | self.heatmap_prev = np.zeros((640, 960))
25 | self.heatmap_10 = [np.zeros((640, 960))] * 10
26 |
27 | def smooth_heatmap(self, heatmap):
28 | heatmap_10_1 = self.heatmap_10[1:]
29 | heatmap_10_1.append(heatmap)
30 |
31 | self.heatmap_10 = heatmap_10_1
32 |
33 | heatmap = np.mean(self.heatmap_10, axis=0)
34 |
35 | # heatmap = heatmap_prev*.2 + heatmap*.8
36 | # heatmap[heatmap>240] = 255
37 | # heatmap[heatmap<240] = 0
38 |
39 | return heatmap
40 |
41 | @staticmethod
42 | def get_small_unet(img_rows, img_cols):
43 | ## Redefining small U-net
44 | inputs = Input((img_rows, img_cols, 3))
45 | inputs_norm = Lambda(lambda x: x / 127.5 - 1.)  # note: this normalization layer is created but never applied to the graph
46 | conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(inputs)
47 | conv1 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(conv1)
48 | pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
49 |
50 | conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(pool1)
51 | conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv2)
52 | pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
53 |
54 | conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool2)
55 | conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv3)
56 | pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
57 |
58 | conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool3)
59 | conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
60 | pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
61 |
62 | conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool4)
63 | conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv5)
64 |
65 | up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=3)
66 | conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up6)
67 | conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv6)
68 |
69 | up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=3)
70 | conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up7)
71 | conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv7)
72 |
73 | up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=3)
74 | conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up8)
75 | conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv8)
76 |
77 | up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=3)
78 | conv9 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(up9)
79 | conv9 = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(conv9)
80 |
81 | conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
82 |
83 | model = Model(input=inputs, output=conv10)
84 |
85 | return model
86 |
87 | def IOU_calc(self, y_true, y_pred):
88 | # defining cost
89 | y_true_f = K.flatten(y_true)
90 | y_pred_f = K.flatten(y_pred)
91 | intersection = K.sum(y_true_f * y_pred_f)
92 |
93 | return 2 * (intersection + self.smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + self.smooth)
94 |
95 | def IOU_calc_loss(self, y_true, y_pred):
96 | # defining cost
97 | return -self.IOU_calc(y_true, y_pred)
98 |
99 | @staticmethod
100 | def _draw_labeled_bboxes(img, labels):
101 | # Iterate through all detected cars
102 | for car_number in range(1, labels[1] + 1):
103 | # Find pixels with each car_number label value
104 | nonzero = (labels[0] == car_number).nonzero()
105 | # Identify x and y values of those pixels
106 | nonzeroy = np.array(nonzero[0])
107 | nonzerox = np.array(nonzero[1])
108 | # Define a bounding box based on min/max x and y
109 | if ((np.max(nonzeroy) - np.min(nonzeroy) > 40) & (np.max(nonzerox) - np.min(nonzerox) > 40)):
110 | bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
111 | # Draw the box on the image
112 | print(bbox)
113 | cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)
114 | # Return the image
115 | return img
116 |
117 | def _test_new_img(self, img):
118 | # Test Unet on new image
119 | img = cv2.resize(img, (self.img_cols, self.img_rows))
120 | img = np.reshape(img, (1, self.img_rows, self.img_cols, 3))
121 | pred = self.model.predict(img)
122 | return pred, img[0]
123 |
124 | def get_BB_new_img(self, img):
125 | # Get bounding boxes
126 | pred, img = self._test_new_img(img)
127 | img = np.array(img, dtype=np.uint8)
128 | img_pred = np.array(255 * pred[0], dtype=np.uint8)
129 | heatmap = img_pred[:, :, 0]
130 | heatmap = self.smooth_heatmap(heatmap)
131 | labels = label(heatmap)
132 | draw_img = self._draw_labeled_bboxes(np.copy(img), labels)
133 | return draw_img
134 |
135 | @staticmethod
136 | def get_labeled_bboxes(img, labels):
137 | # Get labeled boxes
138 | bbox_all = []
139 | for car_number in range(1, labels[1] + 1):
140 | # Find pixels with each car_number label value
141 | nonzero = (labels[0] == car_number).nonzero()
142 | # Identify x and y values of those pixels
143 | nonzeroy = np.array(nonzero[0])
144 | nonzerox = np.array(nonzero[1])
145 |
146 | # Define a bounding box based on min/max x and y
147 | if ((np.max(nonzeroy) - np.min(nonzeroy) > 40) & (np.max(nonzerox) - np.min(nonzerox) > 40)):
148 | bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
149 | # Draw the box on the image
150 | # cv2.rectangle(img, bbox[0], bbox[1], (0,0,255),6)
151 | bbox_all.append(bbox)
152 | # Return the image
153 | return bbox_all
154 |
155 | def get_BB_new(self, img):
156 | # Take in RGB image
157 | pred, img = self._test_new_img(img)
158 | img = np.array(img, dtype=np.uint8)
159 | img_pred = np.array(255 * pred[0], dtype=np.uint8)
160 | heatmap = img_pred[:, :, 0]
161 | heatmap = self.smooth_heatmap(heatmap)
162 | # print(np.max(heatmap))
163 | heatmap[heatmap > 240] = 255
164 | heatmap[heatmap <= 240] = 0
165 | labels = label(heatmap)
166 |
167 | bbox_all = self.get_labeled_bboxes(np.copy(img), labels)
168 | return bbox_all
169 |
170 | def get_Unet_mask(self, img):
171 | # Take in RGB image
172 | pred, img = self._test_new_img(img)
173 | img = np.array(img, dtype=np.uint8)
174 | img_pred = np.array(255 * pred[0], dtype=np.uint8)
175 | heatmap = img_pred[:, :, 0]
176 | heatmap = self.smooth_heatmap(heatmap)
177 | # labels = label(heatmap)
178 | return self.stack_arr(heatmap)
179 |
180 | @staticmethod
181 | def stack_arr(arr):
182 | return np.stack((arr, arr, arr), axis=2)
183 |
184 |
--------------------------------------------------------------------------------
/writeup_template.md:
--------------------------------------------------------------------------------
1 | ## Writeup Template
2 | ### You can use this file as a template for your writeup if you want to submit it as a markdown file, but feel free to use some other method and submit a pdf if you prefer.
3 |
4 | ---
5 |
6 | **Vehicle Detection Project**
7 |
8 | The goals / steps of this project are the following:
9 |
10 | * Perform a Histogram of Oriented Gradients (HOG) feature extraction on a labeled training set of images and train a Linear SVM classifier
11 | * Optionally, you can also apply a color transform and append binned color features, as well as histograms of color, to your HOG feature vector.
12 | * Note: for those first two steps don't forget to normalize your features and randomize a selection for training and testing.
13 | * Implement a sliding-window technique and use your trained classifier to search for vehicles in images.
14 | * Run your pipeline on a video stream (start with the test_video.mp4 and later implement on full project_video.mp4) and create a heat map of recurring detections frame by frame to reject outliers and follow detected vehicles.
15 | * Estimate a bounding box for vehicles detected.
16 |
17 | [//]: # (Image References)
18 | [image1]: ./examples/car_not_car.png
19 | [image2]: ./examples/HOG_example.jpg
20 | [image3]: ./examples/sliding_windows.jpg
21 | [image4]: ./examples/sliding_window.jpg
22 | [image5]: ./examples/bboxes_and_heat.png
23 | [image6]: ./examples/labels_map.png
24 | [image7]: ./examples/output_bboxes.png
25 | [video1]: ./project_video.mp4
26 |
27 | ## [Rubric](https://review.udacity.com/#!/rubrics/513/view) Points
28 | ### Here I will consider the rubric points individually and describe how I addressed each point in my implementation.
29 |
30 | ---
31 | ### Writeup / README
32 |
33 | #### 1. Provide a Writeup / README that includes all the rubric points and how you addressed each one. You can submit your writeup as markdown or pdf. [Here](https://github.com/udacity/CarND-Vehicle-Detection/blob/master/writeup_template.md) is a template writeup for this project you can use as a guide and a starting point.
34 |
35 | You're reading it!
36 |
37 | ### Histogram of Oriented Gradients (HOG)
38 |
39 | #### 1. Explain how (and identify where in your code) you extracted HOG features from the training images.
40 |
41 | The code for this step is contained in the first code cell of the IPython notebook (or in lines # through # of the file called `some_file.py`).
42 |
43 | I started by reading in all the `vehicle` and `non-vehicle` images. Here is an example of one of each of the `vehicle` and `non-vehicle` classes:
44 |
45 | ![alt text][image1]
46 |
47 | I then explored different color spaces and different `skimage.hog()` parameters (`orientations`, `pixels_per_cell`, and `cells_per_block`). I grabbed random images from each of the two classes and displayed them to get a feel for what the `skimage.hog()` output looks like.
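
As a minimal sketch of that exploration (reusing `get_hog_features` from `features.py` with the same parameters as the unit test in `test/test_features.py`; the sample path is just one convenient image):

```python
import cv2
import matplotlib.image as mpimg
from features import get_hog_features

# One 64x64 training-style sample; any vehicle / non-vehicle image works the same way
image = mpimg.imread('test_images/car1.png')
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
features, hog_image = get_hog_features(gray, orient=9, pix_per_cell=8,
                                       cell_per_block=2, vis=True,
                                       feature_vec=False)
```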
48 |
49 | Here is an example using the `YCrCb` color space and HOG parameters of `orientations=8`, `pixels_per_cell=(8, 8)` and `cells_per_block=(2, 2)`:
50 |
51 |
52 | ![alt text][image2]
53 |
54 | #### 2. Explain how you settled on your final choice of HOG parameters.
55 |
56 | I tried various combinations of parameters and...
57 |
58 | #### 3. Describe how (and identify where in your code) you trained a classifier using your selected HOG features (and color features if you used them).
59 |
60 | I trained a linear SVM using...
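
As a rough sketch, the training flow exercised in `test/test_features.py` looks like this (same helpers and parameters; the paths assume the vehicle/non-vehicle data has been unpacked under `data/`):

```python
import glob
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from features import extract_hog_features

cars = glob.glob('data/vehicles/*/*.png')
notcars = glob.glob('data/non-vehicles/*/*.png')

# HOG features for both classes, then per-column normalization
car_features = extract_hog_features(cars, cspace='HSV', orient=9,
                                    pix_per_cell=8, cell_per_block=2, hog_channel='ALL')
notcar_features = extract_hog_features(notcars, cspace='HSV', orient=9,
                                       pix_per_cell=8, cell_per_block=2, hog_channel='ALL')
X = np.vstack((car_features, notcar_features)).astype(np.float64)
X_scaler = StandardScaler().fit(X)
scaled_X = X_scaler.transform(X)
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))

X_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.2)
svc = LinearSVC()
svc.fit(X_train, y_train)
print('Test accuracy:', round(svc.score(X_test, y_test), 4))
```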
61 |
62 | ### Sliding Window Search
63 |
64 | #### 1. Describe how (and identify where in your code) you implemented a sliding window search. How did you decide what scales to search and how much to overlap windows?
65 |
66 | I decided to search random window positions at random scales all over the image and came up with this (ok just kidding I didn't actually ;):
67 |
68 | ![alt text][image3]
69 |
70 | #### 2. Show some examples of test images to demonstrate how your pipeline is working. What did you do to optimize the performance of your classifier?
71 |
72 | Ultimately I searched on two scales using YCrCb 3-channel HOG features plus spatially binned color and histograms of color in the feature vector, which provided a nice result. Here are some example images:
73 |
74 | ![alt text][image4]
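
As a sketch of how two scales can be combined (the window sizes and search regions below are illustrative; the values actually exercised are in `test/test_window.py` and `test/test_features.py`, and the color settings must match whatever the classifier was trained with):

```python
import numpy as np
from search_windows import slide_window, search_windows, draw_boxes

def multi_scale_search(image, svc, X_scaler, color_space='YCrCb', hog_channel='ALL'):
    height = image.shape[0]
    windows = []
    # Smaller windows near the horizon, larger ones closer to the camera
    for size, y_stop in (((96, 96), height // 2 + 180), ((160, 160), height - 30)):
        windows += slide_window(image, x_start_stop=[None, None],
                                y_start_stop=[height // 2, y_stop],
                                xy_window=size, xy_overlap=(0.5, 0.5))
    hot_windows = search_windows(image, windows, svc, X_scaler,
                                 color_space=color_space, hog_channel=hog_channel)
    return draw_boxes(np.copy(image), hot_windows, color=(0, 0, 255), thick=3)
```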
75 | ---
76 |
77 | ### Video Implementation
78 |
79 | #### 1. Provide a link to your final video output. Your pipeline should perform reasonably well on the entire project video (somewhat wobbly or unstable bounding boxes are ok as long as you are identifying the vehicles most of the time with minimal false positives.)
80 | Here's a [link to my video result](./project_video.mp4)
81 |
82 |
83 | #### 2. Describe how (and identify where in your code) you implemented some kind of filter for false positives and some method for combining overlapping bounding boxes.
84 |
85 | I recorded the positions of positive detections in each frame of the video. From the positive detections I created a heatmap and then thresholded that map to identify vehicle positions. I then used `scipy.ndimage.measurements.label()` to identify individual blobs in the heatmap. I then assumed each blob corresponded to a vehicle. I constructed bounding boxes to cover the area of each blob detected.
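
A minimal sketch of that step (the helper name and the threshold value below are made up for illustration and are not taken verbatim from the project code):

```python
import numpy as np
import cv2
from scipy.ndimage.measurements import label

def heatmap_boxes(image, hot_windows, threshold=2):
    # Add one unit of "heat" per pixel for every positive detection window
    heat = np.zeros(image.shape[:2], dtype=np.float32)
    for (x1, y1), (x2, y2) in hot_windows:
        heat[y1:y2, x1:x2] += 1
    # Reject pixels supported by too few detections (false-positive filtering)
    heat[heat < threshold] = 0
    # Treat each remaining connected blob as one vehicle and box its extent
    labels = label(heat)
    draw_img = np.copy(image)
    for car_number in range(1, labels[1] + 1):
        ys, xs = (labels[0] == car_number).nonzero()
        cv2.rectangle(draw_img, (int(xs.min()), int(ys.min())),
                      (int(xs.max()), int(ys.max())), (0, 0, 255), 6)
    return draw_img
```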
86 |
87 | Here's an example result showing the heatmap from a series of frames of video, the result of `scipy.ndimage.measurements.label()` and the bounding boxes then overlaid on the last frame of video:
88 |
89 | ### Here are six frames and their corresponding heatmaps:
90 |
91 | ![alt text][image5]
92 |
93 | ### Here is the output of `scipy.ndimage.measurements.label()` on the integrated heatmap from all six frames:
94 | ![alt text][image6]
95 |
96 | ### Here the resulting bounding boxes are drawn onto the last frame in the series:
97 | ![alt text][image7]
98 |
99 |
100 |
101 | ---
102 |
103 | ### Discussion
104 |
105 | #### 1. Briefly discuss any problems / issues you faced in your implementation of this project. Where will your pipeline likely fail? What could you do to make it more robust?
106 |
107 | Here I'll talk about the approach I took, what techniques I used, what worked and why, where the pipeline might fail and how I might improve it if I were going to pursue this project further.
108 |
109 |
--------------------------------------------------------------------------------
/yolo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/yolo/__init__.py
--------------------------------------------------------------------------------
/yolo/cfg/darknet19_448.cfg:
--------------------------------------------------------------------------------
1 | [net]
2 | batch=128
3 | subdivisions=4
4 | height=448
5 | width=448
6 | max_crop=512
7 | channels=3
8 | momentum=0.9
9 | decay=0.0005
10 |
11 | learning_rate=0.001
12 | policy=poly
13 | power=4
14 | max_batches=100000
15 |
16 | angle=7
17 | hue = .1
18 | saturation=.75
19 | exposure=.75
20 | aspect=.75
21 |
22 | [convolutional]
23 | batch_normalize=1
24 | filters=32
25 | size=3
26 | stride=1
27 | pad=1
28 | activation=leaky
29 |
30 | [maxpool]
31 | size=2
32 | stride=2
33 |
34 | [convolutional]
35 | batch_normalize=1
36 | filters=64
37 | size=3
38 | stride=1
39 | pad=1
40 | activation=leaky
41 |
42 | [maxpool]
43 | size=2
44 | stride=2
45 |
46 | [convolutional]
47 | batch_normalize=1
48 | filters=128
49 | size=3
50 | stride=1
51 | pad=1
52 | activation=leaky
53 |
54 | [convolutional]
55 | batch_normalize=1
56 | filters=64
57 | size=1
58 | stride=1
59 | pad=1
60 | activation=leaky
61 |
62 | [convolutional]
63 | batch_normalize=1
64 | filters=128
65 | size=3
66 | stride=1
67 | pad=1
68 | activation=leaky
69 |
70 | [maxpool]
71 | size=2
72 | stride=2
73 |
74 | [convolutional]
75 | batch_normalize=1
76 | filters=256
77 | size=3
78 | stride=1
79 | pad=1
80 | activation=leaky
81 |
82 | [convolutional]
83 | batch_normalize=1
84 | filters=128
85 | size=1
86 | stride=1
87 | pad=1
88 | activation=leaky
89 |
90 | [convolutional]
91 | batch_normalize=1
92 | filters=256
93 | size=3
94 | stride=1
95 | pad=1
96 | activation=leaky
97 |
98 | [maxpool]
99 | size=2
100 | stride=2
101 |
102 | [convolutional]
103 | batch_normalize=1
104 | filters=512
105 | size=3
106 | stride=1
107 | pad=1
108 | activation=leaky
109 |
110 | [convolutional]
111 | batch_normalize=1
112 | filters=256
113 | size=1
114 | stride=1
115 | pad=1
116 | activation=leaky
117 |
118 | [convolutional]
119 | batch_normalize=1
120 | filters=512
121 | size=3
122 | stride=1
123 | pad=1
124 | activation=leaky
125 |
126 | [convolutional]
127 | batch_normalize=1
128 | filters=256
129 | size=1
130 | stride=1
131 | pad=1
132 | activation=leaky
133 |
134 | [convolutional]
135 | batch_normalize=1
136 | filters=512
137 | size=3
138 | stride=1
139 | pad=1
140 | activation=leaky
141 |
142 | [maxpool]
143 | size=2
144 | stride=2
145 |
146 | [convolutional]
147 | batch_normalize=1
148 | filters=1024
149 | size=3
150 | stride=1
151 | pad=1
152 | activation=leaky
153 |
154 | [convolutional]
155 | batch_normalize=1
156 | filters=512
157 | size=1
158 | stride=1
159 | pad=1
160 | activation=leaky
161 |
162 | [convolutional]
163 | batch_normalize=1
164 | filters=1024
165 | size=3
166 | stride=1
167 | pad=1
168 | activation=leaky
169 |
170 | [convolutional]
171 | batch_normalize=1
172 | filters=512
173 | size=1
174 | stride=1
175 | pad=1
176 | activation=leaky
177 |
178 | [convolutional]
179 | batch_normalize=1
180 | filters=1024
181 | size=3
182 | stride=1
183 | pad=1
184 | activation=leaky
185 |
186 | [convolutional]
187 | filters=1000
188 | size=1
189 | stride=1
190 | pad=1
191 | activation=linear
192 |
193 | [avgpool]
194 |
195 | [softmax]
196 | groups=1
197 |
198 | [cost]
199 | type=sse
200 |
201 |
--------------------------------------------------------------------------------
/yolo/cfg/tiny-yolo-voc.cfg:
--------------------------------------------------------------------------------
1 | [net]
2 | batch=64
3 | subdivisions=8
4 | width=416
5 | height=416
6 | channels=3
7 | momentum=0.9
8 | decay=0.0005
9 | angle=0
10 | saturation = 1.5
11 | exposure = 1.5
12 | hue=.1
13 |
14 | learning_rate=0.001
15 | max_batches = 40100
16 | policy=steps
17 | steps=-1,100,20000,30000
18 | scales=.1,10,.1,.1
19 |
20 | [convolutional]
21 | batch_normalize=1
22 | filters=16
23 | size=3
24 | stride=1
25 | pad=1
26 | activation=leaky
27 |
28 | [maxpool]
29 | size=2
30 | stride=2
31 |
32 | [convolutional]
33 | batch_normalize=1
34 | filters=32
35 | size=3
36 | stride=1
37 | pad=1
38 | activation=leaky
39 |
40 | [maxpool]
41 | size=2
42 | stride=2
43 |
44 | [convolutional]
45 | batch_normalize=1
46 | filters=64
47 | size=3
48 | stride=1
49 | pad=1
50 | activation=leaky
51 |
52 | [maxpool]
53 | size=2
54 | stride=2
55 |
56 | [convolutional]
57 | batch_normalize=1
58 | filters=128
59 | size=3
60 | stride=1
61 | pad=1
62 | activation=leaky
63 |
64 | [maxpool]
65 | size=2
66 | stride=2
67 |
68 | [convolutional]
69 | batch_normalize=1
70 | filters=256
71 | size=3
72 | stride=1
73 | pad=1
74 | activation=leaky
75 |
76 | [maxpool]
77 | size=2
78 | stride=2
79 |
80 | [convolutional]
81 | batch_normalize=1
82 | filters=512
83 | size=3
84 | stride=1
85 | pad=1
86 | activation=leaky
87 |
88 | [maxpool]
89 | size=2
90 | stride=1
91 |
92 | [convolutional]
93 | batch_normalize=1
94 | filters=1024
95 | size=3
96 | stride=1
97 | pad=1
98 | activation=leaky
99 |
100 | ###########
101 |
102 | [convolutional]
103 | batch_normalize=1
104 | size=3
105 | stride=1
106 | pad=1
107 | filters=1024
108 | activation=leaky
109 |
110 | [convolutional]
111 | size=1
112 | stride=1
113 | pad=1
114 | filters=125
115 | activation=linear
116 |
117 | [region]
118 | anchors = 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
119 | bias_match=1
120 | classes=20
121 | coords=4
122 | num=5
123 | softmax=1
124 | jitter=.2
125 | rescore=1
126 |
127 | object_scale=5
128 | noobject_scale=1
129 | class_scale=1
130 | coord_scale=1
131 |
132 | absolute=1
133 | thresh = .6
134 | random=1
135 |
--------------------------------------------------------------------------------
/yolo/cfg/yolo-voc.cfg:
--------------------------------------------------------------------------------
1 | [net]
2 | batch=64
3 | subdivisions=8
4 | height=416
5 | width=416
6 | channels=3
7 | momentum=0.9
8 | decay=0.0005
9 | angle=0
10 | saturation = 1.5
11 | exposure = 1.5
12 | hue=.1
13 |
14 | learning_rate=0.0001
15 | max_batches = 45000
16 | policy=steps
17 | steps=100,25000,35000
18 | scales=10,.1,.1
19 |
20 | [convolutional]
21 | batch_normalize=1
22 | filters=32
23 | size=3
24 | stride=1
25 | pad=1
26 | activation=leaky
27 |
28 | [maxpool]
29 | size=2
30 | stride=2
31 |
32 | [convolutional]
33 | batch_normalize=1
34 | filters=64
35 | size=3
36 | stride=1
37 | pad=1
38 | activation=leaky
39 |
40 | [maxpool]
41 | size=2
42 | stride=2
43 |
44 | [convolutional]
45 | batch_normalize=1
46 | filters=128
47 | size=3
48 | stride=1
49 | pad=1
50 | activation=leaky
51 |
52 | [convolutional]
53 | batch_normalize=1
54 | filters=64
55 | size=1
56 | stride=1
57 | pad=1
58 | activation=leaky
59 |
60 | [convolutional]
61 | batch_normalize=1
62 | filters=128
63 | size=3
64 | stride=1
65 | pad=1
66 | activation=leaky
67 |
68 | [maxpool]
69 | size=2
70 | stride=2
71 |
72 | [convolutional]
73 | batch_normalize=1
74 | filters=256
75 | size=3
76 | stride=1
77 | pad=1
78 | activation=leaky
79 |
80 | [convolutional]
81 | batch_normalize=1
82 | filters=128
83 | size=1
84 | stride=1
85 | pad=1
86 | activation=leaky
87 |
88 | [convolutional]
89 | batch_normalize=1
90 | filters=256
91 | size=3
92 | stride=1
93 | pad=1
94 | activation=leaky
95 |
96 | [maxpool]
97 | size=2
98 | stride=2
99 |
100 | [convolutional]
101 | batch_normalize=1
102 | filters=512
103 | size=3
104 | stride=1
105 | pad=1
106 | activation=leaky
107 |
108 | [convolutional]
109 | batch_normalize=1
110 | filters=256
111 | size=1
112 | stride=1
113 | pad=1
114 | activation=leaky
115 |
116 | [convolutional]
117 | batch_normalize=1
118 | filters=512
119 | size=3
120 | stride=1
121 | pad=1
122 | activation=leaky
123 |
124 | [convolutional]
125 | batch_normalize=1
126 | filters=256
127 | size=1
128 | stride=1
129 | pad=1
130 | activation=leaky
131 |
132 | [convolutional]
133 | batch_normalize=1
134 | filters=512
135 | size=3
136 | stride=1
137 | pad=1
138 | activation=leaky
139 |
140 | [maxpool]
141 | size=2
142 | stride=2
143 |
144 | [convolutional]
145 | batch_normalize=1
146 | filters=1024
147 | size=3
148 | stride=1
149 | pad=1
150 | activation=leaky
151 |
152 | [convolutional]
153 | batch_normalize=1
154 | filters=512
155 | size=1
156 | stride=1
157 | pad=1
158 | activation=leaky
159 |
160 | [convolutional]
161 | batch_normalize=1
162 | filters=1024
163 | size=3
164 | stride=1
165 | pad=1
166 | activation=leaky
167 |
168 | [convolutional]
169 | batch_normalize=1
170 | filters=512
171 | size=1
172 | stride=1
173 | pad=1
174 | activation=leaky
175 |
176 | [convolutional]
177 | batch_normalize=1
178 | filters=1024
179 | size=3
180 | stride=1
181 | pad=1
182 | activation=leaky
183 |
184 |
185 | #######
186 |
187 | [convolutional]
188 | batch_normalize=1
189 | size=3
190 | stride=1
191 | pad=1
192 | filters=1024
193 | activation=leaky
194 |
195 | [convolutional]
196 | batch_normalize=1
197 | size=3
198 | stride=1
199 | pad=1
200 | filters=1024
201 | activation=leaky
202 |
203 | [route]
204 | layers=-9
205 |
206 | [reorg]
207 | stride=2
208 |
209 | [route]
210 | layers=-1,-3
211 |
212 | [convolutional]
213 | batch_normalize=1
214 | size=3
215 | stride=1
216 | pad=1
217 | filters=1024
218 | activation=leaky
219 |
220 | [convolutional]
221 | size=1
222 | stride=1
223 | pad=1
224 | filters=125
225 | activation=linear
226 |
227 | [region]
228 | anchors = 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
229 | bias_match=1
230 | classes=20
231 | coords=4
232 | num=5
233 | softmax=1
234 | jitter=.2
235 | rescore=1
236 |
237 | object_scale=5
238 | noobject_scale=1
239 | class_scale=1
240 | coord_scale=1
241 |
242 | absolute=1
243 | thresh = .6
244 | random=0
245 |
--------------------------------------------------------------------------------
/yolo/cfg/yolo.cfg:
--------------------------------------------------------------------------------
1 | [net]
2 | batch=1
3 | subdivisions=1
4 | width=416
5 | height=416
6 | channels=3
7 | momentum=0.9
8 | decay=0.0005
9 | angle=0
10 | saturation = 1.5
11 | exposure = 1.5
12 | hue=.1
13 |
14 | learning_rate=0.001
15 | max_batches = 120000
16 | policy=steps
17 | steps=-1,100,80000,100000
18 | scales=.1,10,.1,.1
19 |
20 | [convolutional]
21 | batch_normalize=1
22 | filters=32
23 | size=3
24 | stride=1
25 | pad=1
26 | activation=leaky
27 |
28 | [maxpool]
29 | size=2
30 | stride=2
31 |
32 | [convolutional]
33 | batch_normalize=1
34 | filters=64
35 | size=3
36 | stride=1
37 | pad=1
38 | activation=leaky
39 |
40 | [maxpool]
41 | size=2
42 | stride=2
43 |
44 | [convolutional]
45 | batch_normalize=1
46 | filters=128
47 | size=3
48 | stride=1
49 | pad=1
50 | activation=leaky
51 |
52 | [convolutional]
53 | batch_normalize=1
54 | filters=64
55 | size=1
56 | stride=1
57 | pad=1
58 | activation=leaky
59 |
60 | [convolutional]
61 | batch_normalize=1
62 | filters=128
63 | size=3
64 | stride=1
65 | pad=1
66 | activation=leaky
67 |
68 | [maxpool]
69 | size=2
70 | stride=2
71 |
72 | [convolutional]
73 | batch_normalize=1
74 | filters=256
75 | size=3
76 | stride=1
77 | pad=1
78 | activation=leaky
79 |
80 | [convolutional]
81 | batch_normalize=1
82 | filters=128
83 | size=1
84 | stride=1
85 | pad=1
86 | activation=leaky
87 |
88 | [convolutional]
89 | batch_normalize=1
90 | filters=256
91 | size=3
92 | stride=1
93 | pad=1
94 | activation=leaky
95 |
96 | [maxpool]
97 | size=2
98 | stride=2
99 |
100 | [convolutional]
101 | batch_normalize=1
102 | filters=512
103 | size=3
104 | stride=1
105 | pad=1
106 | activation=leaky
107 |
108 | [convolutional]
109 | batch_normalize=1
110 | filters=256
111 | size=1
112 | stride=1
113 | pad=1
114 | activation=leaky
115 |
116 | [convolutional]
117 | batch_normalize=1
118 | filters=512
119 | size=3
120 | stride=1
121 | pad=1
122 | activation=leaky
123 |
124 | [convolutional]
125 | batch_normalize=1
126 | filters=256
127 | size=1
128 | stride=1
129 | pad=1
130 | activation=leaky
131 |
132 | [convolutional]
133 | batch_normalize=1
134 | filters=512
135 | size=3
136 | stride=1
137 | pad=1
138 | activation=leaky
139 |
140 | [maxpool]
141 | size=2
142 | stride=2
143 |
144 | [convolutional]
145 | batch_normalize=1
146 | filters=1024
147 | size=3
148 | stride=1
149 | pad=1
150 | activation=leaky
151 |
152 | [convolutional]
153 | batch_normalize=1
154 | filters=512
155 | size=1
156 | stride=1
157 | pad=1
158 | activation=leaky
159 |
160 | [convolutional]
161 | batch_normalize=1
162 | filters=1024
163 | size=3
164 | stride=1
165 | pad=1
166 | activation=leaky
167 |
168 | [convolutional]
169 | batch_normalize=1
170 | filters=512
171 | size=1
172 | stride=1
173 | pad=1
174 | activation=leaky
175 |
176 | [convolutional]
177 | batch_normalize=1
178 | filters=1024
179 | size=3
180 | stride=1
181 | pad=1
182 | activation=leaky
183 |
184 |
185 | #######
186 |
187 | [convolutional]
188 | batch_normalize=1
189 | size=3
190 | stride=1
191 | pad=1
192 | filters=1024
193 | activation=leaky
194 |
195 | [convolutional]
196 | batch_normalize=1
197 | size=3
198 | stride=1
199 | pad=1
200 | filters=1024
201 | activation=leaky
202 |
203 | [route]
204 | layers=-9
205 |
206 | [reorg]
207 | stride=2
208 |
209 | [route]
210 | layers=-1,-3
211 |
212 | [convolutional]
213 | batch_normalize=1
214 | size=3
215 | stride=1
216 | pad=1
217 | filters=1024
218 | activation=leaky
219 |
220 | [convolutional]
221 | size=1
222 | stride=1
223 | pad=1
224 | filters=425
225 | activation=linear
226 |
227 | [region]
228 | anchors = 0.738768,0.874946, 2.42204,2.65704, 4.30971,7.04493, 10.246,4.59428, 12.6868,11.8741
229 | bias_match=1
230 | classes=80
231 | coords=4
232 | num=5
233 | softmax=1
234 | jitter=.2
235 | rescore=1
236 |
237 | object_scale=5
238 | noobject_scale=1
239 | class_scale=1
240 | coord_scale=1
241 |
242 | absolute=1
243 | thresh=.3
244 | random=0
245 |
--------------------------------------------------------------------------------
/yolo/font/FiraMono-Medium.otf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/yolo/font/FiraMono-Medium.otf
--------------------------------------------------------------------------------
/yolo/font/SIL Open Font License.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014, Mozilla Foundation https://mozilla.org/ with Reserved Font Name Fira Mono.
2 |
3 | Copyright (c) 2014, Telefonica S.A.
4 |
5 | This Font Software is licensed under the SIL Open Font License, Version 1.1.
6 | This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL
7 |
8 | -----------------------------------------------------------
9 | SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
10 | -----------------------------------------------------------
11 |
12 | PREAMBLE
13 | The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others.
14 |
15 | The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives.
16 |
17 | DEFINITIONS
18 | "Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation.
19 |
20 | "Reserved Font Name" refers to any names specified as such after the copyright statement(s).
21 |
22 | "Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s).
23 |
24 | "Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment.
25 |
26 | "Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software.
27 |
28 | PERMISSION & CONDITIONS
29 | Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions:
30 |
31 | 1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself.
32 |
33 | 2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user.
34 |
35 | 3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users.
36 |
37 | 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission.
38 |
39 | 5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software.
40 |
41 | TERMINATION
42 | This license becomes null and void if any of the above conditions are not met.
43 |
44 | DISCLAIMER
45 | THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
--------------------------------------------------------------------------------
/yolo/model_data/coco_classes.txt:
--------------------------------------------------------------------------------
1 | person
2 | bicycle
3 | car
4 | motorbike
5 | aeroplane
6 | bus
7 | train
8 | truck
9 | boat
10 | traffic light
11 | fire hydrant
12 | stop sign
13 | parking meter
14 | bench
15 | bird
16 | cat
17 | dog
18 | horse
19 | sheep
20 | cow
21 | elephant
22 | bear
23 | zebra
24 | giraffe
25 | backpack
26 | umbrella
27 | handbag
28 | tie
29 | suitcase
30 | frisbee
31 | skis
32 | snowboard
33 | sports ball
34 | kite
35 | baseball bat
36 | baseball glove
37 | skateboard
38 | surfboard
39 | tennis racket
40 | bottle
41 | wine glass
42 | cup
43 | fork
44 | knife
45 | spoon
46 | bowl
47 | banana
48 | apple
49 | sandwich
50 | orange
51 | broccoli
52 | carrot
53 | hot dog
54 | pizza
55 | donut
56 | cake
57 | chair
58 | sofa
59 | pottedplant
60 | bed
61 | diningtable
62 | toilet
63 | tvmonitor
64 | laptop
65 | mouse
66 | remote
67 | keyboard
68 | cell phone
69 | microwave
70 | oven
71 | toaster
72 | sink
73 | refrigerator
74 | book
75 | clock
76 | vase
77 | scissors
78 | teddy bear
79 | hair drier
80 | toothbrush
81 |
--------------------------------------------------------------------------------
/yolo/model_data/pascal_classes.txt:
--------------------------------------------------------------------------------
1 | aeroplane
2 | bicycle
3 | bird
4 | boat
5 | bottle
6 | bus
7 | car
8 | cat
9 | chair
10 | cow
11 | diningtable
12 | dog
13 | horse
14 | motorbike
15 | person
16 | pottedplant
17 | sheep
18 | sofa
19 | train
20 | tvmonitor
21 |
--------------------------------------------------------------------------------
/yolo/model_data/tiny-yolo-voc_anchors.txt:
--------------------------------------------------------------------------------
1 | 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
2 |
--------------------------------------------------------------------------------
/yolo/model_data/yolo-voc_anchors.txt:
--------------------------------------------------------------------------------
1 | 1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52
2 |
--------------------------------------------------------------------------------
/yolo/model_data/yolo_anchors.txt:
--------------------------------------------------------------------------------
1 | 0.738768,0.874946, 2.42204,2.65704, 4.30971,7.04493, 10.246,4.59428, 12.6868,11.8741
2 |
--------------------------------------------------------------------------------
/yolo/test_yolo.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | """Run a YOLO_v2 style detection model on test images."""
3 | import argparse
4 | import colorsys
5 | import imghdr
6 | import os
7 | import random
8 |
9 | import numpy as np
10 | from keras import backend as K
11 | from keras.models import load_model
12 | from PIL import Image, ImageDraw, ImageFont
13 |
14 | from yad2k.models.keras_yolo import yolo_eval, yolo_head
15 |
16 | parser = argparse.ArgumentParser(
17 | description='Run a YOLO_v2 style detection model on test images.')
18 | parser.add_argument(
19 | 'model_path',
20 | help='path to h5 model file containing body '
21 | 'of a YOLO_v2 model')
22 | parser.add_argument(
23 | '-a',
24 | '--anchors_path',
25 | help='path to anchors file, defaults to yolo_anchors.txt',
26 | default='model_data/yolo_anchors.txt')
27 | parser.add_argument(
28 | '-c',
29 | '--classes_path',
30 | help='path to classes file, defaults to coco_classes.txt',
31 | default='model_data/coco_classes.txt')
32 | parser.add_argument(
33 | '-t',
34 | '--test_path',
35 | help='path to directory of test images, defaults to images/',
36 | default='images')
37 | parser.add_argument(
38 | '-o',
39 | '--output_path',
40 | help='path to output test images, defaults to images/out',
41 | default='images/out')
42 | parser.add_argument(
43 | '-s',
44 | '--score_threshold',
45 | type=float,
46 | help='threshold for bounding box scores, default .3',
47 | default=.3)
48 | parser.add_argument(
49 | '-iou',
50 | '--iou_threshold',
51 | type=float,
52 | help='threshold for non max suppression IOU, default .5',
53 | default=.5)
54 |
55 |
56 | def _main(args):
57 | model_path = os.path.expanduser(args.model_path)
58 | assert model_path.endswith('.h5'), 'Keras model must be a .h5 file.'
59 | anchors_path = os.path.expanduser(args.anchors_path)
60 | classes_path = os.path.expanduser(args.classes_path)
61 | test_path = os.path.expanduser(args.test_path)
62 | output_path = os.path.expanduser(args.output_path)
63 |
64 | if not os.path.exists(output_path):
65 | print('Creating output path {}'.format(output_path))
66 | os.mkdir(output_path)
67 |
68 | sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
69 |
70 | with open(classes_path) as f:
71 | class_names = f.readlines()
72 | class_names = [c.strip() for c in class_names]
73 |
74 | with open(anchors_path) as f:
75 | anchors = f.readline()
76 | anchors = [float(x) for x in anchors.split(',')]
77 | anchors = np.array(anchors).reshape(-1, 2)
78 |
79 | yolo_model = load_model(model_path)
80 |
81 | # Verify model, anchors, and classes are compatible
82 | num_classes = len(class_names)
83 | num_anchors = len(anchors)
84 | # TODO: Assumes dim ordering is channel last
85 | model_output_channels = yolo_model.layers[-1].output_shape[-1]
86 | assert model_output_channels == num_anchors * (num_classes + 5), \
87 | 'Mismatch between model and given anchor and class sizes. ' \
88 | 'Specify matching anchors and classes with --anchors_path and ' \
89 | '--classes_path flags.'
90 | print('{} model, anchors, and classes loaded.'.format(model_path))
91 |
92 | # Check if model is fully convolutional, assuming channel last order.
93 | model_image_size = yolo_model.layers[0].input_shape[1:3]
94 | is_fixed_size = model_image_size != (None, None)
95 |
96 | # Generate colors for drawing bounding boxes.
97 | hsv_tuples = [(x / len(class_names), 1., 1.)
98 | for x in range(len(class_names))]
99 | colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
100 | colors = list(
101 | map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
102 | colors))
103 | random.seed(10101) # Fixed seed for consistent colors across runs.
104 | random.shuffle(colors) # Shuffle colors to decorrelate adjacent classes.
105 | random.seed(None) # Reset seed to default.
106 |
107 | # Generate output tensor targets for filtered bounding boxes.
108 | # TODO: Wrap these backend operations with Keras layers.
109 | yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
110 | input_image_shape = K.placeholder(shape=(2, ))
111 | boxes, scores, classes = yolo_eval(
112 | yolo_outputs,
113 | input_image_shape,
114 |         score_threshold=args.score_threshold,
115 |         iou_threshold=args.iou_threshold)
116 |
117 | for image_file in os.listdir(test_path):
118 | try:
119 | image_type = imghdr.what(os.path.join(test_path, image_file))
120 | if not image_type:
121 | continue
122 | except IsADirectoryError:
123 | continue
124 |
125 | image = Image.open(os.path.join(test_path, image_file))
126 | if is_fixed_size: # TODO: When resizing we can use minibatch input.
127 | resized_image = image.resize(
128 | tuple(reversed(model_image_size)), Image.BICUBIC)
129 | image_data = np.array(resized_image, dtype='float32')
130 | else:
131 | image_data = np.array(image, dtype='float32')
132 |
133 | image_data /= 255.
134 | image_data = np.expand_dims(image_data, 0) # Add batch dimension.
135 |
136 | out_boxes, out_scores, out_classes = sess.run(
137 | [boxes, scores, classes],
138 | feed_dict={
139 | yolo_model.input: image_data,
140 | input_image_shape: [image.size[1], image.size[0]],
141 | K.learning_phase(): 0
142 | })
143 | print('Found {} boxes for {}'.format(len(out_boxes), image_file))
144 |
145 | font = ImageFont.truetype(
146 | font='font/FiraMono-Medium.otf',
147 | size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
148 | thickness = (image.size[0] + image.size[1]) // 300
149 |
150 | for i, c in reversed(list(enumerate(out_classes))):
151 | predicted_class = class_names[c]
152 | box = out_boxes[i]
153 | score = out_scores[i]
154 |
155 | label = '{} {:.2f}'.format(predicted_class, score)
156 |
157 | draw = ImageDraw.Draw(image)
158 | label_size = draw.textsize(label, font)
159 |
160 | top, left, bottom, right = box
161 | top = max(0, np.floor(top + 0.5).astype('int32'))
162 | left = max(0, np.floor(left + 0.5).astype('int32'))
163 | bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
164 | right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
165 | print(label, (left, top), (right, bottom))
166 |
167 | if top - label_size[1] >= 0:
168 | text_origin = np.array([left, top - label_size[1]])
169 | else:
170 | text_origin = np.array([left, top + 1])
171 |
172 | # My kingdom for a good redistributable image drawing library.
173 | for i in range(thickness):
174 | draw.rectangle(
175 | [left + i, top + i, right - i, bottom - i],
176 | outline=colors[c])
177 | draw.rectangle(
178 | [tuple(text_origin), tuple(text_origin + label_size)],
179 | fill=colors[c])
180 | draw.text(text_origin, label, fill=(0, 0, 0), font=font)
181 | del draw
182 |
183 | image.save(os.path.join(output_path, image_file), quality=90)
184 | sess.close()
185 |
186 |
187 | if __name__ == '__main__':
188 | _main(parser.parse_args())
189 |
--------------------------------------------------------------------------------
/yolo/yad2k.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | """
3 | Reads Darknet19 config and weights and creates Keras model with TF backend.
4 |
5 | Currently only supports layers in Darknet19 config.
6 | """
7 |
8 | import argparse
9 | import configparser
10 | import io
11 | import os
12 | from collections import defaultdict
13 |
14 | import numpy as np
15 | from keras import backend as K
16 | from keras.layers import (Convolution2D, GlobalAveragePooling2D, Input, Lambda,
17 | MaxPooling2D, merge)
18 | from keras.layers.advanced_activations import LeakyReLU
19 | from keras.layers.normalization import BatchNormalization
20 | from keras.models import Model
21 | from keras.regularizers import l2
22 | from keras.utils.visualize_util import plot
23 |
24 | from yad2k.models.keras_yolo import (space_to_depth_x2,
25 | space_to_depth_x2_output_shape)
26 |
27 | parser = argparse.ArgumentParser(
28 | description='Yet Another Darknet To Keras Converter.')
29 | parser.add_argument('config_path', help='Path to Darknet cfg file.')
30 | parser.add_argument('weights_path', help='Path to Darknet weights file.')
31 | parser.add_argument('output_path', help='Path to output Keras model file.')
32 | parser.add_argument(
33 | '-p',
34 | '--plot_model',
35 | help='Plot generated Keras model and save as image.',
36 | action='store_true')
37 | parser.add_argument(
38 | '-flcl',
39 | '--fully_convolutional',
40 | help='Model is fully convolutional so set input shape to (None, None, 3). '
41 | 'WARNING: This experimental option does not work properly for YOLO_v2.',
42 | action='store_true')
43 |
44 |
45 | def unique_config_sections(config_file):
46 | """Convert all config sections to have unique names.
47 |
48 |     Adds unique suffixes to config sections for compatibility with configparser.
49 | """
50 | section_counters = defaultdict(int)
51 | output_stream = io.StringIO()
52 | with open(config_file) as fin:
53 | for line in fin:
54 | if line.startswith('['):
55 | section = line.strip().strip('[]')
56 | _section = section + '_' + str(section_counters[section])
57 | section_counters[section] += 1
58 | line = line.replace(section, _section)
59 | output_stream.write(line)
60 | output_stream.seek(0)
61 | return output_stream
62 |
63 |
64 | # %%
65 | def _main(args):
66 | config_path = os.path.expanduser(args.config_path)
67 | weights_path = os.path.expanduser(args.weights_path)
68 | assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
69 | config_path)
70 | assert weights_path.endswith(
71 | '.weights'), '{} is not a .weights file'.format(weights_path)
72 |
73 | output_path = os.path.expanduser(args.output_path)
74 | assert output_path.endswith(
75 | '.h5'), 'output path {} is not a .h5 file'.format(output_path)
76 | output_root = os.path.splitext(output_path)[0]
77 |
78 | # Load weights and config.
79 | print('Loading weights.')
80 | weights_file = open(weights_path, 'rb')
81 | weights_header = np.ndarray(
82 | shape=(4, ), dtype='int32', buffer=weights_file.read(16))
83 | print('Weights Header: ', weights_header)
84 | # TODO: Check transpose flag when implementing fully connected layers.
85 | # transpose = (weight_header[0] > 1000) or (weight_header[1] > 1000)
86 |
87 | print('Parsing Darknet config.')
88 | unique_config_file = unique_config_sections(config_path)
89 | cfg_parser = configparser.ConfigParser()
90 | cfg_parser.read_file(unique_config_file)
91 |
92 | print('Creating Keras model.')
93 | if args.fully_convolutional:
94 | image_height, image_width = None, None
95 | else:
96 | image_height = int(cfg_parser['net_0']['height'])
97 | image_width = int(cfg_parser['net_0']['width'])
98 | prev_layer = Input(shape=(image_height, image_width, 3))
99 | all_layers = [prev_layer]
100 |
101 | weight_decay = float(cfg_parser['net_0']['decay']
102 | ) if 'net_0' in cfg_parser.sections() else 5e-4
103 | count = 0
104 | for section in cfg_parser.sections():
105 | print('Parsing section {}'.format(section))
106 | if section.startswith('convolutional'):
107 | filters = int(cfg_parser[section]['filters'])
108 | size = int(cfg_parser[section]['size'])
109 | stride = int(cfg_parser[section]['stride'])
110 | pad = int(cfg_parser[section]['pad'])
111 | activation = cfg_parser[section]['activation']
112 | batch_normalize = 'batch_normalize' in cfg_parser[section]
113 |
114 | # border_mode='same' is equivalent to Darknet pad=1
115 | border_mode = 'same' if pad == 1 else 'valid'
116 |
117 | # Setting weights.
118 | # Darknet serializes convolutional weights as:
119 | # [bias/beta, [gamma, mean, variance], conv_weights]
120 | prev_layer_shape = K.int_shape(prev_layer)
121 |
122 | # TODO: This assumes channel last dim_ordering.
123 | weights_shape = (size, size, prev_layer_shape[-1], filters)
124 | darknet_w_shape = (filters, weights_shape[2], size, size)
125 | weights_size = np.product(weights_shape)
126 |
127 | print('conv2d', 'bn'
128 | if batch_normalize else ' ', activation, weights_shape)
129 |
130 | conv_bias = np.ndarray(
131 | shape=(filters, ),
132 | dtype='float32',
133 | buffer=weights_file.read(filters * 4))
134 | count += filters
135 |
136 | if batch_normalize:
137 | bn_weights = np.ndarray(
138 | shape=(3, filters),
139 | dtype='float32',
140 | buffer=weights_file.read(filters * 12))
141 | count += 3 * filters
142 |
143 | # TODO: Keras BatchNormalization mistakenly refers to var
144 | # as std.
145 | bn_weight_list = [
146 | bn_weights[0], # scale gamma
147 | conv_bias, # shift beta
148 | bn_weights[1], # running mean
149 | bn_weights[2] # running var
150 | ]
151 |
152 | conv_weights = np.ndarray(
153 | shape=darknet_w_shape,
154 | dtype='float32',
155 | buffer=weights_file.read(weights_size * 4))
156 | count += weights_size
157 |
158 | # DarkNet conv_weights are serialized Caffe-style:
159 | # (out_dim, in_dim, height, width)
160 | # We would like to set these to Tensorflow order:
161 | # (height, width, in_dim, out_dim)
162 | # TODO: Add check for Theano dim ordering.
163 | conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
164 | conv_weights = [conv_weights] if batch_normalize else [
165 | conv_weights, conv_bias
166 | ]
167 |
168 | # Handle activation.
169 | act_fn = None
170 | if activation == 'leaky':
171 | pass # Add advanced activation later.
172 | elif activation != 'linear':
173 | raise ValueError(
174 | 'Unknown activation function `{}` in section {}'.format(
175 | activation, section))
176 |
177 | # Create Conv2D layer
178 | conv_layer = (Convolution2D(
179 | filters,
180 | size,
181 | size,
182 | border_mode=border_mode,
183 | subsample=(stride, stride),
184 | bias=not batch_normalize,
185 | weights=conv_weights,
186 | activation=act_fn,
187 | W_regularizer=l2(weight_decay)))(prev_layer)
188 |
189 | if batch_normalize:
190 | conv_layer = (BatchNormalization(
191 | weights=bn_weight_list))(conv_layer)
192 | prev_layer = conv_layer
193 |
194 | if activation == 'linear':
195 | all_layers.append(prev_layer)
196 | elif activation == 'leaky':
197 | act_layer = LeakyReLU(alpha=0.1)(prev_layer)
198 | prev_layer = act_layer
199 | all_layers.append(act_layer)
200 |
201 | elif section.startswith('maxpool'):
202 | size = int(cfg_parser[section]['size'])
203 | stride = int(cfg_parser[section]['stride'])
204 | all_layers.append(
205 | MaxPooling2D(
206 | pool_size=(size, size),
207 | strides=(stride, stride),
208 | border_mode='same')(prev_layer))
209 | prev_layer = all_layers[-1]
210 |
211 | elif section.startswith('avgpool'):
212 | if cfg_parser.items(section) != []:
213 | raise ValueError('{} with params unsupported.'.format(section))
214 | all_layers.append(GlobalAveragePooling2D()(prev_layer))
215 | prev_layer = all_layers[-1]
216 |
217 | elif section.startswith('route'):
218 | ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
219 | layers = [all_layers[i] for i in ids]
220 | if len(layers) > 1:
221 | print('Merging layers:', layers)
222 | merge_layer = merge(layers, mode='concat')
223 | all_layers.append(merge_layer)
224 | prev_layer = merge_layer
225 | else:
226 | skip_layer = layers[0] # only one layer to route
227 | all_layers.append(skip_layer)
228 | prev_layer = skip_layer
229 |
230 | elif section.startswith('reorg'):
231 | block_size = int(cfg_parser[section]['stride'])
232 | assert block_size == 2, 'Only reorg with stride 2 supported.'
233 | all_layers.append(
234 | Lambda(
235 | space_to_depth_x2,
236 | output_shape=space_to_depth_x2_output_shape,
237 | name='space_to_depth_x2')(prev_layer))
238 | prev_layer = all_layers[-1]
239 |
240 | elif section.startswith('region'):
241 | anchors = cfg_parser[section]['anchors']
242 |
243 | elif (section.startswith('net') or section.startswith('cost') or
244 | section.startswith('softmax')):
245 | pass # Configs not currently handled during model definition.
246 |
247 | else:
248 | raise ValueError(
249 | 'Unsupported section header type: {}'.format(section))
250 |
251 | # Create and save model.
252 | model = Model(input=all_layers[0], output=all_layers[-1])
253 | print(model.summary())
254 | model.save('{}'.format(output_path))
255 | print('Saved Keras model to {}'.format(output_path))
256 | # Check to see if all weights have been read.
257 | remaining_weights = len(weights_file.read()) / 4
258 | weights_file.close()
259 | print('Read {} of {} from Darknet weights.'.format(count, count +
260 | remaining_weights))
261 | if remaining_weights > 0:
262 |         print('Warning: {} unused weights'.format(remaining_weights))
263 |
264 | if 'region' in cfg_parser.sections():
265 | with open('{}_anchors.txt'.format(output_root), 'w') as f:
266 | print(anchors, file=f)
267 |
268 | if args.plot_model:
269 | plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
270 | print('Saved model plot to {}.png'.format(output_root))
271 |
272 |
273 | if __name__ == '__main__':
274 | _main(parser.parse_args())
275 |
--------------------------------------------------------------------------------
/yolo/yad2k/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/yolo/yad2k/__init__.py
--------------------------------------------------------------------------------
/yolo/yad2k/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jluo-bgl/Self-Driving-Car-Vehicle-Detection/3e6eb858a37bec8b3ca3afe971645ebb22b15f59/yolo/yad2k/models/__init__.py
--------------------------------------------------------------------------------
/yolo/yad2k/models/keras_darknet19.py:
--------------------------------------------------------------------------------
1 | """Darknet19 Model Defined in Keras."""
2 | import functools
3 | from functools import partial
4 |
5 | from keras.layers import Convolution2D, MaxPooling2D
6 | from keras.layers.advanced_activations import LeakyReLU
7 | from keras.layers.normalization import BatchNormalization
8 | from keras.models import Model
9 | from keras.regularizers import l2
10 |
11 | from ..utils import compose
12 |
13 | # Partial wrapper for Convolution2D with static default argument.
14 | _DarknetConv2D = partial(Convolution2D, border_mode='same')
15 |
16 |
17 | @functools.wraps(Convolution2D)
18 | def DarknetConv2D(*args, **kwargs):
19 | """Wrapper to set Darknet weight regularizer for Convolution2D."""
20 | darknet_conv_kwargs = {'W_regularizer': l2(5e-4)}
21 | darknet_conv_kwargs.update(kwargs)
22 | return _DarknetConv2D(*args, **darknet_conv_kwargs)
23 |
24 |
25 | def DarknetConv2D_BN_Leaky(*args, **kwargs):
26 | """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
27 | return compose(
28 | DarknetConv2D(*args, **kwargs),
29 | BatchNormalization(),
30 | LeakyReLU(alpha=0.1))
31 |
32 |
33 | def bottleneck_block(nb_outer, nb_bottleneck):
34 | """Bottleneck block of 3x3, 1x1, 3x3 convolutions."""
35 | return compose(
36 | DarknetConv2D_BN_Leaky(nb_outer, 3, 3),
37 | DarknetConv2D_BN_Leaky(nb_bottleneck, 1, 1),
38 | DarknetConv2D_BN_Leaky(nb_outer, 3, 3))
39 |
40 |
41 | def bottleneck_x2_block(nb_outer, nb_bottleneck):
42 | """Bottleneck block of 3x3, 1x1, 3x3, 1x1, 3x3 convolutions."""
43 | return compose(
44 | bottleneck_block(nb_outer, nb_bottleneck),
45 | DarknetConv2D_BN_Leaky(nb_bottleneck, 1, 1),
46 | DarknetConv2D_BN_Leaky(nb_outer, 3, 3))
47 |
48 |
49 | def darknet_body():
50 | """Generate first 18 conv layers of Darknet-19."""
51 | return compose(
52 | DarknetConv2D_BN_Leaky(32, 3, 3),
53 | MaxPooling2D(),
54 | DarknetConv2D_BN_Leaky(64, 3, 3),
55 | MaxPooling2D(),
56 | bottleneck_block(128, 64),
57 | MaxPooling2D(),
58 | bottleneck_block(256, 128),
59 | MaxPooling2D(),
60 | bottleneck_x2_block(512, 256),
61 | MaxPooling2D(),
62 | bottleneck_x2_block(1024, 512))
63 |
64 |
65 | def darknet19(inputs):
66 | """Generate Darknet-19 model for Imagenet classification."""
67 | body = darknet_body()(inputs)
68 | logits = DarknetConv2D(1000, 1, 1)(body)
69 | return Model(inputs, logits)
70 |
--------------------------------------------------------------------------------
/yolo/yad2k/models/keras_yolo.py:
--------------------------------------------------------------------------------
1 | """YOLO_v2 Model Defined in Keras."""
2 | import sys
3 |
4 | import numpy as np
5 | import tensorflow as tf
6 | from keras import backend as K
7 | from keras.layers import Lambda, Reshape, merge
8 | from keras.models import Model
9 |
10 | from ..utils import compose
11 | from .keras_darknet19 import (DarknetConv2D, DarknetConv2D_BN_Leaky,
12 | darknet_body)
13 |
14 | sys.path.append('..')
15 |
16 | voc_anchors = np.array(
17 | [[1.08, 1.19], [3.42, 4.41], [6.63, 11.38], [9.42, 5.11], [16.62, 10.52]])
18 |
19 | voc_classes = [
20 | "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
21 | "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
22 | "pottedplant", "sheep", "sofa", "train", "tvmonitor"
23 | ]
24 |
25 |
26 | def space_to_depth_x2(x):
27 | """Thin wrapper for Tensorflow space_to_depth with block_size=2."""
28 | # Import currently required to make Lambda work.
29 | # See: https://github.com/fchollet/keras/issues/5088#issuecomment-273851273
30 | import tensorflow as tf
31 | return tf.space_to_depth(x, block_size=2)
32 |
33 |
34 | def space_to_depth_x2_output_shape(input_shape):
35 | """Determine space_to_depth output shape for block_size=2.
36 |
37 | Note: For Lambda with TensorFlow backend, output shape may not be needed.
38 | """
39 | return (input_shape[0], input_shape[1] // 2, input_shape[2] // 2, 4 *
40 | input_shape[3]) if input_shape[1] else (input_shape[0], None, None,
41 | 4 * input_shape[3])
42 |
43 |
44 | def yolo_body(inputs, num_anchors, num_classes):
45 | """Create YOLO_V2 model CNN body in Keras."""
46 | darknet = Model(inputs, darknet_body()(inputs))
47 | conv13 = darknet.get_layer('batchnormalization_13').output
48 | conv20 = compose(
49 | DarknetConv2D_BN_Leaky(1024, 3, 3),
50 | DarknetConv2D_BN_Leaky(1024, 3, 3))(darknet.output)
51 |
52 | # TODO: Allow Keras Lambda to use func arguments for output_shape?
53 | conv13_reshaped = Lambda(
54 | space_to_depth_x2,
55 | output_shape=space_to_depth_x2_output_shape,
56 | name='space_to_depth')(conv13)
57 |
58 | # Concat conv13 with conv20.
59 | x = merge([conv13_reshaped, conv20], mode='concat')
60 | x = DarknetConv2D_BN_Leaky(1024, 3, 3)(x)
61 | x = DarknetConv2D(num_anchors * (num_classes + 5), 1, 1)(x)
62 | return Model(inputs, x)
63 |
64 |
65 | def yolo_head(feats, anchors, num_classes):
66 | """Convert final layer features to bounding box parameters.
67 |
68 | Parameters
69 | ----------
70 | feats : tensor
71 | Final convolutional layer features.
72 | anchors : array-like
73 | Anchor box widths and heights.
74 | num_classes : int
75 | Number of target classes.
76 |
77 | Returns
78 | -------
79 | box_xy : tensor
80 | x, y box predictions adjusted by spatial location in conv layer.
81 | box_wh : tensor
82 | w, h box predictions adjusted by anchors and conv spatial resolution.
83 | box_conf : tensor
84 | Probability estimate for whether each box contains any object.
85 | box_class_pred : tensor
86 | Probability distribution estimate for each box over class labels.
87 | """
88 | num_anchors = len(anchors)
89 | # Reshape to batch, height, width, num_anchors, box_params.
90 | anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])
91 |
92 | # Static implementation for fixed models.
93 | # TODO: Remove or add option for static implementation.
94 | # _, conv_height, conv_width, _ = K.int_shape(feats)
95 | # conv_dims = K.variable([conv_width, conv_height])
96 |
97 | # Dynamic implementation of conv dims for fully convolutional model.
98 | conv_dims = K.shape(feats)[1:3] # assuming channels last
99 |     # In YOLO the height index is the innermost iteration.
100 |     conv_height_index = K.arange(0, stop=conv_dims[0])
101 |     conv_width_index = K.arange(0, stop=conv_dims[1])
102 |     conv_height_index = K.tile(conv_height_index, [conv_dims[1]])
103 |
104 |     # TODO: Repeat_elements and tf.split don't support dynamic splits.
105 |     # conv_width_index = K.repeat_elements(conv_width_index, conv_dims[1], axis=0)
106 |     conv_width_index = K.tile(
107 |         K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
108 | conv_width_index = K.flatten(K.transpose(conv_width_index))
109 | conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
110 | conv_index = K.reshape(conv_index, [conv_dims[0], conv_dims[1], 2])
111 | conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
112 | conv_index = K.cast(conv_index, K.dtype(feats))
113 |
114 | feats = K.reshape(
115 | feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
116 | conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))
117 |
118 | # Static generation of conv_index:
119 | # conv_index = np.array([_ for _ in np.ndindex(conv_width, conv_height)])
120 | # conv_index = conv_index[:, [1, 0]] # swap columns for YOLO ordering.
121 | # conv_index = K.variable(
122 | # conv_index.reshape(1, conv_height, conv_width, 1, 2))
123 | # feats = Reshape(
124 | # (conv_dims[0], conv_dims[1], num_anchors, num_classes + 5))(feats)
125 |
126 | box_xy = K.sigmoid(feats[..., :2])
127 | box_wh = K.exp(feats[..., 2:4])
128 | box_confidence = K.sigmoid(feats[..., 4:5])
129 | box_class_probs = K.softmax(feats[..., 5:])
130 |
131 |     # Adjust predictions to each spatial grid point and anchor size.
132 | # Note: YOLO iterates over height index before width index.
133 | box_xy = (box_xy + conv_index) / conv_dims
134 | box_wh = box_wh * anchors_tensor / conv_dims
135 |
136 | return box_xy, box_wh, box_confidence, box_class_probs
137 |
138 |
139 | def yolo_boxes_to_corners(box_xy, box_wh):
140 | """Convert YOLO box predictions to bounding box corners."""
141 | box_mins = box_xy - (box_wh / 2.)
142 | box_maxes = box_xy + (box_wh / 2.)
143 |
144 | return K.concatenate([
145 | box_mins[..., 1:2], # y_min
146 | box_mins[..., 0:1], # x_min
147 | box_maxes[..., 1:2], # y_max
148 | box_maxes[..., 0:1] # x_max
149 | ])
150 |
151 |
152 | def yolo(inputs, anchors, num_classes):
153 | """Generate a complete YOLO_v2 localization model."""
154 | num_anchors = len(anchors)
155 | body = yolo_body(inputs, num_anchors, num_classes)
156 | outputs = yolo_head(body.output, anchors, num_classes)
157 | return outputs
158 |
159 |
160 | def yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold=.6):
161 | """Filter YOLO boxes based on object and class confidence."""
162 | box_scores = box_confidence * box_class_probs
163 | box_classes = K.argmax(box_scores, axis=-1)
164 | box_class_scores = K.max(box_scores, axis=-1)
165 | prediction_mask = box_class_scores >= threshold
166 |
167 | # TODO: Expose tf.boolean_mask to Keras backend?
168 | boxes = tf.boolean_mask(boxes, prediction_mask)
169 | scores = tf.boolean_mask(box_class_scores, prediction_mask)
170 | classes = tf.boolean_mask(box_classes, prediction_mask)
171 | return boxes, scores, classes
172 |
173 |
174 | def yolo_eval(yolo_outputs,
175 | image_shape,
176 | max_boxes=10,
177 | score_threshold=.6,
178 | iou_threshold=.5):
179 | """Evaluate YOLO model on given input batch and return filtered boxes."""
180 | box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
181 | boxes = yolo_boxes_to_corners(box_xy, box_wh)
182 | boxes, scores, classes = yolo_filter_boxes(
183 | boxes, box_confidence, box_class_probs, threshold=score_threshold)
184 |
185 | # Scale boxes back to original image shape.
186 | height = image_shape[0]
187 | width = image_shape[1]
188 | image_dims = K.stack([height, width, height, width])
189 | image_dims = K.reshape(image_dims, [1, 4])
190 | boxes = boxes * image_dims
191 |
192 | # TODO: Something must be done about this ugly hack!
193 | max_boxes_tensor = K.variable(max_boxes, dtype='int32')
194 | K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
195 | nms_index = tf.image.non_max_suppression(
196 | boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
197 | boxes = K.gather(boxes, nms_index)
198 | scores = K.gather(scores, nms_index)
199 | classes = K.gather(classes, nms_index)
200 | return boxes, scores, classes
201 |
--------------------------------------------------------------------------------
/yolo/yad2k/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import *
2 |
--------------------------------------------------------------------------------
/yolo/yad2k/utils/utils.py:
--------------------------------------------------------------------------------
1 | """Miscellaneous utility functions."""
2 |
3 | from functools import reduce
4 |
5 |
6 | def compose(*funcs):
7 | """Compose arbitrarily many functions, evaluated left to right.
8 |
9 | Reference: https://mathieularose.com/function-composition-in-python/
10 | """
11 | # return lambda x: reduce(lambda v, f: f(v), funcs, x)
12 | if funcs:
13 | return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
14 | else:
15 | raise ValueError('Composition of empty sequence not supported.')
16 |
--------------------------------------------------------------------------------