├── .gitignore
├── Dataset
│   ├── Naive-Bayes-Classification-Data.csv
│   └── Titanic.csv
├── LICENSE
├── MANIFEST.in
├── Notebooks
│   ├── Iris_Load_and_Save.py
│   ├── Iris_Multi_Class.py
│   └── Titanic.ipynb
├── README.md
├── assets
│   └── Logo.webp
├── checkGridSearchCV.py
├── licences
│   ├── numpy_LICENSE.txt
│   ├── pandas_LICENSE.txt
│   └── scikit-learn_LICENSE.txt
├── pydeepflow
│   ├── __init__.py
│   ├── activations.py
│   ├── batch_normalization.py
│   ├── checkpoints.py
│   ├── cross_validator.py
│   ├── device.py
│   ├── early_stopping.py
│   ├── gridSearch.py
│   ├── learning_rate_scheduler.py
│   ├── losses.py
│   ├── main.py
│   ├── model.py
│   └── regularization.py
├── requirements.txt
├── runner.py
├── setup.py
└── tests
    ├── test_activations.py
    ├── test_batch_normalization.py
    ├── test_device.py
    ├── test_losses.py
    └── test_model.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 |
59 | # Pickle Stuff:
60 | *.pkl
61 |
62 | # Django stuff:
63 | *.log
64 | local_settings.py
65 | db.sqlite3
66 | db.sqlite3-journal
67 |
68 | # Flask stuff:
69 | instance/
70 | .webassets-cache
71 |
72 | # Scrapy stuff:
73 | .scrapy
74 |
75 | # Sphinx documentation
76 | docs/_build/
77 |
78 | # PyBuilder
79 | .pybuilder/
80 | target/
81 |
82 | # Jupyter Notebook
83 | .ipynb_checkpoints
84 |
85 | # Checkpoint files (Added by ravin-d-27 for testing purposes)
86 | checkpoints
87 |
88 | # Ignore generated visualization output
89 | history.png
90 |
91 | # IPython
92 | profile_default/
93 | ipython_config.py
94 |
95 | # pyenv
96 | # For a library or package, you might want to ignore these files since the code is
97 | # intended to run in multiple environments; otherwise, check them in:
98 | # .python-version
99 |
100 | # pipenv
101 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
102 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
103 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
104 | # install all needed dependencies.
105 | #Pipfile.lock
106 |
107 | # poetry
108 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
109 | # This is especially recommended for binary packages to ensure reproducibility, and is more
110 | # commonly ignored for libraries.
111 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
112 | #poetry.lock
113 |
114 | # pdm
115 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
116 | #pdm.lock
117 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
118 | # in version control.
119 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
120 | .pdm.toml
121 | .pdm-python
122 | .pdm-build/
123 |
124 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
125 | __pypackages__/
126 |
127 | # Celery stuff
128 | celerybeat-schedule
129 | celerybeat.pid
130 |
131 | # SageMath parsed files
132 | *.sage.py
133 |
134 | # Environments
135 | .env
136 | .venv
137 | env/
138 | venv/
139 | ENV/
140 | env.bak/
141 | venv.bak/
142 |
143 | # Spyder project settings
144 | .spyderproject
145 | .spyproject
146 |
147 | # Rope project settings
148 | .ropeproject
149 |
150 | # mkdocs documentation
151 | /site
152 |
153 | # mypy
154 | .mypy_cache/
155 | .dmypy.json
156 | dmypy.json
157 |
158 | # Pyre type checker
159 | .pyre/
160 |
161 | # pytype static type analyzer
162 | .pytype/
163 |
164 | # Cython debug symbols
165 | cython_debug/
166 |
167 | # PyCharm
168 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
169 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
170 | # and can be added to the global gitignore or merged into this file. For a more nuclear
171 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
172 | #.idea/
173 |
--------------------------------------------------------------------------------
/Dataset/Naive-Bayes-Classification-Data.csv:
--------------------------------------------------------------------------------
1 | glucose,bloodpressure,diabetes
2 | 40,85,0
3 | 40,92,0
4 | 45,63,1
5 | 45,80,0
6 | 40,73,1
7 | 45,82,0
8 | 40,85,0
9 | 30,63,1
10 | 65,65,1
11 | 45,82,0
12 | 35,73,1
13 | 45,90,0
14 | 50,68,1
15 | 40,93,0
16 | 35,80,1
17 | 50,70,1
18 | 40,73,1
19 | 40,67,1
20 | 40,75,1
21 | 40,80,1
22 | 40,72,1
23 | 40,88,0
24 | 40,78,1
25 | 45,98,0
26 | 40,88,0
27 | 60,67,1
28 | 40,85,0
29 | 40,88,0
30 | 45,78,0
31 | 55,73,1
32 | 45,77,1
33 | 50,68,1
34 | 45,77,1
35 | 40,85,0
36 | 45,70,1
37 | 45,72,1
38 | 45,90,0
39 | 40,65,1
40 | 45,88,0
41 | 45,88,0
42 | 40,68,1
43 | 40,73,1
44 | 45,88,0
45 | 45,78,0
46 | 45,85,0
47 | 40,83,0
48 | 40,63,1
49 | 45,73,1
50 | 45,90,0
51 | 45,87,0
52 | 40,90,0
53 | 45,93,0
54 | 50,73,1
55 | 40,68,1
56 | 50,68,1
57 | 50,90,0
58 | 50,75,1
59 | 50,85,0
60 | 45,83,0
61 | 50,65,1
62 | 45,80,0
63 | 40,75,1
64 | 35,77,1
65 | 55,68,1
66 | 45,85,0
67 | 45,87,0
68 | 25,82,1
69 | 40,90,0
70 | 45,82,0
71 | 45,80,0
72 | 45,88,0
73 | 40,87,0
74 | 45,70,1
75 | 45,88,0
76 | 45,88,0
77 | 50,67,1
78 | 45,93,0
79 | 45,90,0
80 | 55,77,1
81 | 35,62,1
82 | 30,78,1
83 | 50,82,0
84 | 45,83,0
85 | 40,83,0
86 | 40,82,0
87 | 50,70,1
88 | 45,92,0
89 | 40,90,0
90 | 40,85,0
91 | 40,73,1
92 | 50,88,0
93 | 30,80,1
94 | 40,85,1
95 | 30,75,1
96 | 45,85,0
97 | 40,85,0
98 | 60,80,1
99 | 40,90,0
100 | 35,65,1
101 | 40,87,0
102 | 45,87,0
103 | 45,92,0
104 | 45,95,0
105 | 50,67,1
106 | 40,90,0
107 | 45,85,0
108 | 45,75,1
109 | 50,73,1
110 | 40,70,1
111 | 40,92,0
112 | 30,72,1
113 | 50,87,0
114 | 40,93,0
115 | 55,60,1
116 | 40,82,1
117 | 40,95,0
118 | 40,72,1
119 | 60,68,1
120 | 45,92,0
121 | 40,70,1
122 | 45,97,0
123 | 50,83,0
124 | 60,63,1
125 | 35,65,1
126 | 45,90,0
127 | 40,65,1
128 | 40,95,0
129 | 30,68,1
130 | 45,83,0
131 | 45,92,0
132 | 45,87,0
133 | 45,72,1
134 | 45,83,0
135 | 45,85,0
136 | 45,88,0
137 | 55,68,1
138 | 60,65,1
139 | 40,73,1
140 | 35,73,1
141 | 40,83,0
142 | 50,67,1
143 | 45,80,0
144 | 55,68,1
145 | 45,80,0
146 | 50,80,0
147 | 40,92,0
148 | 50,77,1
149 | 50,92,0
150 | 45,85,0
151 | 65,70,1
152 | 45,77,1
153 | 45,82,0
154 | 40,95,0
155 | 45,75,1
156 | 45,78,1
157 | 45,87,0
158 | 50,83,0
159 | 45,92,0
160 | 40,73,1
161 | 45,88,0
162 | 45,72,1
163 | 40,67,1
164 | 45,78,0
165 | 40,78,0
166 | 45,87,0
167 | 50,78,1
168 | 45,75,1
169 | 55,73,1
170 | 45,87,0
171 | 45,80,0
172 | 45,73,1
173 | 45,93,0
174 | 45,73,1
175 | 40,87,0
176 | 40,87,0
177 | 45,87,0
178 | 25,72,1
179 | 45,88,0
180 | 55,68,1
181 | 40,90,0
182 | 40,93,0
183 | 45,82,0
184 | 35,77,1
185 | 50,72,1
186 | 40,100,0
187 | 25,83,1
188 | 55,72,1
189 | 45,82,0
190 | 40,75,1
191 | 35,80,1
192 | 40,90,0
193 | 45,90,0
194 | 45,85,0
195 | 55,63,1
196 | 45,92,0
197 | 40,87,0
198 | 45,93,0
199 | 45,85,0
200 | 35,70,1
201 | 55,73,1
202 | 50,67,1
203 | 50,65,1
204 | 55,75,1
205 | 45,85,0
206 | 35,68,1
207 | 40,80,0
208 | 40,63,1
209 | 40,90,0
210 | 50,90,0
211 | 25,67,1
212 | 55,67,1
213 | 60,67,1
214 | 50,92,0
215 | 45,80,0
216 | 50,77,1
217 | 40,88,0
218 | 45,93,0
219 | 40,93,0
220 | 45,77,0
221 | 40,77,0
222 | 55,75,1
223 | 45,87,0
224 | 60,67,1
225 | 45,90,0
226 | 30,73,1
227 | 45,87,0
228 | 40,88,0
229 | 45,95,0
230 | 45,77,1
231 | 55,68,1
232 | 45,83,0
233 | 40,90,0
234 | 40,83,0
235 | 45,97,0
236 | 45,85,0
237 | 45,83,0
238 | 45,72,1
239 | 45,68,1
240 | 40,93,0
241 | 40,87,0
242 | 40,87,0
243 | 55,70,1
244 | 60,68,1
245 | 50,90,0
246 | 40,90,0
247 | 40,78,1
248 | 50,80,1
249 | 55,75,1
250 | 40,72,1
251 | 50,73,1
252 | 45,58,1
253 | 55,68,1
254 | 40,90,0
255 | 55,72,1
256 | 35,82,0
257 | 40,70,1
258 | 55,57,1
259 | 50,80,0
260 | 45,83,0
261 | 45,85,0
262 | 45,72,1
263 | 40,75,1
264 | 40,85,0
265 | 40,83,0
266 | 40,72,1
267 | 35,63,1
268 | 20,70,1
269 | 40,92,0
270 | 45,87,0
271 | 45,83,0
272 | 55,67,1
273 | 45,80,0
274 | 45,75,1
275 | 40,70,1
276 | 40,88,0
277 | 35,78,1
278 | 55,63,1
279 | 40,82,0
280 | 40,65,1
281 | 45,90,0
282 | 40,72,1
283 | 55,62,1
284 | 50,83,0
285 | 50,58,1
286 | 45,72,1
287 | 50,68,1
288 | 65,60,1
289 | 25,73,1
290 | 35,68,1
291 | 45,58,1
292 | 45,92,0
293 | 45,67,1
294 | 50,72,1
295 | 40,87,0
296 | 35,77,1
297 | 50,65,1
298 | 60,77,1
299 | 40,68,1
300 | 45,88,1
301 | 50,77,1
302 | 45,82,0
303 | 50,73,1
304 | 35,68,1
305 | 40,92,0
306 | 55,65,1
307 | 45,83,0
308 | 50,67,1
309 | 40,68,1
310 | 45,83,0
311 | 45,90,0
312 | 45,83,0
313 | 40,72,1
314 | 45,78,1
315 | 55,68,1
316 | 35,82,1
317 | 50,87,0
318 | 50,83,0
319 | 45,73,0
320 | 45,83,0
321 | 30,73,1
322 | 45,83,0
323 | 40,68,1
324 | 35,77,1
325 | 45,85,0
326 | 45,78,1
327 | 25,73,1
328 | 40,88,0
329 | 45,82,1
330 | 60,68,1
331 | 70,65,1
332 | 40,87,0
333 | 35,70,1
334 | 55,68,1
335 | 35,90,0
336 | 40,65,1
337 | 40,65,1
338 | 55,60,1
339 | 50,83,0
340 | 40,87,0
341 | 40,82,0
342 | 45,85,0
343 | 40,85,0
344 | 55,68,1
345 | 40,83,0
346 | 50,88,0
347 | 40,88,0
348 | 50,85,0
349 | 35,62,1
350 | 40,75,1
351 | 40,75,1
352 | 45,90,0
353 | 60,85,1
354 | 50,85,0
355 | 40,82,0
356 | 40,63,1
357 | 40,88,0
358 | 30,82,1
359 | 45,83,0
360 | 50,77,1
361 | 45,97,0
362 | 45,93,0
363 | 50,68,1
364 | 40,87,0
365 | 45,87,0
366 | 40,67,1
367 | 50,85,0
368 | 50,90,0
369 | 35,70,1
370 | 45,92,0
371 | 30,78,1
372 | 45,88,0
373 | 55,70,1
374 | 45,88,0
375 | 50,78,1
376 | 40,70,1
377 | 45,73,1
378 | 40,88,0
379 | 35,75,1
380 | 45,82,0
381 | 50,68,1
382 | 35,77,1
383 | 40,73,1
384 | 45,75,1
385 | 45,82,0
386 | 45,78,1
387 | 40,70,1
388 | 45,88,0
389 | 35,77,1
390 | 50,65,1
391 | 40,90,0
392 | 45,83,0
393 | 50,67,1
394 | 45,78,1
395 | 45,82,0
396 | 45,85,0
397 | 40,70,1
398 | 45,68,1
399 | 45,73,1
400 | 40,82,0
401 | 50,78,1
402 | 50,92,0
403 | 45,82,0
404 | 45,92,0
405 | 55,65,1
406 | 40,72,1
407 | 50,85,0
408 | 50,62,1
409 | 45,92,0
410 | 55,72,1
411 | 40,83,0
412 | 45,67,1
413 | 55,65,1
414 | 45,73,1
415 | 50,85,0
416 | 45,90,0
417 | 40,72,1
418 | 50,92,0
419 | 45,87,0
420 | 45,75,1
421 | 45,78,0
422 | 55,73,1
423 | 35,90,0
424 | 40,70,1
425 | 40,88,0
426 | 45,95,0
427 | 40,77,1
428 | 25,88,1
429 | 40,88,0
430 | 65,62,1
431 | 40,85,0
432 | 30,83,1
433 | 50,52,1
434 | 50,75,1
435 | 35,78,1
436 | 45,87,0
437 | 40,93,0
438 | 45,82,0
439 | 45,67,1
440 | 55,70,1
441 | 40,82,0
442 | 40,90,0
443 | 45,67,1
444 | 40,80,1
445 | 40,60,1
446 | 40,83,0
447 | 40,88,0
448 | 50,90,0
449 | 50,83,0
450 | 50,68,1
451 | 45,82,0
452 | 55,70,1
453 | 35,72,1
454 | 50,87,0
455 | 45,90,0
456 | 45,90,0
457 | 45,92,0
458 | 45,68,1
459 | 45,90,0
460 | 50,88,0
461 | 45,92,0
462 | 45,88,0
463 | 45,80,0
464 | 55,72,1
465 | 35,83,0
466 | 50,85,0
467 | 50,70,1
468 | 40,83,0
469 | 40,92,0
470 | 50,88,0
471 | 40,100,0
472 | 40,77,1
473 | 35,70,1
474 | 35,85,1
475 | 45,88,0
476 | 40,73,1
477 | 40,65,1
478 | 40,97,0
479 | 35,87,1
480 | 40,83,0
481 | 50,75,1
482 | 45,78,1
483 | 50,95,0
484 | 50,90,0
485 | 40,78,1
486 | 30,75,1
487 | 45,67,1
488 | 50,83,0
489 | 45,80,0
490 | 45,85,0
491 | 60,68,1
492 | 55,67,1
493 | 30,82,1
494 | 45,92,0
495 | 45,62,1
496 | 40,88,0
497 | 35,78,1
498 | 40,75,1
499 | 30,70,1
500 | 30,78,1
501 | 30,78,1
502 | 45,85,1
503 | 50,60,1
504 | 40,92,0
505 | 45,73,1
506 | 40,78,0
507 | 50,72,1
508 | 45,73,1
509 | 40,88,0
510 | 45,90,0
511 | 40,83,0
512 | 45,73,1
513 | 45,68,1
514 | 55,65,1
515 | 45,85,0
516 | 50,63,1
517 | 40,70,1
518 | 50,65,1
519 | 50,75,1
520 | 40,88,0
521 | 45,77,1
522 | 40,93,0
523 | 45,87,0
524 | 45,77,0
525 | 40,87,0
526 | 35,73,1
527 | 40,75,1
528 | 45,87,0
529 | 30,77,1
530 | 40,72,1
531 | 45,77,1
532 | 40,93,0
533 | 35,68,1
534 | 40,75,1
535 | 25,70,1
536 | 40,85,0
537 | 50,77,1
538 | 45,88,0
539 | 45,78,1
540 | 50,68,1
541 | 40,65,1
542 | 50,78,0
543 | 40,60,1
544 | 40,82,0
545 | 40,82,1
546 | 50,80,1
547 | 50,83,0
548 | 35,87,1
549 | 40,92,0
550 | 45,88,0
551 | 55,68,1
552 | 50,80,0
553 | 50,72,1
554 | 65,72,1
555 | 40,85,0
556 | 50,63,1
557 | 45,92,0
558 | 30,78,1
559 | 50,88,0
560 | 40,85,0
561 | 50,90,0
562 | 45,73,1
563 | 50,60,1
564 | 45,85,0
565 | 55,70,1
566 | 35,70,1
567 | 50,80,0
568 | 45,87,0
569 | 45,65,1
570 | 45,70,1
571 | 45,85,0
572 | 40,63,1
573 | 40,87,1
574 | 45,83,0
575 | 50,87,0
576 | 45,82,1
577 | 50,90,0
578 | 50,80,0
579 | 35,88,0
580 | 40,87,0
581 | 45,83,0
582 | 45,80,0
583 | 40,83,1
584 | 45,87,0
585 | 40,95,0
586 | 40,88,0
587 | 45,88,0
588 | 45,83,0
589 | 45,78,1
590 | 45,57,1
591 | 50,83,0
592 | 45,82,0
593 | 45,57,1
594 | 45,83,0
595 | 45,77,1
596 | 30,83,1
597 | 50,75,0
598 | 40,87,0
599 | 35,62,1
600 | 45,78,0
601 | 55,75,1
602 | 45,88,0
603 | 45,68,1
604 | 30,73,1
605 | 45,73,1
606 | 50,73,1
607 | 40,68,1
608 | 35,80,1
609 | 40,85,0
610 | 45,73,1
611 | 40,92,0
612 | 35,77,1
613 | 40,93,0
614 | 55,78,1
615 | 45,73,1
616 | 55,67,1
617 | 45,68,1
618 | 45,93,0
619 | 40,92,0
620 | 50,92,0
621 | 65,63,1
622 | 20,80,1
623 | 45,70,1
624 | 50,88,0
625 | 45,72,1
626 | 65,73,1
627 | 50,85,0
628 | 30,80,1
629 | 45,63,1
630 | 45,87,0
631 | 40,83,0
632 | 45,80,0
633 | 55,75,1
634 | 50,78,0
635 | 45,88,0
636 | 45,77,1
637 | 45,90,0
638 | 50,88,0
639 | 40,87,0
640 | 50,83,0
641 | 45,83,0
642 | 40,92,0
643 | 45,67,1
644 | 50,85,1
645 | 70,62,1
646 | 40,87,0
647 | 45,88,0
648 | 35,77,1
649 | 45,85,0
650 | 45,90,0
651 | 45,88,0
652 | 55,60,1
653 | 45,70,1
654 | 45,75,1
655 | 45,92,0
656 | 50,82,0
657 | 50,80,1
658 | 40,67,1
659 | 45,93,0
660 | 45,92,0
661 | 40,87,0
662 | 45,87,1
663 | 50,85,0
664 | 55,73,1
665 | 45,73,1
666 | 35,73,1
667 | 45,88,0
668 | 50,90,0
669 | 30,80,1
670 | 25,73,1
671 | 35,80,1
672 | 40,65,1
673 | 45,73,0
674 | 35,75,1
675 | 45,70,1
676 | 60,70,1
677 | 55,75,1
678 | 45,68,1
679 | 35,83,1
680 | 45,67,1
681 | 45,85,0
682 | 55,63,1
683 | 45,88,1
684 | 45,57,1
685 | 50,58,1
686 | 50,75,1
687 | 45,92,0
688 | 40,85,0
689 | 45,80,0
690 | 40,88,0
691 | 50,83,0
692 | 45,90,0
693 | 45,82,1
694 | 40,82,1
695 | 45,75,1
696 | 50,93,0
697 | 45,75,1
698 | 45,93,0
699 | 50,72,1
700 | 50,73,1
701 | 40,82,0
702 | 40,90,0
703 | 35,67,1
704 | 40,93,0
705 | 50,70,1
706 | 50,85,0
707 | 40,90,0
708 | 40,80,1
709 | 40,87,0
710 | 50,85,0
711 | 40,88,0
712 | 40,82,1
713 | 50,85,0
714 | 40,85,1
715 | 45,68,1
716 | 60,60,1
717 | 50,83,0
718 | 50,67,1
719 | 50,78,0
720 | 40,70,1
721 | 40,70,1
722 | 45,77,0
723 | 45,88,0
724 | 45,87,0
725 | 50,62,1
726 | 50,63,1
727 | 40,72,1
728 | 40,90,0
729 | 65,73,1
730 | 55,73,1
731 | 40,67,1
732 | 45,80,0
733 | 45,90,0
734 | 45,82,0
735 | 40,87,0
736 | 45,88,0
737 | 40,67,1
738 | 45,85,0
739 | 50,88,0
740 | 60,75,1
741 | 45,60,1
742 | 35,72,1
743 | 50,77,0
744 | 40,75,1
745 | 55,73,1
746 | 40,63,1
747 | 45,90,0
748 | 45,92,0
749 | 40,98,0
750 | 40,83,0
751 | 60,67,1
752 | 45,88,0
753 | 40,72,1
754 | 45,82,1
755 | 40,82,1
756 | 45,87,0
757 | 45,88,0
758 | 55,67,1
759 | 40,67,1
760 | 45,85,0
761 | 60,75,1
762 | 40,80,1
763 | 45,68,1
764 | 40,93,0
765 | 45,83,0
766 | 45,70,1
767 | 25,73,1
768 | 55,93,0
769 | 55,67,1
770 | 55,62,1
771 | 60,68,1
772 | 50,78,1
773 | 55,78,1
774 | 45,88,0
775 | 50,85,0
776 | 35,83,1
777 | 45,83,0
778 | 45,75,1
779 | 50,70,1
780 | 45,85,0
781 | 50,87,0
782 | 45,78,1
783 | 40,93,0
784 | 30,78,1
785 | 50,70,1
786 | 35,90,0
787 | 45,83,0
788 | 60,62,1
789 | 45,92,0
790 | 40,62,1
791 | 50,75,1
792 | 40,65,1
793 | 50,90,0
794 | 30,75,1
795 | 35,67,1
796 | 40,70,1
797 | 40,78,0
798 | 50,93,0
799 | 45,87,0
800 | 45,90,0
801 | 45,85,0
802 | 40,77,1
803 | 50,95,0
804 | 45,90,0
805 | 35,80,1
806 | 45,83,0
807 | 45,90,0
808 | 45,95,0
809 | 35,73,1
810 | 60,70,1
811 | 45,92,0
812 | 45,82,0
813 | 45,70,1
814 | 45,77,1
815 | 30,70,1
816 | 40,85,0
817 | 45,67,1
818 | 55,68,1
819 | 45,80,1
820 | 55,72,1
821 | 35,67,1
822 | 50,78,1
823 | 35,82,0
824 | 50,77,1
825 | 45,92,0
826 | 45,85,0
827 | 45,75,1
828 | 50,88,0
829 | 40,87,0
830 | 40,73,1
831 | 45,63,1
832 | 50,67,1
833 | 55,73,1
834 | 35,82,0
835 | 45,85,0
836 | 45,85,0
837 | 40,65,1
838 | 40,85,0
839 | 45,80,0
840 | 40,87,0
841 | 55,77,1
842 | 40,67,1
843 | 45,82,0
844 | 50,78,1
845 | 50,83,0
846 | 50,65,1
847 | 40,87,0
848 | 45,93,0
849 | 50,88,0
850 | 45,85,0
851 | 45,92,0
852 | 45,68,1
853 | 55,72,1
854 | 40,77,1
855 | 50,65,1
856 | 40,75,1
857 | 40,80,0
858 | 40,92,0
859 | 40,75,1
860 | 45,83,1
861 | 45,87,0
862 | 35,78,1
863 | 50,85,0
864 | 50,65,1
865 | 40,88,0
866 | 45,73,1
867 | 45,87,0
868 | 40,87,0
869 | 50,92,0
870 | 40,87,0
871 | 50,85,0
872 | 45,70,1
873 | 35,83,0
874 | 40,88,0
875 | 20,73,1
876 | 45,60,1
877 | 45,88,0
878 | 55,77,1
879 | 40,87,0
880 | 40,87,0
881 | 45,82,0
882 | 50,80,1
883 | 50,95,0
884 | 40,67,1
885 | 45,67,1
886 | 45,85,0
887 | 45,78,0
888 | 40,88,0
889 | 35,72,1
890 | 45,80,0
891 | 45,85,0
892 | 45,88,0
893 | 40,87,0
894 | 35,70,1
895 | 50,82,0
896 | 45,87,0
897 | 45,80,0
898 | 40,78,0
899 | 45,80,0
900 | 45,72,1
901 | 45,77,1
902 | 40,88,0
903 | 45,87,0
904 | 45,90,0
905 | 45,83,0
906 | 45,88,0
907 | 35,88,0
908 | 60,63,1
909 | 40,80,1
910 | 45,87,0
911 | 45,90,0
912 | 35,82,0
913 | 40,72,1
914 | 45,72,1
915 | 40,90,0
916 | 55,87,0
917 | 30,77,1
918 | 45,85,0
919 | 45,72,1
920 | 45,87,0
921 | 40,87,0
922 | 40,70,1
923 | 50,83,0
924 | 40,80,0
925 | 45,83,0
926 | 50,82,0
927 | 45,78,0
928 | 45,85,0
929 | 45,90,0
930 | 45,88,0
931 | 45,67,1
932 | 30,75,1
933 | 35,77,1
934 | 40,88,0
935 | 40,75,1
936 | 45,95,0
937 | 40,72,1
938 | 40,65,1
939 | 45,90,0
940 | 40,77,1
941 | 45,90,0
942 | 40,88,0
943 | 45,75,1
944 | 45,73,1
945 | 35,75,1
946 | 55,65,1
947 | 40,95,0
948 | 45,87,0
949 | 45,67,1
950 | 50,80,1
951 | 50,67,1
952 | 45,77,0
953 | 45,92,0
954 | 45,73,1
955 | 40,75,0
956 | 35,70,1
957 | 45,50,1
958 | 45,87,0
959 | 35,73,1
960 | 40,83,1
961 | 45,88,0
962 | 45,73,1
963 | 40,72,1
964 | 45,88,0
965 | 50,88,0
966 | 40,75,1
967 | 50,95,0
968 | 35,68,1
969 | 45,75,1
970 | 50,90,0
971 | 40,88,0
972 | 40,78,1
973 | 55,68,1
974 | 45,85,0
975 | 60,63,1
976 | 40,78,0
977 | 40,92,0
978 | 50,65,1
979 | 45,82,0
980 | 45,97,0
981 | 50,83,1
982 | 45,73,1
983 | 45,75,1
984 | 50,67,1
985 | 45,65,1
986 | 50,73,1
987 | 40,73,1
988 | 45,97,0
989 | 45,90,0
990 | 45,90,0
991 | 45,68,1
992 | 45,87,0
993 | 40,83,0
994 | 40,83,0
995 | 40,60,1
996 | 45,82,0
997 |
--------------------------------------------------------------------------------
/Dataset/Titanic.csv:
--------------------------------------------------------------------------------
1 | Passengerid,Age,Fare,Sex,sibsp,Parch,Pclass,Embarked,Survived
2 | 1,22,7.25,0,1,0,3,2,0
3 | 2,38,71.2833,1,1,0,1,0,1
4 | 3,26,7.925,1,0,0,3,2,1
5 | 4,35,53.1,1,1,0,1,2,1
6 | 5,35,8.05,0,0,0,3,2,0
7 | 6,28,8.4583,0,0,0,3,1,0
8 | 7,54,51.8625,0,0,0,1,2,0
9 | 8,2,21.075,0,3,1,3,2,0
10 | 9,27,11.1333,1,0,2,3,2,1
11 | 10,14,30.0708,1,1,0,2,0,1
12 | 11,4,16.7,1,1,1,3,2,1
13 | 12,58,26.55,1,0,0,1,2,1
14 | 13,20,8.05,0,0,0,3,2,0
15 | 14,39,31.275,0,1,5,3,2,0
16 | 15,14,7.8542,1,0,0,3,2,0
17 | 16,55,16,1,0,0,2,2,1
18 | 17,2,29.125,0,4,1,3,1,0
19 | 18,28,13,0,0,0,2,2,1
20 | 19,31,18,1,1,0,3,2,0
21 | 20,28,7.225,1,0,0,3,0,1
22 | 21,35,26,0,0,0,2,2,0
23 | 22,34,13,0,0,0,2,2,1
24 | 23,15,8.0292,1,0,0,3,1,1
25 | 24,28,35.5,0,0,0,1,2,1
26 | 25,8,21.075,1,3,1,3,2,0
27 | 26,38,31.3875,1,1,5,3,2,1
28 | 27,28,7.225,0,0,0,3,0,0
29 | 28,19,263,0,3,2,1,2,0
30 | 29,28,7.8792,1,0,0,3,1,1
31 | 30,28,7.8958,0,0,0,3,2,0
32 | 31,40,27.7208,0,0,0,1,0,0
33 | 32,28,146.5208,1,1,0,1,0,1
34 | 33,28,7.75,1,0,0,3,1,1
35 | 34,66,10.5,0,0,0,2,2,0
36 | 35,28,82.1708,0,1,0,1,0,0
37 | 36,42,52,0,1,0,1,2,0
38 | 37,28,7.2292,0,0,0,3,0,1
39 | 38,21,8.05,0,0,0,3,2,0
40 | 39,18,18,1,2,0,3,2,0
41 | 40,14,11.2417,1,1,0,3,0,1
42 | 41,40,9.475,1,1,0,3,2,0
43 | 42,27,21,1,1,0,2,2,0
44 | 43,28,7.8958,0,0,0,3,0,0
45 | 44,3,41.5792,1,1,2,2,0,1
46 | 45,19,7.8792,1,0,0,3,1,1
47 | 46,28,8.05,0,0,0,3,2,0
48 | 47,28,15.5,0,1,0,3,1,0
49 | 48,28,7.75,1,0,0,3,1,1
50 | 49,28,21.6792,0,2,0,3,0,0
51 | 50,18,17.8,1,1,0,3,2,0
52 | 51,7,39.6875,0,4,1,3,2,0
53 | 52,21,7.8,0,0,0,3,2,0
54 | 53,49,76.7292,1,1,0,1,0,1
55 | 54,29,26,1,1,0,2,2,1
56 | 55,65,61.9792,0,0,1,1,0,0
57 | 56,28,35.5,0,0,0,1,2,1
58 | 57,21,10.5,1,0,0,2,2,1
59 | 58,28.5,7.2292,0,0,0,3,0,0
60 | 59,5,27.75,1,1,2,2,2,1
61 | 60,11,46.9,0,5,2,3,2,0
62 | 61,22,7.2292,0,0,0,3,0,0
63 | 62,38,80,1,0,0,1,,1
64 | 63,45,83.475,0,1,0,1,2,0
65 | 64,4,27.9,0,3,2,3,2,0
66 | 65,28,27.7208,0,0,0,1,0,0
67 | 66,28,15.2458,0,1,1,3,0,1
68 | 67,29,10.5,1,0,0,2,2,1
69 | 68,19,8.1583,0,0,0,3,2,0
70 | 69,17,7.925,1,4,2,3,2,1
71 | 70,26,8.6625,0,2,0,3,2,0
72 | 71,32,10.5,0,0,0,2,2,0
73 | 72,16,46.9,1,5,2,3,2,0
74 | 73,21,73.5,0,0,0,2,2,0
75 | 74,26,14.4542,0,1,0,3,0,0
76 | 75,32,56.4958,0,0,0,3,2,1
77 | 76,25,7.65,0,0,0,3,2,0
78 | 77,28,7.8958,0,0,0,3,2,0
79 | 78,28,8.05,0,0,0,3,2,0
80 | 79,0.83,29,0,0,2,2,2,1
81 | 80,30,12.475,1,0,0,3,2,1
82 | 81,22,9,0,0,0,3,2,0
83 | 82,29,9.5,0,0,0,3,2,1
84 | 83,28,7.7875,1,0,0,3,1,1
85 | 84,28,47.1,0,0,0,1,2,0
86 | 85,17,10.5,1,0,0,2,2,1
87 | 86,33,15.85,1,3,0,3,2,1
88 | 87,16,34.375,0,1,3,3,2,0
89 | 88,28,8.05,0,0,0,3,2,0
90 | 89,23,263,1,3,2,1,2,1
91 | 90,24,8.05,0,0,0,3,2,0
92 | 91,29,8.05,0,0,0,3,2,0
93 | 92,20,7.8542,0,0,0,3,2,0
94 | 93,46,61.175,0,1,0,1,2,0
95 | 94,26,20.575,0,1,2,3,2,0
96 | 95,59,7.25,0,0,0,3,2,0
97 | 96,28,8.05,0,0,0,3,2,0
98 | 97,71,34.6542,0,0,0,1,0,0
99 | 98,23,63.3583,0,0,1,1,0,1
100 | 99,34,23,1,0,1,2,2,1
101 | 100,34,26,0,1,0,2,2,0
102 | 101,28,7.8958,1,0,0,3,2,0
103 | 102,28,7.8958,0,0,0,3,2,0
104 | 103,21,77.2875,0,0,1,1,2,0
105 | 104,33,8.6542,0,0,0,3,2,0
106 | 105,37,7.925,0,2,0,3,2,0
107 | 106,28,7.8958,0,0,0,3,2,0
108 | 107,21,7.65,1,0,0,3,2,1
109 | 108,28,7.775,0,0,0,3,2,1
110 | 109,38,7.8958,0,0,0,3,2,0
111 | 110,28,24.15,1,1,0,3,1,1
112 | 111,47,52,0,0,0,1,2,0
113 | 112,14.5,14.4542,1,1,0,3,0,0
114 | 113,22,8.05,0,0,0,3,2,0
115 | 114,20,9.825,1,1,0,3,2,0
116 | 115,17,14.4583,1,0,0,3,0,0
117 | 116,21,7.925,0,0,0,3,2,0
118 | 117,70.5,7.75,0,0,0,3,1,0
119 | 118,29,21,0,1,0,2,2,0
120 | 119,24,247.5208,0,0,1,1,0,0
121 | 120,2,31.275,1,4,2,3,2,0
122 | 121,21,73.5,0,2,0,2,2,0
123 | 122,28,8.05,0,0,0,3,2,0
124 | 123,32.5,30.0708,0,1,0,2,0,0
125 | 124,32.5,13,1,0,0,2,2,1
126 | 125,54,77.2875,0,0,1,1,2,0
127 | 126,12,11.2417,0,1,0,3,0,1
128 | 127,28,7.75,0,0,0,3,1,0
129 | 128,24,7.1417,0,0,0,3,2,1
130 | 129,28,22.3583,1,1,1,3,0,1
131 | 130,45,6.975,0,0,0,3,2,0
132 | 131,33,7.8958,0,0,0,3,0,0
133 | 132,20,7.05,0,0,0,3,2,0
134 | 133,47,14.5,1,1,0,3,2,0
135 | 134,29,26,1,1,0,2,2,1
136 | 135,25,13,0,0,0,2,2,0
137 | 136,23,15.0458,0,0,0,2,0,0
138 | 137,19,26.2833,1,0,2,1,2,1
139 | 138,37,53.1,0,1,0,1,2,0
140 | 139,16,9.2167,0,0,0,3,2,0
141 | 140,24,79.2,0,0,0,1,0,0
142 | 141,28,15.2458,1,0,2,3,0,0
143 | 142,22,7.75,1,0,0,3,2,1
144 | 143,24,15.85,1,1,0,3,2,1
145 | 144,19,6.75,0,0,0,3,1,0
146 | 145,18,11.5,0,0,0,2,2,0
147 | 146,19,36.75,0,1,1,2,2,0
148 | 147,27,7.7958,0,0,0,3,2,1
149 | 148,9,34.375,1,2,2,3,2,0
150 | 149,36.5,26,0,0,2,2,2,0
151 | 150,42,13,0,0,0,2,2,0
152 | 151,51,12.525,0,0,0,2,2,0
153 | 152,22,66.6,1,1,0,1,2,1
154 | 153,55.5,8.05,0,0,0,3,2,0
155 | 154,40.5,14.5,0,0,2,3,2,0
156 | 155,28,7.3125,0,0,0,3,2,0
157 | 156,51,61.3792,0,0,1,1,0,0
158 | 157,16,7.7333,1,0,0,3,1,1
159 | 158,30,8.05,0,0,0,3,2,0
160 | 159,28,8.6625,0,0,0,3,2,0
161 | 160,28,69.55,0,8,2,3,2,0
162 | 161,44,16.1,0,0,1,3,2,0
163 | 162,40,15.75,1,0,0,2,2,1
164 | 163,26,7.775,0,0,0,3,2,0
165 | 164,17,8.6625,0,0,0,3,2,0
166 | 165,1,39.6875,0,4,1,3,2,0
167 | 166,9,20.525,0,0,2,3,2,1
168 | 167,28,55,1,0,1,1,2,1
169 | 168,45,27.9,1,1,4,3,2,0
170 | 169,28,25.925,0,0,0,1,2,0
171 | 170,28,56.4958,0,0,0,3,2,0
172 | 171,61,33.5,0,0,0,1,2,0
173 | 172,4,29.125,0,4,1,3,1,0
174 | 173,1,11.1333,1,1,1,3,2,1
175 | 174,21,7.925,0,0,0,3,2,0
176 | 175,56,30.6958,0,0,0,1,0,0
177 | 176,18,7.8542,0,1,1,3,2,0
178 | 177,28,25.4667,0,3,1,3,2,0
179 | 178,50,28.7125,1,0,0,1,0,0
180 | 179,30,13,0,0,0,2,2,0
181 | 180,36,0,0,0,0,3,2,0
182 | 181,28,69.55,1,8,2,3,2,0
183 | 182,28,15.05,0,0,0,2,0,0
184 | 183,9,31.3875,0,4,2,3,2,0
185 | 184,1,39,0,2,1,2,2,1
186 | 185,4,22.025,1,0,2,3,2,1
187 | 186,28,50,0,0,0,1,2,0
188 | 187,28,15.5,1,1,0,3,1,1
189 | 188,45,26.55,0,0,0,1,2,1
190 | 189,40,15.5,0,1,1,3,1,0
191 | 190,36,7.8958,0,0,0,3,2,0
192 | 191,32,13,1,0,0,2,2,1
193 | 192,19,13,0,0,0,2,2,0
194 | 193,19,7.8542,1,1,0,3,2,1
195 | 194,3,26,0,1,1,2,2,1
196 | 195,44,27.7208,1,0,0,1,0,1
197 | 196,58,146.5208,1,0,0,1,0,1
198 | 197,28,7.75,0,0,0,3,1,0
199 | 198,42,8.4042,0,0,1,3,2,0
200 | 199,28,7.75,1,0,0,3,1,1
201 | 200,24,13,1,0,0,2,2,0
202 | 201,28,9.5,0,0,0,3,2,0
203 | 202,28,69.55,0,8,2,3,2,0
204 | 203,34,6.4958,0,0,0,3,2,0
205 | 204,45.5,7.225,0,0,0,3,0,0
206 | 205,18,8.05,0,0,0,3,2,1
207 | 206,2,10.4625,1,0,1,3,2,0
208 | 207,32,15.85,0,1,0,3,2,0
209 | 208,26,18.7875,0,0,0,3,0,1
210 | 209,16,7.75,1,0,0,3,1,1
211 | 210,40,31,0,0,0,1,0,1
212 | 211,24,7.05,0,0,0,3,2,0
213 | 212,35,21,1,0,0,2,2,1
214 | 213,22,7.25,0,0,0,3,2,0
215 | 214,30,13,0,0,0,2,2,0
216 | 215,28,7.75,0,1,0,3,1,0
217 | 216,31,113.275,1,1,0,1,0,1
218 | 217,27,7.925,1,0,0,3,2,1
219 | 218,42,27,0,1,0,2,2,0
220 | 219,32,76.2917,1,0,0,1,0,1
221 | 220,30,10.5,0,0,0,2,2,0
222 | 221,16,8.05,0,0,0,3,2,1
223 | 222,27,13,0,0,0,2,2,0
224 | 223,51,8.05,0,0,0,3,2,0
225 | 224,28,7.8958,0,0,0,3,2,0
226 | 225,38,90,0,1,0,1,2,1
227 | 226,22,9.35,0,0,0,3,2,0
228 | 227,19,10.5,0,0,0,2,2,1
229 | 228,20.5,7.25,0,0,0,3,2,0
230 | 229,18,13,0,0,0,2,2,0
231 | 230,28,25.4667,1,3,1,3,2,0
232 | 231,35,83.475,1,1,0,1,2,1
233 | 232,29,7.775,0,0,0,3,2,0
234 | 233,59,13.5,0,0,0,2,2,0
235 | 234,5,31.3875,1,4,2,3,2,1
236 | 235,24,10.5,0,0,0,2,2,0
237 | 236,28,7.55,1,0,0,3,2,0
238 | 237,44,26,0,1,0,2,2,0
239 | 238,8,26.25,1,0,2,2,2,1
240 | 239,19,10.5,0,0,0,2,2,0
241 | 240,33,12.275,0,0,0,2,2,0
242 | 241,28,14.4542,1,1,0,3,0,0
243 | 242,28,15.5,1,1,0,3,1,1
244 | 243,29,10.5,0,0,0,2,2,0
245 | 244,22,7.125,0,0,0,3,2,0
246 | 245,30,7.225,0,0,0,3,0,0
247 | 246,44,90,0,2,0,1,1,0
248 | 247,25,7.775,1,0,0,3,2,0
249 | 248,24,14.5,1,0,2,2,2,1
250 | 249,37,52.5542,0,1,1,1,2,1
251 | 250,54,26,0,1,0,2,2,0
252 | 251,28,7.25,0,0,0,3,2,0
253 | 252,29,10.4625,1,1,1,3,2,0
254 | 253,62,26.55,0,0,0,1,2,0
255 | 254,30,16.1,0,1,0,3,2,0
256 | 255,41,20.2125,1,0,2,3,2,0
257 | 256,29,15.2458,1,0,2,3,0,1
258 | 257,28,79.2,1,0,0,1,0,1
259 | 258,30,86.5,1,0,0,1,2,1
260 | 259,35,512.3292,1,0,0,1,0,1
261 | 260,50,26,1,0,1,2,2,1
262 | 261,28,7.75,0,0,0,3,1,0
263 | 262,3,31.3875,0,4,2,3,2,1
264 | 263,52,79.65,0,1,1,1,2,0
265 | 264,40,0,0,0,0,1,2,0
266 | 265,28,7.75,1,0,0,3,1,0
267 | 266,36,10.5,0,0,0,2,2,0
268 | 267,16,39.6875,0,4,1,3,2,0
269 | 268,25,7.775,0,1,0,3,2,1
270 | 269,58,153.4625,1,0,1,1,2,1
271 | 270,35,135.6333,1,0,0,1,2,1
272 | 271,28,31,0,0,0,1,2,0
273 | 272,25,0,0,0,0,3,2,1
274 | 273,41,19.5,1,0,1,2,2,1
275 | 274,37,29.7,0,0,1,1,0,0
276 | 275,28,7.75,1,0,0,3,1,1
277 | 276,63,77.9583,1,1,0,1,2,1
278 | 277,45,7.75,1,0,0,3,2,0
279 | 278,28,0,0,0,0,2,2,0
280 | 279,7,29.125,0,4,1,3,1,0
281 | 280,35,20.25,1,1,1,3,2,1
282 | 281,65,7.75,0,0,0,3,1,0
283 | 282,28,7.8542,0,0,0,3,2,0
284 | 283,16,9.5,0,0,0,3,2,0
285 | 284,19,8.05,0,0,0,3,2,1
286 | 285,28,26,0,0,0,1,2,0
287 | 286,33,8.6625,0,0,0,3,0,0
288 | 287,30,9.5,0,0,0,3,2,1
289 | 288,22,7.8958,0,0,0,3,2,0
290 | 289,42,13,0,0,0,2,2,1
291 | 290,22,7.75,1,0,0,3,1,1
292 | 291,26,78.85,1,0,0,1,2,1
293 | 292,19,91.0792,1,1,0,1,0,1
294 | 293,36,12.875,0,0,0,2,0,0
295 | 294,24,8.85,1,0,0,3,2,0
296 | 295,24,7.8958,0,0,0,3,2,0
297 | 296,28,27.7208,0,0,0,1,0,0
298 | 297,23.5,7.2292,0,0,0,3,0,0
299 | 298,2,151.55,1,1,2,1,2,0
300 | 299,28,30.5,0,0,0,1,2,1
301 | 300,50,247.5208,1,0,1,1,0,1
302 | 301,28,7.75,1,0,0,3,1,1
303 | 302,28,23.25,0,2,0,3,1,1
304 | 303,19,0,0,0,0,3,2,0
305 | 304,28,12.35,1,0,0,2,1,1
306 | 305,28,8.05,0,0,0,3,2,0
307 | 306,0.92,151.55,0,1,2,1,2,1
308 | 307,28,110.8833,1,0,0,1,0,1
309 | 308,17,108.9,1,1,0,1,0,1
310 | 309,30,24,0,1,0,2,0,0
311 | 310,30,56.9292,1,0,0,1,0,1
312 | 311,24,83.1583,1,0,0,1,0,1
313 | 312,18,262.375,1,2,2,1,0,1
314 | 313,26,26,1,1,1,2,2,0
315 | 314,28,7.8958,0,0,0,3,2,0
316 | 315,43,26.25,0,1,1,2,2,0
317 | 316,26,7.8542,1,0,0,3,2,1
318 | 317,24,26,1,1,0,2,2,1
319 | 318,54,14,0,0,0,2,2,0
320 | 319,31,164.8667,1,0,2,1,2,1
321 | 320,40,134.5,1,1,1,1,0,1
322 | 321,22,7.25,0,0,0,3,2,0
323 | 322,27,7.8958,0,0,0,3,2,0
324 | 323,30,12.35,1,0,0,2,1,1
325 | 324,22,29,1,1,1,2,2,1
326 | 325,28,69.55,0,8,2,3,2,0
327 | 326,36,135.6333,1,0,0,1,0,1
328 | 327,61,6.2375,0,0,0,3,2,0
329 | 328,36,13,1,0,0,2,2,1
330 | 329,31,20.525,1,1,1,3,2,1
331 | 330,16,57.9792,1,0,1,1,0,1
332 | 331,28,23.25,1,2,0,3,1,1
333 | 332,45.5,28.5,0,0,0,1,2,0
334 | 333,38,153.4625,0,0,1,1,2,0
335 | 334,16,18,0,2,0,3,2,0
336 | 335,28,133.65,1,1,0,1,2,1
337 | 336,28,7.8958,0,0,0,3,2,0
338 | 337,29,66.6,0,1,0,1,2,0
339 | 338,41,134.5,1,0,0,1,0,1
340 | 339,45,8.05,0,0,0,3,2,1
341 | 340,45,35.5,0,0,0,1,2,0
342 | 341,2,26,0,1,1,2,2,1
343 | 342,24,263,1,3,2,1,2,1
344 | 343,28,13,0,0,0,2,2,0
345 | 344,25,13,0,0,0,2,2,0
346 | 345,36,13,0,0,0,2,2,0
347 | 346,24,13,1,0,0,2,2,1
348 | 347,40,13,1,0,0,2,2,1
349 | 348,28,16.1,1,1,0,3,2,1
350 | 349,3,15.9,0,1,1,3,2,1
351 | 350,42,8.6625,0,0,0,3,2,0
352 | 351,23,9.225,0,0,0,3,2,0
353 | 352,28,35,0,0,0,1,2,0
354 | 353,15,7.2292,0,1,1,3,0,0
355 | 354,25,17.8,0,1,0,3,2,0
356 | 355,28,7.225,0,0,0,3,0,0
357 | 356,28,9.5,0,0,0,3,2,0
358 | 357,22,55,1,0,1,1,2,1
359 | 358,38,13,1,0,0,2,2,0
360 | 359,28,7.8792,1,0,0,3,1,1
361 | 360,28,7.8792,1,0,0,3,1,1
362 | 361,40,27.9,0,1,4,3,2,0
363 | 362,29,27.7208,0,1,0,2,0,0
364 | 363,45,14.4542,1,0,1,3,0,0
365 | 364,35,7.05,0,0,0,3,2,0
366 | 365,28,15.5,0,1,0,3,1,0
367 | 366,30,7.25,0,0,0,3,2,0
368 | 367,60,75.25,1,1,0,1,0,1
369 | 368,28,7.2292,1,0,0,3,0,1
370 | 369,28,7.75,1,0,0,3,1,1
371 | 370,24,69.3,1,0,0,1,0,1
372 | 371,25,55.4417,0,1,0,1,0,1
373 | 372,18,6.4958,0,1,0,3,2,0
374 | 373,19,8.05,0,0,0,3,2,0
375 | 374,22,135.6333,0,0,0,1,0,0
376 | 375,3,21.075,1,3,1,3,2,0
377 | 376,28,82.1708,1,1,0,1,0,1
378 | 377,22,7.25,1,0,0,3,2,1
379 | 378,27,211.5,0,0,2,1,0,0
380 | 379,20,4.0125,0,0,0,3,0,0
381 | 380,19,7.775,0,0,0,3,2,0
382 | 381,42,227.525,1,0,0,1,0,1
383 | 382,1,15.7417,1,0,2,3,0,1
384 | 383,32,7.925,0,0,0,3,2,0
385 | 384,35,52,1,1,0,1,2,1
386 | 385,28,7.8958,0,0,0,3,2,0
387 | 386,18,73.5,0,0,0,2,2,0
388 | 387,1,46.9,0,5,2,3,2,0
389 | 388,36,13,1,0,0,2,2,1
390 | 389,28,7.7292,0,0,0,3,1,0
391 | 390,17,12,1,0,0,2,0,1
392 | 391,36,120,0,1,2,1,2,1
393 | 392,21,7.7958,0,0,0,3,2,1
394 | 393,28,7.925,0,2,0,3,2,0
395 | 394,23,113.275,1,1,0,1,0,1
396 | 395,24,16.7,1,0,2,3,2,1
397 | 396,22,7.7958,0,0,0,3,2,0
398 | 397,31,7.8542,1,0,0,3,2,0
399 | 398,46,26,0,0,0,2,2,0
400 | 399,23,10.5,0,0,0,2,2,0
401 | 400,28,12.65,1,0,0,2,2,1
402 | 401,39,7.925,0,0,0,3,2,1
403 | 402,26,8.05,0,0,0,3,2,0
404 | 403,21,9.825,1,1,0,3,2,0
405 | 404,28,15.85,0,1,0,3,2,0
406 | 405,20,8.6625,1,0,0,3,2,0
407 | 406,34,21,0,1,0,2,2,0
408 | 407,51,7.75,0,0,0,3,2,0
409 | 408,3,18.75,0,1,1,2,2,1
410 | 409,21,7.775,0,0,0,3,2,0
411 | 410,28,25.4667,1,3,1,3,2,0
412 | 411,28,7.8958,0,0,0,3,2,0
413 | 412,28,6.8583,0,0,0,3,1,0
414 | 413,33,90,1,1,0,1,1,1
415 | 414,28,0,0,0,0,2,2,0
416 | 415,44,7.925,0,0,0,3,2,1
417 | 416,28,8.05,1,0,0,3,2,0
418 | 417,34,32.5,1,1,1,2,2,1
419 | 418,18,13,1,0,2,2,2,1
420 | 419,30,13,0,0,0,2,2,0
421 | 420,10,24.15,1,0,2,3,2,0
422 | 421,28,7.8958,0,0,0,3,0,0
423 | 422,21,7.7333,0,0,0,3,1,0
424 | 423,29,7.875,0,0,0,3,2,0
425 | 424,28,14.4,1,1,1,3,2,0
426 | 425,18,20.2125,0,1,1,3,2,0
427 | 426,28,7.25,0,0,0,3,2,0
428 | 427,28,26,1,1,0,2,2,1
429 | 428,19,26,1,0,0,2,2,1
430 | 429,28,7.75,0,0,0,3,1,0
431 | 430,32,8.05,0,0,0,3,2,1
432 | 431,28,26.55,0,0,0,1,2,1
433 | 432,28,16.1,1,1,0,3,2,1
434 | 433,42,26,1,1,0,2,2,1
435 | 434,17,7.125,0,0,0,3,2,0
436 | 435,50,55.9,0,1,0,1,2,0
437 | 436,14,120,1,1,2,1,2,1
438 | 437,21,34.375,1,2,2,3,2,0
439 | 438,24,18.75,1,2,3,2,2,1
440 | 439,64,263,0,1,4,1,2,0
441 | 440,31,10.5,0,0,0,2,2,0
442 | 441,45,26.25,1,1,1,2,2,1
443 | 442,20,9.5,0,0,0,3,2,0
444 | 443,25,7.775,0,1,0,3,2,0
445 | 444,28,13,1,0,0,2,2,1
446 | 445,28,8.1125,0,0,0,3,2,1
447 | 446,4,81.8583,0,0,2,1,2,1
448 | 447,13,19.5,1,0,1,2,2,1
449 | 448,34,26.55,0,0,0,1,2,1
450 | 449,5,19.2583,1,2,1,3,0,1
451 | 450,52,30.5,0,0,0,1,2,1
452 | 451,36,27.75,0,1,2,2,2,0
453 | 452,28,19.9667,0,1,0,3,2,0
454 | 453,30,27.75,0,0,0,1,0,0
455 | 454,49,89.1042,0,1,0,1,0,1
456 | 455,28,8.05,0,0,0,3,2,0
457 | 456,29,7.8958,0,0,0,3,0,1
458 | 457,65,26.55,0,0,0,1,2,0
459 | 458,28,51.8625,1,1,0,1,2,1
460 | 459,50,10.5,1,0,0,2,2,1
461 | 460,28,7.75,0,0,0,3,1,0
462 | 461,48,26.55,0,0,0,1,2,1
463 | 462,34,8.05,0,0,0,3,2,0
464 | 463,47,38.5,0,0,0,1,2,0
465 | 464,48,13,0,0,0,2,2,0
466 | 465,28,8.05,0,0,0,3,2,0
467 | 466,38,7.05,0,0,0,3,2,0
468 | 467,28,0,0,0,0,2,2,0
469 | 468,56,26.55,0,0,0,1,2,0
470 | 469,28,7.725,0,0,0,3,1,0
471 | 470,0.75,19.2583,1,2,1,3,0,1
472 | 471,28,7.25,0,0,0,3,2,0
473 | 472,38,8.6625,0,0,0,3,2,0
474 | 473,33,27.75,1,1,2,2,2,1
475 | 474,23,13.7917,1,0,0,2,0,1
476 | 475,22,9.8375,1,0,0,3,2,0
477 | 476,28,52,0,0,0,1,2,0
478 | 477,34,21,0,1,0,2,2,0
479 | 478,29,7.0458,0,1,0,3,2,0
480 | 479,22,7.5208,0,0,0,3,2,0
481 | 480,2,12.2875,1,0,1,3,2,1
482 | 481,9,46.9,0,5,2,3,2,0
483 | 482,28,0,0,0,0,2,2,0
484 | 483,50,8.05,0,0,0,3,2,0
485 | 484,63,9.5875,1,0,0,3,2,1
486 | 485,25,91.0792,0,1,0,1,0,1
487 | 486,28,25.4667,1,3,1,3,2,0
488 | 487,35,90,1,1,0,1,2,1
489 | 488,58,29.7,0,0,0,1,0,0
490 | 489,30,8.05,0,0,0,3,2,0
491 | 490,9,15.9,0,1,1,3,2,1
492 | 491,28,19.9667,0,1,0,3,2,0
493 | 492,21,7.25,0,0,0,3,2,0
494 | 493,55,30.5,0,0,0,1,2,0
495 | 494,71,49.5042,0,0,0,1,0,0
496 | 495,21,8.05,0,0,0,3,2,0
497 | 496,28,14.4583,0,0,0,3,0,0
498 | 497,54,78.2667,1,1,0,1,0,1
499 | 498,28,15.1,0,0,0,3,2,0
500 | 499,25,151.55,1,1,2,1,2,0
501 | 500,24,7.7958,0,0,0,3,2,0
502 | 501,17,8.6625,0,0,0,3,2,0
503 | 502,21,7.75,1,0,0,3,1,0
504 | 503,28,7.6292,1,0,0,3,1,0
505 | 504,37,9.5875,1,0,0,3,2,0
506 | 505,16,86.5,1,0,0,1,2,1
507 | 506,18,108.9,0,1,0,1,0,0
508 | 507,33,26,1,0,2,2,2,1
509 | 508,28,26.55,0,0,0,1,2,1
510 | 509,28,22.525,0,0,0,3,2,0
511 | 510,26,56.4958,0,0,0,3,2,1
512 | 511,29,7.75,0,0,0,3,1,1
513 | 512,28,8.05,0,0,0,3,2,0
514 | 513,36,26.2875,0,0,0,1,2,1
515 | 514,54,59.4,1,1,0,1,0,1
516 | 515,24,7.4958,0,0,0,3,2,0
517 | 516,47,34.0208,0,0,0,1,2,0
518 | 517,34,10.5,1,0,0,2,2,1
519 | 518,28,24.15,0,0,0,3,1,0
520 | 519,36,26,1,1,0,2,2,1
521 | 520,32,7.8958,0,0,0,3,2,0
522 | 521,30,93.5,1,0,0,1,2,1
523 | 522,22,7.8958,0,0,0,3,2,0
524 | 523,28,7.225,0,0,0,3,0,0
525 | 524,44,57.9792,1,0,1,1,0,1
526 | 525,28,7.2292,0,0,0,3,0,0
527 | 526,40.5,7.75,0,0,0,3,1,0
528 | 527,50,10.5,1,0,0,2,2,1
529 | 528,28,221.7792,0,0,0,1,2,0
530 | 529,39,7.925,0,0,0,3,2,0
531 | 530,23,11.5,0,2,1,2,2,0
532 | 531,2,26,1,1,1,2,2,1
533 | 532,28,7.2292,0,0,0,3,0,0
534 | 533,17,7.2292,0,1,1,3,0,0
535 | 534,28,22.3583,1,0,2,3,0,1
536 | 535,30,8.6625,1,0,0,3,2,0
537 | 536,7,26.25,1,0,2,2,2,1
538 | 537,45,26.55,0,0,0,1,2,0
539 | 538,30,106.425,1,0,0,1,0,1
540 | 539,28,14.5,0,0,0,3,2,0
541 | 540,22,49.5,1,0,2,1,0,1
542 | 541,36,71,1,0,2,1,2,1
543 | 542,9,31.275,1,4,2,3,2,0
544 | 543,11,31.275,1,4,2,3,2,0
545 | 544,32,26,0,1,0,2,2,1
546 | 545,50,106.425,0,1,0,1,0,0
547 | 546,64,26,0,0,0,1,2,0
548 | 547,19,26,1,1,0,2,2,1
549 | 548,28,13.8625,0,0,0,2,0,1
550 | 549,33,20.525,0,1,1,3,2,0
551 | 550,8,36.75,0,1,1,2,2,1
552 | 551,17,110.8833,0,0,2,1,0,1
553 | 552,27,26,0,0,0,2,2,0
554 | 553,28,7.8292,0,0,0,3,1,0
555 | 554,22,7.225,0,0,0,3,0,1
556 | 555,22,7.775,1,0,0,3,2,1
557 | 556,62,26.55,0,0,0,1,2,0
558 | 557,48,39.6,1,1,0,1,0,1
559 | 558,28,227.525,0,0,0,1,0,0
560 | 559,39,79.65,1,1,1,1,2,1
561 | 560,36,17.4,1,1,0,3,2,1
562 | 561,28,7.75,0,0,0,3,1,0
563 | 562,40,7.8958,0,0,0,3,2,0
564 | 563,28,13.5,0,0,0,2,2,0
565 | 564,28,8.05,0,0,0,3,2,0
566 | 565,28,8.05,1,0,0,3,2,0
567 | 566,24,24.15,0,2,0,3,2,0
568 | 567,19,7.8958,0,0,0,3,2,0
569 | 568,29,21.075,1,0,4,3,2,0
570 | 569,28,7.2292,0,0,0,3,0,0
571 | 570,32,7.8542,0,0,0,3,2,1
572 | 571,62,10.5,0,0,0,2,2,1
573 | 572,53,51.4792,1,2,0,1,2,1
574 | 573,36,26.3875,0,0,0,1,2,1
575 | 574,28,7.75,1,0,0,3,1,1
576 | 575,16,8.05,0,0,0,3,2,0
577 | 576,19,14.5,0,0,0,3,2,0
578 | 577,34,13,1,0,0,2,2,1
579 | 578,39,55.9,1,1,0,1,2,1
580 | 579,28,14.4583,1,1,0,3,0,0
581 | 580,32,7.925,0,0,0,3,2,1
582 | 581,25,30,1,1,1,2,2,1
583 | 582,39,110.8833,1,1,1,1,0,1
584 | 583,54,26,0,0,0,2,2,0
585 | 584,36,40.125,0,0,0,1,0,0
586 | 585,28,8.7125,0,0,0,3,0,0
587 | 586,18,79.65,1,0,2,1,2,1
588 | 587,47,15,0,0,0,2,2,0
589 | 588,60,79.2,0,1,1,1,0,1
590 | 589,22,8.05,0,0,0,3,2,0
591 | 590,28,8.05,0,0,0,3,2,0
592 | 591,35,7.125,0,0,0,3,2,0
593 | 592,52,78.2667,1,1,0,1,0,1
594 | 593,47,7.25,0,0,0,3,2,0
595 | 594,28,7.75,1,0,2,3,1,0
596 | 595,37,26,0,1,0,2,2,0
597 | 596,36,24.15,0,1,1,3,2,0
598 | 597,28,33,1,0,0,2,2,1
599 | 598,49,0,0,0,0,3,2,0
600 | 599,28,7.225,0,0,0,3,0,0
601 | 600,49,56.9292,0,1,0,1,0,1
602 | 601,24,27,1,2,1,2,2,1
603 | 602,28,7.8958,0,0,0,3,2,0
604 | 603,28,42.4,0,0,0,1,2,0
605 | 604,44,8.05,0,0,0,3,2,0
606 | 605,35,26.55,0,0,0,1,0,1
607 | 606,36,15.55,0,1,0,3,2,0
608 | 607,30,7.8958,0,0,0,3,2,0
609 | 608,27,30.5,0,0,0,1,2,1
610 | 609,22,41.5792,1,1,2,2,0,1
611 | 610,40,153.4625,1,0,0,1,2,1
612 | 611,39,31.275,1,1,5,3,2,0
613 | 612,28,7.05,0,0,0,3,2,0
614 | 613,28,15.5,1,1,0,3,1,1
615 | 614,28,7.75,0,0,0,3,1,0
616 | 615,35,8.05,0,0,0,3,2,0
617 | 616,24,65,1,1,2,2,2,1
618 | 617,34,14.4,0,1,1,3,2,0
619 | 618,26,16.1,1,1,0,3,2,0
620 | 619,4,39,1,2,1,2,2,1
621 | 620,26,10.5,0,0,0,2,2,0
622 | 621,27,14.4542,0,1,0,3,0,0
623 | 622,42,52.5542,0,1,0,1,2,1
624 | 623,20,15.7417,0,1,1,3,0,1
625 | 624,21,7.8542,0,0,0,3,2,0
626 | 625,21,16.1,0,0,0,3,2,0
627 | 626,61,32.3208,0,0,0,1,2,0
628 | 627,57,12.35,0,0,0,2,1,0
629 | 628,21,77.9583,1,0,0,1,2,1
630 | 629,26,7.8958,0,0,0,3,2,0
631 | 630,28,7.7333,0,0,0,3,1,0
632 | 631,80,30,0,0,0,1,2,1
633 | 632,51,7.0542,0,0,0,3,2,0
634 | 633,32,30.5,0,0,0,1,0,1
635 | 634,28,0,0,0,0,1,2,0
636 | 635,9,27.9,1,3,2,3,2,0
637 | 636,28,13,1,0,0,2,2,1
638 | 637,32,7.925,0,0,0,3,2,0
639 | 638,31,26.25,0,1,1,2,2,0
640 | 639,41,39.6875,1,0,5,3,2,0
641 | 640,28,16.1,0,1,0,3,2,0
642 | 641,20,7.8542,0,0,0,3,2,0
643 | 642,24,69.3,1,0,0,1,0,1
644 | 643,2,27.9,1,3,2,3,2,0
645 | 644,28,56.4958,0,0,0,3,2,1
646 | 645,0.75,19.2583,1,2,1,3,0,1
647 | 646,48,76.7292,0,1,0,1,0,1
648 | 647,19,7.8958,0,0,0,3,2,0
649 | 648,56,35.5,0,0,0,1,0,1
650 | 649,28,7.55,0,0,0,3,2,0
651 | 650,23,7.55,1,0,0,3,2,1
652 | 651,28,7.8958,0,0,0,3,2,0
653 | 652,18,23,1,0,1,2,2,1
654 | 653,21,8.4333,0,0,0,3,2,0
655 | 654,28,7.8292,1,0,0,3,1,1
656 | 655,18,6.75,1,0,0,3,1,0
657 | 656,24,73.5,0,2,0,2,2,0
658 | 657,28,7.8958,0,0,0,3,2,0
659 | 658,32,15.5,1,1,1,3,1,0
660 | 659,23,13,0,0,0,2,2,0
661 | 660,58,113.275,0,0,2,1,0,0
662 | 661,50,133.65,0,2,0,1,2,1
663 | 662,40,7.225,0,0,0,3,0,0
664 | 663,47,25.5875,0,0,0,1,2,0
665 | 664,36,7.4958,0,0,0,3,2,0
666 | 665,20,7.925,0,1,0,3,2,1
667 | 666,32,73.5,0,2,0,2,2,0
668 | 667,25,13,0,0,0,2,2,0
669 | 668,28,7.775,0,0,0,3,2,0
670 | 669,43,8.05,0,0,0,3,2,0
671 | 670,28,52,1,1,0,1,2,1
672 | 671,40,39,1,1,1,2,2,1
673 | 672,31,52,0,1,0,1,2,0
674 | 673,70,10.5,0,0,0,2,2,0
675 | 674,31,13,0,0,0,2,2,1
676 | 675,28,0,0,0,0,2,2,0
677 | 676,18,7.775,0,0,0,3,2,0
678 | 677,24.5,8.05,0,0,0,3,2,0
679 | 678,18,9.8417,1,0,0,3,2,1
680 | 679,43,46.9,1,1,6,3,2,0
681 | 680,36,512.3292,0,0,1,1,0,1
682 | 681,28,8.1375,1,0,0,3,1,0
683 | 682,27,76.7292,0,0,0,1,0,1
684 | 683,20,9.225,0,0,0,3,2,0
685 | 684,14,46.9,0,5,2,3,2,0
686 | 685,60,39,0,1,1,2,2,0
687 | 686,25,41.5792,0,1,2,2,0,0
688 | 687,14,39.6875,0,4,1,3,2,0
689 | 688,19,10.1708,0,0,0,3,2,0
690 | 689,18,7.7958,0,0,0,3,2,0
691 | 690,15,211.3375,1,0,1,1,2,1
692 | 691,31,57,0,1,0,1,2,1
693 | 692,4,13.4167,1,0,1,3,0,1
694 | 693,28,56.4958,0,0,0,3,2,1
695 | 694,25,7.225,0,0,0,3,0,0
696 | 695,60,26.55,0,0,0,1,2,0
697 | 696,52,13.5,0,0,0,2,2,0
698 | 697,44,8.05,0,0,0,3,2,0
699 | 698,28,7.7333,1,0,0,3,1,1
700 | 699,49,110.8833,0,1,1,1,0,0
701 | 700,42,7.65,0,0,0,3,2,0
702 | 701,18,227.525,1,1,0,1,0,1
703 | 702,35,26.2875,0,0,0,1,2,1
704 | 703,18,14.4542,1,0,1,3,0,0
705 | 704,25,7.7417,0,0,0,3,1,0
706 | 705,26,7.8542,0,1,0,3,2,0
707 | 706,39,26,0,0,0,2,2,0
708 | 707,45,13.5,1,0,0,2,2,1
709 | 708,42,26.2875,0,0,0,1,2,1
710 | 709,22,151.55,1,0,0,1,2,1
711 | 710,28,15.2458,0,1,1,3,0,1
712 | 711,24,49.5042,1,0,0,1,0,1
713 | 712,28,26.55,0,0,0,1,2,0
714 | 713,48,52,0,1,0,1,2,1
715 | 714,29,9.4833,0,0,0,3,2,0
716 | 715,52,13,0,0,0,2,2,0
717 | 716,19,7.65,0,0,0,3,2,0
718 | 717,38,227.525,1,0,0,1,0,1
719 | 718,27,10.5,1,0,0,2,2,1
720 | 719,28,15.5,0,0,0,3,1,0
721 | 720,33,7.775,0,0,0,3,2,0
722 | 721,6,33,1,0,1,2,2,1
723 | 722,17,7.0542,0,1,0,3,2,0
724 | 723,34,13,0,0,0,2,2,0
725 | 724,50,13,0,0,0,2,2,0
726 | 725,27,53.1,0,1,0,1,2,1
727 | 726,20,8.6625,0,0,0,3,2,0
728 | 727,30,21,1,3,0,2,2,1
729 | 728,28,7.7375,1,0,0,3,1,1
730 | 729,25,26,0,1,0,2,2,0
731 | 730,25,7.925,1,1,0,3,2,0
732 | 731,29,211.3375,1,0,0,1,2,1
733 | 732,11,18.7875,0,0,0,3,0,0
734 | 733,28,0,0,0,0,2,2,0
735 | 734,23,13,0,0,0,2,2,0
736 | 735,23,13,0,0,0,2,2,0
737 | 736,28.5,16.1,0,0,0,3,2,0
738 | 737,48,34.375,1,1,3,3,2,0
739 | 738,35,512.3292,0,0,0,1,0,1
740 | 739,28,7.8958,0,0,0,3,2,0
741 | 740,28,7.8958,0,0,0,3,2,0
742 | 741,28,30,0,0,0,1,2,1
743 | 742,36,78.85,0,1,0,1,2,0
744 | 743,21,262.375,1,2,2,1,0,1
745 | 744,24,16.1,0,1,0,3,2,0
746 | 745,31,7.925,0,0,0,3,2,1
747 | 746,70,71,0,1,1,1,2,0
748 | 747,16,20.25,0,1,1,3,2,0
749 | 748,30,13,1,0,0,2,2,1
750 | 749,19,53.1,0,1,0,1,2,0
751 | 750,31,7.75,0,0,0,3,1,0
752 | 751,4,23,1,1,1,2,2,1
753 | 752,6,12.475,0,0,1,3,2,1
754 | 753,33,9.5,0,0,0,3,2,0
755 | 754,23,7.8958,0,0,0,3,2,0
756 | 755,48,65,1,1,2,2,2,1
757 | 756,0.67,14.5,0,1,1,2,2,1
758 | 757,28,7.7958,0,0,0,3,2,0
759 | 758,18,11.5,0,0,0,2,2,0
760 | 759,34,8.05,0,0,0,3,2,0
761 | 760,33,86.5,1,0,0,1,2,1
762 | 761,28,14.5,0,0,0,3,2,0
763 | 762,41,7.125,0,0,0,3,2,0
764 | 763,20,7.2292,0,0,0,3,0,1
765 | 764,36,120,1,1,2,1,2,1
766 | 765,16,7.775,0,0,0,3,2,0
767 | 766,51,77.9583,1,1,0,1,2,1
768 | 767,28,39.6,0,0,0,1,0,0
769 | 768,30.5,7.75,1,0,0,3,1,0
770 | 769,28,24.15,0,1,0,3,1,0
771 | 770,32,8.3625,0,0,0,3,2,0
772 | 771,24,9.5,0,0,0,3,2,0
773 | 772,48,7.8542,0,0,0,3,2,0
774 | 773,57,10.5,1,0,0,2,2,0
775 | 774,28,7.225,0,0,0,3,0,0
776 | 775,54,23,1,1,3,2,2,1
777 | 776,18,7.75,0,0,0,3,2,0
778 | 777,28,7.75,0,0,0,3,1,0
779 | 778,5,12.475,1,0,0,3,2,1
780 | 779,28,7.7375,0,0,0,3,1,0
781 | 780,43,211.3375,1,0,1,1,2,1
782 | 781,13,7.2292,1,0,0,3,0,1
783 | 782,17,57,1,1,0,1,2,1
784 | 783,29,30,0,0,0,1,2,0
785 | 784,28,23.45,0,1,2,3,2,0
786 | 785,25,7.05,0,0,0,3,2,0
787 | 786,25,7.25,0,0,0,3,2,0
788 | 787,18,7.4958,1,0,0,3,2,1
789 | 788,8,29.125,0,4,1,3,1,0
790 | 789,1,20.575,0,1,2,3,2,1
791 | 790,46,79.2,0,0,0,1,0,0
792 | 791,28,7.75,0,0,0,3,1,0
793 | 792,16,26,0,0,0,2,2,0
794 | 793,28,69.55,1,8,2,3,2,0
795 | 794,28,30.6958,0,0,0,1,0,0
796 | 795,25,7.8958,0,0,0,3,2,0
797 | 796,39,13,0,0,0,2,2,0
798 | 797,49,25.9292,1,0,0,1,2,1
799 | 798,31,8.6833,1,0,0,3,2,1
800 | 799,30,7.2292,0,0,0,3,0,0
801 | 800,30,24.15,1,1,1,3,2,0
802 | 801,34,13,0,0,0,2,2,0
803 | 802,31,26.25,1,1,1,2,2,1
804 | 803,11,120,0,1,2,1,2,1
805 | 804,0.42,8.5167,0,0,1,3,0,1
806 | 805,27,6.975,0,0,0,3,2,1
807 | 806,31,7.775,0,0,0,3,2,0
808 | 807,39,0,0,0,0,1,2,0
809 | 808,18,7.775,1,0,0,3,2,0
810 | 809,39,13,0,0,0,2,2,0
811 | 810,33,53.1,1,1,0,1,2,1
812 | 811,26,7.8875,0,0,0,3,2,0
813 | 812,39,24.15,0,0,0,3,2,0
814 | 813,35,10.5,0,0,0,2,2,0
815 | 814,6,31.275,1,4,2,3,2,0
816 | 815,30.5,8.05,0,0,0,3,2,0
817 | 816,28,0,0,0,0,1,2,0
818 | 817,23,7.925,1,0,0,3,2,0
819 | 818,31,37.0042,0,1,1,2,0,0
820 | 819,43,6.45,0,0,0,3,2,0
821 | 820,10,27.9,0,3,2,3,2,0
822 | 821,52,93.5,1,1,1,1,2,1
823 | 822,27,8.6625,0,0,0,3,2,1
824 | 823,38,0,0,0,0,1,2,0
825 | 824,27,12.475,1,0,1,3,2,1
826 | 825,2,39.6875,0,4,1,3,2,0
827 | 826,28,6.95,0,0,0,3,1,0
828 | 827,28,56.4958,0,0,0,3,2,0
829 | 828,1,37.0042,0,0,2,2,0,1
830 | 829,28,7.75,0,0,0,3,1,1
831 | 830,62,80,1,0,0,1,,1
832 | 831,15,14.4542,1,1,0,3,0,1
833 | 832,0.83,18.75,0,1,1,2,2,1
834 | 833,28,7.2292,0,0,0,3,0,0
835 | 834,23,7.8542,0,0,0,3,2,0
836 | 835,18,8.3,0,0,0,3,2,0
837 | 836,39,83.1583,1,1,1,1,0,1
838 | 837,21,8.6625,0,0,0,3,2,0
839 | 838,28,8.05,0,0,0,3,2,0
840 | 839,32,56.4958,0,0,0,3,2,1
841 | 840,28,29.7,0,0,0,1,0,1
842 | 841,20,7.925,0,0,0,3,2,0
843 | 842,16,10.5,0,0,0,2,2,0
844 | 843,30,31,1,0,0,1,0,1
845 | 844,34.5,6.4375,0,0,0,3,0,0
846 | 845,17,8.6625,0,0,0,3,2,0
847 | 846,42,7.55,0,0,0,3,2,0
848 | 847,28,69.55,0,8,2,3,2,0
849 | 848,35,7.8958,0,0,0,3,0,0
850 | 849,28,33,0,0,1,2,2,0
851 | 850,28,89.1042,1,1,0,1,0,1
852 | 851,4,31.275,0,4,2,3,2,0
853 | 852,74,7.775,0,0,0,3,2,0
854 | 853,9,15.2458,1,1,1,3,0,0
855 | 854,16,39.4,1,0,1,1,2,1
856 | 855,44,26,1,1,0,2,2,0
857 | 856,18,9.35,1,0,1,3,2,1
858 | 857,45,164.8667,1,1,1,1,2,1
859 | 858,51,26.55,0,0,0,1,2,1
860 | 859,24,19.2583,1,0,3,3,0,1
861 | 860,28,7.2292,0,0,0,3,0,0
862 | 861,41,14.1083,0,2,0,3,2,0
863 | 862,21,11.5,0,1,0,2,2,0
864 | 863,48,25.9292,1,0,0,1,2,1
865 | 864,28,69.55,1,8,2,3,2,0
866 | 865,24,13,0,0,0,2,2,0
867 | 866,42,13,1,0,0,2,2,1
868 | 867,27,13.8583,1,1,0,2,0,1
869 | 868,31,50.4958,0,0,0,1,2,0
870 | 869,28,9.5,0,0,0,3,2,0
871 | 870,4,11.1333,0,1,1,3,2,1
872 | 871,26,7.8958,0,0,0,3,2,0
873 | 872,47,52.5542,1,1,1,1,2,1
874 | 873,33,5,0,0,0,1,2,0
875 | 874,47,9,0,0,0,3,2,0
876 | 875,28,24,1,1,0,2,0,1
877 | 876,15,7.225,1,0,0,3,0,1
878 | 877,20,9.8458,0,0,0,3,2,0
879 | 878,19,7.8958,0,0,0,3,2,0
880 | 879,28,7.8958,0,0,0,3,2,0
881 | 880,56,83.1583,1,0,1,1,0,1
882 | 881,25,26,1,0,1,2,2,1
883 | 882,33,7.8958,0,0,0,3,2,0
884 | 883,22,10.5167,1,0,0,3,2,0
885 | 884,28,10.5,0,0,0,2,2,0
886 | 885,25,7.05,0,0,0,3,2,0
887 | 886,39,29.125,1,0,5,3,1,0
888 | 887,27,13,0,0,0,2,2,0
889 | 888,19,30,1,0,0,1,2,1
890 | 889,28,23.45,1,1,2,3,2,0
891 | 890,26,30,0,0,0,1,0,1
892 | 891,32,7.75,0,0,0,3,1,0
893 | 892,34.5,7.8292,0,0,0,3,1,0
894 | 893,47,7,1,1,0,3,2,0
895 | 894,62,9.6875,0,0,0,2,1,0
896 | 895,27,8.6625,0,0,0,3,2,0
897 | 896,22,12.2875,1,1,1,3,2,0
898 | 897,14,9.225,0,0,0,3,2,0
899 | 898,30,7.6292,1,0,0,3,1,0
900 | 899,26,29,0,1,1,2,2,0
901 | 900,18,7.2292,1,0,0,3,0,0
902 | 901,21,24.15,0,2,0,3,2,0
903 | 902,28,7.8958,0,0,0,3,2,0
904 | 903,46,26,0,0,0,1,2,0
905 | 904,23,82.2667,1,1,0,1,2,0
906 | 905,63,26,0,1,0,2,2,0
907 | 906,47,61.175,1,1,0,1,2,0
908 | 907,24,27.7208,1,1,0,2,0,0
909 | 908,35,12.35,0,0,0,2,1,0
910 | 909,21,7.225,0,0,0,3,0,0
911 | 910,27,7.925,1,1,0,3,2,0
912 | 911,45,7.225,1,0,0,3,0,0
913 | 912,55,59.4,0,1,0,1,0,0
914 | 913,9,3.1708,0,0,1,3,2,0
915 | 914,28,31.6833,1,0,0,1,2,0
916 | 915,21,61.3792,0,0,1,1,0,0
917 | 916,48,262.375,1,1,3,1,0,0
918 | 917,50,14.5,0,1,0,3,2,0
919 | 918,22,61.9792,1,0,1,1,0,0
920 | 919,22.5,7.225,0,0,0,3,0,0
921 | 920,41,30.5,0,0,0,1,2,0
922 | 921,28,21.6792,0,2,0,3,0,0
923 | 922,50,26,0,1,0,2,2,0
924 | 923,24,31.5,0,2,0,2,2,0
925 | 924,33,20.575,1,1,2,3,2,0
926 | 925,28,23.45,1,1,2,3,2,0
927 | 926,30,57.75,0,1,0,1,0,0
928 | 927,18.5,7.2292,0,0,0,3,0,0
929 | 928,28,8.05,1,0,0,3,2,0
930 | 929,21,8.6625,1,0,0,3,2,0
931 | 930,25,9.5,0,0,0,3,2,0
932 | 931,28,56.4958,0,0,0,3,2,0
933 | 932,39,13.4167,0,0,1,3,0,0
934 | 933,28,26.55,0,0,0,1,2,0
935 | 934,41,7.85,0,0,0,3,2,0
936 | 935,30,13,1,0,0,2,2,0
937 | 936,45,52.5542,1,1,0,1,2,0
938 | 937,25,7.925,0,0,0,3,2,0
939 | 938,45,29.7,0,0,0,1,0,0
940 | 939,28,7.75,0,0,0,3,1,0
941 | 940,60,76.2917,1,0,0,1,0,0
942 | 941,36,15.9,1,0,2,3,2,0
943 | 942,24,60,0,1,0,1,2,0
944 | 943,27,15.0333,0,0,0,2,0,0
945 | 944,20,23,1,2,1,2,2,0
946 | 945,28,263,1,3,2,1,2,0
947 | 946,28,15.5792,0,0,0,2,0,0
948 | 947,10,29.125,0,4,1,3,1,0
949 | 948,35,7.8958,0,0,0,3,2,0
950 | 949,25,7.65,0,0,0,3,2,0
951 | 950,28,16.1,0,1,0,3,2,0
952 | 951,36,262.375,1,0,0,1,0,0
953 | 952,17,7.8958,0,0,0,3,2,0
954 | 953,32,13.5,0,0,0,2,2,0
955 | 954,18,7.75,0,0,0,3,2,0
956 | 955,22,7.725,1,0,0,3,1,0
957 | 956,13,262.375,0,2,2,1,0,0
958 | 957,28,21,1,0,0,2,2,0
959 | 958,18,7.8792,1,0,0,3,1,0
960 | 959,47,42.4,0,0,0,1,2,0
961 | 960,31,28.5375,0,0,0,1,0,0
962 | 961,60,263,1,1,4,1,2,0
963 | 962,24,7.75,1,0,0,3,1,0
964 | 963,21,7.8958,0,0,0,3,2,0
965 | 964,29,7.925,1,0,0,3,2,0
966 | 965,28.5,27.7208,0,0,0,1,0,0
967 | 966,35,211.5,1,0,0,1,0,0
968 | 967,32.5,211.5,0,0,0,1,0,0
969 | 968,28,8.05,0,0,0,3,2,0
970 | 969,55,25.7,1,2,0,1,2,0
971 | 970,30,13,0,0,0,2,2,0
972 | 971,24,7.75,1,0,0,3,1,0
973 | 972,6,15.2458,0,1,1,3,0,0
974 | 973,67,221.7792,0,1,0,1,2,0
975 | 974,49,26,0,0,0,1,2,0
976 | 975,28,7.8958,0,0,0,3,2,0
977 | 976,28,10.7083,0,0,0,2,1,0
978 | 977,28,14.4542,0,1,0,3,0,0
979 | 978,27,7.8792,1,0,0,3,1,0
980 | 979,18,8.05,1,0,0,3,2,0
981 | 980,28,7.75,1,0,0,3,1,0
982 | 981,2,23,0,1,1,2,2,0
983 | 982,22,13.9,1,1,0,3,2,0
984 | 983,28,7.775,0,0,0,3,2,0
985 | 984,27,52,1,1,2,1,2,0
986 | 985,28,8.05,0,0,0,3,2,0
987 | 986,25,26,0,0,0,1,0,0
988 | 987,25,7.7958,0,0,0,3,2,0
989 | 988,76,78.85,1,1,0,1,2,0
990 | 989,29,7.925,0,0,0,3,2,0
991 | 990,20,7.8542,1,0,0,3,2,0
992 | 991,33,8.05,0,0,0,3,2,0
993 | 992,43,55.4417,1,1,0,1,0,0
994 | 993,27,26,0,1,0,2,2,0
995 | 994,28,7.75,0,0,0,3,1,0
996 | 995,26,7.775,0,0,0,3,2,0
997 | 996,16,8.5167,1,1,1,3,0,0
998 | 997,28,22.525,0,0,0,3,2,0
999 | 998,21,7.8208,0,0,0,3,1,0
1000 | 999,28,7.75,0,0,0,3,1,0
1001 | 1000,28,8.7125,0,0,0,3,2,0
1002 | 1001,18.5,13,0,0,0,2,2,0
1003 | 1002,41,15.0458,0,0,0,2,0,0
1004 | 1003,28,7.7792,1,0,0,3,1,0
1005 | 1004,36,31.6792,1,0,0,1,0,0
1006 | 1005,18.5,7.2833,1,0,0,3,1,0
1007 | 1006,63,221.7792,1,1,0,1,2,0
1008 | 1007,18,14.4542,0,1,0,3,0,0
1009 | 1008,28,6.4375,0,0,0,3,0,0
1010 | 1009,1,16.7,1,1,1,3,2,0
1011 | 1010,36,75.2417,0,0,0,1,0,0
1012 | 1011,29,26,1,1,0,2,2,0
1013 | 1012,12,15.75,1,0,0,2,2,0
1014 | 1013,28,7.75,0,1,0,3,1,0
1015 | 1014,35,57.75,1,1,0,1,0,0
1016 | 1015,28,7.25,0,0,0,3,2,0
1017 | 1016,28,7.75,0,0,0,3,1,0
1018 | 1017,17,16.1,1,0,1,3,2,0
1019 | 1018,22,7.7958,0,0,0,3,2,0
1020 | 1019,28,23.25,1,2,0,3,1,0
1021 | 1020,42,13,0,0,0,2,2,0
1022 | 1021,24,8.05,0,0,0,3,2,0
1023 | 1022,32,8.05,0,0,0,3,2,0
1024 | 1023,53,28.5,0,0,0,1,0,0
1025 | 1024,28,25.4667,1,0,4,3,2,0
1026 | 1025,28,6.4375,0,1,0,3,0,0
1027 | 1026,43,7.8958,0,0,0,3,2,0
1028 | 1027,24,7.8542,0,0,0,3,2,0
1029 | 1028,26.5,7.225,0,0,0,3,0,0
1030 | 1029,26,13,0,0,0,2,2,0
1031 | 1030,23,8.05,1,0,0,3,2,0
1032 | 1031,40,46.9,0,1,6,3,2,0
1033 | 1032,10,46.9,1,5,2,3,2,0
1034 | 1033,33,151.55,1,0,0,1,2,0
1035 | 1034,61,262.375,0,1,3,1,0,0
1036 | 1035,28,26,0,0,0,2,2,0
1037 | 1036,42,26.55,0,0,0,1,2,0
1038 | 1037,31,18,0,3,0,3,2,0
1039 | 1038,28,51.8625,0,0,0,1,2,0
1040 | 1039,22,8.05,0,0,0,3,2,0
1041 | 1040,28,26.55,0,0,0,1,2,0
1042 | 1041,30,26,0,1,1,2,2,0
1043 | 1042,23,83.1583,1,0,1,1,0,0
1044 | 1043,28,7.8958,0,0,0,3,0,0
1045 | 1044,60.5,14.4542,0,0,0,3,2,0
1046 | 1045,36,12.1833,1,0,2,3,2,0
1047 | 1046,13,31.3875,0,4,2,3,2,0
1048 | 1047,24,7.55,0,0,0,3,2,0
1049 | 1048,29,221.7792,1,0,0,1,2,0
1050 | 1049,23,7.8542,1,0,0,3,2,0
1051 | 1050,42,26.55,0,0,0,1,2,0
1052 | 1051,26,13.775,1,0,2,3,2,0
1053 | 1052,28,7.7333,1,0,0,3,1,0
1054 | 1053,7,15.2458,0,1,1,3,0,0
1055 | 1054,26,13.5,1,0,0,2,2,0
1056 | 1055,28,7,0,0,0,3,2,0
1057 | 1056,41,13,0,0,0,2,2,0
1058 | 1057,26,22.025,1,1,1,3,2,0
1059 | 1058,48,50.4958,0,0,0,1,0,0
1060 | 1059,18,34.375,0,2,2,3,2,0
1061 | 1060,28,27.7208,1,0,0,1,0,0
1062 | 1061,22,8.9625,1,0,0,3,2,0
1063 | 1062,28,7.55,0,0,0,3,2,0
1064 | 1063,27,7.225,0,0,0,3,0,0
1065 | 1064,23,13.9,0,1,0,3,2,0
1066 | 1065,28,7.2292,0,0,0,3,0,0
1067 | 1066,40,31.3875,0,1,5,3,2,0
1068 | 1067,15,39,1,0,2,2,2,0
1069 | 1068,20,36.75,1,0,0,2,2,0
1070 | 1069,54,55.4417,0,1,0,1,0,0
1071 | 1070,36,39,1,0,3,2,2,0
1072 | 1071,64,83.1583,1,0,2,1,0,0
1073 | 1072,30,13,0,0,0,2,2,0
1074 | 1073,37,83.1583,0,1,1,1,0,0
1075 | 1074,18,53.1,1,1,0,1,2,0
1076 | 1075,28,7.75,0,0,0,3,1,0
1077 | 1076,27,247.5208,1,1,1,1,0,0
1078 | 1077,40,16,0,0,0,2,2,0
1079 | 1078,21,21,1,0,1,2,2,0
1080 | 1079,17,8.05,0,2,0,3,2,0
1081 | 1080,28,69.55,1,8,2,3,2,0
1082 | 1081,40,13,0,0,0,2,2,0
1083 | 1082,34,26,0,1,0,2,2,0
1084 | 1083,28,26,0,0,0,1,2,0
1085 | 1084,11.5,14.5,0,1,1,3,2,0
1086 | 1085,61,12.35,0,0,0,2,1,0
1087 | 1086,8,32.5,0,0,2,2,2,0
1088 | 1087,33,7.8542,0,0,0,3,2,0
1089 | 1088,6,134.5,0,0,2,1,0,0
1090 | 1089,18,7.775,1,0,0,3,2,0
1091 | 1090,23,10.5,0,0,0,2,2,0
1092 | 1091,28,8.1125,1,0,0,3,2,0
1093 | 1092,28,15.5,1,0,0,3,1,0
1094 | 1093,0.33,14.4,0,0,2,3,2,0
1095 | 1094,47,227.525,0,1,0,1,0,0
1096 | 1095,8,26,1,1,1,2,2,0
1097 | 1096,25,10.5,0,0,0,2,2,0
1098 | 1097,28,25.7417,0,0,0,1,0,0
1099 | 1098,35,7.75,1,0,0,3,1,0
1100 | 1099,24,10.5,0,0,0,2,2,0
1101 | 1100,33,27.7208,1,0,0,1,0,0
1102 | 1101,25,7.8958,0,0,0,3,2,0
1103 | 1102,32,22.525,0,0,0,3,2,0
1104 | 1103,28,7.05,0,0,0,3,2,0
1105 | 1104,17,73.5,0,0,0,2,2,0
1106 | 1105,60,26,1,1,0,2,2,0
1107 | 1106,38,7.775,1,4,2,3,2,0
1108 | 1107,42,42.5,0,0,0,1,2,0
1109 | 1108,28,7.8792,1,0,0,3,1,0
1110 | 1109,57,164.8667,0,1,1,1,2,0
1111 | 1110,50,211.5,1,1,1,1,0,0
1112 | 1111,28,8.05,0,0,0,3,2,0
1113 | 1112,30,13.8583,1,1,0,2,0,0
1114 | 1113,21,8.05,0,0,0,3,2,0
1115 | 1114,22,10.5,1,0,0,2,2,0
1116 | 1115,21,7.7958,0,0,0,3,2,0
1117 | 1116,53,27.4458,1,0,0,1,0,0
1118 | 1117,28,15.2458,1,0,2,3,0,0
1119 | 1118,23,7.7958,0,0,0,3,2,0
1120 | 1119,28,7.75,1,0,0,3,1,0
1121 | 1120,40.5,15.1,0,0,0,3,2,0
1122 | 1121,36,13,0,0,0,2,2,0
1123 | 1122,14,65,0,0,0,2,2,0
1124 | 1123,21,26.55,1,0,0,1,2,0
1125 | 1124,21,6.4958,0,1,0,3,2,0
1126 | 1125,28,7.8792,0,0,0,3,1,0
1127 | 1126,39,71.2833,0,1,0,1,0,0
1128 | 1127,20,7.8542,0,0,0,3,2,0
1129 | 1128,64,75.25,0,1,0,1,0,0
1130 | 1129,20,7.225,0,0,0,3,0,0
1131 | 1130,18,13,1,1,1,2,2,0
1132 | 1131,48,106.425,1,1,0,1,0,0
1133 | 1132,55,27.7208,1,0,0,1,0,0
1134 | 1133,45,30,1,0,2,2,2,0
1135 | 1134,45,134.5,0,1,1,1,0,0
1136 | 1135,28,7.8875,0,0,0,3,2,0
1137 | 1136,28,23.45,0,1,2,3,2,0
1138 | 1137,41,51.8625,0,1,0,1,2,0
1139 | 1138,22,21,1,0,0,2,2,0
1140 | 1139,42,32.5,0,1,1,2,2,0
1141 | 1140,29,26,1,1,0,2,2,0
1142 | 1141,28,14.4542,1,1,0,3,0,0
1143 | 1142,0.92,27.75,1,1,2,2,2,0
1144 | 1143,20,7.925,0,0,0,3,2,0
1145 | 1144,27,136.7792,0,1,0,1,0,0
1146 | 1145,24,9.325,0,0,0,3,2,0
1147 | 1146,32.5,9.5,0,0,0,3,2,0
1148 | 1147,28,7.55,0,0,0,3,2,0
1149 | 1148,28,7.75,0,0,0,3,1,0
1150 | 1149,28,8.05,0,0,0,3,2,0
1151 | 1150,19,13,1,0,0,2,2,0
1152 | 1151,21,7.775,0,0,0,3,2,0
1153 | 1152,36.5,17.4,0,1,0,3,2,0
1154 | 1153,21,7.8542,0,0,0,3,2,0
1155 | 1154,29,23,1,0,2,2,2,0
1156 | 1155,1,12.1833,1,1,1,3,2,0
1157 | 1156,30,12.7375,0,0,0,2,0,0
1158 | 1157,28,7.8958,0,0,0,3,2,0
1159 | 1158,28,0,0,0,0,1,2,0
1160 | 1159,28,7.55,0,0,0,3,2,0
1161 | 1160,28,8.05,1,0,0,3,2,0
1162 | 1161,17,8.6625,0,0,0,3,2,0
1163 | 1162,46,75.2417,0,0,0,1,0,0
1164 | 1163,28,7.75,0,0,0,3,1,0
1165 | 1164,26,136.7792,1,1,0,1,0,0
1166 | 1165,28,15.5,1,1,0,3,1,0
1167 | 1166,28,7.225,0,0,0,3,0,0
1168 | 1167,20,26,1,1,0,2,2,0
1169 | 1168,28,10.5,0,0,0,2,2,0
1170 | 1169,40,26,0,1,0,2,2,0
1171 | 1170,30,21,0,1,0,2,2,0
1172 | 1171,22,10.5,0,0,0,2,2,0
1173 | 1172,23,8.6625,1,0,0,3,2,0
1174 | 1173,0.75,13.775,0,1,1,3,2,0
1175 | 1174,28,7.75,1,0,0,3,1,0
1176 | 1175,9,15.2458,1,1,1,3,0,0
1177 | 1176,2,20.2125,1,1,1,3,2,0
1178 | 1177,36,7.25,0,0,0,3,2,0
1179 | 1178,28,7.25,0,0,0,3,2,0
1180 | 1179,24,82.2667,0,1,0,1,2,0
1181 | 1180,28,7.2292,0,0,0,3,0,0
1182 | 1181,28,8.05,0,0,0,3,2,0
1183 | 1182,28,39.6,0,0,0,1,2,0
1184 | 1183,30,6.95,1,0,0,3,1,0
1185 | 1184,28,7.2292,0,0,0,3,0,0
1186 | 1185,53,81.8583,0,1,1,1,2,0
1187 | 1186,36,9.5,0,0,0,3,2,0
1188 | 1187,26,7.8958,0,0,0,3,2,0
1189 | 1188,1,41.5792,1,1,2,2,0,0
1190 | 1189,28,21.6792,0,2,0,3,0,0
1191 | 1190,30,45.5,0,0,0,1,2,0
1192 | 1191,29,7.8542,0,0,0,3,2,0
1193 | 1192,32,7.775,0,0,0,3,2,0
1194 | 1193,28,15.0458,0,0,0,2,0,0
1195 | 1194,43,21,0,0,1,2,2,0
1196 | 1195,24,8.6625,0,0,0,3,2,0
1197 | 1196,28,7.75,1,0,0,3,1,0
1198 | 1197,64,26.55,1,1,1,1,2,0
1199 | 1198,30,151.55,0,1,2,1,2,0
1200 | 1199,0.83,9.35,0,0,1,3,2,0
1201 | 1200,55,93.5,0,1,1,1,2,0
1202 | 1201,45,14.1083,1,1,0,3,2,0
1203 | 1202,18,8.6625,0,0,0,3,2,0
1204 | 1203,22,7.225,0,0,0,3,0,0
1205 | 1204,28,7.575,0,0,0,3,2,0
1206 | 1205,37,7.75,1,0,0,3,1,0
1207 | 1206,55,135.6333,1,0,0,1,0,0
1208 | 1207,17,7.7333,1,0,0,3,1,0
1209 | 1208,57,146.5208,0,1,0,1,0,0
1210 | 1209,19,10.5,0,0,0,2,2,0
1211 | 1210,27,7.8542,0,0,0,3,2,0
1212 | 1211,22,31.5,0,2,0,2,2,0
1213 | 1212,26,7.775,0,0,0,3,2,0
1214 | 1213,25,7.2292,0,0,0,3,0,0
1215 | 1214,26,13,0,0,0,2,2,0
1216 | 1215,33,26.55,0,0,0,1,2,0
1217 | 1216,39,211.3375,1,0,0,1,2,0
1218 | 1217,23,7.05,0,0,0,3,2,0
1219 | 1218,12,39,1,2,1,2,2,0
1220 | 1219,46,79.2,0,0,0,1,0,0
1221 | 1220,29,26,0,1,0,2,2,0
1222 | 1221,21,13,0,0,0,2,2,0
1223 | 1222,48,36.75,1,0,2,2,2,0
1224 | 1223,39,29.7,0,0,0,1,0,0
1225 | 1224,28,7.225,0,0,0,3,0,0
1226 | 1225,19,15.7417,1,1,1,3,0,0
1227 | 1226,27,7.8958,0,0,0,3,2,0
1228 | 1227,30,26,0,0,0,1,2,0
1229 | 1228,32,13,0,0,0,2,2,0
1230 | 1229,39,7.2292,0,0,2,3,0,0
1231 | 1230,25,31.5,0,0,0,2,2,0
1232 | 1231,28,7.2292,0,0,0,3,0,0
1233 | 1232,18,10.5,0,0,0,2,2,0
1234 | 1233,32,7.5792,0,0,0,3,2,0
1235 | 1234,28,69.55,0,1,9,3,2,0
1236 | 1235,58,512.3292,1,0,1,1,0,0
1237 | 1236,28,14.5,0,1,1,3,2,0
1238 | 1237,16,7.65,1,0,0,3,2,0
1239 | 1238,26,13,0,0,0,2,2,0
1240 | 1239,38,7.2292,1,0,0,3,0,0
1241 | 1240,24,13.5,0,0,0,2,2,0
1242 | 1241,31,21,1,0,0,2,2,0
1243 | 1242,45,63.3583,1,0,1,1,0,0
1244 | 1243,25,10.5,0,0,0,2,2,0
1245 | 1244,18,73.5,0,0,0,2,2,0
1246 | 1245,49,65,0,1,2,2,2,0
1247 | 1246,0.17,20.575,1,1,2,3,2,0
1248 | 1247,50,26,0,0,0,1,2,0
1249 | 1248,59,51.4792,1,2,0,1,2,0
1250 | 1249,28,7.8792,0,0,0,3,2,0
1251 | 1250,28,7.75,0,0,0,3,1,0
1252 | 1251,30,15.55,1,1,0,3,2,0
1253 | 1252,14.5,69.55,0,8,2,3,2,0
1254 | 1253,24,37.0042,1,1,1,2,0,0
1255 | 1254,31,21,1,0,0,2,2,0
1256 | 1255,27,8.6625,0,0,0,3,2,0
1257 | 1256,25,55.4417,1,1,0,1,0,0
1258 | 1257,28,69.55,1,1,9,3,2,0
1259 | 1258,28,14.4583,0,1,0,3,0,0
1260 | 1259,22,39.6875,1,0,0,3,2,0
1261 | 1260,45,59.4,1,0,1,1,0,0
1262 | 1261,29,13.8583,0,0,0,2,0,0
1263 | 1262,21,11.5,0,1,0,2,2,0
1264 | 1263,31,134.5,1,0,0,1,0,0
1265 | 1264,49,0,0,0,0,1,2,0
1266 | 1265,44,13,0,0,0,2,2,0
1267 | 1266,54,81.8583,1,1,1,1,2,0
1268 | 1267,45,262.375,1,0,0,1,0,0
1269 | 1268,22,8.6625,1,2,0,3,2,0
1270 | 1269,21,11.5,0,0,0,2,2,0
1271 | 1270,55,50,0,0,0,1,2,0
1272 | 1271,5,31.3875,0,4,2,3,2,0
1273 | 1272,28,7.75,0,0,0,3,1,0
1274 | 1273,26,7.8792,0,0,0,3,1,0
1275 | 1274,28,14.5,1,0,0,3,2,0
1276 | 1275,19,16.1,1,1,0,3,2,0
1277 | 1276,28,12.875,0,0,0,2,2,0
1278 | 1277,24,65,1,1,2,2,2,0
1279 | 1278,24,7.775,0,0,0,3,2,0
1280 | 1279,57,13,0,0,0,2,2,0
1281 | 1280,21,7.75,0,0,0,3,1,0
1282 | 1281,6,21.075,0,3,1,3,2,0
1283 | 1282,23,93.5,0,0,0,1,2,0
1284 | 1283,51,39.4,1,0,1,1,2,0
1285 | 1284,13,20.25,0,0,2,3,2,0
1286 | 1285,47,10.5,0,0,0,2,2,0
1287 | 1286,29,22.025,0,3,1,3,2,0
1288 | 1287,18,60,1,1,0,1,2,0
1289 | 1288,24,7.25,0,0,0,3,1,0
1290 | 1289,48,79.2,1,1,1,1,0,0
1291 | 1290,22,7.775,0,0,0,3,2,0
1292 | 1291,31,7.7333,0,0,0,3,1,0
1293 | 1292,30,164.8667,1,0,0,1,2,0
1294 | 1293,38,21,0,1,0,2,2,0
1295 | 1294,22,59.4,1,0,1,1,0,0
1296 | 1295,17,47.1,0,0,0,1,2,0
1297 | 1296,43,27.7208,0,1,0,1,0,0
1298 | 1297,20,13.8625,0,0,0,2,0,0
1299 | 1298,23,10.5,0,1,0,2,2,0
1300 | 1299,50,211.5,0,1,1,1,0,0
1301 | 1300,28,7.7208,1,0,0,3,1,0
1302 | 1301,3,13.775,1,1,1,3,2,0
1303 | 1302,28,7.75,1,0,0,3,1,0
1304 | 1303,37,90,1,1,0,1,1,0
1305 | 1304,28,7.775,1,0,0,3,2,0
1306 | 1305,28,8.05,0,0,0,3,2,0
1307 | 1306,39,108.9,1,0,0,1,0,0
1308 | 1307,38.5,7.25,0,0,0,3,2,0
1309 | 1308,28,8.05,0,0,0,3,2,0
1310 | 1309,28,22.3583,0,1,1,3,0,0
1311 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 ravin-d-27
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.md
3 | recursive-include licences *
4 |
--------------------------------------------------------------------------------
/Notebooks/Iris_Load_and_Save.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | # Get the current directory of the notebook and go up to the project root
5 | current_dir = os.getcwd() # Get the current working directory
6 | project_root = os.path.abspath(os.path.join(current_dir, '..'))
7 | print(project_root)
8 | sys.path.append(project_root)
9 |
10 | import numpy as np
11 | import pandas as pd
12 | from sklearn.model_selection import train_test_split
13 | from sklearn.preprocessing import StandardScaler
14 | from pydeepflow.model import Multi_Layer_ANN
15 |
16 |
17 | url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
18 | df = pd.read_csv(url, header=None, names=["sepal_length", "sepal_width", "petal_length", "petal_width", "species"])
19 |
20 | print(df.head())
21 |
22 | df['species'] = df['species'].astype('category').cat.codes
23 |
24 |
25 | X = df.iloc[:, :-1].values
26 | y = df.iloc[:, -1].values
27 |
28 | y_one_hot = np.eye(len(np.unique(y)))[y]
29 |
30 | X_train, X_test, y_train, y_test = train_test_split(X, y_one_hot, test_size=0.2, random_state=42)
31 |
32 | scaler = StandardScaler()
33 | X_train = scaler.fit_transform(X_train)
34 | X_test = scaler.transform(X_test)
35 |
36 |
37 | # Define the architecture
38 | hidden_layers = [5, 5]
39 | activations = ['relu', 'relu']
40 |
41 | ann = Multi_Layer_ANN(X_train, y_train, hidden_layers, activations, loss='categorical_crossentropy')
42 | ann.fit(epochs=1000, learning_rate=0.01)
43 |
44 | # Make predictions
45 | y_pred = ann.predict(X_test)
46 |
47 | print(y_pred)
48 |
49 | # Convert predictions back to original labels
50 | y_test_labels = np.argmax(y_test, axis=1)
51 |
52 | # Calculate accuracy
53 | accuracy = np.mean(y_pred == y_test_labels)
54 | print(f"Test Accuracy: {accuracy * 100:.2f}%")
55 |
56 |
57 | # After training your model
58 | ann.save_model('my_ann_model.pkl')
59 |
60 | # To load the model later
61 | ann_loaded = Multi_Layer_ANN(X_train, y_train, hidden_layers, activations, loss='categorical_crossentropy')
62 | ann_loaded.load_model('my_ann_model.pkl')
63 |
64 | print("After Loading...")
65 | print(ann_loaded.predict(X_test))
66 |
--------------------------------------------------------------------------------
/Notebooks/Iris_Multi_Class.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 |
4 | # Get the current directory of the notebook and go up to the project root
5 | current_dir = os.getcwd() # Get the current working directory
6 | project_root = os.path.abspath(os.path.join(current_dir, '..'))
7 | print(project_root)
8 | sys.path.append(project_root)
9 |
10 | import numpy as np
11 | import pandas as pd
12 | from sklearn.model_selection import train_test_split
13 | from sklearn.preprocessing import StandardScaler
14 | from pydeepflow.model import Multi_Layer_ANN
15 |
16 |
17 | url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
18 | df = pd.read_csv(url, header=None, names=["sepal_length", "sepal_width", "petal_length", "petal_width", "species"])
19 |
20 | print(df.head())
21 |
22 | df['species'] = df['species'].astype('category').cat.codes
23 |
24 |
25 | X = df.iloc[:, :-1].values
26 | y = df.iloc[:, -1].values
27 |
28 | y_one_hot = np.eye(len(np.unique(y)))[y]
29 |
30 | X_train, X_test, y_train, y_test = train_test_split(X, y_one_hot, test_size=0.2, random_state=42)
31 |
32 | scaler = StandardScaler()
33 | X_train = scaler.fit_transform(X_train)
34 | X_test = scaler.transform(X_test)
35 |
36 |
37 | # Define the architecture
38 | hidden_layers = [5, 5]
39 | activations = ['relu', 'relu']
40 |
41 | ann = Multi_Layer_ANN(X_train, y_train, hidden_layers, activations, loss='categorical_crossentropy')
42 | ann.fit(epochs=1000, learning_rate=0.01)
43 |
44 | # Make predictions
45 | y_pred = ann.predict(X_test)
46 |
47 | print(y_pred)
48 |
49 | # Convert predictions back to original labels
50 | y_test_labels = np.argmax(y_test, axis=1)
51 |
52 | # Calculate accuracy
53 | accuracy = np.mean(y_pred == y_test_labels)
54 | print(f"Test Accuracy: {accuracy * 100:.2f}%")
55 |
56 |
57 |
--------------------------------------------------------------------------------
/Notebooks/Titanic.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Step 1: Load the Data"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "import pandas as pd\n",
17 | "import numpy as np\n",
18 | "\n",
19 | "# Load the dataset\n",
20 | "df = pd.read_csv(\"E:/Github_Repos/Artificial_Neural_Networks_From_Scratch/Dataset/Titanic.csv\")"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 2,
26 | "metadata": {},
27 | "outputs": [
28 | {
29 | "name": "stdout",
30 | "output_type": "stream",
31 | "text": [
32 | " Passengerid Age Fare Sex sibsp Parch Pclass Embarked Survived\n",
33 | "0 1 22.0 7.2500 0 1 0 3 2.0 0\n",
34 | "1 2 38.0 71.2833 1 1 0 1 0.0 1\n",
35 | "2 3 26.0 7.9250 1 0 0 3 2.0 1\n",
36 | "3 4 35.0 53.1000 1 1 0 1 2.0 1\n",
37 | "4 5 35.0 8.0500 0 0 0 3 2.0 0\n"
38 | ]
39 | }
40 | ],
41 | "source": [
42 | "# Display the first few rows\n",
43 | "print(df.head())"
44 | ]
45 | },
46 | {
47 | "cell_type": "markdown",
48 | "metadata": {},
49 | "source": [
50 | "# Step 2: Data Cleaning"
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": 3,
56 | "metadata": {},
57 | "outputs": [
58 | {
59 | "name": "stdout",
60 | "output_type": "stream",
61 | "text": [
62 | "Passengerid 0\n",
63 | "Age 0\n",
64 | "Fare 0\n",
65 | "Sex 0\n",
66 | "sibsp 0\n",
67 | "Parch 0\n",
68 | "Pclass 0\n",
69 | "Embarked 2\n",
70 | "Survived 0\n",
71 | "dtype: int64\n"
72 | ]
73 | }
74 | ],
75 | "source": [
76 | "\n",
77 | "# Check for missing values\n",
78 | "print(df.isnull().sum())\n"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 4,
84 | "metadata": {},
85 | "outputs": [],
86 | "source": [
87 | "# Drop 'PassengerId' (not needed for training) and handle missing values\n",
88 | "df.drop(columns=['Passengerid'], inplace=True)"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 5,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "# Fill or drop missing values\n",
98 | "# For example, fill missing 'Age' with the median and drop 'Cabin' and 'Ticket' for simplicity\n",
99 | "df['Age'].fillna(df['Age'].median(), inplace=True)"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": 6,
105 | "metadata": {},
106 | "outputs": [],
107 | "source": [
108 | "# Drop rows with missing 'Embarked' values (or you could fill them)\n",
109 | "df.dropna(subset=['Embarked'], inplace=True)"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {},
115 | "source": [
116 | "# Step 3: Encoding Categorical Variables"
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": 7,
122 | "metadata": {},
123 | "outputs": [],
124 | "source": [
125 | "# One-hot encode 'Sex' and 'Embarked'\n",
126 | "df = pd.get_dummies(df, columns=['Sex', 'Embarked'], drop_first=True)\n",
127 | "\n",
128 | "# Separate features (X) and target (y)\n",
129 | "X = df.drop(columns=['Survived'])\n",
130 | "y = df['Survived']"
131 | ]
132 | },
133 | {
134 | "cell_type": "markdown",
135 | "metadata": {},
136 | "source": [
137 | "# Step 4: Feature Scaling"
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": 8,
143 | "metadata": {},
144 | "outputs": [],
145 | "source": [
146 | "from sklearn.preprocessing import StandardScaler\n",
147 | "\n",
148 | "scaler = StandardScaler()\n",
149 | "X_scaled = scaler.fit_transform(X)\n"
150 | ]
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "metadata": {},
155 | "source": [
156 | "# Step 5: Train-Test Split"
157 | ]
158 | },
159 | {
160 | "cell_type": "code",
161 | "execution_count": 9,
162 | "metadata": {},
163 | "outputs": [],
164 | "source": [
165 | "from sklearn.model_selection import train_test_split\n",
166 | "\n",
167 | "X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)"
168 | ]
169 | },
170 | {
171 | "cell_type": "markdown",
172 | "metadata": {},
173 | "source": [
174 | "# Step 6: Initialize and Train the Custom ANN"
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": 10,
180 | "metadata": {},
181 | "outputs": [
182 | {
183 | "name": "stderr",
184 | "output_type": "stream",
185 | "text": [
186 | "Training Progress: 1%|\u001b[32m░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 10/1000 [00:00<00:10, 95.06it/s]\u001b[0m"
187 | ]
188 | },
189 | {
190 | "name": "stdout",
191 | "output_type": "stream",
192 | "text": [
193 | "Loss: \u001b[31m1.0599\u001b[0m | Accuracy: \u001b[32m34.95%\u001b[0m | Time: 0.01s\n",
194 | "Loss: \u001b[31m2.4924\u001b[0m | Accuracy: \u001b[32m74.16%\u001b[0m | Time: 0.00s\n"
195 | ]
196 | },
197 | {
198 | "name": "stderr",
199 | "output_type": "stream",
200 | "text": [
201 | "Training Progress: 3%|\u001b[32m█░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 32/1000 [00:00<00:09, 99.81it/s]\u001b[0m"
202 | ]
203 | },
204 | {
205 | "name": "stdout",
206 | "output_type": "stream",
207 | "text": [
208 | "Loss: \u001b[31m1.3797\u001b[0m | Accuracy: \u001b[32m74.16%\u001b[0m | Time: 0.00s\n",
209 | "Loss: \u001b[31m1.2606\u001b[0m | Accuracy: \u001b[32m74.16%\u001b[0m | Time: 0.00s\n"
210 | ]
211 | },
212 | {
213 | "name": "stderr",
214 | "output_type": "stream",
215 | "text": [
216 | "Training Progress: 5%|\u001b[32m██░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 54/1000 [00:00<00:09, 99.03it/s]\u001b[0m"
217 | ]
218 | },
219 | {
220 | "name": "stdout",
221 | "output_type": "stream",
222 | "text": [
223 | "Loss: \u001b[31m0.7706\u001b[0m | Accuracy: \u001b[32m74.16%\u001b[0m | Time: 0.02s\n",
224 | "Loss: \u001b[31m0.6897\u001b[0m | Accuracy: \u001b[32m56.87%\u001b[0m | Time: 0.02s\n"
225 | ]
226 | },
227 | {
228 | "name": "stderr",
229 | "output_type": "stream",
230 | "text": [
231 | "Training Progress: 8%|\u001b[32m███░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 75/1000 [00:00<00:09, 97.47it/s]\u001b[0m"
232 | ]
233 | },
234 | {
235 | "name": "stdout",
236 | "output_type": "stream",
237 | "text": [
238 | "Loss: \u001b[31m0.8049\u001b[0m | Accuracy: \u001b[32m56.13%\u001b[0m | Time: 0.02s\n",
239 | "Loss: \u001b[31m0.7876\u001b[0m | Accuracy: \u001b[32m56.73%\u001b[0m | Time: 0.01s\n"
240 | ]
241 | },
242 | {
243 | "name": "stderr",
244 | "output_type": "stream",
245 | "text": [
246 | "Training Progress: 9%|\u001b[32m███▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 94/1000 [00:01<00:10, 83.91it/s]\u001b[0m"
247 | ]
248 | },
249 | {
250 | "name": "stdout",
251 | "output_type": "stream",
252 | "text": [
253 | "Loss: \u001b[31m0.7536\u001b[0m | Accuracy: \u001b[32m56.22%\u001b[0m | Time: 0.01s\n",
254 | "Loss: \u001b[31m0.7990\u001b[0m | Accuracy: \u001b[32m56.40%\u001b[0m | Time: 0.01s\n"
255 | ]
256 | },
257 | {
258 | "name": "stderr",
259 | "output_type": "stream",
260 | "text": [
261 | "Training Progress: 11%|\u001b[32m████▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 114/1000 [00:01<00:10, 85.14it/s]\u001b[0m"
262 | ]
263 | },
264 | {
265 | "name": "stdout",
266 | "output_type": "stream",
267 | "text": [
268 | "Loss: \u001b[31m0.8466\u001b[0m | Accuracy: \u001b[32m56.08%\u001b[0m | Time: 0.01s\n",
269 | "Loss: \u001b[31m0.7774\u001b[0m | Accuracy: \u001b[32m55.62%\u001b[0m | Time: 0.02s\n",
270 | "Loss: \u001b[31m0.9879\u001b[0m | Accuracy: \u001b[32m58.90%\u001b[0m | Time: 0.01s\n"
271 | ]
272 | },
273 | {
274 | "name": "stderr",
275 | "output_type": "stream",
276 | "text": [
277 | "Training Progress: 15%|\u001b[32m█████▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 146/1000 [00:01<00:09, 93.62it/s]\u001b[0m"
278 | ]
279 | },
280 | {
281 | "name": "stdout",
282 | "output_type": "stream",
283 | "text": [
284 | "Loss: \u001b[31m0.9132\u001b[0m | Accuracy: \u001b[32m74.16%\u001b[0m | Time: 0.02s\n",
285 | "Loss: \u001b[31m1.0948\u001b[0m | Accuracy: \u001b[32m74.16%\u001b[0m | Time: 0.02s\n",
286 | "Loss: \u001b[31m1.0085\u001b[0m | Accuracy: \u001b[32m58.35%\u001b[0m | Time: 0.01s\n"
287 | ]
288 | },
289 | {
290 | "name": "stderr",
291 | "output_type": "stream",
292 | "text": [
293 | "Training Progress: 18%|\u001b[32m███████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 178/1000 [00:01<00:09, 91.08it/s]\u001b[0m"
294 | ]
295 | },
296 | {
297 | "name": "stdout",
298 | "output_type": "stream",
299 | "text": [
300 | "Loss: \u001b[31m0.9424\u001b[0m | Accuracy: \u001b[32m57.56%\u001b[0m | Time: 0.01s\n",
301 | "Loss: \u001b[31m0.8445\u001b[0m | Accuracy: \u001b[32m56.91%\u001b[0m | Time: 0.01s\n"
302 | ]
303 | },
304 | {
305 | "name": "stderr",
306 | "output_type": "stream",
307 | "text": [
308 | "Training Progress: 19%|\u001b[32m███████▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 188/1000 [00:02<00:09, 89.73it/s]\u001b[0m"
309 | ]
310 | },
311 | {
312 | "name": "stdout",
313 | "output_type": "stream",
314 | "text": [
315 | "Loss: \u001b[31m0.8716\u001b[0m | Accuracy: \u001b[32m57.24%\u001b[0m | Time: 0.02s\n",
316 | "Loss: \u001b[31m0.9174\u001b[0m | Accuracy: \u001b[32m56.68%\u001b[0m | Time: 0.02s\n"
317 | ]
318 | },
319 | {
320 | "name": "stderr",
321 | "output_type": "stream",
322 | "text": [
323 | "Training Progress: 22%|\u001b[32m████████▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 216/1000 [00:02<00:08, 87.69it/s]\u001b[0m"
324 | ]
325 | },
326 | {
327 | "name": "stdout",
328 | "output_type": "stream",
329 | "text": [
330 | "Loss: \u001b[31m0.8551\u001b[0m | Accuracy: \u001b[32m55.99%\u001b[0m | Time: 0.01s\n",
331 | "Loss: \u001b[31m0.9058\u001b[0m | Accuracy: \u001b[32m57.14%\u001b[0m | Time: 0.01s\n"
332 | ]
333 | },
334 | {
335 | "name": "stderr",
336 | "output_type": "stream",
337 | "text": [
338 | "Training Progress: 24%|\u001b[32m█████████▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 235/1000 [00:02<00:08, 87.87it/s]\u001b[0m"
339 | ]
340 | },
341 | {
342 | "name": "stdout",
343 | "output_type": "stream",
344 | "text": [
345 | "Loss: \u001b[31m0.9761\u001b[0m | Accuracy: \u001b[32m57.98%\u001b[0m | Time: 0.02s\n",
346 | "Loss: \u001b[31m1.0729\u001b[0m | Accuracy: \u001b[32m58.02%\u001b[0m | Time: 0.01s\n"
347 | ]
348 | },
349 | {
350 | "name": "stderr",
351 | "output_type": "stream",
352 | "text": [
353 | "Training Progress: 25%|\u001b[32m██████████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 253/1000 [00:02<00:09, 81.83it/s]\u001b[0m"
354 | ]
355 | },
356 | {
357 | "name": "stdout",
358 | "output_type": "stream",
359 | "text": [
360 | "Loss: \u001b[31m1.2279\u001b[0m | Accuracy: \u001b[32m60.61%\u001b[0m | Time: 0.02s\n",
361 | "Loss: \u001b[31m0.9157\u001b[0m | Accuracy: \u001b[32m57.65%\u001b[0m | Time: 0.01s\n"
362 | ]
363 | },
364 | {
365 | "name": "stderr",
366 | "output_type": "stream",
367 | "text": [
368 | "Training Progress: 27%|\u001b[32m███████████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 271/1000 [00:03<00:09, 75.54it/s]\u001b[0m"
369 | ]
370 | },
371 | {
372 | "name": "stdout",
373 | "output_type": "stream",
374 | "text": [
375 | "Loss: \u001b[31m0.9184\u001b[0m | Accuracy: \u001b[32m56.82%\u001b[0m | Time: 0.01s\n",
376 | "Loss: \u001b[31m1.6454\u001b[0m | Accuracy: \u001b[32m56.17%\u001b[0m | Time: 0.02s\n"
377 | ]
378 | },
379 | {
380 | "name": "stderr",
381 | "output_type": "stream",
382 | "text": [
383 | "Training Progress: 30%|\u001b[32m████████████░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 297/1000 [00:03<00:09, 77.60it/s]\u001b[0m"
384 | ]
385 | },
386 | {
387 | "name": "stdout",
388 | "output_type": "stream",
389 | "text": [
390 | "Loss: \u001b[31m0.8452\u001b[0m | Accuracy: \u001b[32m55.48%\u001b[0m | Time: 0.01s\n",
391 | "Loss: \u001b[31m0.9320\u001b[0m | Accuracy: \u001b[32m52.24%\u001b[0m | Time: 0.00s\n"
392 | ]
393 | },
394 | {
395 | "name": "stderr",
396 | "output_type": "stream",
397 | "text": [
398 | "Training Progress: 32%|\u001b[32m████████████▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 315/1000 [00:03<00:08, 80.20it/s]\u001b[0m"
399 | ]
400 | },
401 | {
402 | "name": "stdout",
403 | "output_type": "stream",
404 | "text": [
405 | "Loss: \u001b[31m1.1146\u001b[0m | Accuracy: \u001b[32m56.73%\u001b[0m | Time: 0.01s\n",
406 | "Loss: \u001b[31m1.2832\u001b[0m | Accuracy: \u001b[32m58.81%\u001b[0m | Time: 0.01s\n"
407 | ]
408 | },
409 | {
410 | "name": "stderr",
411 | "output_type": "stream",
412 | "text": [
413 | "Training Progress: 33%|\u001b[32m█████████████▒░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 332/1000 [00:03<00:08, 77.64it/s]\u001b[0m"
414 | ]
415 | },
416 | {
417 | "name": "stdout",
418 | "output_type": "stream",
419 | "text": [
420 | "Loss: \u001b[31m1.4671\u001b[0m | Accuracy: \u001b[32m58.07%\u001b[0m | Time: 0.01s\n",
421 | "Loss: \u001b[31m0.8815\u001b[0m | Accuracy: \u001b[32m55.20%\u001b[0m | Time: 0.01s\n"
422 | ]
423 | },
424 | {
425 | "name": "stderr",
426 | "output_type": "stream",
427 | "text": [
428 | "Training Progress: 35%|\u001b[32m██████████████░░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 349/1000 [00:04<00:08, 79.59it/s]\u001b[0m"
429 | ]
430 | },
431 | {
432 | "name": "stdout",
433 | "output_type": "stream",
434 | "text": [
435 | "Loss: \u001b[31m0.8212\u001b[0m | Accuracy: \u001b[32m57.14%\u001b[0m | Time: 0.01s\n",
436 | "Loss: \u001b[31m0.8193\u001b[0m | Accuracy: \u001b[32m57.28%\u001b[0m | Time: 0.01s\n"
437 | ]
438 | },
439 | {
440 | "name": "stderr",
441 | "output_type": "stream",
442 | "text": [
443 | "Training Progress: 38%|\u001b[32m███████████████░░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 376/1000 [00:04<00:07, 79.57it/s]\u001b[0m"
444 | ]
445 | },
446 | {
447 | "name": "stdout",
448 | "output_type": "stream",
449 | "text": [
450 | "Loss: \u001b[31m1.0284\u001b[0m | Accuracy: \u001b[32m57.79%\u001b[0m | Time: 0.01s\n",
451 | "Loss: \u001b[31m1.1538\u001b[0m | Accuracy: \u001b[32m57.61%\u001b[0m | Time: 0.01s\n"
452 | ]
453 | },
454 | {
455 | "name": "stderr",
456 | "output_type": "stream",
457 | "text": [
458 | "Training Progress: 39%|\u001b[32m████████████████░░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 393/1000 [00:04<00:07, 79.29it/s]\u001b[0m"
459 | ]
460 | },
461 | {
462 | "name": "stdout",
463 | "output_type": "stream",
464 | "text": [
465 | "Loss: \u001b[31m1.0093\u001b[0m | Accuracy: \u001b[32m57.14%\u001b[0m | Time: 0.01s\n",
466 | "Loss: \u001b[31m1.2125\u001b[0m | Accuracy: \u001b[32m58.12%\u001b[0m | Time: 0.01s\n"
467 | ]
468 | },
469 | {
470 | "name": "stderr",
471 | "output_type": "stream",
472 | "text": [
473 | "Training Progress: 41%|\u001b[32m████████████████▒░░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 411/1000 [00:04<00:07, 83.08it/s]\u001b[0m"
474 | ]
475 | },
476 | {
477 | "name": "stdout",
478 | "output_type": "stream",
479 | "text": [
480 | "Loss: \u001b[31m1.1114\u001b[0m | Accuracy: \u001b[32m55.57%\u001b[0m | Time: 0.01s\n",
481 | "Loss: \u001b[31m1.3387\u001b[0m | Accuracy: \u001b[32m57.38%\u001b[0m | Time: 0.01s\n"
482 | ]
483 | },
484 | {
485 | "name": "stderr",
486 | "output_type": "stream",
487 | "text": [
488 | "Training Progress: 43%|\u001b[32m█████████████████▒░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 431/1000 [00:05<00:06, 84.70it/s]\u001b[0m"
489 | ]
490 | },
491 | {
492 | "name": "stdout",
493 | "output_type": "stream",
494 | "text": [
495 | "Loss: \u001b[31m2.4286\u001b[0m | Accuracy: \u001b[32m67.50%\u001b[0m | Time: 0.02s\n",
496 | "Loss: \u001b[31m0.9774\u001b[0m | Accuracy: \u001b[32m56.87%\u001b[0m | Time: 0.02s\n"
497 | ]
498 | },
499 | {
500 | "name": "stderr",
501 | "output_type": "stream",
502 | "text": [
503 | "Training Progress: 45%|\u001b[32m██████████████████░░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 450/1000 [00:05<00:06, 86.05it/s]\u001b[0m"
504 | ]
505 | },
506 | {
507 | "name": "stdout",
508 | "output_type": "stream",
509 | "text": [
510 | "Loss: \u001b[31m1.0392\u001b[0m | Accuracy: \u001b[32m58.90%\u001b[0m | Time: 0.01s\n",
511 | "Loss: \u001b[31m1.9286\u001b[0m | Accuracy: \u001b[32m74.16%\u001b[0m | Time: 0.01s\n"
512 | ]
513 | },
514 | {
515 | "name": "stderr",
516 | "output_type": "stream",
517 | "text": [
518 | "Training Progress: 47%|\u001b[32m███████████████████░░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 470/1000 [00:05<00:05, 88.74it/s]\u001b[0m"
519 | ]
520 | },
521 | {
522 | "name": "stdout",
523 | "output_type": "stream",
524 | "text": [
525 | "Loss: \u001b[31m3.4263\u001b[0m | Accuracy: \u001b[32m74.16%\u001b[0m | Time: 0.01s\n",
526 | "Loss: \u001b[31m2.5584\u001b[0m | Accuracy: \u001b[32m57.19%\u001b[0m | Time: 0.01s\n"
527 | ]
528 | },
529 | {
530 | "name": "stderr",
531 | "output_type": "stream",
532 | "text": [
533 | "Training Progress: 49%|\u001b[32m████████████████████░░░░░░░░░░░░░░░░░░░░░\u001b[0m| 492/1000 [00:05<00:05, 96.32it/s]\u001b[0m"
534 | ]
535 | },
536 | {
537 | "name": "stdout",
538 | "output_type": "stream",
539 | "text": [
540 | "Loss: \u001b[31m1.8240\u001b[0m | Accuracy: \u001b[32m56.77%\u001b[0m | Time: 0.02s\n",
541 | "Loss: \u001b[31m1.8005\u001b[0m | Accuracy: \u001b[32m58.35%\u001b[0m | Time: 0.02s\n",
542 | "Loss: \u001b[31m1.9708\u001b[0m | Accuracy: \u001b[32m58.16%\u001b[0m | Time: 0.02s\n"
543 | ]
544 | },
545 | {
546 | "name": "stderr",
547 | "output_type": "stream",
548 | "text": [
549 | "Training Progress: 52%|\u001b[32m█████████████████████▒░░░░░░░░░░░░░░░░░░░\u001b[0m| 525/1000 [00:06<00:04, 95.62it/s]\u001b[0m"
550 | ]
551 | },
552 | {
553 | "name": "stdout",
554 | "output_type": "stream",
555 | "text": [
556 | "Loss: \u001b[31m1.9304\u001b[0m | Accuracy: \u001b[32m57.84%\u001b[0m | Time: 0.02s\n",
557 | "Loss: \u001b[31m2.1251\u001b[0m | Accuracy: \u001b[32m59.92%\u001b[0m | Time: 0.00s\n",
558 | "Loss: \u001b[31m2.6270\u001b[0m | Accuracy: \u001b[32m61.63%\u001b[0m | Time: 0.00s\n"
559 | ]
560 | },
561 | {
562 | "name": "stderr",
563 | "output_type": "stream",
564 | "text": [
565 | "Training Progress: 56%|\u001b[32m██████████████████████▒░░░░░░░░░░░░░░░░░░\u001b[0m| 558/1000 [00:06<00:04, 98.76it/s]\u001b[0m"
566 | ]
567 | },
568 | {
569 | "name": "stdout",
570 | "output_type": "stream",
571 | "text": [
572 | "Loss: \u001b[31m2.0145\u001b[0m | Accuracy: \u001b[32m59.87%\u001b[0m | Time: 0.01s\n",
573 | "Loss: \u001b[31m1.9471\u001b[0m | Accuracy: \u001b[32m59.69%\u001b[0m | Time: 0.01s\n",
574 | "Loss: \u001b[31m1.9030\u001b[0m | Accuracy: \u001b[32m59.78%\u001b[0m | Time: 0.01s\n"
575 | ]
576 | },
577 | {
578 | "name": "stderr",
579 | "output_type": "stream",
580 | "text": [
581 | "Training Progress: 58%|\u001b[32m███████████████████████▒░░░░░░░░░░░░░░░░░\u001b[0m| 580/1000 [00:06<00:04, 99.78it/s]\u001b[0m"
582 | ]
583 | },
584 | {
585 | "name": "stdout",
586 | "output_type": "stream",
587 | "text": [
588 | "Loss: \u001b[31m1.7392\u001b[0m | Accuracy: \u001b[32m59.73%\u001b[0m | Time: 0.01s\n",
589 | "Loss: \u001b[31m1.5751\u001b[0m | Accuracy: \u001b[32m59.83%\u001b[0m | Time: 0.01s\n"
590 | ]
591 | },
592 | {
593 | "name": "stderr",
594 | "output_type": "stream",
595 | "text": [
596 | "Training Progress: 60%|\u001b[32m████████████████████████░░░░░░░░░░░░░░░░\u001b[0m| 602/1000 [00:06<00:03, 101.96it/s]\u001b[0m"
597 | ]
598 | },
599 | {
600 | "name": "stdout",
601 | "output_type": "stream",
602 | "text": [
603 | "Loss: \u001b[31m1.2592\u001b[0m | Accuracy: \u001b[32m60.29%\u001b[0m | Time: 0.02s\n",
604 | "Loss: \u001b[31m1.2371\u001b[0m | Accuracy: \u001b[32m63.43%\u001b[0m | Time: 0.00s\n",
605 | "Loss: \u001b[31m1.1092\u001b[0m | Accuracy: \u001b[32m62.00%\u001b[0m | Time: 0.02s\n"
606 | ]
607 | },
608 | {
609 | "name": "stderr",
610 | "output_type": "stream",
611 | "text": [
612 | "Training Progress: 64%|\u001b[32m█████████████████████████░░░░░░░░░░░░░░░\u001b[0m| 636/1000 [00:07<00:03, 105.86it/s]\u001b[0m"
613 | ]
614 | },
615 | {
616 | "name": "stdout",
617 | "output_type": "stream",
618 | "text": [
619 | "Loss: \u001b[31m2.5226\u001b[0m | Accuracy: \u001b[32m61.08%\u001b[0m | Time: 0.01s\n",
620 | "Loss: \u001b[31m2.2818\u001b[0m | Accuracy: \u001b[32m62.05%\u001b[0m | Time: 0.00s\n",
621 | "Loss: \u001b[31m1.7529\u001b[0m | Accuracy: \u001b[32m61.35%\u001b[0m | Time: 0.00s\n"
622 | ]
623 | },
624 | {
625 | "name": "stderr",
626 | "output_type": "stream",
627 | "text": [
628 | "Training Progress: 67%|\u001b[32m██████████████████████████▒░░░░░░░░░░░░░\u001b[0m| 669/1000 [00:07<00:03, 104.37it/s]\u001b[0m"
629 | ]
630 | },
631 | {
632 | "name": "stdout",
633 | "output_type": "stream",
634 | "text": [
635 | "Loss: \u001b[31m1.7093\u001b[0m | Accuracy: \u001b[32m61.68%\u001b[0m | Time: 0.01s\n",
636 | "Loss: \u001b[31m1.5475\u001b[0m | Accuracy: \u001b[32m61.12%\u001b[0m | Time: 0.01s\n",
637 | "Loss: \u001b[31m1.1251\u001b[0m | Accuracy: \u001b[32m61.82%\u001b[0m | Time: 0.02s\n"
638 | ]
639 | },
640 | {
641 | "name": "stderr",
642 | "output_type": "stream",
643 | "text": [
644 | "Training Progress: 69%|\u001b[32m███████████████████████████▒░░░░░░░░░░░░\u001b[0m| 691/1000 [00:07<00:02, 105.14it/s]\u001b[0m"
645 | ]
646 | },
647 | {
648 | "name": "stdout",
649 | "output_type": "stream",
650 | "text": [
651 | "Loss: \u001b[31m3.2724\u001b[0m | Accuracy: \u001b[32m61.63%\u001b[0m | Time: 0.01s\n",
652 | "Loss: \u001b[31m3.1351\u001b[0m | Accuracy: \u001b[32m61.58%\u001b[0m | Time: 0.02s\n"
653 | ]
654 | },
655 | {
656 | "name": "stderr",
657 | "output_type": "stream",
658 | "text": [
659 | "Training Progress: 71%|\u001b[32m█████████████████████████████░░░░░░░░░░░░\u001b[0m| 713/1000 [00:07<00:02, 96.27it/s]\u001b[0m"
660 | ]
661 | },
662 | {
663 | "name": "stdout",
664 | "output_type": "stream",
665 | "text": [
666 | "Loss: \u001b[31m2.5594\u001b[0m | Accuracy: \u001b[32m61.08%\u001b[0m | Time: 0.00s\n",
667 | "Loss: \u001b[31m2.4339\u001b[0m | Accuracy: \u001b[32m61.58%\u001b[0m | Time: 0.01s\n"
668 | ]
669 | },
670 | {
671 | "name": "stderr",
672 | "output_type": "stream",
673 | "text": [
674 | "Training Progress: 73%|\u001b[32m██████████████████████████████░░░░░░░░░░░\u001b[0m| 733/1000 [00:08<00:02, 92.10it/s]\u001b[0m"
675 | ]
676 | },
677 | {
678 | "name": "stdout",
679 | "output_type": "stream",
680 | "text": [
681 | "Loss: \u001b[31m2.3730\u001b[0m | Accuracy: \u001b[32m60.10%\u001b[0m | Time: 0.01s\n",
682 | "Loss: \u001b[31m2.6738\u001b[0m | Accuracy: \u001b[32m60.10%\u001b[0m | Time: 0.01s\n"
683 | ]
684 | },
685 | {
686 | "name": "stderr",
687 | "output_type": "stream",
688 | "text": [
689 | "Training Progress: 75%|\u001b[32m██████████████████████████████▒░░░░░░░░░░\u001b[0m| 752/1000 [00:08<00:02, 86.32it/s]\u001b[0m"
690 | ]
691 | },
692 | {
693 | "name": "stdout",
694 | "output_type": "stream",
695 | "text": [
696 | "Loss: \u001b[31m2.7083\u001b[0m | Accuracy: \u001b[32m60.10%\u001b[0m | Time: 0.01s\n",
697 | "Loss: \u001b[31m2.7161\u001b[0m | Accuracy: \u001b[32m60.10%\u001b[0m | Time: 0.01s\n"
698 | ]
699 | },
700 | {
701 | "name": "stderr",
702 | "output_type": "stream",
703 | "text": [
704 | "Training Progress: 77%|\u001b[32m███████████████████████████████▒░░░░░░░░░\u001b[0m| 770/1000 [00:08<00:02, 78.03it/s]\u001b[0m"
705 | ]
706 | },
707 | {
708 | "name": "stdout",
709 | "output_type": "stream",
710 | "text": [
711 | "Loss: \u001b[31m2.6335\u001b[0m | Accuracy: \u001b[32m59.97%\u001b[0m | Time: 0.01s\n",
712 | "Loss: \u001b[31m2.6081\u001b[0m | Accuracy: \u001b[32m59.78%\u001b[0m | Time: 0.01s\n"
713 | ]
714 | },
715 | {
716 | "name": "stderr",
717 | "output_type": "stream",
718 | "text": [
719 | "Training Progress: 80%|\u001b[32m████████████████████████████████▒░░░░░░░░\u001b[0m| 796/1000 [00:08<00:02, 83.58it/s]\u001b[0m"
720 | ]
721 | },
722 | {
723 | "name": "stdout",
724 | "output_type": "stream",
725 | "text": [
726 | "Loss: \u001b[31m2.4015\u001b[0m | Accuracy: \u001b[32m60.98%\u001b[0m | Time: 0.01s\n",
727 | "Loss: \u001b[31m2.6387\u001b[0m | Accuracy: \u001b[32m61.58%\u001b[0m | Time: 0.01s\n"
728 | ]
729 | },
730 | {
731 | "name": "stderr",
732 | "output_type": "stream",
733 | "text": [
734 | "Training Progress: 81%|\u001b[32m█████████████████████████████████░░░░░░░░\u001b[0m| 814/1000 [00:09<00:02, 85.01it/s]\u001b[0m"
735 | ]
736 | },
737 | {
738 | "name": "stdout",
739 | "output_type": "stream",
740 | "text": [
741 | "Loss: \u001b[31m2.6079\u001b[0m | Accuracy: \u001b[32m61.17%\u001b[0m | Time: 0.02s\n",
742 | "Loss: \u001b[31m2.6014\u001b[0m | Accuracy: \u001b[32m60.34%\u001b[0m | Time: 0.01s\n"
743 | ]
744 | },
745 | {
746 | "name": "stderr",
747 | "output_type": "stream",
748 | "text": [
749 | "Training Progress: 83%|\u001b[32m██████████████████████████████████░░░░░░░\u001b[0m| 832/1000 [00:09<00:02, 80.46it/s]\u001b[0m"
750 | ]
751 | },
752 | {
753 | "name": "stdout",
754 | "output_type": "stream",
755 | "text": [
756 | "Loss: \u001b[31m2.7326\u001b[0m | Accuracy: \u001b[32m61.77%\u001b[0m | Time: 0.01s\n",
757 | "Loss: \u001b[31m2.5721\u001b[0m | Accuracy: \u001b[32m61.86%\u001b[0m | Time: 0.02s\n"
758 | ]
759 | },
760 | {
761 | "name": "stderr",
762 | "output_type": "stream",
763 | "text": [
764 | "Training Progress: 86%|\u001b[32m███████████████████████████████████░░░░░░\u001b[0m| 855/1000 [00:09<00:01, 94.11it/s]\u001b[0m"
765 | ]
766 | },
767 | {
768 | "name": "stdout",
769 | "output_type": "stream",
770 | "text": [
771 | "Loss: \u001b[31m2.5893\u001b[0m | Accuracy: \u001b[32m61.82%\u001b[0m | Time: 0.01s\n",
772 | "Loss: \u001b[31m2.5970\u001b[0m | Accuracy: \u001b[32m61.82%\u001b[0m | Time: 0.01s\n",
773 | "Loss: \u001b[31m2.6432\u001b[0m | Accuracy: \u001b[32m61.82%\u001b[0m | Time: 0.01s\n"
774 | ]
775 | },
776 | {
777 | "name": "stderr",
778 | "output_type": "stream",
779 | "text": [
780 | "Training Progress: 89%|\u001b[32m███████████████████████████████████▒░░░░\u001b[0m| 889/1000 [00:09<00:01, 100.95it/s]\u001b[0m"
781 | ]
782 | },
783 | {
784 | "name": "stdout",
785 | "output_type": "stream",
786 | "text": [
787 | "Loss: \u001b[31m2.6200\u001b[0m | Accuracy: \u001b[32m61.82%\u001b[0m | Time: 0.01s\n",
788 | "Loss: \u001b[31m2.6579\u001b[0m | Accuracy: \u001b[32m61.82%\u001b[0m | Time: 0.01s\n",
789 | "Loss: \u001b[31m2.7170\u001b[0m | Accuracy: \u001b[32m62.05%\u001b[0m | Time: 0.01s\n"
790 | ]
791 | },
792 | {
793 | "name": "stderr",
794 | "output_type": "stream",
795 | "text": [
796 | "Training Progress: 91%|\u001b[32m█████████████████████████████████████░░░░\u001b[0m| 911/1000 [00:10<00:00, 92.57it/s]\u001b[0m"
797 | ]
798 | },
799 | {
800 | "name": "stdout",
801 | "output_type": "stream",
802 | "text": [
803 | "Loss: \u001b[31m2.7116\u001b[0m | Accuracy: \u001b[32m61.58%\u001b[0m | Time: 0.02s\n",
804 | "Loss: \u001b[31m2.7072\u001b[0m | Accuracy: \u001b[32m61.91%\u001b[0m | Time: 0.01s\n"
805 | ]
806 | },
807 | {
808 | "name": "stderr",
809 | "output_type": "stream",
810 | "text": [
811 | "Training Progress: 93%|\u001b[32m██████████████████████████████████████░░░\u001b[0m| 931/1000 [00:10<00:00, 90.22it/s]\u001b[0m"
812 | ]
813 | },
814 | {
815 | "name": "stdout",
816 | "output_type": "stream",
817 | "text": [
818 | "Loss: \u001b[31m2.6398\u001b[0m | Accuracy: \u001b[32m63.67%\u001b[0m | Time: 0.01s\n",
819 | "Loss: \u001b[31m2.6531\u001b[0m | Accuracy: \u001b[32m61.21%\u001b[0m | Time: 0.01s\n"
820 | ]
821 | },
822 | {
823 | "name": "stderr",
824 | "output_type": "stream",
825 | "text": [
826 | "Training Progress: 95%|\u001b[32m██████████████████████████████████████▒░░\u001b[0m| 951/1000 [00:10<00:00, 86.56it/s]\u001b[0m"
827 | ]
828 | },
829 | {
830 | "name": "stdout",
831 | "output_type": "stream",
832 | "text": [
833 | "Loss: \u001b[31m2.3921\u001b[0m | Accuracy: \u001b[32m60.66%\u001b[0m | Time: 0.01s\n",
834 | "Loss: \u001b[31m2.3728\u001b[0m | Accuracy: \u001b[32m59.73%\u001b[0m | Time: 0.01s\n"
835 | ]
836 | },
837 | {
838 | "name": "stderr",
839 | "output_type": "stream",
840 | "text": [
841 | "Training Progress: 97%|\u001b[32m███████████████████████████████████████▒░\u001b[0m| 969/1000 [00:10<00:00, 75.22it/s]\u001b[0m"
842 | ]
843 | },
844 | {
845 | "name": "stdout",
846 | "output_type": "stream",
847 | "text": [
848 | "Loss: \u001b[31m2.5069\u001b[0m | Accuracy: \u001b[32m59.64%\u001b[0m | Time: 0.01s\n",
849 | "Loss: \u001b[31m2.2291\u001b[0m | Accuracy: \u001b[32m59.87%\u001b[0m | Time: 0.01s\n"
850 | ]
851 | },
852 | {
853 | "name": "stderr",
854 | "output_type": "stream",
855 | "text": [
856 | "Training Progress: 100%|\u001b[32m████████████████████████████████████████▒\u001b[0m| 995/1000 [00:11<00:00, 77.92it/s]\u001b[0m"
857 | ]
858 | },
859 | {
860 | "name": "stdout",
861 | "output_type": "stream",
862 | "text": [
863 | "Loss: \u001b[31m2.2856\u001b[0m | Accuracy: \u001b[32m59.92%\u001b[0m | Time: 0.01s\n",
864 | "Loss: \u001b[31m2.3157\u001b[0m | Accuracy: \u001b[32m59.92%\u001b[0m | Time: 0.01s\n"
865 | ]
866 | },
867 | {
868 | "name": "stderr",
869 | "output_type": "stream",
870 | "text": [
871 | "Training Progress: 100%|\u001b[32m████████████████████████████████████████\u001b[0m| 1000/1000 [00:11<00:00, 88.61it/s]\u001b[0m"
872 | ]
873 | },
874 | {
875 | "name": "stdout",
876 | "output_type": "stream",
877 | "text": [
878 | "\n",
879 | "\n",
880 | "Training is Completed Successfully !\n",
881 | "\n",
882 | "\n"
883 | ]
884 | },
885 | {
886 | "name": "stderr",
887 | "output_type": "stream",
888 | "text": [
889 | "\n"
890 | ]
891 | }
892 | ],
893 | "source": [
894 | "import sys\n",
895 | "import os\n",
896 | "\n",
897 | "# Get the current directory of the notebook and go up to the project root\n",
898 | "current_dir = os.getcwd() # Get the current working directory\n",
899 | "project_root = os.path.abspath(os.path.join(current_dir, '..'))\n",
900 | "\n",
901 | "# Add the project root directory to the system path\n",
902 | "sys.path.append(project_root)\n",
903 | "\n",
904 | "# Now you can import the class\n",
905 | "from model import Multi_Layer_ANN\n",
906 | "\n",
907 | "hidden_layers = [20, 10, 5] # You can adjust these\n",
908 | "activations = ['relu', 'relu', 'relu']\n",
909 | "\n",
910 | "# Initialize the ANN\n",
911 | "ann = Multi_Layer_ANN(X_train, y_train.values, hidden_layers, activations, loss='binary_crossentropy')\n",
912 | "\n",
913 | "# Fit the model\n",
914 | "ann.fit(epochs=1000, learning_rate=0.05)"
915 | ]
916 | },
917 | {
918 | "cell_type": "markdown",
919 | "metadata": {},
920 | "source": [
921 | "# Step 7: Make Predictions and Evaluate the Model"
922 | ]
923 | },
924 | {
925 | "cell_type": "code",
926 | "execution_count": 11,
927 | "metadata": {},
928 | "outputs": [
929 | {
930 | "name": "stdout",
931 | "output_type": "stream",
932 | "text": [
933 | "Test Accuracy: 78.63%\n"
934 | ]
935 | }
936 | ],
937 | "source": [
938 | "# Make predictions\n",
939 | "y_pred = ann.predict(X_test)\n",
940 | "\n",
941 | "# Calculate accuracy\n",
942 | "accuracy = np.mean(y_pred == y_test.values)\n",
943 | "print(f\"Test Accuracy: {accuracy * 100:.2f}%\")\n"
944 | ]
945 | },
946 | {
947 | "cell_type": "code",
948 | "execution_count": null,
949 | "metadata": {},
950 | "outputs": [],
951 | "source": []
952 | }
953 | ],
954 | "metadata": {
955 | "kernelspec": {
956 | "display_name": "Python 3",
957 | "language": "python",
958 | "name": "python3"
959 | },
960 | "language_info": {
961 | "codemirror_mode": {
962 | "name": "ipython",
963 | "version": 3
964 | },
965 | "file_extension": ".py",
966 | "mimetype": "text/x-python",
967 | "name": "python",
968 | "nbconvert_exporter": "python",
969 | "pygments_lexer": "ipython3",
970 | "version": "3.10.9"
971 | }
972 | },
973 | "nbformat": 4,
974 | "nbformat_minor": 2
975 | }
976 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | ---
3 |
4 | # **PyDeepFlow**
5 |
6 |
7 |
8 |
9 |
10 | ## **Author**
11 |
12 | **Author Name**: Ravin D
13 | **GitHub**: [ravin-d-27](https://github.com/ravin-d-27)
14 | **Email**: ravin.d3107@outlook.com
15 |
16 | The author is passionate about deep learning and is dedicated to creating tools that make neural networks more accessible to everyone.
17 |
18 | ## Contributors
19 |
20 | Thanks to these amazing people for contributing to this project:
21 |
22 |
23 |
24 |
25 |
26 |
27 | ## **What is Pydeepflow?**
28 |
29 | `pydeepflow` is a Python library designed for building and training deep learning models with an emphasis on ease of use and flexibility. It abstracts many of the complexities found in traditional deep learning libraries while still offering powerful functionality.
30 |
31 | ### **Key Features of Pydeepflow:**
32 |
33 | - **Simplicity**: Designed for ease of use, making it accessible to beginners.
34 | - **Configurability**: Users can easily modify network architectures, loss functions, and optimizers.
35 | - **Flexibility**: Can seamlessly switch between CPU and GPU for training.
36 |
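As a minimal sketch of what this configurability looks like in practice, the same constructor accepts different layer sizes, activations, and losses (the values below are illustrative; `X_train` and `y_train` are assumed to be preprocessed arrays, as in the full example further down):

```python
from pydeepflow.model import Multi_Layer_ANN

# Two illustrative configurations of the same API: a small multi-class
# network and a deeper binary classifier.
iris_net = Multi_Layer_ANN(X_train, y_train, hidden_layers=[5, 5],
                           activations=['relu', 'relu'],
                           loss='categorical_crossentropy')

titanic_net = Multi_Layer_ANN(X_train, y_train, hidden_layers=[20, 10, 5],
                              activations=['relu', 'relu', 'relu'],
                              loss='binary_crossentropy')
```
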
37 | ## **Why is Pydeepflow Better than TensorFlow and PyTorch?**
38 |
39 | While TensorFlow and PyTorch are widely used and powerful frameworks, `pydeepflow` offers specific advantages for certain use cases:
40 |
41 | 1. **User-Friendly API**: `pydeepflow` is designed to be intuitive, allowing users to create and train neural networks without delving into complex configurations.
42 |
43 | 2. **Rapid Prototyping**: It enables quick testing of ideas with minimal boilerplate code, which is particularly beneficial for educational purposes and research.
44 |
45 | 3. **Lightweight**: The library has a smaller footprint than TensorFlow and PyTorch, making it faster to install and better suited to resource-constrained environments.
46 |
47 | 4. **Focused Learning**: It provides a straightforward approach to understanding deep learning concepts without getting bogged down by the extensive features available in larger libraries.
48 |
49 | ## **Dependencies**
50 |
51 | The project requires the following Python libraries:
52 |
53 | - `numpy`: For numerical operations and handling arrays.
54 | - `pandas`: For data manipulation and loading datasets.
55 | - `scikit-learn`: For splitting data and preprocessing.
56 | - `tqdm`: For progress bars in training.
57 | - `jupyter`: (Optional) For working with Jupyter notebooks.
58 | - `pydeepflow`: The core library used to implement the Multi-Layer ANN.
59 |
60 | You can find the full list in `requirements.txt`.
61 |
62 | ## **How to Install and Use Pydeepflow from PyPI**
63 |
64 | ### **Installation**
65 |
66 | You can install `pydeepflow` directly from PyPI using pip. Open your command line and run:
67 |
68 | ```bash
69 | pip install pydeepflow
70 | ```
71 |
72 | ### **Using Pydeepflow**
73 |
74 | After installing, you can start using `pydeepflow` to create and train neural networks. Below is a brief example:
75 |
76 | ```python
77 | import pandas as pd
78 | import numpy as np
79 | from sklearn.model_selection import train_test_split
80 | from sklearn.preprocessing import StandardScaler
81 | from pydeepflow.model import Multi_Layer_ANN
82 |
83 | # Load Iris dataset
84 | url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
85 | df = pd.read_csv(url, header=None, names=["sepal_length", "sepal_width", "petal_length", "petal_width", "species"])
86 |
87 | # Data preprocessing
88 | df['species'] = df['species'].astype('category').cat.codes
89 | X = df.iloc[:, :-1].values
90 | y = np.eye(df['species'].nunique())[df['species'].values]
91 |
92 | # Split data into training and testing sets
93 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
94 |
95 | # Standardization
96 | scaler = StandardScaler()
97 | X_train = scaler.fit_transform(X_train)
98 | X_test = scaler.transform(X_test)
99 |
100 | # Train ANN
101 | ann = Multi_Layer_ANN(X_train, y_train, hidden_layers=[5, 5], activations=['relu', 'relu'], loss='categorical_crossentropy')
102 | ann.fit(epochs=1000, learning_rate=0.01)
103 |
104 | # Evaluate
105 | y_pred = ann.predict(X_test)
106 | accuracy = np.mean(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1))
107 | print(f"Test Accuracy: {accuracy * 100:.2f}%")
108 | ```
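
A trained network can also be saved and reloaded, following the pattern used in `Notebooks/Iris_Load_and_Save.py` (a minimal sketch; the pickle filename is arbitrary):

```python
# Persist the trained weights, then restore them into a fresh instance
# built with the same architecture and loss.
ann.save_model('my_ann_model.pkl')

ann_loaded = Multi_Layer_ANN(X_train, y_train, hidden_layers=[5, 5],
                             activations=['relu', 'relu'],
                             loss='categorical_crossentropy')
ann_loaded.load_model('my_ann_model.pkl')
print(ann_loaded.predict(X_test))
```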
109 |
110 | ## **Contributing to Pydeepflow on GitHub**
111 |
112 | Contributions are welcome! If you would like to contribute to `pydeepflow`, follow these steps:
113 |
114 | 1. **Fork the Repository**: Click the "Fork" button at the top right of the repository page.
115 |
116 | 2. **Clone Your Fork**: Use git to clone your forked repository:
117 | ```bash
118 | git clone https://github.com/ravin-d-27/PyDeepFlow.git
119 | cd pydeepflow
120 | ```
121 |
122 | 3. **Create a Branch**: Create a new branch for your feature or bug fix:
123 | ```bash
124 | git checkout -b my-feature-branch
125 | ```
126 |
127 | 4. **Make Your Changes**: Implement your changes and commit them:
128 | ```bash
129 | git commit -m "Add some feature"
130 | ```
131 |
132 | 5. **Push to Your Fork**:
133 | ```bash
134 | git push origin my-feature-branch
135 | ```
136 |
137 | 6. **Submit a Pull Request**: Go to the original repository and submit a pull request.
138 |
139 | ## **References**
140 |
141 | - **Iris Dataset**: The dataset used in this project can be found at the UCI Machine Learning Repository: [Iris Dataset](https://archive.ics.uci.edu/ml/machine-learning-databases/iris/)
142 |
143 | - **pydeepflow Documentation**: [pydeepflow Documentation](https://pypi.org/project/pydeepflow/)
144 |
145 | - **Deep Learning Resources**: For more about deep learning, consider the following:
146 | - Goodfellow, Ian, et al. *Deep Learning*. MIT Press, 2016.
147 | - Chollet, François. *Deep Learning with Python*. Manning Publications, 2017.
148 |
149 | ---
150 |
--------------------------------------------------------------------------------
/assets/Logo.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ravin-d-27/PyDeepFlow/dcc01294b28360e2fed925b70ff9cf5c898e0f00/assets/Logo.webp
--------------------------------------------------------------------------------
/checkGridSearchCV.py:
--------------------------------------------------------------------------------
1 |
2 | import numpy as np
3 | from pydeepflow.gridSearch import GridSearchCV
4 | from pydeepflow.model import Multi_Layer_ANN
5 | # Assuming X and Y are your training data and labels
6 | X_train = np.random.rand(100, 20) # Example feature data
7 | Y_train = np.random.randint(0, 2, size=(100, 1)) # Example binary labels
8 |
9 | # Define the parameter grid
10 | param_grid = {
11 | 'hidden_layers': [[5,5], [20,20], [10, 10]], # Different configurations of hidden layers
12 | 'activations': [['relu','relu'], ['tanh','relu'], ['sigmoid','relu']], # Different activation functions
13 | 'l2_lambda': [0.0, 0.01], # Different regularization strengths
14 | 'dropout_rate': [0.0, 0.5] # Different dropout rates
15 | }
16 |
17 | # Initialize GridSearchCV
18 | grid_search = GridSearchCV(Multi_Layer_ANN, param_grid, scoring='accuracy', cv=3)
19 |
20 | # Fit Grid Search
21 | grid_search.fit(X_train, Y_train)
--------------------------------------------------------------------------------
/licences/numpy_LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) 2005-2024, NumPy Developers.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are
6 | met:
7 |
8 | * Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 |
11 | * Redistributions in binary form must reproduce the above
12 | copyright notice, this list of conditions and the following
13 | disclaimer in the documentation and/or other materials provided
14 | with the distribution.
15 |
16 | * Neither the name of the NumPy Developers nor the names of any
17 | contributors may be used to endorse or promote products derived
18 | from this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/licences/pandas_LICENSE.txt:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team
4 | All rights reserved.
5 |
6 | Copyright (c) 2011-2024, Open source contributors.
7 |
8 | Redistribution and use in source and binary forms, with or without
9 | modification, are permitted provided that the following conditions are met:
10 |
11 | * Redistributions of source code must retain the above copyright notice, this
12 | list of conditions and the following disclaimer.
13 |
14 | * Redistributions in binary form must reproduce the above copyright notice,
15 | this list of conditions and the following disclaimer in the documentation
16 | and/or other materials provided with the distribution.
17 |
18 | * Neither the name of the copyright holder nor the names of its
19 | contributors may be used to endorse or promote products derived from
20 | this software without specific prior written permission.
21 |
22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/licences/scikit-learn_LICENSE.txt:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2007-2024 The scikit-learn developers.
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/pydeepflow/__init__.py:
--------------------------------------------------------------------------------
1 | from .activations import activation, activation_derivative
2 | from .losses import get_loss_function, get_loss_derivative
3 | from .device import Device
4 | from .model import Multi_Layer_ANN, Plotting_Utils
5 | from .learning_rate_scheduler import LearningRateScheduler
6 | from .checkpoints import ModelCheckpoint
7 | from .regularization import Regularization
8 | from .early_stopping import EarlyStopping
9 | from .cross_validator import CrossValidator
10 | from .batch_normalization import BatchNormalization
11 | from .gridSearch import GridSearchCV
12 |
13 | __all__ = [
14 | "activation",
15 | "activation_derivative",
16 | "get_loss_function",
17 | "get_loss_derivative",
18 | "Device",
19 | "Multi_Layer_ANN",
20 | "Plotting_Utils",
21 | "LearningRateScheduler",
22 | "ModelCheckpoint",
23 | "Regularization",
24 | "EarlyStopping",
25 | "CrossValidator",
26 | "BatchNormalization",
27 | "GridSearchCV",
28 | ]
29 |
--------------------------------------------------------------------------------
/pydeepflow/activations.py:
--------------------------------------------------------------------------------
1 | def activation(x, func, device, alpha=0.01):
2 | """
3 | Applies the specified activation function to the input data.
4 |
5 | Parameters:
6 | -----------
7 | x : np.ndarray or similar
8 | The input data to which the activation function will be applied.
9 | func : str
10 | The activation function to apply.
11 | Supported values: 'relu', 'leaky_relu', 'prelu', 'elu', 'gelu', 'swish', 'selu',
12 | 'softplus', 'mish', 'rrelu', 'hardswish', 'sigmoid', 'softsign',
13 | 'tanh', 'hardtanh', 'hardsigmoid', 'tanhshrink', 'softshrink',
14 | 'hardshrink', 'softmax'.
15 | device : object
16 | The computational device (CPU or GPU) that handles array operations.
17 | alpha : float, optional (default=0.01)
18 | Parameter used for activations like Leaky ReLU, PReLU, and RReLU.
19 |
20 | Returns:
21 | --------
22 | np.ndarray
23 | The result of applying the specified activation function to the input data.
24 |
25 | Raises:
26 | -------
27 | ValueError
28 | If the specified activation function is unsupported.
29 |
30 | Notes:
31 | ------
32 | - ReLU (Rectified Linear Unit): Returns 0 for negative inputs, otherwise returns the input value.
33 | - Leaky ReLU: Similar to ReLU, but allows a small negative slope (alpha * x) for x < 0.
34 | - PReLU (Parametric ReLU): Similar to Leaky ReLU but with a learnable parameter for the negative slope.
35 | - ELU (Exponential Linear Unit): Applies exponential transformation for x < 0, linear for x > 0.
36 | - GELU (Gaussian Error Linear Unit): Approximates a Gaussian error function. Smooth curve activation.
37 | - Swish: Uses sigmoid(x) * x, introducing smooth non-linearity.
38 | - SELU: Scaled ELU with fixed scaling factors to promote self-normalization in neural networks.
39 | - Softplus: Smooth approximation of ReLU, calculated as log(1 + exp(x)).
40 | - Mish: A newer activation that uses x * tanh(softplus(x)).
41 | - RReLU (Randomized Leaky ReLU): A randomized variant of Leaky ReLU used mainly for regularization.
42 | - HardSwish: Similar to Swish but uses a piecewise linear approximation for faster computation.
43 | - Sigmoid: S-shaped curve that maps any input to the range (0, 1).
44 | - Softsign: Another S-shaped function, but uses x / (1 + |x|) for smoother transitions.
45 | - Tanh: Maps input to the range (-1, 1) with a hyperbolic tangent curve.
46 | - HardTanh: Similar to Tanh but clamped to the range [-1, 1].
47 | - HardSigmoid: A faster approximation of sigmoid, producing values in the range (0, 1).
48 | - Tanhshrink: Subtracts the tanh activation from the input: x - tanh(x).
49 | - Softshrink: Shrinks the values towards zero by a threshold of alpha.
50 | - Hardshrink: Similar to Softshrink but with hard cutoffs at alpha and -alpha.
51 | - Softmax: Maps input to a probability distribution by exponentiating and normalizing the inputs.
52 | """
53 | if func == 'relu':
54 | return device.maximum(0, x)
55 | elif func == 'leaky_relu':
56 | return device.where(x > 0, x, alpha * x)
57 | elif func == 'prelu':
58 | return device.where(x > 0, x, alpha * x)
59 | elif func == 'elu':
60 | return device.where(x > 0, x, alpha * (device.exp(x) - 1))
61 | elif func == 'gelu':
62 | return 0.5 * x * (1 + device.tanh(device.sqrt(2 / device.pi) * (x + 0.044715 * x ** 3)))
63 | elif func == 'swish':
64 | return x / (1 + device.exp(-x))
65 | elif func == 'selu':
66 | lam = 1.0507
67 | alpha_selu = 1.67326
68 | return lam * device.where(x > 0, x, alpha_selu * (device.exp(x) - 1))
69 | elif func == 'softplus':
70 | return device.log(1 + device.exp(x))
71 | elif func == 'mish':
72 | return x * device.tanh(device.log(1 + device.exp(x)))
73 | elif func == 'rrelu':
74 | return device.where(x > 0, x, alpha * x)
75 | elif func == 'hardswish':
76 | return x * device.where(x > 3, 1, device.where(x < -3, 0, (x + 3) / 6))
77 | elif func == 'sigmoid':
78 | return 1 / (1 + device.exp(-x))
79 | elif func == 'softsign':
80 | return x / (1 + device.abs(x))
81 | elif func == 'tanh':
82 | return device.tanh(x)
83 | elif func == 'hardtanh':
84 | return device.where(x > 1, 1, device.where(x < -1, -1, x))
85 | elif func == 'hardsigmoid':
86 | return device.where(x > 1, 1, device.where(x < -1, 0, (x + 1) / 2))
87 | elif func == 'tanhshrink':
88 | return x - device.tanh(x)
89 | elif func == 'softshrink':
90 | return device.where(device.abs(x) > alpha, x - alpha * device.sign(x), 0)
91 | elif func == 'hardshrink':
92 | return device.where(device.abs(x) > alpha, x, 0)
93 | elif func == 'softmax':
94 | exp_x = device.exp(x - device.max(x, axis=-1, keepdims=True))
95 | return exp_x / device.sum(exp_x, axis=-1, keepdims=True)
96 | else:
97 | raise ValueError(f"Unsupported activation function: {func}")
98 |
99 | def activation_derivative(x, func, device, alpha=0.01):
100 | """
101 | Computes the derivative of the specified activation function.
102 |
103 | Parameters:
104 | -----------
105 | x : np.ndarray or similar
106 | The input data on which the derivative will be computed.
107 | func : str
108 | The activation function whose derivative is to be computed.
109 | Supported values: 'relu', 'leaky_relu', 'prelu', 'elu', 'gelu', 'swish', 'selu',
110 | 'softplus', 'mish', 'rrelu', 'hardswish', 'sigmoid', 'softsign',
111 | 'tanh', 'hardtanh', 'hardsigmoid', 'tanhshrink', 'softshrink',
112 | 'hardshrink', 'softmax'.
113 | device : object
114 | The computational device (CPU or GPU) that handles array operations.
115 | alpha : float, optional (default=0.01)
116 | Parameter used for activations like Leaky ReLU, PReLU, and RReLU.
117 |
118 | Returns:
119 | --------
120 | np.ndarray
121 | The derivative of the activation function applied to the input data.
122 |
123 | Raises:
124 | -------
125 | ValueError
126 | If the specified activation function's derivative is unsupported.
127 |
128 | Notes:
129 | ------
130 | - The ReLU derivative is 1 if x > 0, otherwise 0.
131 | - The Leaky ReLU derivative is 1 if x > 0, otherwise alpha.
132 | - The PReLU derivative is 1 if x > 0, otherwise alpha.
133 | - The ELU derivative is 1 if x > 0, otherwise alpha * exp(x).
134 | - The GELU derivative is complex but approximated as a smooth function using tanh and polynomials.
135 | - The Swish derivative is sigmoid(x) + x * sigmoid'(x).
136 | - The SELU derivative is lam if x > 0, otherwise lam * alpha_selu * exp(x).
137 | - The Softplus derivative is sigmoid(x).
138 | - The Mish derivative is a combination of tanh(softplus(x)) and x.
139 | - The RReLU derivative is 1 if x > 0, otherwise alpha.
140 | - The HardSwish derivative is a piecewise function that ranges from 0 to 1, depending on x.
141 | - The Sigmoid derivative is sigmoid(x) * (1 - sigmoid(x)).
142 | - The Softsign derivative is 1 / (1 + |x|)^2.
143 | - The Tanh derivative is 1 - tanh(x)^2.
144 | - The HardTanh derivative is 1 in the range [-1, 1], otherwise 0.
145 | - The HardSigmoid derivative is 0.5 for x in [-1, 1], otherwise 0.
146 | - The Tanhshrink derivative is 1 - tanh(x)^2.
147 | - The Softshrink and Hardshrink derivatives are 1 where |x| > alpha, otherwise 0.
148 | - The Softmax derivative assumes usage with cross-entropy loss, resulting in softmax(x) * (1 - softmax(x)).
149 | """
150 | if func == 'relu':
151 | return device.where(x > 0, 1, 0)
152 | elif func == 'leaky_relu':
153 | return device.where(x > 0, 1, alpha)
154 | elif func == 'prelu':
155 | return device.where(x > 0, 1, alpha)
156 | elif func == 'elu':
157 | return device.where(x > 0, 1, alpha * device.exp(x))
158 |     elif func == 'gelu':
159 |         return 0.5 * (1 + device.tanh(device.sqrt(2 / device.pi) * (x + 0.044715 * x ** 3))) + \
160 |                0.5 * x * (1 - device.tanh(device.sqrt(2 / device.pi) * (x + 0.044715 * x ** 3)) ** 2) * device.sqrt(2 / device.pi) * (1 + 3 * 0.044715 * x ** 2)
161 | elif func == 'swish':
162 | sigma = 1 / (1 + device.exp(-x))
163 | return sigma + x * sigma * (1 - sigma)
164 | elif func == 'selu':
165 | lam = 1.0507
166 | alpha_selu = 1.67326
167 | return lam * device.where(x > 0, 1, alpha_selu * device.exp(x))
168 | elif func == 'softplus':
169 | return 1 / (1 + device.exp(-x))
170 |     elif func == 'mish':
171 |         sp = device.log(1 + device.exp(x))  # softplus(x)
172 |         tanh_sp = device.tanh(sp)
173 |         return tanh_sp + x * (1 - tanh_sp ** 2) / (1 + device.exp(-x))  # tanh(sp) + x * sech^2(sp) * sigmoid(x)
174 | elif func == 'rrelu':
175 | return device.where(x > 0, 1, alpha)
176 | elif func == 'hardswish':
177 | return device.where(x > -3, device.where(x < 3, x / 3 + 0.5, 1), 0)
178 | elif func == 'sigmoid':
179 |         return x * (1 - x)  # assumes x is the sigmoid output, as passed from the model's backpropagation
180 | elif func == 'softsign':
181 | return 1 / (1 + device.abs(x)) ** 2
182 | elif func == 'tanh':
183 |         return 1 - x ** 2  # assumes x is the tanh output
184 | elif func == 'hardtanh':
185 | return device.where(device.abs(x) <= 1, 1, 0)
186 | elif func == 'hardsigmoid':
187 | return device.where(device.abs(x) <= 1, 0.5, 0)
188 | elif func == 'tanhshrink':
189 | return 1 - device.tanh(x) ** 2
190 | elif func == 'softshrink':
191 | return device.where(device.abs(x) > alpha, 1, 0)
192 | elif func == 'hardshrink':
193 | return device.where(device.abs(x) > alpha, 1, 0)
194 | elif func == 'softmax':
195 | return x * (1 - x)
196 | else:
197 | raise ValueError(f"Unsupported activation derivative: {func}")
198 |
--------------------------------------------------------------------------------
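
A minimal usage sketch for the functions above (illustrative, not part of the repository); it assumes the CPU-backed Device defined in pydeepflow/device.py:

    import numpy as np
    from pydeepflow.device import Device
    from pydeepflow.activations import activation, activation_derivative

    device = Device(use_gpu=False)
    x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])

    print(activation(x, 'relu', device))                   # [0.  0.  0.  0.5 2. ]
    print(activation(x, 'leaky_relu', device, alpha=0.1))  # negative inputs scaled by alpha
    print(activation_derivative(x, 'relu', device))        # [0 0 0 1 1]
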
/pydeepflow/batch_normalization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class BatchNormalization:
4 | """
5 | A class that implements Batch Normalization for a layer in a neural network.
6 |
7 | Batch Normalization helps stabilize the learning process and accelerate training
8 | by normalizing the inputs of each layer. This class can be used during training
9 | and inference.
10 | """
11 |
12 | def __init__(self, layer_size, epsilon=1e-5, momentum=0.9, device=np):
13 | """
14 | Initializes the BatchNormalization object.
15 |
16 | Parameters:
17 | layer_size (int): The size of the layer to which batch normalization is applied.
18 | epsilon (float): A small constant added to the variance for numerical stability.
19 | momentum (float): The momentum for updating the running mean and variance.
20 | device (module): The device module (e.g., numpy) to perform calculations on.
21 | """
22 | self.epsilon = epsilon
23 | self.momentum = momentum
24 | self.device = device
25 |
26 | self.gamma = self.device.ones((1, layer_size))
27 | self.beta = self.device.zeros((1, layer_size))
28 |
29 | self.running_mean = self.device.zeros((1, layer_size))
30 | self.running_variance = self.device.ones((1, layer_size))
31 |
32 | def normalize(self, Z, training=True):
33 | """
34 | Normalizes the input data Z.
35 |
36 | During training, it computes the batch mean and variance, and updates the
37 | running mean and variance. During inference, it uses the running statistics.
38 |
39 | Parameters:
40 | Z (ndarray): The input data of shape (batch_size, layer_size) to normalize.
41 | training (bool): A flag indicating whether the model is in training mode.
42 | If True, updates running statistics; otherwise uses them.
43 |
44 | Returns:
45 | ndarray: The normalized and scaled output data.
46 | """
47 | if training:
48 | batch_mean = self.device.mean(Z, axis=0, keepdims=True)
49 | batch_variance = self.device.var(Z, axis=0, keepdims=True)
50 |
51 | Z_normalized = (Z - batch_mean) / self.device.sqrt(batch_variance + self.epsilon)
52 |
53 | self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * batch_mean
54 | self.running_variance = self.momentum * self.running_variance + (1 - self.momentum) * batch_variance
55 | else:
56 | Z_normalized = (Z - self.running_mean) / self.device.sqrt(self.running_variance + self.epsilon)
57 |
58 | Z_scaled = self.gamma * Z_normalized + self.beta
59 |
60 | return Z_scaled
61 |
62 | def backprop(self, Z, dZ, learning_rate):
63 | """
64 | Computes the gradients for gamma and beta during backpropagation
65 | and updates their values.
66 |
67 | Parameters:
68 | Z (ndarray): The input data used for normalization, of shape (batch_size, layer_size).
69 | dZ (ndarray): The gradient of the loss with respect to the output of the layer,
70 | of shape (batch_size, layer_size).
71 | learning_rate (float): The learning rate for updating gamma and beta.
72 |
73 | Returns:
74 |             ndarray: The upstream gradient dZ, returned unchanged (this simplified backward pass does not propagate gradients through the normalization statistics).
75 | """
76 | dgamma = self.device.sum(dZ * Z, axis=0, keepdims=True)
77 | dbeta = self.device.sum(dZ, axis=0, keepdims=True)
78 |
79 | self.gamma -= learning_rate * dgamma
80 | self.beta -= learning_rate * dbeta
81 |
82 | return dZ
83 |
--------------------------------------------------------------------------------
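
A short sketch of the class above on a small batch (illustrative; uses the default NumPy device):

    import numpy as np
    from pydeepflow.batch_normalization import BatchNormalization

    bn = BatchNormalization(layer_size=3)
    Z = np.random.randn(8, 3) * 5.0 + 2.0            # batch of 8 samples, 3 units

    Z_train = bn.normalize(Z, training=True)         # batch statistics; running stats updated
    Z_infer = bn.normalize(Z, training=False)        # running mean/variance used instead

    print(Z_train.mean(axis=0), Z_train.std(axis=0)) # roughly 0 and 1 per unit (gamma=1, beta=0)
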
/pydeepflow/checkpoints.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | class ModelCheckpoint:
5 | def __init__(self, save_dir, monitor="val_loss", save_best_only=True, save_freq=1):
6 | """
7 | Args:
8 | save_dir (str): Directory where the model weights will be saved.
9 | monitor (str): Metric to monitor for saving (e.g., 'val_loss', 'val_accuracy').
10 | save_best_only (bool): Save only the best weights based on the monitored metric.
11 | save_freq (int): Frequency of saving checkpoints (in epochs).
12 | """
13 | self.save_dir = save_dir
14 | self.monitor = monitor
15 | self.save_best_only = save_best_only
16 | self.save_freq = save_freq
17 | self.best_metric = np.inf if "loss" in monitor else -np.inf
18 | self.best_val_loss = float('inf') # Initial best validation loss
19 |
20 | def save_weights(self, epoch, weights, biases, val_loss):
21 |
22 | """ Saves weights and biases to the specified directory.
23 | Args:
24 | epoch (int): The current epoch number.
25 | weights (numpy.ndarray): The weights of the model to be saved.
26 | biases (numpy.ndarray): The biases of the model to be saved.
27 | val_loss (float): The validation loss value to be logged.
28 | Returns:
29 | None
30 | """
31 |
32 | # Create the directory if it does not exist
33 | if not os.path.exists(self.save_dir):
34 | os.makedirs(self.save_dir) # Create the directory
35 |
36 | checkpoint_path = f"{self.save_dir}/checkpoint_epoch_{epoch}.npz"
37 |
38 | # Prepare data to save
39 | data = {}
40 | for i, (w, b) in enumerate(zip(weights, biases)):
41 | data[f'weights_layer_{i}'] = w
42 | data[f'biases_layer_{i}'] = b
43 |
44 | # Save as .npz file
45 | np.savez(checkpoint_path, **data)
46 |
47 |
48 | def should_save(self, epoch, metric):
49 | """
50 | Determines whether to save based on the best metric or save frequency.
51 |
52 | Args:
53 | epoch (int): The current epoch number.
54 | metric (float): The current value of the monitored metric (e.g., loss or accuracy).
55 |
56 | Returns:
57 | bool: True if the model should be saved, False otherwise.
58 | """
59 | if self.save_freq and epoch % self.save_freq == 0:
60 | if self.save_best_only:
61 | if ("loss" in self.monitor and metric < self.best_metric) or \
62 | ("accuracy" in self.monitor and metric > self.best_metric):
63 | self.best_metric = metric
64 | return True
65 | else:
66 | return True
67 | return False
68 |
69 |
--------------------------------------------------------------------------------
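
Checkpoints are written as plain .npz archives keyed by layer index, so they can be restored with NumPy alone. A minimal sketch (the toy weights and epoch number are illustrative):

    import numpy as np
    from pydeepflow.checkpoints import ModelCheckpoint

    checkpoint = ModelCheckpoint(save_dir="./checkpoints", monitor="val_loss", save_best_only=True)

    # Toy weights/biases for two layers (in practice these come from Multi_Layer_ANN)
    weights = [np.random.randn(4, 5), np.random.randn(5, 3)]
    biases = [np.zeros((1, 5)), np.zeros((1, 3))]

    epoch, val_loss = 10, 0.42
    if checkpoint.should_save(epoch, val_loss):
        checkpoint.save_weights(epoch, weights, biases, val_loss)

    # Restore the arrays later by layer index
    data = np.load(f"./checkpoints/checkpoint_epoch_{epoch}.npz")
    restored_weights = [data[f"weights_layer_{i}"] for i in range(len(weights))]
    restored_biases = [data[f"biases_layer_{i}"] for i in range(len(biases))]
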
/pydeepflow/cross_validator.py:
--------------------------------------------------------------------------------
1 | # cross_validator.py
2 | import numpy as np
3 | from sklearn.model_selection import KFold
4 |
5 | class CrossValidator:
6 | def __init__(self, n_splits=5):
7 | """
8 | Initialize the CrossValidator with the number of splits.
9 |
10 | Args:
11 | n_splits (int): The number of folds for cross-validation.
12 | """
13 | self.n_splits = n_splits
14 |
15 | def split(self, X, y):
16 | """
17 | Generate indices for k-fold cross-validation.
18 |
19 | Args:
20 | X (array-like): Feature data.
21 | y (array-like): Labels.
22 |
23 | Returns:
24 | generator: A generator yielding train and validation indices.
25 | """
26 | kf = KFold(n_splits=self.n_splits)
27 | for train_index, val_index in kf.split(X):
28 | yield train_index, val_index
29 |
30 | def get_metrics(self, y_true, y_pred, metrics):
31 | """
32 | Calculate and return specified metrics.
33 |
34 | Args:
35 | y_true (array-like): True labels.
36 | y_pred (array-like): Predicted labels.
37 | metrics (list): List of metrics to calculate.
38 |
39 | Returns:
40 | dict: A dictionary containing the requested metrics.
41 | """
42 | results = {}
43 | for metric in metrics:
44 | if metric == "accuracy":
45 | results['accuracy'] = np.mean(y_true == y_pred)
46 |
47 | return results
48 |
--------------------------------------------------------------------------------
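
A minimal sketch of the split/metrics workflow above (illustrative; the data and "predictions" are placeholders):

    import numpy as np
    from pydeepflow.cross_validator import CrossValidator

    X = np.random.randn(100, 4)
    y = np.random.randint(0, 2, size=100)

    cv = CrossValidator(n_splits=5)
    for train_index, val_index in cv.split(X, y):
        # train a model on X[train_index], y[train_index] here, then score its predictions
        y_pred = y[val_index]                        # placeholder predictions
        print(cv.get_metrics(y[val_index], y_pred, metrics=["accuracy"]))
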
/pydeepflow/device.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | try:
3 | import cupy as cp
4 | except ImportError:
5 | cp = None
6 |
7 | class Device:
8 | """
9 | A utility class to handle computations on either CPU (NumPy) or GPU (CuPy)
10 | depending on the user's preference.
11 |
12 | Parameters:
13 | -----------
14 | use_gpu : bool, optional (default=False)
15 | If True, the class uses GPU (CuPy) for array operations. If CuPy is not installed,
 16 |         a ValueError is raised.
17 |
18 | Attributes:
19 | -----------
20 | use_gpu : bool
21 | Whether to use GPU (CuPy) or CPU (NumPy) for computations.
22 |
23 | Raises:
24 | -------
25 | ValueError
26 | If `use_gpu=True` but CuPy is not installed.
27 | """
28 |
29 | def __init__(self, use_gpu=False):
30 | self.use_gpu = use_gpu
31 | if use_gpu and cp is None:
32 | raise ValueError("CuPy is not installed, please install CuPy for GPU support.")
33 |
34 | def abs(self, x):
35 | """
36 | Computes the absolute value of each element in the input array.
37 |
38 | Parameters:
39 | -----------
40 | x : np.ndarray or cp.ndarray
41 | The input array.
42 |
43 | Returns:
44 | --------
45 | np.ndarray or cp.ndarray
46 | The absolute value of each element in `x`.
47 | """
48 | return cp.abs(x) if self.use_gpu else np.abs(x)
49 |
50 |
51 | def array(self, data):
52 | """
53 | Converts input data into a NumPy or CuPy array.
54 |
55 | Parameters:
56 | -----------
57 | data : array-like
58 | The input data to convert.
59 |
60 | Returns:
61 | --------
62 | np.ndarray or cp.ndarray
63 | The converted array, depending on whether GPU or CPU is used.
64 | """
65 | return cp.array(data) if self.use_gpu else np.array(data)
66 |
67 | def zeros(self, shape):
68 | """
69 | Creates an array of zeros with the specified shape.
70 |
71 | Parameters:
72 | -----------
73 | shape : tuple of ints
74 | The shape of the output array.
75 |
76 | Returns:
77 | --------
78 | np.ndarray or cp.ndarray
79 | An array of zeros, either using NumPy or CuPy.
80 | """
81 | return cp.zeros(shape) if self.use_gpu else np.zeros(shape)
82 |
83 | def random(self):
84 | """
85 | Returns a random module, either NumPy's or CuPy's, depending on the device.
86 |
87 | Returns:
88 | --------
89 | module
90 | Either `np.random` or `cp.random`.
91 | """
92 | return cp.random if self.use_gpu else np.random
93 |
94 | def exp(self, x):
95 | """
96 | Computes the element-wise exponential of the input array.
97 |
98 | Parameters:
99 | -----------
100 | x : np.ndarray or cp.ndarray
101 | The input array.
102 |
103 | Returns:
104 | --------
105 | np.ndarray or cp.ndarray
106 | The element-wise exponential of the input.
107 | """
108 | return cp.exp(x) if self.use_gpu else np.exp(x)
109 |
110 | def dot(self, a, b):
111 | """
112 | Computes the dot product of two arrays.
113 |
114 | Parameters:
115 | -----------
116 | a : np.ndarray or cp.ndarray
117 | First input array.
118 | b : np.ndarray or cp.ndarray
119 | Second input array.
120 |
121 | Returns:
122 | --------
123 | np.ndarray or cp.ndarray
124 | The dot product of `a` and `b`.
125 | """
126 | return cp.dot(a, b) if self.use_gpu else np.dot(a, b)
127 |
128 | def maximum(self, a, b):
129 | """
130 | Element-wise maximum of two arrays.
131 |
132 | Parameters:
133 | -----------
134 | a : np.ndarray or cp.ndarray
135 | First input array.
136 | b : np.ndarray or cp.ndarray
137 | Second input array.
138 |
139 | Returns:
140 | --------
141 | np.ndarray or cp.ndarray
142 | The element-wise maximum of `a` and `b`.
143 | """
144 | return cp.maximum(a, b) if self.use_gpu else np.maximum(a, b)
145 |
146 | def tanh(self, x):
147 | """
148 | Computes the hyperbolic tangent of the input array.
149 |
150 | Parameters:
151 | -----------
152 | x : np.ndarray or cp.ndarray
153 | The input array.
154 |
155 | Returns:
156 | --------
157 | np.ndarray or cp.ndarray
158 | The hyperbolic tangent of the input.
159 | """
160 | return cp.tanh(x) if self.use_gpu else np.tanh(x)
161 |
162 | def sum(self, x, axis=None, keepdims=False):
163 | """
164 | Sums the elements of an array along a specified axis.
165 |
166 | Parameters:
167 | -----------
168 | x : np.ndarray or cp.ndarray
169 | Input array.
170 | axis : int or None, optional (default=None)
171 | Axis along which the sum is performed.
172 | keepdims : bool, optional (default=False)
173 | Whether to keep the reduced dimensions.
174 |
175 | Returns:
176 | --------
177 | np.ndarray or cp.ndarray
178 | The sum of elements in `x` along the specified axis.
179 | """
180 | return cp.sum(x, axis=axis, keepdims=keepdims) if self.use_gpu else np.sum(x, axis=axis, keepdims=keepdims)
181 |
182 | def where(self, condition, x, y):
183 | """
184 | Return elements chosen from `x` or `y` depending on `condition`.
185 |
186 | Parameters:
187 | -----------
188 | condition : array-like
189 | Where True, yield `x`, otherwise yield `y`.
190 | x : array-like
191 | Values from which to choose where `condition` is True.
192 | y : array-like
193 | Values from which to choose where `condition` is False.
194 |
195 | Returns:
196 | --------
197 | np.ndarray or cp.ndarray
198 | Array formed by elements from `x` or `y`, depending on the condition.
199 | """
200 | return cp.where(condition, x, y) if self.use_gpu else np.where(condition, x, y)
201 |
202 | def sqrt(self, x):
203 | """
204 | Computes the square root of the input array, element-wise.
205 |
206 | Parameters:
207 | -----------
208 | x : np.ndarray or cp.ndarray
209 | The input array.
210 |
211 | Returns:
212 | --------
213 | np.ndarray or cp.ndarray
214 | The square root of each element in `x`.
215 | """
216 | return cp.sqrt(x) if self.use_gpu else np.sqrt(x)
217 |
218 | def log(self, x):
219 | """
220 | Computes the natural logarithm of the input array, element-wise.
221 |
222 | Parameters:
223 | -----------
224 | x : np.ndarray or cp.ndarray
225 | The input array.
226 |
227 | Returns:
228 | --------
229 | np.ndarray or cp.ndarray
230 | The natural logarithm of each element in `x`.
231 | """
232 | return cp.log(x) if self.use_gpu else np.log(x)
233 |
234 | def asnumpy(self, x):
235 | """
236 | Converts a CuPy array to a NumPy array, or simply returns the input if it is already a NumPy array.
237 |
238 | Parameters:
239 | -----------
240 | x : cp.ndarray or np.ndarray
241 | The input array.
242 |
243 | Returns:
244 | --------
245 | np.ndarray
246 | A NumPy array.
247 | """
248 | return cp.asnumpy(x) if self.use_gpu else x
249 |
250 | def max(self, x, axis=None, keepdims=False):
251 | """
252 | Returns the maximum of an array or along a specific axis.
253 |
254 | Parameters:
255 | -----------
256 | x : np.ndarray or cp.ndarray
257 | Input array.
258 | axis : int or None, optional (default=None)
259 | Axis along which to find the maximum.
260 | keepdims : bool, optional (default=False)
261 | Whether to keep the reduced dimensions.
262 |
263 | Returns:
264 | --------
265 | np.ndarray or cp.ndarray
266 | The maximum value(s) in `x` along the specified axis.
267 | """
268 | return cp.max(x, axis=axis, keepdims=keepdims) if self.use_gpu else np.max(x, axis=axis, keepdims=keepdims)
269 |
270 | def norm(self, x, ord=None, axis=None, keepdims=False):
271 | """
272 | Matrix or vector norm.
273 |
274 | This function is able to return one of eight different matrix norms,
275 |         or one of an infinite number of vector norms, depending
276 | on the value of the ``ord`` parameter.
277 |
278 | Parameters
279 | ----------
280 | x : np.ndarray or cp.ndarray
281 | Input array.
282 | ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional (default=None)
283 | Order of the norm.
284 |         axis : int or None, optional (default=None)
285 | Axis along which to find the norm.
286 | keepdims : bool, optional (default=False)
287 | Whether to keep the reduced dimensions.
288 |
289 | Returns:
290 | --------
291 | float or np.ndarray or cp.ndarray
292 | Norm of matrix or vector(s).
293 | """
294 |
295 | return cp.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims) if self.use_gpu else np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
296 |
297 | def ones(self, shape):
298 | """
299 | Creates an array of ones with the specified shape.
300 |
301 | Parameters:
302 | -----------
303 | shape : tuple of ints
304 | The shape of the output array.
305 |
306 | Returns:
307 | --------
308 | np.ndarray or cp.ndarray
309 | An array of ones, either using NumPy or CuPy.
310 | """
311 | return cp.ones(shape) if self.use_gpu else np.ones(shape)
312 |
313 |
314 | def mean(self, x, axis=None, keepdims=False):
315 | """
316 | Computes the mean of the input array along the specified axis.
317 |
318 | Parameters:
319 | -----------
320 | x : np.ndarray or cp.ndarray
321 | The input array.
322 | axis : int or tuple of ints, optional
323 | Axis or axes along which the means are computed.
324 | keepdims : bool, optional
325 | If True, the reduced dimensions are retained.
326 |
327 | Returns:
328 | --------
329 | np.ndarray or cp.ndarray
330 | The mean of the input array along the specified axis.
331 | """
332 | return cp.mean(x, axis=axis, keepdims=keepdims) if self.use_gpu else np.mean(x, axis=axis, keepdims=keepdims)
333 |
334 | def var(self, x, axis=None, keepdims=False):
335 | """
336 | Computes the variance of an array along a specified axis.
337 |
338 | Parameters:
339 | -----------
340 | x : np.ndarray or cp.ndarray
341 | Input array.
342 | axis : int or None, optional (default=None)
343 | Axis along which the variance is computed.
344 | keepdims : bool, optional (default=False)
345 | If True, the reduced dimensions will be retained.
346 |
347 | Returns:
348 | --------
349 | np.ndarray or cp.ndarray
350 | The variance of the input array along the specified axis.
351 | """
352 | return cp.var(x, axis=axis, keepdims=keepdims) if self.use_gpu else np.var(x, axis=axis, keepdims=keepdims)
353 |
--------------------------------------------------------------------------------
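
Note that the 'gelu' and 'softshrink' activations and the Huber loss above call device.pi, device.sign, and device.square, which Device does not define. The sketch below shows how such wrappers could be added, following the same NumPy/CuPy dispatch pattern as the existing methods (a hypothetical extension for illustration, not code from the repository):

    import numpy as np
    try:
        import cupy as cp
    except ImportError:
        cp = None

    from pydeepflow.device import Device

    class ExtendedDevice(Device):
        """Hypothetical subclass adding the wrappers expected by activations.py and losses.py."""

        @property
        def pi(self):
            return cp.pi if self.use_gpu else np.pi

        def sign(self, x):
            return cp.sign(x) if self.use_gpu else np.sign(x)

        def square(self, x):
            return cp.square(x) if self.use_gpu else np.square(x)
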
/pydeepflow/early_stopping.py:
--------------------------------------------------------------------------------
1 | class EarlyStopping:
2 | """Early stops the training if validation loss doesn't improve after a given patience."""
3 | def __init__(self, patience:int = 5, delta:float = 0) -> None:
4 | """
5 | Initializes the EarlyStopping.
6 |
7 | :param patience (int): How long to wait after last time validation loss improved.
8 | Default: 5
9 | :param delta (float): Minimum change in the monitored quantity to qualify as an improvement.
10 | Default: 0
11 | """
12 | self.patience = patience
13 | self.delta = delta
14 | self.early_stop = False
15 | self.best_score = None
16 | self.counter = 0
17 | def __call__(self, val_loss:float) -> None:
18 | """
19 |         Checks whether the early-stopping condition is met and updates the early_stop flag.
20 |
21 | :param val_loss (float): The current validation loss.
22 | """
23 |         score = -val_loss
24 |         if self.best_score is None:
25 |             self.best_score = score
26 |         elif score <= self.best_score + self.delta:
27 |             self.counter += 1
28 | if self.counter > self.patience:
29 | self.early_stop = True
30 | else:
31 | self.best_score = score
32 | self.counter = 0
33 |
--------------------------------------------------------------------------------
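
A small usage sketch of the stopper on a synthetic validation-loss curve (illustrative only):

    from pydeepflow.early_stopping import EarlyStopping

    early_stop = EarlyStopping(patience=3)
    val_losses = [0.9, 0.7, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65]

    for epoch, val_loss in enumerate(val_losses):
        early_stop(val_loss)
        if early_stop.early_stop:
            print(f"Stopping early at epoch {epoch}")
            break
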
/pydeepflow/gridSearch.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from sklearn.model_selection import train_test_split
3 | from itertools import product
4 |
5 | class GridSearchCV:
6 | def __init__(self, model_class, param_grid, scoring='accuracy', cv=3):
7 | """
8 | Initializes the GridSearchCV class.
9 |
10 | Parameters:
11 | model_class: The model class to be used for fitting.
12 |             param_grid (dict): Dictionary with parameter names as keys and lists of parameter settings to try as values.
13 | scoring (str): Scoring method to evaluate the model. Options are 'accuracy', 'loss', etc.
14 | cv (int): Number of cross-validation folds.
15 | """
16 | self.model_class = model_class
17 | self.param_grid = param_grid
18 | self.scoring = scoring
19 | self.cv = cv
20 | self.best_params = None
21 | self.best_score = -np.inf
22 |
23 | def fit(self, X, y):
24 | """
25 | Fit the model with the best hyperparameters using Grid Search.
26 |
27 | Parameters:
28 | X (array-like): Feature data.
29 | y (array-like): Target data.
30 | """
31 | # Generate all combinations of parameters
32 | param_names = list(self.param_grid.keys())
33 | param_values = [self.param_grid[name] for name in param_names]
34 | param_combinations = list(product(*param_values))
35 |
36 | for params in param_combinations:
37 | params_dict = dict(zip(param_names, params))
38 | print(f"Testing parameters: {params_dict}")
39 |
40 | # Perform cross-validation
41 | scores = []
42 | for _ in range(self.cv):
43 | X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=None)
44 |
45 | # Initialize model with current parameters
46 | model = self.model_class(X_train, y_train, **params_dict)
47 | model.fit(epochs=10) # Adjust epochs as needed
48 |
49 | # Evaluate the model
50 | val_loss, val_accuracy = model.evaluate(X_val, y_val)
51 |
52 | # Store the score based on the scoring metric
53 | if self.scoring == 'accuracy':
54 | scores.append(val_accuracy)
55 | elif self.scoring == 'loss':
56 | scores.append(-val_loss) # Assuming lower loss is better
57 |
58 | avg_score = np.mean(scores)
59 | print(f"Average score for parameters {params_dict}: {avg_score:.4f}")
60 | print()
61 |
62 | # Update best score and parameters if applicable
63 | if avg_score > self.best_score:
64 | self.best_score = avg_score
65 | self.best_params = params_dict
66 |
67 | print(f"Best parameters: {self.best_params} with score: {self.best_score:.4f}")
68 |
--------------------------------------------------------------------------------
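
Note that each of the cv rounds above draws a fresh random 80/20 split rather than a disjoint k-fold partition, and that model_class is expected to follow the Multi_Layer_ANN interface (constructor taking data plus hyperparameters, fit(epochs=...), evaluate(X, y)). A minimal sketch on the Iris data (illustrative; the parameter grid is an example, not a recommendation):

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.preprocessing import StandardScaler
    from pydeepflow.gridSearch import GridSearchCV
    from pydeepflow.model import Multi_Layer_ANN

    iris = load_iris()
    X = StandardScaler().fit_transform(iris.data)
    y_one_hot = np.eye(3)[iris.target]

    param_grid = {
        "hidden_layers": [[5, 5], [10, 10]],
        "activations": [["relu", "relu"]],
        "l2_lambda": [0.0, 0.01],
    }
    search = GridSearchCV(Multi_Layer_ANN, param_grid, scoring="accuracy", cv=3)
    search.fit(X, y_one_hot)
    print(search.best_params, search.best_score)
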
/pydeepflow/learning_rate_scheduler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class LearningRateScheduler:
4 | def __init__(self, initial_lr, strategy="decay", decay_rate=0.1, cycle_length=10, min_lr=1e-6):
5 | """
6 | Initializes the LearningRateScheduler.
7 |
8 | :param initial_lr: Initial learning rate.
9 | :param strategy: The strategy to use, either 'decay' or 'cyclic'.
10 | :param decay_rate: Decay rate for exponential decay.
11 | :param cycle_length: Number of epochs for one learning rate cycle (used for cyclic strategy).
12 | :param min_lr: Minimum learning rate value to avoid too low values.
13 | """
14 | self.initial_lr = initial_lr
15 | self.strategy = strategy
16 | self.decay_rate = decay_rate
17 | self.cycle_length = cycle_length
18 | self.min_lr = min_lr
19 | self.current_lr = initial_lr
20 |
21 | def get_lr(self, epoch):
22 | """
23 | Returns the learning rate for the current epoch based on the selected strategy.
24 |
25 | :param epoch: The current epoch number.
26 | :return: The learning rate for the current epoch.
27 | """
28 | if self.strategy == "decay":
29 | # Exponential decay: LR = initial_lr * (decay_rate ^ epoch)
30 | lr = self.initial_lr * (self.decay_rate ** epoch)
31 | return max(lr, self.min_lr) # Ensure the LR doesn't fall below min_lr
32 | elif self.strategy == "cyclic":
33 | # Cyclic learning rate
34 | cycle_position = epoch % self.cycle_length
35 | lr = self.min_lr + (self.initial_lr - self.min_lr) * (1 + np.cos(np.pi * cycle_position / self.cycle_length)) / 2
36 | return lr
37 | else:
38 | raise ValueError("Invalid learning rate strategy. Choose 'decay' or 'cyclic'.")
39 |
--------------------------------------------------------------------------------
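
The two strategies can be compared by querying the scheduler per epoch (illustrative sketch):

    from pydeepflow.learning_rate_scheduler import LearningRateScheduler

    decay = LearningRateScheduler(initial_lr=0.1, strategy="decay", decay_rate=0.5)
    cyclic = LearningRateScheduler(initial_lr=0.1, strategy="cyclic", cycle_length=10)

    for epoch in range(5):
        print(epoch, decay.get_lr(epoch), cyclic.get_lr(epoch))

    # 'decay' multiplies the rate by decay_rate each epoch (floored at min_lr);
    # 'cyclic' follows a cosine curve from initial_lr down toward min_lr over each cycle.
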
/pydeepflow/losses.py:
--------------------------------------------------------------------------------
1 | # losses.py
2 |
3 | # Loss functions using the device abstraction for GPU/CPU support
4 |
5 | def binary_crossentropy(y_true, y_pred, device):
6 | """
7 | Computes the binary crossentropy loss.
8 |
9 | Parameters:
10 | -----------
11 | y_true : np.ndarray or cp.ndarray
12 | Ground truth binary labels (0 or 1).
13 | y_pred : np.ndarray or cp.ndarray
14 | Predicted probabilities for the positive class.
15 | device : Device
16 | The device instance (CPU or GPU) to perform calculations.
17 |
18 | Returns:
19 | --------
20 | float
21 | The binary crossentropy loss.
22 | """
23 | return -device.mean(y_true * device.log(y_pred + 1e-8) + (1 - y_true) * device.log(1 - y_pred + 1e-8))
24 |
25 | def binary_crossentropy_derivative(y_true, y_pred, device):
26 | """
27 | Computes the derivative of the binary crossentropy loss.
28 |
29 | Parameters:
30 | -----------
31 | y_true : np.ndarray or cp.ndarray
32 | Ground truth binary labels (0 or 1).
33 | y_pred : np.ndarray or cp.ndarray
34 | Predicted probabilities for the positive class.
35 | device : Device
36 | The device instance (CPU or GPU) to perform calculations.
37 |
38 | Returns:
39 | --------
40 | np.ndarray or cp.ndarray
41 | The derivative of the binary crossentropy loss with respect to predictions.
42 | """
43 | return -(y_true / (y_pred + 1e-8)) + (1 - y_true) / (1 - y_pred + 1e-8)
44 |
45 | def mse(y_true, y_pred, device):
46 | """
47 | Computes the Mean Squared Error (MSE) loss.
48 |
49 | Parameters:
50 | -----------
51 | y_true : np.ndarray or cp.ndarray
52 | Ground truth values.
53 | y_pred : np.ndarray or cp.ndarray
54 | Predicted values.
55 | device : Device
56 | The device instance (CPU or GPU) to perform calculations.
57 |
58 | Returns:
59 | --------
60 | float
61 | The Mean Squared Error loss.
62 | """
63 | return device.mean((y_true - y_pred) ** 2)
64 |
65 | def mse_derivative(y_true, y_pred, device):
66 | """
67 | Computes the derivative of the Mean Squared Error (MSE) loss.
68 |
69 | Parameters:
70 | -----------
71 | y_true : np.ndarray or cp.ndarray
72 | Ground truth values.
73 | y_pred : np.ndarray or cp.ndarray
74 | Predicted values.
75 | device : Device
76 | The device instance (CPU or GPU) to perform calculations.
77 |
78 | Returns:
79 | --------
80 | np.ndarray or cp.ndarray
81 | The derivative of the Mean Squared Error loss with respect to predictions.
82 | """
83 | return 2 * (y_pred - y_true) / y_true.size
84 |
85 | def categorical_crossentropy(y_true, y_pred, device):
86 | """
87 | Computes the categorical crossentropy loss.
88 |
89 | Parameters:
90 | -----------
91 | y_true : np.ndarray or cp.ndarray
92 | Ground truth labels in one-hot encoded format.
93 | y_pred : np.ndarray or cp.ndarray
94 | Predicted probabilities for each class.
95 | device : Device
96 | The device instance (CPU or GPU) to perform calculations.
97 |
98 | Returns:
99 | --------
100 | float
101 | The categorical crossentropy loss.
102 | """
103 | return -device.sum(y_true * device.log(y_pred + 1e-8)) / y_true.shape[0]
104 |
105 | def categorical_crossentropy_derivative(y_true, y_pred, device):
106 | """
107 | Computes the derivative of the categorical crossentropy loss.
108 |
109 | Parameters:
110 | -----------
111 | y_true : np.ndarray or cp.ndarray
112 | Ground truth labels in one-hot encoded format.
113 | y_pred : np.ndarray or cp.ndarray
114 | Predicted probabilities for each class.
115 | device : Device
116 | The device instance (CPU or GPU) to perform calculations.
117 |
118 | Returns:
119 | --------
120 | np.ndarray or cp.ndarray
121 | The derivative of the categorical crossentropy loss with respect to predictions.
122 | """
123 | return -y_true / (y_pred + 1e-8)
124 |
125 | def hinge_loss(y_true, y_pred, device):
126 | """
127 | Computes the hinge loss.
128 |
129 | Parameters:
130 | -----------
131 | y_true : np.ndarray or cp.ndarray
132 | Ground truth labels, should be either -1 or 1.
133 | y_pred : np.ndarray or cp.ndarray
134 | Predicted values.
135 | device : Device
136 | The device instance (CPU or GPU) to perform calculations.
137 |
138 | Returns:
139 | --------
140 | float
141 | The hinge loss.
142 | """
143 | return device.mean(device.maximum(0, 1 - y_true * y_pred))
144 |
145 | def hinge_loss_derivative(y_true, y_pred, device):
146 | """
147 | Computes the derivative of the hinge loss.
148 |
149 | Parameters:
150 | -----------
151 | y_true : np.ndarray or cp.ndarray
152 | Ground truth labels, should be either -1 or 1.
153 | y_pred : np.ndarray or cp.ndarray
154 | Predicted values.
155 | device : Device
156 | The device instance (CPU or GPU) to perform calculations.
157 |
158 | Returns:
159 | --------
160 | np.ndarray or cp.ndarray
161 | The derivative of the hinge loss with respect to predictions.
162 | """
163 | return device.where(y_true * y_pred < 1, -y_true, 0)
164 |
165 | def huber_loss(y_true, y_pred, device, delta=1.0):
166 | """
167 | Computes the Huber loss.
168 |
169 | Parameters:
170 | -----------
171 | y_true : np.ndarray or cp.ndarray
172 | Ground truth values.
173 | y_pred : np.ndarray or cp.ndarray
174 | Predicted values.
175 | device : Device
176 | The device instance (CPU or GPU) to perform calculations.
177 | delta : float, optional (default=1.0)
178 | The threshold for defining small and large errors.
179 |
180 | Returns:
181 | --------
182 | float
183 | The Huber loss.
184 | """
185 | error = y_true - y_pred
186 | is_small_error = device.abs(error) <= delta
187 | squared_loss = 0.5 * device.square(error)
188 | linear_loss = delta * (device.abs(error) - 0.5 * delta)
189 | return device.mean(device.where(is_small_error, squared_loss, linear_loss))
190 |
191 | def huber_loss_derivative(y_true, y_pred, device, delta=1.0):
192 | """
193 | Computes the derivative of the Huber loss.
194 |
195 | Parameters:
196 | -----------
197 | y_true : np.ndarray or cp.ndarray
198 | Ground truth values.
199 | y_pred : np.ndarray or cp.ndarray
200 | Predicted values.
201 | device : Device
202 | The device instance (CPU or GPU) to perform calculations.
203 | delta : float, optional (default=1.0)
204 | The threshold for defining small and large errors.
205 |
206 | Returns:
207 | --------
208 | np.ndarray or cp.ndarray
209 | The derivative of the Huber loss with respect to predictions.
210 | """
211 | error = y_pred - y_true
212 | is_small_error = device.abs(error) <= delta
213 | return device.where(is_small_error, error, delta * device.sign(error))
214 |
215 | # Get the appropriate loss function
216 | def get_loss_function(loss_name):
217 | """
218 | Retrieves the specified loss function by name.
219 |
220 | Parameters:
221 | -----------
222 | loss_name : str
223 | The name of the loss function.
224 |
225 | Returns:
226 | --------
227 | function
228 | The corresponding loss function.
229 |
230 | Raises:
231 | -------
232 | ValueError
233 | If the specified loss function is unsupported.
234 | """
235 | if loss_name == 'binary_crossentropy':
236 | return binary_crossentropy
237 | elif loss_name == 'mse':
238 | return mse
239 | elif loss_name == 'categorical_crossentropy':
240 | return categorical_crossentropy
241 | elif loss_name == 'hinge':
242 | return hinge_loss
243 | elif loss_name == 'huber':
244 | return huber_loss
245 | else:
246 | raise ValueError(f"Unsupported loss function: {loss_name}")
247 |
248 | # Get the appropriate loss derivative function
249 | def get_loss_derivative(loss_name):
250 | """
251 | Retrieves the specified loss derivative function by name.
252 |
253 | Parameters:
254 | -----------
255 | loss_name : str
256 | The name of the loss function.
257 |
258 | Returns:
259 | --------
260 | function
261 | The corresponding loss derivative function.
262 |
263 | Raises:
264 | -------
265 | ValueError
266 | If the specified loss derivative function is unsupported.
267 | """
268 | if loss_name == 'binary_crossentropy':
269 | return binary_crossentropy_derivative
270 | elif loss_name == 'mse':
271 | return mse_derivative
272 | elif loss_name == 'categorical_crossentropy':
273 | return categorical_crossentropy_derivative
274 | elif loss_name == 'hinge':
275 | return hinge_loss_derivative
276 | elif loss_name == 'huber':
277 | return huber_loss_derivative
278 | else:
279 | raise ValueError(f"Unsupported loss derivative: {loss_name}")
280 |
--------------------------------------------------------------------------------
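
A quick sanity check of the loss helpers through the Device abstraction (illustrative sketch):

    import numpy as np
    from pydeepflow.device import Device
    from pydeepflow.losses import get_loss_function, get_loss_derivative

    device = Device(use_gpu=False)
    y_true = np.array([[1.0], [0.0], [1.0]])
    y_pred = np.array([[0.9], [0.2], [0.7]])

    bce = get_loss_function('binary_crossentropy')
    bce_grad = get_loss_derivative('binary_crossentropy')
    print(bce(y_true, y_pred, device))        # scalar loss
    print(bce_grad(y_true, y_pred, device))   # elementwise gradient w.r.t. y_pred
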
/pydeepflow/main.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | from sklearn.model_selection import train_test_split
4 | from sklearn.preprocessing import StandardScaler
5 | from pydeepflow.model import Multi_Layer_ANN
6 | from pydeepflow.cross_validator import CrossValidator # Import CrossValidator
7 |
8 | def load_and_preprocess_data(url):
9 | # Load the Iris dataset
10 | df = pd.read_csv(url, header=None, names=["sepal_length", "sepal_width", "petal_length", "petal_width", "species"])
11 | print(df.head())
12 |
13 | # Encode species labels to integers
14 | df['species'] = df['species'].astype('category').cat.codes
15 |
16 | # Split data into features (X) and labels (y)
17 | X = df.iloc[:, :-1].values
18 | y = df.iloc[:, -1].values
19 |
20 | # Convert labels to one-hot encoding
21 | y_one_hot = np.eye(len(np.unique(y)))[y]
22 |
23 | # Standardize the features
24 | scaler = StandardScaler()
25 | X = scaler.fit_transform(X)
26 |
27 | return X, y_one_hot
28 |
29 | if __name__ == "__main__":
30 | # Configuration
31 | url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
32 | n_splits = 5 # Number of folds for cross-validation
33 |
34 | # Load and preprocess data
35 | X, y_one_hot = load_and_preprocess_data(url)
36 |
37 | # Ask the user whether to use GPU
38 | use_gpu_input = input("Use GPU? (y/n): ").strip().lower()
39 | use_gpu = True if use_gpu_input == 'y' else False
40 |
41 | # Define the architecture of the network
42 | hidden_layers = [5, 5]
43 | activations = ['relu', 'relu']
44 |
45 |     # Perform K-Fold Cross-Validation with a fresh model per fold
46 |     cross_validator = CrossValidator(n_splits=n_splits)
47 |     fold_accuracies = []
48 |     for fold, (train_index, val_index) in enumerate(cross_validator.split(X, y_one_hot)):
49 |         X_train, X_val = X[train_index], X[val_index]
50 |         y_train, y_val = y_one_hot[train_index], y_one_hot[val_index]
51 | 
52 |         # Initialize the ANN for this fold with the use_gpu option
53 |         ann = Multi_Layer_ANN(X_train, y_train, hidden_layers, activations, loss='categorical_crossentropy', use_gpu=use_gpu)
54 |         ann.fit(epochs=1000, learning_rate=0.01, X_val=X_val, y_val=y_val, verbose=True)
55 | 
56 |         # Evaluate the fold on its validation split
57 |         y_pred_labels = np.argmax(ann.predict(X_val), axis=1)
58 |         fold_metrics = cross_validator.get_metrics(np.argmax(y_val, axis=1), y_pred_labels, metrics=["accuracy"])
59 |         fold_accuracies.append(fold_metrics['accuracy'])
60 |         print(f"Fold {fold + 1}/{n_splits} Accuracy: {fold_metrics['accuracy'] * 100:.2f}%")
61 | 
62 |     # Print cross-validation results
63 |     print("Cross-Validation Results:", {"mean_accuracy": float(np.mean(fold_accuracies))})
64 | 
65 |     # Optionally train a final model on the full dataset if needed
66 |     # ann = Multi_Layer_ANN(X, y_one_hot, hidden_layers, activations, loss='categorical_crossentropy', use_gpu=use_gpu)
67 |     # ann.fit(epochs=1000, learning_rate=0.01)
68 | 
69 |     # Example of making predictions on the entire dataset or a separate test set can be added here
--------------------------------------------------------------------------------
/pydeepflow/model.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | from pydeepflow.activations import activation, activation_derivative
4 | from pydeepflow.losses import get_loss_function, get_loss_derivative
5 | from pydeepflow.device import Device
6 | from pydeepflow.regularization import Regularization
7 | from pydeepflow.checkpoints import ModelCheckpoint
8 | from pydeepflow.cross_validator import CrossValidator
9 | from pydeepflow.batch_normalization import BatchNormalization
10 | from tqdm import tqdm
11 | import time
12 | import sys
13 |
14 | class Multi_Layer_ANN:
15 | """
16 | A Multi-Layer Artificial Neural Network (ANN) class for binary and multi-class classification tasks.
17 | """
18 | def __init__(self, X_train, Y_train, hidden_layers, activations, loss='categorical_crossentropy',
19 | use_gpu=False, l2_lambda=0.0, dropout_rate=0.0, use_batch_norm=False):
20 | """
21 | Initializes the ANN model with the provided architecture and configurations.
22 | """
23 | self.device = Device(use_gpu=use_gpu)
24 | self.regularization = Regularization(l2_lambda, dropout_rate)
25 |
26 | # Determine the network architecture based on the classification task (binary or multi-class)
27 | if Y_train.ndim == 1 or Y_train.shape[1] == 1:
28 | self.layers = [X_train.shape[1]] + hidden_layers + [1]
29 | self.output_activation = 'sigmoid'
30 | else:
31 | self.layers = [X_train.shape[1]] + hidden_layers + [Y_train.shape[1]]
32 | self.output_activation = 'softmax'
33 |
34 | self.activations = activations
35 | self.weights = []
36 | self.biases = []
37 |
38 |
39 | if len(self.activations) != len(hidden_layers):
40 | raise ValueError("The number of activation functions must match the number of hidden layers.")
41 |
42 | # Setup loss function
43 | self.loss = loss
44 | self.loss_func = get_loss_function(self.loss)
45 | self.loss_derivative = get_loss_derivative(self.loss)
46 |
47 | # Move training data to the device (GPU or CPU)
48 | self.X_train = self.device.array(X_train)
49 | self.y_train = self.device.array(Y_train)
50 |
51 | # Initialize weights and biases with He initialization for better convergence
52 | for i in range(len(self.layers) - 1):
53 | weight_matrix = self.device.random().randn(self.layers[i], self.layers[i + 1]) * np.sqrt(2 / self.layers[i])
54 | bias_vector = self.device.zeros((1, self.layers[i + 1]))
55 | self.weights.append(weight_matrix)
56 | self.biases.append(bias_vector)
57 |
58 | # Initialize training attribute
59 | self.training = False
60 |
61 | # Store metrics for plotting
62 | self.history = {'train_loss': [], 'val_loss': [], 'train_accuracy': [], 'val_accuracy': []}
63 |
64 | # Batch Normalization setup
65 | self.use_batch_norm = use_batch_norm
66 | self.batch_norm_layers = []
67 |
68 | if self.use_batch_norm:
69 | for i in range(len(self.layers) - 2): # Exclude input and output layers
70 | self.batch_norm_layers.append(BatchNormalization(self.layers[i+1], device=self.device))
71 |
72 | def forward_propagation(self, X):
73 | """
74 | Performs forward propagation through the network.
75 | """
76 | activations = [X]
77 | Z_values = []
78 |
79 | # Forward pass through hidden layers with dropout and batch normalization
80 | for i in range(len(self.weights) - 1):
81 | Z = self.device.dot(activations[-1], self.weights[i]) + self.biases[i]
82 | if self.use_batch_norm:
83 | Z = self.batch_norm_layers[i].normalize(Z, training=self.training)
84 | Z_values.append(Z)
85 | A = activation(Z, self.activations[i], self.device)
86 | A = self.regularization.apply_dropout(A, training=self.training)
87 | activations.append(A)
88 |
89 | # Forward pass through the output layer
90 | Z_output = self.device.dot(activations[-1], self.weights[-1]) + self.biases[-1]
91 | A_output = activation(Z_output, self.output_activation, self.device)
92 | Z_values.append(Z_output)
93 | activations.append(A_output)
94 |
95 | return activations, Z_values
96 |
97 | def backpropagation(self, X, y, activations, Z_values, learning_rate, clip_value=None):
98 | """
99 | Performs backpropagation through the network to compute weight updates.
100 | """
101 | # Calculate the error in the output layer
102 | output_error = activations[-1] - y
103 | d_output = output_error * activation_derivative(activations[-1], self.output_activation, self.device)
104 |
105 | # Backpropagate through the network
106 | deltas = [d_output]
107 | for i in reversed(range(len(self.weights) - 1)):
108 | error = self.device.dot(deltas[-1], self.weights[i + 1].T)
109 | if self.use_batch_norm:
110 | error = self.batch_norm_layers[i].backprop(Z_values[i], error, learning_rate)
111 | delta = error * activation_derivative(activations[i + 1], self.activations[i], self.device)
112 | deltas.append(delta)
113 |
114 | deltas.reverse()
115 |
116 | # Update weights and biases with L2 regularization
117 | gradient = {'weights': [], 'biases': []}
118 | for i in range(len(self.weights)):
119 | grad_weights = self.device.dot(activations[i].T, deltas[i])
120 | grad_biases = self.device.sum(deltas[i], axis=0, keepdims=True)
121 |
122 | # Clip gradients if clip_value is specified
123 | if clip_value is not None:
124 | # Clip weights gradients
125 | grad_weights_norm = self.device.norm(grad_weights)
126 | if grad_weights_norm > clip_value:
127 | grad_weights = grad_weights * (clip_value / grad_weights_norm)
128 |
129 | # Clip bias gradients
130 | grad_biases_norm = self.device.norm(grad_biases)
131 | if grad_biases_norm > clip_value:
132 | grad_biases = grad_biases * (clip_value / grad_biases_norm)
133 |
134 | gradient['weights'].append(grad_weights)
135 | gradient['biases'].append(grad_biases)
136 |
137 | for i in range(len(self.weights)):
138 | self.weights[i] -= gradient['weights'][i] * learning_rate
139 | self.biases[i] -= gradient['biases'][i] * learning_rate
140 |
141 |             # Apply L2 regularization (weight decay) to the weights
142 |             self.weights[i] = self.regularization.apply_l2_regularization(self.weights[i], learning_rate, X.shape)
143 |
144 | def fit(self, epochs, learning_rate=0.01, lr_scheduler=None, early_stop=None, X_val=None, y_val=None, checkpoint=None, verbose=False, clipping_threshold=None):
145 | """
146 | Trains the model for a given number of epochs with an optional learning rate scheduler.
147 | """
148 | if early_stop:
149 | assert X_val is not None and y_val is not None, "Validation set is required for early stopping"
150 |
151 | for epoch in tqdm(range(epochs), desc="Training Progress", ncols=100, ascii="░▒█", colour='green', disable=not verbose):
152 | start_time = time.time()
153 |
154 | # Adjust the learning rate using the scheduler if provided
155 | if lr_scheduler is not None:
156 | current_lr = lr_scheduler.get_lr(epoch)
157 | else:
158 | current_lr = learning_rate
159 |
160 | # Forward and Backpropagation
161 | self.training = True
162 | activations, Z_values = self.forward_propagation(self.X_train)
163 | self.backpropagation(self.X_train, self.y_train, activations, Z_values, current_lr, clip_value=clipping_threshold)
164 |
165 | self.training = False
166 |
167 | # Compute training loss and accuracy
168 | train_loss = self.loss_func(self.y_train, activations[-1], self.device)
169 | train_accuracy = np.mean((activations[-1] >= 0.5).astype(int) == self.y_train) if self.output_activation == 'sigmoid' else np.mean(np.argmax(activations[-1], axis=1) == np.argmax(self.y_train, axis=1))
170 |
171 | # # Debugging output
172 | # print(f"Computed Train Loss: {train_loss}, Train Accuracy: {train_accuracy}")
173 |
174 | if train_loss is None or train_accuracy is None:
175 | print("Warning: train_loss or train_accuracy is None!")
176 | continue # Skip this epoch if values are not valid
177 |
178 | # Validation step
179 | val_loss = val_accuracy = None
180 | if X_val is not None and y_val is not None:
181 | val_activations, _ = self.forward_propagation(self.device.array(X_val))
182 | val_loss = self.loss_func(self.device.array(y_val), val_activations[-1], self.device)
183 | val_accuracy = np.mean((val_activations[-1] >= 0.5).astype(int) == y_val) if self.output_activation == 'sigmoid' else np.mean(np.argmax(val_activations[-1], axis=1) == np.argmax(y_val, axis=1))
184 |
185 | # Store training history for plotting
186 | self.history['train_loss'].append(train_loss)
187 | self.history['train_accuracy'].append(train_accuracy)
188 | if val_loss is not None:
189 | self.history['val_loss'].append(val_loss)
190 | self.history['val_accuracy'].append(val_accuracy)
191 |
192 | # Checkpoint saving logic
193 | if checkpoint is not None and X_val is not None:
194 | if checkpoint.should_save(epoch, val_loss):
195 | checkpoint.save_weights(epoch, self.weights, self.biases, val_loss)
196 |
197 | if verbose and (epoch % 10 == 0):
198 | # Display progress on the same line
199 | sys.stdout.write(
200 | f"\rEpoch {epoch + 1}/{epochs} | "
201 | f"Train Loss: {train_loss:.4f} | "
202 | f"Accuracy: {train_accuracy:.2f}% | "
203 | f"Val Loss: {val_loss:.4f} | "
204 | f"Val Accuracy: {val_accuracy:.2f}% | "
205 | f"Learning Rate: {current_lr:.6f} "
206 | )
207 | sys.stdout.flush()
208 |
209 | # Early stopping
210 | if early_stop:
211 | early_stop(val_loss)
212 | if early_stop.early_stop:
213 | print('\n', "#" * 150, '\n\n', "early stop at - "
214 | f"Epoch {epoch + 1}/{epochs} Train Loss: {train_loss:.4f} Accuracy: {train_accuracy * 100:.2f}% "
215 | f"Val Loss: {val_loss:.4f} Val Accuracy: {val_accuracy * 100:.2f}% "
216 | f"Learning Rate: {current_lr:.6f}", '\n\n', "#" * 150)
217 | break
218 |
219 | print("Training Completed!")
220 |
221 | def predict(self, X):
222 | """
223 | Predicts the output for given input data X.
224 | """
225 | activations, _ = self.forward_propagation(X)
226 | return activations[-1]
227 |
228 | def evaluate(self, X, y):
229 | """
230 | Evaluates the model on a given test set.
231 | """
232 | predictions = self.predict(X)
233 | loss = self.loss_func(y, predictions, self.device)
234 | accuracy = np.mean((predictions >= 0.5).astype(int) == y) if self.output_activation == 'sigmoid' else np.mean(np.argmax(predictions, axis=1) == np.argmax(y, axis=1))
235 | return loss, accuracy
236 |
237 | def load_checkpoint(self, checkpoint_path):
238 | """
239 | Loads model weights from a checkpoint.
240 | """
241 | print(f"Loading model weights from {checkpoint_path}")
242 |         data = np.load(checkpoint_path)  # checkpoints are saved as .npz archives by ModelCheckpoint.save_weights
243 |         self.weights, self.biases = [self.device.array(data[f'weights_layer_{i}']) for i in range(len(self.weights))], [self.device.array(data[f'biases_layer_{i}']) for i in range(len(self.biases))]
244 |
245 | def save_model(self, file_path):
246 | """
247 | Saves the model weights and biases to a file.
248 | """
249 | model_data = {
250 | 'weights': [w.tolist() for w in self.weights],
251 | 'biases': [b.tolist() for b in self.biases],
252 | 'layers': self.layers,
253 | 'activations': self.activations,
254 | 'output_activation': self.output_activation
255 | }
256 | np.save(file_path, model_data)
257 | print(f"Model saved to {file_path}")
258 |
259 | def load_model(self, file_path):
260 | """
261 | Loads the model weights and biases from a file.
262 | """
263 | model_data = np.load(file_path, allow_pickle=True).item()
264 | self.weights = [self.device.array(w) for w in model_data['weights']]
265 | self.biases = [self.device.array(b) for b in model_data['biases']]
266 | self.layers = model_data['layers']
267 | self.activations = model_data['activations']
268 | self.output_activation = model_data['output_activation']
269 | print(f"Model loaded from {file_path}")
270 |
271 |
272 | class Plotting_Utils:
273 | """
274 | Utility class for plotting training and validation metrics.
275 | """
276 | def plot_training_history(self, history, metrics=('loss', 'accuracy'), figure='history.png'):
277 | """
278 | Plots the training and validation loss/accuracy over epochs.
279 | Parameters:
280 | history (dict): A dictionary containing training history with keys 'train_loss', 'val_loss',
281 | 'train_accuracy', and 'val_accuracy'.
282 | metrics (tuple): The metrics to plot ('loss' or 'accuracy').
283 | """
284 | epochs = len(history['train_loss'])
285 | fig, ax = plt.subplots(1, len(metrics), figsize=(12, 5))
286 |
287 | if 'loss' in metrics:
288 | ax[0].plot(range(epochs), history['train_loss'], label='Train Loss')
289 | if 'val_loss' in history:
290 | ax[0].plot(range(epochs), history['val_loss'], label='Validation Loss')
291 | ax[0].set_title("Loss over Epochs")
292 | ax[0].set_xlabel("Epochs")
293 | ax[0].set_ylabel("Loss")
294 | ax[0].legend()
295 |
296 | if 'accuracy' in metrics:
297 | ax[1].plot(range(epochs), history['train_accuracy'], label='Train Accuracy')
298 | if 'val_accuracy' in history:
299 | ax[1].plot(range(epochs), history['val_accuracy'], label='Validation Accuracy')
300 | ax[1].set_title("Accuracy over Epochs")
301 | ax[1].set_xlabel("Epochs")
302 | ax[1].set_ylabel("Accuracy")
303 | ax[1].legend()
304 |         plt.tight_layout()
305 |         plt.savefig(figure)
306 | plt.show()
307 |
--------------------------------------------------------------------------------
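
An end-to-end sketch of the training API defined above (illustrative; it mirrors the Iris workflow used in runner.py below, and the layer sizes and epoch count are arbitrary):

    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import StandardScaler
    from pydeepflow.model import Multi_Layer_ANN, Plotting_Utils

    iris = load_iris()
    X = StandardScaler().fit_transform(iris.data)
    y = np.eye(3)[iris.target]                      # one-hot labels
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

    ann = Multi_Layer_ANN(X_train, y_train, hidden_layers=[8, 8], activations=['relu', 'relu'],
                          loss='categorical_crossentropy')
    ann.fit(epochs=200, learning_rate=0.05, X_val=X_val, y_val=y_val, verbose=True)

    val_loss, val_accuracy = ann.evaluate(X_val, y_val)
    print(f"Validation accuracy: {val_accuracy * 100:.2f}%")
    Plotting_Utils().plot_training_history(ann.history)
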
/pydeepflow/regularization.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from .device import Device
3 |
4 | class Regularization:
5 | def __init__(self, l2_lambda=0.0, dropout_rate=0.0):
6 | self.l2_lambda = l2_lambda
7 | self.dropout_rate = dropout_rate
8 | self.device = Device()
9 |
10 | def apply_l2_regularization(self, weights, learning_rate, X_shape):
11 | for i in range(len(weights)):
12 | weights[i] -= (self.l2_lambda * weights[i]) / X_shape[0]
13 | return weights
14 |
15 | def apply_dropout(self, A, training=True):
16 | if training and self.dropout_rate > 0:
17 | dropout_mask = self.device.random().rand(*A.shape) > self.dropout_rate
18 | A *= dropout_mask
19 | else:
20 | A *= (1 - self.dropout_rate)
21 | return A
--------------------------------------------------------------------------------
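
A small sketch of the dropout behavior above (illustrative): units are zeroed at random during training, and activations are scaled by 1 - dropout_rate at inference, i.e. the classic non-inverted dropout scheme.

    import numpy as np
    from pydeepflow.regularization import Regularization

    reg = Regularization(l2_lambda=0.01, dropout_rate=0.5)
    A = np.ones((4, 6))

    print(reg.apply_dropout(A.copy(), training=True))   # roughly half the entries zeroed
    print(reg.apply_dropout(A.copy(), training=False))  # every entry scaled by 1 - dropout_rate
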
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.23.5
2 | pandas==1.5.3
3 | scikit-learn==1.2.0
4 | jupyter==1.0.0
5 | tqdm==4.64.1
6 | colorama==0.4.6
7 | matplotlib
8 |
--------------------------------------------------------------------------------
/runner.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | from sklearn.preprocessing import StandardScaler
4 | from sklearn.datasets import load_iris
5 | from sklearn.model_selection import train_test_split
6 | from pydeepflow.model import Multi_Layer_ANN
7 | from pydeepflow.early_stopping import EarlyStopping
8 | from pydeepflow.checkpoints import ModelCheckpoint
9 | from pydeepflow.learning_rate_scheduler import LearningRateScheduler
10 | from pydeepflow.model import Plotting_Utils
11 | from pydeepflow.cross_validator import CrossValidator
12 |
13 | if __name__ == "__main__":
14 |
15 | # Load Iris dataset from sklearn
16 | iris = load_iris()
17 | X = iris.data
18 | y = iris.target
19 |
20 | print("First five rows of the dataset:")
21 | print(pd.DataFrame(X, columns=iris.feature_names).head())
22 |
23 | # Convert labels to one-hot encoding (for multiclass classification)
24 | y_one_hot = np.eye(len(np.unique(y)))[y]
25 |
26 | # Standardize the features
27 | scaler = StandardScaler()
28 | X = scaler.fit_transform(X)
29 |
30 |     # Whether to use the GPU (hard-coded to 'n' for this example; set to 'y' to enable it)
31 |     use_gpu_input = 'n'
32 |     use_gpu = use_gpu_input == 'y'
33 |
34 | # Define the architecture of the network
35 | hidden_layers = [5, 5] # Example: two hidden layers with 5 neurons each
36 | activations = ['relu', 'relu'] # ReLU activations for the hidden layers
37 |
38 | # Initialize the CrossValidator
39 | k_folds = 10 # Set the number of folds for cross-validation
40 | cross_validator = CrossValidator(n_splits=k_folds)
41 |
42 | # Perform k-fold cross-validation
43 | fold_accuracies = [] # To store accuracy for each fold
44 | for fold, (train_index, val_index) in enumerate(cross_validator.split(X, y_one_hot)):
45 | print(f"Training on fold {fold + 1}/{k_folds}")
46 |
47 | # Split data into training and validation sets for the current fold
48 | X_train, X_val = X[train_index], X[val_index]
49 | y_train, y_val = y_one_hot[train_index], y_one_hot[val_index]
50 |
51 | # Initialize the ANN for each fold without batch normalization
52 | ann = Multi_Layer_ANN(X_train, y_train, hidden_layers, activations,
53 | loss='categorical_crossentropy', use_gpu=use_gpu)
54 |
55 | # Callback functions
56 | lr_scheduler = LearningRateScheduler(initial_lr=0.01, strategy="cyclic")
57 |
58 | # Train the model and capture history
59 | ann.fit(epochs=1000, learning_rate=0.01,
60 | lr_scheduler=lr_scheduler,
61 | X_val=X_val,
62 | y_val=y_val,
63 | verbose=True)
64 |
65 | # Evaluate the model on the validation set
66 | y_pred_val = ann.predict(X_val)
67 | y_val_labels = np.argmax(y_val, axis=1)
68 |
69 | # Adjust prediction shape handling for accuracy calculation
70 | y_pred_val_labels = np.argmax(y_pred_val, axis=1) # Multi-class classification
71 |
72 | # Calculate and store the accuracy for this fold
73 | fold_accuracy = np.mean(y_pred_val_labels == y_val_labels)
74 | fold_accuracies.append(fold_accuracy)
75 | print(f"Fold {fold + 1} Accuracy: {fold_accuracy * 100:.2f}%")
76 |
77 | # Optionally plot training history of the last fold
78 | plot_utils = Plotting_Utils()
79 | plot_utils.plot_training_history(ann.history)
80 |
--------------------------------------------------------------------------------
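runner.py reports each fold's accuracy as it goes but never aggregates the fold_accuracies list it builds. A small, hypothetical addition after the cross-validation loop would summarise the run:

    # Summarise the k-fold results collected in fold_accuracies
    mean_acc = np.mean(fold_accuracies)
    std_acc = np.std(fold_accuracies)
    print(f"Cross-validation accuracy: {mean_acc * 100:.2f}% (+/- {std_acc * 100:.2f}%)")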
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
3 | with open("README.md", "r") as fh:
4 | long_description = fh.read()
5 |
6 | setup(
7 | name="pydeepflow",
8 | version="1.0.0", # Updated version
9 | author="Ravin D",
10 | author_email="ravin.d3107@outlook.com",
11 | description="A lightweight deep learning package that is easy to learn and to integrate into projects",
12 | long_description=long_description,
13 | long_description_content_type="text/markdown",
14 | url="https://github.com/ravin-d-27/PyDeepFlow",
15 | packages=find_packages(),
16 | include_package_data=True,
17 | classifiers=[
18 | "Programming Language :: Python :: 3",
19 | "License :: OSI Approved :: MIT License",
20 | "Operating System :: OS Independent",
21 | "Topic :: Scientific/Engineering :: Artificial Intelligence", # Additional metadata
22 | ],
23 | python_requires='>=3.6',
24 | install_requires=[
25 | "numpy>=1.23.5",
26 | "pandas>=1.5.3",
27 | "scikit-learn>=1.2.0",
28 | "jupyter>=1.0.0",
29 | "tqdm>=4.64.1",
30 | "colorama>=0.4.6",
31 | 'matplotlib',
32 | ],
33 | extras_require={
34 | "gpu": ["cupy>=9.6.0"], # Optional GPU support
35 | "testing": ["pytest>=6.2.5"], # Dependencies for testing
36 | },
37 | # entry_points={  # disabled: pydeepflow/cli.py with a main() does not exist in the package yet
38 | #     'console_scripts': [
39 | #         'pydeepflow-cli=pydeepflow.cli:main',
40 | #     ],
41 | # },
42 | keywords="deep-learning artificial-intelligence neural-networks tensorflow pytorch", # Add relevant keywords
43 | license="MIT",
44 | project_urls={
45 | "Bug Tracker": "https://github.com/ravin-d-27/PyDeepFlow/issues",
46 | "Source Code": "https://github.com/ravin-d-27/PyDeepFlow",
47 | "Documentation":"https://github.com/ravin-d-27/PyDeepFlow/wiki"
48 | },
49 | )
50 |
--------------------------------------------------------------------------------
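Given the extras_require groups defined above, the package supports optional installs; typical commands from a local clone look like this (the gpu extra assumes a CUDA setup that CuPy can use):

    pip install .                  # core package only
    pip install .[gpu]             # adds cupy for GPU execution
    pip install -e .[testing]      # editable install plus pytest for development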
/tests/test_activations.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import numpy as np
3 | from pydeepflow.activations import activation, activation_derivative
4 | from pydeepflow.device import Device
5 |
6 | class TestActivations(unittest.TestCase):
7 |
8 | def setUp(self):
9 | self.device_cpu = Device(use_gpu=False)
10 |
11 | def test_relu(self):
12 | x = np.array([-1, 0, 1])
13 | result = activation(x, 'relu', self.device_cpu)
14 | expected = np.array([0, 0, 1])
15 | np.testing.assert_array_equal(result, expected)
16 |
17 | def test_leaky_relu(self):
18 | x = np.array([-1, 0, 1])
19 | result = activation(x, 'leaky_relu', self.device_cpu, alpha=0.01)
20 | expected = np.array([-0.01, 0, 1])
21 | np.testing.assert_array_equal(result, expected)
22 |
23 | def test_prelu(self):
24 | x = np.array([-1, 0, 1])
25 | result = activation(x, 'prelu', self.device_cpu, alpha=0.01)
26 | expected = np.array([-0.01, 0, 1])
27 | np.testing.assert_array_equal(result, expected)
28 |
29 | def test_gelu(self):
30 | x = np.array([0])
31 | result = activation(x, 'gelu', self.device_cpu)
32 | expected = np.array([0])
33 | np.testing.assert_array_almost_equal(result, expected)
34 |
35 | def test_elu(self):
36 | x = np.array([-1, 0, 1])
37 | result = activation(x, 'elu', self.device_cpu, alpha=1.0)
38 | expected = np.array([-0.6321, 0, 1])
39 | np.testing.assert_array_almost_equal(result, expected, decimal=4)
40 |
41 | def test_selu(self):
42 | x = np.array([-1, 0, 1])
43 | result = activation(x, 'selu', self.device_cpu)
44 | expected = np.array([-1.1113, 0, 1.0507])
45 | np.testing.assert_array_almost_equal(result, expected, decimal=4)
46 |
47 | def test_mish(self):
48 | x = np.array([0])
49 | result = activation(x, 'mish', self.device_cpu)
50 | expected = np.array([0])
51 | np.testing.assert_array_almost_equal(result, expected)
52 |
53 | def test_swish(self):
54 | x = np.array([0])
55 | result = activation(x, 'swish', self.device_cpu)
56 | expected = np.array([0])
57 | np.testing.assert_array_almost_equal(result, expected)
58 |
59 | def test_sigmoid(self):
60 | x = np.array([0])
61 | result = activation(x, 'sigmoid', self.device_cpu)
62 | expected = np.array([0.5])
63 | np.testing.assert_array_almost_equal(result, expected)
64 |
65 | def test_softsign(self):
66 | x = np.array([0, 1, -1])
67 | result = activation(x, 'softsign', self.device_cpu)
68 | expected = np.array([0, 0.5, -0.5])
69 | np.testing.assert_array_almost_equal(result, expected)
70 |
71 | def test_tanh(self):
72 | x = np.array([0])
73 | result = activation(x, 'tanh', self.device_cpu)
74 | expected = np.array([0])
75 | np.testing.assert_array_almost_equal(result, expected)
76 |
77 | def test_hardtanh(self):
78 | x = np.array([-2, 0, 2])
79 | result = activation(x, 'hardtanh', self.device_cpu)
80 | expected = np.array([-1, 0, 1])
81 | np.testing.assert_array_almost_equal(result, expected)
82 |
83 | def test_hardswish(self):
84 | x = np.array([-3, 0, 3])
85 | result = activation(x, 'hardswish', self.device_cpu)
86 | expected = np.array([0, 0, 3])
87 | np.testing.assert_array_almost_equal(result, expected)
88 |
89 | def test_hardsigmoid(self):
90 | x = np.array([-2, 0, 2])
91 | result = activation(x, 'hardsigmoid', self.device_cpu)
92 | expected = np.array([0, 0.5, 1])
93 | np.testing.assert_array_almost_equal(result, expected)
94 |
95 | def test_tanhshrink(self):
96 | x = np.array([0, 1])
97 | result = activation(x, 'tanhshrink', self.device_cpu)
98 | expected = np.array([0, 1 - np.tanh(1)])
99 | np.testing.assert_array_almost_equal(result, expected)
100 |
101 | def test_softshrink(self):
102 | x = np.array([-1.5, -0.5, 0.5, 1.5])
103 | result = activation(x, 'softshrink', self.device_cpu, alpha=1.0)
104 | expected = np.array([-0.5, 0, 0, 0.5])
105 | np.testing.assert_array_almost_equal(result, expected)
106 |
107 | def test_hardshrink(self):
108 | x = np.array([-1.5, -0.5, 0.5, 1.5])
109 | result = activation(x, 'hardshrink', self.device_cpu, alpha=1.0)
110 | expected = np.array([-1.5, 0, 0, 1.5])
111 | np.testing.assert_array_almost_equal(result, expected)
112 |
113 | def test_softplus(self):
114 | x = np.array([0])
115 | result = activation(x, 'softplus', self.device_cpu)
116 | expected = np.array([np.log(2)])
117 | np.testing.assert_array_almost_equal(result, expected)
118 |
119 | def test_softmax(self):
120 | x = np.array([[1, 2, 3]])
121 | result = activation(x, 'softmax', self.device_cpu)
122 | expected = np.array([[0.09003057, 0.24472847, 0.66524096]])
123 | np.testing.assert_array_almost_equal(result, expected)
124 |
125 | def test_rrelu(self):
126 | x = np.array([-1, 0, 1])
127 | result = activation(x, 'rrelu', self.device_cpu, alpha=0.01)
128 | expected = np.array([-0.01, 0, 1])
129 | np.testing.assert_array_almost_equal(result, expected)
130 |
131 | def test_invalid_activation(self):
132 | with self.assertRaises(ValueError):
133 | activation(np.array([1, 2, 3]), 'invalid', self.device_cpu)
134 |
135 | # Derivative Tests
136 | def test_relu_derivative(self):
137 | x = np.array([-1, 0, 1])
138 | result = activation_derivative(x, 'relu', self.device_cpu)
139 | expected = np.array([0, 0, 1])
140 | np.testing.assert_array_equal(result, expected)
141 |
142 | def test_sigmoid_derivative(self):
143 | x = np.array([0.5])
144 | result = activation_derivative(x, 'sigmoid', self.device_cpu)
145 | expected = np.array([0.25])
146 | np.testing.assert_array_almost_equal(result, expected)
147 |
148 | def test_softsign_derivative(self):
149 | x = np.array([1])
150 | result = activation_derivative(x, 'softsign', self.device_cpu)
151 | expected = np.array([0.25])
152 | np.testing.assert_array_almost_equal(result, expected)
153 |
154 | def test_tanh_derivative(self):
155 | x = np.array([0.5])
156 | result = activation_derivative(x, 'tanh', self.device_cpu)
157 | expected = np.array([0.78644773])
158 | np.testing.assert_array_almost_equal(result, expected)
159 |
160 | def test_hardtanh_derivative(self):
161 | x = np.array([-2, 0, 2])
162 | result = activation_derivative(x, 'hardtanh', self.device_cpu)
163 | expected = np.array([0, 1, 0])
164 | np.testing.assert_array_almost_equal(result, expected)
165 |
166 | def test_hardsigmoid_derivative(self):
167 | x = np.array([0])
168 | result = activation_derivative(x, 'hardsigmoid', self.device_cpu)
169 | expected = np.array([0.5])
170 | np.testing.assert_array_almost_equal(result, expected)
171 |
172 | def test_tanhshrink_derivative(self):
173 | x = np.array([1])
174 | result = activation_derivative(x, 'tanhshrink', self.device_cpu)
175 | expected = np.array([0.419974])
176 | np.testing.assert_array_almost_equal(result, expected, decimal=5)
177 |
178 | def test_softshrink_derivative(self):
179 | x = np.array([-1.5, -0.5, 0.5, 1.5])
180 | result = activation_derivative(x, 'softshrink', self.device_cpu, alpha=1.0)
181 | expected = np.array([1, 0, 0, 1])
182 | np.testing.assert_array_almost_equal(result, expected)
183 |
184 | def test_hardshrink_derivative(self):
185 | x = np.array([-1.5, -0.5, 0.5, 1.5])
186 | result = activation_derivative(x, 'hardshrink', self.device_cpu, alpha=1.0)
187 | expected = np.array([1, 0, 0, 1])
188 | np.testing.assert_array_almost_equal(result, expected)
189 |
190 | if __name__ == "__main__":
191 | unittest.main()
192 |
--------------------------------------------------------------------------------
/tests/test_batch_normalization.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import numpy as np
3 | from pydeepflow.batch_normalization import BatchNormalization
4 | from pydeepflow.device import Device
5 |
6 | class TestBatchNormalization(unittest.TestCase):
7 | def setUp(self):
8 | self.device = Device(use_gpu=False)
9 | self.bn = BatchNormalization(4, device=self.device)
10 |
11 | def test_normalize_training(self):
12 | Z = self.device.array([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
13 | normalized = self.bn.normalize(Z, training=True)
14 | self.assertEqual(normalized.shape, Z.shape)
15 | self.assertAlmostEqual(self.device.mean(normalized), 0, places=7)
16 | self.assertAlmostEqual(self.device.var(normalized), 1, places=2)
17 |
18 | def test_normalize_inference(self):
19 | Z = self.device.array([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
20 | self.bn.normalize(Z, training=True) # Update running stats
21 | normalized = self.bn.normalize(Z, training=False)
22 | self.assertEqual(normalized.shape, Z.shape)
23 |
24 | def test_backprop(self):
25 | Z = self.device.array([[1, 2, 3, 4], [4, 3, 2, 1], [5, 6, 7, 8]])
26 | dZ = self.device.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1], [0.5, 0.6, 0.7, 0.8]])
27 | self.bn.normalize(Z, training=True)
28 | output = self.bn.backprop(Z, dZ, learning_rate=0.01)
29 | self.assertEqual(output.shape, dZ.shape)
30 |
31 | if __name__ == '__main__':
32 | unittest.main()
33 |
--------------------------------------------------------------------------------
/tests/test_device.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import numpy as np
3 | from pydeepflow.device import Device
4 |
5 | class TestDevice(unittest.TestCase):
6 |
7 | def setUp(self):
8 | self.device_cpu = Device(use_gpu=False)
9 |
10 | def test_array(self):
11 | data = [1, 2, 3]
12 | result = self.device_cpu.array(data)
13 | expected = np.array([1, 2, 3])
14 | np.testing.assert_array_equal(result, expected)
15 |
16 | def test_zeros(self):
17 | shape = (2, 2)
18 | result = self.device_cpu.zeros(shape)
19 | expected = np.zeros(shape)
20 | np.testing.assert_array_equal(result, expected)
21 |
22 | def test_random(self):
23 | random_cpu = self.device_cpu.random().rand(3, 3)
24 | self.assertEqual(random_cpu.shape, (3, 3))
25 |
26 | def test_exp(self):
27 | x = np.array([0, 1, 2])
28 | result = self.device_cpu.exp(x)
29 | expected = np.exp(x)
30 | np.testing.assert_array_almost_equal(result, expected)
31 |
32 | def test_dot(self):
33 | a = np.array([[1, 2], [3, 4]])
34 | b = np.array([[5, 6], [7, 8]])
35 | result = self.device_cpu.dot(a, b)
36 | expected = np.dot(a, b)
37 | np.testing.assert_array_equal(result, expected)
38 |
39 | if __name__ == "__main__":
40 | unittest.main()
41 |
--------------------------------------------------------------------------------
/tests/test_losses.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import numpy as np
3 | from pydeepflow.losses import binary_crossentropy, mse, mse_derivative
4 | from pydeepflow.device import Device
5 |
6 | class TestLosses(unittest.TestCase):
7 |
8 | def setUp(self):
9 | self.device_cpu = Device(use_gpu=False)
10 |
11 | def test_binary_crossentropy(self):
12 | y_true = np.array([1, 0, 1])
13 | y_pred = np.array([0.9, 0.1, 0.8])
14 | result = binary_crossentropy(y_true, y_pred, self.device_cpu)
15 | expected = -np.mean(y_true * np.log(y_pred + 1e-8) + (1 - y_true) * np.log(1 - y_pred + 1e-8))
16 | self.assertAlmostEqual(result, expected)
17 |
18 | def test_mse(self):
19 | y_true = np.array([1, 0, 1])
20 | y_pred = np.array([0.9, 0.1, 0.8])
21 | result = mse(y_true, y_pred, self.device_cpu)
22 | expected = np.mean((y_true - y_pred) ** 2)
23 | self.assertAlmostEqual(result, expected)
24 |
25 | def test_mse_derivative(self):
26 | y_true = np.array([1, 0, 1])
27 | y_pred = np.array([0.9, 0.1, 0.8])
28 | result = mse_derivative(y_true, y_pred, self.device_cpu)
29 | expected = 2 * (y_pred - y_true) / y_true.size
30 | np.testing.assert_array_almost_equal(result, expected)
31 |
32 | if __name__ == "__main__":
33 | unittest.main()
34 |
--------------------------------------------------------------------------------
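For the inputs used in these tests, the expected values can be verified by hand: with y_true = [1, 0, 1] and y_pred = [0.9, 0.1, 0.8], binary cross-entropy is -(ln 0.9 + ln 0.9 + ln 0.8) / 3 ≈ 0.1446 (the 1e-8 epsilon is negligible here), and the MSE is (0.1^2 + 0.1^2 + 0.2^2) / 3 = 0.02.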
/tests/test_model.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import numpy as np
3 | from pydeepflow.model import Multi_Layer_ANN
4 | from sklearn.datasets import load_iris
5 | from sklearn.model_selection import train_test_split
6 | from sklearn.preprocessing import StandardScaler
7 |
8 | class TestMultiLayerANN(unittest.TestCase):
9 | def setUp(self):
10 | # Load and prepare the Iris dataset
11 | iris = load_iris()
12 | X = iris.data
13 | y = iris.target
14 | self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(X, y, test_size=0.2, random_state=42)
15 |
16 | # Standardize the features
17 | scaler = StandardScaler()
18 | self.X_train = scaler.fit_transform(self.X_train)
19 | self.X_test = scaler.transform(self.X_test)
20 |
21 | # One-hot encode the labels
22 | self.y_train = np.eye(3)[self.y_train]
23 | self.y_test = np.eye(3)[self.y_test]
24 |
25 | def test_model_with_batch_norm(self):
26 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[10, 10],
27 | activations=['relu', 'relu'], use_batch_norm=True)
28 | model.fit(epochs=50, learning_rate=0.01, verbose=False)
29 |
30 | loss, accuracy = model.evaluate(self.X_test, self.y_test)
31 | self.assertGreater(accuracy, 0.8) # Adjust threshold as needed
32 |
33 | def test_model_without_batch_norm(self):
34 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[10, 10],
35 | activations=['relu', 'relu'], use_batch_norm=False)
36 | model.fit(epochs=50, learning_rate=0.01, verbose=False)
37 |
38 | loss, accuracy = model.evaluate(self.X_test, self.y_test)
39 | self.assertGreater(accuracy, 0.8) # Adjust threshold as needed
40 |
41 | def test_forward_propagation(self):
42 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[5],
43 | activations=['relu'], use_batch_norm=True)
44 | activations, Z_values = model.forward_propagation(self.X_train)
45 |
46 | self.assertEqual(len(activations), 3) # Input, hidden, and output layers
47 | self.assertEqual(activations[0].shape, self.X_train.shape)
48 | self.assertEqual(activations[-1].shape, self.y_train.shape)
49 |
50 | def test_backpropagation(self):
51 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[5],
52 | activations=['relu'], use_batch_norm=True)
53 | activations, Z_values = model.forward_propagation(self.X_train)
54 | model.backpropagation(self.X_train, self.y_train, activations, Z_values, learning_rate=0.01)
55 |
56 | # Check if weights and biases are updated
57 | for w in model.weights:
58 | self.assertFalse(np.allclose(w, 0))
59 | for b in model.biases:
60 | self.assertFalse(np.allclose(b, 0))
61 |
62 | def test_predict(self):
63 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[5],
64 | activations=['relu'], use_batch_norm=True)
65 | predictions = model.predict(self.X_test)
66 |
67 | self.assertEqual(predictions.shape, self.y_test.shape)
68 |
69 | def test_save_and_load_model(self):
70 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[5],
71 | activations=['relu'], use_batch_norm=True)
72 | model.fit(epochs=10, learning_rate=0.01, verbose=False)
73 |
74 | # Save the model
75 | model.save_model('test_model.npy')
76 |
77 | # Create a new model and load the saved weights
78 | loaded_model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[5],
79 | activations=['relu'], use_batch_norm=True)
80 | loaded_model.load_model('test_model.npy')
81 |
82 | # Compare predictions
83 | original_predictions = model.predict(self.X_test)
84 | loaded_predictions = loaded_model.predict(self.X_test)
85 |
86 | np.testing.assert_array_almost_equal(original_predictions, loaded_predictions)
87 |
88 | def test_learning_rate_scheduler(self):
89 | from pydeepflow.learning_rate_scheduler import LearningRateScheduler
90 |
91 | lr_scheduler = LearningRateScheduler(initial_lr=0.1, strategy="decay")
92 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[5],
93 | activations=['relu'], use_batch_norm=True)
94 |
95 | model.fit(epochs=20, learning_rate=0.1, lr_scheduler=lr_scheduler, verbose=False)
96 |
97 | # Check if learning rate has decreased
98 | self.assertLess(lr_scheduler.get_lr(19), 0.1)
99 |
100 | def test_early_stopping(self):
101 | from pydeepflow.early_stopping import EarlyStopping
102 |
103 | early_stop = EarlyStopping(patience=5)
104 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[5],
105 | activations=['relu'], use_batch_norm=True)
106 |
107 | model.fit(epochs=100, learning_rate=0.01, early_stop=early_stop,
108 | X_val=self.X_test, y_val=self.y_test, verbose=False)
109 |
110 | # Check if training stopped early
111 | self.assertLess(len(model.history['train_loss']), 100)
112 |
113 | def test_model_checkpointing(self):
114 | from pydeepflow.checkpoints import ModelCheckpoint
115 | import os
116 |
117 | checkpoint = ModelCheckpoint(save_dir='./checkpoints', monitor='val_loss', save_best_only=True)
118 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[5],
119 | activations=['relu'], use_batch_norm=True)
120 |
121 | model.fit(epochs=20, learning_rate=0.01, checkpoint=checkpoint,
122 | X_val=self.X_test, y_val=self.y_test, verbose=False)
123 |
124 | # Check if checkpoint file was created
125 | self.assertTrue(os.path.exists('./checkpoints'))
126 | checkpoint_files = os.listdir('./checkpoints')
127 | self.assertTrue(len(checkpoint_files) > 0)
128 |
129 | def test_batch_norm_effect(self):
130 | model_bn = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[20, 20],
131 | activations=['relu', 'relu'], use_batch_norm=True)
132 | model_bn.fit(epochs=50, learning_rate=0.01, verbose=False)
133 |
134 | # Model without batch norm
135 | model_no_bn = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[20, 20],
136 | activations=['relu', 'relu'], use_batch_norm=False)
137 | model_no_bn.fit(epochs=50, learning_rate=0.01, verbose=False)
138 |
139 | # Compare performance
140 | _, accuracy_bn = model_bn.evaluate(self.X_test, self.y_test)
141 | _, accuracy_no_bn = model_no_bn.evaluate(self.X_test, self.y_test)
142 |
143 | # This test might not always pass due to the stochastic nature of training
144 | # but it gives an idea of the potential benefit of batch normalization
145 | self.assertGreaterEqual(accuracy_bn, accuracy_no_bn)
146 |
147 | def test_different_activations(self):
148 | activations = ['relu', 'sigmoid', 'tanh']
149 | for activation in activations:
150 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[10],
151 | activations=[activation], use_batch_norm=True)
152 | model.fit(epochs=50, learning_rate=0.01, verbose=False)
153 | _, accuracy = model.evaluate(self.X_test, self.y_test)
154 | self.assertGreater(accuracy, 0.8, f"Model with {activation} activation failed")
155 |
156 | def test_model_history(self):
157 | model = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[10],
158 | activations=['relu'], use_batch_norm=True)
159 | model.fit(epochs=50, learning_rate=0.01, X_val=self.X_test, y_val=self.y_test, verbose=False)
160 |
161 | self.assertIn('train_loss', model.history)
162 | self.assertIn('val_loss', model.history)
163 | self.assertIn('train_accuracy', model.history)
164 | self.assertIn('val_accuracy', model.history)
165 | self.assertEqual(len(model.history['train_loss']), 50)
166 |
167 | def test_regularization(self):
168 | # Model with L2 regularization
169 | model_reg = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[20, 20],
170 | activations=['relu', 'relu'], use_batch_norm=True, l2_lambda=0.01)
171 | model_reg.fit(epochs=50, learning_rate=0.01, verbose=False)
172 |
173 | # Model without regularization
174 | model_no_reg = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[20, 20],
175 | activations=['relu', 'relu'], use_batch_norm=True, l2_lambda=0.0)
176 | model_no_reg.fit(epochs=50, learning_rate=0.01, verbose=False)
177 |
178 | # Compare performance on test set
179 | _, accuracy_reg = model_reg.evaluate(self.X_test, self.y_test)
180 | _, accuracy_no_reg = model_no_reg.evaluate(self.X_test, self.y_test)
181 |
182 | # Regularized model should generalize better (though this might not always be true)
183 | self.assertGreaterEqual(accuracy_reg, accuracy_no_reg)
184 |
185 | def test_dropout(self):
186 | model_dropout = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[20, 20],
187 | activations=['relu', 'relu'], use_batch_norm=True, dropout_rate=0.5)
188 | model_dropout.fit(epochs=50, learning_rate=0.01, verbose=False)
189 |
190 | # Model without dropout
191 | model_no_dropout = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[20, 20],
192 | activations=['relu', 'relu'], use_batch_norm=True, dropout_rate=0.0)
193 | model_no_dropout.fit(epochs=50, learning_rate=0.01, verbose=False)
194 |
195 | # Compare performance on test set
196 | _, accuracy_dropout = model_dropout.evaluate(self.X_test, self.y_test)
197 | _, accuracy_no_dropout = model_no_dropout.evaluate(self.X_test, self.y_test)
198 |
199 | # Dropout should help with generalization, but due to the stochastic nature,
200 | # this test might not always pass. It's more of a sanity check.
201 | self.assertGreaterEqual(accuracy_dropout, 0.8)
202 | self.assertGreaterEqual(accuracy_no_dropout, 0.8)
203 |
204 | def test_gradient_clipping(self):
205 | # Model with gradient clipping
206 | model_clip = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[20, 20],
207 | activations=['relu', 'relu'], use_batch_norm=True)
208 | model_clip.fit(epochs=50, learning_rate=0.01, clipping_threshold=1.0, verbose=False)
209 |
210 | # Model without gradient clipping
211 | model_no_clip = Multi_Layer_ANN(self.X_train, self.y_train, hidden_layers=[20, 20],
212 | activations=['relu', 'relu'], use_batch_norm=True)
213 | model_no_clip.fit(epochs=50, learning_rate=0.01, verbose=False)
214 |
215 | # Both models should converge, but the clipped model might be more stable
216 | _, accuracy_clip = model_clip.evaluate(self.X_test, self.y_test)
217 | _, accuracy_no_clip = model_no_clip.evaluate(self.X_test, self.y_test)
218 |
219 | self.assertGreaterEqual(accuracy_clip, 0.8)
220 | self.assertGreaterEqual(accuracy_no_clip, 0.8)
221 |
222 | if __name__ == '__main__':
223 | unittest.main()
224 |
--------------------------------------------------------------------------------
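All of the test modules above use the standard unittest layout, so the whole suite can be run from the repository root with either of the usual commands (pytest is available via the testing extra in setup.py):

    python -m unittest discover tests
    python -m pytest tests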