├── .gitignore
├── Case Study
│   └── 20newsGroup.ipynb
├── Concepts
│   ├── Evaluation Metrics
│   │   ├── Classification Metrics.md
│   │   └── Regression Metrics.md
│   ├── ML Models
│   │   ├── Classification
│   │   │   ├── Decision Tree.md
│   │   │   ├── KNN.md
│   │   │   ├── Logistic Regression.md
│   │   │   ├── Naive Bayes.md
│   │   │   ├── Random Forest.md
│   │   │   ├── SVM.md
│   │   │   └── xgboost.md
│   │   └── Regression
│   │       ├── ElasticNet.md
│   │       ├── Gradient Boosting Regressor.md
│   │       ├── HGBRT.md
│   │       ├── Lasso Regression.md
│   │       ├── Linear Regression.md
│   │       ├── MLPRegressor.md
│   │       ├── Ridge Regression.md
│   │       └── SVR.md
│   ├── Other
│   │   ├── Gini Index.md
│   │   └── Randomized Search.md
│   └── Preprocessing
│       ├── MinMaxScaler.md
│       ├── OneHotEncoder.md
│       ├── OrdinalEncoder.md
│       ├── QuantileTransformer.md
│       ├── SimpleImputer.md
│       └── StandardScaler.md
├── Notebooks
│   ├── Classification Models.ipynb
│   └── Regression Models.ipynb
├── README.md
├── Regression Models.ipynb
└── data
    ├── adult.csv
    └── housing.csv
/.gitignore:
--------------------------------------------------------------------------------
1 | *.joblib
2 | # Byte-compiled / optimized / DLL files
3 | __pycache__/
4 | *.py[cod]
5 | *$py.class
6 |
7 | # C extensions
8 | *.so
9 |
10 | # Distribution / packaging
11 | .Python
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | pip-wheel-metadata/
25 | share/python-wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # pyenv
81 | .python-version
82 |
83 | # celery beat schedule file
84 | celerybeat-schedule
85 |
86 | # SageMath parsed files
87 | *.sage.py
88 |
89 | # Environments
90 | .env
91 | .venv
92 | env/
93 | venv/
94 | ENV/
95 | env.bak/
96 | venv.bak/
97 |
98 | # Spyder project settings
99 | .spyderproject
100 | .spyproject
101 |
102 | # Rope project settings
103 | .ropeproject
104 |
105 | # mkdocs documentation
106 | /site
107 |
108 | # mypy
109 | .mypy_cache/
110 | .dmypy.json
111 | dmypy.json
112 |
113 | # PyCharm
114 | .idea/
115 | # VS Code
116 | .vscode/
117 | # Jupyter
118 | .jupyter
119 | # Notebook config
120 | .jupyter/jupyter_notebook_config.py
121 |
--------------------------------------------------------------------------------
/Case Study/20newsGroup.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "9d90f3b6",
6 | "metadata": {},
7 | "source": [
8 | "## 20 Newsgroups Dataset Overview\n",
9 | "\n",
10 | "The 20 Newsgroups dataset is a popular collection for text classification and text mining tasks. Compiled from the newsgroup postings of 20 different topics, it offers a rich corpus for natural language processing (NLP) research and machine learning (ML) applications.\n",
11 | "\n",
12 | "### Key Features\n",
13 | "\n",
14 | "- **Topics**: The dataset encompasses a diverse range of subjects, broadly categorized into four main groups:\n",
15 | " - **Computer Hardware and Software**: Discussions about technology, including graphics cards, Microsoft Windows, and Mac hardware.\n",
16 | " - **Science**: Conversations around scientific disciplines such as physics, medicine, and space.\n",
17 | " - **Sociopolitical**: Debates and discussions on politics, guns, and the Middle East.\n",
18 | " - **Religion**: Dialogues concerning atheism, Christianity, and Islam.\n",
19 | "\n",
20 | "- **Content**: It contains around 20,000 discussion items, roughly equally distributed across the 20 different newsgroups. \n",
21 | "\n",
22 | "- **Format**: Each entry in the dataset is a raw text file that may include the message body, headers, footers, and quotes. "
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 1,
28 | "id": "5dc57019",
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "import pandas as pd\n",
33 | "import xgboost as xgb\n",
34 | "from sklearn.pipeline import Pipeline\n",
35 | "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix\n",
36 | "from sklearn.datasets import fetch_20newsgroups\n",
37 | "from sklearn.model_selection import train_test_split\n",
38 | "from sentence_transformers import SentenceTransformer # pip install -U sentence-transformers"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": 2,
44 | "id": "cd7a40ba",
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "train = fetch_20newsgroups(subset='train')"
49 | ]
50 | },
51 | {
52 | "cell_type": "code",
53 | "execution_count": 3,
54 | "id": "53db5839",
55 | "metadata": {},
56 | "outputs": [
57 | {
58 | "data": {
59 | "text/plain": [
60 | "array([7, 4, 4, ..., 3, 1, 8])"
61 | ]
62 | },
63 | "execution_count": 3,
64 | "metadata": {},
65 | "output_type": "execute_result"
66 | }
67 | ],
68 | "source": [
69 | "train.target"
70 | ]
71 | },
72 | {
73 | "cell_type": "code",
74 | "execution_count": 4,
75 | "id": "e0b5193f",
76 | "metadata": {},
77 | "outputs": [
78 | {
79 | "data": {
80 | "text/plain": [
81 | "['alt.atheism',\n",
82 | " 'comp.graphics',\n",
83 | " 'comp.os.ms-windows.misc',\n",
84 | " 'comp.sys.ibm.pc.hardware',\n",
85 | " 'comp.sys.mac.hardware',\n",
86 | " 'comp.windows.x',\n",
87 | " 'misc.forsale',\n",
88 | " 'rec.autos',\n",
89 | " 'rec.motorcycles',\n",
90 | " 'rec.sport.baseball',\n",
91 | " 'rec.sport.hockey',\n",
92 | " 'sci.crypt',\n",
93 | " 'sci.electronics',\n",
94 | " 'sci.med',\n",
95 | " 'sci.space',\n",
96 | " 'soc.religion.christian',\n",
97 | " 'talk.politics.guns',\n",
98 | " 'talk.politics.mideast',\n",
99 | " 'talk.politics.misc',\n",
100 | " 'talk.religion.misc']"
101 | ]
102 | },
103 | "execution_count": 4,
104 | "metadata": {},
105 | "output_type": "execute_result"
106 | }
107 | ],
108 | "source": [
109 | "train.target_names"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": 5,
115 | "id": "24d848e9",
116 | "metadata": {},
117 | "outputs": [
118 | {
119 | "name": "stdout",
120 | "output_type": "stream",
121 | "text": [
122 | "From: lerxst@wam.umd.edu (where's my thing)\n",
123 | "Subject: WHAT car is this!?\n",
124 | "Nntp-Posting-Host: rac3.wam.umd.edu\n",
125 | "Organization: University of Maryland, College Park\n",
126 | "Lines: 15\n",
127 | "\n",
128 | " I was wondering if anyone out there could enlighten me on this car I saw\n",
129 | "the other day. It was a 2-door sports car, looked to be from the late 60s/\n",
130 | "early 70s. It was called a Bricklin. The doors were really small. In addition,\n",
131 | "the front bumper was separate from the rest of the body. This is \n",
132 | "all I know. If anyone can tellme a model name, engine specs, years\n",
133 | "of production, where this car is made, history, or whatever info you\n",
134 | "have on this funky looking car, please e-mail.\n",
135 | "\n",
136 | "Thanks,\n",
137 | "- IL\n",
138 | " ---- brought to you by your neighborhood Lerxst ----\n",
139 | "\n",
140 | "\n",
141 | "\n",
142 | "\n",
143 | "\n"
144 | ]
145 | }
146 | ],
147 | "source": [
148 | "print(train.data[0])"
149 | ]
150 | },
151 | {
152 | "cell_type": "code",
153 | "execution_count": 6,
154 | "id": "47ce82b9",
155 | "metadata": {},
156 | "outputs": [],
157 | "source": [
158 | "model = SentenceTransformer(\"all-MiniLM-L6-v2\") "
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": 7,
164 | "id": "e5103cd5",
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | "# Our sentences to encode\n",
169 | "sentences = train.data\n",
170 | "\n",
171 | "# Sentences are encoded by calling model.encode()\n",
172 | "embeddings = model.encode(sentences)"
173 | ]
174 | },
175 | {
176 | "cell_type": "code",
177 | "execution_count": 8,
178 | "id": "dfa27703",
179 | "metadata": {},
180 | "outputs": [
181 | {
182 | "data": {
183 | "text/plain": [
184 | "(384,)"
185 | ]
186 | },
187 | "execution_count": 8,
188 | "metadata": {},
189 | "output_type": "execute_result"
190 | }
191 | ],
192 | "source": [
193 | "embeddings[0].shape"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": 9,
199 | "id": "b47597b4",
200 | "metadata": {},
201 | "outputs": [
202 | {
203 | "data": {
204 | "text/plain": [
205 | "11314"
206 | ]
207 | },
208 | "execution_count": 9,
209 | "metadata": {},
210 | "output_type": "execute_result"
211 | }
212 | ],
213 | "source": [
214 | "len(embeddings)"
215 | ]
216 | },
217 | {
218 | "cell_type": "code",
219 | "execution_count": 10,
220 | "id": "76c28fd6",
221 | "metadata": {},
222 | "outputs": [],
223 | "source": [
224 | "df = pd.DataFrame(embeddings)"
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": 11,
230 | "id": "5e81be0b",
231 | "metadata": {},
232 | "outputs": [
233 | {
234 | "data": {
235 | "text/html": [
236 | "
\n",
237 | "\n",
250 | "
\n",
251 | " \n",
252 | " \n",
253 | " | \n",
254 | " 0 | \n",
255 | " 1 | \n",
256 | " 2 | \n",
257 | " 3 | \n",
258 | " 4 | \n",
259 | " 5 | \n",
260 | " 6 | \n",
261 | " 7 | \n",
262 | " 8 | \n",
263 | " 9 | \n",
264 | " ... | \n",
265 | " 374 | \n",
266 | " 375 | \n",
267 | " 376 | \n",
268 | " 377 | \n",
269 | " 378 | \n",
270 | " 379 | \n",
271 | " 380 | \n",
272 | " 381 | \n",
273 | " 382 | \n",
274 | " 383 | \n",
275 | "
\n",
276 | " \n",
277 | " \n",
278 | " \n",
279 | " 0 | \n",
280 | " -0.073384 | \n",
281 | " 0.144642 | \n",
282 | " 0.043866 | \n",
283 | " -0.008487 | \n",
284 | " 0.010976 | \n",
285 | " 0.004650 | \n",
286 | " -0.092037 | \n",
287 | " 0.056421 | \n",
288 | " -0.117753 | \n",
289 | " -0.004202 | \n",
290 | " ... | \n",
291 | " -0.065488 | \n",
292 | " -0.052008 | \n",
293 | " -0.062907 | \n",
294 | " 0.004635 | \n",
295 | " 0.022742 | \n",
296 | " -0.064751 | \n",
297 | " 0.104309 | \n",
298 | " -0.024578 | \n",
299 | " -0.006717 | \n",
300 | " 0.132525 | \n",
301 | "
\n",
302 | " \n",
303 | " 1 | \n",
304 | " 0.010949 | \n",
305 | " 0.038870 | \n",
306 | " 0.048669 | \n",
307 | " 0.013852 | \n",
308 | " 0.006879 | \n",
309 | " -0.030265 | \n",
310 | " -0.027496 | \n",
311 | " 0.062196 | \n",
312 | " -0.020248 | \n",
313 | " -0.073615 | \n",
314 | " ... | \n",
315 | " -0.010721 | \n",
316 | " 0.015176 | \n",
317 | " -0.073615 | \n",
318 | " 0.076515 | \n",
319 | " 0.015066 | \n",
320 | " 0.117065 | \n",
321 | " 0.029308 | \n",
322 | " -0.011192 | \n",
323 | " -0.052652 | \n",
324 | " -0.000681 | \n",
325 | "
\n",
326 | " \n",
327 | " 2 | \n",
328 | " -0.072989 | \n",
329 | " -0.018359 | \n",
330 | " 0.014476 | \n",
331 | " 0.039985 | \n",
332 | " 0.038344 | \n",
333 | " -0.004665 | \n",
334 | " -0.105183 | \n",
335 | " 0.053926 | \n",
336 | " 0.000405 | \n",
337 | " -0.000927 | \n",
338 | " ... | \n",
339 | " -0.035562 | \n",
340 | " 0.035174 | \n",
341 | " -0.084836 | \n",
342 | " 0.048907 | \n",
343 | " -0.061031 | \n",
344 | " 0.035921 | \n",
345 | " 0.042243 | \n",
346 | " -0.125008 | \n",
347 | " -0.101583 | \n",
348 | " 0.016186 | \n",
349 | "
\n",
350 | " \n",
351 | " 3 | \n",
352 | " -0.107436 | \n",
353 | " 0.012275 | \n",
354 | " -0.032905 | \n",
355 | " 0.003114 | \n",
356 | " -0.017748 | \n",
357 | " -0.009349 | \n",
358 | " 0.010851 | \n",
359 | " 0.091343 | \n",
360 | " -0.082454 | \n",
361 | " -0.130297 | \n",
362 | " ... | \n",
363 | " -0.114026 | \n",
364 | " 0.030369 | \n",
365 | " 0.012861 | \n",
366 | " 0.007753 | \n",
367 | " -0.013295 | \n",
368 | " 0.056767 | \n",
369 | " -0.016463 | \n",
370 | " -0.047806 | \n",
371 | " -0.058419 | \n",
372 | " -0.018400 | \n",
373 | "
\n",
374 | " \n",
375 | " 4 | \n",
376 | " -0.027212 | \n",
377 | " -0.032349 | \n",
378 | " 0.024046 | \n",
379 | " 0.117962 | \n",
380 | " 0.055256 | \n",
381 | " -0.044903 | \n",
382 | " 0.065358 | \n",
383 | " 0.071563 | \n",
384 | " -0.045105 | \n",
385 | " 0.076455 | \n",
386 | " ... | \n",
387 | " -0.035330 | \n",
388 | " 0.037122 | \n",
389 | " -0.013958 | \n",
390 | " -0.038620 | \n",
391 | " -0.012475 | \n",
392 | " -0.006234 | \n",
393 | " 0.099191 | \n",
394 | " 0.039579 | \n",
395 | " -0.044369 | \n",
396 | " -0.014249 | \n",
397 | "
\n",
398 | " \n",
399 | "
\n",
400 | "
5 rows × 384 columns
\n",
401 | "
"
402 | ],
403 | "text/plain": [
404 | " 0 1 2 3 4 5 6 \\\n",
405 | "0 -0.073384 0.144642 0.043866 -0.008487 0.010976 0.004650 -0.092037 \n",
406 | "1 0.010949 0.038870 0.048669 0.013852 0.006879 -0.030265 -0.027496 \n",
407 | "2 -0.072989 -0.018359 0.014476 0.039985 0.038344 -0.004665 -0.105183 \n",
408 | "3 -0.107436 0.012275 -0.032905 0.003114 -0.017748 -0.009349 0.010851 \n",
409 | "4 -0.027212 -0.032349 0.024046 0.117962 0.055256 -0.044903 0.065358 \n",
410 | "\n",
411 | " 7 8 9 ... 374 375 376 377 \\\n",
412 | "0 0.056421 -0.117753 -0.004202 ... -0.065488 -0.052008 -0.062907 0.004635 \n",
413 | "1 0.062196 -0.020248 -0.073615 ... -0.010721 0.015176 -0.073615 0.076515 \n",
414 | "2 0.053926 0.000405 -0.000927 ... -0.035562 0.035174 -0.084836 0.048907 \n",
415 | "3 0.091343 -0.082454 -0.130297 ... -0.114026 0.030369 0.012861 0.007753 \n",
416 | "4 0.071563 -0.045105 0.076455 ... -0.035330 0.037122 -0.013958 -0.038620 \n",
417 | "\n",
418 | " 378 379 380 381 382 383 \n",
419 | "0 0.022742 -0.064751 0.104309 -0.024578 -0.006717 0.132525 \n",
420 | "1 0.015066 0.117065 0.029308 -0.011192 -0.052652 -0.000681 \n",
421 | "2 -0.061031 0.035921 0.042243 -0.125008 -0.101583 0.016186 \n",
422 | "3 -0.013295 0.056767 -0.016463 -0.047806 -0.058419 -0.018400 \n",
423 | "4 -0.012475 -0.006234 0.099191 0.039579 -0.044369 -0.014249 \n",
424 | "\n",
425 | "[5 rows x 384 columns]"
426 | ]
427 | },
428 | "execution_count": 11,
429 | "metadata": {},
430 | "output_type": "execute_result"
431 | }
432 | ],
433 | "source": [
434 | "df.head()"
435 | ]
436 | },
437 | {
438 | "cell_type": "code",
439 | "execution_count": 12,
440 | "id": "fd9090fd",
441 | "metadata": {},
442 | "outputs": [],
443 | "source": [
444 | "df['label'] = train.target"
445 | ]
446 | },
447 | {
448 | "cell_type": "code",
449 | "execution_count": 13,
450 | "id": "0855346c",
451 | "metadata": {},
452 | "outputs": [],
453 | "source": [
454 | "label_dict = {k:v for k,v in enumerate(train.target_names, 0)}"
455 | ]
456 | },
457 | {
458 | "cell_type": "code",
459 | "execution_count": 14,
460 | "id": "cf0bc61f",
461 | "metadata": {},
462 | "outputs": [
463 | {
464 | "data": {
465 | "text/plain": [
466 | "{0: 'alt.atheism',\n",
467 | " 1: 'comp.graphics',\n",
468 | " 2: 'comp.os.ms-windows.misc',\n",
469 | " 3: 'comp.sys.ibm.pc.hardware',\n",
470 | " 4: 'comp.sys.mac.hardware',\n",
471 | " 5: 'comp.windows.x',\n",
472 | " 6: 'misc.forsale',\n",
473 | " 7: 'rec.autos',\n",
474 | " 8: 'rec.motorcycles',\n",
475 | " 9: 'rec.sport.baseball',\n",
476 | " 10: 'rec.sport.hockey',\n",
477 | " 11: 'sci.crypt',\n",
478 | " 12: 'sci.electronics',\n",
479 | " 13: 'sci.med',\n",
480 | " 14: 'sci.space',\n",
481 | " 15: 'soc.religion.christian',\n",
482 | " 16: 'talk.politics.guns',\n",
483 | " 17: 'talk.politics.mideast',\n",
484 | " 18: 'talk.politics.misc',\n",
485 | " 19: 'talk.religion.misc'}"
486 | ]
487 | },
488 | "execution_count": 14,
489 | "metadata": {},
490 | "output_type": "execute_result"
491 | }
492 | ],
493 | "source": [
494 | "label_dict"
495 | ]
496 | },
497 | {
498 | "cell_type": "code",
499 | "execution_count": 15,
500 | "id": "e68463d9",
501 | "metadata": {},
502 | "outputs": [],
503 | "source": [
504 | "df['label'].replace(label_dict, inplace=True)"
505 | ]
506 | },
507 | {
508 | "cell_type": "code",
509 | "execution_count": 16,
510 | "id": "b6d599fd",
511 | "metadata": {},
512 | "outputs": [
513 | {
514 | "data": {
515 | "text/html": [
516 | "\n",
517 | "\n",
530 | "
\n",
531 | " \n",
532 | " \n",
533 | " | \n",
534 | " 0 | \n",
535 | " 1 | \n",
536 | " 2 | \n",
537 | " 3 | \n",
538 | " 4 | \n",
539 | " 5 | \n",
540 | " 6 | \n",
541 | " 7 | \n",
542 | " 8 | \n",
543 | " 9 | \n",
544 | " ... | \n",
545 | " 375 | \n",
546 | " 376 | \n",
547 | " 377 | \n",
548 | " 378 | \n",
549 | " 379 | \n",
550 | " 380 | \n",
551 | " 381 | \n",
552 | " 382 | \n",
553 | " 383 | \n",
554 | " label | \n",
555 | "
\n",
556 | " \n",
557 | " \n",
558 | " \n",
559 | " 2538 | \n",
560 | " -0.034843 | \n",
561 | " -0.058041 | \n",
562 | " -0.066117 | \n",
563 | " 0.008092 | \n",
564 | " -0.003159 | \n",
565 | " -0.003017 | \n",
566 | " -0.027399 | \n",
567 | " -0.035066 | \n",
568 | " 0.027299 | \n",
569 | " 0.037996 | \n",
570 | " ... | \n",
571 | " 0.006640 | \n",
572 | " -0.013894 | \n",
573 | " -0.052924 | \n",
574 | " 0.006114 | \n",
575 | " 0.006530 | \n",
576 | " 0.127832 | \n",
577 | " -0.015681 | \n",
578 | " 0.011310 | \n",
579 | " 0.061710 | \n",
580 | " talk.politics.mideast | \n",
581 | "
\n",
582 | " \n",
583 | " 7845 | \n",
584 | " 0.016892 | \n",
585 | " -0.106390 | \n",
586 | " 0.001552 | \n",
587 | " -0.100962 | \n",
588 | " -0.095707 | \n",
589 | " 0.008934 | \n",
590 | " -0.119062 | \n",
591 | " 0.026750 | \n",
592 | " -0.029804 | \n",
593 | " 0.005235 | \n",
594 | " ... | \n",
595 | " 0.049609 | \n",
596 | " -0.007812 | \n",
597 | " -0.053950 | \n",
598 | " 0.031513 | \n",
599 | " -0.003336 | \n",
600 | " 0.033478 | \n",
601 | " 0.062172 | \n",
602 | " -0.045588 | \n",
603 | " 0.025845 | \n",
604 | " comp.sys.mac.hardware | \n",
605 | "
\n",
606 | " \n",
607 | " 1955 | \n",
608 | " -0.069299 | \n",
609 | " -0.045954 | \n",
610 | " 0.083345 | \n",
611 | " -0.005201 | \n",
612 | " -0.011273 | \n",
613 | " -0.044890 | \n",
614 | " -0.046241 | \n",
615 | " -0.014178 | \n",
616 | " 0.001070 | \n",
617 | " 0.014388 | \n",
618 | " ... | \n",
619 | " -0.082850 | \n",
620 | " -0.001430 | \n",
621 | " -0.041279 | \n",
622 | " -0.049106 | \n",
623 | " 0.074680 | \n",
624 | " -0.096011 | \n",
625 | " -0.037772 | \n",
626 | " 0.010779 | \n",
627 | " 0.026012 | \n",
628 | " sci.crypt | \n",
629 | "
\n",
630 | " \n",
631 | "
\n",
632 | "
3 rows × 385 columns
\n",
633 | "
"
634 | ],
635 | "text/plain": [
636 | " 0 1 2 3 4 5 6 \\\n",
637 | "2538 -0.034843 -0.058041 -0.066117 0.008092 -0.003159 -0.003017 -0.027399 \n",
638 | "7845 0.016892 -0.106390 0.001552 -0.100962 -0.095707 0.008934 -0.119062 \n",
639 | "1955 -0.069299 -0.045954 0.083345 -0.005201 -0.011273 -0.044890 -0.046241 \n",
640 | "\n",
641 | " 7 8 9 ... 375 376 377 \\\n",
642 | "2538 -0.035066 0.027299 0.037996 ... 0.006640 -0.013894 -0.052924 \n",
643 | "7845 0.026750 -0.029804 0.005235 ... 0.049609 -0.007812 -0.053950 \n",
644 | "1955 -0.014178 0.001070 0.014388 ... -0.082850 -0.001430 -0.041279 \n",
645 | "\n",
646 | " 378 379 380 381 382 383 \\\n",
647 | "2538 0.006114 0.006530 0.127832 -0.015681 0.011310 0.061710 \n",
648 | "7845 0.031513 -0.003336 0.033478 0.062172 -0.045588 0.025845 \n",
649 | "1955 -0.049106 0.074680 -0.096011 -0.037772 0.010779 0.026012 \n",
650 | "\n",
651 | " label \n",
652 | "2538 talk.politics.mideast \n",
653 | "7845 comp.sys.mac.hardware \n",
654 | "1955 sci.crypt \n",
655 | "\n",
656 | "[3 rows x 385 columns]"
657 | ]
658 | },
659 | "execution_count": 16,
660 | "metadata": {},
661 | "output_type": "execute_result"
662 | }
663 | ],
664 | "source": [
665 | "df.sample(3)"
666 | ]
667 | },
668 | {
669 | "cell_type": "code",
670 | "execution_count": 17,
671 | "id": "774868ca",
672 | "metadata": {},
673 | "outputs": [
674 | {
675 | "data": {
676 | "text/plain": [
677 | "label\n",
678 | "rec.sport.hockey 600\n",
679 | "soc.religion.christian 599\n",
680 | "rec.motorcycles 598\n",
681 | "rec.sport.baseball 597\n",
682 | "sci.crypt 595\n",
683 | "rec.autos 594\n",
684 | "sci.med 594\n",
685 | "comp.windows.x 593\n",
686 | "sci.space 593\n",
687 | "comp.os.ms-windows.misc 591\n",
688 | "sci.electronics 591\n",
689 | "comp.sys.ibm.pc.hardware 590\n",
690 | "misc.forsale 585\n",
691 | "comp.graphics 584\n",
692 | "comp.sys.mac.hardware 578\n",
693 | "talk.politics.mideast 564\n",
694 | "talk.politics.guns 546\n",
695 | "alt.atheism 480\n",
696 | "talk.politics.misc 465\n",
697 | "talk.religion.misc 377\n",
698 | "Name: count, dtype: int64"
699 | ]
700 | },
701 | "execution_count": 17,
702 | "metadata": {},
703 | "output_type": "execute_result"
704 | }
705 | ],
706 | "source": [
707 | "df.label.value_counts()"
708 | ]
709 | },
710 | {
711 | "cell_type": "code",
712 | "execution_count": 18,
713 | "id": "ad84aef0",
714 | "metadata": {},
715 | "outputs": [],
716 | "source": [
717 | "df = df[df['label'].isin(['rec.sport.baseball', 'sci.space'])]"
718 | ]
719 | },
720 | {
721 | "cell_type": "code",
722 | "execution_count": 19,
723 | "id": "c6e6b0fa",
724 | "metadata": {},
725 | "outputs": [],
726 | "source": [
727 | "X = df[df.columns[:-1]]\n",
728 | "y = df['label'].apply(lambda x: 1 if x == 'rec.sport.baseball' else 0)"
729 | ]
730 | },
731 | {
732 | "cell_type": "code",
733 | "execution_count": 20,
734 | "id": "a05dfd57",
735 | "metadata": {},
736 | "outputs": [],
737 | "source": [
738 | "# Splitting the dataset into training and testing sets\n",
739 | "X_train, X_test, y_train, y_test = train_test_split(X,\n",
740 | " y,\n",
741 | " test_size=0.2,\n",
742 | " random_state=42)"
743 | ]
744 | },
745 | {
746 | "cell_type": "code",
747 | "execution_count": 21,
748 | "id": "559fd37c",
749 | "metadata": {},
750 | "outputs": [
751 | {
752 | "name": "stdout",
753 | "output_type": "stream",
754 | "text": [
755 | "Accuracy: 0.9831932773109243\n",
756 | "Precision: 0.9915254237288136\n",
757 | "Recall: 0.975\n",
758 | "F1 Score: 0.9831932773109243\n"
759 | ]
760 | }
761 | ],
762 | "source": [
763 | "# Actually we don't need to normalize data in this case\n",
764 | "# If you're not sure why, use df.describe()\n",
765 | "clf = Pipeline(steps=[ #('preprocessor', preprocessor),\n",
766 | " ('classifier',\n",
767 | " xgb.XGBClassifier(use_label_encoder=False, eval_metric='logloss'))\n",
768 | "])\n",
769 | "\n",
770 | "# Train the XGBoost model\n",
771 | "clf.fit(X_train, y_train)\n",
772 | "\n",
773 | "# Predict on the testing set\n",
774 | "y_pred = clf.predict(X_test)\n",
775 | "\n",
776 | "# Evaluation metrics\n",
777 | "accuracy = accuracy_score(y_test, y_pred)\n",
778 | "precision = precision_score(y_test, y_pred)\n",
779 | "recall = recall_score(y_test, y_pred)\n",
780 | "f1 = f1_score(y_test, y_pred)\n",
781 | "conf_matrix = confusion_matrix(y_test, y_pred)\n",
782 | "\n",
783 | "print(f'Accuracy: {accuracy}')\n",
784 | "print(f'Precision: {precision}')\n",
785 | "print(f'Recall: {recall}')\n",
786 | "print(f'F1 Score: {f1}')"
787 | ]
788 | }
789 | ],
790 | "metadata": {
791 | "kernelspec": {
792 | "display_name": "Python 3 (ipykernel)",
793 | "language": "python",
794 | "name": "python3"
795 | },
796 | "language_info": {
797 | "codemirror_mode": {
798 | "name": "ipython",
799 | "version": 3
800 | },
801 | "file_extension": ".py",
802 | "mimetype": "text/x-python",
803 | "name": "python",
804 | "nbconvert_exporter": "python",
805 | "pygments_lexer": "ipython3",
806 | "version": "3.11.5"
807 | }
808 | },
809 | "nbformat": 4,
810 | "nbformat_minor": 5
811 | }
812 |
--------------------------------------------------------------------------------
/Concepts/Evaluation Metrics/Classification Metrics.md:
--------------------------------------------------------------------------------
1 | ## Accuracy
2 |
3 | Accuracy is one of the most intuitive performance measures. It is the ratio of correctly predicted observations to the total observations and is given by the formula:
4 |
5 |
6 | Accuracy = (True Positives + True Negatives) / (Total Observations)
7 |
8 |
9 | High accuracy means that a model correctly predicts both the positives and negatives at a high rate.
10 |
11 | ## Precision
12 |
13 | Precision, also known as Positive Predictive Value, is the ratio of correctly predicted positive observations to the total predicted positives. High precision relates to a low false positive rate, and the formula is:
14 |
15 |
16 | Precision = True Positives / (True Positives + False Positives)
17 |
18 |
19 | Precision is a good measure to use when the cost of False Positives is high.
20 |
21 | ## Recall
22 |
23 | Recall, also known as Sensitivity or True Positive Rate, is the ratio of correctly predicted positive observations to all observations in the actual positive class. The formula is:
24 |
25 |
26 | Recall = True Positives / (True Positives + False Negatives)
27 |
28 |
29 | Recall is the metric to prioritize when selecting a model if there is a high cost associated with False Negatives.
30 |
31 | ## F1 Score
32 |
33 | The F1 Score is the harmonic mean of Precision and Recall, so it takes both false positives and false negatives into account. It is a good way to show that a classifier performs well on both precision and recall. The formula for the F1 score is:
34 |
35 |
36 | F1 Score = 2 * (Precision * Recall) / (Precision + Recall)
37 |
38 |
39 | The F1 score is a single measure of a test's performance and is especially useful when the class distribution is uneven or when the costs of false positives and false negatives differ.
40 |
41 | ---
42 |
43 | *True Positives (TP)* - These are the correctly predicted positive values, meaning the actual class is yes and the predicted class is also yes.
44 | 
45 | *True Negatives (TN)* - These are the correctly predicted negative values, meaning the actual class is no and the predicted class is also no.
46 | 
47 | *False Positives (FP)* - When the actual class is no but the predicted class is yes.
48 | 
49 | *False Negatives (FN)* - When the actual class is yes but the predicted class is no.
50 |
51 |
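The definitions above map directly onto scikit-learn's metric functions (the library used in this repo's notebooks). Below is a minimal sketch on made-up labels; the numbers are purely illustrative.

```python
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, precision_score, recall_score

# Toy ground-truth and predicted labels (1 = positive class, 0 = negative class)
y_true = [1, 0, 1, 1, 0, 1, 0, 0, 1, 0]
y_pred = [1, 0, 1, 0, 0, 1, 1, 0, 1, 0]

# Confusion matrix entries for the binary case
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(f"TP={tp}, TN={tn}, FP={fp}, FN={fn}")

print("Accuracy :", accuracy_score(y_true, y_pred))   # (TP + TN) / total
print("Precision:", precision_score(y_true, y_pred))  # TP / (TP + FP)
print("Recall   :", recall_score(y_true, y_pred))     # TP / (TP + FN)
print("F1 Score :", f1_score(y_true, y_pred))         # harmonic mean of precision and recall
```
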
--------------------------------------------------------------------------------
/Concepts/Evaluation Metrics/Regression Metrics.md:
--------------------------------------------------------------------------------
1 | ## Understanding MAE, MSE, RMSE, and R^2
2 |
3 | When evaluating the performance of regression models, we often rely on several key metrics to understand how well our model is predicting. Let's refresh on what these metrics mean:
4 |
5 | ### Mean Absolute Error (MAE)
6 |
7 | MAE measures the average magnitude of the errors in a set of predictions, without considering their direction. It's calculated as the average of the absolute differences between the predicted values and the actual values. This metric gives an idea of how wrong the predictions are; a value of 0 indicates no error or perfect predictions. Since it measures the average error magnitude, it's easy to interpret.
8 |
9 | **Formula:** $$MAE = \frac{1}{n} \sum_{i=1}^{n} |y_i - \hat{y}_i|$$
10 |
11 | Where:
12 | - $y_i$ is the actual value,
13 | - $\hat{y}_i$ is the predicted value,
14 | - $n$ is the number of observations.
15 |
16 | ### Mean Squared Error (MSE)
17 |
18 | MSE is similar to MAE but squares the difference between the predicted and actual values before averaging them. The squaring part places more emphasis on larger errors compared to smaller ones, making MSE sensitive to outliers. MSE is useful when large errors are particularly undesirable.
19 |
20 | **Formula:** $$MSE = \frac{1}{n} \sum_{i=1}^{n} (y_i - \hat{y}_i)^2$$
21 |
22 | ### Root Mean Squared Error (RMSE)
23 |
24 | RMSE is the square root of the mean squared error. Taking the square root brings the units back to the original units of the output variable and somewhat reduces the sensitivity to large errors. Like MSE, it gives more weight to larger errors, but its units are easier to understand since they match the target variable's units.
25 |
26 | **Formula:** $$RMSE = \sqrt{\frac{1}{n} \sum_{i=1}^{n} (y_i - \hat{y}_i)^2}$$
27 |
28 | ### R-Squared (R^2)
29 |
30 | R^2, also known as the coefficient of determination, is a statistical measure that represents the proportion of the variance in the dependent variable that is explained by the independent variable(s) in a regression model. It provides an indication of the goodness of fit of a model. An R^2 of 1 indicates that the regression predictions perfectly fit the data, while an R^2 of 0 means the model does no better than always predicting the mean; negative values are possible when a model fits worse than that baseline and should be interpreted with caution.
31 |
32 | **Formula:**
33 |
34 | $$
35 | R^2 = 1 - \frac{\sum_{i=1}^{n} (y_i - \hat{y}_i)^2}{\sum_{i=1}^{n} (y_i - \bar{y})^2}
36 | $$
37 |
38 | Where:
39 | - $\bar{y}$ is the mean of the actual values.
40 |
41 | In summary, MAE, MSE, and RMSE are metrics that directly measure the average error in the predictions, with MSE and RMSE giving more penalty to larger errors. R^2 provides a measure of how well future samples are likely to be predicted by the model. Understanding these metrics helps in evaluating and comparing regression models effectively.
42 |
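For reference, all four metrics can be computed with scikit-learn; the sketch below uses made-up values purely for illustration.

```python
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

# Made-up actual and predicted values
y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

mae = mean_absolute_error(y_true, y_pred)   # average |error|
mse = mean_squared_error(y_true, y_pred)    # average squared error
rmse = np.sqrt(mse)                         # back in the target's original units
r2 = r2_score(y_true, y_pred)               # 1 - SS_res / SS_tot

print(f"MAE={mae:.3f}  MSE={mse:.3f}  RMSE={rmse:.3f}  R^2={r2:.3f}")
```
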
--------------------------------------------------------------------------------
/Concepts/ML Models/Classification/Decision Tree.md:
--------------------------------------------------------------------------------
1 | Decision trees are a popular supervised learning algorithm used for both classification and regression tasks. They model decisions as a tree-like structure where each internal node represents a feature (or attribute), each branch represents a decision rule, and each leaf node represents the outcome or class label.
2 |
3 | #### How It Works:
4 |
5 | 1. **Splitting Data:**
6 | - Decision trees start by evaluating the entire dataset and selecting the best feature to split on.
7 | - The goal of splitting is to maximize the homogeneity (or purity) of the resulting subsets.
8 | - Common metrics for measuring homogeneity include Gini impurity and entropy.
9 |
10 | 2. **Building the Tree:**
11 | - After the initial split, the process is repeated recursively for each subset (or branch).
12 | - At each step, the algorithm selects the best feature to split on based on the chosen metric.
13 | - This process continues until a stopping criterion is met, such as reaching a maximum tree depth or having subsets with pure class labels.
14 |
15 | 3. **Making Predictions:**
16 | - To make predictions for a new instance, the algorithm traverses the tree from the root node to a leaf node.
17 | - At each internal node, it follows the decision rule based on the value of the corresponding feature.
18 | - Once it reaches a leaf node, the predicted outcome (class label for classification or numerical value for regression) is determined.
19 |
20 | #### Key Points:
21 |
22 | - Decision trees partition the feature space into regions based on simple decision rules.
23 | - They are interpretable and easy to visualize, making them useful for understanding and explaining decision-making processes.
24 | - Decision trees can handle both numerical and categorical features.
25 | - They are prone to overfitting, especially when the tree grows too deep. Techniques like pruning can be used to prevent overfitting.
26 | - Decision trees are often used as the building blocks for more complex ensemble methods like Random Forests and Gradient Boosting.
27 |
28 | #### Applications:
29 |
30 | - Customer churn prediction in marketing.
31 | - Credit risk assessment in finance.
32 | - Medical diagnosis in healthcare.
33 | - Fault diagnosis in engineering.
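
To make the splitting and prediction steps concrete, here is a minimal scikit-learn sketch; the iris dataset and the `max_depth=3` setting are illustrative choices, not requirements.

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_text

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# Gini impurity guides the splits; limiting depth curbs overfitting
tree = DecisionTreeClassifier(criterion="gini", max_depth=3, random_state=42)
tree.fit(X_train, y_train)

print("Test accuracy:", tree.score(X_test, y_test))
print(export_text(tree))  # the learned decision rules, one line per node
```
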
--------------------------------------------------------------------------------
/Concepts/ML Models/Classification/KNN.md:
--------------------------------------------------------------------------------
1 | The k-Nearest Neighbors (KNN) algorithm is a simple and intuitive supervised learning algorithm used for classification and regression tasks. It makes predictions based on the similarity of instances in a feature space.
2 |
3 | #### How It Works:
4 |
5 | 1. **Instance-Based Learning:**
6 | - KNN belongs to the category of instance-based learning algorithms. Instead of learning explicit models, KNN stores the training dataset and makes predictions based on the similarity of new instances to existing data points.
7 |
8 | 2. **Distance Metric:**
9 | - KNN uses a distance metric, typically Euclidean distance, to measure the similarity between instances in the feature space.
10 | - For each new instance, KNN calculates its distance to all other instances in the training dataset.
11 |
12 | 3. **Finding Neighbors:**
13 | - KNN selects the k nearest neighbors of the new instance based on the calculated distances.
14 | - The value of k is a hyperparameter that needs to be specified by the user. It determines the number of neighbors considered when making predictions.
15 |
16 | 4. **Classification or Regression:**
17 | - For classification tasks, KNN predicts the class label of the new instance by taking a majority vote among its k nearest neighbors.
18 | - For regression tasks, KNN predicts the numerical value of the new instance by taking the average of the target values of its k nearest neighbors.
19 |
20 | #### Key Points:
21 |
22 | - KNN is a non-parametric and lazy learning algorithm, meaning it does not make strong assumptions about the underlying data distribution and does not learn explicit models during training.
23 | - The choice of the distance metric and the value of k are critical parameters that can significantly impact the performance of KNN.
24 | - KNN is sensitive to the scale of features, so feature scaling is often necessary to ensure that all features contribute equally to the distance calculation.
25 | - KNN's prediction time can be relatively high, especially for large datasets, as it requires calculating distances to all training instances.
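
A minimal scikit-learn sketch of the ideas above; scaling is included because KNN's distance calculation is sensitive to feature scale, and the dataset and k=5 are illustrative choices.

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# Scale features so they contribute equally to the Euclidean distance,
# then classify by majority vote among the 5 nearest neighbors
knn = make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=5))
knn.fit(X_train, y_train)

print("Test accuracy:", knn.score(X_test, y_test))
```
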
--------------------------------------------------------------------------------
/Concepts/ML Models/Classification/Logistic Regression.md:
--------------------------------------------------------------------------------
1 | Logistic regression is a type of statistical model used for binary classification tasks. In binary classification, the goal is to predict whether an observation belongs to one of two classes, usually represented as 0 or 1.
2 |
3 | #### How It Works:
4 |
5 | 1. **Linear Combination:**
6 | - Logistic regression starts with a linear combination of the input features. It multiplies each feature by a weight (which the model learns during training) and sums them up.
7 | - This linear combination is then passed through a function called the logistic function (also known as the sigmoid function).
8 |
9 | 2. **Logistic (Sigmoid) Function:**
10 | - The logistic function is an S-shaped curve that maps any real-valued number to the range [0, 1].
11 | - It is defined as: $$\sigma(z) = \frac{1}{1 + e^{-z}}$$
12 | - Where $z$ is the linear combination of input features and weights.
13 | - The output of the logistic function represents the probability that an observation belongs to the positive class (class 1).
14 |
15 | 3. **Decision Boundary:**
16 | - The logistic function outputs probabilities between 0 and 1. To make a binary decision, a threshold is applied (commonly 0.5).
17 | - If the predicted probability is above the threshold, the model predicts class 1; otherwise, it predicts class 0.
18 | - The decision boundary is the point at which the predicted probability equals the threshold.
19 |
20 | 4. **Training:**
21 | - During training, the model adjusts the weights to minimize the difference between predicted probabilities and actual class labels.
22 | - This is typically done using optimization algorithms such as gradient descent.
23 |
24 | #### Key Points:
25 |
26 | - Logistic regression is used for binary classification tasks.
27 | - It models the probability that an observation belongs to a particular class.
28 | - The logistic function maps the linear combination of features and weights to a probability between 0 and 1.
29 | - The decision boundary separates the classes based on the threshold probability.
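
The sketch below shows these pieces in scikit-learn: the model outputs probabilities via the sigmoid, and `predict` applies the default 0.5 threshold. The breast-cancer dataset is just an illustrative choice.

```python
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

clf = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
clf.fit(X_train, y_train)

# Probabilities for class 1 (sigmoid output) and the thresholded labels
print("P(class 1):", clf.predict_proba(X_test[:3])[:, 1].round(3))
print("Predicted :", clf.predict(X_test[:3]))
print("Test accuracy:", clf.score(X_test, y_test))
```
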
--------------------------------------------------------------------------------
/Concepts/ML Models/Classification/Naive Bayes.md:
--------------------------------------------------------------------------------
1 | Naive Bayes is a simple yet effective probabilistic classifier based on Bayes' theorem with the "naive" assumption of feature independence. Despite its simplicity, Naive Bayes is widely used for text classification and other tasks in machine learning.
2 |
3 | #### How It Works:
4 |
5 | 1. **Bayes' Theorem:**
6 | - Naive Bayes is based on Bayes' theorem, which describes the probability of a hypothesis given some evidence. Mathematically, it is represented as:
7 |
8 | $$P(y|x) = \frac{P(x|y) \times P(y)}{P(x)}$$
9 |
10 | Where:
11 | - $P(y|x)$ is the posterior probability of class y given predictor x.
12 | - $P(x|y)$ is the likelihood, the probability of predictor x given class y.
13 | - $P(y)$ is the prior probability of class y.
14 | - $P(x)$ is the probability of predictor x.
15 |
16 | 2. **Naive Assumption:**
17 | - Naive Bayes assumes that all features are conditionally independent given the class label. This means that the presence of a particular feature in a class is unrelated to the presence of any other feature.
18 | - Despite this simplifying assumption, Naive Bayes often performs well in practice, especially for text classification tasks.
19 |
20 | 3. **Classification:**
21 | - To classify a new instance, Naive Bayes calculates the posterior probability of each class given the instance's features using Bayes' theorem.
22 | - The class with the highest posterior probability is predicted as the final class label.
23 |
24 | 4. **Types of Naive Bayes:**
25 | - **Gaussian Naive Bayes:** Assumes that continuous features follow a Gaussian distribution.
26 | - **Multinomial Naive Bayes:** Suitable for text classification with discrete features (e.g., word counts).
27 | - **Bernoulli Naive Bayes:** Assumes binary or boolean features.
28 |
29 | #### Key Points:
30 |
31 | - Naive Bayes is a probabilistic classifier based on Bayes' theorem with the assumption of feature independence.
32 | - It is simple, easy to implement, and computationally efficient.
33 | - Naive Bayes works well with high-dimensional data, such as text documents.
34 | - It performs best when the naive assumption holds reasonably well and when classes are well-separated.
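
As an illustration of the multinomial variant on word-count features, here is a minimal sketch; the tiny corpus and its labels are made up.

```python
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

# Made-up corpus: 1 = space-related, 0 = baseball-related
texts = [
    "the rocket reached orbit",
    "launch window for the satellite",
    "the pitcher threw a fastball",
    "home run in the ninth inning",
]
labels = [1, 1, 0, 0]

# CountVectorizer turns text into word counts; MultinomialNB applies Bayes' theorem
clf = make_pipeline(CountVectorizer(), MultinomialNB())
clf.fit(texts, labels)

print(clf.predict(["orbit of the satellite", "fastball down the middle"]))
```
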
--------------------------------------------------------------------------------
/Concepts/ML Models/Classification/Random Forest.md:
--------------------------------------------------------------------------------
1 | Random Forest is a popular ensemble learning method used for both classification and regression tasks. It builds multiple decision trees and merges their predictions to improve accuracy and reduce overfitting.
2 |
3 | #### How It Works:
4 |
5 | 1. **Building Multiple Trees:**
6 | - Random Forest builds a collection of decision trees, where each tree is trained on a random subset of the training data.
7 | - The randomness comes from two sources:
8 | - **Bootstrap Sampling:** Each tree is trained on a bootstrap sample of the original dataset. Bootstrap sampling involves randomly selecting samples from the dataset with replacement.
9 | - **Feature Randomization:** At each node of the tree, a random subset of features is considered for splitting. This helps in decorrelating the trees.
10 |
11 | 2. **Voting or Averaging:**
12 | - For classification tasks, each tree in the forest predicts the class label of a new instance, and the class that receives the most votes (mode) among all trees is chosen as the final prediction.
13 | - For regression tasks, the predictions of all trees are averaged to obtain the final prediction.
14 |
15 | 3. **Reducing Overfitting:**
16 | - Random Forest reduces overfitting by averaging the predictions of multiple trees.
17 | - Additionally, by training each tree on a random subset of the data and considering only a subset of features at each split, it introduces diversity among the trees, which helps in generalization.
18 |
19 | #### Key Points:
20 |
21 | - Random Forest is an ensemble learning method that builds multiple decision trees and combines their predictions.
22 | - It introduces randomness in the training process through bootstrap sampling and feature randomization to reduce overfitting.
23 | - Random Forest is robust to noisy data and tends to generalize well.
24 | - It can handle large datasets with high dimensionality.
25 | - Random Forest is less interpretable compared to individual decision trees but still provides insights into feature importance.
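
A minimal scikit-learn sketch of the idea: many trees trained on bootstrap samples with random feature subsets, combined by majority vote. The dataset and `n_estimators=200` are illustrative choices.

```python
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# Each tree sees a bootstrap sample and sqrt(n_features) candidate features per split
forest = RandomForestClassifier(n_estimators=200, max_features="sqrt", random_state=42)
forest.fit(X_train, y_train)

print("Test accuracy:", forest.score(X_test, y_test))
print("Most important feature index:", forest.feature_importances_.argmax())
```
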
--------------------------------------------------------------------------------
/Concepts/ML Models/Classification/SVM.md:
--------------------------------------------------------------------------------
1 | Support Vector Machines (SVM) is a powerful supervised learning algorithm used for classification and regression tasks. SVM finds the optimal hyperplane that best separates data points into different classes in a high-dimensional space.
2 |
3 | #### How It Works:
4 |
5 | 1. **Separating Hyperplane:**
6 | - In a binary classification task, SVM aims to find the hyperplane that best separates data points belonging to different classes.
7 | - A hyperplane in an n-dimensional space is a flat affine subspace of dimension n-1. In two dimensions, it's simply a line, and in three dimensions, it's a plane.
8 |
9 | 2. **Maximizing Margin:**
10 | - SVM finds the hyperplane that maximizes the margin, which is the distance between the hyperplane and the nearest data points (support vectors) from each class.
11 | - The support vectors are the data points closest to the decision boundary (hyperplane) and play a crucial role in defining the optimal hyperplane.
12 |
13 | 3. **Kernel Trick:**
14 | - SVM can efficiently handle non-linear decision boundaries in the input space by using a technique called the kernel trick.
15 | - The kernel function computes the dot product of two vectors in a higher-dimensional feature space without explicitly mapping the data into that space.
16 | - Common kernel functions include linear, polynomial, radial basis function (RBF), and sigmoid kernels.
17 |
18 | 4. **Regularization Parameter:**
19 | - SVM has a regularization parameter (C) that controls the trade-off between maximizing the margin and minimizing the classification error.
20 | - A smaller value of C allows for a larger margin but may lead to more misclassifications, while a larger value of C results in a smaller margin but fewer misclassifications.
21 |
22 | #### Key Points:
23 |
24 | - SVM is effective for both linear and non-linear classification tasks.
25 | - It finds the optimal hyperplane that maximizes the margin between classes.
26 | - SVM is sensitive to the choice of the kernel function and regularization parameter.
27 | - It works well with high-dimensional data and can handle datasets with more features than samples.
28 | - SVM is widely used in various fields, including image classification, text classification, and bioinformatics.
29 |
30 | #### Applications:
31 |
32 | - Image classification in computer vision.
33 | - Text categorization in natural language processing.
34 | - Handwriting recognition.
35 | - Cancer diagnosis based on gene expression data.
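
A minimal sketch with scikit-learn's SVC; the RBF kernel and `C=1.0` are illustrative defaults, and features are scaled because SVMs are sensitive to feature magnitude.

```python
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# kernel picks the decision-boundary shape; C trades margin width against misclassifications
svm = make_pipeline(StandardScaler(), SVC(kernel="rbf", C=1.0, gamma="scale"))
svm.fit(X_train, y_train)

print("Test accuracy:", svm.score(X_test, y_test))
```
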
--------------------------------------------------------------------------------
/Concepts/ML Models/Classification/xgboost.md:
--------------------------------------------------------------------------------
1 | XGBoost, short for Extreme Gradient Boosting, is an advanced implementation of the gradient boosting algorithm. It is highly efficient, scalable, and widely used in machine learning competitions and real-world applications due to its exceptional performance.
2 |
3 | #### How It Works:
4 |
5 | 1. **Gradient Boosting:**
6 | - XGBoost belongs to the family of ensemble learning methods known as gradient boosting algorithms.
7 | - Gradient boosting combines multiple weak learners (typically decision trees) sequentially to improve the model's predictive performance.
8 |
9 | 2. **Objective Function:**
10 | - XGBoost minimizes a differentiable loss function by iteratively adding weak learners to the ensemble.
11 | - It uses a regularized objective function that includes both a loss term (measuring prediction error) and a regularization term (preventing overfitting).
12 |
13 | 3. **Boosting Trees:**
14 | - XGBoost builds decision trees as base learners in a sequential manner.
15 | - Each new tree is trained to correct the errors made by the existing ensemble of trees.
16 | - Trees are added to the ensemble iteratively, with each tree focusing on the residuals (errors) of the previous ensemble.
17 |
18 | 4. **Regularization:**
19 | - XGBoost incorporates L1 (Lasso) and L2 (Ridge) regularization techniques to control model complexity and prevent overfitting.
20 | - Regularization penalizes large coefficients in the trees, encouraging simplicity and improving generalization.
21 |
22 | 5. **Tree Pruning:**
23 | - XGBoost employs tree pruning techniques to remove unnecessary branches from the trees, reducing complexity and improving efficiency.
24 | - Pruning helps prevent overfitting and makes the model more interpretable.
25 |
26 | 6. **Parallel and GPU Computing:**
27 | - XGBoost is highly optimized for parallel and distributed computing, allowing it to leverage multiple cores and parallelize tree construction.
28 | - It also supports GPU acceleration, further enhancing its speed and scalability.
29 |
30 | #### Key Points:
31 |
32 | - XGBoost is an advanced implementation of the gradient boosting algorithm, known for its efficiency, scalability, and high performance.
33 | - It builds an ensemble of decision trees sequentially, with each tree correcting the errors of the previous ensemble.
34 | - XGBoost incorporates regularization, tree pruning, and parallel computing to improve model accuracy, prevent overfitting, and enhance efficiency.
35 | - XGBoost is widely used in various domains, including finance, healthcare, and e-commerce, for tasks such as regression, classification, and ranking.
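
The sketch below exercises a few of the knobs mentioned above (number of trees, learning rate, depth, L1/L2 regularization) on a small built-in dataset; all values are illustrative, not tuned.

```python
import xgboost as xgb  # pip install xgboost
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

clf = xgb.XGBClassifier(
    n_estimators=300,    # number of boosting rounds (trees)
    learning_rate=0.1,   # shrinkage applied to each tree's contribution
    max_depth=4,         # depth of each tree
    reg_alpha=0.0,       # L1 regularization strength
    reg_lambda=1.0,      # L2 regularization strength
    eval_metric="logloss",
)
clf.fit(X_train, y_train)
print("Test accuracy:", clf.score(X_test, y_test))
```
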
--------------------------------------------------------------------------------
/Concepts/ML Models/Regression/ElasticNet.md:
--------------------------------------------------------------------------------
1 | ElasticNet is a hybrid regularization technique that combines the penalties of both Ridge Regression (L2 regularization) and Lasso Regression (L1 regularization). It is used in linear regression to address multicollinearity, feature selection, and overfitting.
2 |
3 | #### How It Works:
4 |
5 | 1. **Objective Function:**
6 | - ElasticNet modifies the standard linear regression objective function by adding a combination of L1 and L2 penalties to the least squares term.
7 | - The objective function for ElasticNet is given by:
8 | $$\text{minimize } J(\theta) = \sum_{i=1}^{n} (y_i - \hat{y}_i)^2 + \alpha \left( \lambda ||\theta||_1 + (1 - \lambda) ||\theta||_2^2 \right)$$
9 | where:
10 | - $J(\theta)$ is the cost function to be minimized.
11 | - $\theta$ are the regression coefficients.
12 | - $\hat{y}_i$ are the predicted values.
13 | - $\alpha$ is the regularization parameter that controls the overall strength of regularization.
14 | - $\lambda$ is the mixing parameter that controls the balance between L1 and L2 penalties.
15 |
16 | 2. **Balancing L1 and L2 Penalties:**
17 | - The mixing parameter $\lambda$ determines the balance between the L1 (Lasso) and L2 (Ridge) penalties.
18 | - When $\lambda = 0$, ElasticNet reduces to Ridge Regression, and when $\lambda = 1$, it reduces to Lasso Regression.
19 | - Intermediate values of $\lambda$ allow for a mixture of L1 and L2 penalties, providing more flexibility in the regularization process.
20 |
21 | 3. **Feature Selection and Shrinkage:**
22 | - ElasticNet encourages sparsity in the coefficient vector by setting some coefficients to zero (similar to Lasso Regression).
23 | - It also shrinks the coefficients towards zero to prevent overfitting (similar to Ridge Regression).
24 | - The balance between feature selection and shrinkage is controlled by the mixing parameter $\lambda$.
25 |
26 | #### Key Points:
27 |
28 | - ElasticNet is a hybrid regularization technique that combines the penalties of Ridge Regression and Lasso Regression.
29 | - It addresses multicollinearity, feature selection, and overfitting in linear regression models.
30 | - The regularization parameters $\alpha$ and $\lambda$ control the overall strength of regularization and the balance between L1 and L2 penalties, respectively.
31 | - ElasticNet is particularly useful when dealing with high-dimensional datasets with correlated features.
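
A minimal scikit-learn sketch; note that scikit-learn parameterizes the penalty as `alpha` (overall strength) and `l1_ratio` (the L1/L2 mix, playing the role of the mixing parameter above). The synthetic data and values are illustrative.

```python
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import train_test_split

# Synthetic data where only 5 of 20 features actually matter
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                       noise=10.0, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# l1_ratio=1.0 would behave like Lasso, l1_ratio=0.0 like Ridge
enet = ElasticNet(alpha=1.0, l1_ratio=0.5)
enet.fit(X_train, y_train)

print("Test R^2:", enet.score(X_test, y_test))
print("Coefficients set to zero:", (enet.coef_ == 0).sum(), "of", enet.coef_.size)
```
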
--------------------------------------------------------------------------------
/Concepts/ML Models/Regression/Gradient Boosting Regressor.md:
--------------------------------------------------------------------------------
1 | Gradient Boosting Regressor is a powerful ensemble learning technique used for regression tasks. It builds an ensemble of weak learners (typically decision trees) sequentially, with each tree correcting the errors of the previous ensemble. Gradient Boosting Regressor is known for its high predictive accuracy and robustness to overfitting.
2 |
3 | #### How It Works:
4 |
5 | 1. **Boosting Ensemble:**
6 | - Gradient Boosting Regressor builds an ensemble of decision trees in a sequential manner.
7 | - Each new tree is trained to correct the errors (residuals) of the previous ensemble, focusing on the data points where the model performs poorly.
8 |
9 | 2. **Gradient Descent:**
10 | - Gradient Boosting Regressor minimizes a loss function by iteratively adding weak learners to the ensemble.
11 | - It uses gradient descent optimization to update the ensemble at each iteration, reducing the loss (e.g., mean squared error) between the predicted and actual values.
12 |
13 | 3. **Gradient Tree Boosting:**
14 | - Each weak learner in the ensemble is typically a shallow decision tree (also called a regression tree).
15 | - The decision trees are grown sequentially, with each tree fitting the negative gradient of the loss function.
16 | - The learning rate (shrinkage parameter) controls the contribution of each tree to the ensemble, preventing overfitting.
17 |
18 | 4. **Regularization:**
19 | - Gradient Boosting Regressor incorporates regularization techniques to prevent overfitting and improve generalization.
20 | - Common regularization techniques include limiting the maximum depth of the trees, adding a shrinkage parameter, and subsampling the training data.
21 |
22 | #### Key Points:
23 |
24 | - Gradient Boosting Regressor is an ensemble learning technique that builds an ensemble of weak learners (typically decision trees) sequentially.
25 | - It minimizes a loss function by iteratively adding weak learners that focus on correcting the errors of the previous ensemble.
26 | - Gradient Boosting Regressor is robust to overfitting and can capture complex relationships in the data.
27 | - Hyperparameters such as the number of trees, learning rate, maximum tree depth, and subsampling rate need to be tuned carefully to optimize performance.
28 | - Gradient Boosting Regressor is widely used in various domains for regression tasks, such as predicting house prices, forecasting sales, and estimating demand.
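
A minimal scikit-learn sketch of the ingredients above: shallow trees added sequentially, a learning rate shrinking each tree's contribution, and subsampling for regularization. The synthetic data and hyperparameters are illustrative.

```python
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=500, n_features=10, noise=15.0, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# Each shallow tree fits the residuals of the current ensemble
gbr = GradientBoostingRegressor(n_estimators=300, learning_rate=0.05,
                                max_depth=3, subsample=0.8, random_state=42)
gbr.fit(X_train, y_train)

print("Test R^2:", gbr.score(X_test, y_test))
```
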
--------------------------------------------------------------------------------
/Concepts/ML Models/Regression/HGBRT.md:
--------------------------------------------------------------------------------
1 | Histogram-based Gradient Boosting Regression Trees are an efficient and scalable variant of the traditional gradient boosting decision tree algorithms. They are designed to handle large datasets more effectively by using histograms to approximate the continuous feature values into discrete bins. This approach reduces the computational complexity and speeds up the training process. HGBRT models are used for regression tasks where the goal is to predict a continuous value.
2 |
3 | ## Key Features
4 |
5 | - **Histogram-based Binning:** Continuous features are binned into discrete intervals, which reduces the number of split points to consider during tree building and significantly decreases computational cost.
6 |
7 | - **Gradient Boosting:** Utilizes the gradient boosting framework to build trees in a sequential manner where each tree attempts to correct the errors of the previous trees.
8 |
9 | - **Scalability:** The histogram approach makes the algorithm highly scalable, enabling it to handle large datasets efficiently.
10 |
11 | - **Handling of Missing Values:** Automatically handles missing values by considering them as a separate category or using surrogate splits.
12 |
13 | - **Regularization:** Includes several regularization techniques, such as learning rate (shrinkage) and subsampling, to prevent overfitting.
14 |
15 | ## Algorithm Overview
16 |
17 | 1. **Data Preprocessing:** Continuous features are divided into discrete bins to construct histograms. This reduces the granularity of feature values but significantly improves computational efficiency.
18 |
19 | 2. **Initial Model:** A simple model (e.g., the mean of the target variable) is used as the starting point.
20 |
21 | 3. **Sequential Tree Building:**
22 | - For each iteration, the algorithm builds a decision tree that predicts the gradients (errors) of the previous model.
23 | - The tree is constructed by selecting the best split based on the histograms of the feature values and the gradients.
24 | - After a tree is added, the model is updated, and its predictions are used to calculate new residuals or gradients for the next tree.
25 |
26 | 4. **Update Model:** The predictions from the new tree are scaled by a learning rate and added to the previous predictions to update the model.
27 |
28 | 5. **Regularization:** Techniques like learning rate adjustment and subsampling are applied to improve model robustness and prevent overfitting.
29 |
30 | 6. **Stop Criterion:** The process is repeated until a specified number of trees are built or no further improvement can be made.
31 |
32 | ## Advantages
33 |
34 | - **Efficiency:** Reduces memory usage and speeds up training, especially on large datasets.
35 |
36 | - **Accuracy:** Often achieves comparable or superior accuracy to traditional gradient boosting models due to more effective handling of continuous features and regularization options.
37 |
38 | - **Flexibility:** Can be used for both regression and classification tasks.
39 |
40 | ## Disadvantages
41 |
42 | - **Approximation:** The binning of continuous features into histograms can lead to a loss of information, which might affect model precision in some cases.
43 |
44 | - **Parameter Tuning:** Requires careful tuning of parameters, such as the number of bins, learning rate, and the number of trees, to achieve optimal performance.
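
A minimal sketch using scikit-learn's HistGradientBoostingRegressor, one common implementation of this idea; `max_bins` controls the histogram granularity. The synthetic data and settings are illustrative.

```python
from sklearn.datasets import make_regression
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=5000, n_features=20, noise=10.0, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

# Features are bucketed into at most max_bins histogram bins before split search;
# NaN values in X are handled natively by this estimator
hgb = HistGradientBoostingRegressor(max_iter=300, learning_rate=0.1,
                                    max_bins=255, random_state=42)
hgb.fit(X_train, y_train)

print("Test R^2:", hgb.score(X_test, y_test))
```
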
--------------------------------------------------------------------------------
/Concepts/ML Models/Regression/Lasso Regression.md:
--------------------------------------------------------------------------------
1 | Lasso Regression, short for Least Absolute Shrinkage and Selection Operator, is a variant of linear regression that introduces a penalty term to the standard least squares objective function. It is particularly useful for feature selection and when dealing with high-dimensional datasets.
2 |
3 | #### How It Works:
4 |
5 | 1. **Objective Function:**
6 | - Lasso Regression modifies the standard linear regression objective function by adding a regularization term, also known as the Lasso penalty or L1 penalty.
7 | - The objective function for Lasso Regression is given by:
8 | $$\text{minimize } J(\theta) = \sum_{i=1}^{n} (y_i - \hat{y}_i)^2 + \alpha \sum_{j=1}^{p} |\theta_j|$$
9 | where:
10 | - $J(\theta)$ is the cost function to be minimized.
11 | - $\theta_j$ are the regression coefficients.
12 | - $\hat{y}_i$ are the predicted values.
13 | - $\alpha$ is the regularization parameter (also known as the Lasso parameter).
14 | - The second term is the regularization term, penalizing the absolute values of the coefficients $|\theta_j|$.
15 |
16 | 2. **Feature Selection:**
17 | - Lasso Regression encourages sparsity in the coefficient vector by driving some coefficients to exactly zero.
18 | - This property makes Lasso Regression useful for feature selection, as it automatically selects the most important features while setting the coefficients of irrelevant features to zero.
19 | - The choice of $\alpha$ determines the level of sparsity in the model.
20 |
21 | 3. **Shrinkage Effect:**
22 | - Similar to Ridge Regression, Lasso Regression also shrinks the coefficients towards zero to prevent overfitting.
23 | - However, unlike Ridge Regression, Lasso Regression performs variable selection by setting some coefficients exactly to zero, effectively removing those features from the model.
24 |
25 | #### Key Points:
26 |
27 | - Lasso Regression is a linear regression technique that introduces a penalty term to the least squares objective function to prevent overfitting and encourage sparsity.
28 | - It is particularly useful for feature selection and dealing with high-dimensional datasets.
29 | - The regularization parameter $\alpha$ controls the strength of the penalty, with larger values leading to greater shrinkage and sparsity.
30 | - Lasso Regression can be used to automatically select the most relevant features while ignoring irrelevant ones.
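31 | 
32 | #### Example:
33 | 
34 | A minimal sketch using scikit-learn's `Lasso`; the toy data and the choice of `alpha` are illustrative assumptions.
35 | 
36 | ```python
37 | import numpy as np
38 | from sklearn.linear_model import Lasso
39 | 
40 | # Toy data: only the first of five features actually drives the target
41 | rng = np.random.default_rng(0)
42 | X = rng.normal(size=(100, 5))
43 | y = 3.0 * X[:, 0] + rng.normal(scale=0.1, size=100)
44 | 
45 | # alpha is the regularization strength from the objective function above
46 | lasso = Lasso(alpha=0.1)
47 | lasso.fit(X, y)
48 | 
49 | print(lasso.coef_)  # coefficients of the irrelevant features are driven to (or near) zero
50 | ```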
--------------------------------------------------------------------------------
/Concepts/ML Models/Regression/Linear Regression.md:
--------------------------------------------------------------------------------
1 | Linear Regression is a fundamental statistical method used for modeling the relationship between a dependent variable (target) and one or more independent variables (features). It assumes a linear relationship between the independent variables and the dependent variable.
2 |
3 | #### How It Works:
4 |
5 | 1. **Simple Linear Regression:**
6 | - In simple linear regression, there is only one independent variable (feature) that is used to predict the dependent variable.
7 | - The relationship between the independent variable (x) and dependent variable (y) is represented by a straight line equation:
8 | $$y = mx + b$$
9 | where:
10 | - $m$ is the slope of the line (the change in y for a unit change in x).
11 | - $b$ is the y-intercept (the value of y when x is zero).
12 |
13 | 2. **Multiple Linear Regression:**
14 | - In multiple linear regression, there are multiple independent variables (features) used to predict the dependent variable.
15 | - The relationship between multiple independent variables ($x_1, x_2, ..., x_n$) and the dependent variable (y) is represented by the equation:
16 | $$y = b_0 + b_1x_1 + b_2x_2 + ... + b_nx_n$$
17 | where:
18 | - $b_0$ is the intercept (the value of y when all independent variables are zero).
19 | - $b_1, b_2, ..., b_n$ are the coefficients (slopes) associated with each independent variable.
20 |
21 | 3. **Fitting the Model:**
22 | - The goal of linear regression is to find the best-fitting line (or hyperplane in higher dimensions) that minimizes the difference between the observed values and the values predicted by the model.
23 | - This is typically done by minimizing the sum of squared differences between the actual and predicted values, known as the least squares method.
24 |
25 | 4. **Interpreting Coefficients:**
26 | - The coefficients ($b_0, b_1, ..., b_n$) in the linear regression equation represent the relationship between each independent variable and the dependent variable.
27 | - A positive coefficient indicates a positive relationship, meaning that as the independent variable increases, the dependent variable also increases (and vice versa for negative coefficients).
28 |
29 | #### Key Points:
30 |
31 | - Linear regression models the relationship between independent variables and a dependent variable using a linear equation.
32 | - Simple linear regression involves one independent variable, while multiple linear regression involves multiple independent variables.
33 | - The coefficients in the regression equation represent the slope or effect of each independent variable on the dependent variable.
34 | - Linear regression is widely used for prediction, inference, and understanding the relationship between variables.
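35 | 
36 | #### Example:
37 | 
38 | A minimal sketch of simple linear regression with scikit-learn's `LinearRegression`; the toy data points are illustrative assumptions.
39 | 
40 | ```python
41 | import numpy as np
42 | from sklearn.linear_model import LinearRegression
43 | 
44 | # Toy data roughly following y = 2x + 1
45 | X = np.array([[1.0], [2.0], [3.0], [4.0], [5.0]])
46 | y = np.array([3.1, 4.9, 7.2, 9.0, 10.8])
47 | 
48 | model = LinearRegression()
49 | model.fit(X, y)
50 | 
51 | print("slope (m):", model.coef_[0])
52 | print("intercept (b):", model.intercept_)
53 | print("prediction for x = 6:", model.predict([[6.0]])[0])
54 | ```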
--------------------------------------------------------------------------------
/Concepts/ML Models/Regression/MLPRegressor.md:
--------------------------------------------------------------------------------
1 | `MLPRegressor` is a Multi-layer Perceptron (MLP) model for regression tasks. MLPs are a class of feedforward artificial neural networks consisting of at least three layers of nodes: an input layer, one or more hidden layers, and an output layer. `MLPRegressor` trains the network with backpropagation, making it suitable for modeling complex non-linear relationships when predicting continuous outcomes.
2 |
3 | ## Key Features
4 |
5 | - **Flexibility in Network Architecture:** Users can define multiple hidden layers and the number of neurons in each layer to customize the network architecture.
6 |
7 | - **Activation Functions:** Supports several activation functions for the hidden layers, such as `relu` (rectified linear unit), `tanh` (hyperbolic tangent), and `logistic` (sigmoid), allowing for non-linear transformations.
8 |
9 | - **Solver for Weight Optimization:** Offers different options for the solver used in weight optimization, including `sgd` (Stochastic Gradient Descent), `adam` (an algorithm based on adaptive estimation of first-order and second-order moments), and `lbfgs` (an optimizer in the family of quasi-Newton methods).
10 |
11 | - **Regularization:** Includes an L2 penalty on the network weights (controlled by the `alpha` parameter) to help prevent overfitting by discouraging large weights.
12 |
13 | - **Support for Multi-output Regression:** Can predict several continuous targets at once with a single network that has one output neuron per target, which is useful when multiple outcomes are predicted from the same set of inputs.
14 |
15 | ## Parameters
16 |
17 | Some of the key parameters for `MLPRegressor` include:
18 |
19 | - `hidden_layer_sizes`: Tuple specifying the size of the hidden layers.
20 | - `activation`: The activation function for the hidden layers.
21 | - `solver`: The solver for weight optimization.
22 | - `alpha`: L2 penalty (regularization term) parameter.
23 | - `batch_size`: Size of minibatches for stochastic optimizers.
24 | - `learning_rate`: Learning rate schedule for weight updates.
25 | - `max_iter`: Maximum number of iterations.
26 |
27 |
28 | ## Advantages
29 |
30 | - Capable of modeling complex non-linear relationships.
31 | - Flexible in terms of network configuration.
32 | - Suitable for a wide range of regression tasks.
33 |
34 | ## Disadvantages
35 |
36 | - Requires careful tuning of parameters and network architecture.
37 | - Prone to overfitting without proper regularization.
38 | - Can be computationally intensive, especially with large datasets and complex models.
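39 | 
40 | ## Example
41 | 
42 | A minimal sketch of `MLPRegressor` on synthetic data; the architecture and parameter values are illustrative assumptions. Inputs are standardized first because MLP training is sensitive to feature scale.
43 | 
44 | ```python
45 | from sklearn.datasets import make_regression
46 | from sklearn.model_selection import train_test_split
47 | from sklearn.neural_network import MLPRegressor
48 | from sklearn.pipeline import make_pipeline
49 | from sklearn.preprocessing import StandardScaler
50 | 
51 | X, y = make_regression(n_samples=500, n_features=8, noise=5.0, random_state=0)
52 | X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
53 | 
54 | # Two hidden layers (64 and 32 neurons), ReLU activations, adam solver
55 | mlp = make_pipeline(
56 |     StandardScaler(),
57 |     MLPRegressor(hidden_layer_sizes=(64, 32), activation="relu", solver="adam",
58 |                  alpha=1e-4, max_iter=1000, random_state=0),
59 | )
60 | mlp.fit(X_train, y_train)
61 | 
62 | print("R^2 on the test set:", mlp.score(X_test, y_test))
63 | ```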
--------------------------------------------------------------------------------
/Concepts/ML Models/Regression/Ridge Regression.md:
--------------------------------------------------------------------------------
1 | Ridge Regression is a variant of linear regression that addresses multicollinearity and overfitting by adding a penalty term to the standard least squares objective function. It is particularly useful when the feature space is high-dimensional or when there are correlated features.
2 |
3 | #### How It Works:
4 |
5 | 1. **Objective Function:**
6 | - Ridge Regression modifies the standard linear regression objective function by adding a regularization term, also known as the Ridge penalty or L2 penalty.
7 | - The objective function for Ridge Regression is given by:
8 | $$\text{minimize } J(\theta) = \sum_{i=1}^{n} (y_i - \hat{y}_i)^2 + \alpha \sum_{j=1}^{p} \theta_j^2$$
9 | where:
10 | - $J(\theta)$ is the cost function to be minimized.
11 | - $\theta_j$ are the regression coefficients.
12 | - $\hat{y}_i$ are the predicted values.
13 | - $\alpha$ is the regularization parameter (also known as the Ridge parameter).
14 | - The second term is the regularization term, penalizing large values of the coefficients $\theta_j$.
15 |
16 | 2. **Bias-Variance Tradeoff:**
17 | - The Ridge penalty shrinks the coefficients towards zero, reducing the variance of the model at the cost of introducing some bias.
18 | - Increasing the value of $\alpha$ increases the regularization strength, leading to greater shrinkage of the coefficients and a simpler model.
19 | - Choosing an appropriate value of $\alpha$ is important to balance the tradeoff between bias and variance.
20 |
21 | 3. **Feature Scaling:**
22 | - It is important to scale the features before applying Ridge Regression to ensure that all features contribute equally to the penalty term.
23 | - Standardizing or normalizing the features to have mean zero and unit variance is a common practice.
24 |
25 | #### Key Points:
26 |
27 | - Ridge Regression is a linear regression technique that adds a penalty term to the least squares objective function to prevent overfitting.
28 | - It is effective for handling multicollinearity and reducing the variance of the model.
29 | - The regularization parameter $\alpha$ controls the strength of the penalty, with larger values leading to greater shrinkage of coefficients.
30 | - Ridge Regression is suitable for high-dimensional datasets and situations where feature correlation is present.
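31 | 
32 | #### Example:
33 | 
34 | A minimal sketch with scikit-learn's `Ridge`, applied to two deliberately correlated features; the toy data and the `alpha` value are illustrative assumptions, and the features are standardized first as recommended above.
35 | 
36 | ```python
37 | import numpy as np
38 | from sklearn.linear_model import Ridge
39 | from sklearn.pipeline import make_pipeline
40 | from sklearn.preprocessing import StandardScaler
41 | 
42 | # Two highly correlated features (multicollinearity)
43 | rng = np.random.default_rng(0)
44 | x1 = rng.normal(size=200)
45 | x2 = x1 + rng.normal(scale=0.01, size=200)  # nearly identical to x1
46 | X = np.column_stack([x1, x2])
47 | y = 2.0 * x1 + rng.normal(scale=0.1, size=200)
48 | 
49 | # alpha controls the shrinkage strength
50 | model = make_pipeline(StandardScaler(), Ridge(alpha=1.0))
51 | model.fit(X, y)
52 | 
53 | print(model.named_steps["ridge"].coef_)  # the effect is shared across the correlated features
54 | ```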
--------------------------------------------------------------------------------
/Concepts/ML Models/Regression/SVR.md:
--------------------------------------------------------------------------------
1 | Support Vector Regression (SVR) is a variant of Support Vector Machines (SVM) used for regression tasks. Instead of finding the hyperplane that best separates classes with the largest margin, SVR finds a function that fits the data within a specified margin of tolerance (the $\epsilon$-tube) while keeping the model as simple (flat) as possible.
2 |
3 | #### How It Works:
4 |
5 | 1. **Hyperplane:**
6 |    - In SVR, the goal is to find the hyperplane around which as many data points as possible fall within a fixed margin of tolerance, while keeping the coefficients small so that the model stays simple.
7 | - The hyperplane is defined by the equation: $$y = \sum_{i=1}^{n} w_i \cdot x_i + b$$ where $y$ is the predicted output, $x_i$ are the input features, $w_i$ are the coefficients, and $b$ is the intercept.
8 |
9 | 2. **Margin of Tolerance:**
10 | - SVR introduces a margin of tolerance around the hyperplane, allowing some data points to lie within this margin without penalty.
11 | - The width of the margin is controlled by a hyperparameter $\epsilon$, which determines the acceptable deviation of the predicted value from the actual value.
12 |
13 | 3. **Loss Function:**
14 | - SVR minimizes a loss function that penalizes deviations from the hyperplane within the margin of tolerance.
15 | - The loss function typically includes a regularization term to control the complexity of the model and prevent overfitting.
16 |
17 | 4. **Kernel Trick:**
18 | - Similar to SVM, SVR can make use of the kernel trick to map the input features into a higher-dimensional space, where non-linear relationships can be captured.
19 | - Common kernel functions used in SVR include linear, polynomial, radial basis function (RBF), and sigmoid kernels.
20 |
21 | #### Key Points:
22 |
23 | - SVR is a regression technique based on the principles of Support Vector Machines (SVM).
24 | - It finds the hyperplane that best fits the data within a specified margin of tolerance.
25 | - SVR is effective for non-linear regression tasks, especially when combined with kernel functions.
26 | - The hyperparameters of SVR, including the margin of tolerance ($\epsilon$) and the regularization parameter, need to be tuned carefully to achieve optimal performance.
27 | - SVR is particularly useful for regression tasks with complex relationships and outliers.
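28 | 
29 | #### Example:
30 | 
31 | A minimal sketch with scikit-learn's `SVR` on a noisy sine wave; the data, kernel choice, and hyperparameter values are illustrative assumptions.
32 | 
33 | ```python
34 | import numpy as np
35 | from sklearn.pipeline import make_pipeline
36 | from sklearn.preprocessing import StandardScaler
37 | from sklearn.svm import SVR
38 | 
39 | # Noisy sine wave: a clearly non-linear relationship
40 | rng = np.random.default_rng(0)
41 | X = np.sort(rng.uniform(0, 6, size=(200, 1)), axis=0)
42 | y = np.sin(X).ravel() + rng.normal(scale=0.1, size=200)
43 | 
44 | # The RBF kernel captures the non-linearity; epsilon sets the width of the tolerance tube
45 | svr = make_pipeline(StandardScaler(), SVR(kernel="rbf", C=10.0, epsilon=0.1))
46 | svr.fit(X, y)
47 | 
48 | print("R^2 on the training data:", svr.score(X, y))
49 | ```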
--------------------------------------------------------------------------------
/Concepts/Other/Gini Index.md:
--------------------------------------------------------------------------------
1 | The Gini index, or Gini impurity, is a measure of the disorder or impurity in a set of elements. It is used in decision trees, specifically in the CART (Classification and Regression Trees) algorithm, to evaluate splits in the dataset. The Gini index quantifies how often a randomly chosen element from the set would be incorrectly labeled if it were labeled at random according to the distribution of labels in the subset.
2 |
3 | The Gini index ranges from 0, which signifies perfect purity (all elements belong to the same class), up to a maximum of $1 - 1/m$ for $m$ classes (0.5 for a two-class problem); higher values indicate greater impurity. The formula for the Gini index is:
4 |
5 | $$ \text{Gini}(D) = 1 - \sum_{i=1}^{m} p_i^2 $$
6 |
7 | where:
8 | - $D$ is the dataset for a node,
9 | - $m$ is the number of classes,
10 | - $p_i$ is the ratio of class $i$ instances among the training instances in the dataset $D$.
11 |
12 | ### Numerical Example:
13 |
14 | Let's say we have a dataset $D$ of 10 instances with two classes: Yes (Y) and No (N). Suppose 6 instances belong to class Yes and 4 to class No.
15 |
16 | First, we calculate the proportions of each class in the dataset:
17 | - Proportion of Yes, $p_{\text{Y}} = \frac{6}{10} = 0.6$
18 | - Proportion of No, $p_{\text{N}} = \frac{4}{10} = 0.4$
19 |
20 | Next, we apply the formula for the Gini index:
21 | $$ \text{Gini}(D) = 1 - (p_{\text{Y}}^2 + p_{\text{N}}^2)$$
22 |
23 | $$\text{Gini}(D) = 1 - (0.6^2 + 0.4^2) $$
24 |
25 | $$\text{Gini}(D) = 1 - (0.36 + 0.16) $$
26 |
27 | $$\text{Gini}(D) = 1 - 0.52 $$
28 |
29 | $$\text{Gini}(D) = 0.48 $$
30 |
31 | Therefore, the Gini index for this dataset is 0.48, indicating a moderate level of impurity. In the context of decision trees, the split whose resulting partitions have the lowest weighted-average Gini index is preferred, as it yields the greatest increase in purity.
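32 | 
33 | ### Code Sketch:
34 | 
35 | A small helper that reproduces the calculation above from raw class counts; the function name is just an illustration.
36 | 
37 | ```python
38 | # Gini impurity from the class counts of a node
39 | def gini(counts):
40 |     total = sum(counts)
41 |     return 1.0 - sum((c / total) ** 2 for c in counts)
42 | 
43 | print(gini([6, 4]))  # 0.48, matching the worked example
44 | ```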
--------------------------------------------------------------------------------
/Concepts/Other/Randomized Search.md:
--------------------------------------------------------------------------------
1 | `RandomizedSearchCV` from `sklearn` is a method used for hyperparameter optimization, a key step in building and fine-tuning machine learning models. Unlike grid search, which exhaustively tries every combination of hyperparameters, `RandomizedSearchCV` samples a fixed number of parameter settings from specified distributions. Because the evaluation budget is fixed, it can explore a wider range of values per hyperparameter than a comparably sized grid, often finding equally good or better settings in high-dimensional search spaces while saving considerable time and computational resources.
2 |
3 | ### Methodology
4 |
5 | The core idea behind `RandomizedSearchCV` is quite straightforward:
6 |
7 | 1. **Define the Parameter Space:** Specify a range or distribution for each hyperparameter from which values can be randomly sampled. For instance, if you're tuning a decision tree, you might define a range for the maximum depth of the tree.
8 |
9 | 2. **Sampling:** Randomly select a fixed number of parameter combinations from the defined space.
10 |
11 | 3. **Evaluation:** For each combination of parameters, evaluate the model's performance using cross-validation. Cross-validation involves dividing the dataset into a certain number of "folds" or parts, training the model on some of these folds, and validating it on the remaining folds. This process is repeated until each fold has been used for validation once, ensuring that the model's performance is robust and not dependent on a particular train-test split.
12 |
13 | 4. **Select the Best:** Choose the parameter setting that results in the best performance according to a predefined metric, like accuracy for classification tasks.
14 |
15 | ### Numerical Example
16 |
17 | Let's consider a simple example with a hypothetical machine learning model where we aim to tune two hyperparameters: `learning_rate` and `max_depth`. For simplicity, we'll say `learning_rate` can range from 0.01 to 0.1, and `max_depth` can be 1, 2, or 3.
18 |
19 | 1. **Define the Parameter Space:**
20 | - `learning_rate`: Uniform distribution between [0.01, 0.1]
21 | - `max_depth`: Discrete values in the set {1, 2, 3}
22 |
23 | 2. **Sampling:**
24 | Suppose we decide to sample 3 combinations randomly:
25 | - Combination 1: `learning_rate` = 0.02, `max_depth` = 2
26 | - Combination 2: `learning_rate` = 0.05, `max_depth` = 3
27 | - Combination 3: `learning_rate` = 0.08, `max_depth` = 1
28 |
29 | 3. **Evaluation:**
30 | We evaluate each combination using 3-fold cross-validation and suppose we get the following accuracies:
31 | - Combination 1: 70%
32 | - Combination 2: 75%
33 | - Combination 3: 65%
34 |
35 | 4. **Select the Best:**
36 | The best performance comes from Combination 2 (`learning_rate` = 0.05, `max_depth` = 3) with an accuracy of 75%.
--------------------------------------------------------------------------------
/Concepts/Preprocessing/MinMaxScaler.md:
--------------------------------------------------------------------------------
1 | Suppose we have a dataset with a single feature (e.g., temperatures recorded over a week) as follows:
2 |
3 | | Day | Temperature (°C) |
4 | |-----------|------------------|
5 | | Monday | 10 |
6 | | Tuesday | 15 |
7 | | Wednesday | 20 |
8 | | Thursday | 30 |
9 | | Friday | 25 |
10 |
11 | ## Goal
12 |
13 | Our goal is to scale the `Temperature` feature to a range between 0 and 1.
14 |
15 | ## Step 1: Determine the Minimum and Maximum Values
16 |
17 | First, we find the minimum and maximum values of the `Temperature` feature.
18 |
19 | - **Minimum (Min)**: 10°C
20 | - **Maximum (Max)**: 30°C
21 |
22 | ## Step 2: Apply the MinMaxScaler Formula
23 |
24 | The MinMaxScaler rescales each feature to a given range (in this case, between 0 and 1) using the following formula:
25 |
26 | \[ \text{Scaled value} = \frac{\text{value} - \text{Min}}{\text{Max} - \text{Min}} \]
27 |
28 | ## Step 3: Calculate the Scaled Values
29 |
30 | Let's apply the formula to each temperature value:
31 |
32 | - **Monday**: \( \frac{10 - 10}{30 - 10} = 0 \)
33 | - **Tuesday**: \( \frac{15 - 10}{30 - 10} = 0.25 \)
34 | - **Wednesday**: \( \frac{20 - 10}{30 - 10} = 0.5 \)
35 | - **Thursday**: \( \frac{30 - 10}{30 - 10} = 1 \)
36 | - **Friday**: \( \frac{25 - 10}{30 - 10} = 0.75 \)
37 |
38 | ## Scaled Dataset
39 |
40 | After applying the `MinMaxScaler`, our dataset becomes:
41 |
42 | | Day | Scaled Temperature |
43 | |-----------|--------------------|
44 | | Monday | 0 |
45 | | Tuesday | 0.25 |
46 | | Wednesday | 0.5 |
47 | | Thursday | 1 |
48 | | Friday | 0.75 |
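49 | 
50 | ## Code Sketch
51 | 
52 | A minimal sketch that reproduces the scaling above with scikit-learn's `MinMaxScaler`; the array of temperatures comes straight from the table.
53 | 
54 | ```python
55 | import numpy as np
56 | from sklearn.preprocessing import MinMaxScaler
57 | 
58 | temperatures = np.array([[10.0], [15.0], [20.0], [30.0], [25.0]])  # Monday..Friday
59 | 
60 | scaler = MinMaxScaler(feature_range=(0, 1))
61 | scaled = scaler.fit_transform(temperatures)
62 | 
63 | print(scaled.ravel())  # [0.   0.25 0.5  1.   0.75]
64 | ```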
--------------------------------------------------------------------------------
/Concepts/Preprocessing/OneHotEncoder.md:
--------------------------------------------------------------------------------
1 | Suppose we have a dataset with a single categorical feature `Color`, which can take on three values: 'Red', 'Green', and 'Blue'. Here's a hypothetical dataset:
2 |
3 | | Example | Color |
4 | |---------|-------|
5 | | 1 | Red |
6 | | 2 | Green |
7 | | 3 | Blue |
8 | | 4 | Red |
9 |
10 | OneHotEncoder works by creating a binary column for each category and encoding the presence or absence of that category for each example. Here's how it works:
11 |
12 | 1. **Identify unique categories**: Identify all unique categories in the `Color` feature: 'Red', 'Green', and 'Blue'.
13 |
14 | 2. **Create binary columns**: Create a binary column for each unique category.
15 |
16 | 3. **Encode the presence or absence of each category**: For each example, set the corresponding binary column to 1 if the example belongs to that category, and 0 otherwise.
17 |
18 | Let's encode the `Color` feature using OneHotEncoder:
19 |
20 | | Example | Red | Green | Blue |
21 | |---------|-----|-------|------|
22 | | 1 | 1 | 0 | 0 |
23 | | 2 | 0 | 1 | 0 |
24 | | 3 | 0 | 0 | 1 |
25 | | 4 | 1 | 0 | 0 |
26 |
27 | As you can see, each unique category in the original `Color` feature has been transformed into a binary feature. For example, in the first row, the `Red` column is set to 1 to indicate that the color of the first example is 'Red', while the `Green` and `Blue` columns are set to 0 since those colors are not present in that example.
28 |
29 | OneHotEncoder is commonly used in machine learning models, especially when dealing with categorical data, as it allows algorithms to work with categorical variables directly without the need for manual label encoding, which could introduce unintended ordinal relationships between categories.
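30 | 
31 | A minimal sketch of the same encoding with scikit-learn's `OneHotEncoder`; the tiny DataFrame is the example table above, and `sparse_output=False` assumes scikit-learn 1.2 or newer (older versions use `sparse=False`).
32 | 
33 | ```python
34 | import pandas as pd
35 | from sklearn.preprocessing import OneHotEncoder
36 | 
37 | df = pd.DataFrame({"Color": ["Red", "Green", "Blue", "Red"]})
38 | 
39 | # sparse_output=False returns a plain (dense) NumPy array
40 | encoder = OneHotEncoder(sparse_output=False)
41 | encoded = encoder.fit_transform(df[["Color"]])
42 | 
43 | print(encoder.get_feature_names_out())  # ['Color_Blue' 'Color_Green' 'Color_Red']
44 | print(encoded)
45 | ```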
--------------------------------------------------------------------------------
/Concepts/Preprocessing/OrdinalEncoder.md:
--------------------------------------------------------------------------------
1 | Consider a dataset that includes the educational qualification of individuals as follows:
2 |
3 | | Person | Education Level |
4 | |--------|-------------------|
5 | | A | High School |
6 | | B | Bachelor's Degree |
7 | | C | Master's Degree |
8 | | D | Ph.D. |
9 | | E | Associate Degree |
10 |
11 | ## Goal
12 |
13 | Our goal is to encode the `Education Level` feature using `OrdinalEncoder` to represent the educational qualifications in a ranked integer format.
14 |
15 | ## Step 1: Define the Order
16 |
17 | First, we need to define the order or rank of the educational qualifications. For this example, we'll use the following order:
18 |
19 | 1. High School
20 | 2. Associate Degree
21 | 3. Bachelor's Degree
22 | 4. Master's Degree
23 | 5. Ph.D.
24 |
25 | ## Step 2: Apply the OrdinalEncoder
26 |
27 | Now, let's apply the `OrdinalEncoder` to transform the `Education Level` into integers based on the defined order:
28 |
29 | - **High School**: Encoded as 0
30 | - **Associate Degree**: Encoded as 1
31 | - **Bachelor's Degree**: Encoded as 2
32 | - **Master's Degree**: Encoded as 3
33 | - **Ph.D.**: Encoded as 4
34 |
35 | ## Encoded Dataset
36 |
37 | After applying the `OrdinalEncoder`, our dataset becomes:
38 |
39 | | Person | Education Level   | Encoded Education Level |
40 | |--------|-------------------|-------------------------|
41 | | A      | High School       | 0                       |
42 | | B      | Bachelor's Degree | 2                       |
43 | | C      | Master's Degree   | 3                       |
44 | | D      | Ph.D.             | 4                       |
45 | | E      | Associate Degree  | 1                       |
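46 | 
47 | ## Code Sketch
48 | 
49 | A minimal sketch of the same encoding with scikit-learn's `OrdinalEncoder`; passing the ranking through the `categories` parameter is what enforces the order defined in Step 1.
50 | 
51 | ```python
52 | import pandas as pd
53 | from sklearn.preprocessing import OrdinalEncoder
54 | 
55 | df = pd.DataFrame({"Education Level": [
56 |     "High School", "Bachelor's Degree", "Master's Degree", "Ph.D.", "Associate Degree"
57 | ]})
58 | 
59 | # Spell out the ranking explicitly so the integers reflect the intended order
60 | order = [["High School", "Associate Degree", "Bachelor's Degree", "Master's Degree", "Ph.D."]]
61 | encoder = OrdinalEncoder(categories=order)
62 | df["Encoded Education Level"] = encoder.fit_transform(df[["Education Level"]]).ravel()
63 | 
64 | print(df)
65 | ```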
--------------------------------------------------------------------------------
/Concepts/Preprocessing/QuantileTransformer.md:
--------------------------------------------------------------------------------
1 | Imagine we have a dataset with a single feature (e.g., scores from a test) as follows:
2 |
3 | | Student | Score |
4 | |---------|-------|
5 | | A | 20 |
6 | | B | 40 |
7 | | C | 60 |
8 | | D | 80 |
9 | | E | 100 |
10 |
11 | ## Goal
12 |
13 | Our objective is to transform the `Score` feature so that it follows a uniform distribution across its range.
14 |
15 | ## Step 1: Rank the Values
16 |
17 | First, we rank the scores from the smallest to the largest. In our simple case, the scores are already sorted:
18 |
19 | - 20, 40, 60, 80, 100
20 |
21 | These ranks help determine the quantiles of the data.
22 |
23 | ## Step 2: Apply the QuantileTransformer
24 |
25 | The `QuantileTransformer` will transform the scores so that they are evenly spread out across the range. In a uniform distribution, each value is equally likely, and the cumulative distribution function (CDF) is a straight line.
26 |
27 | Let's say we want to map these scores to a uniform distribution between 0 and 1. The transformation process involves:
28 |
29 | 1. Calculating the quantiles of the original data.
30 | 2. Mapping these quantiles to the corresponding quantiles of the uniform distribution.
31 |
32 | ## Step 3: Transforming Scores
33 |
34 | Given that our dataset has 5 scores, one simple convention assigns each score the midpoint quantile of its rank, (rank - 0.5) / 5, which gives:
35 |
36 | - **A (20)**: Mapped to the 0.1 quantile of the uniform distribution.
37 | - **B (40)**: Mapped to the 0.3 quantile.
38 | - **C (60)**: Mapped to the 0.5 quantile.
39 | - **D (80)**: Mapped to the 0.7 quantile.
40 | - **E (100)**: Mapped to the 0.9 quantile.
41 |
42 | This mapping ensures that the transformed data follows a uniform distribution.
43 |
44 | ## Transformed Dataset
45 |
46 | After applying the `QuantileTransformer`, the dataset might look like this, assuming a straightforward linear interpolation for illustration:
47 |
48 | | Student | Transformed Score |
49 | |---------|-------------------|
50 | | A | 0.1 |
51 | | B | 0.3 |
52 | | C | 0.5 |
53 | | D | 0.7 |
54 | | E | 0.9 |
55 |
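56 | ## Code Sketch
57 | 
58 | A minimal sketch with scikit-learn's `QuantileTransformer`; note that scikit-learn maps the minimum to 0 and the maximum to 1, so the exact outputs differ slightly from the midpoint-quantile illustration above.
59 | 
60 | ```python
61 | import numpy as np
62 | from sklearn.preprocessing import QuantileTransformer
63 | 
64 | scores = np.array([[20.0], [40.0], [60.0], [80.0], [100.0]])  # students A..E
65 | 
66 | # n_quantiles must not exceed the number of samples
67 | qt = QuantileTransformer(n_quantiles=5, output_distribution="uniform")
68 | transformed = qt.fit_transform(scores)
69 | 
70 | print(transformed.ravel())  # [0.   0.25 0.5  0.75 1.  ]
71 | ```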
--------------------------------------------------------------------------------
/Concepts/Preprocessing/SimpleImputer.md:
--------------------------------------------------------------------------------
1 | Suppose we have a dataset with a few missing values:
2 |
3 | | Example | Feature 1 | Feature 2 | Feature 3 |
4 | |---------|-----------|-----------|-----------|
5 | | 1 | 10 | 20 | NaN |
6 | | 2 | NaN | 25 | 30 |
7 | | 3 | 8 | NaN | 35 |
8 | | 4 | 12 | 18 | NaN |
9 |
10 | Here, `NaN` represents missing values.
11 |
12 | SimpleImputer works by replacing missing values with a specified strategy. The strategy could be mean, median, most frequent value, or a constant value. Let's use the mean strategy for this example.
13 |
14 | 1. **Calculate the mean**: Calculate the mean for each feature with missing values.
15 |
16 | Mean of `Feature 1` = (10 + 8 + 12) / 3 = 10
17 |
18 | Mean of `Feature 2` = (20 + 25 + 18) / 3 = 21
19 |
20 | Mean of `Feature 3` = (30 + 35) / 2 = 32.5
21 |
22 | 2. **Replace missing values**: Replace the missing values in each feature with its corresponding mean.
23 |
24 | Missing value in `Feature 1` replaced with 10
25 |
26 | Missing value in `Feature 2` replaced with 21
27 |
28 | Missing value in `Feature 3` replaced with 32.5
29 |
30 | After applying SimpleImputer with the mean strategy, the dataset becomes:
31 |
32 | | Example | Feature 1 | Feature 2 | Feature 3 |
33 | |---------|-----------|-----------|-----------|
34 | | 1 | 10 | 20 | 32.5 |
35 | | 2 | 10 | 25 | 30 |
36 | | 3 | 8 | 21 | 35 |
37 | | 4 | 12 | 18 | 32.5 |
38 |
39 | As you can see, the missing values have been replaced with the mean value of each respective feature. This ensures that the dataset is complete and ready for further analysis or modeling.
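40 | 
41 | A minimal sketch of the same imputation with scikit-learn's `SimpleImputer`; the array below is the example table, with `np.nan` standing in for the missing values.
42 | 
43 | ```python
44 | import numpy as np
45 | from sklearn.impute import SimpleImputer
46 | 
47 | X = np.array([
48 |     [10, 20, np.nan],
49 |     [np.nan, 25, 30],
50 |     [8, np.nan, 35],
51 |     [12, 18, np.nan],
52 | ])
53 | 
54 | imputer = SimpleImputer(strategy="mean")
55 | print(imputer.fit_transform(X))  # NaNs replaced by the column means 10.0, 21.0 and 32.5
56 | ```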
--------------------------------------------------------------------------------
/Concepts/Preprocessing/StandardScaler.md:
--------------------------------------------------------------------------------
1 | Suppose we have a dataset with two features: `Feature 1` and `Feature 2`. Here's a hypothetical dataset:
2 |
3 | | Example | Feature 1 | Feature 2 |
4 | |---------|-----------|-----------|
5 | | 1 | 10 | 20 |
6 | | 2 | 15 | 25 |
7 | | 3 | 8 | 30 |
8 | | 4 | 12 | 18 |
9 |
10 | StandardScaler works by transforming each feature such that it has a mean of 0 and a standard deviation of 1. Here's how it works:
11 |
12 | 1. **Calculate the mean**: For each feature, calculate the mean value.
13 |
14 | Mean of `Feature 1` = (10 + 15 + 8 + 12) / 4 = 11.25
15 |
16 | Mean of `Feature 2` = (20 + 25 + 30 + 18) / 4 = 23.25
17 |
18 | 2. **Calculate the standard deviation**: For each feature, calculate the standard deviation.
19 |
20 | Standard deviation of `Feature 1` = sqrt(((10-11.25)^2 + (15-11.25)^2 + (8-11.25)^2 + (12-11.25)^2) / 4)
21 |
22 | Standard deviation of `Feature 2` = sqrt(((20-23.25)^2 + (25-23.25)^2 + (30-23.25)^2 + (18-23.25)^2) / 4)
23 |
24 | 3. **Scale the features**: Subtract the mean from each feature value and divide by the standard deviation.
25 |
26 | Standardized value of `Feature 1` = (10 - 11.25) / standard deviation of `Feature 1`
27 |
28 | Standardized value of `Feature 2` = (20 - 23.25) / standard deviation of `Feature 2`
29 |
30 | Working through these calculations gives standard deviations of roughly 2.59 for `Feature 1` and 4.66 for `Feature 2`; to keep the arithmetic easy to follow, we round them to 2.5 and 4.0. The standardized values would then be:
31 |
32 | | Example | Standardized Feature 1 | Standardized Feature 2 |
33 | |---------|------------------------|------------------------|
34 | | 1 | (10 - 11.25) / 2.5 = -0.50 | (20 - 23.25) / 4.0 = -0.81 |
35 | | 2 | (15 - 11.25) / 2.5 = 1.50 | (25 - 23.25) / 4.0 = 0.44 |
36 | | 3 | (8 - 11.25) / 2.5 = -1.30 | (30 - 23.25) / 4.0 = 1.69 |
37 | | 4 | (12 - 11.25) / 2.5 = 0.30 | (18 - 23.25) / 4.0 = -1.31 |
38 |
39 | As you can see, after standard scaling, each feature has a mean of 0 and a standard deviation of 1. This scaling process ensures that each feature contributes equally to the analysis, preventing features with larger scales from dominating the others.
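40 | 
41 | A minimal sketch of the same standardization with scikit-learn's `StandardScaler`; note that it uses the population standard deviation (dividing by n), which matches the formula above and gives roughly 2.59 and 4.66 rather than the rounded 2.5 and 4.0.
42 | 
43 | ```python
44 | import numpy as np
45 | from sklearn.preprocessing import StandardScaler
46 | 
47 | X = np.array([
48 |     [10.0, 20.0],
49 |     [15.0, 25.0],
50 |     [8.0, 30.0],
51 |     [12.0, 18.0],
52 | ])
53 | 
54 | scaler = StandardScaler()
55 | scaled = scaler.fit_transform(X)
56 | 
57 | print(scaler.mean_)   # [11.25 23.25]
58 | print(scaler.scale_)  # roughly [2.59 4.66]
59 | print(scaled)         # each column now has mean 0 and standard deviation 1
60 | ```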
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Machine Learning with Python
2 |
3 | Welcome to my repository designed to introduce and teach Machine Learning (ML) using Python, focusing on both regression and classification models. This resource is crafted to be a comprehensive guide for learners at all levels who are interested in understanding and applying fundamental ML techniques in their projects.
4 |
5 | ## Topics Covered
6 |
7 | ### 📈 Regression Models
8 | - **Introduction to Regression:** Understand the concept, purpose, and types of regression in ML.
9 | - **Regression Techniques:** Dive into various regression models including Linear Regression, Ridge, Lasso, ElasticNet, Support Vector Regression (SVR), Decision Tree Regressor, Random Forest Regressor, and Gradient Boosting Regressor.
10 |
11 | ### 📊 Classification Models
12 | - **Introduction to Classification:** Learn about the basics of classification, its significance, and its applications in ML.
13 | - **Classification Techniques:** Explore different classification algorithms such as Logistic Regression, K-Nearest Neighbors (KNN), Support Vector Machines (SVM), Decision Trees, Random Forest, and Gradient Boosting Machines (GBM).
14 |
15 | ### Common Topics for Both
16 | - **Data Preprocessing:** Techniques for loading, understanding, and preparing data.
17 | - **Modeling Approach:** Insights into the modeling process, from selecting models to tuning parameters.
18 | - **Evaluation Metrics:** Discussion on various metrics like MAE, MSE, RMSE, R-Squared for regression, and accuracy, precision, recall, F1 score for classification.
19 | - **Model Optimization:** An overview of optimization techniques including Grid Search and Random Search for model selection.
20 |
21 | ## Activities Included
22 |
23 | - **Practical Exercises:** Step-by-step guides for data loading, preprocessing, model training, prediction, and evaluation.
24 | - **Theoretical Foundations:** Provides a deeper understanding of each ML concept through concise theoretical insights.
25 | - **Data Visualization:** Demonstrates techniques for visualizing data, model predictions, and performance metrics effectively.
26 |
27 | ## Upcoming Contents
28 |
29 | - **Unsupervised Learning Models:** We plan to include topics on clustering and dimensionality reduction techniques.
30 | - **Advanced ML Topics:** Future updates will delve into neural networks, deep learning, and reinforcement learning to cater to advanced learners.
31 |
32 | Stay tuned for more, and happy learning!
--------------------------------------------------------------------------------
/data/housing.csv:
--------------------------------------------------------------------------------
1 | 0.00632 18.00 2.310 0 0.5380 6.5750 65.20 4.0900 1 296.0 15.30 396.90 4.98 24.00
2 | 0.02731 0.00 7.070 0 0.4690 6.4210 78.90 4.9671 2 242.0 17.80 396.90 9.14 21.60
3 | 0.02729 0.00 7.070 0 0.4690 7.1850 61.10 4.9671 2 242.0 17.80 392.83 4.03 34.70
4 | 0.03237 0.00 2.180 0 0.4580 6.9980 45.80 6.0622 3 222.0 18.70 394.63 2.94 33.40
5 | 0.06905 0.00 2.180 0 0.4580 7.1470 54.20 6.0622 3 222.0 18.70 396.90 5.33 36.20
6 | 0.02985 0.00 2.180 0 0.4580 6.4300 58.70 6.0622 3 222.0 18.70 394.12 5.21 28.70
7 | 0.08829 12.50 7.870 0 0.5240 6.0120 66.60 5.5605 5 311.0 15.20 395.60 12.43 22.90
8 | 0.14455 12.50 7.870 0 0.5240 6.1720 96.10 5.9505 5 311.0 15.20 396.90 19.15 27.10
9 | 0.21124 12.50 7.870 0 0.5240 5.6310 100.00 6.0821 5 311.0 15.20 386.63 29.93 16.50
10 | 0.17004 12.50 7.870 0 0.5240 6.0040 85.90 6.5921 5 311.0 15.20 386.71 17.10 18.90
11 | 0.22489 12.50 7.870 0 0.5240 6.3770 94.30 6.3467 5 311.0 15.20 392.52 20.45 15.00
12 | 0.11747 12.50 7.870 0 0.5240 6.0090 82.90 6.2267 5 311.0 15.20 396.90 13.27 18.90
13 | 0.09378 12.50 7.870 0 0.5240 5.8890 39.00 5.4509 5 311.0 15.20 390.50 15.71 21.70
14 | 0.62976 0.00 8.140 0 0.5380 5.9490 61.80 4.7075 4 307.0 21.00 396.90 8.26 20.40
15 | 0.63796 0.00 8.140 0 0.5380 6.0960 84.50 4.4619 4 307.0 21.00 380.02 10.26 18.20
16 | 0.62739 0.00 8.140 0 0.5380 5.8340 56.50 4.4986 4 307.0 21.00 395.62 8.47 19.90
17 | 1.05393 0.00 8.140 0 0.5380 5.9350 29.30 4.4986 4 307.0 21.00 386.85 6.58 23.10
18 | 0.78420 0.00 8.140 0 0.5380 5.9900 81.70 4.2579 4 307.0 21.00 386.75 14.67 17.50
19 | 0.80271 0.00 8.140 0 0.5380 5.4560 36.60 3.7965 4 307.0 21.00 288.99 11.69 20.20
20 | 0.72580 0.00 8.140 0 0.5380 5.7270 69.50 3.7965 4 307.0 21.00 390.95 11.28 18.20
21 | 1.25179 0.00 8.140 0 0.5380 5.5700 98.10 3.7979 4 307.0 21.00 376.57 21.02 13.60
22 | 0.85204 0.00 8.140 0 0.5380 5.9650 89.20 4.0123 4 307.0 21.00 392.53 13.83 19.60
23 | 1.23247 0.00 8.140 0 0.5380 6.1420 91.70 3.9769 4 307.0 21.00 396.90 18.72 15.20
24 | 0.98843 0.00 8.140 0 0.5380 5.8130 100.00 4.0952 4 307.0 21.00 394.54 19.88 14.50
25 | 0.75026 0.00 8.140 0 0.5380 5.9240 94.10 4.3996 4 307.0 21.00 394.33 16.30 15.60
26 | 0.84054 0.00 8.140 0 0.5380 5.5990 85.70 4.4546 4 307.0 21.00 303.42 16.51 13.90
27 | 0.67191 0.00 8.140 0 0.5380 5.8130 90.30 4.6820 4 307.0 21.00 376.88 14.81 16.60
28 | 0.95577 0.00 8.140 0 0.5380 6.0470 88.80 4.4534 4 307.0 21.00 306.38 17.28 14.80
29 | 0.77299 0.00 8.140 0 0.5380 6.4950 94.40 4.4547 4 307.0 21.00 387.94 12.80 18.40
30 | 1.00245 0.00 8.140 0 0.5380 6.6740 87.30 4.2390 4 307.0 21.00 380.23 11.98 21.00
31 | 1.13081 0.00 8.140 0 0.5380 5.7130 94.10 4.2330 4 307.0 21.00 360.17 22.60 12.70
32 | 1.35472 0.00 8.140 0 0.5380 6.0720 100.00 4.1750 4 307.0 21.00 376.73 13.04 14.50
33 | 1.38799 0.00 8.140 0 0.5380 5.9500 82.00 3.9900 4 307.0 21.00 232.60 27.71 13.20
34 | 1.15172 0.00 8.140 0 0.5380 5.7010 95.00 3.7872 4 307.0 21.00 358.77 18.35 13.10
35 | 1.61282 0.00 8.140 0 0.5380 6.0960 96.90 3.7598 4 307.0 21.00 248.31 20.34 13.50
36 | 0.06417 0.00 5.960 0 0.4990 5.9330 68.20 3.3603 5 279.0 19.20 396.90 9.68 18.90
37 | 0.09744 0.00 5.960 0 0.4990 5.8410 61.40 3.3779 5 279.0 19.20 377.56 11.41 20.00
38 | 0.08014 0.00 5.960 0 0.4990 5.8500 41.50 3.9342 5 279.0 19.20 396.90 8.77 21.00
39 | 0.17505 0.00 5.960 0 0.4990 5.9660 30.20 3.8473 5 279.0 19.20 393.43 10.13 24.70
40 | 0.02763 75.00 2.950 0 0.4280 6.5950 21.80 5.4011 3 252.0 18.30 395.63 4.32 30.80
41 | 0.03359 75.00 2.950 0 0.4280 7.0240 15.80 5.4011 3 252.0 18.30 395.62 1.98 34.90
42 | 0.12744 0.00 6.910 0 0.4480 6.7700 2.90 5.7209 3 233.0 17.90 385.41 4.84 26.60
43 | 0.14150 0.00 6.910 0 0.4480 6.1690 6.60 5.7209 3 233.0 17.90 383.37 5.81 25.30
44 | 0.15936 0.00 6.910 0 0.4480 6.2110 6.50 5.7209 3 233.0 17.90 394.46 7.44 24.70
45 | 0.12269 0.00 6.910 0 0.4480 6.0690 40.00 5.7209 3 233.0 17.90 389.39 9.55 21.20
46 | 0.17142 0.00 6.910 0 0.4480 5.6820 33.80 5.1004 3 233.0 17.90 396.90 10.21 19.30
47 | 0.18836 0.00 6.910 0 0.4480 5.7860 33.30 5.1004 3 233.0 17.90 396.90 14.15 20.00
48 | 0.22927 0.00 6.910 0 0.4480 6.0300 85.50 5.6894 3 233.0 17.90 392.74 18.80 16.60
49 | 0.25387 0.00 6.910 0 0.4480 5.3990 95.30 5.8700 3 233.0 17.90 396.90 30.81 14.40
50 | 0.21977 0.00 6.910 0 0.4480 5.6020 62.00 6.0877 3 233.0 17.90 396.90 16.20 19.40
51 | 0.08873 21.00 5.640 0 0.4390 5.9630 45.70 6.8147 4 243.0 16.80 395.56 13.45 19.70
52 | 0.04337 21.00 5.640 0 0.4390 6.1150 63.00 6.8147 4 243.0 16.80 393.97 9.43 20.50
53 | 0.05360 21.00 5.640 0 0.4390 6.5110 21.10 6.8147 4 243.0 16.80 396.90 5.28 25.00
54 | 0.04981 21.00 5.640 0 0.4390 5.9980 21.40 6.8147 4 243.0 16.80 396.90 8.43 23.40
55 | 0.01360 75.00 4.000 0 0.4100 5.8880 47.60 7.3197 3 469.0 21.10 396.90 14.80 18.90
56 | 0.01311 90.00 1.220 0 0.4030 7.2490 21.90 8.6966 5 226.0 17.90 395.93 4.81 35.40
57 | 0.02055 85.00 0.740 0 0.4100 6.3830 35.70 9.1876 2 313.0 17.30 396.90 5.77 24.70
58 | 0.01432 100.00 1.320 0 0.4110 6.8160 40.50 8.3248 5 256.0 15.10 392.90 3.95 31.60
59 | 0.15445 25.00 5.130 0 0.4530 6.1450 29.20 7.8148 8 284.0 19.70 390.68 6.86 23.30
60 | 0.10328 25.00 5.130 0 0.4530 5.9270 47.20 6.9320 8 284.0 19.70 396.90 9.22 19.60
61 | 0.14932 25.00 5.130 0 0.4530 5.7410 66.20 7.2254 8 284.0 19.70 395.11 13.15 18.70
62 | 0.17171 25.00 5.130 0 0.4530 5.9660 93.40 6.8185 8 284.0 19.70 378.08 14.44 16.00
63 | 0.11027 25.00 5.130 0 0.4530 6.4560 67.80 7.2255 8 284.0 19.70 396.90 6.73 22.20
64 | 0.12650 25.00 5.130 0 0.4530 6.7620 43.40 7.9809 8 284.0 19.70 395.58 9.50 25.00
65 | 0.01951 17.50 1.380 0 0.4161 7.1040 59.50 9.2229 3 216.0 18.60 393.24 8.05 33.00
66 | 0.03584 80.00 3.370 0 0.3980 6.2900 17.80 6.6115 4 337.0 16.10 396.90 4.67 23.50
67 | 0.04379 80.00 3.370 0 0.3980 5.7870 31.10 6.6115 4 337.0 16.10 396.90 10.24 19.40
68 | 0.05789 12.50 6.070 0 0.4090 5.8780 21.40 6.4980 4 345.0 18.90 396.21 8.10 22.00
69 | 0.13554 12.50 6.070 0 0.4090 5.5940 36.80 6.4980 4 345.0 18.90 396.90 13.09 17.40
70 | 0.12816 12.50 6.070 0 0.4090 5.8850 33.00 6.4980 4 345.0 18.90 396.90 8.79 20.90
71 | 0.08826 0.00 10.810 0 0.4130 6.4170 6.60 5.2873 4 305.0 19.20 383.73 6.72 24.20
72 | 0.15876 0.00 10.810 0 0.4130 5.9610 17.50 5.2873 4 305.0 19.20 376.94 9.88 21.70
73 | 0.09164 0.00 10.810 0 0.4130 6.0650 7.80 5.2873 4 305.0 19.20 390.91 5.52 22.80
74 | 0.19539 0.00 10.810 0 0.4130 6.2450 6.20 5.2873 4 305.0 19.20 377.17 7.54 23.40
75 | 0.07896 0.00 12.830 0 0.4370 6.2730 6.00 4.2515 5 398.0 18.70 394.92 6.78 24.10
76 | 0.09512 0.00 12.830 0 0.4370 6.2860 45.00 4.5026 5 398.0 18.70 383.23 8.94 21.40
77 | 0.10153 0.00 12.830 0 0.4370 6.2790 74.50 4.0522 5 398.0 18.70 373.66 11.97 20.00
78 | 0.08707 0.00 12.830 0 0.4370 6.1400 45.80 4.0905 5 398.0 18.70 386.96 10.27 20.80
79 | 0.05646 0.00 12.830 0 0.4370 6.2320 53.70 5.0141 5 398.0 18.70 386.40 12.34 21.20
80 | 0.08387 0.00 12.830 0 0.4370 5.8740 36.60 4.5026 5 398.0 18.70 396.06 9.10 20.30
81 | 0.04113 25.00 4.860 0 0.4260 6.7270 33.50 5.4007 4 281.0 19.00 396.90 5.29 28.00
82 | 0.04462 25.00 4.860 0 0.4260 6.6190 70.40 5.4007 4 281.0 19.00 395.63 7.22 23.90
83 | 0.03659 25.00 4.860 0 0.4260 6.3020 32.20 5.4007 4 281.0 19.00 396.90 6.72 24.80
84 | 0.03551 25.00 4.860 0 0.4260 6.1670 46.70 5.4007 4 281.0 19.00 390.64 7.51 22.90
85 | 0.05059 0.00 4.490 0 0.4490 6.3890 48.00 4.7794 3 247.0 18.50 396.90 9.62 23.90
86 | 0.05735 0.00 4.490 0 0.4490 6.6300 56.10 4.4377 3 247.0 18.50 392.30 6.53 26.60
87 | 0.05188 0.00 4.490 0 0.4490 6.0150 45.10 4.4272 3 247.0 18.50 395.99 12.86 22.50
88 | 0.07151 0.00 4.490 0 0.4490 6.1210 56.80 3.7476 3 247.0 18.50 395.15 8.44 22.20
89 | 0.05660 0.00 3.410 0 0.4890 7.0070 86.30 3.4217 2 270.0 17.80 396.90 5.50 23.60
90 | 0.05302 0.00 3.410 0 0.4890 7.0790 63.10 3.4145 2 270.0 17.80 396.06 5.70 28.70
91 | 0.04684 0.00 3.410 0 0.4890 6.4170 66.10 3.0923 2 270.0 17.80 392.18 8.81 22.60
92 | 0.03932 0.00 3.410 0 0.4890 6.4050 73.90 3.0921 2 270.0 17.80 393.55 8.20 22.00
93 | 0.04203 28.00 15.040 0 0.4640 6.4420 53.60 3.6659 4 270.0 18.20 395.01 8.16 22.90
94 | 0.02875 28.00 15.040 0 0.4640 6.2110 28.90 3.6659 4 270.0 18.20 396.33 6.21 25.00
95 | 0.04294 28.00 15.040 0 0.4640 6.2490 77.30 3.6150 4 270.0 18.20 396.90 10.59 20.60
96 | 0.12204 0.00 2.890 0 0.4450 6.6250 57.80 3.4952 2 276.0 18.00 357.98 6.65 28.40
97 | 0.11504 0.00 2.890 0 0.4450 6.1630 69.60 3.4952 2 276.0 18.00 391.83 11.34 21.40
98 | 0.12083 0.00 2.890 0 0.4450 8.0690 76.00 3.4952 2 276.0 18.00 396.90 4.21 38.70
99 | 0.08187 0.00 2.890 0 0.4450 7.8200 36.90 3.4952 2 276.0 18.00 393.53 3.57 43.80
100 | 0.06860 0.00 2.890 0 0.4450 7.4160 62.50 3.4952 2 276.0 18.00 396.90 6.19 33.20
101 | 0.14866 0.00 8.560 0 0.5200 6.7270 79.90 2.7778 5 384.0 20.90 394.76 9.42 27.50
102 | 0.11432 0.00 8.560 0 0.5200 6.7810 71.30 2.8561 5 384.0 20.90 395.58 7.67 26.50
103 | 0.22876 0.00 8.560 0 0.5200 6.4050 85.40 2.7147 5 384.0 20.90 70.80 10.63 18.60
104 | 0.21161 0.00 8.560 0 0.5200 6.1370 87.40 2.7147 5 384.0 20.90 394.47 13.44 19.30
105 | 0.13960 0.00 8.560 0 0.5200 6.1670 90.00 2.4210 5 384.0 20.90 392.69 12.33 20.10
106 | 0.13262 0.00 8.560 0 0.5200 5.8510 96.70 2.1069 5 384.0 20.90 394.05 16.47 19.50
107 | 0.17120 0.00 8.560 0 0.5200 5.8360 91.90 2.2110 5 384.0 20.90 395.67 18.66 19.50
108 | 0.13117 0.00 8.560 0 0.5200 6.1270 85.20 2.1224 5 384.0 20.90 387.69 14.09 20.40
109 | 0.12802 0.00 8.560 0 0.5200 6.4740 97.10 2.4329 5 384.0 20.90 395.24 12.27 19.80
110 | 0.26363 0.00 8.560 0 0.5200 6.2290 91.20 2.5451 5 384.0 20.90 391.23 15.55 19.40
111 | 0.10793 0.00 8.560 0 0.5200 6.1950 54.40 2.7778 5 384.0 20.90 393.49 13.00 21.70
112 | 0.10084 0.00 10.010 0 0.5470 6.7150 81.60 2.6775 6 432.0 17.80 395.59 10.16 22.80
113 | 0.12329 0.00 10.010 0 0.5470 5.9130 92.90 2.3534 6 432.0 17.80 394.95 16.21 18.80
114 | 0.22212 0.00 10.010 0 0.5470 6.0920 95.40 2.5480 6 432.0 17.80 396.90 17.09 18.70
115 | 0.14231 0.00 10.010 0 0.5470 6.2540 84.20 2.2565 6 432.0 17.80 388.74 10.45 18.50
116 | 0.17134 0.00 10.010 0 0.5470 5.9280 88.20 2.4631 6 432.0 17.80 344.91 15.76 18.30
117 | 0.13158 0.00 10.010 0 0.5470 6.1760 72.50 2.7301 6 432.0 17.80 393.30 12.04 21.20
118 | 0.15098 0.00 10.010 0 0.5470 6.0210 82.60 2.7474 6 432.0 17.80 394.51 10.30 19.20
119 | 0.13058 0.00 10.010 0 0.5470 5.8720 73.10 2.4775 6 432.0 17.80 338.63 15.37 20.40
120 | 0.14476 0.00 10.010 0 0.5470 5.7310 65.20 2.7592 6 432.0 17.80 391.50 13.61 19.30
121 | 0.06899 0.00 25.650 0 0.5810 5.8700 69.70 2.2577 2 188.0 19.10 389.15 14.37 22.00
122 | 0.07165 0.00 25.650 0 0.5810 6.0040 84.10 2.1974 2 188.0 19.10 377.67 14.27 20.30
123 | 0.09299 0.00 25.650 0 0.5810 5.9610 92.90 2.0869 2 188.0 19.10 378.09 17.93 20.50
124 | 0.15038 0.00 25.650 0 0.5810 5.8560 97.00 1.9444 2 188.0 19.10 370.31 25.41 17.30
125 | 0.09849 0.00 25.650 0 0.5810 5.8790 95.80 2.0063 2 188.0 19.10 379.38 17.58 18.80
126 | 0.16902 0.00 25.650 0 0.5810 5.9860 88.40 1.9929 2 188.0 19.10 385.02 14.81 21.40
127 | 0.38735 0.00 25.650 0 0.5810 5.6130 95.60 1.7572 2 188.0 19.10 359.29 27.26 15.70
128 | 0.25915 0.00 21.890 0 0.6240 5.6930 96.00 1.7883 4 437.0 21.20 392.11 17.19 16.20
129 | 0.32543 0.00 21.890 0 0.6240 6.4310 98.80 1.8125 4 437.0 21.20 396.90 15.39 18.00
130 | 0.88125 0.00 21.890 0 0.6240 5.6370 94.70 1.9799 4 437.0 21.20 396.90 18.34 14.30
131 | 0.34006 0.00 21.890 0 0.6240 6.4580 98.90 2.1185 4 437.0 21.20 395.04 12.60 19.20
132 | 1.19294 0.00 21.890 0 0.6240 6.3260 97.70 2.2710 4 437.0 21.20 396.90 12.26 19.60
133 | 0.59005 0.00 21.890 0 0.6240 6.3720 97.90 2.3274 4 437.0 21.20 385.76 11.12 23.00
134 | 0.32982 0.00 21.890 0 0.6240 5.8220 95.40 2.4699 4 437.0 21.20 388.69 15.03 18.40
135 | 0.97617 0.00 21.890 0 0.6240 5.7570 98.40 2.3460 4 437.0 21.20 262.76 17.31 15.60
136 | 0.55778 0.00 21.890 0 0.6240 6.3350 98.20 2.1107 4 437.0 21.20 394.67 16.96 18.10
137 | 0.32264 0.00 21.890 0 0.6240 5.9420 93.50 1.9669 4 437.0 21.20 378.25 16.90 17.40
138 | 0.35233 0.00 21.890 0 0.6240 6.4540 98.40 1.8498 4 437.0 21.20 394.08 14.59 17.10
139 | 0.24980 0.00 21.890 0 0.6240 5.8570 98.20 1.6686 4 437.0 21.20 392.04 21.32 13.30
140 | 0.54452 0.00 21.890 0 0.6240 6.1510 97.90 1.6687 4 437.0 21.20 396.90 18.46 17.80
141 | 0.29090 0.00 21.890 0 0.6240 6.1740 93.60 1.6119 4 437.0 21.20 388.08 24.16 14.00
142 | 1.62864 0.00 21.890 0 0.6240 5.0190 100.00 1.4394 4 437.0 21.20 396.90 34.41 14.40
143 | 3.32105 0.00 19.580 1 0.8710 5.4030 100.00 1.3216 5 403.0 14.70 396.90 26.82 13.40
144 | 4.09740 0.00 19.580 0 0.8710 5.4680 100.00 1.4118 5 403.0 14.70 396.90 26.42 15.60
145 | 2.77974 0.00 19.580 0 0.8710 4.9030 97.80 1.3459 5 403.0 14.70 396.90 29.29 11.80
146 | 2.37934 0.00 19.580 0 0.8710 6.1300 100.00 1.4191 5 403.0 14.70 172.91 27.80 13.80
147 | 2.15505 0.00 19.580 0 0.8710 5.6280 100.00 1.5166 5 403.0 14.70 169.27 16.65 15.60
148 | 2.36862 0.00 19.580 0 0.8710 4.9260 95.70 1.4608 5 403.0 14.70 391.71 29.53 14.60
149 | 2.33099 0.00 19.580 0 0.8710 5.1860 93.80 1.5296 5 403.0 14.70 356.99 28.32 17.80
150 | 2.73397 0.00 19.580 0 0.8710 5.5970 94.90 1.5257 5 403.0 14.70 351.85 21.45 15.40
151 | 1.65660 0.00 19.580 0 0.8710 6.1220 97.30 1.6180 5 403.0 14.70 372.80 14.10 21.50
152 | 1.49632 0.00 19.580 0 0.8710 5.4040 100.00 1.5916 5 403.0 14.70 341.60 13.28 19.60
153 | 1.12658 0.00 19.580 1 0.8710 5.0120 88.00 1.6102 5 403.0 14.70 343.28 12.12 15.30
154 | 2.14918 0.00 19.580 0 0.8710 5.7090 98.50 1.6232 5 403.0 14.70 261.95 15.79 19.40
155 | 1.41385 0.00 19.580 1 0.8710 6.1290 96.00 1.7494 5 403.0 14.70 321.02 15.12 17.00
156 | 3.53501 0.00 19.580 1 0.8710 6.1520 82.60 1.7455 5 403.0 14.70 88.01 15.02 15.60
157 | 2.44668 0.00 19.580 0 0.8710 5.2720 94.00 1.7364 5 403.0 14.70 88.63 16.14 13.10
158 | 1.22358 0.00 19.580 0 0.6050 6.9430 97.40 1.8773 5 403.0 14.70 363.43 4.59 41.30
159 | 1.34284 0.00 19.580 0 0.6050 6.0660 100.00 1.7573 5 403.0 14.70 353.89 6.43 24.30
160 | 1.42502 0.00 19.580 0 0.8710 6.5100 100.00 1.7659 5 403.0 14.70 364.31 7.39 23.30
161 | 1.27346 0.00 19.580 1 0.6050 6.2500 92.60 1.7984 5 403.0 14.70 338.92 5.50 27.00
162 | 1.46336 0.00 19.580 0 0.6050 7.4890 90.80 1.9709 5 403.0 14.70 374.43 1.73 50.00
163 | 1.83377 0.00 19.580 1 0.6050 7.8020 98.20 2.0407 5 403.0 14.70 389.61 1.92 50.00
164 | 1.51902 0.00 19.580 1 0.6050 8.3750 93.90 2.1620 5 403.0 14.70 388.45 3.32 50.00
165 | 2.24236 0.00 19.580 0 0.6050 5.8540 91.80 2.4220 5 403.0 14.70 395.11 11.64 22.70
166 | 2.92400 0.00 19.580 0 0.6050 6.1010 93.00 2.2834 5 403.0 14.70 240.16 9.81 25.00
167 | 2.01019 0.00 19.580 0 0.6050 7.9290 96.20 2.0459 5 403.0 14.70 369.30 3.70 50.00
168 | 1.80028 0.00 19.580 0 0.6050 5.8770 79.20 2.4259 5 403.0 14.70 227.61 12.14 23.80
169 | 2.30040 0.00 19.580 0 0.6050 6.3190 96.10 2.1000 5 403.0 14.70 297.09 11.10 23.80
170 | 2.44953 0.00 19.580 0 0.6050 6.4020 95.20 2.2625 5 403.0 14.70 330.04 11.32 22.30
171 | 1.20742 0.00 19.580 0 0.6050 5.8750 94.60 2.4259 5 403.0 14.70 292.29 14.43 17.40
172 | 2.31390 0.00 19.580 0 0.6050 5.8800 97.30 2.3887 5 403.0 14.70 348.13 12.03 19.10
173 | 0.13914 0.00 4.050 0 0.5100 5.5720 88.50 2.5961 5 296.0 16.60 396.90 14.69 23.10
174 | 0.09178 0.00 4.050 0 0.5100 6.4160 84.10 2.6463 5 296.0 16.60 395.50 9.04 23.60
175 | 0.08447 0.00 4.050 0 0.5100 5.8590 68.70 2.7019 5 296.0 16.60 393.23 9.64 22.60
176 | 0.06664 0.00 4.050 0 0.5100 6.5460 33.10 3.1323 5 296.0 16.60 390.96 5.33 29.40
177 | 0.07022 0.00 4.050 0 0.5100 6.0200 47.20 3.5549 5 296.0 16.60 393.23 10.11 23.20
178 | 0.05425 0.00 4.050 0 0.5100 6.3150 73.40 3.3175 5 296.0 16.60 395.60 6.29 24.60
179 | 0.06642 0.00 4.050 0 0.5100 6.8600 74.40 2.9153 5 296.0 16.60 391.27 6.92 29.90
180 | 0.05780 0.00 2.460 0 0.4880 6.9800 58.40 2.8290 3 193.0 17.80 396.90 5.04 37.20
181 | 0.06588 0.00 2.460 0 0.4880 7.7650 83.30 2.7410 3 193.0 17.80 395.56 7.56 39.80
182 | 0.06888 0.00 2.460 0 0.4880 6.1440 62.20 2.5979 3 193.0 17.80 396.90 9.45 36.20
183 | 0.09103 0.00 2.460 0 0.4880 7.1550 92.20 2.7006 3 193.0 17.80 394.12 4.82 37.90
184 | 0.10008 0.00 2.460 0 0.4880 6.5630 95.60 2.8470 3 193.0 17.80 396.90 5.68 32.50
185 | 0.08308 0.00 2.460 0 0.4880 5.6040 89.80 2.9879 3 193.0 17.80 391.00 13.98 26.40
186 | 0.06047 0.00 2.460 0 0.4880 6.1530 68.80 3.2797 3 193.0 17.80 387.11 13.15 29.60
187 | 0.05602 0.00 2.460 0 0.4880 7.8310 53.60 3.1992 3 193.0 17.80 392.63 4.45 50.00
188 | 0.07875 45.00 3.440 0 0.4370 6.7820 41.10 3.7886 5 398.0 15.20 393.87 6.68 32.00
189 | 0.12579 45.00 3.440 0 0.4370 6.5560 29.10 4.5667 5 398.0 15.20 382.84 4.56 29.80
190 | 0.08370 45.00 3.440 0 0.4370 7.1850 38.90 4.5667 5 398.0 15.20 396.90 5.39 34.90
191 | 0.09068 45.00 3.440 0 0.4370 6.9510 21.50 6.4798 5 398.0 15.20 377.68 5.10 37.00
192 | 0.06911 45.00 3.440 0 0.4370 6.7390 30.80 6.4798 5 398.0 15.20 389.71 4.69 30.50
193 | 0.08664 45.00 3.440 0 0.4370 7.1780 26.30 6.4798 5 398.0 15.20 390.49 2.87 36.40
194 | 0.02187 60.00 2.930 0 0.4010 6.8000 9.90 6.2196 1 265.0 15.60 393.37 5.03 31.10
195 | 0.01439 60.00 2.930 0 0.4010 6.6040 18.80 6.2196 1 265.0 15.60 376.70 4.38 29.10
196 | 0.01381 80.00 0.460 0 0.4220 7.8750 32.00 5.6484 4 255.0 14.40 394.23 2.97 50.00
197 | 0.04011 80.00 1.520 0 0.4040 7.2870 34.10 7.3090 2 329.0 12.60 396.90 4.08 33.30
198 | 0.04666 80.00 1.520 0 0.4040 7.1070 36.60 7.3090 2 329.0 12.60 354.31 8.61 30.30
199 | 0.03768 80.00 1.520 0 0.4040 7.2740 38.30 7.3090 2 329.0 12.60 392.20 6.62 34.60
200 | 0.03150 95.00 1.470 0 0.4030 6.9750 15.30 7.6534 3 402.0 17.00 396.90 4.56 34.90
201 | 0.01778 95.00 1.470 0 0.4030 7.1350 13.90 7.6534 3 402.0 17.00 384.30 4.45 32.90
202 | 0.03445 82.50 2.030 0 0.4150 6.1620 38.40 6.2700 2 348.0 14.70 393.77 7.43 24.10
203 | 0.02177 82.50 2.030 0 0.4150 7.6100 15.70 6.2700 2 348.0 14.70 395.38 3.11 42.30
204 | 0.03510 95.00 2.680 0 0.4161 7.8530 33.20 5.1180 4 224.0 14.70 392.78 3.81 48.50
205 | 0.02009 95.00 2.680 0 0.4161 8.0340 31.90 5.1180 4 224.0 14.70 390.55 2.88 50.00
206 | 0.13642 0.00 10.590 0 0.4890 5.8910 22.30 3.9454 4 277.0 18.60 396.90 10.87 22.60
207 | 0.22969 0.00 10.590 0 0.4890 6.3260 52.50 4.3549 4 277.0 18.60 394.87 10.97 24.40
208 | 0.25199 0.00 10.590 0 0.4890 5.7830 72.70 4.3549 4 277.0 18.60 389.43 18.06 22.50
209 | 0.13587 0.00 10.590 1 0.4890 6.0640 59.10 4.2392 4 277.0 18.60 381.32 14.66 24.40
210 | 0.43571 0.00 10.590 1 0.4890 5.3440 100.00 3.8750 4 277.0 18.60 396.90 23.09 20.00
211 | 0.17446 0.00 10.590 1 0.4890 5.9600 92.10 3.8771 4 277.0 18.60 393.25 17.27 21.70
212 | 0.37578 0.00 10.590 1 0.4890 5.4040 88.60 3.6650 4 277.0 18.60 395.24 23.98 19.30
213 | 0.21719 0.00 10.590 1 0.4890 5.8070 53.80 3.6526 4 277.0 18.60 390.94 16.03 22.40
214 | 0.14052 0.00 10.590 0 0.4890 6.3750 32.30 3.9454 4 277.0 18.60 385.81 9.38 28.10
215 | 0.28955 0.00 10.590 0 0.4890 5.4120 9.80 3.5875 4 277.0 18.60 348.93 29.55 23.70
216 | 0.19802 0.00 10.590 0 0.4890 6.1820 42.40 3.9454 4 277.0 18.60 393.63 9.47 25.00
217 | 0.04560 0.00 13.890 1 0.5500 5.8880 56.00 3.1121 5 276.0 16.40 392.80 13.51 23.30
218 | 0.07013 0.00 13.890 0 0.5500 6.6420 85.10 3.4211 5 276.0 16.40 392.78 9.69 28.70
219 | 0.11069 0.00 13.890 1 0.5500 5.9510 93.80 2.8893 5 276.0 16.40 396.90 17.92 21.50
220 | 0.11425 0.00 13.890 1 0.5500 6.3730 92.40 3.3633 5 276.0 16.40 393.74 10.50 23.00
221 | 0.35809 0.00 6.200 1 0.5070 6.9510 88.50 2.8617 8 307.0 17.40 391.70 9.71 26.70
222 | 0.40771 0.00 6.200 1 0.5070 6.1640 91.30 3.0480 8 307.0 17.40 395.24 21.46 21.70
223 | 0.62356 0.00 6.200 1 0.5070 6.8790 77.70 3.2721 8 307.0 17.40 390.39 9.93 27.50
224 | 0.61470 0.00 6.200 0 0.5070 6.6180 80.80 3.2721 8 307.0 17.40 396.90 7.60 30.10
225 | 0.31533 0.00 6.200 0 0.5040 8.2660 78.30 2.8944 8 307.0 17.40 385.05 4.14 44.80
226 | 0.52693 0.00 6.200 0 0.5040 8.7250 83.00 2.8944 8 307.0 17.40 382.00 4.63 50.00
227 | 0.38214 0.00 6.200 0 0.5040 8.0400 86.50 3.2157 8 307.0 17.40 387.38 3.13 37.60
228 | 0.41238 0.00 6.200 0 0.5040 7.1630 79.90 3.2157 8 307.0 17.40 372.08 6.36 31.60
229 | 0.29819 0.00 6.200 0 0.5040 7.6860 17.00 3.3751 8 307.0 17.40 377.51 3.92 46.70
230 | 0.44178 0.00 6.200 0 0.5040 6.5520 21.40 3.3751 8 307.0 17.40 380.34 3.76 31.50
231 | 0.53700 0.00 6.200 0 0.5040 5.9810 68.10 3.6715 8 307.0 17.40 378.35 11.65 24.30
232 | 0.46296 0.00 6.200 0 0.5040 7.4120 76.90 3.6715 8 307.0 17.40 376.14 5.25 31.70
233 | 0.57529 0.00 6.200 0 0.5070 8.3370 73.30 3.8384 8 307.0 17.40 385.91 2.47 41.70
234 | 0.33147 0.00 6.200 0 0.5070 8.2470 70.40 3.6519 8 307.0 17.40 378.95 3.95 48.30
235 | 0.44791 0.00 6.200 1 0.5070 6.7260 66.50 3.6519 8 307.0 17.40 360.20 8.05 29.00
236 | 0.33045 0.00 6.200 0 0.5070 6.0860 61.50 3.6519 8 307.0 17.40 376.75 10.88 24.00
237 | 0.52058 0.00 6.200 1 0.5070 6.6310 76.50 4.1480 8 307.0 17.40 388.45 9.54 25.10
238 | 0.51183 0.00 6.200 0 0.5070 7.3580 71.60 4.1480 8 307.0 17.40 390.07 4.73 31.50
239 | 0.08244 30.00 4.930 0 0.4280 6.4810 18.50 6.1899 6 300.0 16.60 379.41 6.36 23.70
240 | 0.09252 30.00 4.930 0 0.4280 6.6060 42.20 6.1899 6 300.0 16.60 383.78 7.37 23.30
241 | 0.11329 30.00 4.930 0 0.4280 6.8970 54.30 6.3361 6 300.0 16.60 391.25 11.38 22.00
242 | 0.10612 30.00 4.930 0 0.4280 6.0950 65.10 6.3361 6 300.0 16.60 394.62 12.40 20.10
243 | 0.10290 30.00 4.930 0 0.4280 6.3580 52.90 7.0355 6 300.0 16.60 372.75 11.22 22.20
244 | 0.12757 30.00 4.930 0 0.4280 6.3930 7.80 7.0355 6 300.0 16.60 374.71 5.19 23.70
245 | 0.20608 22.00 5.860 0 0.4310 5.5930 76.50 7.9549 7 330.0 19.10 372.49 12.50 17.60
246 | 0.19133 22.00 5.860 0 0.4310 5.6050 70.20 7.9549 7 330.0 19.10 389.13 18.46 18.50
247 | 0.33983 22.00 5.860 0 0.4310 6.1080 34.90 8.0555 7 330.0 19.10 390.18 9.16 24.30
248 | 0.19657 22.00 5.860 0 0.4310 6.2260 79.20 8.0555 7 330.0 19.10 376.14 10.15 20.50
249 | 0.16439 22.00 5.860 0 0.4310 6.4330 49.10 7.8265 7 330.0 19.10 374.71 9.52 24.50
250 | 0.19073 22.00 5.860 0 0.4310 6.7180 17.50 7.8265 7 330.0 19.10 393.74 6.56 26.20
251 | 0.14030 22.00 5.860 0 0.4310 6.4870 13.00 7.3967 7 330.0 19.10 396.28 5.90 24.40
252 | 0.21409 22.00 5.860 0 0.4310 6.4380 8.90 7.3967 7 330.0 19.10 377.07 3.59 24.80
253 | 0.08221 22.00 5.860 0 0.4310 6.9570 6.80 8.9067 7 330.0 19.10 386.09 3.53 29.60
254 | 0.36894 22.00 5.860 0 0.4310 8.2590 8.40 8.9067 7 330.0 19.10 396.90 3.54 42.80
255 | 0.04819 80.00 3.640 0 0.3920 6.1080 32.00 9.2203 1 315.0 16.40 392.89 6.57 21.90
256 | 0.03548 80.00 3.640 0 0.3920 5.8760 19.10 9.2203 1 315.0 16.40 395.18 9.25 20.90
257 | 0.01538 90.00 3.750 0 0.3940 7.4540 34.20 6.3361 3 244.0 15.90 386.34 3.11 44.00
258 | 0.61154 20.00 3.970 0 0.6470 8.7040 86.90 1.8010 5 264.0 13.00 389.70 5.12 50.00
259 | 0.66351 20.00 3.970 0 0.6470 7.3330 100.00 1.8946 5 264.0 13.00 383.29 7.79 36.00
260 | 0.65665 20.00 3.970 0 0.6470 6.8420 100.00 2.0107 5 264.0 13.00 391.93 6.90 30.10
261 | 0.54011 20.00 3.970 0 0.6470 7.2030 81.80 2.1121 5 264.0 13.00 392.80 9.59 33.80
262 | 0.53412 20.00 3.970 0 0.6470 7.5200 89.40 2.1398 5 264.0 13.00 388.37 7.26 43.10
263 | 0.52014 20.00 3.970 0 0.6470 8.3980 91.50 2.2885 5 264.0 13.00 386.86 5.91 48.80
264 | 0.82526 20.00 3.970 0 0.6470 7.3270 94.50 2.0788 5 264.0 13.00 393.42 11.25 31.00
265 | 0.55007 20.00 3.970 0 0.6470 7.2060 91.60 1.9301 5 264.0 13.00 387.89 8.10 36.50
266 | 0.76162 20.00 3.970 0 0.6470 5.5600 62.80 1.9865 5 264.0 13.00 392.40 10.45 22.80
267 | 0.78570 20.00 3.970 0 0.6470 7.0140 84.60 2.1329 5 264.0 13.00 384.07 14.79 30.70
268 | 0.57834 20.00 3.970 0 0.5750 8.2970 67.00 2.4216 5 264.0 13.00 384.54 7.44 50.00
269 | 0.54050 20.00 3.970 0 0.5750 7.4700 52.60 2.8720 5 264.0 13.00 390.30 3.16 43.50
270 | 0.09065 20.00 6.960 1 0.4640 5.9200 61.50 3.9175 3 223.0 18.60 391.34 13.65 20.70
271 | 0.29916 20.00 6.960 0 0.4640 5.8560 42.10 4.4290 3 223.0 18.60 388.65 13.00 21.10
272 | 0.16211 20.00 6.960 0 0.4640 6.2400 16.30 4.4290 3 223.0 18.60 396.90 6.59 25.20
273 | 0.11460 20.00 6.960 0 0.4640 6.5380 58.70 3.9175 3 223.0 18.60 394.96 7.73 24.40
274 | 0.22188 20.00 6.960 1 0.4640 7.6910 51.80 4.3665 3 223.0 18.60 390.77 6.58 35.20
275 | 0.05644 40.00 6.410 1 0.4470 6.7580 32.90 4.0776 4 254.0 17.60 396.90 3.53 32.40
276 | 0.09604 40.00 6.410 0 0.4470 6.8540 42.80 4.2673 4 254.0 17.60 396.90 2.98 32.00
277 | 0.10469 40.00 6.410 1 0.4470 7.2670 49.00 4.7872 4 254.0 17.60 389.25 6.05 33.20
278 | 0.06127 40.00 6.410 1 0.4470 6.8260 27.60 4.8628 4 254.0 17.60 393.45 4.16 33.10
279 | 0.07978 40.00 6.410 0 0.4470 6.4820 32.10 4.1403 4 254.0 17.60 396.90 7.19 29.10
280 | 0.21038 20.00 3.330 0 0.4429 6.8120 32.20 4.1007 5 216.0 14.90 396.90 4.85 35.10
281 | 0.03578 20.00 3.330 0 0.4429 7.8200 64.50 4.6947 5 216.0 14.90 387.31 3.76 45.40
282 | 0.03705 20.00 3.330 0 0.4429 6.9680 37.20 5.2447 5 216.0 14.90 392.23 4.59 35.40
283 | 0.06129 20.00 3.330 1 0.4429 7.6450 49.70 5.2119 5 216.0 14.90 377.07 3.01 46.00
284 | 0.01501 90.00 1.210 1 0.4010 7.9230 24.80 5.8850 1 198.0 13.60 395.52 3.16 50.00
285 | 0.00906 90.00 2.970 0 0.4000 7.0880 20.80 7.3073 1 285.0 15.30 394.72 7.85 32.20
286 | 0.01096 55.00 2.250 0 0.3890 6.4530 31.90 7.3073 1 300.0 15.30 394.72 8.23 22.00
287 | 0.01965 80.00 1.760 0 0.3850 6.2300 31.50 9.0892 1 241.0 18.20 341.60 12.93 20.10
288 | 0.03871 52.50 5.320 0 0.4050 6.2090 31.30 7.3172 6 293.0 16.60 396.90 7.14 23.20
289 | 0.04590 52.50 5.320 0 0.4050 6.3150 45.60 7.3172 6 293.0 16.60 396.90 7.60 22.30
290 | 0.04297 52.50 5.320 0 0.4050 6.5650 22.90 7.3172 6 293.0 16.60 371.72 9.51 24.80
291 | 0.03502 80.00 4.950 0 0.4110 6.8610 27.90 5.1167 4 245.0 19.20 396.90 3.33 28.50
292 | 0.07886 80.00 4.950 0 0.4110 7.1480 27.70 5.1167 4 245.0 19.20 396.90 3.56 37.30
293 | 0.03615 80.00 4.950 0 0.4110 6.6300 23.40 5.1167 4 245.0 19.20 396.90 4.70 27.90
294 | 0.08265 0.00 13.920 0 0.4370 6.1270 18.40 5.5027 4 289.0 16.00 396.90 8.58 23.90
295 | 0.08199 0.00 13.920 0 0.4370 6.0090 42.30 5.5027 4 289.0 16.00 396.90 10.40 21.70
296 | 0.12932 0.00 13.920 0 0.4370 6.6780 31.10 5.9604 4 289.0 16.00 396.90 6.27 28.60
297 | 0.05372 0.00 13.920 0 0.4370 6.5490 51.00 5.9604 4 289.0 16.00 392.85 7.39 27.10
298 | 0.14103 0.00 13.920 0 0.4370 5.7900 58.00 6.3200 4 289.0 16.00 396.90 15.84 20.30
299 | 0.06466 70.00 2.240 0 0.4000 6.3450 20.10 7.8278 5 358.0 14.80 368.24 4.97 22.50
300 | 0.05561 70.00 2.240 0 0.4000 7.0410 10.00 7.8278 5 358.0 14.80 371.58 4.74 29.00
301 | 0.04417 70.00 2.240 0 0.4000 6.8710 47.40 7.8278 5 358.0 14.80 390.86 6.07 24.80
302 | 0.03537 34.00 6.090 0 0.4330 6.5900 40.40 5.4917 7 329.0 16.10 395.75 9.50 22.00
303 | 0.09266 34.00 6.090 0 0.4330 6.4950 18.40 5.4917 7 329.0 16.10 383.61 8.67 26.40
304 | 0.10000 34.00 6.090 0 0.4330 6.9820 17.70 5.4917 7 329.0 16.10 390.43 4.86 33.10
305 | 0.05515 33.00 2.180 0 0.4720 7.2360 41.10 4.0220 7 222.0 18.40 393.68 6.93 36.10
306 | 0.05479 33.00 2.180 0 0.4720 6.6160 58.10 3.3700 7 222.0 18.40 393.36 8.93 28.40
307 | 0.07503 33.00 2.180 0 0.4720 7.4200 71.90 3.0992 7 222.0 18.40 396.90 6.47 33.40
308 | 0.04932 33.00 2.180 0 0.4720 6.8490 70.30 3.1827 7 222.0 18.40 396.90 7.53 28.20
309 | 0.49298 0.00 9.900 0 0.5440 6.6350 82.50 3.3175 4 304.0 18.40 396.90 4.54 22.80
310 | 0.34940 0.00 9.900 0 0.5440 5.9720 76.70 3.1025 4 304.0 18.40 396.24 9.97 20.30
311 | 2.63548 0.00 9.900 0 0.5440 4.9730 37.80 2.5194 4 304.0 18.40 350.45 12.64 16.10
312 | 0.79041 0.00 9.900 0 0.5440 6.1220 52.80 2.6403 4 304.0 18.40 396.90 5.98 22.10
313 | 0.26169 0.00 9.900 0 0.5440 6.0230 90.40 2.8340 4 304.0 18.40 396.30 11.72 19.40
314 | 0.26938 0.00 9.900 0 0.5440 6.2660 82.80 3.2628 4 304.0 18.40 393.39 7.90 21.60
315 | 0.36920 0.00 9.900 0 0.5440 6.5670 87.30 3.6023 4 304.0 18.40 395.69 9.28 23.80
316 | 0.25356 0.00 9.900 0 0.5440 5.7050 77.70 3.9450 4 304.0 18.40 396.42 11.50 16.20
317 | 0.31827 0.00 9.900 0 0.5440 5.9140 83.20 3.9986 4 304.0 18.40 390.70 18.33 17.80
318 | 0.24522 0.00 9.900 0 0.5440 5.7820 71.70 4.0317 4 304.0 18.40 396.90 15.94 19.80
319 | 0.40202 0.00 9.900 0 0.5440 6.3820 67.20 3.5325 4 304.0 18.40 395.21 10.36 23.10
320 | 0.47547 0.00 9.900 0 0.5440 6.1130 58.80 4.0019 4 304.0 18.40 396.23 12.73 21.00
321 | 0.16760 0.00 7.380 0 0.4930 6.4260 52.30 4.5404 5 287.0 19.60 396.90 7.20 23.80
322 | 0.18159 0.00 7.380 0 0.4930 6.3760 54.30 4.5404 5 287.0 19.60 396.90 6.87 23.10
323 | 0.35114 0.00 7.380 0 0.4930 6.0410 49.90 4.7211 5 287.0 19.60 396.90 7.70 20.40
324 | 0.28392 0.00 7.380 0 0.4930 5.7080 74.30 4.7211 5 287.0 19.60 391.13 11.74 18.50
325 | 0.34109 0.00 7.380 0 0.4930 6.4150 40.10 4.7211 5 287.0 19.60 396.90 6.12 25.00
326 | 0.19186 0.00 7.380 0 0.4930 6.4310 14.70 5.4159 5 287.0 19.60 393.68 5.08 24.60
327 | 0.30347 0.00 7.380 0 0.4930 6.3120 28.90 5.4159 5 287.0 19.60 396.90 6.15 23.00
328 | 0.24103 0.00 7.380 0 0.4930 6.0830 43.70 5.4159 5 287.0 19.60 396.90 12.79 22.20
329 | 0.06617 0.00 3.240 0 0.4600 5.8680 25.80 5.2146 4 430.0 16.90 382.44 9.97 19.30
330 | 0.06724 0.00 3.240 0 0.4600 6.3330 17.20 5.2146 4 430.0 16.90 375.21 7.34 22.60
331 | 0.04544 0.00 3.240 0 0.4600 6.1440 32.20 5.8736 4 430.0 16.90 368.57 9.09 19.80
332 | 0.05023 35.00 6.060 0 0.4379 5.7060 28.40 6.6407 1 304.0 16.90 394.02 12.43 17.10
333 | 0.03466 35.00 6.060 0 0.4379 6.0310 23.30 6.6407 1 304.0 16.90 362.25 7.83 19.40
334 | 0.05083 0.00 5.190 0 0.5150 6.3160 38.10 6.4584 5 224.0 20.20 389.71 5.68 22.20
335 | 0.03738 0.00 5.190 0 0.5150 6.3100 38.50 6.4584 5 224.0 20.20 389.40 6.75 20.70
336 | 0.03961 0.00 5.190 0 0.5150 6.0370 34.50 5.9853 5 224.0 20.20 396.90 8.01 21.10
337 | 0.03427 0.00 5.190 0 0.5150 5.8690 46.30 5.2311 5 224.0 20.20 396.90 9.80 19.50
338 | 0.03041 0.00 5.190 0 0.5150 5.8950 59.60 5.6150 5 224.0 20.20 394.81 10.56 18.50
339 | 0.03306 0.00 5.190 0 0.5150 6.0590 37.30 4.8122 5 224.0 20.20 396.14 8.51 20.60
340 | 0.05497 0.00 5.190 0 0.5150 5.9850 45.40 4.8122 5 224.0 20.20 396.90 9.74 19.00
341 | 0.06151 0.00 5.190 0 0.5150 5.9680 58.50 4.8122 5 224.0 20.20 396.90 9.29 18.70
342 | 0.01301 35.00 1.520 0 0.4420 7.2410 49.30 7.0379 1 284.0 15.50 394.74 5.49 32.70
343 | 0.02498 0.00 1.890 0 0.5180 6.5400 59.70 6.2669 1 422.0 15.90 389.96 8.65 16.50
344 | 0.02543 55.00 3.780 0 0.4840 6.6960 56.40 5.7321 5 370.0 17.60 396.90 7.18 23.90
345 | 0.03049 55.00 3.780 0 0.4840 6.8740 28.10 6.4654 5 370.0 17.60 387.97 4.61 31.20
346 | 0.03113 0.00 4.390 0 0.4420 6.0140 48.50 8.0136 3 352.0 18.80 385.64 10.53 17.50
347 | 0.06162 0.00 4.390 0 0.4420 5.8980 52.30 8.0136 3 352.0 18.80 364.61 12.67 17.20
348 | 0.01870 85.00 4.150 0 0.4290 6.5160 27.70 8.5353 4 351.0 17.90 392.43 6.36 23.10
349 | 0.01501 80.00 2.010 0 0.4350 6.6350 29.70 8.3440 4 280.0 17.00 390.94 5.99 24.50
350 | 0.02899 40.00 1.250 0 0.4290 6.9390 34.50 8.7921 1 335.0 19.70 389.85 5.89 26.60
351 | 0.06211 40.00 1.250 0 0.4290 6.4900 44.40 8.7921 1 335.0 19.70 396.90 5.98 22.90
352 | 0.07950 60.00 1.690 0 0.4110 6.5790 35.90 10.7103 4 411.0 18.30 370.78 5.49 24.10
353 | 0.07244 60.00 1.690 0 0.4110 5.8840 18.50 10.7103 4 411.0 18.30 392.33 7.79 18.60
354 | 0.01709 90.00 2.020 0 0.4100 6.7280 36.10 12.1265 5 187.0 17.00 384.46 4.50 30.10
355 | 0.04301 80.00 1.910 0 0.4130 5.6630 21.90 10.5857 4 334.0 22.00 382.80 8.05 18.20
356 | 0.10659 80.00 1.910 0 0.4130 5.9360 19.50 10.5857 4 334.0 22.00 376.04 5.57 20.60
357 | 8.98296 0.00 18.100 1 0.7700 6.2120 97.40 2.1222 24 666.0 20.20 377.73 17.60 17.80
358 | 3.84970 0.00 18.100 1 0.7700 6.3950 91.00 2.5052 24 666.0 20.20 391.34 13.27 21.70
359 | 5.20177 0.00 18.100 1 0.7700 6.1270 83.40 2.7227 24 666.0 20.20 395.43 11.48 22.70
360 | 4.26131 0.00 18.100 0 0.7700 6.1120 81.30 2.5091 24 666.0 20.20 390.74 12.67 22.60
361 | 4.54192 0.00 18.100 0 0.7700 6.3980 88.00 2.5182 24 666.0 20.20 374.56 7.79 25.00
362 | 3.83684 0.00 18.100 0 0.7700 6.2510 91.10 2.2955 24 666.0 20.20 350.65 14.19 19.90
363 | 3.67822 0.00 18.100 0 0.7700 5.3620 96.20 2.1036 24 666.0 20.20 380.79 10.19 20.80
364 | 4.22239 0.00 18.100 1 0.7700 5.8030 89.00 1.9047 24 666.0 20.20 353.04 14.64 16.80
365 | 3.47428 0.00 18.100 1 0.7180 8.7800 82.90 1.9047 24 666.0 20.20 354.55 5.29 21.90
366 | 4.55587 0.00 18.100 0 0.7180 3.5610 87.90 1.6132 24 666.0 20.20 354.70 7.12 27.50
367 | 3.69695 0.00 18.100 0 0.7180 4.9630 91.40 1.7523 24 666.0 20.20 316.03 14.00 21.90
368 | 13.52220 0.00 18.100 0 0.6310 3.8630 100.00 1.5106 24 666.0 20.20 131.42 13.33 23.10
369 | 4.89822 0.00 18.100 0 0.6310 4.9700 100.00 1.3325 24 666.0 20.20 375.52 3.26 50.00
370 | 5.66998 0.00 18.100 1 0.6310 6.6830 96.80 1.3567 24 666.0 20.20 375.33 3.73 50.00
371 | 6.53876 0.00 18.100 1 0.6310 7.0160 97.50 1.2024 24 666.0 20.20 392.05 2.96 50.00
372 | 9.23230 0.00 18.100 0 0.6310 6.2160 100.00 1.1691 24 666.0 20.20 366.15 9.53 50.00
373 | 8.26725 0.00 18.100 1 0.6680 5.8750 89.60 1.1296 24 666.0 20.20 347.88 8.88 50.00
374 | 11.10810 0.00 18.100 0 0.6680 4.9060 100.00 1.1742 24 666.0 20.20 396.90 34.77 13.80
375 | 18.49820 0.00 18.100 0 0.6680 4.1380 100.00 1.1370 24 666.0 20.20 396.90 37.97 13.80
376 | 19.60910 0.00 18.100 0 0.6710 7.3130 97.90 1.3163 24 666.0 20.20 396.90 13.44 15.00
377 | 15.28800 0.00 18.100 0 0.6710 6.6490 93.30 1.3449 24 666.0 20.20 363.02 23.24 13.90
378 | 9.82349 0.00 18.100 0 0.6710 6.7940 98.80 1.3580 24 666.0 20.20 396.90 21.24 13.30
379 | 23.64820 0.00 18.100 0 0.6710 6.3800 96.20 1.3861 24 666.0 20.20 396.90 23.69 13.10
380 | 17.86670 0.00 18.100 0 0.6710 6.2230 100.00 1.3861 24 666.0 20.20 393.74 21.78 10.20
381 | 88.97620 0.00 18.100 0 0.6710 6.9680 91.90 1.4165 24 666.0 20.20 396.90 17.21 10.40
382 | 15.87440 0.00 18.100 0 0.6710 6.5450 99.10 1.5192 24 666.0 20.20 396.90 21.08 10.90
383 | 9.18702 0.00 18.100 0 0.7000 5.5360 100.00 1.5804 24 666.0 20.20 396.90 23.60 11.30
384 | 7.99248 0.00 18.100 0 0.7000 5.5200 100.00 1.5331 24 666.0 20.20 396.90 24.56 12.30
385 | 20.08490 0.00 18.100 0 0.7000 4.3680 91.20 1.4395 24 666.0 20.20 285.83 30.63 8.80
386 | 16.81180 0.00 18.100 0 0.7000 5.2770 98.10 1.4261 24 666.0 20.20 396.90 30.81 7.20
387 | 24.39380 0.00 18.100 0 0.7000 4.6520 100.00 1.4672 24 666.0 20.20 396.90 28.28 10.50
388 | 22.59710 0.00 18.100 0 0.7000 5.0000 89.50 1.5184 24 666.0 20.20 396.90 31.99 7.40
389 | 14.33370 0.00 18.100 0 0.7000 4.8800 100.00 1.5895 24 666.0 20.20 372.92 30.62 10.20
390 | 8.15174 0.00 18.100 0 0.7000 5.3900 98.90 1.7281 24 666.0 20.20 396.90 20.85 11.50
391 | 6.96215 0.00 18.100 0 0.7000 5.7130 97.00 1.9265 24 666.0 20.20 394.43 17.11 15.10
392 | 5.29305 0.00 18.100 0 0.7000 6.0510 82.50 2.1678 24 666.0 20.20 378.38 18.76 23.20
393 | 11.57790 0.00 18.100 0 0.7000 5.0360 97.00 1.7700 24 666.0 20.20 396.90 25.68 9.70
394 | 8.64476 0.00 18.100 0 0.6930 6.1930 92.60 1.7912 24 666.0 20.20 396.90 15.17 13.80
395 | 13.35980 0.00 18.100 0 0.6930 5.8870 94.70 1.7821 24 666.0 20.20 396.90 16.35 12.70
396 | 8.71675 0.00 18.100 0 0.6930 6.4710 98.80 1.7257 24 666.0 20.20 391.98 17.12 13.10
397 | 5.87205 0.00 18.100 0 0.6930 6.4050 96.00 1.6768 24 666.0 20.20 396.90 19.37 12.50
398 | 7.67202 0.00 18.100 0 0.6930 5.7470 98.90 1.6334 24 666.0 20.20 393.10 19.92 8.50
399 | 38.35180 0.00 18.100 0 0.6930 5.4530 100.00 1.4896 24 666.0 20.20 396.90 30.59 5.00
400 | 9.91655 0.00 18.100 0 0.6930 5.8520 77.80 1.5004 24 666.0 20.20 338.16 29.97 6.30
401 | 25.04610 0.00 18.100 0 0.6930 5.9870 100.00 1.5888 24 666.0 20.20 396.90 26.77 5.60
402 | 14.23620 0.00 18.100 0 0.6930 6.3430 100.00 1.5741 24 666.0 20.20 396.90 20.32 7.20
403 | 9.59571 0.00 18.100 0 0.6930 6.4040 100.00 1.6390 24 666.0 20.20 376.11 20.31 12.10
404 | 24.80170 0.00 18.100 0 0.6930 5.3490 96.00 1.7028 24 666.0 20.20 396.90 19.77 8.30
405 | 41.52920 0.00 18.100 0 0.6930 5.5310 85.40 1.6074 24 666.0 20.20 329.46 27.38 8.50
406 | 67.92080 0.00 18.100 0 0.6930 5.6830 100.00 1.4254 24 666.0 20.20 384.97 22.98 5.00
407 | 20.71620 0.00 18.100 0 0.6590 4.1380 100.00 1.1781 24 666.0 20.20 370.22 23.34 11.90
408 | 11.95110 0.00 18.100 0 0.6590 5.6080 100.00 1.2852 24 666.0 20.20 332.09 12.13 27.90
409 | 7.40389 0.00 18.100 0 0.5970 5.6170 97.90 1.4547 24 666.0 20.20 314.64 26.40 17.20
410 | 14.43830 0.00 18.100 0 0.5970 6.8520 100.00 1.4655 24 666.0 20.20 179.36 19.78 27.50
411 | 51.13580 0.00 18.100 0 0.5970 5.7570 100.00 1.4130 24 666.0 20.20 2.60 10.11 15.00
412 | 14.05070 0.00 18.100 0 0.5970 6.6570 100.00 1.5275 24 666.0 20.20 35.05 21.22 17.20
413 | 18.81100 0.00 18.100 0 0.5970 4.6280 100.00 1.5539 24 666.0 20.20 28.79 34.37 17.90
414 | 28.65580 0.00 18.100 0 0.5970 5.1550 100.00 1.5894 24 666.0 20.20 210.97 20.08 16.30
415 | 45.74610 0.00 18.100 0 0.6930 4.5190 100.00 1.6582 24 666.0 20.20 88.27 36.98 7.00
416 | 18.08460 0.00 18.100 0 0.6790 6.4340 100.00 1.8347 24 666.0 20.20 27.25 29.05 7.20
417 | 10.83420 0.00 18.100 0 0.6790 6.7820 90.80 1.8195 24 666.0 20.20 21.57 25.79 7.50
418 | 25.94060 0.00 18.100 0 0.6790 5.3040 89.10 1.6475 24 666.0 20.20 127.36 26.64 10.40
419 | 73.53410 0.00 18.100 0 0.6790 5.9570 100.00 1.8026 24 666.0 20.20 16.45 20.62 8.80
420 | 11.81230 0.00 18.100 0 0.7180 6.8240 76.50 1.7940 24 666.0 20.20 48.45 22.74 8.40
421 | 11.08740 0.00 18.100 0 0.7180 6.4110 100.00 1.8589 24 666.0 20.20 318.75 15.02 16.70
422 | 7.02259 0.00 18.100 0 0.7180 6.0060 95.30 1.8746 24 666.0 20.20 319.98 15.70 14.20
423 | 12.04820 0.00 18.100 0 0.6140 5.6480 87.60 1.9512 24 666.0 20.20 291.55 14.10 20.80
424 | 7.05042 0.00 18.100 0 0.6140 6.1030 85.10 2.0218 24 666.0 20.20 2.52 23.29 13.40
425 | 8.79212 0.00 18.100 0 0.5840 5.5650 70.60 2.0635 24 666.0 20.20 3.65 17.16 11.70
426 | 15.86030 0.00 18.100 0 0.6790 5.8960 95.40 1.9096 24 666.0 20.20 7.68 24.39 8.30
427 | 12.24720 0.00 18.100 0 0.5840 5.8370 59.70 1.9976 24 666.0 20.20 24.65 15.69 10.20
428 | 37.66190 0.00 18.100 0 0.6790 6.2020 78.70 1.8629 24 666.0 20.20 18.82 14.52 10.90
429 | 7.36711 0.00 18.100 0 0.6790 6.1930 78.10 1.9356 24 666.0 20.20 96.73 21.52 11.00
430 | 9.33889 0.00 18.100 0 0.6790 6.3800 95.60 1.9682 24 666.0 20.20 60.72 24.08 9.50
431 | 8.49213 0.00 18.100 0 0.5840 6.3480 86.10 2.0527 24 666.0 20.20 83.45 17.64 14.50
432 | 10.06230 0.00 18.100 0 0.5840 6.8330 94.30 2.0882 24 666.0 20.20 81.33 19.69 14.10
433 | 6.44405 0.00 18.100 0 0.5840 6.4250 74.80 2.2004 24 666.0 20.20 97.95 12.03 16.10
434 | 5.58107 0.00 18.100 0 0.7130 6.4360 87.90 2.3158 24 666.0 20.20 100.19 16.22 14.30
435 | 13.91340 0.00 18.100 0 0.7130 6.2080 95.00 2.2222 24 666.0 20.20 100.63 15.17 11.70
436 | 11.16040 0.00 18.100 0 0.7400 6.6290 94.60 2.1247 24 666.0 20.20 109.85 23.27 13.40
437 | 14.42080 0.00 18.100 0 0.7400 6.4610 93.30 2.0026 24 666.0 20.20 27.49 18.05 9.60
438 | 15.17720 0.00 18.100 0 0.7400 6.1520 100.00 1.9142 24 666.0 20.20 9.32 26.45 8.70
439 | 13.67810 0.00 18.100 0 0.7400 5.9350 87.90 1.8206 24 666.0 20.20 68.95 34.02 8.40
440 | 9.39063 0.00 18.100 0 0.7400 5.6270 93.90 1.8172 24 666.0 20.20 396.90 22.88 12.80
441 | 22.05110 0.00 18.100 0 0.7400 5.8180 92.40 1.8662 24 666.0 20.20 391.45 22.11 10.50
442 | 9.72418 0.00 18.100 0 0.7400 6.4060 97.20 2.0651 24 666.0 20.20 385.96 19.52 17.10
443 | 5.66637 0.00 18.100 0 0.7400 6.2190 100.00 2.0048 24 666.0 20.20 395.69 16.59 18.40
444 | 9.96654 0.00 18.100 0 0.7400 6.4850 100.00 1.9784 24 666.0 20.20 386.73 18.85 15.40
445 | 12.80230 0.00 18.100 0 0.7400 5.8540 96.60 1.8956 24 666.0 20.20 240.52 23.79 10.80
446 | 10.67180 0.00 18.100 0 0.7400 6.4590 94.80 1.9879 24 666.0 20.20 43.06 23.98 11.80
447 | 6.28807 0.00 18.100 0 0.7400 6.3410 96.40 2.0720 24 666.0 20.20 318.01 17.79 14.90
448 | 9.92485 0.00 18.100 0 0.7400 6.2510 96.60 2.1980 24 666.0 20.20 388.52 16.44 12.60
449 | 9.32909 0.00 18.100 0 0.7130 6.1850 98.70 2.2616 24 666.0 20.20 396.90 18.13 14.10
450 | 7.52601 0.00 18.100 0 0.7130 6.4170 98.30 2.1850 24 666.0 20.20 304.21 19.31 13.00
451 | 6.71772 0.00 18.100 0 0.7130 6.7490 92.60 2.3236 24 666.0 20.20 0.32 17.44 13.40
452 | 5.44114 0.00 18.100 0 0.7130 6.6550 98.20 2.3552 24 666.0 20.20 355.29 17.73 15.20
453 | 5.09017 0.00 18.100 0 0.7130 6.2970 91.80 2.3682 24 666.0 20.20 385.09 17.27 16.10
454 | 8.24809 0.00 18.100 0 0.7130 7.3930 99.30 2.4527 24 666.0 20.20 375.87 16.74 17.80
455 | 9.51363 0.00 18.100 0 0.7130 6.7280 94.10 2.4961 24 666.0 20.20 6.68 18.71 14.90
456 | 4.75237 0.00 18.100 0 0.7130 6.5250 86.50 2.4358 24 666.0 20.20 50.92 18.13 14.10
457 | 4.66883 0.00 18.100 0 0.7130 5.9760 87.90 2.5806 24 666.0 20.20 10.48 19.01 12.70
458 | 8.20058 0.00 18.100 0 0.7130 5.9360 80.30 2.7792 24 666.0 20.20 3.50 16.94 13.50
459 | 7.75223 0.00 18.100 0 0.7130 6.3010 83.70 2.7831 24 666.0 20.20 272.21 16.23 14.90
460 | 6.80117 0.00 18.100 0 0.7130 6.0810 84.40 2.7175 24 666.0 20.20 396.90 14.70 20.00
461 | 4.81213 0.00 18.100 0 0.7130 6.7010 90.00 2.5975 24 666.0 20.20 255.23 16.42 16.40
462 | 3.69311 0.00 18.100 0 0.7130 6.3760 88.40 2.5671 24 666.0 20.20 391.43 14.65 17.70
463 | 6.65492 0.00 18.100 0 0.7130 6.3170 83.00 2.7344 24 666.0 20.20 396.90 13.99 19.50
464 | 5.82115 0.00 18.100 0 0.7130 6.5130 89.90 2.8016 24 666.0 20.20 393.82 10.29 20.20
465 | 7.83932 0.00 18.100 0 0.6550 6.2090 65.40 2.9634 24 666.0 20.20 396.90 13.22 21.40
466 | 3.16360 0.00 18.100 0 0.6550 5.7590 48.20 3.0665 24 666.0 20.20 334.40 14.13 19.90
467 | 3.77498 0.00 18.100 0 0.6550 5.9520 84.70 2.8715 24 666.0 20.20 22.01 17.15 19.00
468 | 4.42228 0.00 18.100 0 0.5840 6.0030 94.50 2.5403 24 666.0 20.20 331.29 21.32 19.10
469 | 15.57570 0.00 18.100 0 0.5800 5.9260 71.00 2.9084 24 666.0 20.20 368.74 18.13 19.10
470 | 13.07510 0.00 18.100 0 0.5800 5.7130 56.70 2.8237 24 666.0 20.20 396.90 14.76 20.10
471 | 4.34879 0.00 18.100 0 0.5800 6.1670 84.00 3.0334 24 666.0 20.20 396.90 16.29 19.90
472 | 4.03841 0.00 18.100 0 0.5320 6.2290 90.70 3.0993 24 666.0 20.20 395.33 12.87 19.60
473 | 3.56868 0.00 18.100 0 0.5800 6.4370 75.00 2.8965 24 666.0 20.20 393.37 14.36 23.20
474 | 4.64689 0.00 18.100 0 0.6140 6.9800 67.60 2.5329 24 666.0 20.20 374.68 11.66 29.80
475 | 8.05579 0.00 18.100 0 0.5840 5.4270 95.40 2.4298 24 666.0 20.20 352.58 18.14 13.80
476 | 6.39312 0.00 18.100 0 0.5840 6.1620 97.40 2.2060 24 666.0 20.20 302.76 24.10 13.30
477 | 4.87141 0.00 18.100 0 0.6140 6.4840 93.60 2.3053 24 666.0 20.20 396.21 18.68 16.70
478 | 15.02340 0.00 18.100 0 0.6140 5.3040 97.30 2.1007 24 666.0 20.20 349.48 24.91 12.00
479 | 10.23300 0.00 18.100 0 0.6140 6.1850 96.70 2.1705 24 666.0 20.20 379.70 18.03 14.60
480 | 14.33370 0.00 18.100 0 0.6140 6.2290 88.00 1.9512 24 666.0 20.20 383.32 13.11 21.40
481 | 5.82401 0.00 18.100 0 0.5320 6.2420 64.70 3.4242 24 666.0 20.20 396.90 10.74 23.00
482 | 5.70818 0.00 18.100 0 0.5320 6.7500 74.90 3.3317 24 666.0 20.20 393.07 7.74 23.70
483 | 5.73116 0.00 18.100 0 0.5320 7.0610 77.00 3.4106 24 666.0 20.20 395.28 7.01 25.00
484 | 2.81838 0.00 18.100 0 0.5320 5.7620 40.30 4.0983 24 666.0 20.20 392.92 10.42 21.80
485 | 2.37857 0.00 18.100 0 0.5830 5.8710 41.90 3.7240 24 666.0 20.20 370.73 13.34 20.60
486 | 3.67367 0.00 18.100 0 0.5830 6.3120 51.90 3.9917 24 666.0 20.20 388.62 10.58 21.20
487 | 5.69175 0.00 18.100 0 0.5830 6.1140 79.80 3.5459 24 666.0 20.20 392.68 14.98 19.10
488 | 4.83567 0.00 18.100 0 0.5830 5.9050 53.20 3.1523 24 666.0 20.20 388.22 11.45 20.60
489 | 0.15086 0.00 27.740 0 0.6090 5.4540 92.70 1.8209 4 711.0 20.10 395.09 18.06 15.20
490 | 0.18337 0.00 27.740 0 0.6090 5.4140 98.30 1.7554 4 711.0 20.10 344.05 23.97 7.00
491 | 0.20746 0.00 27.740 0 0.6090 5.0930 98.00 1.8226 4 711.0 20.10 318.43 29.68 8.10
492 | 0.10574 0.00 27.740 0 0.6090 5.9830 98.80 1.8681 4 711.0 20.10 390.11 18.07 13.60
493 | 0.11132 0.00 27.740 0 0.6090 5.9830 83.50 2.1099 4 711.0 20.10 396.90 13.35 20.10
494 | 0.17331 0.00 9.690 0 0.5850 5.7070 54.00 2.3817 6 391.0 19.20 396.90 12.01 21.80
495 | 0.27957 0.00 9.690 0 0.5850 5.9260 42.60 2.3817 6 391.0 19.20 396.90 13.59 24.50
496 | 0.17899 0.00 9.690 0 0.5850 5.6700 28.80 2.7986 6 391.0 19.20 393.29 17.60 23.10
497 | 0.28960 0.00 9.690 0 0.5850 5.3900 72.90 2.7986 6 391.0 19.20 396.90 21.14 19.70
498 | 0.26838 0.00 9.690 0 0.5850 5.7940 70.60 2.8927 6 391.0 19.20 396.90 14.10 18.30
499 | 0.23912 0.00 9.690 0 0.5850 6.0190 65.30 2.4091 6 391.0 19.20 396.90 12.92 21.20
500 | 0.17783 0.00 9.690 0 0.5850 5.5690 73.50 2.3999 6 391.0 19.20 395.77 15.10 17.50
501 | 0.22438 0.00 9.690 0 0.5850 6.0270 79.70 2.4982 6 391.0 19.20 396.90 14.33 16.80
502 | 0.06263 0.00 11.930 0 0.5730 6.5930 69.10 2.4786 1 273.0 21.00 391.99 9.67 22.40
503 | 0.04527 0.00 11.930 0 0.5730 6.1200 76.70 2.2875 1 273.0 21.00 396.90 9.08 20.60
504 | 0.06076 0.00 11.930 0 0.5730 6.9760 91.00 2.1675 1 273.0 21.00 396.90 5.64 23.90
505 | 0.10959 0.00 11.930 0 0.5730 6.7940 89.30 2.3889 1 273.0 21.00 393.45 6.48 22.00
506 | 0.04741 0.00 11.930 0 0.5730 6.0300 80.80 2.5050 1 273.0 21.00 396.90 7.88 11.90
507 |
--------------------------------------------------------------------------------