├── .gitignore ├── ee6483.pdf ├── ee6497.pdf ├── images ├── GP.png ├── NN.png ├── TP.png ├── AUC.png ├── BFS.png ├── PCA1.png ├── PCA2.png ├── euler.png ├── Apriori.png ├── FP-Growth.png ├── H-8puzzles.png ├── alpha-beta.png ├── beta-alpha.png ├── data_goal.png ├── Backtracking.png ├── depth-first.png ├── bias & variance.png ├── breadth-first.png ├── Information_Gain.png └── over-under-fitting.png ├── dog&cat ├── .DS_Store ├── dog&cat.pdf ├── prediction_results.csv └── scripts.ipynb ├── LICENSE ├── README.md ├── preamble.tex ├── ee6483.tex └── ee6497.tex /.gitignore: -------------------------------------------------------------------------------- 1 | datasets/ 2 | dog&cat/datasets/ 3 | .DS_Store -------------------------------------------------------------------------------- /ee6483.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/ee6483.pdf -------------------------------------------------------------------------------- /ee6497.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/ee6497.pdf -------------------------------------------------------------------------------- /images/GP.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/GP.png -------------------------------------------------------------------------------- /images/NN.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/NN.png -------------------------------------------------------------------------------- /images/TP.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/TP.png -------------------------------------------------------------------------------- /images/AUC.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/AUC.png -------------------------------------------------------------------------------- /images/BFS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/BFS.png -------------------------------------------------------------------------------- /images/PCA1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/PCA1.png -------------------------------------------------------------------------------- /images/PCA2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/PCA2.png -------------------------------------------------------------------------------- /images/euler.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/euler.png -------------------------------------------------------------------------------- /dog&cat/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/dog&cat/.DS_Store -------------------------------------------------------------------------------- /images/Apriori.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/Apriori.png 
-------------------------------------------------------------------------------- /dog&cat/dog&cat.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/dog&cat/dog&cat.pdf -------------------------------------------------------------------------------- /images/FP-Growth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/FP-Growth.png -------------------------------------------------------------------------------- /images/H-8puzzles.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/H-8puzzles.png -------------------------------------------------------------------------------- /images/alpha-beta.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/alpha-beta.png -------------------------------------------------------------------------------- /images/beta-alpha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/beta-alpha.png -------------------------------------------------------------------------------- /images/data_goal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/data_goal.png -------------------------------------------------------------------------------- /images/Backtracking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/Backtracking.png 
-------------------------------------------------------------------------------- /images/depth-first.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/depth-first.png -------------------------------------------------------------------------------- /images/bias & variance.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/bias & variance.png -------------------------------------------------------------------------------- /images/breadth-first.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/breadth-first.png -------------------------------------------------------------------------------- /images/Information_Gain.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/Information_Gain.png -------------------------------------------------------------------------------- /images/over-under-fitting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yichatani/NTU_EE6497_83_Sheets/HEAD/images/over-under-fitting.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Jiangpeng LI 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | 
copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NTU_EE6497_83_Sheets 2 | 3 | __If you find it useful, a star🌟 will be very good of you❤️.__ 4 | 5 | --- 6 | 7 | __[2025/6/2]__ 8 | 9 | ![](images/GP.png) 10 | 11 | --- 12 | 13 | __[2025/5/8]__ 14 | 15 | Add code block in the __[EE6497 Cheatsheet](./ee6497.pdf)__. Thanks to wlp😑. 16 | 17 | --- 18 | 19 | __[2025/4/27]__ 20 | 21 | Update the __[EE6497 Cheatsheet](./ee6497.pdf)__. It's a complete version now. 22 | 23 | --- 24 | 25 | __[2025/4/23]__ 26 | 27 | Add the __[EE6497 Cheatsheet](./ee6497.pdf)__. It's the first edition, might be some mistakes. 28 | 29 | --- 30 | __[2025/4/5]__ 31 | 32 | Update the __[dog&cat](./dog&cat/)__ final project, source code and results. 33 | 34 | --- 35 | 36 | __[2024/12/1]__ 37 | 38 | __You can find the original PDF file here: [Cheatsheet](./ee6483.pdf).__ It is expected that there will be further updates. 39 | 40 | It was inspired by this [2023_NTU_EE7401_Cheat_Sheet](https://github.com/JiangpengLI86/2023_NTU_EE7401_Cheat_Sheet). 
41 | 42 | This cheat sheet is valid only when EE6483's exam is Restricted Open Book. 43 | 44 | The cheat sheet may have some mistakes, so check carefully when you use it. I apreciate it if you find some errors and edit and commit them in [main.tex](./ee6483.tex). 45 | 46 | --- 47 | 48 | ## License 49 | This project is licensed under the MIT License - see the [LICENSE](./LICENSE) file for details. 50 | -------------------------------------------------------------------------------- /dog&cat/prediction_results.csv: -------------------------------------------------------------------------------- 1 | id,label 2 | 1,0 3 | 2,0 4 | 3,0 5 | 4,1 6 | 5,1 7 | 6,0 8 | 7,1 9 | 8,1 10 | 9,1 11 | 10,1 12 | 11,1 13 | 12,0 14 | 13,0 15 | 14,1 16 | 15,1 17 | 16,1 18 | 17,1 19 | 18,0 20 | 19,1 21 | 20,1 22 | 21,0 23 | 22,0 24 | 23,0 25 | 24,0 26 | 25,0 27 | 26,1 28 | 27,1 29 | 28,1 30 | 29,1 31 | 30,0 32 | 31,0 33 | 32,1 34 | 33,0 35 | 34,0 36 | 35,1 37 | 36,1 38 | 37,0 39 | 38,1 40 | 39,0 41 | 40,0 42 | 41,1 43 | 42,1 44 | 43,0 45 | 44,1 46 | 45,1 47 | 46,0 48 | 47,0 49 | 48,0 50 | 49,1 51 | 50,1 52 | 51,1 53 | 52,1 54 | 53,1 55 | 54,0 56 | 55,0 57 | 56,1 58 | 57,0 59 | 58,0 60 | 59,1 61 | 60,0 62 | 61,0 63 | 62,0 64 | 63,1 65 | 64,1 66 | 65,0 67 | 66,1 68 | 67,1 69 | 68,1 70 | 69,0 71 | 70,1 72 | 71,1 73 | 72,0 74 | 73,0 75 | 74,0 76 | 75,1 77 | 76,1 78 | 77,0 79 | 78,0 80 | 79,0 81 | 80,0 82 | 81,0 83 | 82,1 84 | 83,1 85 | 84,0 86 | 85,0 87 | 86,0 88 | 87,1 89 | 88,1 90 | 89,1 91 | 90,1 92 | 91,0 93 | 92,0 94 | 93,1 95 | 94,1 96 | 95,1 97 | 96,0 98 | 97,1 99 | 98,0 100 | 99,0 101 | 100,0 102 | 101,0 103 | 102,1 104 | 103,0 105 | 104,1 106 | 105,1 107 | 106,1 108 | 107,0 109 | 108,0 110 | 109,1 111 | 110,0 112 | 111,0 113 | 112,1 114 | 113,1 115 | 114,1 116 | 115,1 117 | 116,1 118 | 117,1 119 | 118,1 120 | 119,1 121 | 120,0 122 | 121,0 123 | 122,1 124 | 123,1 125 | 124,0 126 | 125,0 127 | 126,1 128 | 127,1 129 | 128,0 130 | 129,1 131 | 130,1 132 | 131,0 133 | 132,0 134 | 133,0 135 
| 134,0 136 | 135,1 137 | 136,0 138 | 137,1 139 | 138,1 140 | 139,0 141 | 140,1 142 | 141,0 143 | 142,1 144 | 143,0 145 | 144,0 146 | 145,1 147 | 146,1 148 | 147,1 149 | 148,1 150 | 149,1 151 | 150,1 152 | 151,0 153 | 152,1 154 | 153,1 155 | 154,0 156 | 155,0 157 | 156,0 158 | 157,1 159 | 158,0 160 | 159,0 161 | 160,0 162 | 161,1 163 | 162,0 164 | 163,1 165 | 164,1 166 | 165,1 167 | 166,0 168 | 167,1 169 | 168,1 170 | 169,0 171 | 170,0 172 | 171,0 173 | 172,1 174 | 173,1 175 | 174,0 176 | 175,1 177 | 176,0 178 | 177,0 179 | 178,0 180 | 179,1 181 | 180,0 182 | 181,0 183 | 182,0 184 | 183,1 185 | 184,0 186 | 185,1 187 | 186,0 188 | 187,0 189 | 188,1 190 | 189,1 191 | 190,0 192 | 191,1 193 | 192,1 194 | 193,1 195 | 194,0 196 | 195,0 197 | 196,1 198 | 197,0 199 | 198,0 200 | 199,0 201 | 200,0 202 | 201,1 203 | 202,1 204 | 203,0 205 | 204,0 206 | 205,0 207 | 206,1 208 | 207,1 209 | 208,1 210 | 209,1 211 | 210,0 212 | 211,0 213 | 212,0 214 | 213,0 215 | 214,1 216 | 215,1 217 | 216,0 218 | 217,1 219 | 218,1 220 | 219,1 221 | 220,1 222 | 221,0 223 | 222,0 224 | 223,1 225 | 224,1 226 | 225,1 227 | 226,1 228 | 227,1 229 | 228,0 230 | 229,1 231 | 230,1 232 | 231,1 233 | 232,0 234 | 233,1 235 | 234,0 236 | 235,1 237 | 236,1 238 | 237,0 239 | 238,0 240 | 239,1 241 | 240,0 242 | 241,1 243 | 242,0 244 | 243,1 245 | 244,1 246 | 245,0 247 | 246,0 248 | 247,1 249 | 248,0 250 | 249,1 251 | 250,1 252 | 251,1 253 | 252,1 254 | 253,0 255 | 254,1 256 | 255,1 257 | 256,1 258 | 257,1 259 | 258,0 260 | 259,1 261 | 260,0 262 | 261,0 263 | 262,1 264 | 263,1 265 | 264,1 266 | 265,1 267 | 266,1 268 | 267,1 269 | 268,1 270 | 269,0 271 | 270,1 272 | 271,0 273 | 272,0 274 | 273,0 275 | 274,0 276 | 275,0 277 | 276,1 278 | 277,0 279 | 278,1 280 | 279,0 281 | 280,1 282 | 281,1 283 | 282,1 284 | 283,0 285 | 284,1 286 | 285,1 287 | 286,1 288 | 287,0 289 | 288,1 290 | 289,1 291 | 290,0 292 | 291,0 293 | 292,1 294 | 293,1 295 | 294,0 296 | 295,1 297 | 296,0 298 | 297,1 299 | 298,1 300 | 299,1 301 | 300,0 
302 | 301,1 303 | 302,0 304 | 303,1 305 | 304,0 306 | 305,0 307 | 306,0 308 | 307,0 309 | 308,1 310 | 309,1 311 | 310,1 312 | 311,1 313 | 312,0 314 | 313,1 315 | 314,0 316 | 315,0 317 | 316,0 318 | 317,0 319 | 318,0 320 | 319,1 321 | 320,1 322 | 321,1 323 | 322,1 324 | 323,1 325 | 324,0 326 | 325,1 327 | 326,0 328 | 327,0 329 | 328,0 330 | 329,0 331 | 330,0 332 | 331,0 333 | 332,0 334 | 333,1 335 | 334,1 336 | 335,1 337 | 336,1 338 | 337,1 339 | 338,0 340 | 339,1 341 | 340,0 342 | 341,0 343 | 342,1 344 | 343,1 345 | 344,0 346 | 345,1 347 | 346,1 348 | 347,1 349 | 348,0 350 | 349,0 351 | 350,1 352 | 351,0 353 | 352,1 354 | 353,1 355 | 354,1 356 | 355,0 357 | 356,0 358 | 357,1 359 | 358,0 360 | 359,0 361 | 360,0 362 | 361,0 363 | 362,1 364 | 363,0 365 | 364,0 366 | 365,1 367 | 366,1 368 | 367,0 369 | 368,0 370 | 369,0 371 | 370,1 372 | 371,1 373 | 372,1 374 | 373,1 375 | 374,0 376 | 375,1 377 | 376,0 378 | 377,1 379 | 378,1 380 | 379,0 381 | 380,1 382 | 381,0 383 | 382,1 384 | 383,0 385 | 384,1 386 | 385,0 387 | 386,1 388 | 387,1 389 | 388,1 390 | 389,1 391 | 390,0 392 | 391,1 393 | 392,0 394 | 393,1 395 | 394,0 396 | 395,1 397 | 396,0 398 | 397,1 399 | 398,0 400 | 399,0 401 | 400,0 402 | 401,1 403 | 402,0 404 | 403,0 405 | 404,1 406 | 405,0 407 | 406,0 408 | 407,1 409 | 408,1 410 | 409,0 411 | 410,0 412 | 411,0 413 | 412,0 414 | 413,1 415 | 414,0 416 | 415,0 417 | 416,0 418 | 417,0 419 | 418,1 420 | 419,0 421 | 420,0 422 | 421,1 423 | 422,0 424 | 423,1 425 | 424,1 426 | 425,1 427 | 426,1 428 | 427,0 429 | 428,0 430 | 429,1 431 | 430,1 432 | 431,0 433 | 432,0 434 | 433,0 435 | 434,0 436 | 435,0 437 | 436,0 438 | 437,0 439 | 438,1 440 | 439,0 441 | 440,1 442 | 441,0 443 | 442,1 444 | 443,0 445 | 444,0 446 | 445,0 447 | 446,1 448 | 447,0 449 | 448,1 450 | 449,1 451 | 450,0 452 | 451,1 453 | 452,1 454 | 453,0 455 | 454,1 456 | 455,1 457 | 456,0 458 | 457,0 459 | 458,1 460 | 459,1 461 | 460,1 462 | 461,0 463 | 462,0 464 | 463,0 465 | 464,0 466 | 465,1 467 | 466,1 468 | 
467,1 469 | 468,0 470 | 469,1 471 | 470,1 472 | 471,1 473 | 472,1 474 | 473,1 475 | 474,1 476 | 475,1 477 | 476,1 478 | 477,0 479 | 478,0 480 | 479,0 481 | 480,0 482 | 481,0 483 | 482,0 484 | 483,0 485 | 484,0 486 | 485,1 487 | 486,0 488 | 487,0 489 | 488,0 490 | 489,1 491 | 490,0 492 | 491,1 493 | 492,1 494 | 493,0 495 | 494,1 496 | 495,1 497 | 496,0 498 | 497,0 499 | 498,0 500 | 499,0 501 | 500,1 502 | -------------------------------------------------------------------------------- /preamble.tex: -------------------------------------------------------------------------------- 1 | % Encoding Settings 2 | \usepackage[T1]{fontenc} 3 | \usepackage[utf8]{inputenc} 4 | 5 | 6 | 7 | % Font Settings 8 | \usepackage{times} 9 | 10 | % Define a new font called Tinyb. This font can maintain its shape even in very small fontsize: 11 | % \usepackage{lmodern} 12 | % \rmfamily 13 | % \DeclareFontShape{T1}{lmr}{bx}{sc}{<-> cmr10}{}% USE BOLD SCSHAPE NOT OTHERWISE DEFINED 14 | % %%% MATH FONT FIX 15 | % \DeclareFontFamily{OML}{zlmm}{} 16 | % \DeclareFontShape{OML}{zlmm}{m}{it}{<-> lmmi10}{} 17 | % \DeclareFontShape{OML}{zlmm}{b}{it}{<->ssub * zlmm/m/it}{} 18 | % \DeclareFontShape{OML}{zlmm}{bx}{it}{<->ssub * zlmm/m/it}{} 19 | % \DeclareMathVersion{Tinyb} 20 | % \SetSymbolFont{operators}{Tinyb}{T1}{lmr}{bx}{sc} 21 | % \SetSymbolFont{letters}{Tinyb}{OML}{zlmm}{m}{it} 22 | % \newenvironment{tinyb}{\bgroup\tiny\bfseries\scshape\mathversion{Tinyb}}{\egroup} 23 | 24 | 25 | 26 | % Paragraph Settings 27 | \usepackage{setspace} 28 | \onehalfspacing 29 | \usepackage{parskip} 30 | \setlength{\parindent}{0in} 31 | 32 | 33 | 34 | % Reference Settings 35 | \usepackage[ 36 | backend=biber, 37 | style=ieee, 38 | ]{biblatex} 39 | \addbibresource{References.bib} 40 | 41 | % Elegantly break long doi field 42 | % \setcounter{biburllcpenalty}{100} 43 | % \setcounter{biburlucpenalty}{100} 44 | % \setcounter{biburlnumpenalty}{100} 45 | 46 | 47 | 48 | % Figure Settings 49 | \usepackage{graphicx} % 
Required for inserting images 50 | \renewcommand{\figurename}{\textbf{Fig.}} 51 | 52 | \usepackage{float} 53 | 54 | % Enable two figures in one line: 55 | % \usepackage{subfig} 56 | % \begin{figure}[htbp!] 57 | % \centering 58 | % \subfloat[Before re-initialization]{\includegraphics[width=0.5\textwidth]{Figures/Before re-initialization.pdf}\label{fig:f1}} 59 | % \hfill 60 | % \subfloat[After re-initialization]{\includegraphics[width=0.5\textwidth]{Figures/After re-initialization.pdf}\label{fig:f2}} 61 | % \centering 62 | % \caption{Swarm distribution before and after reinitializing transitional particles on a one-dimensional problem} 63 | % \label{fig:before_and_after_reinitialization} 64 | % \end{figure} 65 | 66 | 67 | 68 | % Array Settings 69 | \usepackage{array} 70 | \newcolumntype{R}{>{$}r<{$}} % math-mode version of "r" column type 71 | \newcolumntype{C}{>{$}c<{$}} % math-mode version of "c" column type 72 | \newcolumntype{L}{>{$}l<{$}} % math-mode version of "l" column type 73 | 74 | % defines a new type of column called Y based on a X column (this column type is defined by the tabularx package and it is basically a p{ } column, where is calculated by the package) but typesets the content using \small font size and with ragged-right text. 75 | % \newcolumntype{Y}{>{\small\raggedright\arraybackslash}X} 76 | 77 | % Modify the space on the bottom and top of each cell: 78 | \usepackage{cellspace} 79 | % \addtolength{\cellspacetoplimit}{5pt} 80 | % \addtolength{\cellspacebottomlimit}{5pt} 81 | 82 | \usepackage{multirow} 83 | \usepackage{tabularx,booktabs} 84 | \usepackage{longtable} 85 | \usepackage{relsize} 86 | 87 | % \renewcommand{\thetable}{S\arabic{table}} % Rename the table names to Table Sx. 88 | 89 | % Equally spread columns to fulfill the whole table. 
90 | % \begin{longtable}[c]{@{\extracolsep{\fill}}Lllllllll} 91 | 92 | % Define a horizontal line that only appears in specific columns: 93 | % \usepackage{hhline} 94 | % \hhline{~----~~} % Use as \hline, but the column with ~ will not have a horizontal line. 95 | 96 | 97 | 98 | % Hyper-reference Settings 99 | \usepackage{hyperref} 100 | \hypersetup{ 101 | colorlinks=true, 102 | linkcolor=cyan, 103 | citecolor=cyan, 104 | urlcolor=cyan, 105 | } 106 | % \usepackage[all]{hypcap} 107 | % \makeatletter 108 | % \AtBeginDocument{\def\@citecolor{cyan}} % Define citing 109 | % \AtBeginDocument{\def\@urlcolor{cyan}} 110 | % \AtBeginDocument{\def\@linkcolor{cyan}} 111 | % \makeatother 112 | 113 | % Make the brackets of equation citation blue: 114 | % \hyperref[eq:clpso_velocity]{(\ref*{eq:clpso_velocity})} 115 | 116 | 117 | 118 | % Mathematical Settings 119 | \usepackage{mathtools} 120 | \usepackage{amssymb,mathrsfs} % Typical maths resource packages 121 | \usepackage{amsthm} 122 | \usepackage{amsmath} 123 | \usepackage{nccmath} % To narrow parskip between two equations. 
\useshortskip 124 | 125 | 126 | 127 | % Equation Settings 128 | % \counterwithout{equation}{chapter} 129 | 130 | % To use the large bracket on one side of equation: 131 | % \begin{equation} 132 | % \label{eq:sum} 133 | % Sum = 134 | % \begin{cases} 135 | % Y_1 + Y_2 + Y_3, & \theta = 3 \\ 136 | % Y_1 + Y_2 + \cdots + Y_8, & \theta = 8 137 | % \end{cases} 138 | % \end{equation} 139 | 140 | 141 | 142 | % Algorithm Settings 143 | \usepackage{algorithmic} 144 | % \usepackage{algpseudocodex} 145 | \renewcommand{\algorithmicrequire}{\textbf{Input:}} 146 | \renewcommand{\algorithmicensure}{\textbf{Output:}} 147 | 148 | 149 | 150 | % Enumerate Settings 151 | % \usepackage{enumitem} 152 | % \begin{enumerate}[label={(\arabic*).}] 153 | % \item XXX 154 | % \item XXX 155 | % \end{enumerate} 156 | 157 | 158 | % Line Number Settings 159 | \usepackage{lineno} 160 | % \linenumbers % Uncomment this line to turn on the line number settings 161 | 162 | 163 | 164 | % Code Settings 165 | % \usepackage{courier} 166 | % \usepackage{minted} 167 | 168 | 169 | % Subfile Settings 170 | % \usepackage{subfiles} 171 | % \providecommand{\topdir}{.} 172 | % \addglobalbib{\topdir/References.bib} 173 | 174 | 175 | 176 | % Attach File Settings 177 | \usepackage{attachfile} 178 | % \attachfile[icon=Paperclip]{Test.pdf} 179 | 180 | 181 | 182 | % Additional Settings 183 | 184 | % Define a checkbox: 185 | % \newcommand{\checkbox}[1]{% 186 | % \ifnum#1=1 187 | % \makebox[0pt][l]{\raisebox{0.15ex}{\hspace{0.1em}$\checkmark$}}% 188 | % \fi 189 | % $\square$% 190 | % } -------------------------------------------------------------------------------- /ee6483.tex: -------------------------------------------------------------------------------- 1 | \documentclass[12pt]{article} 2 | \usepackage[a4paper,top=1pt,bottom=2pt,left=1pt,right=1pt,marginparwidth=1pt,headheight=1pt]{geometry} 3 | 4 | \input{preamble.tex} 5 | 6 | \usepackage{blindtext} 7 | \usepackage{multicol} 8 | \usepackage{color} 9 | 
\setlength{\columnsep}{0.2cm} 10 | \setlength{\columnseprule}{1pt} 11 | \def\columnseprulecolor{\color{black}} 12 | 13 | \usepackage{xpatch} 14 | \xpatchcmd{\NCC@ignorepar}{% 15 | \abovedisplayskip\abovedisplayshortskip} 16 | {% 17 | \abovedisplayskip\abovedisplayshortskip% 18 | \belowdisplayskip\belowdisplayshortskip} 19 | {}{} 20 | 21 | \setlength{\parindent}{0in} 22 | \setlength{\parskip}{0in} 23 | 24 | \setlength{\belowdisplayskip}{0pt} \setlength{\belowdisplayshortskip}{0pt} 25 | \setlength{\abovedisplayskip}{0pt} \setlength{\abovedisplayshortskip}{0pt} 26 | 27 | 28 | 29 | \usepackage{soul} 30 | \usepackage[dvipsnames]{xcolor} 31 | \newcommand{\bulletPoint}[1]{\ul{\textit{\textbf{#1}}}} 32 | 33 | 34 | % For better text align 35 | \usepackage{ragged2e} 36 | 37 | 38 | 39 | \begin{document} 40 | \singlespacing 41 | 42 | 43 | \begin{multicols*}{2} 44 | 45 | \scriptsize 46 | 47 | \raggedright 48 | 49 | \bulletPoint{Knowledge Representation}: 50 | 51 | Capturing knowledge in a way suitable for computer manipulation. 52 | 53 | – Predicate Calculus/Neural Network; 54 | – Graph / Tree 55 | 56 | % \bulletPoint{Search}: 57 | 58 | % Problem-solving techniques. systematically explores a space of problem states. 59 | 60 | % \bulletPoint{State Space Representation (SSR)}: 61 | 62 | % Define possible States; Transitions between these states based on Rules: observation/actions. SSR is used to model the different configurations a system can be in; and find the possible paths to reach a solution. 63 | 64 | 65 | \bulletPoint{Euler's Conclusion}: 66 | 67 | Unless a graph contained either exactly 0 or 2 nodes of odd degree, a walk over a graph in the manner described by the bridges of Konigsberg problem is impossible. 68 | 69 | %\includegraphics[scale=0.3]{euler.png} 70 | 71 | % \bulletPoint{State Space Search}: 72 | % A state space is represented by a four-tuple [N, A, S, GD],where: 73 | 74 | % – N is the set of nodes or states of the graph. 
These correspond to the states in a problem-solving process 75 | 76 | % – A is the set of arcs (or links) between nodes. These correspond to the steps in a problem-solving process 77 | 78 | % – S, a nonempty subset of N, contains the start state(s) of the problem 79 | 80 | % – GD, a nonempty subset of N, contains the goal state(s) or the problem. The states in GD are described using either: 81 | 82 | % • a measurable property of the states visited in the search 83 | 84 | % • a property of the path developed in the search 85 | 86 | 87 | % \bulletPoint{Strategies for State Space Search}: 88 | 89 | % A state space is represented by a four-tuple [N, A, S, GD],where: 90 | 91 | % – N is the set of nodes or states of the graph. These correspond to the states in a problem-solving process 92 | 93 | % – A is the set of arcs (or links) between nodes. These correspond to the steps in a problem-solving process 94 | 95 | % – S, a nonempty subset of N, contains the start state(s) of the problem 96 | 97 | % – GD, a nonempty subset of N, contains the goal state(s) or the problem. The states in GD are described using either: 98 | 99 | % • a measurable property of the states visited in the search 100 | 101 | % • a property of the path developed in the search 102 | 103 | \bulletPoint{Graph Terminologies}: 104 | 105 | – Node/Arch/Path/Tree; 106 | – Directed/Rooted Graphs; 107 | – Parent, Siblings/Ancestor/Descendant 108 | 109 | \bulletPoint{State Space Approach Examples}: 110 | 111 | – Tic-Tac-Toe/8-puzzle; 112 | – TSP: The number of possible ways to visit N cities, (N-1)! 113 | 114 | %\bulletPoint{Data-driven and Goal-driven}: 115 | 116 | %\includegraphics[scale=0.15]{data_goal.png} 117 | 118 | \bulletPoint{Backtracking}: 119 | 120 | – Depth-first search for CSPs; 121 | – Basic uninformed search for CSPs 122 | 123 | Notations: 124 | 125 | – CS = Current State (the state currently under consideration) 126 | 127 | – SL = State List (the list of states in the current path being pursued. 
If a 128 | goal is found, SL contains the ordered list of states on the solution path) 129 | 130 | – NSL = New State List (the list of new states contains nodes awaiting 131 | evaluation, i.e., nodes whose descendants have not yet been generated 132 | and searched) (Unprocessed states) 133 | 134 | – DE = Dead Ends (the list of states whose descendants have failed to 135 | contain a goal node. If these states are encountered again, they will be 136 | deleted as elements of DE and eliminated) 137 | 138 | – CS (Current State) is always equal to the state most recently added to SL 139 | and represents the "frontier" of the solution path currently being explored. 140 | 141 | \includegraphics[scale=0.31]{images/Backtracking.png} 142 | 143 | \bulletPoint{Breadth-first}: 144 | 145 | – open - states that have been generated but whose children have not been examined. Right in, left out; first-in-first-out. (FIFO) 146 | 147 | – closed - states that have already been examined. Add from the left. 148 | 149 | – Memory used: $B ^ n$ 150 | 151 | \includegraphics[scale=0.3]{images/breadth-first.png} 152 | 153 | \bulletPoint{Depth-first}: 154 | 155 | – open is maintained as a stack, or last-in-first-out (LIFO) structure. Open is similar to NSL in backtrack 156 | 157 | – closed - states that have already been examined. A union of DE and SL in backtrack 158 | 159 | – Memory used: $B * n$ 160 | 161 | \includegraphics[scale=0.3]{images/depth-first.png} 162 | 163 | \bulletPoint{Depth-First with Iterative Deepening}: 164 | 165 | Depth bound from 1, and increase by one each time. 166 | 167 | 168 | \bulletPoint{Uninformed}: 169 | 170 | BFS: $b^d$, $b^d$; DFS: $b^m$, $b*m$; IDS: $b^d$, $b*d$.
171 | 172 | b- maximum branching factor of the search tree; d- depth of the least-cost solution; m- maximum depth of the state space (may be unlimited ) 173 | 174 | \bulletPoint{Informed}: 175 | Hill-Climbing, Best-First(Greedy), A* 176 | 177 | \bulletPoint{Heuristic Search}: 178 | 179 | – Hill-Climbing 180 | 181 | – Best-First-Search: 182 | 183 | \includegraphics[scale=0.3]{images/BFS.png} 184 | 185 | - Evaluation function f(n)=g(n)+h(n): 186 | 187 | g(n) = cost so far to reach n; 188 | h(n) = estimated cost to goal from n; 189 | f(n) = estimated total cost of path through n to goal. 190 | 191 | – When g(n)=0, Greedy Best-First; 192 | – A* search is optimal, when h(n) is admissible. h(n) is always under-estimated/same as the actual cost from n to a goal. 193 | 194 | \bulletPoint{Minimax}: 195 | 196 | – ALPHA-BETA pruning: Directly prune the whole right node. 197 | 198 | \includegraphics[scale=0.17]{images/alpha-beta.png} 199 | \includegraphics[scale=0.16]{images/beta-alpha.png} 200 | 201 | \bulletPoint{Association rule}: 202 | 203 | An \textit{association rule} is an implication of the form \( X \rightarrow Y \), where \( X \subseteq I \), \( Y \subseteq I \), and \( X \cap Y = \emptyset \), e.g., \(\{ \text{Diaper, Milk} \} \rightarrow \{ \text{Beer} \}\), 204 | $\text{Support}(X) = \frac{\sigma(X)}{|T|} = P(X) \quad \textit{Support of itemset } X: \textit{the Probability of } X$ 205 | $\text{Support}(X \rightarrow Y) = \frac{\sigma(X \cup Y)}{|T|} = P(X \cup Y)$ 206 | $\text{Confidence}(X \rightarrow Y) = \frac{\sigma(X \cup Y)}{\sigma(X)} = \frac{P(X \cup Y)}{P(X)} = P(Y \mid X)$ 207 | There are a total of $3^d - 2^{d + 1} + 1$ possible rules for a dataset 208 | containing $d$ items. $2^{d}-1$ item sets. 
209 | 210 | \bulletPoint{The Apriori Principle}: 211 | 212 | \includegraphics[scale=0.3]{images/Apriori.png} 213 | 214 | \bulletPoint{The FP-Growth Algorithm}: 215 | 216 | \includegraphics[scale=0.3]{images/FP-Growth.png} 217 | 218 | \bulletPoint{Association Rule Generation}: 219 | 220 | if \(\frac{\sigma(Y)}{\sigma(X)} \geq \textit{minconf}\): 221 | $X\subset Y$, \( X \rightarrow Y -X\). If \( |Y| = k \), then there are \( 2^k - 2 \) candidate association rules (ignoring: \( Y \rightarrow \emptyset \) and \( \emptyset \rightarrow Y \)). 222 | 223 | Lift is a simple correlation measure between two item sets \(X\) and \(Y\), defined as 224 | 225 | $\text{Lift}(X, Y) = \frac{\text{Confidence}(X \rightarrow Y)}{\text{Support}(Y)} = \frac{P(X \cup Y)}{P(X)P(Y)} = \frac{P(Y \mid X)}{P(Y)}$ 226 | 227 | where 228 | $\text{Lift}(X, Y) = 229 | \begin{cases} 230 | 1, & \text{if } X \text{ and } Y \text{ are independent;} \\ 231 | >1, & \text{if } X \text{ and } Y \text{ are positively correlated;} \\ 232 | <1, & \text{if } X \text{ and } Y \text{ are negatively correlated.} 233 | \end{cases}$ 234 | 235 | \bulletPoint{Information Gain}: 236 | 237 | The amount of information in \( D \) with \( m \) distinct classes can be defined as: 238 | 239 | $\text{Info}(D) = - \sum_{i=1}^{m} p_i \log_2(p_i)$ 240 | 241 | If attribute \(A\) is used to split \(D\) into \(v\) subsets, \(\{D_1, D_2, \ldots, D_v\}\), the resulting information is 242 | 243 | $\text{Info}_A(D) = \sum_{j=1}^{v} \frac{|D_j|}{|D|} \times \text{Info}(D_j)$ 244 | 245 | Information gain is defined as the difference between the original information (before splitting) and the remaining information (after splitting \(D\) by \(A\)): 246 | 247 | $\text{Gain}(A) = \text{Info}(D) - \text{Info}_A(D)$ 248 | 249 | \includegraphics[scale=0.36]{images/Information_Gain.png} 250 | 251 | \quad 252 | 253 | $SplitInfo_A(D) = -\sum_{j=1}^{v} \frac{|D_j|}{|D|} \log_2\left(\frac{|D_j|}{|D|}\right)$ 254 | 255 | $GainRatio_A(D) = 
\frac{\text{Gain}(A)}{\text{SplitInfo}_A(D)}$ 256 | 257 | \bulletPoint{Gini Index}: 258 | 259 | $\text{Gini}(D) = 1 - \sum_{i=1}^{2} p_i^2 \quad \text{Gini}_A(D) = \frac{|D_1|}{|D|} \text{Gini}(D_1) + \frac{|D_2|}{|D|} \text{Gini}(D_2)$ 260 | 261 | $\Delta \text{Gini}(A) = \text{Gini}(D) - \text{Gini}_A(D)$ 262 | 263 | \bulletPoint{Evaluating Classifier Performance}: 264 | 265 | \includegraphics[scale=0.4]{images/TP.png} 266 | 267 | $\text{Accuracy} = \frac{TP + TN}{P + N} \quad \text{Error rate} = \frac{FP + FN}{P + N} = 1 - \text{Accuracy}$ 268 | 269 | $ \text{Sensitivity} = \frac{TP}{P} \quad \text{Specificity} = \frac{TN}{N} $ 270 | 271 | $ \text{Accuracy} = \text{Sensitivity} \times \left(\frac{P}{P+N}\right) + \text{Specificity} \times \left(\frac{N}{P+N}\right) $ 272 | 273 | $ \text{Precision} = \frac{TP}{TP + FP} = \frac{TP}{P'} $ 274 | 275 | $\text{Recall} = \frac{TP}{TP + FN} = \frac{TP}{P} = TPR \quad \frac{FP}{N} =FPR $ 276 | 277 | $ F = \frac{2 \times \text{Precision} \times \text{Recall}}{\text{Precision} + \text{Recall}} $ 278 | 279 | %\includegraphics[scale=0.25]{AUC.png} 280 | 281 | \bulletPoint{Dissimilarity and Similarity Measures}: 282 | 283 | 1. Minkowski distance: 284 | $d(\mathbf{x}, \mathbf{y}) = \left( \sum_{k=1}^n |x_k - y_k|^r \right)^{1/r}$ 285 | 286 | 2. Manhattan distance ($r = 1$): 287 | $d(\mathbf{x}, \mathbf{y}) = \sum_{k=1}^n |x_k - y_k|$ 288 | 289 | 3. Euclidean distance ($r = 2$): 290 | $d(\mathbf{x}, \mathbf{y}) = \sqrt{\sum_{k=1}^n (x_k - y_k)^2}$ 291 | 292 | 4. Cosine similarity: 293 | $ \cos(\mathbf{x}, \mathbf{y}) = \frac{\mathbf{x} \cdot \mathbf{y}}{\|\mathbf{x}\| \|\mathbf{y}\|} = \frac{\sum_{i=1}^n x_i y_i}{\sqrt{\sum_{i=1}^n x_i^2} \sqrt{\sum_{i=1}^n y_i^2}}$ 294 | 295 | 5. Infinity (Sup) Distance: 296 | $ d(\mathbf{x}, \mathbf{y}) = \max_{1 \leq j \leq d} |x_j - y_j| $ 297 | 298 | \bulletPoint{SVM}: 299 | 300 | $ y_i = \text{sign}(\mathbf{w} \cdot \mathbf{x}_i + b). \quad d = \frac{2}{\|\mathbf{w}\|}.
$ 301 | 302 | $ \min_{\mathbf{w}} \frac{\|\mathbf{w}\|^2}{2} \quad \text{subject to} \quad y_i(\mathbf{w} \cdot \mathbf{x}_i + b) \geq 1, \; i = 1, 2, \dots, N. $ 303 | 304 | – Dual optimization problem 305 | 306 | $ \mathbf{w} = \sum_{i=1}^N \lambda_i y_i \mathbf{x}_i, \quad \sum_{i=1}^N \lambda_i y_i = 0. $ 307 | 308 | $ y_i(\mathbf{w} \cdot \mathbf{x}_i + b) - 1 = 0. $ 309 | 310 | 311 | \bulletPoint{Neural Network}: 312 | 313 | – Activation Functions: 314 | 315 | Linear: $ \sigma(x) = x \quad$ 316 | Sigmoid: $ \sigma(x) = \frac{1}{1 + e^{-ax}} $ \quad Tanh: $ \sigma(x) = \tanh(\gamma x) = \frac{e^{2\gamma x} - 1}{e^{2\gamma x} + 1} $ 317 | 318 | Sign: $ \sigma(x) = \text{sign}(x) = 319 | \begin{cases} 320 | +1, & x \geq 0 \\ 321 | -1, & x < 0 322 | \end{cases} $ \quad ReLU: $ \sigma(x) = 323 | \begin{cases} 324 | x, & x \geq 0 \\ 325 | 0, & x < 0 326 | \end{cases} = \max(0, x) $ 327 | 328 | Leaky ReLU: $ \sigma(x) = 329 | \begin{cases} 330 | x, & x \geq 0 \\ 331 | ax, & x < 0 332 | \end{cases} = \max(ax, x), \quad \text{where } a \ll 1 $ 333 | 334 | – Gradient Descent: 335 | $ \mathbf{w}' = \mathbf{w} - \eta \frac{\partial J(\mathbf{w})}{\partial \mathbf{w}} $ 336 | 337 | – Back propagation Algorithm: 338 | 339 | % $ E = \frac{1}{2} \sum_{k=1}^c (t_k - o_k)^2 = \frac{1}{2} \| \mathbf{t} - \mathbf{o} \|^2 \quad 340 | % \Delta w_{jk} = -\eta_w \frac{\partial E}{\partial w_{jk}} \quad 341 | % \Delta b_j = -\eta_b \frac{\partial E}{\partial b_j} $ 342 | 343 | \includegraphics[scale=0.38]{images/NN.png} 344 | 345 | % $ w_{kj} = w_{kj} + \Delta w_{kj}, \quad \text{where} \quad \Delta w_{kj} = \eta_w \delta_k o_j $ 346 | 347 | % $ w_{ji} = w_{ji} + \Delta w_{ji}, \quad \text{where} \quad \Delta w_{ji} = \eta_w \delta_j o_i $ 348 | 349 | % $ b_j = b_j + \Delta b_j, \quad \text{where} \quad \Delta b_j = \eta_b \delta_j $ 350 | 351 | % $\text{net}_j = \sum_i w_{ji} o_i + b_j \quad o_j = \sigma(\text{net}_j) $ 352 | 353 | $ \delta_k = \sigma'(\text{net}_k)(t_k - o_k), 
\quad \text{for output units} $ 354 | 355 | $ \delta_j = \sigma'(\text{net}_j) \sum_k \delta_k w_{kj}, \quad \text{for hidden units} $ 356 | 357 | \quad 358 | 359 | $ 360 | \frac{\partial E}{\partial w_{kj}} = \frac{\partial E}{\partial o_k} \cdot \frac{\partial o_k}{\partial \text{net}_k} \cdot \frac{\partial \text{net}_k}{\partial w_{kj}} 361 | = \frac{\partial E}{\partial o_k} \cdot \sigma'(\text{net}_k) \cdot \frac{\partial (\sum_j w_{kj} o_j + b_k)}{\partial w_{kj}} 362 | = -(t_k - o_k) \cdot \sigma'(\text{net}_k) \cdot o_j 363 | = -\delta_k \cdot o_j 364 | $ 365 | 366 | $ \frac{\partial E}{\partial w_{ji}} = \frac{\partial E}{\partial o_j} \cdot \frac{\partial o_j}{\partial \text{net}_j} \cdot \frac{\partial \text{net}_j}{\partial w_{ji}} 367 | = \frac{\partial E}{\partial o_j} \cdot \sigma'(\text{net}_j) \cdot \frac{\partial \left(\sum_i w_{ji} o_i + b_j\right)}{\partial w_{ji}} 368 | = -\left(\sum_k \delta_k w_{kj}\right) \cdot \sigma'(\text{net}_j) \cdot o_i 369 | = -\delta_j \cdot o_i $ 370 | 371 | \bulletPoint{CNN}: 372 | 373 | – Stride: steps per moving. – Zero padding : pads the input with zeros around the border. – Pooling: Max: max one within filter size; Average: average within filter size. 374 | 375 | – Regularization: $ J'(\mathbf{w}) = J(\mathbf{w}) + \alpha R(\mathbf{w}) $ 376 | 377 | L1 Regularization (LASSO): $ R(\mathbf{w}) = \|\mathbf{w}\|_1 = \sum_k |w_k| $ 378 | 379 | L2 Regularization (Ridge): $ R(\mathbf{w}) = \|\mathbf{w}\|_2^2 = \sum_k (w_k)^2 $ 380 | 381 | Elastic Net Regularization: $ R(\mathbf{w}) = \|\mathbf{w}\|_1 + \beta \|\mathbf{w}\|_2^2 $ 382 | 383 | Also can be done by early stopping. 384 | 385 | \bulletPoint{Clustering}: 386 | 387 | – Use Euclidean Distance: $d(\mathbf{x}, \mathbf{y}) = \sqrt{\sum_{k=1}^n (x_k - y_k)^2}$ 388 | 389 | – K-Means: 1. Initialize K random points as K clusters' centers. 2. Assign every point to its cluster by which center it nearest. 3. Calculate each clusters' average again to set as new center. 
4. Repeat 2-3 until no point assignments change. Initialization influences the results. 390 | 391 | – HAC: 392 | --Single Linkage: the minimum distance between any pair of two data samples from each cluster. 393 | --Complete Linkage: the maximum distance between any pair of two data samples from each cluster. 394 | --Average Linkage: the average distance between all pairs of two data samples from each cluster. 395 | --Centroid Distance: the distance between the means of data samples (i.e., centroids) from each cluster. 396 | 397 | • What are the limitations of the K-Means algorithm? 398 | Need to choose K. Can get stuck at a poor local minimum. Need a good metric. 399 | • What are the limitations of the HAC algorithm? 400 | Memory- and computationally-intensive. 401 | 402 | \bulletPoint{Regression}: 403 | 404 | $ f_{w,b}(\mathbf{x}) = \mathbf{w}^T \mathbf{x} + b $ 405 | 406 | Minimize the \( l_2 \) loss: $ \min_{\mathbf{w}, b} \hat{L}(f_{w,b}) = \min_{\mathbf{w}, b} \frac{1}{N} \sum_{i=1}^N \left( \mathbf{w}^T \mathbf{x}_i + b - y_i \right)^2 $ 407 | 408 | $ \text{Loss function: mean squared error between } \mathbf{w}^T \mathbf{x}_i + b \text{ and } y_i. $ 409 | 410 | 411 | \bulletPoint{Bias and Variance}: 412 | 413 | – Bias: Error caused by the wrong assumptions made in the learning algorithms or models. 414 | 415 | – Variance: Error due to the learning sensitivity to small fluctuations in the training set. 416 | 417 | \includegraphics[scale=0.27]{images/bias & variance.png} 418 | 419 | – Underfitting: High bias and low variance. 420 | 421 | – Overfitting: Low bias and high variance. 
422 | 423 | \includegraphics[scale=0.235]{images/over-under-fitting.png} 424 | 425 | 426 | \bulletPoint{PCA}: 427 | 428 | \includegraphics[scale=0.35]{images/PCA1.png} 429 | \includegraphics[scale=0.26]{images/PCA2.png} 430 | 431 | 432 | \bulletPoint{Bayes’ Theorem}: 433 | 434 | $ P(A \mid B) = \frac{P(B \mid A) P(A)}{P(B)} $ 435 | 436 | $ P(B) = \sum_i P(B \mid A_i) P(A_i) $ 437 | 438 | \bulletPoint{Naïve Bayes}: 439 | 440 | $ P(a_1, \dots, a_d \mid v_j) = P(a_1 \mid v_j) \cdots P(a_d \mid v_j) = \prod_{i=1}^d P(a_i \mid v_j) $ 441 | 442 | 443 | \end{multicols*} 444 | 445 | \end{document} -------------------------------------------------------------------------------- /ee6497.tex: -------------------------------------------------------------------------------- 1 | \documentclass[10pt]{article} 2 | \usepackage[a4paper,top=1pt,bottom=2pt,left=1pt,right=1pt,marginparwidth=1pt,headheight=1pt]{geometry} 3 | \input{preamble.tex} 4 | \usepackage{blindtext} 5 | \usepackage{multicol} 6 | \usepackage{color} 7 | \usepackage{amsmath, amssymb,amsfonts, listings} 8 | \usepackage{bbm} 9 | \usepackage{listings} 10 | \usepackage{courier} 11 | \usepackage{enumitem} 12 | \setlength{\columnsep}{0.2cm} 13 | \setlength{\columnseprule}{1pt} 14 | \def\columnseprulecolor{\color{black}} 15 | \usepackage{xpatch} 16 | \xpatchcmd{\NCC@ignorepar}{% 17 | \abovedisplayskip\abovedisplayshortskip} 18 | { 19 | \abovedisplayskip\abovedisplayshortskip% 20 | \belowdisplayskip\belowdisplayshortskip} 21 | {}{} 22 | \setlength{\parindent}{0in} 23 | \setlength{\parskip}{0in} 24 | \setlength{\belowdisplayskip}{0pt} \setlength{\belowdisplayshortskip}{0pt} 25 | \setlength{\abovedisplayskip}{0pt} \setlength{\abovedisplayshortskip}{0pt} 26 | \usepackage{soul} 27 | \usepackage[dvipsnames]{xcolor} 28 | \newcommand{\bulletPoint}[1]{\ul{\textit{\textbf{#1}}}} 29 | % For better text align 30 | \usepackage{ragged2e} 31 | \begin{document} 32 | \singlespacing 33 | \begin{multicols*}{3} 34 | \scriptsize 35 | 
\raggedright 36 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 37 | % \section*{Introduction} 38 | 39 | % \includegraphics[scale=0.3]{images/breadth-first.png} 40 | 41 | % \bulletPoint{Famous Pdfs}:\quad 42 | 43 | % $\text{Bern}(x | \theta) = \theta^x (1 - \theta)^{1-x}=\theta^{\mathbbm{1}\{x=1\}} (1 - \theta)^{\mathbbm{1}\{x=0\}}$. 44 | % $\text{Unif}(x | a, b) = \frac{1}{b-a} \mathbbm{1}\{a \leq x \leq b\}$.\quad 45 | % $\text{Exp}(x | \lambda) = \lambda e^{-\lambda x} \mathbbm{1}\{x \geq 0\}$. 46 | % $\text{Bin}(x | n, \theta) = \binom{n}{x} \theta^x (1 - \theta)^{n-x}$ 47 | % where $\binom{n}{x} = \frac{n!}{x!(n-x)!}$. 48 | % $\text{Beta}(\theta | a, b) = \frac{\Gamma(a+b)}{\Gamma(a) \Gamma(b)} \theta^{a-1} (1-\theta)^{b-1}$. 49 | % $\Gamma(x) = \int_0^\infty t^{x-1} e^{-t} dt$ for $x \in \mathbb{R}$. 50 | 51 | \bulletPoint{Probability Basics}: 52 | 53 | \begin{itemize}[label=$\cdot$,leftmargin=0pt] 54 | \item$\mathbb{P}((X,Y) \in A) = \int_A p(x,y) \,dx\,dy$. \quad 55 | $p(x) = \int_{-\infty}^{\infty} p(x,y) \,dy$.\quad 56 | $p(y | x) = \frac{p(x,y)}{p(x)}$. 57 | \item$p(x,y) = p(x)p(y | x)$, 58 | If \(X\) and \(Y\) are independent, then \( p(x,y) = p(x)p(y) \). 59 | 60 | \item\textbf{Likelihood of i.i.d. dataset:} 61 | $p(\mathcal{D}) = \prod_{i=1}^{n} p(x_i)$ or $\log p(\mathcal{D}) = \sum_{i=1}^{n} \log p(x_i)$. 62 | where dataset \( \mathcal{D} = \{x_1, \dots, x_n\} \) is i.i.d. 63 | \item\textbf{Bayes' theorem:} 64 | $p(x | y) = \frac{p(x,y)}{p(y)} = \frac{p(y | x) p(x)}{p(y)}$. 65 | \item\textbf{Expectation:} % Expectation 66 | $\mathbb{E}[X] = \int_{-\infty}^{\infty} x p(x) \,dx, \quad \mathbb{E}[g(X)] = \int_{-\infty}^{\infty} g(x) p(x) \,dx$,\quad 67 | $\mathbb{E}[X + Y] = \mathbb{E}[X] + \mathbb{E}[Y]$,\quad$\mathbb{E}[X \mid Y = y] = \int_{-\infty}^{\infty} x p(x \mid y) \,dx$,\quad 68 | $\mathbb{E}[f(X) g(Y) \mid Y = y] = \mathbb{E}[f(X) \mid Y = y] g(y)$. 
69 | \item\textbf{Variance and Covariance:} % Variance and Covariance 70 | $\text{var}(X) = \mathbb{E}[(X - \mathbb{E}[X])^2] = \mathbb{E}[X^2] - \mathbb{E}[X]^2$,\quad 71 | $\text{cov}(X,Y) = \mathbb{E}[(X - \mathbb{E}[X])(Y - \mathbb{E}[Y])] = \mathbb{E}[XY] - \mathbb{E}[X] \mathbb{E}[Y]$,\quad 72 | $\text{var}(X + Y) = \text{var}(X) + \text{var}(Y) + 2 \text{cov}(X,Y)$. 73 | \item \textbf{Vector Variables:} 74 | $ 75 | \mathbf{x} = 76 | \begin{bmatrix} 77 | x_1 & x_2 & \cdots & x_n 78 | \end{bmatrix}^{\top}, \quad 79 | \mathbb{E}[\mathbf{x}] = 80 | \begin{bmatrix} 81 | \mathbb{E}[x_1] & \mathbb{E}[x_2] & \cdots & \mathbb{E}[x_n] 82 | \end{bmatrix}^{\top} 83 | $\quad 84 | $\text{If } \mathbf{A} \text{ is a deterministic matrix, } \mathbb{E}[\mathbf{A} \mathbf{x}] = \mathbf{A} \mathbb{E}[\mathbf{x}].$\quad 85 | $\text{Covariance: } \text{cov}(\mathbf{x}) = \mathbf{\Sigma}_{xx} = \mathbb{E}[(\mathbf{x} - \mathbb{E}\mathbf{x})(\mathbf{x} - \mathbb{E}\mathbf{x})^\top],$\quad 86 | $\text{cov}(\mathbf{A} \mathbf{x}) = \mathbf{A} \, \text{cov}(\mathbf{x}) \mathbf{A}^\top$\quad 87 | $\text{Cross-covariance: } \text{cov}(\mathbf{x}, \mathbf{y}) = \mathbf{\Sigma}_{xy} = \mathbb{E}[(\mathbf{x} - \mathbb{E}\mathbf{x})(\mathbf{y} - \mathbb{E}\mathbf{y})^\top]$ 88 | \item \textbf{Matrix Calculus:} 89 | $\frac{\partial (\mathbf{a}^\top \mathbf{x})}{\partial \mathbf{x}} = \mathbf{a}, \quad 90 | \frac{\partial (\mathbf{x}^\top \mathbf{A} \mathbf{x})}{\partial \mathbf{x}} = (\mathbf{A} + \mathbf{A}^\top) \mathbf{x}$\quad 91 | $\frac{\partial (\mathbf{a}^\top \mathbf{X} \mathbf{b})}{\partial \mathbf{X}} = \mathbf{a} \mathbf{b}^\top$, \quad 92 | $\frac{\partial \det(\mathbf{X})}{\partial \mathbf{X}} = \det(\mathbf{X}) (\mathbf{X}^{-1})^\top$, \quad 93 | $\frac{\partial (\mathbf{a}^\top \mathbf{X}^{-1} \mathbf{b})}{\partial \mathbf{X}} = -(\mathbf{X}^{-1})^\top \mathbf{a} \mathbf{b}^\top (\mathbf{X}^{-1})^\top$ 94 | 95 | \end{itemize} 96 | % \textbf{Parametric vs. 
Non-Parametric Models} 97 | % \bulletPoint{Parametric vs. Non-Parametric Models} 98 | 99 | % \begin{itemize}[leftmargin=0pt] 100 | % \item • A parametric model assumes a fixed number of parameters. It usually belongs to a predefined family of distributions: 101 | % \( p(x,y) = p(x,y \mid \theta) \) or \( p(x) = p(x \mid \theta) \). \\ 102 | % 1. Faster to train (find “optimal” \( \theta \)). 2. Stronger assumptions about the data distribution. 103 | % \item • A non-parametric model the number of parameters grows with the amount of training data: 104 | % 1. More flexible. 105 | % 2. Can be computationally intractable for large datasets. 106 | % \end{itemize} 107 | 108 | \bulletPoint{Bayesian Inference:} \quad 109 | 110 | Given a parametric model, the posterior is derived as: $p(\theta \mid x) = \frac{p(x \mid \theta) p(\theta)}{p(x)} \propto p(x \mid \theta) p(\theta)$, 111 | where $p(x)$ is a normalization constant. 112 | If $p(x) \propto f(x)$ for some function $f(x)$, then $p(x) = c f(x)$ with 113 | $c = \left( \int f(x) dx \right)^{-1}$. 114 | 115 | \bulletPoint{Conjugate Distributions:} \quad 116 | 117 | If prior and posterior share the same form, they are conjugate: 118 | $p(\theta \mid x) \propto p(x \mid \theta) p(\theta)$. 119 | The prior $p(\theta)$ is called the conjugate prior of the likelihood $p(x \mid \theta)$.\\ 120 | 1. Allows for analytical closed form solutions and easy to interpret. 121 | 2. May lack flexibility to complex data, requiring MCMC. 122 | 123 | 124 | \bulletPoint{Conjugate Prior for Binomial:} \quad 125 | 126 | Given $s \sim \text{Bin}(n, \theta)$ and prior $\theta \sim \text{Beta}(a, b)$, the posterior is: 127 | $p(\theta \mid s) \propto \text{Bin}(s \mid \theta, n) \cdot \text{Beta}(\theta \mid a, b)$ 128 | $\propto \theta^s (1 - \theta)^{n-s} \cdot \theta^{a-1} (1 - \theta)^{b-1}$ 129 | $\propto \theta^{s+a-1} (1 - \theta)^{n-s+b-1}$. 
130 | Thus, the posterior follows: 131 | $p(\theta \mid s) = \text{Beta}(\theta \mid s + a, n - s + b)$ 132 | 133 | \bulletPoint{Categorical Distribution:} \quad 134 | 135 | • A categorical variable $X$ follows: 136 | $\text{Cat} (x \mid \theta_1, \dots, \theta_K)$ with parameters $\theta_k \geq 0, \sum_{k=1}^{K} \theta_k = 1$; 137 | $\text{Cat} (x \mid \theta_1, \dots, \theta_K)=\theta_x$. 138 | 139 | • Given i.i.d. samples $X_i \sim \text{Cat}(\theta_1, \dots, \theta_K)$, the joint probability of $\mathcal{D} = \{X_1, \dots, X_n\}$ is: 140 | $p(\mathcal{D}) = \prod_{i=1}^{n} \text{Cat}(x_i \mid \theta_1, \dots, \theta_K) = \prod_{i=1}^{n} \prod_{k=1}^{K} \theta_k^{\mathbbm{1}\{x_i = k\}}$. 141 | Using count notation $N_k = \sum_{i=1}^{n} \mathbbm{1}\{x_i = k\}$, we get: 142 | $p(\mathcal{D}) = \prod_{k=1}^{K} \theta_k^{N_k}$. 143 | 144 | 145 | \bulletPoint{Gaussian (Normal) Distribution:} \quad 146 | 147 | A random variable $X$ follows a normal distribution: 148 | $\mathcal{N}(x \mid \mu, \sigma^2) = \frac{1}{\sqrt{2\pi\sigma^2}} e^{-\frac{1}{2\sigma^2} (x-\mu)^2}$. 149 | 150 | If $X \sim \mathcal{N}(\mu, \sigma^2)$, then: 151 | $aX \sim \mathcal{N}(a\mu, a^2\sigma^2)$ for any $a \in \mathbb{R}$ 152 | $X + c \sim \mathcal{N}(\mu + c, \sigma^2)$. 153 | If $Z \sim \mathcal{N}(0,1)$, then $X = \sigma Z + \mu \sim \mathcal{N}(\mu, \sigma^2)$. 154 | If $X \sim \mathcal{N}(\mu, \sigma^2)$ and $Y \sim \mathcal{N}(\xi, \nu^2)$ are independent: 155 | $X + Y \sim \mathcal{N}(\mu + \xi, \sigma^2 + \nu^2)$. 156 | 157 | 158 | \bulletPoint{Multivariate Gaussian Distribution:} \quad 159 | 160 | A random vector $\mathbf{X}$ follows a multivariate normal distribution: 161 | $\mathcal{N}(\mathbf{x} \mid \boldsymbol{\mu}, \boldsymbol{\Sigma}) = \frac{1}{(2\pi)^{K/2} (\det \boldsymbol{\Sigma})^{1/2}} \exp \left( -\frac{1}{2} (\mathbf{x} - \boldsymbol{\mu})^\top \boldsymbol{\Sigma}^{-1} (\mathbf{x} - \boldsymbol{\mu}) \right)$. 
162 | 163 | • $\mathbf{X} = [X_1, \dots, X_n]^\top$ is \textit{jointly Gaussian} if for any vector $\mathbf{a} \in \mathbb{R}^n$, the linear combination: $\mathbf{a}^\top \mathbf{X} = \sum_{i=1}^{n} a_i X_i$ is Gaussian. 164 | 165 | • If $\mathbf{Z} \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$, then: $\mathbf{X} = \mathbf{A} \mathbf{Z} + \boldsymbol{\mu}$ 166 | is jointly Gaussian with mean and covariance: 167 | $\mathbb{E}[\mathbf{X}] = \boldsymbol{\mu}, \quad \text{cov}(\mathbf{X}) = \mathbf{A} \mathbf{A}^\top$. 168 | 169 | • $\boldsymbol{\mu} = 170 | \begin{bmatrix} \mu_X \\ \mu_Y \end{bmatrix}, \quad 171 | \boldsymbol{\Sigma} = 172 | \begin{bmatrix} \sigma_X^2 & \text{cov}(X,Y) \\ \text{cov}(X,Y) & \sigma_Y^2 \end{bmatrix}$.\quad $\rho_{X,Y} = \frac{\text{cov}(X,Y)}{\sigma_X \sigma_Y}$. 173 | 174 | \bulletPoint{Likelihood Functions for Common Distributions:}\quad 175 | 176 | \textbf{1. Bernoulli:} If $x_i \sim \text{Bern}(\theta)$ i.i.d., where $\theta \in [0,1]$, then: 177 | $p(\mathcal{D} \mid \theta) = \theta^{N_1} (1 - \theta)^{N_0}, \quad N_k = \sum_{i=1}^{n} \mathbbm{1}\{x_i = k\}$. 178 | 179 | \textbf{2. Exponential:} If $x_i \sim \text{Exp}(\lambda)$ i.i.d., where $\lambda > 0$, then: 180 | $p(\mathcal{D} \mid \lambda) = \lambda^n \exp \left( -\lambda \sum_{i=1}^{n} x_i \right)$. 181 | 182 | \textbf{3. Gaussian:} If $x_i \sim \mathcal{N}(\mu, \sigma^2)$ i.i.d., then $\theta = (\mu, \sigma^2)$ and: 183 | $p(\mathcal{D} \mid \theta) = \frac{1}{(2\pi)^{n/2} \sigma^n} \exp \left( -\frac{1}{2\sigma^2} \sum_{i=1}^{n} (x_i - \mu)^2 \right)$. 184 | 185 | 186 | \bulletPoint{Maximum Likelihood Estimation (MLE):}\quad 187 | 188 | The MLE of parameter $\theta$ is: $\theta_{\text{ML}} = \arg\max_{\theta} p(\mathcal{D} \mid \theta)$. 189 | For i.i.d. data $\mathcal{D} = \{x_1, \dots, x_n\}$, we maximize the log-likelihood: 190 | $\log p(\mathcal{D} \mid \theta) = \sum_{i=1}^{n} \log p(x_i \mid \theta)$. 
191 | 192 | \textbf{Bernoulli MLE:} If $x_i \sim \text{Bern}(\theta)$, then: $p(\mathcal{D} \mid \theta) = \theta^{N_1} (1 - \theta)^{N_0}$, 193 | $\log p(\mathcal{D} \mid \theta) = N_1 \log \theta + N_0 \log (1 - \theta)$. Solving $\frac{\partial}{\partial \theta} \log p(\mathcal{D} \mid \theta) = 0$: 194 | $\theta_{\text{ML}} = \frac{N_1}{N_0 + N_1} = \frac{N_1}{n}$. 195 | 196 | \textbf{Exponential MLE:} If $x_i \sim \text{Exp}(\lambda)$, then: $\log p(\mathcal{D} \mid \lambda) = n \log \lambda - \lambda \sum_{i=1}^{n} x_i$. 197 | Solving $\frac{\partial}{\partial \lambda} \log p(\mathcal{D} \mid \lambda) = 0$: $\lambda_{\text{ML}} = \frac{n}{\sum_{i=1}^{n} x_i}$. 198 | 199 | 200 | \bulletPoint{Linear Regression Model:}\quad 201 | 202 | $\mathbf{x} = (x_1, x_2, \dots, x_D)$, 203 | $y = \mathbf{w}^\top \mathbf{x} + \epsilon, $ $ \epsilon \sim \mathcal{N}(0, \sigma^2)$.\\ 204 | Thus, the likelihood is: 205 | $p(y \mid \mathbf{x}, \mathbf{w}) = \mathcal{N}(y \mid \mathbf{w}^\top \mathbf{x}, \sigma^2)$. \\ 206 | \textbf{Basis Function Expansion:} model non-linear relationships using basis functions: 207 | $ 208 | \phi(\mathbf{x}) = 209 | \begin{bmatrix} 210 | \varphi_1(\mathbf{x}) & \varphi_2(\mathbf{x}) & \cdots & \varphi_M(\mathbf{x}) 211 | \end{bmatrix}^{\top} 212 | $ 213 | , \quad 214 | $y = \mathbf{w}^\top \phi(\mathbf{x}) + \epsilon$; 215 | For $\mathbf{x} = \begin{bmatrix} x_1 \\ x_2 \end{bmatrix}$, $\phi(\mathbf{x}) = [1, x_1, x_2, x_1^2, x_2^2]$, 216 | $y = w_1 + w_2 x_1 + w_3 x_2 + w_4 x_1^2 + w_5 x_2^2 + \epsilon$. \\ 217 | \textbf{MLE for $\mathbf{w}$:} Given i.i.d. training data $\mathcal{D} = \{(\mathbf{x}_i, y_i)\}$, 218 | $\log p(\mathbf{y} \mid \mathbf{\Phi}, \mathbf{w}) = -\frac{1}{2\sigma^2} \|\mathbf{\Phi} \mathbf{w} - \mathbf{y} \|^2 + \text{const}$, 219 | where $\mathbf{\Phi} = [\mathbf{x}_1 \dots \mathbf{x}_n]^\top$ is the design matrix. 220 | Maximizing w.r.t. 
$\mathbf{w}$ gives the least squares solution: 221 | $\mathbf{w}_{\text{ML}} = (\mathbf{\Phi}^\top \mathbf{\Phi})^{-1} \mathbf{\Phi}^\top \mathbf{y}$. \\ 222 | 223 | 224 | \bulletPoint{Model Evaluation Metrics:}\quad 225 | 226 | • $\text{RSS} = \sum_{i=1}^{n} (y_i - \hat{y}_i)^2$. \quad 227 | • $\text{RMSE} = \sqrt{\frac{1}{n} \text{RSS}}$. \\ 228 | • $R^2 = 1 - \frac{\text{RSS}}{\text{TSS}} = 1 - \frac{\text{RSS}}{\sum_{i=1}^{n} (y_i - \bar{y})^2}$, 229 | 230 | \bulletPoint{Maximum A Posteriori (MAP) Estimation:}\quad 231 | 232 | • The MAP estimate maximizes the posterior: 233 | $\theta_{\text{MAP}} = \arg\max_{\theta} p(\theta \mid \mathcal{D})$. Using Bayes' theorem: $p(\theta \mid \mathcal{D}) \propto p(\mathcal{D} \mid \theta) p(\theta)$, 234 | $\log p(\theta \mid \mathcal{D}) = \log p(\mathcal{D} \mid \theta) + \log p(\theta) + \text{const}$. 235 | 236 | • Given: 237 | $p(\mathcal{D} \mid \theta) = \theta^{N_1} (1 - \theta)^{N_0}, \quad p(\theta) = \text{Beta}(\theta \mid a, b)$, 238 | $\log p(\mathcal{D} \mid \theta) p(\theta) = (N_1 + a - 1) \log \theta + (N_0 + b - 1) \log(1 - \theta)$. 239 | Solving $\frac{\partial}{\partial \theta} \log p(\mathcal{D} \mid \theta) = 0$: 240 | $\theta_{\text{MAP}} = \frac{N_1 + a - 1}{n + a + b - 2}$. 241 | 242 | \bulletPoint{Classification and Naïve Bayes:}\quad 243 | 244 | • Classification Rule: Given feature vector $\mathbf{x}$ and class label $y \in \{1, \dots, K\}$, 245 | $\delta(\mathbf{x}) = k$ if $p(y = k \mid \mathbf{x})$ is maximized. \\ 246 | • Naïve Bayes Classifier: Assuming conditional independence, 247 | $p(\mathbf{x} \mid y = c, \theta) = \prod_{d=1}^{D} p(x_d \mid \theta_{dc})$. Using Bayes' rule: $p(y = c \mid \mathbf{x}, \theta) \propto \pi(c) \prod_{d=1}^{D} p(x_d \mid \theta_{dc})$, where $\pi(c)$ is the prior probability of class $c$. 
248 | 249 | 250 | \bulletPoint{Mixture Models and Gaussian Mixture Model (GMM):}\quad 251 | 252 | • Suppose an observation $\mathbf{x}$ can be generated from one of $K$ possible probability density functions (pdfs): 253 | $p(\mathbf{x} \mid \boldsymbol{\eta}_1), \dots, p(\mathbf{x} \mid \boldsymbol{\eta}_K)$. 254 | The generating index $z$ follows a categorical distribution: $p(z) = \text{Cat}(z \mid \boldsymbol{\pi})$. 255 | Since $z$ is unobserved, it is a \textbf{latent variable}. The marginal distribution is: 256 | $p(\mathbf{x} \mid \boldsymbol{\theta}) = \sum_{k=1}^{K} \pi(k) p(\mathbf{x} \mid \boldsymbol{\eta}_k)$, 257 | where $\boldsymbol{\theta} = (\boldsymbol{\pi}, \{ \boldsymbol{\eta}_k \}_{k=1}^{K})$. 258 | 259 | • GMM: $p(\mathbf{x} \mid \boldsymbol{\theta}) = \sum_{k=1}^{K} \pi(k) \mathcal{N} (\mathbf{x} \mid \boldsymbol{\mu}_k, \boldsymbol{\Sigma}_k)$, 260 | where $\boldsymbol{\theta} = (\boldsymbol{\pi}, \{ \boldsymbol{\mu}_k, \boldsymbol{\Sigma}_k \}_{k=1}^{K})$. 261 | Given i.i.d. observations $\mathbf{x}_1, \mathbf{x}_2, \dots, \mathbf{x}_n$, $p(\mathbf{x}_i \mid \boldsymbol{\theta}) = \sum_{k=1}^{K} \pi(k) p(\mathbf{x}_i \mid \boldsymbol{\eta}_k)$.\\ 262 | $\log p(\mathbf{x}_1, \dots, \mathbf{x}_n \mid \boldsymbol{\theta}) = \sum_{i=1}^{n} \log \sum_{k=1}^{K} \pi(k) p(\mathbf{x}_i \mid \boldsymbol{\eta}_k)$. 263 | 264 | \bulletPoint{Algorithmic Issues in Mixture Models:}\quad 265 | 266 | • Singularity: 267 | If for some $k$, we set $\boldsymbol{\mu}_k = \mathbf{x}_i$ and $\sigma_k \to 0$, then: 268 | $\mathcal{N}(\mathbf{x}_i \mid \boldsymbol{\mu}_k, \sigma_k \mathbf{I}) \propto \frac{1}{\sigma_k} \to \infty$. 269 | 270 | • Unidentifiability: 271 | no unique global optimum for log-likelihood function. 272 | 273 | • Optimization Challenges: non-convex, hard to solve. 
274 | 275 | • If latent variables $z_1, \dots, z_n$ are observed, the likelihood simplifies to: 276 | $\log p((\mathbf{x}_1, z_1), \dots, (\mathbf{x}_n, z_n) \mid \boldsymbol{\theta}) = \sum_{i=1}^{n} \big( \log \pi[z_i] + \log p(\mathbf{x}_i \mid \boldsymbol{\eta}_{z_i}) \big)$. 277 | much easier to maximize. 278 | 279 | 280 | \bulletPoint{Gaussian Mixture Model (GMM):}\quad 281 | 282 | • Observed data $\mathbf{x}_1, \dots, \mathbf{x}_n \in \mathbb{R}^{D}$ are generated from a mixture of $K$ Gaussian distributions: 283 | $p(\mathbf{x} \mid \boldsymbol{\theta}) = \sum_{k=1}^{K} \pi(k) \mathcal{N} (\mathbf{x} \mid \boldsymbol{\mu}_k, \boldsymbol{\Sigma}_k)$ 284 | $\boldsymbol{\theta} = (\pi(k), \boldsymbol{\mu}_k, \boldsymbol{\Sigma}_k)_{k=1}^{K}$. 285 | 286 | • Complete data likelihood: 287 | $\log p(\mathbf{y}_1, \dots, \mathbf{y}_n \mid \boldsymbol{\theta}) = \sum_{k=1}^{K} \sum_{i: z_i = k} \big( \log \pi(k) + \log \mathcal{N} (\mathbf{x}_i \mid \boldsymbol{\mu}_k, \boldsymbol{\Sigma}_k) \big)$. 288 | 289 | • The MLE estimates for $\boldsymbol{\mu}_k$ and $\boldsymbol{\Sigma}_k$: 290 | $ \hat{\boldsymbol{\mu}}_k = \frac{1}{n_k} \sum_{i: z_i = k} \mathbf{x}_i, $ 291 | $ \hat{\boldsymbol{\Sigma}}_k = \frac{1}{n_k} \sum_{i: z_i = k} (\mathbf{x}_i - \hat{\boldsymbol{\mu}}_k)(\mathbf{x}_i - \hat{\boldsymbol{\mu}}_k)^{\top} $, where $n_k = \sum_{i=1}^{n} \mathbbm{1}\{z_i = k\}$ is the number of samples assigned to component $k$. 292 | 293 | \bulletPoint{Expectation-Maximization (EM) Algorithm:}\quad 294 | 295 | The complete data $\mathbf{y}$ cannot be observed directly, and $\log p(\mathbf{x} \mid \boldsymbol{\theta})$ is hard to optimize. Instead, maximize the expectation $\mathbb{E}_{p(\mathbf{y} \mid \mathbf{x}, \hat{\boldsymbol{\theta}})} \left[ \log p(\mathbf{y} \mid \boldsymbol{\theta}) \mid \mathbf{x}, \hat{\boldsymbol{\theta}} \right]$. 296 | 297 | \textbf{EM Steps:} 1. Initialize $\boldsymbol{\theta}^{(0)}$. \quad 298 | 2. 
\textbf{E-step}: Compute $Q(\boldsymbol{\theta} \mid \boldsymbol{\theta}^{(m)}) = \mathbb{E}_{p(\mathbf{y} \mid \mathbf{x}, \boldsymbol{\theta}^{(m)})} \left[ \log p(\mathbf{y} \mid \boldsymbol{\theta}) \mid \mathbf{x}, \boldsymbol{\theta}^{(m)} \right] = \int \log p(\mathbf{y} \mid \boldsymbol{\theta}) p(\mathbf{y} \mid \mathbf{x}, \boldsymbol{\theta}^{(m)}) d\mathbf{y}$. \quad 299 | 300 | 3. \textbf{M-step}: Update $\boldsymbol{\theta}$ by $\boldsymbol{\theta}^{(m+1)} = \arg\max_{\boldsymbol{\theta} \in \Theta} Q(\boldsymbol{\theta} \mid \boldsymbol{\theta}^{(m)})$. 301 | 302 | 4. Repeat until convergence. 303 | 304 | \bulletPoint{EM for GMM:}\quad 305 | 306 | Given $\pi^{(0)}(k), \boldsymbol{\mu}_k^{(0)}, \boldsymbol{\Sigma}_k^{(0)}$ for $k=1,\dots,K$. 307 | 308 | $L^{(0)} = \frac{1}{n} \sum_{i=1}^{n} \log \left( \sum_{k=1}^{K} \pi^{(0)}(k) \mathcal{N} (\mathbf{x}_i \mid \boldsymbol{\mu}_k^{(0)}, \boldsymbol{\Sigma}_k^{(0)}) \right)$. 309 | \textbf{Repeat:} 310 | 311 | 1. \textbf{E-step:} Compute responsibilities: 312 | $r_{ik}^{(m)} = \frac{\pi^{(m)}(k) \mathcal{N} (\mathbf{x}_i \mid \boldsymbol{\mu}_k^{(m)}, \boldsymbol{\Sigma}_k^{(m)})}{\sum_{k'} \pi^{(m)}(k') \mathcal{N} (\mathbf{x}_i \mid \boldsymbol{\mu}_{k'}^{(m)}, \boldsymbol{\Sigma}_{k'}^{(m)})}$, 313 | $n_k^{(m)} = \sum_{i=1}^{n} r_{ik}$. 314 | 315 | 2. \textbf{M-step:} Update parameters: 316 | $\pi^{(m+1)}(k) = \frac{n_k^{(m)}}{n}$, \quad 317 | $\boldsymbol{\mu}_k^{(m+1)} = \frac{1}{n_k^{(m)}} \sum_{i=1}^{n} r_{ik}^{(m)} \mathbf{x}_i$, \quad 318 | $\boldsymbol{\Sigma}_k^{(m+1)} = \frac{1}{n_k^{(m)}} \sum_{i=1}^{n} r_{ik}^{(m)} (\mathbf{x}_i - \boldsymbol{\mu}_k^{(m+1)})(\mathbf{x}_i - \boldsymbol{\mu}_k^{(m+1)})^\top$. 319 | 320 | 3. \textbf{Compute:} 321 | $L^{(m+1)} = \frac{1}{n} \sum_{i=1}^{n} \log \left( \sum_{k=1}^{K} \pi^{(m+1)}(k) \mathcal{N} (\mathbf{x}_i \mid \boldsymbol{\mu}_k^{(m+1)}, \boldsymbol{\Sigma}_k^{(m+1)}) \right)$. 322 | 323 | 4. 
\textbf{until:} $\lvert L^{(m+1)} - L^{(m)} \rvert \leq \epsilon$. 324 | 325 | 326 | \bulletPoint{K-Means Algorithm}\quad 327 | 328 | GMM with $\Sigma_k = \sigma^2 I$ and $\pi(k) = 1/K$ fixed. Only $\mu_k$ are inferred. \quad 329 | 1. \textbf{E-step:} Assign each $x_i$ to its nearest cluster center: $k_i = \arg\min_{k} \|x_i - \mu_k^{(m)}\|^2$. Define hard assignment: $r_{ik}^{(m)} = 1$ if $k = k_i$, otherwise $r_{ik}^{(m)} = 0$. 330 | 331 | $Q(\theta \mid \theta^{(m)}) = -\frac{1}{2\sigma^2} \sum_{i=1}^{n} \|x_i - \mu_{k_i}^{(m)}\|^2 + \text{const}$. 332 | 333 | 2. \textbf{M-step:} Update cluster centers: $\mu_k^{(m+1)} = \frac{1}{N_k} \sum_{i:k_i=k} x_i$, where $N_k = \sum_{i=1}^{n} \mathbbm{1}\{ k_i = k \}$. 334 | 335 | 3. Update each cluster center using the mean of its assigned points. 336 | 337 | \bulletPoint{EM Algorithm for MAP Estimation}\quad 338 | 339 | Given data $\mathbf{x}$, the posterior is $p(\theta \mid \mathbf{x}) = \frac{p(\mathbf{x} \mid \theta) p(\theta)}{p(\mathbf{x})}$. The MAP estimate is $\theta_{MAP} = \arg\max_{\theta} \left( \log p(\mathbf{x} \mid \theta) + \log p(\theta) \right)$. 340 | 341 | \textbf{Algorithm:} 342 | 343 | 1. Pick initial guess $\theta^{(0)}$. 344 | 345 | 2. \textbf{E-step:} At iteration $m+1$, compute $Q(\theta \mid \theta^{(m)}) = \int p(\mathbf{y} \mid \mathbf{x}, \theta^{(m)}) \log p(\mathbf{y} \mid \theta) d\mathbf{y}$. 346 | 347 | 3. \textbf{M-step:} Update $\theta^{(m+1)} = \arg\max_{\theta} \left( Q(\theta \mid \theta^{(m)}) + \log p(\theta) \right)$. 348 | 349 | 4. Repeat until convergence. 
350 | 351 | 352 | % \bulletPoint{Markov Chains}\quad 353 | 354 | % A discrete-time sequence $\mathbf{x} = \{x[0], x[1], \dots\}$, where each $x[t] \in \{1, 2, \dots, M\}$, satisfies the Markov property: 355 | % $p(x[t] \mid x[1], \dots, x[t-1]) = p(x[t] \mid x[t-1])$ 356 | 357 | \bulletPoint{Transition Matrix}\quad 358 | 359 | • Each row of $\mathbf{T}$ sums to one: $\sum_{j=1}^{M} T(i, j) = \sum_{j=1}^{M} p_{x[t] \mid x[t-1]}(j \mid i) = 1$. 360 | $\mathbf{T}$ is a (row) stochastic matrix. 361 | 362 | • Given initial distribution $\mathbf{p}_0 = [p_0(1), p_0(2), \dots, p_0(M)]$, $\mathbf{p}_1 = \mathbf{p}_0 \mathbf{T}$. For general $t$, let $\mathbf{p}_t = [p_t(1), \dots, p_t(M)]$, then: $\mathbf{p}_t = \mathbf{p}_0 \mathbf{T}^t$. 363 | 364 | 365 | \bulletPoint{MLE for Transition Matrix}\quad 366 | 367 | • Estimate prior $\pi$ and transition matrix $\mathbf{T}$ from training data: 368 | $p(x[0], \dots, x[t] \mid \pi, \mathbf{T}) = \pi(x[0]) \mathbf{T}(x[0], x[1]) \dots$ $\mathbf{T}(x[t-1], x[t])$. 369 | Given $n$ observed sequences $\mathcal{D} = \{\mathbf{x}_1[0:t_1], \dots, \mathbf{x}_n[0:t_n]\}$, each of varying length $t_i+1$, assume all data points follow the same $\mathbf{T}$. 370 | 371 | • $\log p(\mathcal{D} \mid \pi, \mathbf{T}) = \sum_{i=1}^{n} \log \pi(x_i[0]) + \sum_{i=1}^{n} \sum_{t=1}^{t_i} \log \mathbf{T}(x_i[t-1], x_i[t])$ 372 | $= \sum_{x=1}^{M} N_x \log \pi(x) + \sum_{x=1}^{M} \sum_{y=1}^{M} N_{xy} \log \mathbf{T}(x, y)$. 373 | 374 | $N_x = \sum_{i=1}^{n} \mathbbm{1}(x_i[0] = x)$, $N_{xy} = \sum_{i=1}^{n} \sum_{t=1}^{t_i} \mathbbm{1}(x_i[t-1] = x, x_i[t] = y)$. 375 | 376 | • $\hat{\pi}(x) = \frac{N_x}{n}$, $\hat{\mathbf{T}}(x, y) = \frac{N_{xy}}{\sum_{z=1}^{M} N_{xz}}$. 377 | 378 | • May predict certain strings are impossible if data have zero count of certain states (this is a form of overfitting). 
379 | 380 | \bulletPoint{HMM: Hidden Markov Model}\quad 381 | 382 | A hidden Markov model consists of: 383 | 384 | • A discrete state Markov chain with \textbf{hidden states} or latent variables $z[t] \in \{1, \dots, M\}$, $t = 0,1,\dots$, with initial pdf $\pi$ and transition matrix $\mathbf{T}$. 385 | 386 | • An observation model with emission probabilities $p(\mathbf{x}[t] \mid z[t]) = p(\mathbf{x}[t] \mid \phi_{z[t]})$, where $\phi = (\phi_1, \dots, \phi_M)$. 387 | Applications: 388 | (1) Long-range dependencies; 389 | (2) Speech recognition; 390 | (3) Gene finding; 391 | (4) Emission probabilities (Gaussian example). 392 | 393 | \bulletPoint{Baum-Welch Algorithm for HMM Training}\quad 394 | 395 | 1. Initialize $\theta^{(0)}$. 396 | 397 | 2. \textbf{E step}: Use Forward-Backward Algorithm to compute 398 | $\gamma_{i,t}(z) = p(z_i[t] = z | \mathbf{x}_i[0:t_i], \theta^{(m)}) \propto \alpha_t(z) \beta_t(z)$. 399 | $\xi_{i,t}(z,z') = p(z_i[t-1] = z, z_i[t] = z' | \mathbf{x}_i[0:t_i], \theta^{(m)})$ 400 | $\quad \propto \alpha_{t-1}(z) p(\mathbf{x}_i[t] | z_i[t]=z') \beta_t(z') p(z_i[t] = z' | z_i[t-1] = z)$. 401 | 402 | 3. \textbf{M step}: Update parameters 403 | $\hat{\pi}(z) = \frac{\sum_{i=1}^{n} \gamma_{i,0}(z)}{n}$, 404 | $\hat{T}(z,z') = \frac{\sum_{i=1}^{n} \sum_{t=1}^{t_i} \xi_{i,t}(z,z')}{\sum_u \sum_{i=1}^{n} \sum_{t=1}^{t_i} \xi_{i,t}(z,u)}$, 405 | $\hat{\phi}_z$ = emission probability parameters. \quad 406 | 4. Repeat E and M steps. 407 | 408 | \bulletPoint{Inference in HMMs}\quad 409 | 410 | • Filtering: Estimate latent state $p(z[t] | \mathbf{x}[0:t])$ using observations up to time $t$ ({Forward Algorithm}). 411 | 412 | • Smoothing: Estimate $p(z[t] | \mathbf{x}[0:T])$, using both past and future observations ({Forward-Backward Algorithm}). 413 | 414 | • Fixed-lag smoothing: Estimate $p(z[t-l] | \mathbf{x}[0:t])$ for online inference ({Forward-Backward Algorithm}). 
415 | 416 | • Prediction: Estimate $p(z[t+h] | \mathbf{x}[0:t])$, where $h > 0$ is the prediction horizon: 417 | $p(z[t+h] | \mathbf{x}[0:t]) = \sum_{z[t],...,z[t+h-1]} p(z[t+h] | z[t+h-1]) p(z[t+h-1] | z[t+h-2]) \cdots p(z[t+1] | z[t]) \cdot p(z[t] | \mathbf{x}[0:t])$. 418 | 419 | • MAP sequence: using Viterbi Algorithm, most probable sequence $\mathbf{z}^*[0:T] = \arg\max_{z[0:T]} p(z[0:T] | \mathbf{x}[0:T])$. 420 | 421 | 422 | \bulletPoint{Sampling Using Cdf}\quad 423 | 424 | If \( X \sim F \), then: 425 | $\mathbb{P}(X \leq x) = F(x)$, \quad 426 | Let \( U \sim \text{Unif}(0,1) \), define: 427 | $X = F^{-1}(U) 428 | \Rightarrow X \sim F$. 429 | 430 | $\mathbb{P}(F^{-1}(U) \leq x) = \mathbb{P}(U \leq F(x)) = F(x)$. 431 | 432 | \bulletPoint{Transformations}\quad 433 | 434 | If \( Y = f(X) \), then \( p_Y(y) = \sum_{k=1}^K \frac{p_X(x_k)}{|f'(x_k)|} \), where \( x_k \) are the solutions to \( f(x) = y \) 435 | \quad\textbf{Note:} Requires solving \( f(x) = y \) and knowing \( f'(x) \) 436 | 437 | 438 | \bulletPoint{Rejection Sampling}\quad 439 | 440 | • \( p(z) = \frac{1}{M} \tilde{p}(z) \), where \( M \) unknown. 441 | Choose proposal \( q(z) \), and constant \( k \geq \frac{\tilde{p}(z)}{q(z)} \) for all \( z \). 442 | 443 | • \( \text{supp}(p) \subseteq \text{supp}(q) \), where $\operatorname{supp} p = \overline{ \{ z : p(z) > 0 \} }$. 444 | 445 | • \textbf{Sampling Procedure:} 446 | 1. Sample \( z \sim q(z) \) \quad 447 | 2. Sample \( u \sim \text{Unif}[0, kq(z)] \) \quad 448 | 3. Accept \( z \) if \( u \leq \tilde{p}(z) \). 449 | 450 | • $\mathbb{P}(z\text{ accepted}) = \int \frac{\tilde{p}(z)}{kq(z)} q(z)\,dz = \frac{M}{k}$. \quad 451 | 452 | • Choose smallest possible \( k \) s.t. \( kq(z) \geq \tilde{p}(z) \ \forall z \). 
\quad 453 | 454 | • Accepted \( z \sim p(z) \) 455 | $\mathbb{P}(z \leq z_0 \mid \text{accepted}) = \frac{1}{M} \int_{z \leq z_0} \tilde{p}(z)\,dz$ 456 | → Accepted samples follow the CDF of \( p(z) \) 457 | 458 | \bulletPoint{Importance Sampling}\quad 459 | 460 | • Estimate expectation $\mathbb{E}_p[f(z)] = \int f(z)p(z)\,dz$, where $p(z)$ is hard to sample. 461 | 462 | • Use proposal $q(z)$ and importance weights $w(z) = \frac{p(z)}{q(z)}$, rewrite as 463 | $\mathbb{E}_p[f(z)] = \int f(z)w(z)q(z)\,dz \approx \frac{1}{n} \sum_{i=1}^n w(z_i)f(z_i)$, where $z_i \sim q(z)$. 464 | 465 | • $\text{supp}(f(\cdot)p(\cdot)) \subseteq \text{supp}(q)$. Keep all samples, no need $q(z) \geq p(z)$. Better matches well, if $q(z)$ is large where $|f(z)|p(z)$ is large. 466 | 467 | • If only know how to compute $p(z)$ and $q(z)$ up to normalizing constants, use normalized weights: 468 | 469 | $\mathbb{E}_p[f(z)] \approx \sum_{i=1}^n w_n(z_i)f(z_i)$, $w_n(z_i) = \frac{w(z_i)}{\sum_j w(z_j)}$. 470 | 471 | \bulletPoint{Sampling Importance Resampling (SIR)}\quad 472 | 473 | • Convert importance weighted samples into unweighted samples from $p(z)$. \quad 474 | • \textbf{Steps:} 475 | 476 | 1. Sample $z_1,\dots,z_n$ from $q(z)$. 477 | 478 | 2. Compute weights $w_n(z_1),\dots,w_n(z_n)$. 479 | 480 | 3. Resample with replacement from $\{z_1,\dots,z_n\}$ using weights $(w_n(z_1),\dots,w_n(z_n))$. 481 | 482 | • Each sample $\tilde{z}_i$ drawn from multinomial over $\{z_1,\dots,z_n\}$ with weights $w_n(z_i)$. 483 | asymptotically for large $n\to\infty$, 484 | $\mathbb{P}(\tilde{z} \leq a) = \sum_{i=1}^n w_n(z_i)\mathbbm{1}_{\{z_i \leq a\}} \to \int_{z \leq a} p(z)\,dz$. 485 | 486 | \bulletPoint{SIR for Bayesian Inference}\quad 487 | 488 | • Take unnormalized $\tilde{p}(\theta) = p(\mathcal{D} \mid \theta)p(\theta)$, sample $\theta_1,\dots,\theta_n$ from $q(\theta) = p(\theta)$. 
489 | 490 | • $w_n(\theta_i) = \frac{\tilde{p}(\theta_i)/q(\theta_i)}{\sum_j \tilde{p}(\theta_j)/q(\theta_j)} = \frac{p(\mathcal{D} \mid \theta_i)}{\sum_j p(\mathcal{D} \mid \theta_j)}$. 491 | 492 | • Resampling $\theta_1,\dots,\theta_n$ according to weights $(w_n(\theta_1),\dots,w_n(\theta_n))$. 493 | 494 | \bulletPoint{Sampling for EM}\quad 495 | 496 | • Observed incomplete data $\mathbf{x}$, while complete is $\mathbf{(x,z)}$. 497 | Need to compute: 498 | $Q(\theta \mid \theta^{(m)}) = \int p(\mathbf{z} \mid \mathbf{x}, \theta^{(m)}) \log p(\mathbf{x}, \mathbf{z} \mid \theta)\,dz$. 499 | 500 | • $Q(\theta \mid \theta^{(m)}) \approx \frac{1}{n} \sum_{i=1}^n \log p(\mathbf{x}, \mathbf{z}_i \mid \theta)$, where $\mathbf{z}_i \sim p(\mathbf{z} \mid \mathbf{x}, \theta^{(m)})$. 501 | 502 | • Rejection and importance sampling not suitable for high-dimensional $\mathbf{z}$ → need MCMC methods 503 | 504 | \bulletPoint{Stationary Distribution}\quad 505 | 506 | Consider homogeneous Markov chain with transition probability $p(x_t = y \mid x_{t-1} = x) = \mathbf{T}(x, y)$. 507 | $\pi$ is a stationary distribution if $\sum_x \pi(x)\mathbf{T}(x, y) = \pi(y)$ for all states $y$. 508 | Also called invariant distribution — does not change over time in the chain. 509 | If $M$ states, $\mathbf{T}$ is an $M \times M$ matrix with $\pi \mathbf{T} = \pi$. 510 | 511 | \bulletPoint{Asymptotic Steady State}\quad 512 | 513 | Initial distribution: $\pi_0$. 514 | Markov evolution: $\pi_1 = \pi_0 \mathbf{T}, \ \pi_2 = \pi_1 \mathbf{T}, \ \dots, \ \pi_k = \pi_0 \mathbf{T}^k$. 
If the limit $\pi = \lim_{k \to \infty} \pi_k = \lim_{k \to \infty} \pi_0 \mathbf{T}^k$ exists, then $\pi$ must satisfy $\pi \mathbf{T} = \pi$ → a stationary distribution
552 | 553 | • (1) Sequence $Z_0, Z_1, \dots$ is a Markov chain (only depends on $Z_{m-1}$) 554 | (2) Like rejection/importance sampling, does not need normalization constant of $\tilde{\pi}$ 555 | (3) Suitable for high-dimensional $\pi(\mathbf{x})$ since sampling is from simple $q(\mathbf{x}, \cdot)$ 556 | 557 | 558 | \bulletPoint{Proposal Distributions}\quad 559 | 560 | • MH chain $Z_0, Z_1, \dots$ is designed to converge to stationary distribution $\pi(\cdot)$. 561 | For large $m$, $Z_m \sim \pi$ approximately. 562 | 563 | • Burn-in period: discard first 1000 to 5000 samples 564 | 565 | • Choice of Proposal Distribution: 566 | 567 | 1. $q(\mathbf{x}, \mathbf{y}) = q(\mathbf{y} - \mathbf{x})$ — \textit{random walk MH}. 568 | 569 | (1) $\mathbf{y} - \mathbf{x} \sim \mathcal{N}(0, \Sigma)$ — Gaussian centered at $\mathbf{x}$. \quad 570 | 571 | (2) $\mathbf{y} - \mathbf{x} \sim \text{Unif}[-\delta, \delta]^d$ — Uniform around $\mathbf{x}$. \quad 572 | 573 | (3) If $q(\mathbf{x}, \mathbf{y}) = q(\mathbf{y}, \mathbf{x})$, then 574 | $A(\mathbf{x}, \mathbf{y}) = \min\left(1, \frac{\tilde{\pi}(\mathbf{y})}{\tilde{\pi}(\mathbf{x})}\right)$ known as the Metropolis Algorithm. 575 | 576 | (4) Variance affects — small $\Rightarrow$ slow,\quad high $\Rightarrow$ high rejection rates. 577 | 578 | 2. Independence Chain MH: 579 | $q(\mathbf{x}, \mathbf{y}) = q(\mathbf{y})$, i.e. next state is independent of current. \quad 580 | Works well if $q(\mathbf{y})$ closely approximates $\pi(\mathbf{y})$ and is heavy-tailed 581 | 582 | 3. Exploiting Structure of $\pi$: 583 | Suppose $\pi(\mathbf{x}) \propto \psi(\mathbf{x}) h(\mathbf{x})$, with $h(\mathbf{x})$ is a density 584 | that can be sampled from, bounded function $\psi(\mathbf{x})$. 
\quad 585 | Choose $q(\mathbf{x}, \mathbf{y}) = h(\mathbf{y})$, then: 586 | $A(\mathbf{x}, \mathbf{y}) = \min\left(1, \frac{\tilde{\pi}(\mathbf{y})q(\mathbf{y}, \mathbf{x})}{\tilde{\pi}(\mathbf{x})q(\mathbf{x}, \mathbf{y})} \right) 587 | = \min\left(1, \frac{\psi(\mathbf{y})h(\mathbf{y})h(\mathbf{x})}{\psi(\mathbf{x})h(\mathbf{x})h(\mathbf{y})} \right) 588 | = \min\left(1, \frac{\psi(\mathbf{y})}{\psi(\mathbf{x})} \right)$ 589 | 590 | 591 | \bulletPoint{Proposal Variance} \quad 592 | • Important to tune proposal variance $\sigma$. \quad 593 | 594 | • $\sigma$ too small → slow convergence, high acceptance rate, takes a long time to explore the whole space. 595 | 596 | • $\sigma$ too large → big steps, low acceptance rate, stuck for long time. 597 | 598 | • Rules of thumb: 599 | (1) Random walk MH: target acceptance rate of 0.25 to 0.5. 600 | (2) Independence chain MH: acceptance rate close to 1 601 | 602 | \bulletPoint{Burn-In} \quad 603 | Discard early samples before chain reaches stationary distribution. 604 | Hard to know when. E.g. $x_0 \sim \text{Unif}(\{0,1,\dots,20\})$, takes over 400 steps to "forget" initial state. 605 | 606 | \bulletPoint{Thinning}\quad 607 | Break dependencies between samples by taking every $d$-th sample. 608 | Useful when $\sigma$ is too large → MC stuck for long time at same location. 609 | Subsample every $d$ samples: $z_0, z_d, z_{2d}, \dots$ 610 | 611 | \bulletPoint{Gibbs Sampling}\quad 612 | 613 | Special case of Metropolis-Hastings, for multivariate $p(z_1, \dots, z_d)$ (hard to sample jointly when $d$ is large). 614 | For each $i$, define $\mathbf{z}_{-i} = \{z_1,\dots,z_{i-1},z_{i+1},\dots,z_d\}$. 615 | If we can compute full conditionals $p(z_i \mid \mathbf{z}_{-i})$, we can perform Gibbs sampling: 616 | 617 | • Initialize $(z_1^{(0)}, \dots, z_d^{(0)})$. 
618 | 619 | • For each iteration $k$, sequentially sample: 620 | $z_1^{(k)} \sim p(\cdot \mid z_2^{(k-1)}, \dots, z_d^{(k-1)})$ 621 | $z_2^{(k)} \sim p(\cdot \mid z_1^{(k)}, z_3^{(k-1)}, \dots)$ 622 | ... 623 | $z_j^{(k)} \sim p(\cdot \mid z_1^{(k)}, \dots, z_{j-1}^{(k)}, z_{j+1}^{(k-1)}, \dots)$ 624 | ... 625 | $z_d^{(k)} \sim p(\cdot \mid z_1^{(k)}, \dots, z_{d-1}^{(k)})$. 626 | 627 | • Discard burn-in samples. 628 | 629 | • Generating Approximate i.i.d. Samples: 630 | (1) Run $r$ independent Gibbs chains of length $m$, use final sample from each sequence. 631 | (2) Run one long sequence, discard burn-in, take every $d$-th sample. 632 | 633 | • Getting conditionals distributions: 634 | $p(z_1 \mid z_2, \dots, z_d) = \frac{p(z_1, \dots, z_d)}{p(z_2, \dots, z_d)}$ 635 | (1) Start with joint $p(z_1, \dots, z_d)$. 636 | (2) Drop constants not depend on $z_1$. 637 | (3) Use knowledge of well-known distributions to find the distribution $p(z_1 \mid z_2, \dots, z_d)$. 638 | 639 | 640 | 641 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%below is wlb's part 642 | 643 | 644 | \bulletPoint{Introduction to NN and DL} \quad 645 | • An artificial neuron computes $y = f\left(\sum_{j=0}^{m} w_j x_j\right)$ with $x_0 = 1$, $w_0 = b$. \quad 646 | Common activations: sigmoid $f(x) = \frac{1}{1 + e^{-x}}$, ReLU $f(x) = \max(0, x)$. 647 | 648 | • Binary output: $1$ if $\mathbf{w}^T \mathbf{p} + b > 0$, else $0$. \quad 649 | Decision boundary: $\mathbf{w}^T \mathbf{p} + b = 0$. \quad +1 from negative to positive. 650 | 651 | • OR Gate Perceptron Design: 652 | Given $\mathbf{w} = [1, 1]^T$, point $\mathbf{p} = [0, 0.5]^T$ lies on the boundary. \quad 653 | Then $1 \cdot 0 + 1 \cdot 0.5 + b = 0 \Rightarrow b = -0.5$. 654 | 655 | • AND Gate Perceptron Design: 656 | Given $\mathbf{w} = [1, 1]^T$, point $\mathbf{p} = [0, 1.5]^T$ lies on the boundary. \quad 657 | Then $1 \cdot 0 + 1 \cdot 1.5 + b = 0 \Rightarrow b = -1.5$. 
658 | 659 | • XOR and Multi-Layer Perceptron: 660 | Single-layer fails on XOR. Use hidden layer: \quad 661 | Neuron 1: $\mathbf{w}_1 = [1,1]^T$, $b = -0.5$; Neuron 2: $\mathbf{w}_2 = [-1,-1]^T$, $b = 1.5$. \quad 662 | Output neuron AND: $\mathbf{w}_{\text{out}} = [1,1]$, $b = -1.5$. 663 | 664 | • Forward Propagation: 665 | Given $W_1, W_2, W_3$, $b_1, b_2, b_3$, input $\mathbf{p}$, compute: \quad 666 | $\mathbf{h}_1 = \sigma(W_1 \mathbf{p} + b_1)$, $\mathbf{h}_2 = \sigma(W_2 \mathbf{h}_1 + b_2)$, $\hat{y} = \sigma(W_3 \mathbf{h}_2 + b_3)$. 667 | 668 | • Universal Approximation Theorem: 669 | (1) Approximating Arbitrary Decision Regions with 3-Layer MLP; Approximating a Function with 2-Layer MLP. 670 | (2) A hidden layer of nonconstant, bounded, and continuous neurons (with weights 671 | and biases), plus a linear output neuron (with weights but without bias) can 672 | approximate any continuous function to any arbitrary accuracy if there are 673 | enough hidden neurons. 674 | 675 | \bulletPoint{Feedforward Computation} \quad 676 | 677 | A multilayer NN maps input $x$ to output $y$ via: 678 | $y = f(x) = \sigma(W_L \sigma(W_{L-1} \dots \sigma(W_1 x + b_1) \dots + b_{L-1}) + b_L)$. \quad 679 | 680 | All layers use weight matrices $W_i$ and bias vectors $b_i$. 681 | 682 | \bulletPoint{NN terminology Basics} \quad 683 | 684 | • Classification: last layer = softmax, loss = cross-entropy. \quad 685 | 686 | • Regression: last layer = linear, loss = MSE. \quad 687 | 688 | • "2-layer NN(1-hidden-layer NN)" = input + 1 hidden + output. \quad 689 | 690 | • "3-layer NN(2-hidden-layer NN)" = input + 2 hidden + output. 691 | 692 | \bulletPoint{Activation Functions} \quad 693 | 694 | • Sigmoid: $\sigma(x) = \frac{1}{1 + e^{-x}}$, output in $[0,1]$, saturates. \quad 695 | 696 | • Tanh: $\tanh(x)=2\sigma(2x)-1$, output in $[-1,1]$, zero-centered. \quad 697 | 698 | • ReLU: $\max(0, x)$, avoids vanishing gradient, fast to compute. 
\quad 699 | 700 | • Leaky ReLU: $f(x) = \alpha x$ if $x < 0$, else $x$, avoids dead neurons. \quad 701 | 702 | • Linear: $f(x) = cx$, used in regression output layer. 703 | Also known as identity activation function. 704 | 705 | • Softmax: $\text{softmax}(z_i) = \frac{e^{z_i}}{\sum_j e^{z_j}}$, output is probability distribution.\quad 706 | • Maxout.\quad • Exponential Linear Unit (ELU). 707 | 708 | \bulletPoint{Data Preprocessing} \quad 709 | 710 | • Standardization: zero mean, unit variance. \quad 711 | $x' = \frac{x - \mu}{\sigma}$ \quad 712 | 713 | • Normalization: scale to $[0, 1]$ or $[-1, 1]$: $x' = \frac{x - \min(x)}{\max(x) - \min(x)}$ 714 | 715 | \bulletPoint{Loss Functions} \quad 716 | 717 | • Classification: Cross-entropy loss: 718 | $\mathcal{L}(\theta) = -\sum_{k=1}^K y_k \log \hat{y}_k$. 719 | 720 | • Regression: 721 | 722 | Mean squared error (MSE): $\mathcal{L}(\theta) = \frac{1}{n} \sum_i (y_i - \hat{y}_i)^2$. 723 | 724 | Mean absolute error (MAE): $\frac{1}{n} \sum_i |y_i - \hat{y}_i|$ 725 | 726 | \bulletPoint{Gradient Descent (GD)} \quad 727 | 728 | 1. \textbf{Initialization:} Randomly initialize the model parameters $\theta^0$. Set $\theta^{\text{old}} = \theta^0$. 729 | 730 | 2. Compute the gradient of the loss function at $\theta^{\text{old}}$: $\nabla \mathcal{L}(\theta^{\text{old}})$. 731 | 732 | 3. Update the parameters: 733 | $\theta^{\text{new}} = \theta^{\text{old}} - \alpha \nabla \mathcal{L}(\theta^{\text{old}})$ 734 | where $\alpha$ is the learning rate. 735 | 736 | 4. Set $\theta^{\text{old}} = \theta^{\text{new}}$ and return to step 2 (repeat until terminating). 737 | 738 | GD does not guarantee reaching a global minimum. 739 | 740 | • Backpropagation: Combines forward pass + backward pass using chain rule to compute gradients of loss w.r.t. weights. 741 | 742 | • Batch GD: compute gradients over full dataset. 743 | 744 | • Mini-batch GD: over subset (e.g., 32 to 256 samples). 
Mini-batch gradient approximates the full training set gradient well. 745 | 746 | • SGD: mini-batch size = 1, fast but noisy. Less used. 747 | 748 | \bulletPoint{GD with Momentum:} 749 | 750 | • Momentum: $v_t = \beta v_{t-1} + (1-\beta) \nabla \mathcal{L}$, improves convergence. \quad 751 | 752 | • Nesterov Momentum: lookahead at next step before computing gradient. (Skip) 753 | 754 | • Adam: computes a weighted average of past gradients and weighted average of past squared gradients. 755 | • Other: RMSprop, Adagrad, Adadelta, Nadam, etc. 756 | • Most used: Adam, SGD with momentum. 757 | 758 | \bulletPoint{Learning Rate (LR)} 759 | 760 | • Too small → slow convergence; Too large → divergence or oscillations. 761 | • LR scheduling: reduce $\alpha$ over time. 762 | Approaches: step decay, exponential/cosine decay, reduce by constant, warmup(increase then cool down). 763 | 764 | \bulletPoint{Underfitting and Overfitting} 765 | 766 | • Underfitting: high train + validation error (too simple). 767 | 768 | • Overfitting: low train error, high validation error (too complex). 769 | 770 | \bulletPoint{Regularization Techniques} \quad 771 | 772 | • $\ell_2$ regularization: $\mathcal{L}_{\text{reg}} = \mathcal{L}(\theta) + \lambda \sum_k \theta_k^2$, penalty for large. 773 | 774 | • $\ell_1$ regularization: $\sum_k |\theta_k|$, perform worse than $\ell_2$. \quad 775 | 776 | • Elastic net: mix of $\ell_1$ and $\ell_2$. \quad 777 | • Dropout: randomly deactivate neurons during training (e.g., $p=0.5$). \quad 778 | • Early stopping: monitor validation loss, stop if no improvement after $n$ epochs. 779 | 780 | \bulletPoint{Batch Normalization} \quad 781 | • Normalize each mini-batch: $\hat{x} = \frac{x - \mu}{\sigma}$ \quad 782 | 783 | • Less dependent on initialization. Normalizes activations. Reduces vanishing/exploding gradients. Reduces internal covariate shift. 784 | Stabilizes training, enables higher learning rates. Common in CNNs. 
785 | 786 | \bulletPoint{Hyperparameter Tuning} \quad 787 | • layer size, lr(schedule), optimizer, regularization, batch size, activation, loss f. \quad 788 | 789 | • Methods: grid search(check all with step), random search, Bayesian optimization. Use k-fold cross-validation if data is limited. 790 | 791 | \bulletPoint{Why MLP Fails on Images} \quad 792 | MLPs are not translation invariant: shifting the input changes the output drastically. CNNs solve this with local receptive fields and shared weights. 793 | 794 | \bulletPoint{Convolution Operation} \quad 795 | • 1D: Convolution flips and slides a kernel $w$ over input $x$, 796 | $(x * w)[n] = \sum_{k=-\infty}^{\infty} x[k] w[n-k]$ 797 | 798 | $x = [1, 2, 3, 4]$, $w = [5, 6, 7]$→$z = [5, 16, 34, 52, 45, 28]$ 799 | 800 | • 2D: Apply 2D filter over 2D image by elementwise multiplication and sum. Edge detectors highlight directional features. 801 | 802 | \bulletPoint{Convolutional Neural Networks(CNNs)} \quad 803 | • Parameter sharing via convolution filters. 804 | • Sparse connections via local receptive fields. 805 | • Translation invariance. 806 | • Fewer parameters than MLPs: faster train. 807 | 808 | % CNNs use a convolutional operator for extracting data features. 809 | 810 | % • Allows parameter sharing, Efficient to train. 811 | 812 | % • Require fewer parameters than fully connected neural networks. 813 | 814 | % • CNNs are robust to spatial translations of objects in images. 815 | 816 | % • A convolutional filter slides across the image. 817 | 818 | \bulletPoint{Residual Networks (ResNets)} \quad 819 | • Use identity skip connections: output = layer(x) + x. \quad 820 | • Help prevent vanishing gradients. \quad 821 | 822 | • Enable very deep models (e.g., 18, 50, 152 layers). 823 | 824 | \bulletPoint{CNN Details} \quad 825 | • Each filter spans full input depth. \quad 826 | • Produces one activation map per filter. \quad 827 | • Multiple filters produce stacked output (e.g., 6 filters → output depth = 6). 
828 | 829 | \bulletPoint{CNN Architecture} \quad 830 | Typical: [(CONV - ReLU)$\times N$ - POOL]$\times M$ - [FC - ReLU]$\times K$ - Softmax. \quad 831 | Modern trends: deep models (VGG, ResNet), small filters ($3\times3$), less pooling. 832 | 833 | 834 | \bulletPoint{Spatial Dimensions} \quad 835 | For input of size $W_1 \times H_1 \times C$, filter size $F$, stride $S$, padding $P$, number of filters $K$: 836 | 837 | it will produce an output of $W_2 \times H_2 \times K$ 838 | $W_2 = \frac{W_1 - F + 2P}{S} + 1$. Same for $H_2$. 839 | 840 | Number of parameters per filter: $F^2 C + 1$ (bias), \quad total: $K(F^2C + 1)$ for $K$ filters.\quad 841 | 842 | \textbf{Example Calculation:} 843 | Input: $32 \times 32 \times 3$, Filters: $10$ of size $5 \times 5$, stride $1$, padding $2$ \quad 844 | Output spatial size: $(32+2*2-5)/1 + 1 = 32$ → $32 \times 32 \times 10$ \quad 845 | Params: $5*5*3 + 1 = 76$ per filter → total $760$ parameters. 846 | 847 | \bulletPoint{Padding Strategy} \quad 848 | Zero-padding preserves spatial size. To preserve size: use padding $P = \frac{F - 1}{2}$ if $S = 1$ 849 | 850 | \bulletPoint{Pooling Layer} \quad 851 | • Reduces spatial size, parameters, overfitting. \quad 852 | 853 | • Max pooling: keep largest value in window. \quad 854 | • Avg pooling: keep mean value. \quad 855 | • Output size: $W_2 = \frac{W_1 - F}{S} + 1$, same for $H_2$. \quad 856 | 857 | • 0 learnable parameters. 858 | 859 | \bulletPoint{Flatten and FC Layer} \quad 860 | • Flatten: convert activation map to 1D before FC. \quad 861 | • FC: standard dense layer connects to all inputs. 
862 | 863 | % \bulletPoint{Code} 864 | 865 | \lstset{ 866 | basicstyle=\tiny\ttfamily, 867 | % numbers=left, 868 | % numberstyle=\tiny\color{gray}, 869 | % numbersep=5pt, 870 | frame=single, 871 | breaklines=true, 872 | columns=flexible, 873 | keepspaces=true, 874 | showstringspaces=false, 875 | language=Python, 876 | aboveskip=1pt, 877 | belowskip=1pt, 878 | lineskip=0pt, 879 | xleftmargin=1em, 880 | framexleftmargin=1em, 881 | } 882 | 883 | 884 | \begin{lstlisting} 885 | import torch 886 | X = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=torch.float) 887 | Y = torch.tensor([0, 1, 1, 0], dtype=int) 888 | network = torch.nn.Linear(2, 2) 889 | loss_fn = torch.nn.CrossEntropyLoss() 890 | optim = torch.optim.SGD(network.parameters(), lr=0.1) 891 | epochs = 50 892 | for epoch in range(epochs): 893 | for x, y in zip(X, Y): 894 | optim.zero_grad() 895 | y_pred = network(x) 896 | loss = loss_fn(y_pred, y) 897 | loss.backward() 898 | optimizer.step() 899 | ------------------------------------------------------------ 900 | import numpy as np 901 | class NeuralNetwork: 902 | def __init__(self, input_size, hidden_size, output_size, learning_rate, momentum=None): 903 | self.input_size = input_size 904 | self.hidden_size = hidden_size 905 | self.output_size = output_size 906 | self.learning_rate = learning_rate 907 | self.momentum = momentum 908 | self.weights_input_hidden = np.random.rand(input_size, hidden_size) 909 | self.bias_hidden = np.zeros((1, hidden_size)) 910 | self.weights_hidden_output = np.random.rand(hidden_size, output_size) 911 | self.bias_output = np.zeros((1, output_size)) 912 | if self.momentum: 913 | self.velocity_input_hidden = np.zeros((input_size, hidden_size)) 914 | self.velocity_hidden_output = np.zeros((hidden_size, output_size)) 915 | def sigmoid(self, x): 916 | return 1 / (1 + np.exp(-x)) 917 | def sigmoid_derivative(self, x): 918 | return x * (1 - x) 919 | def forward_pass(self, X): 920 | self.hidden_output = self.sigmoid(np.dot(X, 
self.weights_input_hidden) + self.bias_hidden) 921 | self.output = self.sigmoid(np.dot(self.hidden_output, self.weights_hidden_output) + self.bias_output) 922 | return self.output 923 | def backward_pass(self, X, y, output): 924 | error = y - output 925 | output_delta = error * self.sigmoid_derivative(output) 926 | error_hidden = output_delta.dot(self.weights_hidden_output.T) 927 | hidden_delta = error_hidden * self.sigmoid_derivative(self.hidden_output) 928 | if self.momentum: 929 | self.velocity_input_hidden = self.momentum * self.velocity_input_hidden + X.T.dot(hidden_delta) * self.learning_rate 930 | self.velocity_hidden_output = self.momentum * self.velocity_hidden_output + self.hidden_output.T.dot(output_delta) * self.learning_rate 931 | self.weights_input_hidden += self.velocity_input_hidden 932 | self.weights_hidden_output += self.velocity_hidden_output 933 | else: 934 | self.weights_hidden_output += self.hidden_output.T.dot(output_delta) * self.learning_rate 935 | self.weights_input_hidden += X.T.dot(hidden_delta) * self.learning_rate 936 | self.bias_output += np.sum(output_delta, axis=0, keepdims=True) * self.learning_rate 937 | self.bias_hidden += np.sum(hidden_delta, axis=0, keepdims=True) * self.learning_rate 938 | def train(self, X, y, epochs): 939 | for epoch in range(epochs): 940 | output = self.forward_pass(X) 941 | self.backward_pass(X, y, output) 942 | if epoch % 1000 == 0: 943 | loss = np.mean(np.square(y - output)) 944 | print(f'Epoch {epoch}, Loss: {loss:.4f}') 945 | \end{lstlisting} 946 | 947 | 948 | 949 | 950 | \end{multicols*} 951 | 952 | \end{document} -------------------------------------------------------------------------------- /dog&cat/scripts.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 3, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import torch\n", 11 | "import torch.nn as nn\n", 
12 | "import torch.optim as optim\n", 13 | "import torchvision.transforms as transforms\n", 14 | "from torchvision.datasets import ImageFolder\n", 15 | "from torchvision import models\n", 16 | "from torch.utils.data import DataLoader, Subset\n", 17 | "from tqdm import tqdm\n", 18 | "from PIL import Image\n", 19 | "import numpy as np\n", 20 | "import matplotlib.pyplot as plt\n", 21 | "import pandas as pd \n", 22 | "import random\n", 23 | "from collections import Counter" 24 | ] 25 | }, 26 | { 27 | "cell_type": "markdown", 28 | "metadata": {}, 29 | "source": [ 30 | "### Data processing" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 4, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# 训练集的数据增强与预处理\n", 40 | "train_transform = transforms.Compose([\n", 41 | " transforms.Resize(240), # 将短边调整到240\n", 42 | " transforms.CenterCrop(224), # 中心裁剪为224×224\n", 43 | " transforms.RandomHorizontalFlip(p=0.5), # 随机水平翻转\n", 44 | " transforms.ColorJitter(brightness=0.2, contrast=0.2), # 颜色抖动\n", 45 | " transforms.ToTensor(),\n", 46 | " transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) # 归一化\n", 47 | "])\n", 48 | "\n", 49 | "# 验证集与测试集的预处理\n", 50 | "test_transform = transforms.Compose([\n", 51 | " transforms.Resize(240),\n", 52 | " transforms.CenterCrop(224),\n", 53 | " transforms.ToTensor(),\n", 54 | " transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n", 55 | "])\n", 56 | "\n", 57 | "# 加载数据集\n", 58 | "train_data = ImageFolder(root=os.path.join(os.getcwd() + '/datasets/train'), transform=train_transform)\n", 59 | "val_data = ImageFolder(root=os.path.join(os.getcwd() + '/datasets/val'), transform=test_transform)\n" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 5, 65 | "metadata": {}, 66 | "outputs": [ 67 | { 68 | "name": "stdout", 69 | "output_type": "stream", 70 | "text": [ 71 | "Subset Label Counts: Counter({1: 2519, 0: 2481})\n" 72 | ] 73 | } 74 | ], 75 | "source": [ 76 | "# from collections import 
Counter\n", 77 | "# labels = [label for _, label in val_data.samples]\n", 78 | "# print(f\"Label Counts: {Counter(labels)}\")\n", 79 | "\n", 80 | "total_indices_train = list(range(len(train_data)))\n", 81 | "random.shuffle(total_indices_train)\n", 82 | "selected_indices_train = total_indices_train[:5000]\n", 83 | "\n", 84 | "total_indices_val = list(range(len(val_data)))\n", 85 | "random.shuffle(total_indices_val)\n", 86 | "selected_indices_val = total_indices_val[:500]\n", 87 | "\n", 88 | "# 训练集只取5000张, 验证集取500张\n", 89 | "train_subset = Subset(train_data, selected_indices_train)\n", 90 | "val_subset = Subset(val_data, selected_indices_val) \n", 91 | "\n", 92 | "subset_labels = [train_data.targets[i] for i in selected_indices_train]\n", 93 | "print(f\"Subset Label Counts: {Counter(subset_labels)}\")\n", 94 | "\n", 95 | "# 数据加载器\n", 96 | "# train_loader = DataLoader(train_subset, batch_size=32, shuffle=True)\n", 97 | "# val_loader = DataLoader(val_subset, batch_size=32, shuffle=False)\n", 98 | "train_loader = DataLoader(train_data, batch_size=32, shuffle=True)\n", 99 | "val_loader = DataLoader(val_data, batch_size=32, shuffle=False)\n" 100 | ] 101 | }, 102 | { 103 | "cell_type": "markdown", 104 | "metadata": {}, 105 | "source": [ 106 | "### Network" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": 7, 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "model = models.resnet50(pretrained=True)\n", 116 | "num_features = model.fc.in_features # 获取全连接层的输入特征数\n", 117 | "model.fc = nn.Linear(num_features, 1) # 二分类:1个输出节点" 118 | ] 119 | }, 120 | { 121 | "cell_type": "markdown", 122 | "metadata": {}, 123 | "source": [ 124 | "### Train" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": 8, 130 | "metadata": {}, 131 | "outputs": [ 132 | { 133 | "data": { 134 | "text/plain": [ 135 | "device(type='mps')" 136 | ] 137 | }, 138 | "execution_count": 8, 139 | "metadata": {}, 140 | "output_type": "execute_result" 141 
| } 142 | ], 143 | "source": [ 144 | "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"mps\" if torch.backends.mps.is_available() else \"cpu\")\n", 145 | "device" 146 | ] 147 | }, 148 | { 149 | "cell_type": "code", 150 | "execution_count": 9, 151 | "metadata": {}, 152 | "outputs": [ 153 | { 154 | "name": "stderr", 155 | "output_type": "stream", 156 | "text": [ 157 | "Epoch 1/20: 76%|███████▌ | 474/625 [03:18<01:03, 2.39batch/s, Loss=0.285]\n" 158 | ] 159 | }, 160 | { 161 | "ename": "KeyboardInterrupt", 162 | "evalue": "", 163 | "output_type": "error", 164 | "traceback": [ 165 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 166 | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", 167 | "Cell \u001b[0;32mIn[9], line 84\u001b[0m\n\u001b[1;32m 82\u001b[0m loss \u001b[38;5;241m=\u001b[39m criterion(outputs, labels)\n\u001b[1;32m 83\u001b[0m loss\u001b[38;5;241m.\u001b[39mbackward()\n\u001b[0;32m---> 84\u001b[0m \u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstep\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 86\u001b[0m running_loss \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m loss\u001b[38;5;241m.\u001b[39mitem()\n\u001b[1;32m 87\u001b[0m pbar\u001b[38;5;241m.\u001b[39mset_postfix({\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mLoss\u001b[39m\u001b[38;5;124m'\u001b[39m: running_loss \u001b[38;5;241m/\u001b[39m (pbar\u001b[38;5;241m.\u001b[39mn \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m)}) \u001b[38;5;66;03m# 进度条\u001b[39;00m\n", 168 | "File \u001b[0;32m~/miniforge3/envs/anygrasp/lib/python3.9/site-packages/torch/optim/optimizer.py:385\u001b[0m, in \u001b[0;36mOptimizer.profile_hook_step..wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 380\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 381\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m 
\u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[1;32m 382\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mfunc\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m must return None or a tuple of (new_args, new_kwargs), but got \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mresult\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 383\u001b[0m )\n\u001b[0;32m--> 385\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 386\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_optimizer_step_code()\n\u001b[1;32m 388\u001b[0m \u001b[38;5;66;03m# call optimizer step post hooks\u001b[39;00m\n", 169 | "File \u001b[0;32m~/miniforge3/envs/anygrasp/lib/python3.9/site-packages/torch/optim/optimizer.py:76\u001b[0m, in \u001b[0;36m_use_grad_for_differentiable.._use_grad\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 74\u001b[0m torch\u001b[38;5;241m.\u001b[39mset_grad_enabled(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdefaults[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mdifferentiable\u001b[39m\u001b[38;5;124m'\u001b[39m])\n\u001b[1;32m 75\u001b[0m torch\u001b[38;5;241m.\u001b[39m_dynamo\u001b[38;5;241m.\u001b[39mgraph_break()\n\u001b[0;32m---> 76\u001b[0m ret \u001b[38;5;241m=\u001b[39m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 77\u001b[0m 
\u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[1;32m 78\u001b[0m torch\u001b[38;5;241m.\u001b[39m_dynamo\u001b[38;5;241m.\u001b[39mgraph_break()\n", 170 | "File \u001b[0;32m~/miniforge3/envs/anygrasp/lib/python3.9/site-packages/torch/optim/adam.py:166\u001b[0m, in \u001b[0;36mAdam.step\u001b[0;34m(self, closure)\u001b[0m\n\u001b[1;32m 155\u001b[0m beta1, beta2 \u001b[38;5;241m=\u001b[39m group[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mbetas\u001b[39m\u001b[38;5;124m'\u001b[39m]\n\u001b[1;32m 157\u001b[0m has_complex \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_init_group(\n\u001b[1;32m 158\u001b[0m group,\n\u001b[1;32m 159\u001b[0m params_with_grad,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 163\u001b[0m max_exp_avg_sqs,\n\u001b[1;32m 164\u001b[0m state_steps)\n\u001b[0;32m--> 166\u001b[0m \u001b[43madam\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 167\u001b[0m \u001b[43m \u001b[49m\u001b[43mparams_with_grad\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 168\u001b[0m \u001b[43m \u001b[49m\u001b[43mgrads\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 169\u001b[0m \u001b[43m \u001b[49m\u001b[43mexp_avgs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 170\u001b[0m \u001b[43m \u001b[49m\u001b[43mexp_avg_sqs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 171\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_exp_avg_sqs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 172\u001b[0m \u001b[43m \u001b[49m\u001b[43mstate_steps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 173\u001b[0m \u001b[43m \u001b[49m\u001b[43mamsgrad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgroup\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mamsgrad\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 174\u001b[0m \u001b[43m \u001b[49m\u001b[43mhas_complex\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhas_complex\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 175\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mbeta1\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbeta1\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 176\u001b[0m \u001b[43m \u001b[49m\u001b[43mbeta2\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbeta2\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 177\u001b[0m \u001b[43m \u001b[49m\u001b[43mlr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgroup\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mlr\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 178\u001b[0m \u001b[43m \u001b[49m\u001b[43mweight_decay\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgroup\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mweight_decay\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 179\u001b[0m \u001b[43m \u001b[49m\u001b[43meps\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgroup\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43meps\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 180\u001b[0m \u001b[43m \u001b[49m\u001b[43mmaximize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgroup\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mmaximize\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 181\u001b[0m \u001b[43m \u001b[49m\u001b[43mforeach\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgroup\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mforeach\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 182\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mcapturable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgroup\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mcapturable\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 183\u001b[0m \u001b[43m \u001b[49m\u001b[43mdifferentiable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgroup\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mdifferentiable\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 184\u001b[0m \u001b[43m \u001b[49m\u001b[43mfused\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgroup\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mfused\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 185\u001b[0m \u001b[43m \u001b[49m\u001b[43mgrad_scale\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mgetattr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mgrad_scale\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 186\u001b[0m \u001b[43m \u001b[49m\u001b[43mfound_inf\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mgetattr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfound_inf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 187\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 189\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m loss\n", 171 | 
"File \u001b[0;32m~/miniforge3/envs/anygrasp/lib/python3.9/site-packages/torch/optim/adam.py:316\u001b[0m, in \u001b[0;36madam\u001b[0;34m(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, foreach, capturable, differentiable, fused, grad_scale, found_inf, has_complex, amsgrad, beta1, beta2, lr, weight_decay, eps, maximize)\u001b[0m\n\u001b[1;32m 313\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 314\u001b[0m func \u001b[38;5;241m=\u001b[39m _single_tensor_adam\n\u001b[0;32m--> 316\u001b[0m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43mparams\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 317\u001b[0m \u001b[43m \u001b[49m\u001b[43mgrads\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 318\u001b[0m \u001b[43m \u001b[49m\u001b[43mexp_avgs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 319\u001b[0m \u001b[43m \u001b[49m\u001b[43mexp_avg_sqs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 320\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_exp_avg_sqs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 321\u001b[0m \u001b[43m \u001b[49m\u001b[43mstate_steps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 322\u001b[0m \u001b[43m \u001b[49m\u001b[43mamsgrad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mamsgrad\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 323\u001b[0m \u001b[43m \u001b[49m\u001b[43mhas_complex\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhas_complex\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 324\u001b[0m \u001b[43m \u001b[49m\u001b[43mbeta1\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbeta1\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 325\u001b[0m \u001b[43m \u001b[49m\u001b[43mbeta2\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbeta2\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 326\u001b[0m \u001b[43m \u001b[49m\u001b[43mlr\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlr\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 327\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mweight_decay\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mweight_decay\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 328\u001b[0m \u001b[43m \u001b[49m\u001b[43meps\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43meps\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 329\u001b[0m \u001b[43m \u001b[49m\u001b[43mmaximize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmaximize\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 330\u001b[0m \u001b[43m \u001b[49m\u001b[43mcapturable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcapturable\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 331\u001b[0m \u001b[43m \u001b[49m\u001b[43mdifferentiable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdifferentiable\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 332\u001b[0m \u001b[43m \u001b[49m\u001b[43mgrad_scale\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgrad_scale\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 333\u001b[0m \u001b[43m \u001b[49m\u001b[43mfound_inf\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mfound_inf\u001b[49m\u001b[43m)\u001b[49m\n", 172 | "File \u001b[0;32m~/miniforge3/envs/anygrasp/lib/python3.9/site-packages/torch/optim/adam.py:439\u001b[0m, in \u001b[0;36m_single_tensor_adam\u001b[0;34m(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, grad_scale, found_inf, amsgrad, has_complex, beta1, beta2, lr, weight_decay, eps, maximize, capturable, differentiable)\u001b[0m\n\u001b[1;32m 437\u001b[0m denom \u001b[38;5;241m=\u001b[39m (max_exp_avg_sqs[i]\u001b[38;5;241m.\u001b[39msqrt() \u001b[38;5;241m/\u001b[39m bias_correction2_sqrt)\u001b[38;5;241m.\u001b[39madd_(eps)\n\u001b[1;32m 438\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 439\u001b[0m denom \u001b[38;5;241m=\u001b[39m (\u001b[43mexp_avg_sq\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msqrt\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m/\u001b[39;49m\u001b[43m 
\u001b[49m\u001b[43mbias_correction2_sqrt\u001b[49m)\u001b[38;5;241m.\u001b[39madd_(eps)\n\u001b[1;32m 441\u001b[0m param\u001b[38;5;241m.\u001b[39maddcdiv_(exp_avg, denom, value\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m-\u001b[39mstep_size)\n\u001b[1;32m 443\u001b[0m \u001b[38;5;66;03m# Lastly, switch back to complex view\u001b[39;00m\n", 173 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " 174 | ] 175 | } 176 | ], 177 | "source": [ 178 | "model = model.to(device)\n", 179 | "\n", 180 | "criterion = nn.BCEWithLogitsLoss() # 更稳定的损失函数(无需手动添加 Sigmoid)\n", 181 | "optimizer = optim.Adam(model.parameters(), lr=0.001)\n", 182 | "\n", 183 | "train_losses = []\n", 184 | "\n", 185 | "num_epochs = 20\n", 186 | "\n", 187 | "def validate(model, val_loader):\n", 188 | " \"\"\"在验证集上评估模型性能\"\"\"\n", 189 | " model.eval() # 切换到评估模式\n", 190 | " correct = 0\n", 191 | " total = 0\n", 192 | " val_loss = 0.0 # 记录验证集上的损失\n", 193 | "\n", 194 | " # 使用 no_grad 避免计算梯度,提高评估速度\n", 195 | " with torch.no_grad():\n", 196 | " for inputs, labels in val_loader:\n", 197 | " inputs, labels = inputs.to(device), labels.float().unsqueeze(1).to(device)\n", 198 | "\n", 199 | " # 前向传播\n", 200 | " outputs = model(inputs)\n", 201 | "\n", 202 | " # 计算损失\n", 203 | " loss = criterion(outputs, labels)\n", 204 | " val_loss += loss.item()\n", 205 | "\n", 206 | " # 使用 Sigmoid 函数将输出转换为 [0, 1] 之间的概率\n", 207 | " predicted = (torch.sigmoid(outputs) > 0.5).float()\n", 208 | "\n", 209 | " # 计算准确率\n", 210 | " total += labels.size(0)\n", 211 | " correct += (predicted == labels).sum().item()\n", 212 | "\n", 213 | " # 打印验证损失和准确率\n", 214 | " avg_loss = val_loss / len(val_loader)\n", 215 | " accuracy = 100 * correct / total\n", 216 | " print(f'Validation Loss: {avg_loss:.4f}, Accuracy: {accuracy:.2f}%')\n", 217 | "\n", 218 | " # 返回平均损失(用于 Early Stopping 或学习率调度)\n", 219 | " return avg_loss\n", 220 | "\n", 221 | "class EarlyStopping:\n", 222 | " def __init__(self, patience=5, min_delta=0):\n", 223 | " \"\"\"\n", 224 | " 初始化 
EarlyStopping\n", 225 | " :param patience: 等待多少个 epoch 后停止训练\n", 226 | " :param min_delta: 损失改善的最小值\n", 227 | " \"\"\"\n", 228 | " self.patience = patience\n", 229 | " self.min_delta = min_delta\n", 230 | " self.counter = 0\n", 231 | " self.best_loss = None\n", 232 | " self.early_stop = False\n", 233 | "\n", 234 | " def __call__(self, val_loss):\n", 235 | " if self.best_loss is None:\n", 236 | " self.best_loss = val_loss\n", 237 | " elif val_loss > self.best_loss - self.min_delta:\n", 238 | " self.counter += 1\n", 239 | " if self.counter >= self.patience:\n", 240 | " self.early_stop = True\n", 241 | " else:\n", 242 | " self.best_loss = val_loss\n", 243 | " self.counter = 0\n", 244 | "\n", 245 | "# 初始化 EarlyStopping\n", 246 | "early_stopping = EarlyStopping(patience=3, min_delta=0.001)\n", 247 | "\n", 248 | "for epoch in range(num_epochs):\n", 249 | " model.train()\n", 250 | " running_loss = 0.0\n", 251 | "\n", 252 | "\n", 253 | " with tqdm(total=len(train_loader), desc=f'Epoch {epoch + 1}/{num_epochs}', unit='batch') as pbar:\n", 254 | " for inputs, labels in train_loader:\n", 255 | " inputs, labels = inputs.to(device), labels.float().unsqueeze(1).to(device)\n", 256 | "\n", 257 | " optimizer.zero_grad()\n", 258 | " outputs = model(inputs)\n", 259 | " loss = criterion(outputs, labels)\n", 260 | " loss.backward()\n", 261 | " optimizer.step()\n", 262 | "\n", 263 | " running_loss += loss.item()\n", 264 | " pbar.set_postfix({'Loss': running_loss / (pbar.n + 1)}) # 进度条\n", 265 | " pbar.update(1)\n", 266 | "\n", 267 | "\n", 268 | " epoch_loss = running_loss / len(train_loader)\n", 269 | " train_losses.append(epoch_loss) # 保存每个 epoch 的损失\n", 270 | " print(f'Epoch {epoch + 1}, Loss: {running_loss / len(train_loader)}')\n", 271 | "\n", 272 | " # 在验证集上评估模型性能\n", 273 | " val_loss = validate(model, val_loader)\n", 274 | " early_stopping(val_loss)\n", 275 | " if early_stopping.early_stop:\n", 276 | " print(\"Early stopping\")\n", 277 | " break # 结束训练\n", 278 | "\n", 279 | 
"print(\"训练完成!\")\n" 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "execution_count": 68, 285 | "metadata": {}, 286 | "outputs": [ 287 | { 288 | "data": { 289 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAA1cAAAHWCAYAAACbsXOkAAAAP3RFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMS5wb3N0MSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8kixA/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABtGUlEQVR4nO3dfXzN9f/H8efZ2GbYyNjGlqGi5KIWooRvy0V95TIXKRddECWSQuWiVCj1VSnKt3KZxBepbynJJIlyUZKkvq7ZXBRzPc4+vz/ev50Zu97Z+Zyd87jfbue2z/mcz/nsdU6n2XPv9+f1dliWZQkAAAAAUCgBdhcAAAAAAL6AcAUAAAAAbkC4AgAAAAA3IFwBAAAAgBsQrgAAAADADQhXAAAAAOAGhCsAAAAAcAPCFQAAAAC4AeEKAAAAANyAcAUAKLTevXsrLi6uQM8dM2aMHA6HewsCAMAGhCsA8GEOhyNPt8TERLtLtUXv3r1VpkwZu8vIs0WLFqlNmzaKiIhQUFCQKleurC5duujrr7+2uzQAgCSHZVmW3UUAAIrG7NmzM92fOXOmli1bplmzZmXaf9tttykyMrLA3+fcuXNKS0tTcHBwvp97/vx5nT9/XiEhIQX+/gXVu3dvLViwQCdOnPD4984Py7J03333afr06bruuuvUuXNnRUVF6cCBA1q0aJHWr1+v1atXq0mTJnaXCgB+rYTdBQAAis4999yT6f7333+vZcuWXbL/YqdOnVJoaGiev0/JkiULVJ8klShRQiVK8M9RTl555RVNnz5dgwcP1quvvpppGuXTTz+tWbNmueU9tCxLZ86cUalSpQp9LgDwR0wLBAA/17x5c1177bVav369brnlFoWGhuqpp56SJH388ce64447VLlyZQUHB6tGjRoaO3asnE5npnNcfM3Vzp075XA4NHHiRL3zzjuqUaOGgoOD1aBBA/3www+ZnpvVNVcOh0OPPPKIFi9erGuvvVbBwcGqXbu2li5dekn9iYmJuuGGGxQSEqIaNWro7bffdvt1XPPnz1d8fLxKlSqliIgI3XPPPdq3b1+mY5KSktSnTx/FxMQoODhY0dHRateunXbu3Ok65scff1SrVq0UERGhUqVKqVq1arrvvvty/N6nT5/WuHHjVKtWLU2cODHL13XvvfeqYcOGkrK/hm369OlyOByZ6omLi9M///lPffHFF7rhhhtUqlQpvf3227r22mvVokWLS86RlpamKlWqqHPnzpn2TZo0SbVr11ZISIgiIyPVr18//f333zm+LgDwRfypEACgI0eOqE2bNurWrZvuuece1xTB6dOnq0yZMhoyZIjKlCmjr7/+WqNGjVJKSopefvnlXM/7wQcf6Pjx4+rXr58cDodeeukldezYUf/73/9yHe369ttvtXDhQg0YMEBly5bV66+/rk6dOmn37t2qUKGCJGnjxo1q3bq1oqOj9eyzz8rpdOq5555TxYoVC/+m/L/p06erT58+atCggcaNG6fk5GS99tprWr16tTZu3Khy5cpJkjp16qQtW7Zo4MCBiouL08GDB7Vs2TLt3r3bdb9ly5aqWLGihg8frnLlymnnzp1auHBhru/DX3/9pcGDByswMNBtryvdtm3b1L17d/Xr108PPvigatasqa5du2rMmDFKSkpSVFRUplr279+vbt26ufb169fP9R49+uij2rFjhyZPnqyNGzdq9erVhRrVBIBixwIA+I2HH37YuvhHf7NmzSxJ1tSpUy85/tSpU5fs69evnxUaGmqdOXPGta9Xr15W1apVXfd37NhhSbIqV
Khg/fXXX679H3/8sSXJ+uSTT1z7Ro8efUlNkqygoCDrjz/+cO376aefLEnWG2+84drXtm1bKzQ01Nq3b59r3/bt260SJUpccs6s9OrVyypdunS2j6emplqVKlWyrr32Wuv06dOu/Z9++qklyRo1apRlWZb1999/W5Ksl19+OdtzLVq0yJJk/fDDD7nWdaHXXnvNkmQtWrQoT8dn9X5almW9//77liRrx44drn1Vq1a1JFlLly7NdOy2bdsuea8ty7IGDBhglSlTxvW5WLVqlSXJmjNnTqbjli5dmuV+APB1TAsEACg4OFh9+vS5ZP+F194cP35chw8fVtOmTXXq1Cn99ttvuZ63a9euKl++vOt+06ZNJUn/+9//cn1uQkKCatSo4bpft25dhYWFuZ7rdDr11VdfqX379qpcubLruCuuuEJt2rTJ9fx58eOPP+rgwYMaMGBApoYbd9xxh2rVqqX//ve/ksz7FBQUpMTExGynw6WPcH366ac6d+5cnmtISUmRJJUtW7aAryJn1apVU6tWrTLtu+qqq1S/fn3NmzfPtc/pdGrBggVq27at63Mxf/58hYeH67bbbtPhw4ddt/j4eJUpU0YrVqwokpoBwFsRrgAAqlKlioKCgi7Zv2XLFnXo0EHh4eEKCwtTxYoVXc0wjh07lut5L7/88kz304NWXq7Hufi56c9Pf+7Bgwd1+vRpXXHFFZccl9W+gti1a5ckqWbNmpc8VqtWLdfjwcHBmjBhgj7//HNFRkbqlltu0UsvvaSkpCTX8c2aNVOnTp307LPPKiIiQu3atdP777+vs2fP5lhDWFiYJBNui0K1atWy3N+1a1etXr3adW1ZYmKiDh48qK5du7qO2b59u44dO6ZKlSqpYsWKmW4nTpzQwYMHi6RmAPBWhCsAQJbd4Y4ePapmzZrpp59+0nPPPadPPvlEy5Yt04QJEySZRga5ye4aISsPq4AU5rl2GDx4sH7//XeNGzdOISEhGjlypK6++mpt3LhRkmnSsWDBAq1Zs0aPPPKI9u3bp/vuu0/x8fE5toKvVauWJGnz5s15qiO7Rh4XNyFJl11nwK5du8qyLM2fP1+S9NFHHyk8PFytW7d2HZOWlqZKlSpp2bJlWd6ee+65PNUMAL6CcAUAyFJiYqKOHDmi6dOna9CgQfrnP/+phISETNP87FSpUiWFhITojz/+uOSxrPYVRNWqVSWZpg8X27Ztm+vxdDVq1NDjjz+uL7/8Ur/88otSU1P1yiuvZDrmxhtv1AsvvKAff/xRc+bM0ZYtW/Thhx9mW8PNN9+s8uXLa+7cudkGpAul//c5evRopv3po2x5Va1aNTVs2FDz5s3T+fPntXDhQrVv3z7TWmY1atTQkSNHdNNNNykhIeGSW7169fL1PQGguCNcAQCylD5ydOFIUWpqqt566y27SsokMDBQCQkJWrx4sfbv3+/a/8cff+jzzz93y/e44YYbVKlSJU2dOjXT9L3PP/9cW7du1R133CHJrAt25syZTM+tUaOGypYt63re33//fcmoW/369SUpx6mBoaGhGjZsmLZu3aphw4ZlOXI3e/ZsrVu3zvV9Jembb75xPX7y5EnNmDEjry/bpWvXrvr+++/13nvv6fDhw5mmBEpSly5d5HQ6NXbs2Euee/78+UsCHgD4OlqxAwCy1KRJE5UvX169evXSo48+KofDoVmzZnnVtLwxY8boyy+/1E033aT+/fvL6XRq8uTJuvbaa7Vp06Y8nePcuXN6/vnnL9l/2WWXacCAAZowYYL69OmjZs2aqXv37q5W7HFxcXrsscckSb///rtuvfVWdenSRddcc41KlCihRYsWKTk52dW2fMaMGXrrrbfUoUMH1ahRQ8ePH9e0adMUFham22+/Pccan3jiCW3ZskWvvPKKVqxYoc6dOysqKkpJSUlavHix1q1bp++++06S1LJlS11++eW6//779cQTTygwMFDvvfeeKlasqN27d+fj3TXhaejQo
Ro6dKguu+wyJSQkZHq8WbNm6tevn8aNG6dNmzapZcuWKlmypLZv36758+frtddey7QmFgD4OsIVACBLFSpU0KeffqrHH39czzzzjMqXL6977rlHt9566yXd5ewSHx+vzz//XEOHDtXIkSMVGxur5557Tlu3bs1TN0PJjMaNHDnykv01atTQgAED1Lt3b4WGhmr8+PEaNmyYSpcurQ4dOmjChAmuDoCxsbHq3r27li9frlmzZqlEiRKqVauWPvroI3Xq1EmSCSLr1q3Thx9+qOTkZIWHh6thw4aaM2dOtk0l0gUEBGjmzJlq166d3nnnHU2cOFEpKSmqWLGiq3lG48aNJUklS5bUokWLNGDAAI0cOVJRUVEaPHiwypcvn2VHyJzExMSoSZMmWr16tR544IEs16yaOnWq4uPj9fbbb+upp55SiRIlFBcXp3vuuUc33XRTvr4fABR3Dsub/gQJAIAbtG/fXlu2bNH27dvtLgUA4Ee45goAUKydPn060/3t27frs88+U/Pmze0pCADgtxi5AgAUa9HR0erdu7eqV6+uXbt2acqUKTp79qw2btyoK6+80u7yAAB+hGuuAADFWuvWrTV37lwlJSUpODhYjRs31osvvkiwAgB4HCNXAAAAAOAGXHMFAAAAAG5AuAIAAAAAN+CaqyykpaVp//79Klu2rBwOh93lAAAAALCJZVk6fvy4KleurICAnMemCFdZ2L9/v2JjY+0uAwAAAICX2LNnj2JiYnI8hnCVhbJly0oyb2BYWJjN1QAAAACwS0pKimJjY10ZISeEqyykTwUMCwsjXAEAAADI0+VCNLQAAAAAADcgXAEAAACAGxCuAAAAAMANuOYKAAAAPsuyLJ0/f15Op9PuUuClAgMDVaJECbcswUS4AgAAgE9KTU3VgQMHdOrUKbtLgZcLDQ1VdHS0goKCCnUewhUAAAB8Tlpamnbs2KHAwEBVrlxZQUFBbhmZgG+xLEupqak6dOiQduzYoSuvvDLXhYJzQrgCAACAz0lNTVVaWppiY2MVGhpqdznwYqVKlVLJkiW1a9cupaamKiQkpMDnoqEFAAAAfFZhRiHgP9z1OeHTBgAAAABuwLRAL+Z0SqtWSQcOSNHRUtOmUmCg3VUBAAAAyAojV15q4UIpLk5q0UK6+27zNS7O7AcAAIDnOJ1SYqI0d675Why7usfFxWnSpEl5Pj4xMVEOh0NHjx4tspp8EeHKCy1cKHXuLO3dm3n/vn1mPwELAADAMzz9B2+Hw5HjbcyYMQU67w8//KC+ffvm+fgmTZrowIEDCg8PL9D3yytfC3FMC/QyTqc0aJBkWZc+ZlmSwyENHiy1a8cUQQAAgKKU/gfvi38vS/+D94IFUseO7v2eBw4ccG3PmzdPo0aN0rZt21z7ypQp49q2LEtOp1MlSuT+K33FihXzVUdQUJCioqLy9RwwcuV1Vq26dMTqQpYl7dljjgMAAEDeWZZ08mTebikp0qOPZv8Hb8n8QTwlJW/ny+o8WYmKinLdwsPD5XA4XPd/++03lS1bVp9//rni4+MVHBysb7/9Vn/++afatWunyMhIlSlTRg0aNNBXX32V6bwXTwt0OBz697//rQ4dOig0NFRXXnmllixZ4nr84hGl6dOnq1y5cvriiy909dVXq0yZMmrdunWmMHj+/Hk9+uijKleunCpUqKBhw4apV69eat++fd5efBb+/vtv9ezZU+XLl1doaKjatGmj7du3ux7ftWuX2rZtq/Lly6t06dKqXbu2PvvsM9dze/TooYoVK6pUqVK68sor9f777xe4lrwgXHmZCz6fbjkOAAAAxqlTUpkyebuFh5sRquxYlvmDeHh43s536pT7Xsfw4cM1fvx4bd26VXXr1tWJEyd0++23a/ny5dq4caNat26ttm3bavfu3Tme59lnn1WXLl30888/6/bbb1ePHj30119/ZXv8qVOnNHHiRM2aNUvffPONdu/eraFDh7oenzBhgubMmaP33
39fq1evVkpKihYvXlyo19q7d2/9+OOPWrJkidasWSPLsnT77bfr3LlzkqSHH35YZ8+e1TfffKPNmzdrwoQJrtG9kSNH6tdff9Xnn3+urVu3asqUKYqIiChUPblhWqCXiY5273EAAADwLc8995xuu+021/3LLrtM9erVc90fO3asFi1apCVLluiRRx7J9jy9e/dW9+7dJUkvvviiXn/9da1bt06tW7fO8vhz585p6tSpqlGjhiTpkUce0XPPPed6/I033tCIESPUoUMHSdLkyZNdo0gFsX37di1ZskSrV69WkyZNJElz5sxRbGysFi9erLvuuku7d+9Wp06dVKdOHUlS9erVXc/fvXu3rrvuOt1www2SzOhdUWPkyss0bSrFxJhrq7LicEixseY4AAAA5F1oqHTiRN5uec0En32Wt/OFhrrvdaSHhXQnTpzQ0KFDdfXVV6tcuXIqU6aMtm7dmuvIVd26dV3bpUuXVlhYmA4ePJjt8aGhoa5gJUnR0dGu448dO6bk5GQ1bNjQ9XhgYKDi4+Pz9doutHXrVpUoUUKNGjVy7atQoYJq1qyprVu3SpIeffRRPf/887rppps0evRo/fzzz65j+/fvrw8//FD169fXk08+qe+++67AteQV4crLBAZKr71mtrMLWJMm0cwCAAAgvxwOqXTpvN1atszbH7xbtszb+bI7T0GULl060/2hQ4dq0aJFevHFF7Vq1Spt2rRJderUUWpqao7nKVmy5EWvyaG0tLR8HW/l9WKyIvLAAw/of//7n+69915t3rxZN9xwg9544w1JUps2bbRr1y499thj2r9/v2699dZM0xiLAuHKC3XsaLrPVKmSeb/DYdZXcHdXGgAAAGSW0x+80+97yx+8V69erd69e6tDhw6qU6eOoqKitHPnTo/WEB4ersjISP3www+ufU6nUxs2bCjwOa+++mqdP39ea9eude07cuSItm3bpmuuuca1LzY2Vg899JAWLlyoxx9/XNOmTXM9VrFiRfXq1UuzZ8/WpEmT9M477xS4nrzgmisv1bGjabe+apW5mHLwYOnwYSkPnTYBAADgBul/8B40KHM355gYE6y85Q/eV155pRYuXKi2bdvK4XBo5MiROY5AFZWBAwdq3LhxuuKKK1SrVi298cYb+vvvv+XIw7Dd5s2bVbZsWdd9h8OhevXqqV27dnrwwQf19ttvq2zZsho+fLiqVKmidu3aSZIGDx6sNm3a6KqrrtLff/+tFStW6Oqrr5YkjRo1SvHx8apdu7bOnj2rTz/91PVYUeFXdS8WGCg1b262N2+WJkyQZs6UOnWytSwAAAC/ceEfvA8cME3Fmjb1jhGrdK+++qruu+8+NWnSRBERERo2bJhSUlI8XsewYcOUlJSknj17KjAwUH379lWrVq0UmIc365Zbbsl0PzAwUOfPn9f777+vQYMG6Z///KdSU1N1yy236LPPPnNNUXQ6nXr44Ye1d+9ehYWFqXXr1vrXv/4lyazVNWLECO3cuVOlSpVS06ZN9eGHH7r/hV/AYdk9UdILpaSkKDw8XMeOHVNYWJjd5UiStm6VrrnGjFzt3y/lcx04AAAAv3LmzBnt2LFD1apVU0hIiN3l+KW0tDRdffXV6tKli8aOHWt3OTnK6fOSn2zANVfFxNVXSw0aSOfPSx98YHc1AAAAQGa7du3StGnT9Pvvv2vz5s3q37+/duzYobvvvtvu0jyGcFWM9Oplvs6YYW8dAAAAwMUCAgI0ffp0NWjQQDfddJM2b96sr776qsivc/ImXHNVjHTrJj32mLRxo7kG6//XSgMAAABsFxsbq9WrV9tdhq0YuSpGKlSQ/vlPs83oFQAAAOBdCFfFTPrUwNmzzfVXAAAAyB6925AX7vqcEK6KmTZtpIgIKTlZWrbM7moAAAC8U3qr7lOnTtlcCYqD9M9J+uemoLjmqpgJCpLuvlt6/XUzNbBNG7srAgAA8D6BgYEqV66cDh48KEkKDQ3N02K28
C+WZenUqVM6ePCgypUrl6c1uXJCuCqGevUy4WrxYunoUalcOZsLAgAA8EJRUVGS5ApYQHbKlSvn+rwUBuGqGLruOunaa6VffpE++kjq29fuigAAALyPw+FQdHS0KlWqpHPnztldDrxUyZIlCz1ilY5wVQw5HGb06oknzNRAwhUAAED2AgMD3fbLM5ATGloUUz16SAEB0nffSdu3210NAAAAAMJVMRUdLbVsabZnzrS3FgAAAACEq2Itfc2rmTOltDR7awEAAAD8nVeEqzfffFNxcXEKCQlRo0aNtG7dumyPXbhwoW644QaVK1dOpUuXVv369TVr1qxMx1iWpVGjRik6OlqlSpVSQkKCtvvg3Ll27aTwcGn3bumbb+yuBgAAAPBvtoerefPmaciQIRo9erQ2bNigevXqqVWrVtm2zLzsssv09NNPa82aNfr555/Vp08f9enTR1988YXrmJdeekmvv/66pk6dqrVr16p06dJq1aqVzpw546mX5RGlSkldupjtGTPsrQUAAADwdw7Lsiw7C2jUqJEaNGigyZMnS5LS0tIUGxurgQMHavjw4Xk6x/XXX6877rhDY8eOlWVZqly5sh5//HENHTpUknTs2DFFRkZq+vTp6tatW67nS0lJUXh4uI4dO6awsLCCvzgPWL1auvlmqUwZKSlJKl3a7ooAAAAA35GfbGDryFVqaqrWr1+vhIQE176AgAAlJCRozZo1uT7fsiwtX75c27Zt0y233CJJ2rFjh5KSkjKdMzw8XI0aNcr2nGfPnlVKSkqmW3HRpIlUo4Z04oS0cKHd1QAAAAD+y9ZwdfjwYTmdTkVGRmbaHxkZqaSkpGyfd+zYMZUpU0ZBQUG644479MYbb+i2226TJNfz8nPOcePGKTw83HWLjY0tzMvyKIdD6tnTbDM1EAAAALCP7ddcFUTZsmW1adMm/fDDD3rhhRc0ZMgQJSYmFvh8I0aM0LFjx1y3PXv2uK9YD0gPV19/LRWz0gEAAACfYWu4ioiIUGBgoJKTkzPtT05OVlRUVLbPCwgI0BVXXKH69evr8ccfV+fOnTVu3DhJcj0vP+cMDg5WWFhYpltxEhcnNWsmWZZ0UeNEAAAAAB5ia7gKCgpSfHy8li9f7tqXlpam5cuXq3Hjxnk+T1pams6ePStJqlatmqKiojKdMyUlRWvXrs3XOYubC9e8srdFCQAAAOCfbJ8WOGTIEE2bNk0zZszQ1q1b1b9/f508eVJ9+vSRJPXs2VMjRoxwHT9u3DgtW7ZM//vf/7R161a98sormjVrlu655x5JksPh0ODBg/X8889ryZIl2rx5s3r27KnKlSurffv2drxEj+jcWQoNlbZtk3JYJgwAAABAESlhdwFdu3bVoUOHNGrUKCUlJal+/fpaunSpqyHF7t27FRCQkQFPnjypAQMGaO/evSpVqpRq1aql2bNnq2vXrq5jnnzySZ08eVJ9+/bV0aNHdfPNN2vp0qUKCQnx+OvzlLJlpY4dpdmzTWOLRo3srggAAADwL7avc+WNitM6Vxf66ivpttuk8uWlAwek4GC7KwIAAACKt2KzzhXcq0ULqUoV6e+/pU8+sbsaAAAAwL8QrnxIYKB0771mmzWvAAAAAM8iXPmY9K6Bn38uXdSNHgAAAEARIlz5mFq1pIYNJadTmjvX7moAAAAA/0G48kHpo1dMDQQAAAA8h3Dlg7p1k4KCpE2bpJ9/trsaAAAAwD8QrnzQZZdJbduabUavAAAAAM8gXPmonj3N1zlzpPPn7a0FAAAA8AeEKx/Vpo1UsaLpGPjFF3ZXAwAAAPg+wpWPKllSuvtus83UQAAAAKDoEa58WHrXwCVLpL//trcWAAAAwNcRrnxY/fpSnTrS2bPSRx/ZXQ0AAADg2whXPszhYM0rAAAAwFMIVz6uRw8pMFBas0b6/Xe7qwEAAAB8F+HKx0VFSa1ame2ZM+2tBQAAAPBlhCs/kL7m1axZUlqavbUAAAAAvopw5Qfat
ZPCw6Xdu6XERLurAQAAAHwT4coPhIRIXbuabaYGAgAAAEWDcOUn0rsGLlggnThhby0AAACALyJc+YnGjaUrr5ROnpQWLrS7GgAAAMD3EK78hMOR0diCNa8AAAAA9yNc+ZF77zVfV6wwzS0AAAAAuA/hyo9UrSo1by5ZlmnLDgAAAMB9CFd+Jr2xxYwZJmQBAAAAcA/ClZ/p1EkKDZW2b5fWrrW7GgAAAMB3EK78TNmyJmBJNLYAAAAA3Ilw5YfSpwZ++KF05oy9tQAAAAC+gnDlh1q0kGJjpaNHpU8+sbsaAAAAwDcQrvxQQEBGW3amBgIAAADuQbjyU+kLCi9dKiUn21sLAAAA4AsIV36qZk2pUSPJ6ZTmzLG7GgAAAKD4I1z5sfTGFjNn2lsHAAAA4AsIV36sa1cpKEj66SdzAwAAAFBwhCs/dtll0p13mm0aWwAAAACFQ7jyc+lTA+fMkc6ds7cWAAAAoDgjXPm5Vq2kSpWkgwelL76wuxoAAACg+CJc+bmSJaW77zbbTA0EAAAACo5wBdfUwCVLpL/+srcWAAAAoLgiXEH160t160qpqdJHH9ldDQAAAFA8Ea4gKWP0iqmBAAAAQMEQriBJ6tFDCgyUvv9e2rbN7moAAACA4odwBUlSZKTUurXZnjnT3loAAACA4ohwBZf0qYGzZklpafbWAgAAABQ3hCu4tG0rlSsn7dkjrVhhdzUAAABA8UK4gktIiNS1q9mmsQUAAACQP4QrZJI+NfA//5FOnLC3FgAAAKA4IVwhkxtvlK68Ujp1ygQsAAAAAHlDuEImDgdrXgEAAAAFQbjCJe6914SsFSukXbvsrgYAAAAoHghXuMTll0stWpjtWbPsrQUAAAAoLghXyFLPnubrzJmSZdlbCwAAAFAcEK6QpU6dpNKlpe3bpTVr7K4GAAAA8H6EK2SpTBkTsCQzegUAAAAgZ4QrZCu9a+C8edKZM/bWAgAAAHg7whWy1by5aW5x9Ki0ZInd1QAAAADezSvC1Ztvvqm4uDiFhISoUaNGWrduXbbHTps2TU2bNlX58uVVvnx5JSQkXHJ879695XA4Mt1at25d1C/D5wQEmLbsEmteAQAAALmxPVzNmzdPQ4YM0ejRo7VhwwbVq1dPrVq10sGDB7M8PjExUd27d9eKFSu0Zs0axcbGqmXLltq3b1+m41q3bq0DBw64bnPnzvXEy/E56V0Dv/hCSkqytxYAAADAmzksy95G240aNVKDBg00efJkSVJaWppiY2M1cOBADR8+PNfnO51OlS9fXpMnT1bP/08CvXv31tGjR7V48eIC1ZSSkqLw8HAdO3ZMYWFhBTqHL2nSxHQMnDhRevxxu6sBAAAAPCc/2cDWkavU1FStX79eCQkJrn0BAQFKSEjQmjz2/z516pTOnTunyy67LNP+xMREVapUSTVr1lT//v115MiRbM9x9uxZpaSkZLohQ/ro1YwZrHkFAAAAZMfWcHX48GE5nU5FRkZm2h8ZGamkPM5BGzZsmCpXrpwpoLVu3VozZ87U8uXLNWHCBK1cuVJt2rSR0+nM8hzjxo1TeHi46xYbG1vwF+WDunaVgoOlzZuln36yuxoAAADAO9l+zVVhjB8/Xh9++KEWLVqkkJAQ1/5u3brpzjvvVJ06ddS+fXt9+umn+uGHH5SYmJjleUaMGKFjx465bnv27PHQKygeypeX7rzTbNPYAgAAAMiareEqIiJCgYGBSk5OzrQ/OTlZUVFROT534sSJGj9+vL788kvVrVs3x2OrV6+uiIgI/fHHH1k+HhwcrLCwsEw3ZJa+5tWcOdK5c/bWAgAAAHgjW8NVUFCQ4uPjtXz5cte+tLQ0LV++XI0bN872eS+99JLGjh2rpUuX6oYbbsj1++zdu1dHjhxRdHS0W+r2R61aSZGR0qFD0tKldlcDAAAAeB/bpwUOGTJE06ZN04wZM7R161b1799fJ0+eVJ8+fSRJPXv21IgRI1zHT5gwQSNHjtR77
72nuLg4JSUlKSkpSSdOnJAknThxQk888YS+//577dy5U8uXL1e7du10xRVXqFWrVra8Rl9QooTUo4fZnjhRmjtXSkyUsrmMDQAAAPA7JewuoGvXrjp06JBGjRqlpKQk1a9fX0uXLnU1udi9e7cCAjIy4JQpU5SamqrOnTtnOs/o0aM1ZswYBQYG6ueff9aMGTN09OhRVa5cWS1bttTYsWMVHBzs0dfma6pUMV+/+cbcJCkmRnrtNaljR/vqAgAAALyB7etceSPWubrUwoVS586XtmJ3OMzXBQsIWAAAAPA9xWadKxQPTqc0aFDWa1yl7xs8mCmCAAAA8G+EK+Rq1Spp797sH7csac8ecxwAAADgrwhXyNWBA+49DgAAAPBFhCvkKq8d7Ol0DwAAAH9GuEKumjY1XQHTm1dczOGQYmPNcQAAAIC/IlwhV4GBpt26lH3AmjTJHAcAAAD4K8IV8qRjR9NuPX2tqwvdfjtt2AEAAADWucoC61xlz+k0XQEPHJD275eGDpVCQqQdO6SoKLurAwAAANwrP9mghIdqgo8IDJSaNzfbliX95z/SmjXSxInmBgAAAPgrpgWiwBwOadQosz1linTwoL31AAAAAHYiXKFQWrWSGjSQTp2SXn3V7moAAAAA+xCuUCgXjl69+aZ05Ii99QAAAAB2IVyh0O64Q7ruOunECdOSHQAAAPBHhCsUmsMhjRxptl9/Xfr7b3vrAQAAAOxAuIJbtGsn1akjpaSYgAUAAAD4G8IV3CIgIGP0atIkE7IAAAAAf0K4gtt06iRdc4109Kg0ebLd1QAAAACeRbiC2wQESE8/bbZfeUU6ftzeegAAAABPIlzBrbp2la66SvrrL7OwMAAAAOAvCFdwq8DAjNGriROlkyftrQcAAADwFMIV3O7uu6Xq1aVDh6S337a7GgAAAMAzCFdwuxIlMkavXnpJOn3a3noAAAAATyBcoUjce69UtaqUnCz9+992VwMAAAAUPcIVikTJktKIEWZ7/HjpzBl76wEAAACKGuEKRaZ3bykmRtq/X3r/fburAQAAAIoW4QpFJjhYGj7cbI8bJ6Wm2lsPAAAAUJQIVyhS998vRUdLe/ZIM2bYXQ0AAABQdAhXKFIhIdKTT5rtceOkc+fsrQcAAAAoKoQrFLm+faVKlaQdO6Q5c+yuBgAAACgahCsUudBQ6YknzPYLL0jnz9tbDwAAAFAUCFfwiIcekiIipD/+kD780O5qAAAAAPcjXMEjypSRHn/cbD//vOR02lsPAAAA4G6EK3jMww9L5ctL27ZJCxbYXQ0AAADgXoQreEzZstJjj5ntsWOltDR76wEAAADciXAFjxo4UAoPl7ZskRYtsrsaAAAAwH0IV/CocuWkQYPM9nPPMXoFAAAA30G4gscNGmSmCP78s/TJJ3ZXAwAAALgH4Qoed9ll0iOPmO2xYyXLsrceAAAAwB0IV7DFkCFS6dLS+vXS55/bXQ0AAABQeIQr2CIiQhowwGw/9xyjVwAAACj+CFewzeOPS6VKSWvXSsuW2V0NAAAAUDiEK9gmMlJ66CGzzegVAAAAijvCFWz1xBNScLC0erWUmGh3NQAAAEDBEa5gq+ho6cEHzfZzz9lbCwAAAFAYhCvYbtgwKSjIjFx9843d1QAAAAAFQ7iC7WJipPvuM9tjx9pbCwAAAFBQhCt4heHDpRIlpK++ktassbsaAAAAIP8IV/AKVatKvXqZbUavAAAAUBwRruA1RoyQAgOlzz+XfvjB7moAAACA/CFcwWvUqCHdc4/ZZvQKAAAAxQ3hCl7lqaekgADpk0+kjRvtrgYAAADIO8IVvMpVV0ndu5ttRq8AAABQnBCu4HWeflpyOKRFi6TNm+2uBgAAAMgbwhW8ztVXS3fdZbaff97eWgAAAIC8IlzBKz3zjPk6f77066/21gIAAADkBeEKXqlOHaljR8mypBdesLsaAAAAIHdeEa7efPNNxcXFKSQkRI0aNdK6deuyPXbat
Glq2rSpypcvr/LlyyshIeGS4y3L0qhRoxQdHa1SpUopISFB27dvL+qXATcbOdJ8/fBDads2e2sBAAAAcmN7uJo3b56GDBmi0aNHa8OGDapXr55atWqlgwcPZnl8YmKiunfvrhUrVmjNmjWKjY1Vy5YttW/fPtcxL730kl5//XVNnTpVa9euVenSpdWqVSudOXPGUy8LblC/vtS2rZSWJr34ot3VAAAAADlzWJZl2VlAo0aN1KBBA02ePFmSlJaWptjYWA0cOFDDhw/P9flOp1Ply5fX5MmT1bNnT1mWpcqVK+vxxx/X0KFDJUnHjh1TZGSkpk+frm7duuV6zpSUFIWHh+vYsWMKCwsr3AtEofzwg9SwoRQYaEavatSwuyIAAAD4k/xkA1tHrlJTU7V+/XolJCS49gUEBCghIUFr1qzJ0zlOnTqlc+fO6bLLLpMk7dixQ0lJSZnOGR4erkaNGmV7zrNnzyolJSXTDd6hQQOpTRvJ6ZTGjbO7GgAAACB7toarw4cPy+l0KjIyMtP+yMhIJSUl5ekcw4YNU+XKlV1hKv15+TnnuHHjFB4e7rrFxsbm96WgCKVfezVjhrRzp62lAAAAANmy/Zqrwhg/frw+/PBDLVq0SCEhIQU+z4gRI3Ts2DHXbc+ePW6sEoXVuLF0223S+fPS+PF2VwMAAABkzdZwFRERocDAQCUnJ2fan5ycrKioqByfO3HiRI0fP15ffvml6tat69qf/rz8nDM4OFhhYWGZbvAu6aNX770nkX0BAADgjWwNV0FBQYqPj9fy5ctd+9LS0rR8+XI1btw42+e99NJLGjt2rJYuXaobbrgh02PVqlVTVFRUpnOmpKRo7dq1OZ4T3q1pU6l5c+ncOemll+yuBgAAALiU7dMChwwZomnTpmnGjBnaunWr+vfvr5MnT6pPnz6SpJ49e2rEiBGu4ydMmKCRI0fqvffeU1xcnJKSkpSUlKQTJ05IkhwOhwYPHqznn39eS5Ys0ebNm9WzZ09VrlxZ7du3t+Mlwk1GjTJfp02T9u+3txYAAADgYiXsLqBr1646dOiQRo0apaSkJNWvX19Lly51NaTYvXu3AgIyMuCUKVOUmpqqzp07ZzrP6NGjNWbMGEnSk08+qZMnT6pv3746evSobr75Zi1durRQ12XBfs2bSzffLH37rfTyy9K//mV3RQAAAECGAq1ztWfPHjkcDsXExEiS1q1bpw8++EDXXHON+vbt6/YiPY11rrzXsmVSy5ZSSIi0Y4eUy6V5AAAAQKEU+TpXd999t1asWCHJtD6/7bbbtG7dOj399NN67rnnCnJKIE8SEqRGjaQzZ6RXXrG7GgAAACBDgcLVL7/8ooYNG0qSPvroI1177bX67rvvNGfOHE2fPt2d9QGZOBwZ11699ZZ06JC99QAAAADpChSuzp07p+DgYEnSV199pTvvvFOSVKtWLR04cMB91QFZaNNGio+XTp3iuisAAAB4jwKFq9q1a2vq1KlatWqVli1bptatW0uS9u/frwoVKri1QOBiF45evfGG9Ndf9tYDAAAASAUMVxMmTNDbb7+t5s2bq3v37qpXr54kacmSJa7pgkBRattWqldPOnFCmjTJ7moAAACAAnYLlCSn06mUlBSVL1/etW/nzp0KDQ1VpUqV3FagHegWWDz85z9S585SWJi0a5dUrpzdFQEAAMDXFHm3wNOnT+vs2bOuYLVr1y5NmjRJ27ZtK/bBCsVHhw5S7dpSSoo0ZIg0d66UmCg5nXZXBgAAAH9UoHDVrl07zZw5U5J09OhRNWrUSK+88orat2+vKVOmuLVAIDsBAVKrVmb7/felu++WWrSQ4uKkhQttLQ0AAAB+qEDhasOGDWratKkkacGCBYqMjNSuXbs0c+ZMvf76624tEMjOwoVZdwvct89MFyRgAQAAwJMKFK5OnTqlsmXLSpK+/PJLdezYUQEBAbrxxhu1a9cutxYIZMXplAYNkrK6YjB93+DBTBEEAACA5
xQoXF1xxRVavHix9uzZoy+++EItW7aUJB08eJAGEPCIVaukvXuzf9yypD17zHEAAACAJxQoXI0aNUpDhw5VXFycGjZsqMaNG0syo1jXXXedWwsEspLXtapZ0xoAAACeUqIgT+rcubNuvvlmHThwwLXGlSTdeuut6tChg9uKA7ITHZ2340JDi7YOAAAAIF2B17lKt/f/52bFxMS4pSBvwDpX3s/pNF0B9+3L+rqrdJGR0vTpUuvWnqoMAAAAvqTI17lKS0vTc889p/DwcFWtWlVVq1ZVuXLlNHbsWKWlpRWoaCA/AgOl114z2w5H5sccDnOrUkVKTpbatJEeeUQ6dcrzdQIAAMB/FChcPf3005o8ebLGjx+vjRs3auPGjXrxxRf1xhtvaOTIke6uEchSx47SggUmRF0oJsbs375dGjjQ7HvzTen666X16z1fJwAAAPxDgaYFVq5cWVOnTtWdd96Zaf/HH3+sAQMGaN++fW4r0A5MCyxenE7TFfDAAXMtVtOmZmQr3RdfSH36mMdLlJBGj5aGDzfbAAAAQE7ykw0KFK5CQkL0888/66qrrsq0f9u2bapfv75Onz6d31N6FcKV7zlyRHroITOiJUmNG0uzZkk1athbl6flFkQBAACQWZFfc1WvXj1Nnjz5kv2TJ09W3bp1C3JKoEhVqCB99JE0c6YUFiatWSPVry+9+27ODTF8ycKFpglIixbS3Xebr3FxZj8AAAAKr0AjVytXrtQdd9yhyy+/3LXG1Zo1a7Rnzx599tlnatq0qdsL9SRGrnzbrl1Sz57SN9+Y++3bS++8I1WsaGtZRWrhQqlz50uDZHozkAULzDVsAAAAyKzIR66aNWum33//XR06dNDRo0d19OhRdezYUVu2bNGsWbMKVDTgKVWrSl9/LU2YIJUsKS1eLNWpI/33v3ZXVjScTmnQoKxH6NL3DR5sjgMAAEDBFXqdqwv99NNPuv766+Us5r+lMXLlPzZtku65R9qyxdx/6CFp4kSpdGlby3KrxEQzBTA3K1ZIzZsXdTUAAADFS5GPXAG+on596ccfpcceM/enTpWuu05at87Wstzm8GFzXVleHDhQtLUAAAD4OsIV/F5IiPTqq9JXX5k1s7Zvl5o0kZ59Vjp/3u7q8i8tTVq+XOrWzbye2bPz9rzkZP9p7gEAAFAUCFfA/7v1VmnzZhNKnE5pzBjppptM2CoODhyQXnxRuvJKKSFBmjdPSk01I3HlymU0r8jOY4+Z1uyffUbIAgAAKIh8XXPVMZd2YkePHtXKlSu55grF3gcfSAMGSMeOSaGhZmSrb9/cA4qnnT8vLV0qTZtmGnKk/68XFib16CE98IB0/fUZ3QKlzMEp/fW0bGmuzTp71tyvX1966inTQZB1sAAAgD8rsmuuwsPDc7xVrVpVPXv2LFTxgDe4+24zitWihXTqlGl0ceedZuqcN9i5Uxo50qxT1battGSJCVY33SRNny7t3y+99ZYJVpIJSQsWmGmCF4qJMfuXLpV27JCGDjXNPDZtkrp0kWrXNuc7d86jLw8AAKBYcmu3QF/ByBXSpaVJkyZJI0aYKXYVK0r//rcJWp6Wmip9/LEZpfrqq4wRqAoVpF69pPvvl665JudzOJ3SqlVmCmF0tJkGePHI1JEj0htvSK+/Lv39t9l3+eXSk09K990nlSrl/tcGAADgrfKTDQhXWSBc4WKbN5uW7T//bO7ff78JXWXKFP33/u03E+hmzDDd/9IlJEgPPii1aycFB7v/+x4/bronvvJKxohdZKQ0ZIjUv79Utqz7vycAAIC3IVwVEuEKWTl71kzFmzjRjBpVr2468TVu7P7vdeqUma43bZr07bcZ+ytXlvr0MSNI1au7//tm5fRp6f33pZdeknbtMvvKlZMefdTcKlTwTB0AAAB2IFwVEuEKOUlMNNPwdu+WAgJM44dRo6SSJQt/7k2bTKCaM8c005DM97jjDjNK1aaNVKJE4b9PQZw7Zxp9jBsnb
dtm9pUuba5He/xxM80QAADA1xCuColwhdwcOyY98kjGGlI33GC2a9bM/7lSUqS5c02oWr8+Y3+1amb6Ye/elzaisJPTKS1aZNq+b9xo9gUFmdG0J580dQMAAPgKwlUhEa6QVx99ZEZu/v7bNHqYONFcj5SWlnPjCMuSvv/eBKp588w0QMmMfnXoYEap/vEPM2rlrSzLdBl84QVp9WqzLzDQdFocPjz35hoAAADFAeGqkAhXyI99+8x1UMuWmfv165sGEAcOZBwTEyO99prUrJk0a5ZpULFlS8bjtWqZQHXvvaYjYXHzzTdmJOuLLzL2dexopkzGx9tXFwAAQGERrgqJcIX8SkuTJk8260TltCZUiRJm4V/JjHR16WJCVZMm3rdAcUGsX29C1sKFGftatTIh65Zb7KsLAACgoAhXhUS4QkE4nebaqNwWGq5fX+rbV+re3XTd80W//iqNH28aYDidZt/NN5uQ1bp15iCZl7W3AAAA7JKfbODFV3QAxcuqVbkHK0n617/MdVm+Gqwkc73VzJnS9u3mmrSgINNS/vbbzTTBBQtMqFq4UIqLk1q0MNdqtWhh7l848gUAAFBcEK4AN7nwGit3HOcLqlWTpkyRduww7dpLlzYdBu+6S6paVerUSdq7N/Nz9u2TOncmYAEAgOKHcAW4SV7XefLH9aAqVzadFHftMmuChYebEJWV9InKgwdnTCkEAAAoDghXgJs0bWq6AmbXmMLhkGJjzXH+qkIF6dlnzSLJObEsac8eM9USAACguCBcAW4SGGjarUuXBqz0+5Mm0axBMgsn54U/TaEEAADFH+EKcKOOHU2zhipVMu+PiTH7O3a0py5vwxRKAADgi2jFngVasaOwaC+eM6fTdAXcty/jGqsLORwmkO7YwfsGAADslZ9sUMJDNQF+JTBQat7c7iq8V/oUys6dTZC6OGBZFlMoAQCFwx86YQemBQKwRXZTKNOFh3u2HgCA72AdRdiFaYFZYFog4DkX/2Xxgw+kadNM6PrpJ9NhEACAvFq40MyMuPg33PTmUlwDjfzKTzYgXGWBcAXY5+RJKT5e2rbNLDI8f3727e0BALhQ+jW9Fy9Qn45relEQ+ckGTAsE4FVKlzbrYJUoIf3nP9L06XZXBAAoLlatyj5YSayjiKJHuALgdeLjpbFjzfajj0p//GFvPQCA4iGv6yOyjiKKCuEKgFd64gnpllukEyeke+6Rzp2zuyIAgLdjHUXYjXAFwCsFBkqzZpmugWvXSs8/b3dFAABv17SpFBGR/eMOhxQba44DigLhCoDXuvxyaepUs/3889J339lbDwDAuzkcUpkyOR/DOoooSoQrAF6tWzczLTAtTerRQ0pJsbsiAIC3mj1b2rlTKlVKqlz50sebNqUNO4oW4QqA15s82bTW3blTGjjQ7moAAN7o1Cnp6afN9pgx0u7d0ooVZv3EKVPM/m++YRYEihbhCoDXCw83f40MCJBmzpTmzbO7IgCAt/nXv0wb9qpVTafZwECpeXOpe3fpoYek++4zxz36qJkNARQFwhWAYuGmm6SnnjLbDz1k1ikBAECSkpOl8ePN9rhxUkjIpce8+KIUFiatXy+9/75n64P/sD1cvfnmm4qLi1NISIgaNWqkdevWZXvsli1b1KlTJ8XFxcnhcGjSpEmXHDNmzBg5HI5Mt1q1ahXhKwDgKaNGSQ0bSkePSj17Sk6n3RUBALzBmDFm6Y4GDaSuXbM+JjJSGj3abI8YYf4tAdzN1nA1b948DRkyRKNHj9aGDRtUr149tWrVSgcPHszy+FOnTql69eoaP368oqKisj1v7dq1deDAAdft22+/LaqXAMCDSpaU5syRSpeWEhOlV16xuyIAgN1+/VWaNs1sT5xoppBn55FHpFq1pEOHpGef9Ux98C+2hqtXX31VDz74oPr06aNrrrlGU6dOVWhoqN57770sj2/QoIFefvlldevWTcHBwdmet0SJEoqKinLdInJa8ABAsXLFF
dJrr5ntZ56RNmywtx4AgL2GDTMzGdq3N4vP5yQoKOPfkMmTTTAD3Mm2cJWamqr169crISEho5iAACUkJGjNmjWFOvf27dtVuXJlVa9eXT169NDu3btzPP7s2bNKSUnJdAPgve67z7TSPXdOuvtu0yEKAOB/vv5a+vRTqUQJacKEvD2nZUvpzjul8+elwYMlyyrSEuFnbAtXhw8fltPpVGRkZKb9kZGRSkpKKvB5GzVqpOnTp2vp0qWaMmWKduzYoaZNm+r48ePZPmfcuHEKDw933WJjYwv8/QEUPYdDeucds4bJtm3S0KF2VwQA8LS0tIyf/w89JF11Vd6f++qrZhRr2TJpyZKiqQ/+yfaGFu7Wpk0b3XXXXapbt65atWqlzz77TEePHtVHH32U7XNGjBihY8eOuW57aEMGeL0KFaTp0832lCnSJ5/YWg4AwMNmz5Y2bjQdANMbVeRVjRrS44+b7ccek86ccX998E+2hauIiAgFBgYqOTk50/7k5OQcm1XkV7ly5XTVVVfpjz/+yPaY4OBghYWFZboB8H633SYNGWK277/ftOIFAPi+CxcMfvppqSCX1z/1lJkBsWMHDZLgPraFq6CgIMXHx2v58uWufWlpaVq+fLkaN27stu9z4sQJ/fnnn4qOjnbbOQF4jxdflOrWNZ2f7ruPufMA4A8mTTILBl9+uVkUuCDKlJFeeslsv/iiOR9QWLZOCxwyZIimTZumGTNmaOvWrerfv79OnjypPn36SJJ69uypESNGuI5PTU3Vpk2btGnTJqWmpmrfvn3atGlTplGpoUOHauXKldq5c6e+++47dejQQYGBgerevbvHXx+AohccLH3wgfn62WfSW2/ZXREAoCglJ5uFgqXsFwzOq7vvlpo0MSNhTz7pnvrg32wNV127dtXEiRM1atQo1a9fX5s2bdLSpUtdTS52796tAwcOuI7fv3+/rrvuOl133XU6cOCAJk6cqOuuu04PPPCA65i9e/eqe/fuqlmzprp06aIKFSro+++/V8WKFT3++gB4Ru3aGX99HDqU1roA4MuefdYsGHzDDVK3boU7l8MhvfGG+Tp3rsTSqCgsh2UxieZiKSkpCg8P17Fjx7j+CigmLEu6/XZp6VKpXj1p7VozmgUA8B1bt0p16ph1rRITpWbN3HPevn3NQsT160s//igFBrrnvPAN+ckGPtctEIB/cjik9983FzX/9JNZYBgA4FuefNIEq3bt3BesJOmFF6TwcGnTJunf/3bfeeF/CFcAfEZUlPTuu2Z74kTpgn45AIBi7sIFg9OngrtLxYpmuqFkug/+/bd7zw//QbgC4FPuvFPq189s9+ol/fWXvfUAAAqvMAsG59WAAdI110hHjuR/3SwgHeEKgM955RXzD+++fWYePVeWAkDxNmdOxoLBo0YVzfcoWVJ67TWz/dZb0i+/FM33gW8jXAHwOaVLm/bsJUpI//mPNH263RUBAArq9Gmz4K9kvhZlA+iEBKlDB3Nd16BB/HEO+Ue4AuCT4uOlsWPN9qOPSn/+aW89AICCcceCwfnxyium2+zXX0uLFhX994NvIVwB8FlPPCHdcotZD6VHD+ncObsrAgDkx8GDGQsGv/iiVKpU0X/PatXMvx+SNGSIGTkD8opwBcBnBQZKs2aZ9rpr10rPP293RQCA/BgzRjp+3CwY3L27577v8OFSTIy0a5f08sue+74o/ghXAHza5ZdLU6ea7eefl777zt56AAB5s3Wr9M47ZnviRCnAg7+1li5tvqckjR8v7d7tue+N4o1wBcDndesm3XOPaeV7zz1SSordFQEAcjNsWNEsGJxXXbqYqeWnT2dMEwRyQ7gC4BcmT5aqVpV27JAGDrS7GgBATlaskD75xEzvnjDBnhocDtOaPSBA+ugjaeVKe+pA8UK4AuAXwsOl2bPNP5IzZ5p/KAEA3ufiBYNr1rSvlvr1zXqJkulUeP68fbWgeCBcAfAbN9+csVZKv37Snj321gMAu
NQHH0gbNkhly0qjR9tdjVnWo3x56eefM64BA7JDuALgV0aNkho2lI4elXr2NPP5AQDewZMLBudVRETGuokjR0pHjthbD7wb4QqAXylZ0kwPLF1aSkw0i0UCALzDpElmVsHll0uDBtldTYZ+/aQ6daS//jJ/pAOyQ7gC4HeuvNJcpCxJzzxjpp8AAOxlx4LBeVWiRMa/G1OnmimCQFYIVwD80n33SR06SOfOSXffLZ06ZXdFAODfnn3WLBgcH+/ZBYPzqkULqXNn03Dj0Ucly7K7IngjwhUAv+RwSNOmSdHR0rZtGZ2pAACe99tv0ttvm21PLxicHxMnSiEhpi37/Pl2VwNv5KUfXQAoehUqSDNmmO0pU6RPP7W3HgDwV+kLBt95p9S8ud3VZK9qVVOrZP4ox6wHXIxwBcCv3Xab9NhjZvu++6TkZHvrAQB/k5goLVli74LB+fHkk6bhxp49xaNeeBbhCoDfe/FF0wXq0CETsM6fN//Yz51rvtKuHQCKRlqa9PjjZrtfP6lWLXvryYvQ0IxOsy+9JO3caWs58DKEKwB+LyTELFoZHCx99plZV6VFC9PookULKS5OWrjQ7ioBwPd424LBedWpk5m+eOYM1+wiM8IVAEi69lqpRw+zffRo5sf27TMdoghYAOA+Fy8YXKmSvfXkh8Mhvf66abzxn/9IX39td0XwFoQrAJCZ+vfll1k/lt5ud/BgpgheyOlk+iSAgnvtNXPdUmysdy0YnFd16kj9+5vtRx81U8oBwhUASFq1Stq7N/vHLcv8EjBrlnTypOfq8lYLF5rpkkyfBFAQhw6Z610l71swOD+ee0667DJpyxbTdRZwWBZLoF0sJSVF4eHhOnbsmMLCwuwuB4AHzJ1rQkJeVaxoWvLGxWXcLrxfpox763M6TQA8cMCszdW0qemsZYeFC800yYv/9XA4zNcFC6SOHT1fF4Di45FHpDfflK6/XvrhB+9d1yovpk41I1jlyknbt0sREXZXBHfLTzYgXGWBcAX4n8REM/qSm9DQvK1rUqFC1qEr/X5+frQsXGimzFw4shYTY6bUeDrEOJ3mNWQ3yudwmNp27LAv/AHwbtu2SbVrm58nX3+dt5+93szplOLjpZ9+Mh0Pp061uyK4G+GqkAhXgP9JDw379l06IiNlDg3Hj0u7dpn2uxfe0vf9/Xfu3698+cyB6+IQFh5ujiuqUSKn04TEi28nT+a87/ff8zb175NPpH/+M/91AfB97dqZda3atjVffcE330jNmpmfzevXS9ddZ3dFcCfCVSERrgD/lB5kpMxhJr9B5tixjKCVVQj766/cz1GunAlb27aZVr85HTdokDkmL+Eo/Xb2bO41FFb6a7gwOF4YIMuXz3hv3cWbpk8CuFT6LIHAQOmXX4rHulZ51a2bNG+edPPNJmy5++cb7EO4KiTCFeC/spqCFxsrTZrkvil4WY18XXj/8GH3fJ+8Cg3NfCtdOvt9hw5Jc+a45/uWKZN98Kpa1VzXlp9fTrxp+iSAS6WlSQ0bmpGdAQPMNVe+ZM8eqWZN02L+gw+k7t3trgjuQrgqJMIV4N/sHv04cULavVuaPl16+eXcj7/1Vqlu3byFo4tvpUrlL8Dkdfrk5s3mmIuDY/p2cnLu36tUqUsD14VfIyMzLoKnyQbg/ebMke65xywY/McfxWtdq7x6/nlp5EipShUz86B0absrgjsQrgqJcAXAG+S1ycaKFVLz5kVdTQZ3TJ88fdoEyKyC165d0v79WYe3CwUFmaBVtaq0Zk32LfJpsgHY7/RpMwVw927Ten3ECLsrKhqnT0vXXGN+lj39tAlbKP4IV4VEuALgDfLTZMPToaGop0+ePWvOnVXw2rnTPJaWlr9zejqEAsgwYYI0fLj5mfX778V3Xau8WLTI/BwMDpZ+/VWqXt3uilBYhKtCIlwB8BbuarJRFOycPnnuXMa0w3nz8tb6mGsgAHscOiRdcYWUkiLNnCnde6/dFRUty5Juu
01avlxq396ELRRv+ckGxXjJNgDwfR07mgBVpUrm/TEx9l9HFBhoRoK6dzdfPTl6VrKkGdVr3lzq2jVvz3H3ws4A8ubZZ02wuu46qUcPu6speg6HaaQTGCgtXiwtW2Z3RfAkRq6ywMgVAG9jd5MNb5bb9Ml0ERHSxIlSz560SAY8xdcWDM6PQYOk11+Xrr7aLDBcsqTdFaGgGLkCAB9j5yiRtwsMNH8lli4NTen3Y2JMi/vevc1Cn7/84tESUUw5naaxzNy55qvTaXdFxc+wYeZ9a9vWv4KVJI0ZY/6os3Wr77WdR/YIVwCAYi+n6ZP/+Y/0v/+ZC+pDQ80I4HXXSU8+adreA1lZuNCMiLZoId19t/kaF2f2I29WrpQ+/tj8AWTCBLur8bzy5U1nRMkErYMHbS0HHsK0wCwwLRAAiqfcpk/u3i0NHpxxgXn6IsMdOjBVEBlYN63w0tKkRo2kH3+U+veX3nrL7ors4XRKDRpIGzdKDzwgTZtmd0UoCLoFFhLhCgB823//Kw0caNrYS1KbNtIbb0g1athbF+yXfg3fhcsMXIh10/Lmgw9M8wpfXjA4r1avlm6+2Xx2vv9eOnWK62eLG665AgAgB3fcIW3ZIo0caRYj/vxz6dprpbFjpTNn7K4Odlq1KvtgJZnRrD17zHHI2unTGYsEDx/u38FKkm66yUwttSwTpphq6tsIVwAAv1SqlPTcc9LPP0sJCSZUjRol1a1L62R/duCAe4/zR6+/bqbgxsSYabjIWMA8NTXz/n37zBRUApbvIFwBAPxazZrSl19KH35opuls3y61bGnWz9q3z+7q4ClOp7RkiTRpUt6Oj4oq0nKKlQu7Ki5eLL3wgtn/4oumiYy/czrNH3Kykn5xzuDBdKP0FYQrAIDfczhMmPrtN/NLTkCA9NFHUq1a0r/+JZ0/b3eFKCoHDkjPPy9Vqya1ayetW5e3540eLW3aVKSlFQsXd1Xs0EE6fty8n/6wYHBeMNXUvxCuAAD4f2FhJkytXy81bmxatQ8ZIsXHS999Z3d1cBfLMgva3nWXdPnl5tq7PXukChWkJ54waxI5HNmvmxYUZH4Rjo+XHnpIOnTI86/BG6R3VcwqOOzYYUaxwFRTf0O4AgDgIvXrS99+K/3739Jll5nrsm66ybRSPnzY7upQUH/9ZcJzrVrSrbealurnz5v/trNnm5Dw0kvSgAE5r5u2fbsZ6UxLk95+W7rySjOd8Nw5W16WLZxOadCgS9vVp3M4mOqWLjravcfBu9GKPQu0YgcApDt82HQ8e/ddc/+yy8yCqPfdZ6YPwrtZlpnqN2WKNG9eRjfIMmWke+81I09162b93NzWTVu1ygSMjRvN/Vq1TMhq1apIX5JXSEw0UwFzs2JFRjMHf5Xe3n/fvqzDKO39vR+t2AEAcJOICDOCtXq1+SX8r7+kBx8069b89JPd1SE7J05I77xjpu7deKM0Y4YJVvXqSVOnSvv3m4VtswtWkvlFt3lzqXt38/XiX3ybNpV++MEsDFuxorlmr3VrqW1bM7rly5jqlneBgWaxcinrxcoty/zBhmDlGwhXAADkQZMm5lqsf/3LjHqsWSNdf7302GNSSord1SHdL79IjzwiVa4s9etnRpWCg6WePc1/s40bzf6yZd3z/QIDzXTR33831+eVKCF9+qlUu7b05JO+99mwLGn5cnNdWl4w1c3o2DHrqabpo99LlmQ/xRLFC9MCs8C0QABATvbtM79If/SRuV+5sgldd92V9V+mUbTOnjXXQk2ZYq6VS3fllWbaX69eplmFJ2zbZgL355+b+5UqSePGSb17F+9ppCkpZvTvrbfMCF1umOqWtYunmjqdZrTz/HnTwv6pp+yuEFnJTzYgXGWBcAUAyIsvv5Qeflj64w9z/7bbpMmTpauuyjgmt+t2UHD/+59pKPHeexmNRgIDpfbtTaj6xz/sCzSffWZC1u+/m/vx8WZx3SZN7KmnoLZsMaNUs2aZqZaSGbnt1
cuE18ceM/su/G0y/Q8MCxaYERvkbOpUqX9/s714sVkSAN6FcFVIhCsAQF6dOSO9/LL5q/PZs6ZN97Bh0ogRZvRi0KDMrapjYsz1F/zSWTDnz0v//a8Zpfrii4z9VapIffuaKXqVK9tX34VSU03YfvbZjOmBd99trq+JibG3tpycPy99/LGpPTExY3+tWmbK5b33mmULJNOO/eLPeGysaezBZzzvHn7YjAqWKWOWfahTx+6KcCHCVSERrgAA+fXnn9LAgZmngx08eOlx/FU/a7mN8O3fbxqLTJuW8Yu8w2E68z30kHTHHeZ6J2+UnCw984zpOGlZUmio6UA5dKhUqpTd1WVISjLv79tvm6mvkhn5a9fOhKoWLbKe9srobOGdO2c+yytWmM6CP/xgmunAOxCuColwBQAoCMuSFi2SHn0045fTrHA9SmZZjX7ExJjr2MqVM9OmFi/OWDMpIsK0wu/XT6pe3Y6KC2bDBvM6068Lq1pVmjhR6tTJvmv1LMs0+pg82QT+9LW6KlY0I4H9+pmRKBS9I0ekhg3NdNdmzcy046Agu6uCRLgqNMIVAKAwPv9cuv323I9jDSATrDp3zluntJtvNtemdOpkOgAWR5Zl1tt64omMMNm8uZlGV6+e5+o4dUqaO9eEqk2bMvY3bmymqHXuXHzf4+Jsyxbz3+D4cTMiO2WK3RVBYp0rAABsdfRo3o7z9zWAnE4zkpNTsHI4TKD6+Wcz9ezuu4v3L/0Oh9Stm+kqOHq0FBJirmu6/nrzOg8dKtrv/+ef0uOPm5HBBx4wwSokxIwErl9vrvfp0aN4v8fFWe3a0pw55nMydaq5DgvFi+3h6s0331RcXJxCQkLUqFEjrVu3Lttjt2zZok6dOikuLk4Oh0OTJk0q9DkBAHC3vK7tM3as9Oqr0u7dRVuPt/r888xTAbNiWVKXLr53gX9oqDRmjGlr3qWLlJZmfpm+6irT8CR9ep47pKWZ7oW33246/L36qvT331K1aqYZy9695nqw66933/dEwbVtK734otl+9FHp66/trQf5Y2u4mjdvnoYMGaLRo0drw4YNqlevnlq1aqWDWV0BLOnUqVOqXr26xo8fr6ioKLecEwAAd2va1IwM5HYdzdatZhShalXTovtf/5L27PFMjXbZuVN64w3Ttj6vLad9eYSvalUzTXDlSql+fTPqOXiwVLdu5m6IBfHXX9Irr5hAdccdJsxaltSmjVnoePt201TDU2uAIe+GDTMjiE6nWT/vzz/trgh5Zes1V40aNVKDBg00efJkSVJaWppiY2M1cOBADR8+PMfnxsXFafDgwRo8eLDbzpmOa64AAIWVfi2RlPUaQNOmmTbuH31kprtdeEyTJuYXqs6dvbtld16kpZnpZkuWmNvPP+f/HP5ybZrTaUaQnn46Y92utm0zAlL6Mbl15tuwwaxN9cEH5jMmmcYg991nph5ecYXHXhIK4fRp09jihx+ka64xjUf4tdQexeKaq9TUVK1fv14JCQkZxQQEKCEhQWvWrPHoOc+ePauUlJRMNwAACqNjR9N9rUqVzPtjYsz+++83jQNWrjSdBd94w/yi7HCY614ee8x0abv5ZrP4bE7dB73N6dNmLap+/czrbdhQev55E6wCAqRbbjFd8rZuzXmEz+Ew70HTpp6t3y6BgaZD3/bt5r9/iRLSJ5+Y63CefFKaPdu06W7Rwlx71qKFub9woVljbc4cE8zj483CymfOmCYZ06aZz88rrxCsipNSpUz30eho6ddfM0ay4N1sWxHi8OHDcjqdioyMzLQ/MjJSv/32m0fPOW7cOD377LMF+p4AAGSnY0cz9S23kYboaLOO0COPmF+C//Mfaf5807J79WpzGzxYuukmc31Op07es1BuukOHzFSzJUtMC+lTpzIeK1NGat1auvNOc93PhdPQXnvNjNA5HFmP8E2a5H/t6suVM9dF9e1rQtbSpebaqKzs22c+D2FhGQsVlyxp3tOHHzZhy6427
yi8KlXMMgS33GL+/3rmGWncOLurQk5sb2jhDUaMGKFjx465bnt8fcI7AMBjAgPNlLbu3c3X3IJClSrmIvZVq8z1V5MmmVBlWSZsPfqoGe1p1sy00bbreiTLMs0YXnrJjK5FRpppZ4sXm2AVEyMNGGCCweHDJizee++l1/fkNsLnzwst16plGlF8/HH2CySnB9KUFBO4n3vONEj54APzuSFYFX8NG5rpopI0frwZoYT3sm3kKiIiQoGBgUpOTs60Pzk5OdtmFUV1zuDgYAXTcxQA4GViYkyr8vQFdhcsMNdorVkjffONuT36qPmrdpcuJogU8J/QPDl/3kxZTL9+avv2zI9ff70ZnbrzTtOcIa+/2Od1hM8fORxmVOr8+dyPnTFDuuDKCPiQHj2kzZulCRPMlOKrrpIaNLC7KmTFtpGroKAgxcfHa/ny5a59aWlpWr58uRo3buw15wQAwBvExJipgd99Z0YmXn1VuvFGM3KxcqWZAla5srkOZ8oU6aK/M2bidJq1lebONV9zuo7j+HET6nr2NKNTzZqZa3e2bzfTz1q1Mmvx7N5tGleMHi1dd13+R0zyO8LnT/I6OlnUa2TBXi+8IP3zn+b6unbtpP377a4IWbFt5EqShgwZol69eumGG25Qw4YNNWnSJJ08eVJ9+vSRJPXs2VNVqlTRuP+fXJqamqpff/3Vtb1v3z5t2rRJZcqU0RX/f4VmbucEAKC4i4011+I89pi0a5cJP/PnS2vXmrCUmGiu32rWLGNEq1Il89yFCzNGwtLFxJhrn9Kn4O3dmzE6tWKFlJqacexll5m23nfeKbVsSfcyT8jruml5PQ7FU2CgmRLYuLFpcNG+vfnDSqlSdleGC9nail2SJk+erJdffllJSUmqX7++Xn/9dTVq1EiS1Lx5c8XFxWn69OmSpJ07d6patWqXnKNZs2ZKTEzM0znzglbsAIDiaOfOjKC1bl3G/oAAM6J1xRXSO+9kbhwhZYwy3XWX9McfppX3ha64wvyl/M47TYOE7K7/QdFwOk1XwH37Lv1vJ5n/fjEx0o4djPj5gz//NNdh/fWXmS44axbX1hW1/GQD28OVNyJcAQCKux07Mq7R+vHH/D3X4TAhKv36qZo1+eXNbrmtm+bvzT/8zddfm5Fjp9M0uRg2zO6KfFuxWOcKAAAUnWrVpCeeMAuQ/vmnaeudF8OGSUlJpjPhk0+ajnUEK/vRVREX+sc/zFReSRoxwrRph3cgXAEA4OOqVzdNIvKiXr2M67PgXTp2NFM/V6wwrdZXrDAjlAQr/zRggFmo27LMotJbtthdESSbG1oAAADPoCmCb0jvqgg4HNLrr5v15lauNFN41627dC05eBYjVwAA+IGmTc0Usuym+Dkcpgth06aerQtAwQUFmWmhcXHS//5nmtKcO2d3Vf6NcAUAgB8IDMy4RuPigJV+f9Ikus0BxU1EhFk2oUwZM1X0scfsrsi/Ea4AAPATNEUAfFOdOtLs2Wb7zTelt9+2tx5/Riv2LNCKHQDgy5xOadUq6cABc41V06aMWAG+4IUXpGeeMWvRffWVWUgchcc6V4VEuAIAAEBxk9458MMPTWOLH34wyzKgcFjnCgAAAPAzDof07rtSfLx05IjpIHj8uN1V+RfCFQAAAOAjQkOlxYulqCjpl1+ke++V0tLsrsp/EK4AAAAAHxITIy1aZFq1f/yxNGqU3RX5D8IVAAAA4GNuvFGaNs1sv/CCNG+evfX4C8IVAAAA4IN69pSGDjXbvXtL69fbWo5fIFwBAAAAPmr8eKlNG+nMGaldO7MEA4oO4QoAAADwUYGB0ty5Uq1a0r59UocOJmihaBCuAAAAAB8WHi4tWSKVLy+tXSv162fWxIL7Ea4AAAAAH3flldJHH5mRrJkzpVdftbsi30S4AgAAAPxAQkJGqHrySenzzyWnU0pMNFMHExPNfRRcCbsLAAAAAOAZAwdKmzdL//631KmTFBYmJSdnPB4TI732mtSxo
301FmeMXAEAAAB+wuGQ3nzTNLg4fTpzsJJM04vOnaWFC+2pr7gjXAEAAAB+JDBQOnYs68fSG10MHswUwYIgXAEAAAB+ZNWqnNe7sixpzx5zHPKHcAUAAAD4kbwuJMyCw/lHuAIAAAD8SHS0e49DBsIVAAAA4EeaNjVdAR2O7I8JD5duvtlzNfkKwhUAAADgRwIDTbt1KfuAdeyY1K2bdPy45+ryBYQrAAAAwM907CgtWCBVqZJ5f2ys1LevVLKk9J//SA0bSr/9Zk+NxZHDstIbLiJdSkqKwsPDdezYMYWFhdldDgAAAFAknM6M7oHR0WbKYGCg9P33Zr2rffukMmWk6dPNosP+KD/ZgHCVBcIVAAAA/F1ystS1q7Rypbn/xBPSiy9KJUrYW5en5ScbMC0QAAAAwCUiI6WvvpKGDDH3X35ZatVKOnTI3rq8GeEKAAAAQJZKlJBeeUX68EOpdGnp66+l66+X1q2zuzLvRLgCAAAAkKOuXaW1a6WrrpL27jXXZk2bZndV3odwBQAAACBXtWubEav27aXUVNNV8IEHpDNn7K7MexCuAAAAAORJeLhp0f7ii1JAgPTuu2YUa9cuuyvzDoQrAAAAAHkWECCNGCEtXSpVqCD9+KMUH2+aX/g7whUAAACAfLvtNmn9ehOsjhwxnQTHj5f8eaEnwhUAAACAAqlaVfr2W+m++6S0NDOi1amTlJJid2X2IFwBAAAAKLCQEOnf/5beflsKCpIWLZIaNpR+/dXuyjyPcAUAAACgUBwO0z1w1SopJkbats0ErPnz7a7MswhXAAAAANyiYUNpwwbpH/+QTp6UunSRnnhCOn/e7so8g3AFAAAAwG0qVpS++EJ68klzf+JEqWVL6eBBe+vyBMIVAAAAALcqUUKaMEFasEAqU0ZascJ0FVy71u7KihbhCgAAAECR6NRJWrdOqllT2rvXLDg8darvtmsnXAEAAAAoMldfbQJWx47SuXNS//6mdfvp03ZX5n6EKwAAAABFKizMTBEcP14KCJCmT5duvlnaudPuytyLcAUAAACgyDkc0rBh0pdfShERpqtgfLy57ysIVwAAAAA85tZbpfXrpQYNpL/+klq3ll54QUpLM487nVJiojR3rvnqdNpZbf4QrgAAAAB41OWXS998Iz34oGlu8cwz5pqsWbOkuDipRQvp7rvN17g4aeFCuyvOG4dl+WqvjoJLSUlReHi4jh07prCwMLvLAQAAAHzWu+9KDz8snT2b9eMOh/m6YIEJYJ6Wn2zAyBUAAAAA29x/v7RypRQYmPXj6UNBgwd7/xRBwhUAAAAAW50+nXNwsixpzx5p1SrP1VQQhCsAAAAAtjpwwL3H2YVwBQAAAMBW0dHuPc4uhCsAAAAAtmraVIqJyWhecTGHQ4qNNcd5M8IVAAAAAFsFBkqvvWa2Lw5Y6fcnTcq+6YW3IFwBAAAAsF3HjqbdepUqmffHxNjXhj2/vCJcvfnmm4qLi1NISIgaNWqkdevW5Xj8/PnzVatWLYWEhKhOnTr67LPPMj3eu3dvORyOTLfWrVsX5UsAAAAAUEgdO0o7d0orVkgffGC+7thRPIKV5AXhat68eRoyZIhGjx6tDRs2qF69emrVqpUOHjyY5fHfffedunfvrvvvv18bN25U+/bt1b59e/3yyy+ZjmvdurUOHDjgus2dO9cTLwcAAABAIQQGSs2bS927m6/ePhXwQg7LSl+Wyx6NGjVSgwYNNHnyZElSWlqaYmNjNXDgQA0fPvyS47t27aqTJ0/q008/de278cYbVb9+fU2dOlWSGbk6evSoFi9eXKCa8rMKMwAAAADflZ9sYOvIVWpqqtavX6+EhATXvoCAACUkJGjNmjVZPmfNmjWZjpekVq1aXXJ8YmKiKlWqpJo1a6p///46cuRItnWcPXtWKSkpmW4AAAAAkB+2hqvDhw/L6XQqMjIy0/7IyEglJSVl+ZykpKRcj2/durVmzpyp5cuXa8KECVq5cqXat
GkjZzbLPo8bN07h4eGuW2xsbCFfGQAAAAB/U8LuAopCt27dXNt16tRR3bp1VaNGDSUmJurWW2+95PgRI0ZoyJAhrvspKSkELAAAAAD5YuvIVUREhAIDA5WcnJxpf3JysqKiorJ8TlRUVL6Ol6Tq1asrIiJCf/zxR5aPBwcHKywsLNMNAAAAAPLD1nAVFBSk+Ph4LV++3LUvLS1Ny5cvV+PGjbN8TuPGjTMdL0nLli3L9nhJ2rt3r44cOaLo6Gj3FA4AAAAAF7G9FfuQIUM0bdo0zZgxQ1u3blX//v118uRJ9enTR5LUs2dPjRgxwnX8oEGDtHTpUr3yyiv67bffNGbMGP3444965JFHJEknTpzQE088oe+//147d+7U8uXL1a5dO11xxRVq1aqVLa8RAAAAgO+z/Zqrrl276tChQxo1apSSkpJUv359LV261NW0Yvfu3QoIyMiATZo00QcffKBnnnlGTz31lK688kotXrxY1157rSQpMDBQP//8s2bMmKGjR4+qcuXKatmypcaOHavg4GBbXiMAAAAA32f7OlfeiHWuAAAAAEjFaJ0rAAAAAPAVtk8L9Ebpg3ksJgwAAAD4t/RMkJcJf4SrLBw/flySWOsKAAAAgCSTEcLDw3M8hmuuspCWlqb9+/erbNmycjgcdpfj09IXbN6zZw/Xt3kI77ln8X57Hu+55/Geexbvt+fxnnueN73nlmXp+PHjqly5cqZGe1lh5CoLAQEBiomJsbsMv8LizZ7He+5ZvN+ex3vuebznnsX77Xm8557nLe95biNW6WhoAQAAAABuQLgCAAAAADcgXMFWwcHBGj16NAs8exDvuWfxfnse77nn8Z57Fu+35/Gee15xfc9paAEAAAAAbsDIFQAAAAC4AeEKAAAAANyAcAUAAAAAbkC4AgAAAAA3IFyhyIwbN04NGjRQ2bJlValSJbVv317btm3L8TnTp0+Xw+HIdAsJCfFQxcXfmDFjLnn/atWqleNz5s+fr1q1aikkJER16tTRZ5995qFqi7+4uLhL3m+Hw6GHH344y+P5fOffN998o7Zt26py5cpyOBxavHhxpscty9KoUaMUHR2tUqVKKSEhQdu3b8/1vG+++abi4uIUEhKiRo0aad26dUX0CoqfnN7zc+fOadiwYapTp45Kly6typUrq2fPntq/f3+O5yzIzyZ/ktvnvHfv3pe8f61bt871vHzOs5bb+53Vz3WHw6GXX34523PyGc9eXn4fPHPmjB5++GFVqFBBZcqUUadOnZScnJzjeQv687+oEa5QZFauXKmHH35Y33//vZYtW6Zz586pZcuWOnnyZI7PCwsL04EDB1y3Xbt2eahi31C7du1M79+3336b7bHfffedunfvrvvvv18bN25U+/bt1b59e/3yyy8erLj4+uGHHzK918uWLZMk3XXXXdk+h893/pw8eVL16tXTm2++meXjL730kl5//XVNnTpVa9euVenSpdWqVSudOXMm23POmzdPQ4YM0ejRo7VhwwbVq1dPrVq10sGDB4vqZRQrOb3np06d0oYNGzRy5Eht2LBBCxcu1LZt23TnnXfmet78/GzyN7l9ziWpdevWmd6/uXPn5nhOPufZy+39vvB9PnDggN577z05HA516tQpx/PyGc9aXn4ffOyxx/TJJ59o/vz5Wrlypfbv36+OHTvmeN6C/Pz3CAvwkIMHD1qSrJUrV2Z7zPvvv2+Fh4d7rigfM3r0aKtevXp5Pr5Lly7WHXfckWlfo0aNrH79+rm5Mv8waNAgq0aNGlZaWlqWj/P5LhxJ1qJFi1z309LSrKioKOvll1927Tt69KgVHBxszZ07N9vzNGzY0Hr44Ydd951Op1W5cmVr3LhxRVJ3cXbxe56VdevWWZKsXbt2ZXtMfn82+bOs3vNevXpZ7dq1y9d5+JznTV4+4+3atbP+8Y9/5HgMn/G8u/j3waNHj1olS5a05s+f7zpm69atliRrzZo1WZ6joD//PYGRK
3jMsWPHJEmXXXZZjsedOHFCVatWVWxsrNq1a6ctW7Z4ojyfsX37dlWuXFnVq1dXjx49tHv37myPXbNmjRISEjLta9WqldasWVPUZfqc1NRUzZ49W/fdd58cDke2x/H5dp8dO3YoKSkp02c4PDxcjRo1yvYznJqaqvXr12d6TkBAgBISEvjcF9CxY8fkcDhUrly5HI/Lz88mXCoxMVGVKlVSzZo11b9/fx05ciTbY/mcu09ycrL++9//6v7778/1WD7jeXPx74Pr16/XuXPnMn1ea9Wqpcsvvzzbz2tBfv57CuEKHpGWlqbBgwfrpptu0rXXXpvtcTVr1tR7772njz/+WLNnz1ZaWpqaNGmivXv3erDa4qtRo0aaPn26li5dqilTpmjHjh1q2rSpjh8/nuXxSUlJioyMzLQvMjJSSUlJnijXpyxevFhHjx5V7969sz2Gz7d7pX9O8/MZPnz4sJxOJ597Nzlz5oyGDRum7t27KywsLNvj8vuzCZm1bt1aM2fO1PLlyzVhwgStXLlSbdq0kdPpzPJ4PufuM2PGDJUtWzbXKWp8xvMmq98Hk5KSFBQUdMkfaHL6vBbk57+nlLD1u8NvPPzww/rll19ynX/cuHFjNW7c2HW/SZMmuvrqq/X2229r7NixRV1msdemTRvXdt26ddWoUSNVrVpVH330UZ7+6oaCe/fdd9WmTRtVrlw522P4fMOXnDt3Tl26dJFlWZoyZUqOx/KzqXC6devm2q5Tp47q1q2rGjVqKDExUbfeequNlfm+9957Tz169Mi1+RCf8bzJ6++DxRkjVyhyjzzyiD799FOtWLFCMTEx+XpuyZIldd111+mPP/4ooup8W7ly5XTVVVdl+/5FRUVd0o0nOTlZUVFRnijPZ+zatUtfffWVHnjggXw9j8934aR/TvPzGY6IiFBgYCCf+0JKD1a7du3SsmXLchy1ykpuP5uQs+rVqysiIiLb94/PuXusWrVK27Zty/fPdonPeFay+30wKipKqampOnr0aKbjc/q8FuTnv6cQrlBkLMvSI488okWLFunrr79WtWrV8n0Op9OpzZs3Kzo6uggq9H0nTpzQn3/+me3717hxYy1fvjzTvmXLlmUaXUHu3n//fVWqVEl33HFHvp7H57twqlWrpqioqEyf4ZSUFK1duzbbz3BQUJDi4+MzPSctLU3Lly/nc59H6cFq+/bt+uqrr1ShQoV8nyO3n03I2d69e3XkyJFs3z8+5+7x7rvvKj4+XvXq1cv3c/mMZ8jt98H4+HiVLFky0+d127Zt2r17d7af14L8/PcYW9tpwKf179/fCg8PtxITE60DBw64bqdOnXIdc++991rDhw933X/22WetL774wvrzzz+t9evXW926dbNCQkKsLVu22PESip3HH3/cSkxMtHbs2GGtXr3aSkhIsCIiIqyDBw9alnXp+7169WqrRIkS1sSJE62tW7dao0ePtkqWLGlt3rzZrpdQ7DidTuvyyy+3hg0bdsljfL4L7/jx49bGjRutjRs3WpKsV1991dq4caOrM9348eOtcuXKWR9//LH1888/W+3atbOqVatmnT592nWOf/zjH9Ybb7zhuv/hhx9awcHB1vTp061ff/3V6tu3r1WuXDkrKSnJ46/PG+X0nqemplp33nmnFRMTY23atCnTz/azZ8+6znHxe57bzyZ/l9N7fvz4cWvo0KHWmjVrrB07dlhfffWVdf3111tXXnmldebMGdc5+JznXW4/VyzLso4dO2aFhoZaU6ZMyfIcfMbzLi+/Dz700EPW5Zdfbn399dfWjz/+aDVu3Nhq3LhxpvPUrFnTWrhwoet+Xn7+24FwhSIjKcvb+++/7zqmWbNmVq9evVz3Bw8ebF1++eVWUFCQFRkZad1+++3Whg0bPF98MdW1a1crOjraCgoKsqpUqWJ17drV+uOPP1yPX/x+W5ZlffTRR9ZVV11lBQUFWbVr17b++9//erjq4u2LL76wJFnbtm275
DE+34W3YsWKLH+OpL+vaWlp1siRI63IyEgrODjYuvXWWy/5b1G1alVr9OjRmfa98cYbrv8WDRs2tL7//nsPvSLvl9N7vmPHjmx/tq9YscJ1jovf89x+Nvm7nN7zU6dOWS1btrQqVqxolSxZ0qpatar14IMPXhKS+JznXW4/VyzLst5++22rVKlS1tGjR7M8B5/xvMvL74OnT5+2BgwYYJUvX94KDQ21OnToYB04cOCS81z4nLz8/LeDw7Isq2jGxAAAAADAf3DNFQAAAAC4AeEKAAAAANyAcAUAAAAAbkC4AgAAAAA3IFwBAAAAgBsQrgAAAADADQhXAAAAAOAGhCsAAAAAcAPCFQAAbuZwOLR48WK7ywAAeBjhCgDgU3r37i2Hw3HJrXXr1naXBgDwcSXsLgAAAHdr3bq13n///Uz7goODbaoGAOAvGLkCAPic4OBgRUVFZbqVL19ekpmyN2XKFLVp00alSpVS9erVtWDBgkzP37x5s/7xj3+oVKlSqlChgvr27asTJ05kOua9995T7dq1FRwcrOjoaD3yyCOZHj98+LA6dOig0NBQXXnllVqyZEnRvmgAgO0IVwAAvzNy5Eh16tRJP/30k3r06KFu3bpp69atkqSTJ0+qVatWKl++vH744QfNnz9fX331VabwNGXKFD388MPq27evNm/erCVLluiKK67I9D2effZZdenSRT///LNuv/129ejRQ3/99ZdHXycAwLMclmVZdhcBAIC79O7dW7Nnz1ZISEim/U899ZSeeuopORwOPfTQQ5oyZYrrsRtvvFHXX3+93nrrLU2bNk3Dhg3Tnj17VLp0aUnSZ599prZt22r//v2KjIxUlSpV1KdPHz3//PNZ1uBwOPTMM89o7NixkkxgK1OmjD7//HOu/QIAH8Y1VwAAn9OiRYtM4UmSLrvsMtd248aNMz3WuHFjbdq0SZK0detW1atXzxWsJOmmm25SWlqatm3bJofDof379+vWW2/NsYa6deu6tkuXLq2wsDAdPHiwoC8JAFAMEK4AAD6ndOnSl0zTc5dSpUrl6biSJUtmuu9wOJSWllYUJQEAvATXXAEA/M73339/yf2rr75aknT11Vfrp59+0smTJ12Pr169WgEBAapZs6bKli2ruLg4LV++3KM1AwC8HyNXAACfc/bsWSUlJWXaV6JECUVEREiS5s+frxtuuEE333yz5syZo3Xr1undd9+VJPXo0UOjR49Wr169NGbMGB06dEgDBw7Uvffeq8jISEnSmDFj9NBDD6lSpUpq06aNjh8/rtWrV2vgwIGefaEAAK9CuAIA+JylS5cqOjo6076aNWvqt99+k2Q6+X344YcaMGCAoqOjNXfuXF1zzTWSpNDQUH3xxRcaNGiQGjRooNDQUHXq1Emvvvqq61y9evXSmTNn9K9//UtDhw5VRESEOnfu7LkXCADwSnQLBAD4FYfDoUWLFql9+/Z2lwIA8DFccwUAAAAAbkC4AgAAAAA34JorAIBfYTY8AKCoMHIFAAAAAG5AuAIAAAAANyBcAQAAAIAbEK4AAAAAwA0IVwAAAADgBoQrAAAAAHADwhUAAAAAuAHhCgAAAADc4P8AuA8h57cwy2sAAAAASUVORK5CYII=", 290 | "text/plain": [ 291 | "
" 292 | ] 293 | }, 294 | "metadata": {}, 295 | "output_type": "display_data" 296 | } 297 | ], 298 | "source": [ 299 | "plt.figure(figsize=(10, 5))\n", 300 | "plt.plot(range(1, num_epochs + 1), train_losses, marker='o', linestyle='-', color='b', label='Training Loss')\n", 301 | "plt.xlabel('Epoch')\n", 302 | "plt.ylabel('Loss')\n", 303 | "plt.title('Training Loss Curve')\n", 304 | "plt.legend()\n", 305 | "plt.show()\n" 306 | ] 307 | }, 308 | { 309 | "cell_type": "markdown", 310 | "metadata": {}, 311 | "source": [ 312 | "### Validation" 313 | ] 314 | }, 315 | { 316 | "cell_type": "code", 317 | "execution_count": 70, 318 | "metadata": {}, 319 | "outputs": [ 320 | { 321 | "name": "stdout", 322 | "output_type": "stream", 323 | "text": [ 324 | "验证准确率:93.20%\n" 325 | ] 326 | } 327 | ], 328 | "source": [ 329 | "model.eval() # 设置模型为评估模式\n", 330 | "correct = 0\n", 331 | "total = 0\n", 332 | "\n", 333 | "with torch.no_grad():\n", 334 | " for inputs, labels in val_loader:\n", 335 | " inputs, labels = inputs.to(device), labels.to(device)\n", 336 | " outputs = model(inputs)\n", 337 | " predicted = (outputs > 0.5).float()\n", 338 | " total += labels.size(0)\n", 339 | " correct += (predicted == labels.unsqueeze(1)).sum().item()\n", 340 | "\n", 341 | "print(f'验证准确率:{100 * correct / total:.2f}%')\n" 342 | ] 343 | }, 344 | { 345 | "cell_type": "markdown", 346 | "metadata": {}, 347 | "source": [ 348 | "### Test" 349 | ] 350 | }, 351 | { 352 | "cell_type": "code", 353 | "execution_count": 73, 354 | "metadata": {}, 355 | "outputs": [ 356 | { 357 | "name": "stdout", 358 | "output_type": "stream", 359 | "text": [ 360 | "预测结果已保存为 prediction_results.csv\n" 361 | ] 362 | } 363 | ], 364 | "source": [ 365 | "model.eval()\n", 366 | "def predict_image(image_path, model):\n", 367 | " \"\"\"对单张图片进行预测并返回标签\"\"\"\n", 368 | " img = Image.open(image_path).convert('RGB') # 确保图片是RGB格式\n", 369 | " img = test_transform(img).unsqueeze(0).to(device) # 预处理图片\n", 370 | "\n", 371 | " with torch.no_grad():\n", 
372 | " output = model(img)\n", 373 | " prediction = torch.sigmoid(output).item()\n", 374 | "\n", 375 | " # 将预测结果转换为0或1:0表示Cat,1表示Dog\n", 376 | " return 1 if prediction >= 0.5 else 0\n", 377 | "\n", 378 | "test_folder = os.path.join(os.getcwd() + '/datasets/test')\n", 379 | "# 遍历测试文件夹并收集预测结果\n", 380 | "results = [] # 存储 (文件名, 预测结果) 的列表 \n", 381 | "for img_file in os.listdir(test_folder):\n", 382 | " img_path = os.path.join(test_folder, img_file)\n", 383 | " if img_file.endswith(('.jpg', '.png', '.jpeg')): # 过滤图片格式\n", 384 | " label = predict_image(img_path, model)\n", 385 | " img_id, _ = os.path.splitext(img_file) # 去掉扩展名\n", 386 | " results.append([img_id, label])\n", 387 | "\n", 388 | "# 将结果保存为 CSV 文件\n", 389 | "df = pd.DataFrame(results, columns=['id', 'label'])\n", 390 | "df['id'] = df['id'].astype(int) # 转换为整数\n", 391 | "df = df.sort_values(by='id')\n", 392 | "df.to_csv(os.path.join(os.getcwd() +'/prediction_results.csv'), index=False)\n", 393 | "\n", 394 | "print(\"预测结果已保存为 prediction_results.csv\")\n", 395 | "\n" 396 | ] 397 | } 398 | ], 399 | "metadata": { 400 | "kernelspec": { 401 | "display_name": "anygrasp", 402 | "language": "python", 403 | "name": "python3" 404 | }, 405 | "language_info": { 406 | "codemirror_mode": { 407 | "name": "ipython", 408 | "version": 3 409 | }, 410 | "file_extension": ".py", 411 | "mimetype": "text/x-python", 412 | "name": "python", 413 | "nbconvert_exporter": "python", 414 | "pygments_lexer": "ipython3", 415 | "version": "3.9.19" 416 | } 417 | }, 418 | "nbformat": 4, 419 | "nbformat_minor": 2 420 | } 421 | --------------------------------------------------------------------------------