├── P0_Titanic
│   ├── Kaggle_Titanic
│   │   ├── gender_submission.csv
│   │   ├── output.csv
│   │   ├── test.csv
│   │   ├── titanic.py
│   │   └── train.csv
│   ├── README.md
│   ├── Titanic_Decision_Tree_Solution.png
│   ├── p0_playground.ipynb
│   ├── titanic_data.csv
│   ├── titanic_survival_exploration.html
│   ├── titanic_survival_exploration.ipynb
│   └── titanic_visualizations.py
├── P1_Boston_Housing
│   ├── README.md
│   ├── bj_housing.csv
│   ├── boston_housing.html
│   ├── boston_housing.ipynb
│   ├── code.py
│   ├── code_bj.py
│   ├── housing.csv
│   └── visuals.py
├── P2_Finding_Donors
│   ├── Mini_Project_NLP.py
│   ├── README.md
│   ├── census.csv
│   ├── code.py
│   ├── finding_donors.html
│   ├── finding_donors.ipynb
│   ├── p2_playground.ipynb
│   ├── project_description.md
│   ├── tree.png
│   └── visuals.py
├── P3_Create_Customer_Segments
│   ├── Mini_Project_Eigenfaces.py
│   ├── PCA - GMM preds.png
│   ├── PCA - GMM preds_prob.png
│   ├── README.md
│   ├── Visualization Samples .ipynb
│   ├── cluster.csv
│   ├── customer_segments.html
│   ├── customer_segments.ipynb
│   ├── customers.csv
│   └── visuals.py
├── P4_Smart_Cab
│   ├── README.md
│   ├── images
│   │   ├── car-black.png
│   │   ├── car-blue.png
│   │   ├── car-cyan.png
│   │   ├── car-green.png
│   │   ├── car-magenta.png
│   │   ├── car-orange.png
│   │   ├── car-red.png
│   │   ├── car-white.png
│   │   ├── car-yellow.png
│   │   ├── east-west.png
│   │   ├── logo.png
│   │   └── north-south.png
│   ├── logs
│   │   ├── sim_default-learning (4 Features, Training 20 rounds, Score FA).{csv,png,txt}
│   │   ├── sim_default-learning (5 Features, Training 20 rounds, Score FB).{csv,png,txt}
│   │   ├── sim_default-learning (5 Features, Training 20 rounds, Score FD).{csv,png,txt}
│   │   ├── sim_improved-learning - Use decay_exponential (4 Features, 300 rounds, 310 combinations, Score A+A+).{csv,png,txt}
│   │   ├── sim_improved-learning - Use decay_exponential (5 Features, 600 rounds, 1000 combinations, Score A+A).{csv,png,txt}
│   │   ├── sim_improved-learning - Use decay_exponential (5 Features, 600 rounds, 1000 combinations, Score A+A+).{csv,png,txt}
│   │   ├── sim_improved-learning - Use decay_step (4 Features, 300 rounds, 370 combinations, Score A+A+).{csv,png,txt}
│   │   ├── sim_improved-learning - Use decay_step (5 Features, 600 rounds, 1260 combinations, Score A+A+).{csv,png,txt}
│   │   ├── sim_no-learning - Hard_coded (No Training, Score A+A+).{csv,png}
│   │   ├── sim_no-learning.csv
│   │   └── sim_no-learning.png
│   ├── project_description.md
│   ├── smartcab.html
│   ├── smartcab.ipynb
│   ├── smartcab
│   │   ├── __init__.py
│   │   ├── agent.py
│   │   ├── environment.py
│   │   ├── planner.py
│   │   └── simulator.py
│   └── visuals.py
├── P5_Image_Classification
│   ├── CIFAR10 Image Classification - LSUV Initialization.ipynb
│   ├── CIFAR10 Image Classification using Keras (Concise).ipynb
│   ├── CIFAR10 Image Classification using Keras.ipynb
│   ├── CIFAR10 Image Classification using TensorFlow.ipynb
│   ├── CIFAR10 with Transfer Learning (For Fun).ipynb
│   ├── Conv2D.ipynb
│   ├── Intro
│   │   ├── image
│   │   │   ├── Learn_Rate_Tune_Image.png
│   │   │   ├── Mean_Variance_Image.png
│   │   │   ├── network_diagram.png
│   │   │   └── notmnist.png
│   │   └── intro_to_tensorflow.ipynb
│   ├── MNIST Image Classification using Keras.ipynb
│   ├── MNIST
│   │   └── multilayer_perceptron.py
│   ├── README.md
│   ├── castle.jpg
│   ├── helper.py
│   ├── image_classification.ipynb
│   ├── image_classification_ZH-CN.html
│   ├── image_classification_ZH-CN.ipynb
│   ├── lsuv_init.py
│   ├── p5_playground.ipynb
│   ├── problem_unittests.py
│   └── utils.py
├── P6_Dogs_VS_Cats
│   ├── Class Activation Map Visualizations.ipynb
│   ├── Dog VS Cat - Experimenting With Transfer Learning.ipynb
│   ├── Dog VS Cat - Kaggle.ipynb
│   ├── Dog VS Cat using Transfer Learning.ipynb
│   ├── Dogs VS Cats.ipynb
│   ├── Hand-built CNN Model.ipynb
│   ├── OpenCV_Primer.ipynb
│   ├── cat1.jpg
│   ├── cat2.jpg
│   ├── cat3.jpg
│   ├── cat4.jpg
│   ├── cat5.jpg
│   ├── cat6.jpg
│   ├── output.gif
│   ├── output2.gif
│   ├── utils.py
│   ├── view.jpg
│   ├── weights_history.p
│   └── weights_history2.p
├── README.md
└── Visualizations
    ├── Activation Functions.py
    ├── Lasso.ipynb
    ├── convexity.py
    ├── gaussian.py
    ├── pca_feature_transformation.py
    ├── regularization.py
    └── univariate_regression_figure.py

--------------------------------------------------------------------------------
/P0_Titanic/Kaggle_Titanic/gender_submission.csv:
--------------------------------------------------------------------------------
PassengerId,Survived
892,0
893,1
894,0
895,0
896,1
[... 418 data rows in total: one 0/1 Survived prediction for each PassengerId 892-1309 (Kaggle's sample submission) ...]

--------------------------------------------------------------------------------
/P0_Titanic/Kaggle_Titanic/output.csv:
--------------------------------------------------------------------------------
PassengerId,survived
892,0
893,0
894,0
895,0
896,0
[... 418 data rows in total: one 0/1 prediction for each PassengerId 892-1309, produced by titanic.py below ...]
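Both prediction files above cover the same 418 test passengers, so it is easy to measure how far the trained model strays from the gender-only sample submission. A minimal sketch, assuming both CSVs are read from this folder:

```python
# Compare the model's predictions (output.csv) against the sample
# gender-based submission (gender_submission.csv) on PassengerId.
import pandas as pd

baseline = pd.read_csv('gender_submission.csv')   # columns: PassengerId, Survived
model = pd.read_csv('output.csv')                 # columns: PassengerId, survived
merged = baseline.merge(model, on='PassengerId')
agreement = (merged['Survived'] == merged['survived']).mean()
print 'Agreement with gender-only baseline: {:.1%}'.format(agreement)
```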
--------------------------------------------------------------------------------
/P0_Titanic/Kaggle_Titanic/titanic.py:
--------------------------------------------------------------------------------
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, ShuffleSplit, GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.metrics import accuracy_score, r2_score, make_scorer

train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')


def preprocess(f):
    f = f[['Pclass', 'SibSp', 'Parch', 'Sex', 'Age', 'Fare', 'Embarked']].copy()

    # Preprocess 'Pclass'
    # f.loc[f.Pclass == 1, 'Pclass'] = 'a'
    # f.loc[f.Pclass == 2, 'Pclass'] = 'b'
    # f.loc[f.Pclass == 3, 'Pclass'] = 'c'

    # Preprocess 'Sex': encode as 0/1
    f.loc[f.Sex == 'male', 'Sex'] = 1
    f.loc[f.Sex == 'female', 'Sex'] = 0
    f.Sex = f.Sex.astype(int)

    # Preprocess 'Age': flag missing values with -1, then standardize
    f.loc[np.isnan(f.Age), 'Age'] = -1
    mean = f.Age.mean()
    std = f.Age.std()
    f.loc[:, 'Age'] = (f.loc[:, 'Age'] - mean) / std

    # Preprocess 'Fare': fill missing values with 0, then standardize
    f.loc[np.isnan(f.Fare), 'Fare'] = 0
    mean = f.Fare.mean()
    std = f.Fare.std()
    f.loc[:, 'Fare'] = (f.loc[:, 'Fare'] - mean) / std

    f = pd.get_dummies(f)

    return f


def fit_model(X, y):
    cv_sets = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
    # clf = DecisionTreeClassifier(random_state=0)
    clf = RandomForestClassifier(random_state=0)
    params = {'n_estimators': np.arange(300, 400, 100)}
    scorer = make_scorer(r2_score)
    grid = GridSearchCV(estimator=clf, param_grid=params, scoring=scorer, cv=cv_sets)
    grid.fit(X, y)
    print 'Best n_estimators =', grid.best_estimator_.get_params()['n_estimators']
    return grid.best_estimator_


f = preprocess(train)
print list(f.columns)
survived = train.Survived.copy()
X_train, X_test, y_train, y_test = train_test_split(f, survived, random_state=0)
clf = fit_model(X_train, y_train)
y_pred = clf.predict(X_test)
print accuracy_score(y_test, y_pred)
print r2_score(y_test, y_pred)
print clf.feature_importances_

f = preprocess(test)
result = clf.predict(f)
result = pd.DataFrame({'PassengerId': pd.Series(test.PassengerId),
                       'survived': pd.Series(result)},
                      columns=['PassengerId', 'survived'])
result.to_csv('output.csv', index=False)

--------------------------------------------------------------------------------
/P0_Titanic/README.md:
--------------------------------------------------------------------------------
# Project 0: Introduction and Foundations
## Predicting Titanic Passenger Survival

### Installation Requirements
This project requires **Python 2.7** and the following Python libraries:

- [NumPy](http://www.numpy.org/)
- [Pandas](http://pandas.pydata.org)
- [matplotlib](http://matplotlib.org/)
- [scikit-learn](http://scikit-learn.org/stable/)

You will also need to install and run [Jupyter Notebook](http://jupyter.readthedocs.io/en/latest/install.html#optional-for-experienced-python-developers-installing-jupyter-with-pip).

Udacity recommends that students install [Anaconda](https://www.continuum.io/downloads), a Python distribution that bundles all the libraries and software this project needs. [This page](https://classroom.udacity.com/nanodegrees/nd002/parts/0021345403/modules/317671873575460/lessons/5430778793/concepts/54140889150923) explains how to install Anaconda.

If you are on macOS and comfortable with the command line, you can install [homebrew](http://brew.sh/) and the brew build of Python:

```bash
$ brew install python
```

Then install the required Python libraries with:

```bash
$ pip install numpy pandas matplotlib scikit-learn scipy jupyter
```

### Code

The example code is in the `titanic_survival_exploration_cn.ipynb` file, with helper code in `titanic_visualizations.py`. Although some code is provided to get you started, you will still need to add code of your own to implement the functionality the project requires.

### Run

At the command line, make sure the current directory is the top level of the `titanic_survival_exploration/` folder (the directory containing this README file), then run:

```bash
$ jupyter notebook titanic_survival_exploration.ipynb
```

This launches Jupyter Notebook and opens the project file in your browser.

If you are new to Jupyter, these two links are worth a look:

- [Video tutorial on using Jupyter](http://cn-static.udacity.com/mlnd/how_to_use_jupyter.mp4)
- [Why use Jupyter?](https://www.zhihu.com/question/37490497)

### Data

The data for this project is contained in the `titanic_data.csv` file. It includes the following features:

- **Survived**: whether the passenger survived (0 = no, 1 = yes)
- **Pclass**: socioeconomic class (1 = upper class, 2 = middle class, 3 = lower class)
- **Name**: the passenger's name
- **Sex**: the passenger's sex
- **Age**: the passenger's age (may contain `NaN`)
- **SibSp**: number of siblings and spouses aboard with the passenger
- **Parch**: number of parents and children aboard with the passenger
- **Ticket**: the passenger's ticket number
- **Fare**: the fare the passenger paid
- **Cabin**: the passenger's cabin number (may contain `NaN`)
- **Embarked**: the passenger's port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)
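The feature list above maps directly onto a quick sanity check of the data. A minimal sketch, assuming `titanic_data.csv` sits in the working directory as the README describes:

```python
# Orientation sketch for the features listed above: group passengers by
# Sex and Pclass and print the mean of Survived, i.e. the survival rate.
import pandas as pd

data = pd.read_csv('titanic_data.csv')
print data.groupby('Sex')['Survived'].mean()
print data.groupby(['Sex', 'Pclass'])['Survived'].mean()
```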
--------------------------------------------------------------------------------
/P0_Titanic/Titanic_Decision_Tree_Solution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P0_Titanic/Titanic_Decision_Tree_Solution.png

--------------------------------------------------------------------------------
/P0_Titanic/titanic_visualizations.py:
--------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

def filter_data(data, condition):
    """
    Remove elements that do not match the condition provided.
    Takes a data list as input and returns a filtered list.
    Conditions should be a list of strings of the following format:
      '<field> <op> <value>'
    where the following operations are valid: >, <, >=, <=, ==, !=

    Example: ["Sex == 'male'", 'Age < 18']
    """

    field, op, value = condition.split(" ")

    # convert value into number or strip excess quotes if string
    try:
        value = float(value)
    except:
        value = value.strip("\'\"")

    # get booleans for filtering
    if op == ">":
        matches = data[field] > value
    elif op == "<":
        matches = data[field] < value
    elif op == ">=":
        matches = data[field] >= value
    elif op == "<=":
        matches = data[field] <= value
    elif op == "==":
        matches = data[field] == value
    elif op == "!=":
        matches = data[field] != value
    else:  # catch invalid operation codes
        raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.")

    # filter data and outcomes
    data = data[matches].reset_index(drop = True)
    return data

def survival_stats(data, outcomes, key, filters = []):
    """
    Print out selected statistics regarding survival, given a feature of
    interest and any number of filters (including no filters)
    """

    # Check that the key exists
    if key not in data.columns.values:
        print "'{}' is not a feature of the Titanic data. Did you spell something wrong?".format(key)
        return False

    # Return the function before visualizing if 'Cabin' or 'Ticket'
    # is selected: too many unique categories to display
    if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'):
        print "'{}' has too many unique categories to display! Try a different feature.".format(key)
        return False

    # Merge data and outcomes into single dataframe
    all_data = pd.concat([data, outcomes], axis = 1)

    # Apply filters to data
    for condition in filters:
        all_data = filter_data(all_data, condition)

    # Create outcomes DataFrame
    all_data = all_data[[key, 'Survived']]

    # Create plotting figure
    plt.figure(figsize=(8,6))

    # 'Numerical' features
    if(key == 'Age' or key == 'Fare'):

        # Remove NaN values from Age data
        all_data = all_data[~np.isnan(all_data[key])]

        # Divide the range of data into bins and count survival rates
        min_value = all_data[key].min()
        max_value = all_data[key].max()
        value_range = max_value - min_value

        # 'Fare' has a larger range of values than 'Age', so create more bins
        if(key == 'Fare'):
            bins = np.arange(0, all_data['Fare'].max() + 20, 20)
        if(key == 'Age'):
            bins = np.arange(0, all_data['Age'].max() + 10, 10)

        # Overlay each bin's survival rates
        nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop = True)
        surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop = True)
        plt.hist(nonsurv_vals, bins = bins, alpha = 0.6,
                 color = 'red', label = 'Did not survive')
        plt.hist(surv_vals, bins = bins, alpha = 0.6,
                 color = 'green', label = 'Survived')

        # Add legend to plot
        plt.xlim(0, bins.max())
        plt.legend(framealpha = 0.8)

    # 'Categorical' features
    else:

        # Set the various categories
        if(key == 'Pclass'):
            values = np.arange(1,4)
        if(key == 'Parch' or key == 'SibSp'):
            values = np.arange(0, np.max(data[key]) + 1)
        if(key == 'Embarked'):
            values = ['C', 'Q', 'S']
        if(key == 'Sex'):
            values = ['male', 'female']

        # Create DataFrame containing categories and count of each
        frame = pd.DataFrame(index = np.arange(len(values)), columns=(key, 'Survived', 'NSurvived'))
        for i, value in enumerate(values):
            frame.loc[i] = [value, \
                            len(all_data[(all_data['Survived'] == 1) & (all_data[key] == value)]), \
                            len(all_data[(all_data['Survived'] == 0) & (all_data[key] == value)])]

        # Set the width of each bar
        bar_width = 0.4

        # Display each category's survival rates
        for i in np.arange(len(frame)):
            nonsurv_bar = plt.bar(i - bar_width, frame.loc[i]['NSurvived'], width = bar_width, color = 'r')
            surv_bar = plt.bar(i, frame.loc[i]['Survived'], width = bar_width, color = 'g')

        plt.xticks(np.arange(len(frame)), values)
        plt.legend((nonsurv_bar[0], surv_bar[0]), ('Did not survive', 'Survived'), framealpha = 0.8)

    # Common attributes for plot formatting
    plt.xlabel(key)
    plt.ylabel('Number of Passengers')
    plt.title('Passenger Survival Statistics With \'%s\' Feature'%(key))
    plt.show()

    # Report number of passengers with missing values
    if sum(pd.isnull(all_data[key])):
        nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived']
        print "Passengers with missing '{}' values: {} ({} survived, {} did not survive)".format( \
            key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0))
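The `'<field> <op> <value>'` condition format and the two plotting branches above are easiest to see in use. A minimal usage sketch, assuming the notebook's usual split of `titanic_data.csv` into features and outcomes:

```python
# Usage sketch for survival_stats: plot survival counts by Sex, restricted
# to third-class passengers under 18. Assumes titanic_data.csv is local.
import pandas as pd
from titanic_visualizations import survival_stats

full_data = pd.read_csv('titanic_data.csv')
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis=1)

survival_stats(data, outcomes, 'Sex', ["Pclass == 3", "Age < 18"])
```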
--------------------------------------------------------------------------------
/P1_Boston_Housing/README.md:
--------------------------------------------------------------------------------
# Project 1: Model Evaluation and Validation
## Predicting Boston Housing Prices

### Getting Started

This project requires **Python 2.7** and the following Python libraries:

- [NumPy](http://www.numpy.org/)
- [matplotlib](http://matplotlib.org/)
- [scikit-learn](http://scikit-learn.org/stable/)

You will also need software that can run and edit [ipynb](http://jupyter.org/) files.

Udacity recommends that students install [Anaconda](https://www.continuum.io/downloads), a widely used integrated Python environment that already includes all the libraries this project needs. The P0 project also explains [how to set up your environment](https://github.com/udacity/machine-learning/blob/master/projects_cn/titanic_survival_exploration/README.md).

### Code

A code template is provided in `boston_housing.ipynb`. You will also use `visuals.py` and the `housing.csv` data file to complete this project. Some of the code is already provided, but you will need to implement additional functionality to finish the project.

### Run

In a terminal or command window, navigate to the `boston_housing/` directory (which contains this README file) and run the command below:

```jupyter notebook boston_housing.ipynb```

This launches jupyter notebook and opens the file in your browser.

### Data

The edited Boston housing dataset has 490 data points, each with three features. It is adapted from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Housing).

**Features**

1. `RM`: average number of rooms per dwelling
2. `LSTAT`: percentage of the population considered lower income
3. `PTRATIO`: pupil-teacher ratio by town

**Target variable**

4. `MEDV`: median value of homes

--------------------------------------------------------------------------------
/P1_Boston_Housing/code.py:
--------------------------------------------------------------------------------
import numpy as np
import pandas as pd
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import r2_score
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor

data = pd.read_csv('housing.csv')
prices = data['MEDV']
features = data.drop('MEDV', axis=1)  # equivalently: data[['RM', 'LSTAT', 'PTRATIO']]
X_train, X_test, y_train, y_test = train_test_split(features, prices, train_size=0.8, random_state=0)


# Fit a decision tree regressor, tuning max_depth with grid search over shuffled CV splits.
def fit_model(X, y):
    cv_sets = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
    regressor = DecisionTreeRegressor(random_state=0)
    params = {'max_depth': np.arange(1, 11)}
    scoring_fnc = make_scorer(r2_score)
    grid = GridSearchCV(estimator=regressor, param_grid=params, scoring=scoring_fnc, cv=cv_sets)
    grid.fit(X, y)
    return grid.best_estimator_


model = fit_model(X_train, y_train)
print model.get_params()['max_depth']

print model.predict([[5, 17, 15]])

client_data = [[5, 17, 15],  # Client 1
               [4, 32, 22],  # Client 2
               [8, 3, 12]]   # Client 3

from sklearn.neighbors import NearestNeighbors

# Find the training houses most similar to each client's home.
nb = NearestNeighbors(n_neighbors=10)
nb.fit(X_train)
print nb.kneighbors(client_data, 1)
print X_train.iloc[[101]]
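Note that `code.py` tunes `max_depth` but never scores the tuned tree on the held-out 20% split. A minimal follow-up sketch, reusing `model`, `X_test`, `y_test`, and `r2_score` as defined above:

```python
# Evaluate the tuned tree on the held-out test split, a small check that
# code.py stops short of. Reuses names defined in code.py above.
y_pred = model.predict(X_test)
print 'Held-out R^2 = {:.3f}'.format(r2_score(y_test, y_pred))
```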
--------------------------------------------------------------------------------
/P1_Boston_Housing/code_bj.py:
--------------------------------------------------------------------------------
import numpy as np
import pandas as pd
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import r2_score
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor


data = pd.read_csv('bj_housing.csv')
prices = data['Value']
features = data.drop('Value', axis=1)
X_train, X_test, y_train, y_test = train_test_split(features, prices, train_size=0.8)


# Train the model, using GridSearchCV with ShuffleSplit cross-validation for parameter tuning.
def fit_model(X, y):
    cv_sets = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
    regressor = DecisionTreeRegressor(random_state=0)
    params = {'max_depth': np.arange(1, 11)}
    scoring_fnc = make_scorer(r2_score)
    grid = GridSearchCV(estimator=regressor, param_grid=params, scoring=scoring_fnc, cv=cv_sets)
    grid.fit(X, y)
    return grid.best_estimator_


# Re-run the whole fit over several random train/test splits to see how stable
# the chosen max_depth and the test-set R^2 are.
def robust_test(features, prices, fitter, iteration):
    depth = []
    score = []
    for x in range(iteration):
        X_train, X_test, y_train, y_test = train_test_split(features, prices, train_size=0.8, random_state=x)
        model = fitter(X_train, y_train)
        depth.append(model.get_params()['max_depth'])
        y_pred = model.predict(pd.DataFrame(X_test))
        score.append(r2_score(y_test, y_pred))  # r2_score expects (y_true, y_pred)
    return depth, score

(depth, score) = robust_test(features, prices, fit_model, 10)
print pd.Series(depth).describe()
print pd.Series(score).describe()

--------------------------------------------------------------------------------
/P1_Boston_Housing/housing.csv:
--------------------------------------------------------------------------------
RM,LSTAT,PTRATIO,MEDV
6.575,4.98,15.3,504000.0
6.421,9.14,17.8,453600.0
7.185,4.03,17.8,728700.0
6.998,2.94,18.7,701400.0
7.147,5.33,18.7,760200.0
[... 490 data rows in total: the three features (RM, LSTAT, PTRATIO) and the MEDV target described in the README above ...]
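The data exploration the project starts with amounts to a few summary statistics on the target. A minimal sketch, assuming `housing.csv` is in the working directory:

```python
# Quick look at the distribution of the MEDV target in housing.csv.
import pandas as pd

data = pd.read_csv('housing.csv')
prices = data['MEDV']
print 'Min price:  ${:,.0f}'.format(prices.min())
print 'Max price:  ${:,.0f}'.format(prices.max())
print 'Mean price: ${:,.0f}'.format(prices.mean())
print 'Median:     ${:,.0f}'.format(prices.median())
print 'Std dev:    ${:,.0f}'.format(prices.std())
```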
""" 22 | 23 | # Create 10 cross-validation sets for training and testing 24 | cv = ShuffleSplit(n_splits = 10, test_size = 0.2, random_state = 0) 25 | 26 | 27 | # Generate the training set sizes increasing by 50 28 | train_sizes = np.rint(np.linspace(1, X.shape[0]*0.8 - 1, 9)).astype(int) 29 | 30 | # Create the figure window 31 | fig = pl.figure(figsize=(10,7)) 32 | 33 | # Create three different models based on max_depth 34 | for k, depth in enumerate([1,3,6,10]): 35 | 36 | # Create a Decision tree regressor at max_depth = depth 37 | regressor = DecisionTreeRegressor(max_depth = depth) 38 | 39 | # Calculate the training and testing scores 40 | sizes, train_scores, test_scores = learning_curve(regressor, X, y, \ 41 | cv = cv, train_sizes = train_sizes, scoring = 'r2') 42 | 43 | # Find the mean and standard deviation for smoothing 44 | train_std = np.std(train_scores, axis = 1) 45 | train_mean = np.mean(train_scores, axis = 1) 46 | test_std = np.std(test_scores, axis = 1) 47 | test_mean = np.mean(test_scores, axis = 1) 48 | 49 | # Subplot the learning curve 50 | ax = fig.add_subplot(2, 2, k+1) 51 | ax.plot(sizes, train_mean, 'o-', color = 'r', label = 'Training Score') 52 | ax.plot(sizes, test_mean, 'o-', color = 'g', label = 'Testing Score') 53 | ax.fill_between(sizes, train_mean - train_std, \ 54 | train_mean + train_std, alpha = 0.15, color = 'r') 55 | ax.fill_between(sizes, test_mean - test_std, \ 56 | test_mean + test_std, alpha = 0.15, color = 'g') 57 | 58 | # Labels 59 | ax.set_title('max_depth = %s'%(depth)) 60 | ax.set_xlabel('Number of Training Points') 61 | ax.set_ylabel('Score') 62 | ax.set_xlim([0, X.shape[0]*0.8]) 63 | ax.set_ylim([-0.05, 1.05]) 64 | 65 | # Visual aesthetics 66 | ax.legend(bbox_to_anchor=(1.05, 2.05), loc='lower left', borderaxespad = 0.) 67 | fig.suptitle('Decision Tree Regressor Learning Performances', fontsize = 16, y = 1.03) 68 | fig.tight_layout() 69 | fig.show() 70 | 71 | 72 | def ModelComplexity(X, y): 73 | """ Calculates the performance of the model as model complexity increases. 74 | The learning and testing errors rates are then plotted. 
""" 75 | 76 | # Create 10 cross-validation sets for training and testing 77 | cv = ShuffleSplit(n_splits = 10, test_size = 0.2, random_state = 0) 78 | 79 | # Vary the max_depth parameter from 1 to 10 80 | max_depth = np.arange(1,11) 81 | 82 | # Calculate the training and testing scores 83 | train_scores, test_scores = validation_curve(DecisionTreeRegressor(), X, y, \ 84 | param_name = "max_depth", param_range = max_depth, cv = cv, scoring = 'r2') 85 | 86 | # Find the mean and standard deviation for smoothing 87 | train_mean = np.mean(train_scores, axis=1) 88 | train_std = np.std(train_scores, axis=1) 89 | test_mean = np.mean(test_scores, axis=1) 90 | test_std = np.std(test_scores, axis=1) 91 | 92 | # Plot the validation curve 93 | pl.figure(figsize=(7, 5)) 94 | pl.title('Decision Tree Regressor Complexity Performance') 95 | pl.plot(max_depth, train_mean, 'o-', color = 'r', label = 'Training Score') 96 | pl.plot(max_depth, test_mean, 'o-', color = 'g', label = 'Validation Score') 97 | pl.fill_between(max_depth, train_mean - train_std, \ 98 | train_mean + train_std, alpha = 0.15, color = 'r') 99 | pl.fill_between(max_depth, test_mean - test_std, \ 100 | test_mean + test_std, alpha = 0.15, color = 'g') 101 | 102 | # Visual aesthetics 103 | pl.legend(loc = 'lower right') 104 | pl.xlabel('Maximum Depth') 105 | pl.ylabel('Score') 106 | pl.ylim([-0.05,1.05]) 107 | pl.show() 108 | 109 | 110 | def PredictTrials(X, y, fitter, data): 111 | """ Performs trials of fitting and predicting data. """ 112 | 113 | # Store the predicted prices 114 | prices = [] 115 | 116 | for k in range(10): 117 | # Split the data 118 | X_train, X_test, y_train, y_test = train_test_split(X, y, \ 119 | test_size = 0.2, random_state = k) 120 | 121 | # Fit the data 122 | reg = fitter(X_train, y_train) 123 | 124 | # Make a prediction 125 | pred = reg.predict([data[0]])[0] 126 | prices.append(pred) 127 | 128 | # Result 129 | print "Trial {}: ${:,.2f}".format(k+1, pred) 130 | 131 | # Display price range 132 | print "\nRange in prices: ${:,.2f}".format(max(prices) - min(prices)) -------------------------------------------------------------------------------- /P2_Finding_Donors/Mini_Project_NLP.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | import re 4 | 5 | # Maximum Likelihood Hypothesis 6 | # 7 | # 8 | # In this quiz we will find the maximum likelihood word based on the preceding word 9 | # 10 | # Fill in the NextWordProbability procedure so that it takes in sample text and a word, 11 | # and returns a dictionary with keys the set of words that come after, whose values are 12 | # the number of times the key comes after that word. 
13 | # 14 | 15 | 16 | # 文本预处理:断句、断词、去标点符号、小写化 17 | def pre_process(sample): 18 | sample = sample.lower() 19 | letters_only = re.sub("[^a-zA-Z']", " ", sample) # 将所有非字符和单引号的字符全都替换成为空格 20 | words_only = letters_only.split() 21 | return words_only 22 | 23 | 24 | # 计算给定单词target后可能出现的所有单词及其频率,返回该统计信息 25 | def next_word_rate(word_list, target): 26 | target = target.lower() 27 | freq = {} 28 | count = 0 29 | for i in range(len(word_list) - 1): 30 | nxt = word_list[i + 1] 31 | if word_list[i] == target: 32 | count += 1 33 | if nxt in freq: 34 | freq[nxt] += 1 35 | else: 36 | freq[nxt] = 1 37 | for x in freq: 38 | freq[x] /= float(count) 39 | return freq 40 | 41 | 42 | # 计算给定语料sample,给定单词target之后距离为distance位置最有可能出现的单词 43 | def predict_word(sample, target, distance): 44 | word_list = pre_process(sample) 45 | first_list = next_word_rate(word_list, target) 46 | print "1st Missing Word: ", first_list.keys() 47 | if distance == 1: 48 | return max(first_list, key=first_list.get) # 返回可能性最大的单词 49 | second_list = {} 50 | second_set = set([]) 51 | # 计算第一个单词的每个假设下第二个单词的所有可能选项,存为一个装词典的词典 52 | for w in first_list: 53 | second_list[w] = next_word_rate(word_list, w) 54 | second_set.update(second_list[w].keys()) # 将所有可能性单独存为一个set去重 55 | result = {} 56 | for sec in second_set: 57 | total = 0 58 | for mapping in second_list: 59 | if sec in second_list[mapping]: 60 | total += first_list[mapping] * second_list[mapping][sec] # 全概率公式计算第二个单词每一种可能性的独立概率 61 | result[sec] = total 62 | 63 | print "2nd Missing Word: ", list(second_set) 64 | return max(result, key=result.get) 65 | 66 | sample_memo = ''' 67 | Milt, we're gonna need to go ahead and move you downstairs into storage B. We have some new people coming in, and we need all the space we can get. So if you could just go ahead and pack up your stuff and move it down there, that would be terrific, OK? 68 | Oh, and remember: next Friday ... is Hawaiian shirt day. So, you know, if you want to, go ahead and wear a Hawaiian shirt and jeans. 69 | Oh, oh, and I almost forgot. Ahh, I'm also gonna need you to go ahead and come in on Sunday, too ... 70 | Hello Peter, whats happening? Ummm, I'm gonna need you to go ahead and come in tomorrow. So if you could be here around 9 that would be great, mmmk ... oh oh! and I almost forgot ahh, I'm also gonna need you to go ahead and come in on Sunday too, kay. We ahh lost some people this week and ah, we sorta need to play catch up. 71 | ''' 72 | 73 | corrupted_memo = ''' 74 | Yeah, I'm gonna --- you to go ahead --- --- complain about this. 
Oh, and if you could --- --- and sit at the kids' table, that'd be --- 75 | ''' 76 | 77 | target = "could" 78 | distance = 2 79 | res = predict_word(sample_memo, target, distance) 80 | print 'The most likely word after <', target, '> is <', res, '>' 81 | 82 | -------------------------------------------------------------------------------- /P2_Finding_Donors/README.md: -------------------------------------------------------------------------------- 1 | # 机器学习纳米学位 2 | # 监督学习 3 | ## 项目: 为CharityML寻找捐献者 4 | ### 安装 5 | 6 | 这个项目要求使用 Python 2.7 并且需要安装下面这些python包: 7 | 8 | - [Python 2.7](https://www.python.org/download/releases/2.7/) 9 | - [NumPy](http://www.numpy.org/) 10 | - [Pandas](http://pandas.pydata.org/) 11 | - [scikit-learn](http://scikit-learn.org/stable/) 12 | - [matplotlib](http://matplotlib.org/) 13 | 14 | 你同样需要安装好相应软件使之能够运行 [iPython Notebook](http://ipython.org/notebook.html) 15 | 16 | 优达学城推荐学生安装[Anaconda](https://www.continuum.io/downloads), 这是一个已经打包好的python发行版,它包含了我们这个项目需要的所有的库和软件。 17 | 18 | ### 代码 19 | 20 | 初始代码包含在`finding_donors.ipynb`这个notebook文件中。你还会用到`visuals.py`和名为`census.csv`的数据文件来完成这个项目。我们已经为你提供了一部分代码,但还有些功能需要你来实现才能以完成这个项目。 21 | 这里面有一些代码已经实现好来帮助你开始项目,但是为了完成项目,你还需要实现附加的功能。 22 | 注意包含在`visuals.py`中的代码设计成一个外部导入的功能,而不是打算学生去修改。如果你对notebook中创建的可视化感兴趣,你也可以去查看这些代码。 23 | 24 | 25 | ### 运行 26 | 在命令行中,确保当前目录为 `finding_donors/` 文件夹的最顶层(目录包含本 README 文件),运行下列命令: 27 | 28 | ```bash 29 | jupyter notebook finding_donors.ipynb 30 | ``` 31 | 32 | ​这会启动 Jupyter Notebook 并把项目文件打开在你的浏览器中。 33 | 34 | ### 数据 35 | 36 | 修改的人口普查数据集含有将近32,000个数据点,每一个数据点含有13个特征。这个数据集是Ron Kohavi的论文*"Scaling Up the Accuracy of Naive-Bayes Classifiers: a Decision-Tree Hybrid",*中数据集的一个修改版本。你能够在[这里](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf)找到论文,在[UCI的网站](https://archive.ics.uci.edu/ml/datasets/Census+Income)找到原始数据集。 37 | 38 | **特征** 39 | 40 | - `age`: 一个整数,表示被调查者的年龄。 41 | - `workclass`: 一个类别变量表示被调查者的通常劳动类型,允许的值有 {Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked} 42 | - `education_level`: 一个类别变量表示教育程度,允许的值有 {Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool} 43 | - `education-num`: 一个整数表示在学校学习了多少年 44 | - `marital-status`: 一个类别变量,允许的值有 {Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse} 45 | - `occupation`: 一个类别变量表示一般的职业领域,允许的值有 {Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces} 46 | - `relationship`: 一个类别变量表示家庭情况,允许的值有 {Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried} 47 | - `race`: 一个类别变量表示人种,允许的值有 {White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black} 48 | - `sex`: 一个类别变量表示性别,允许的值有 {Female, Male} 49 | - `capital-gain`: 连续值。 50 | - `capital-loss`: 连续值。 51 | - `hours-per-week`: 连续值。 52 | - `native-country`: 一个类别变量表示原始的国家,允许的值有 {United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands} 53 | 54 | **目标变量** 55 | 56 | - `income`: 一个类别变量,表示收入属于那个类别,允许的值有 {<=50K, >50K} 
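The project asks you to set a benchmark before training anything; one common choice is a naive predictor that labels every individual as earning more than $50K. A minimal sketch, assuming `census.csv` is local and using the same accuracy and F0.5 metrics that `code.py` uses below:

```python
# Benchmark sketch: a naive predictor that labels everyone >50K, scored
# with the accuracy / F0.5 metrics used elsewhere in this project.
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, fbeta_score

data = pd.read_csv('census.csv')
income = (data['income'] == '>50K').astype(int)
naive_pred = np.ones(len(income), dtype=int)
print 'Naive accuracy =', accuracy_score(income, naive_pred)
print 'Naive F0.5     =', fbeta_score(income, naive_pred, 0.5)
```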
-------------------------------------------------------------------------------- /P2_Finding_Donors/code.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import numpy as np 3 | import pandas as pd 4 | from time import time 5 | from sklearn.preprocessing import MinMaxScaler, LabelEncoder 6 | from sklearn.model_selection import train_test_split 7 | from sklearn.metrics import fbeta_score, accuracy_score 8 | from sklearn.tree import DecisionTreeClassifier, export_graphviz 9 | from sklearn.naive_bayes import GaussianNB 10 | from sklearn.linear_model import SGDClassifier 11 | from sklearn.svm import SVC 12 | from sklearn.neighbors import KNeighborsClassifier 13 | from sklearn.linear_model import LogisticRegression 14 | from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, GradientBoostingClassifier, BaggingClassifier 15 | 16 | data = pd.read_csv("census.csv") 17 | income_raw = data['income'] # label column: 1 18 | features_raw = data.drop('income', axis=1) # feature column: 13 19 | 20 | # Numerical feature preprocessing: feature scaling 21 | # For features with a long-tailed (skewed) distribution, a log transform compresses the range of values 22 | # In the raw data, capital-gain ranges over [0, 99999] and capital-loss over [0, 4356] 23 | # After compression, capital-gain ranges over [0, 11] and capital-loss over [0, 8] 24 | skewed = ['capital-gain', 'capital-loss']; features_raw[skewed] = data[skewed].apply(lambda x: np.log(x + 1)) # apply the log transform described above 25 | scaler = MinMaxScaler() 26 | numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week'] 27 | features_raw[numerical] = scaler.fit_transform(features_raw[numerical]) 28 | 29 | # Categorical feature preprocessing: one-hot encoding 30 | # Feature values must have a type that supports arithmetic, such as numeric or boolean; object and string types will not do. 31 | features = pd.get_dummies(features_raw) 32 | print "{} total features after one-hot encoding.".format(features.shape[1]) 33 | 34 | # An alternative preprocessing for categorical features: label encoding 35 | cat_features = ['workclass', 'education_level', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country'] 36 | num_features = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week'] 37 | trans_features = {} 38 | for f in cat_features: 39 | labels = data[f].values 40 | le = LabelEncoder() 41 | le.fit(labels) 42 | trans = le.transform(data[f]) 43 | trans_features[f] = pd.Series(trans) 44 | for f in num_features: 45 | trans_features[f] = pd.Series(features[f]) 46 | trans_features = pd.DataFrame(trans_features) 47 | 48 | # Label preprocessing: map directly to 0 and 1 49 | income = (income_raw == '>50K').astype(int) 50 | 51 | # Split the data into a training set and a test set 52 | X_train, X_test, y_train, y_test = train_test_split(features, income, test_size=0.2, random_state=0) 53 | X_train_trans, X_test_trans, y_train_trans, y_test_trans = train_test_split(trans_features, income, test_size=0.2, random_state=0) 54 | 55 | # Training 1: with the one-hot encoded training data 56 | clf = DecisionTreeClassifier(random_state=0, max_depth=5) 57 | clf.fit(X_train, y_train) 58 | y_pred = clf.predict(X_test) 59 | imp = clf.feature_importances_ 60 | indices = np.argsort(imp)[::-1] # Sort according to importance 61 | columns = X_train.columns.values[indices[:5]] # Select the first 5 features 62 | values = imp[indices][:5] # Get the corresponding importance 63 | print "\nThe Most Important 5 Features: " 64 | for i in range(columns.shape[0]): 65 | print '-', columns[i], '=', values[i] 66 | print 'Accuracy =', accuracy_score(y_test, y_pred) 67 | print 'F0.5 =', fbeta_score(y_test, y_pred, 0.5) 68 | 69 | # Training 2: with the label-encoded training data 70 | clf_trans = DecisionTreeClassifier(random_state=0, max_depth=5) 71 | clf_trans.fit(X_train_trans, y_train_trans) 72 | y_pred_trans = clf_trans.predict(X_test_trans) 73 | imp_trans =
clf_trans.feature_importances_ 74 | indices = np.argsort(imp_trans)[::-1] # Sort according to importance 75 | columns = X_train_trans.columns.values[indices[:5]] # Select the first 5 features 76 | values = imp_trans[indices][:5] # Get the corresponding importance 77 | print "\nThe Most Important 5 Features: " 78 | for i in range(columns.shape[0]): 79 | print '-', columns[i], '=', values[i] 80 | print 'Accuracy =', accuracy_score(y_test_trans, y_pred_trans) 81 | print 'F0.5 =', fbeta_score(y_test_trans, y_pred_trans, 0.5) 82 | export_graphviz(clf_trans, out_file='decisiontree.dot') 83 | 84 | # Sum the importances of all one-hot columns that belong to one original feature 85 | def get_importance(importances, name): 86 | feature_name = np.where(X_train.columns.str.contains(name)) 87 | print name, "importance:", np.sum(importances[feature_name]) 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /P2_Finding_Donors/project_description.md: -------------------------------------------------------------------------------- 1 | # Content: Supervised Learning 2 | ## Project: Finding Donors for CharityML 3 | 4 | ## Project Overview 5 | In this project, you will apply supervised learning techniques and your analytical skills to US census data to help CharityML (a fictitious charity) identify the people most likely to donate to them. You will first explore the data to learn how the census data is recorded. Next, you will apply a series of transformations and preprocessing techniques to get the data into a workable form. You will then evaluate several algorithms of your choice on this data and decide which one is the most suitable. Afterwards, you will optimize the model you have selected for CharityML. Finally, you will explore the chosen model and its predictive power. 6 | 7 | ## Project Highlights 8 | This project is designed to familiarize you with the many supervised learning algorithms available in sklearn, and to provide a method for evaluating how a model performs on a given type of data. In machine learning it is very important to understand precisely when and where a given algorithm should or should not be used. 9 | 10 | By completing this project you will learn the following: 11 | - When preprocessing should be used, and how to do it. 12 | - How to set up a benchmark for a problem. 13 | - How several supervised learning algorithms perform on a specific dataset. 14 | - How to investigate whether a candidate solution model is adequate for the problem. 15 | 16 | ## Software Requirements 17 | 18 | This project requires Python 2.7 and the following Python packages: 19 | 20 | - [Python 2.7](https://www.python.org/download/releases/2.7/) 21 | - [NumPy](http://www.numpy.org/) 22 | - [Pandas](http://pandas.pydata.org/) 23 | - [scikit-learn](http://scikit-learn.org/stable/) 24 | - [matplotlib](http://matplotlib.org/) 25 | 26 | You will also need the software required to run an [iPython Notebook](http://ipython.org/notebook.html) 27 | 28 | Udacity recommends students install [Anaconda](https://www.continuum.io/downloads), a pre-packaged Python distribution that contains all of the libraries and software needed for this project. Please make sure that you install Python 2.7 and not 3.x. 29 | 30 | ## Starting the Project 31 | 32 | For this project, you can find a downloadable `find_donors.zip` in the **Resources** section. *You may also visit our [machine learning projects GitHub](https://github.com/udacity/machine-learning) to get all of the projects in this Nanodegree.* 33 | 34 | This project contains the following files: 35 | 36 | - `find_donors.ipynb`: the main file you will be working in. 37 | - `census.csv`: the dataset used in the project; you will load it in the notebook. 38 | - `visuals.py`: Python code that implements the visualizations. Do not modify it. 39 | 40 | In a terminal or command prompt, navigate to the folder containing the project files, and use the command `jupyter notebook finding_donors.ipynb` to open the notebook in a browser window or tab. Alternatively you can use the command `jupyter notebook` or `ipython notebook` and navigate to the desired folder in the page that opens. Follow the instructions in the notebook and answer each question to successfully complete the project. A **README** file is also provided with the project, containing information and instructions you may need. 41 | 42 | ## Submitting the Project 43 | 44 | ### Evaluation 45 | Your project will be reviewed by a Udacity project reviewer against the **Finding Donors for CharityML project rubric**. Please read this rubric carefully and do a thorough self-evaluation before submitting. Every item in the rubric must be marked *meeting specifications* for you to pass. 46 | 47 | ### Files to Submit 48 | When you are ready to submit your project, collect the following files and compress them into a single archive for upload. Alternatively, you may provide the following files in a folder named `finding_donors` in your GitHub repo to make the review easier: 49 | - The `finding_donors.ipynb` file, with every question answered and every code cell executed and showing its output. 50 | - An **HTML** file named **report.html**, exported from the project notebook. This file *must* be provided. 51 | 52 | Once you have collected these files and read the project rubric, proceed to the project submission page. 53 | 54 | ### I'm Ready! 55 | When you are ready to submit your project, click the **Submit Project** button at the bottom of the page. 56 | 57 | If you run into any problems submitting your project, or if you would like to check the status of your submission, email **machine-support@udacity.com** or visit the forums. 58 | 59 | ### What's Next?
60 | You will receive a notification email as soon as your project reviewer has responded. In the meantime, you can start working on your next project or continue with the related lessons. -------------------------------------------------------------------------------- /P2_Finding_Donors/tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P2_Finding_Donors/tree.png -------------------------------------------------------------------------------- /P2_Finding_Donors/visuals.py: -------------------------------------------------------------------------------- 1 | ########################################### 2 | # Suppress matplotlib user warnings 3 | # Necessary for newer version of matplotlib 4 | import warnings 5 | warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib") 6 | # 7 | # Display inline matplotlib plots with IPython 8 | from IPython import get_ipython 9 | get_ipython().run_line_magic('matplotlib', 'inline') 10 | ########################################### 11 | 12 | import matplotlib.pyplot as pl 13 | import matplotlib.patches as mpatches 14 | import numpy as np 15 | import pandas as pd 16 | from time import time 17 | from sklearn.metrics import f1_score, accuracy_score 18 | 19 | 20 | def distribution(data, transformed = False): 21 | """ 22 | Visualization code for displaying skewed distributions of features 23 | """ 24 | 25 | # Create figure 26 | fig = pl.figure(figsize = (11,5)); 27 | 28 | # Skewed feature plotting 29 | for i, feature in enumerate(['capital-gain','capital-loss']): 30 | ax = fig.add_subplot(1, 2, i+1) 31 | ax.hist(data[feature], bins = 25, color = '#00A0A0') 32 | ax.set_title("'%s' Feature Distribution"%(feature), fontsize = 14) 33 | ax.set_xlabel("Value") 34 | ax.set_ylabel("Number of Records") 35 | ax.set_ylim((0, 2000)) 36 | ax.set_yticks([0, 500, 1000, 1500, 2000]) 37 | ax.set_yticklabels([0, 500, 1000, 1500, ">2000"]) 38 | 39 | # Plot aesthetics 40 | if transformed: 41 | fig.suptitle("Log-transformed Distributions of Continuous Census Data Features", \ 42 | fontsize = 16, y = 1.03) 43 | else: 44 | fig.suptitle("Skewed Distributions of Continuous Census Data Features", \ 45 | fontsize = 16, y = 1.03) 46 | 47 | fig.tight_layout() 48 | fig.show() 49 | 50 | 51 | def evaluate(results, accuracy, f1): 52 | """ 53 | Visualization code to display results of various learners.
54 | 55 | inputs: 56 | - results: a dictionary of dictionaries of the statistic results from 'train_predict()', 57 | keyed by learner name and training set size index 58 | - accuracy: The score for the naive predictor 59 | - f1: The score for the naive predictor 60 | """ 61 | 62 | # Create figure 63 | fig, ax = pl.subplots(2, 3, figsize = (11,7)) 64 | 65 | # Constants 66 | bar_width = 0.3 67 | colors = ['#A00000','#00A0A0','#00A000'] 68 | 69 | # Super loop to plot the six panels of data 70 | for k, learner in enumerate(results.keys()): 71 | for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']): 72 | for i in np.arange(3): 73 | 74 | # Creative plot code 75 | ax[j//3, j%3].bar(i+k*bar_width, results[learner][i][metric], width = bar_width, color = colors[k]) 76 | ax[j//3, j%3].set_xticks([0.45, 1.45, 2.45]) 77 | ax[j//3, j%3].set_xticklabels(["1%", "10%", "100%"]) 78 | ax[j//3, j%3].set_xlabel("Training Set Size") 79 | ax[j//3, j%3].set_xlim((-0.1, 3.0)) 80 | 81 | # Add unique y-labels 82 | ax[0, 0].set_ylabel("Time (in seconds)") 83 | ax[0, 1].set_ylabel("Accuracy Score") 84 | ax[0, 2].set_ylabel("F-score") 85 | ax[1, 0].set_ylabel("Time (in seconds)") 86 | ax[1, 1].set_ylabel("Accuracy Score") 87 | ax[1, 2].set_ylabel("F-score") 88 | 89 | # Add titles 90 | ax[0, 0].set_title("Model Training") 91 | ax[0, 1].set_title("Accuracy Score on Training Subset") 92 | ax[0, 2].set_title("F-score on Training Subset") 93 | ax[1, 0].set_title("Model Predicting") 94 | ax[1, 1].set_title("Accuracy Score on Testing Set") 95 | ax[1, 2].set_title("F-score on Testing Set") 96 | 97 | # Add horizontal lines for naive predictors 98 | ax[0, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed') 99 | ax[1, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed') 100 | ax[0, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed') 101 | ax[1, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed') 102 | 103 | # Set y-limits for score panels 104 | ax[0, 1].set_ylim((0, 1)) 105 | ax[0, 2].set_ylim((0, 1)) 106 | ax[1, 1].set_ylim((0, 1)) 107 | ax[1, 2].set_ylim((0, 1)) 108 | 109 | # Create patches for the legend 110 | patches = [] 111 | for i, learner in enumerate(results.keys()): 112 | patches.append(mpatches.Patch(color = colors[i], label = learner)) 113 | pl.legend(handles = patches, bbox_to_anchor = (-.80, 2.53), \ 114 | loc = 'upper center', borderaxespad = 0., ncol = 3, fontsize = 'x-large') 115 | 116 | # Aesthetics 117 | pl.suptitle("Performance Metrics for Three Supervised Learning Models", fontsize = 16, y = 1.10) 118 | pl.tight_layout() 119 | pl.show() 120 | 121 | 122 | def feature_plot(importances, X_train, y_train): 123 | 124 | # Display the five most important features 125 | indices = np.argsort(importances)[::-1] 126 | columns = X_train.columns.values[indices[:5]] 127 | values = importances[indices][:5] 128 | 129 | # Create the plot 130 | fig = pl.figure(figsize = (9,5)) 131 | pl.title("Normalized Weights for First Five Most Predictive Features", fontsize = 16) 132 | pl.bar(np.arange(5), values, width = 0.6, align="center", color = '#00A000', \ 133 | label = "Feature Weight") 134 | pl.bar(np.arange(5) - 0.3, np.cumsum(values), width = 0.2, align = "center", color = '#00A0A0', \ 135 | label = "Cumulative Feature Weight") 136 | pl.xticks(np.arange(5), columns) 137 | pl.xlim((-0.5, 4.5)) 138 | pl.ylabel("Weight", fontsize = 12)
139 | pl.xlabel("Feature", fontsize = 12) 140 | 141 | pl.legend(loc = 'upper center') 142 | pl.tight_layout() 143 | pl.show() 144 | -------------------------------------------------------------------------------- /P3_Create_Customer_Segments/Mini_Project_Eigenfaces.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | from time import time 3 | import logging 4 | import pylab as pl 5 | import numpy as np 6 | 7 | from sklearn.datasets import fetch_lfw_people 8 | from sklearn.model_selection import train_test_split 9 | from sklearn.model_selection import GridSearchCV 10 | from sklearn.metrics import classification_report 11 | from sklearn.metrics import confusion_matrix 12 | from sklearn.decomposition import PCA 13 | from sklearn.svm import SVC 14 | 15 | # Before running: download the image data to the user home directory and install PIL. 16 | 17 | ############################################################################### 18 | # 0. Load the data as numpy arrays; keep only people with more than 70 pictures (easy mode) and rescale every image 19 | np.random.seed(42) 20 | lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4) 21 | 22 | # Input feature data 23 | X = lfw_people.data 24 | n_features = X.shape[1] 25 | n_samples, h, w = lfw_people.images.shape 26 | 27 | # Output data: person id (only 7 people have more than 70 pictures); the label to predict is the id of the person 28 | y = lfw_people.target 29 | target_names = lfw_people.target_names 30 | n_classes = target_names.shape[0] 31 | 32 | print "Total dataset size:" 33 | print "Number of samples n_samples: %d" % n_samples 34 | print "Number of features n_features: %d" % n_features 35 | print "Number of classes n_classes: %d" % n_classes 36 | 37 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42) 38 | 39 | ############################################################################### 40 | # 1. PCA: from the 1850 original features, extract the 150 most important principal components; apply the PCA transform to both the training and test sets. 41 | # Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled 42 | # dataset): unsupervised feature extraction / dimensionality reduction 43 | 44 | # Watch how one person's score changes as the number of components grows: it improves first, then degrades 45 | # Ariel Sharon's F1 Score 46 | # 10: 0.11 47 | # 15: 0.34 48 | # 20: 0.40 49 | # 25: 0.60 50 | # 50: 0.74 51 | # 100: 0.71 52 | # 150: 0.72 53 | 54 | n_components = 150 55 | print "Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0]) 56 | t0 = time() 57 | pca = PCA(svd_solver='randomized', n_components=n_components, whiten=True) 58 | pca.fit(X_train) 59 | print "done in %0.3fs" % (time() - t0) 60 | print "Projecting the input data on the eigenfaces orthonormal basis" 61 | t0 = time() 62 | 63 | X_train_pca = pca.transform(X_train) 64 | X_test_pca = pca.transform(X_test) 65 | print "done in %0.3fs" % (time() - t0) 66 | eigenfaces = pca.components_.reshape((n_components, h, w)) 67 | 68 | 69 | ############################################################################### 70 | # 2. Feed the transformed data into an SVM for training 71 | # Train a SVM classification model 72 | print "Fitting the classifier to the training set" 73 | t0 = time() 74 | param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5], 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]} 75 | clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid) 76 | clf = clf.fit(X_train_pca, y_train) 77 | print "done in %0.3fs" % (time() - t0) 78 | print "Best estimator found by grid search:" 79 | print clf.best_estimator_ 80 | 81 | 82 | ############################################################################### 83 | # 3. Evaluate the model on the test set (quantitative evaluation)
84 | # Quantitative evaluation of the model quality on the test set 85 | 86 | print "Predicting the people names on the testing set" 87 | t0 = time() 88 | y_pred = clf.predict(X_test_pca) 89 | print "done in %0.3fs" % (time() - t0) 90 | 91 | print 'classification report: ' 92 | print classification_report(y_test, y_pred, target_names=target_names) 93 | print confusion_matrix(y_test, y_pred, labels=range(n_classes)) 94 | 95 | 96 | ############################################################################### 97 | # 4. Visual evaluation of the model predictions 98 | # Qualitative evaluation of the predictions using matplotlib 99 | 100 | def plot_gallery(images, titles, h, w, n_row=3, n_col=4): 101 | """Helper function to plot a gallery of portraits""" 102 | pl.figure(figsize=(1.8 * n_col, 2.4 * n_row)) 103 | pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) 104 | for i in range(n_row * n_col): 105 | pl.subplot(n_row, n_col, i + 1) 106 | pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray) 107 | pl.title(titles[i], size=12) 108 | pl.xticks(()) 109 | pl.yticks(()) 110 | 111 | 112 | # plot the result of the prediction on a portion of the test set 113 | def title(y_pred, y_test, target_names, i): 114 | pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1] 115 | true_name = target_names[y_test[i]].rsplit(' ', 1)[-1] 116 | return 'predicted: %s\ntrue: %s' % (pred_name, true_name) 117 | 118 | prediction_titles = [title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])] 119 | plot_gallery(X_test, prediction_titles, h, w) 120 | 121 | # plot the gallery of the most significant eigenfaces 122 | eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])] 123 | plot_gallery(eigenfaces, eigenface_titles, h, w) 124 | 125 | pl.show() -------------------------------------------------------------------------------- /P3_Create_Customer_Segments/PCA - GMM preds.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P3_Create_Customer_Segments/PCA - GMM preds.png -------------------------------------------------------------------------------- /P3_Create_Customer_Segments/PCA - GMM preds_prob.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P3_Create_Customer_Segments/PCA - GMM preds_prob.png -------------------------------------------------------------------------------- /P3_Create_Customer_Segments/README.md: -------------------------------------------------------------------------------- 1 | # Project 3: Unsupervised Learning 2 | ## Creating Customer Segments 3 | 4 | ### Install 5 | 6 | This project requires **Python 2.7** and the following Python packages: 7 | 8 | - [NumPy](http://www.numpy.org/) 9 | - [Pandas](http://pandas.pydata.org) 10 | - [scikit-learn](http://scikit-learn.org/stable/) 11 | 12 | You will also need the software required to run [Jupyter Notebook](http://jupyter.org/). 13 | 14 | Udacity recommends students install [Anaconda](https://www.continuum.io/downloads), a pre-packaged Python distribution that contains all of the libraries and software needed for this project. 15 | 16 | ### Code 17 | 18 | The starter code is contained in the `customer_segments.ipynb` notebook file. Some code has already been implemented to help you get started, but you will need to implement additional functionality to complete the project. 19 | 20 | ### Run 21 | 22 | In a terminal or command window, make sure the current directory is the top level of the project folder containing `customer_segments.ipynb` (the directory that contains this README file), and run the following command: 23 | 24 | ```jupyter notebook customer_segments.ipynb``` 25 | 26 | This will launch Jupyter Notebook and open the project file in your browser. 27 | 28 | ## Data 29 | 30 | The data for this project is contained in the `customers.csv` file. You can find more information on the [UCI
Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers) page. 31 | -------------------------------------------------------------------------------- /P3_Create_Customer_Segments/visuals.py: -------------------------------------------------------------------------------- 1 | ########################################### 2 | # Suppress matplotlib user warnings 3 | # Necessary for newer version of matplotlib 4 | import warnings 5 | warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib") 6 | # 7 | # Display inline matplotlib plots with IPython 8 | from IPython import get_ipython 9 | get_ipython().run_line_magic('matplotlib', 'inline') 10 | ########################################### 11 | 12 | import matplotlib.pyplot as plt 13 | import matplotlib.cm as cm 14 | import pandas as pd 15 | import numpy as np 16 | 17 | def pca_results(good_data, pca): 18 | ''' 19 | Create a DataFrame of the PCA results 20 | Includes dimension feature weights and explained variance 21 | Visualizes the PCA results 22 | ''' 23 | 24 | # Dimension indexing 25 | dimensions = ['Dimension {}'.format(i) for i in range(1,len(pca.components_)+1)] 26 | 27 | # PCA components 28 | components = pd.DataFrame(np.round(pca.components_, 4), columns = good_data.keys()) 29 | components.index = dimensions 30 | 31 | # PCA explained variance 32 | ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1) 33 | variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance']) 34 | variance_ratios.index = dimensions 35 | 36 | # Create a bar plot visualization 37 | fig, ax = plt.subplots(figsize = (14,8)) 38 | 39 | # Plot the feature weights as a function of the components 40 | components.plot(ax = ax, kind = 'bar'); 41 | ax.set_ylabel("Feature Weights") 42 | ax.set_xticklabels(dimensions, rotation=0) 43 | 44 | 45 | # Display the explained variance ratios 46 | for i, ev in enumerate(pca.explained_variance_ratio_): 47 | ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev)) 48 | 49 | # Return a concatenated DataFrame 50 | return pd.concat([variance_ratios, components], axis = 1) 51 | 52 | def cluster_results(reduced_data, preds, centers, pca_samples): 53 | ''' 54 | Visualizes the PCA-reduced cluster data in two dimensions 55 | Adds cues for cluster centers and student-selected sample data 56 | ''' 57 | 58 | predictions = pd.DataFrame(preds, columns = ['Cluster']) 59 | plot_data = pd.concat([predictions, reduced_data], axis = 1) 60 | 61 | # Generate the cluster plot 62 | fig, ax = plt.subplots(figsize = (14,8)) 63 | 64 | # Color map 65 | cmap = cm.get_cmap('gist_rainbow') 66 | 67 | # Color the points based on assigned cluster 68 | for i, cluster in plot_data.groupby('Cluster'): 69 | cluster.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \ 70 | color = cmap((i)*1.0/(len(centers)-1)), label = 'Cluster %i'%(i), s=30); 71 | 72 | # Plot centers with indicators 73 | for i, c in enumerate(centers): 74 | ax.scatter(x = c[0], y = c[1], color = 'white', edgecolors = 'black', \ 75 | alpha = 1, linewidth = 2, marker = 'o', s=200); 76 | ax.scatter(x = c[0], y = c[1], marker='$%d$'%(i), alpha = 1, s=100); 77 | 78 | # Plot transformed sample points 79 | ax.scatter(x = pca_samples[:,0], y = pca_samples[:,1], \ 80 | s = 150, linewidth = 4, color = 'black', marker = 'x'); 81 | 82 | # Set plot title 83 | ax.set_title("Cluster Learning on PCA-Reduced Data - Centroids Marked by Number\nTransformed Sample Data Marked by Black Cross");
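# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the original module).
# A notebook might call the helpers above roughly like this; `good_data` and
# `samples` are assumed to be the log-transformed, outlier-free spending
# DataFrame and a few hand-picked rows, so all names here are placeholders.
#
# from sklearn.decomposition import PCA
# from sklearn.mixture import GaussianMixture
# pca = PCA(n_components=2).fit(good_data)
# reduced_data = pd.DataFrame(pca.transform(good_data), columns=['Dimension 1', 'Dimension 2'])
# clusterer = GaussianMixture(n_components=2).fit(reduced_data)
# preds = clusterer.predict(reduced_data)
# cluster_results(reduced_data, preds, clusterer.means_, pca.transform(samples))
# ---------------------------------------------------------------------------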
85 | 86 | def biplot(good_data, reduced_data, pca): 87 | ''' 88 | Produce a biplot that shows a scatterplot of the reduced 89 | data and the projections of the original features. 90 | 91 | good_data: original data, before transformation. 92 | Needs to be a pandas dataframe with valid column names 93 | reduced_data: the reduced data (the first two dimensions are plotted) 94 | pca: pca object that contains the components_ attribute 95 | 96 | return: a matplotlib AxesSubplot object (for any additional customization) 97 | 98 | This procedure is inspired by the script: 99 | https://github.com/teddyroland/python-biplot 100 | ''' 101 | 102 | fig, ax = plt.subplots(figsize = (14,8)) 103 | # scatterplot of the reduced data 104 | ax.scatter(x=reduced_data.loc[:, 'Dimension 1'], y=reduced_data.loc[:, 'Dimension 2'], 105 | facecolors='b', edgecolors='b', s=70, alpha=0.5) 106 | 107 | feature_vectors = pca.components_.T 108 | 109 | # we use scaling factors to make the arrows easier to see 110 | arrow_size, text_pos = 7.0, 8.0, 111 | 112 | # projections of the original features 113 | for i, v in enumerate(feature_vectors): 114 | ax.arrow(0, 0, arrow_size*v[0], arrow_size*v[1], 115 | head_width=0.2, head_length=0.2, linewidth=2, color='red') 116 | ax.text(v[0]*text_pos, v[1]*text_pos, good_data.columns[i], color='black', 117 | ha='center', va='center', fontsize=18) 118 | 119 | ax.set_xlabel("Dimension 1", fontsize=14) 120 | ax.set_ylabel("Dimension 2", fontsize=14) 121 | ax.set_title("PC plane with original feature projections.", fontsize=16); 122 | return ax 123 | 124 | 125 | def channel_results(reduced_data, outliers, pca_samples): 126 | ''' 127 | Visualizes the PCA-reduced cluster data in two dimensions using the full dataset 128 | Data is labeled by "Channel" and cues added for student-selected sample data 129 | ''' 130 | 131 | # Check that the dataset is loadable 132 | try: 133 | full_data = pd.read_csv("customers.csv") 134 | except: 135 | print "Dataset could not be loaded. Is the file missing?" 
136 | return False 137 | 138 | # Create the Channel DataFrame 139 | channel = pd.DataFrame(full_data['Channel'], columns = ['Channel']) 140 | channel = channel.drop(channel.index[outliers]).reset_index(drop = True) 141 | labeled = pd.concat([reduced_data, channel], axis = 1) 142 | 143 | # Generate the cluster plot 144 | fig, ax = plt.subplots(figsize = (14,8)) 145 | 146 | # Color map 147 | cmap = cm.get_cmap('gist_rainbow') 148 | 149 | # Color the points based on assigned Channel 150 | labels = ['Hotel/Restaurant/Cafe', 'Retailer'] 151 | grouped = labeled.groupby('Channel') 152 | for i, channel in grouped: 153 | channel.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \ 154 | color = cmap((i-1)*1.0/2), label = labels[i-1], s=30); 155 | 156 | # Plot transformed sample points 157 | for i, sample in enumerate(pca_samples): 158 | ax.scatter(x = sample[0], y = sample[1], \ 159 | s = 200, linewidth = 3, color = 'black', marker = 'o', facecolors = 'none'); 160 | ax.scatter(x = sample[0]+0.25, y = sample[1]+0.3, marker='$%d$'%(i), alpha = 1, s=125); 161 | 162 | # Set plot title 163 | ax.set_title("PCA-Reduced Data Labeled by 'Channel'\nTransformed Sample Data Circled"); -------------------------------------------------------------------------------- /P4_Smart_Cab/README.md: -------------------------------------------------------------------------------- 1 | # Machine Learning Engineer Nanodegree 2 | # Reinforcement Learning 3 | ## Project: Train a Smartcab How to Drive 4 | 5 | ### Install 6 | 7 | This project requires **Python 2.7** with the [pygame](https://www.pygame.org/wiki/GettingStarted) library installed 8 | 9 | 10 | ### Code 11 | 12 | Template code is provided in the `smartcab/agent.py` python file. Additional supporting python code can be found in `smartcab/environment.py`, `smartcab/planner.py`, and `smartcab/simulator.py`. Supporting images for the graphical user interface can be found in the `images` folder. While some code has already been implemented to get you started, you will need to implement additional functionality for the `LearningAgent` class in `agent.py` when requested to successfully complete the project. 13 | 14 | ### Run 15 | 16 | In a terminal or command window, navigate to the top-level project directory `smartcab/` (that contains this README) and run one of the following commands: 17 | 18 | ```python smartcab/agent.py``` 19 | ```python -m smartcab.agent``` 20 | 21 | This will run the `agent.py` file and execute your agent code.
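The filenames under `logs/` below reference two exploration-decay schedules, `decay_step` and `decay_exponential`, and the training CSVs log an epsilon `'e'` that drops by 0.05 per trial in the default setup. A minimal sketch of what such schedules could look like; the function names mirror the log labels, but the exact signatures and constants used in `agent.py` are assumptions here, not the project's confirmed implementation:

```python
def decay_step(epsilon, step=0.05):
    # Step (linear) decay: subtract a fixed amount each training trial,
    # matching the 0.95, 0.90, 0.85, ... sequence visible in the CSV logs.
    return max(epsilon - step, 0.0)

def decay_exponential(trial, a=0.99):
    # Exponential decay: epsilon = a^t for training trial t, so exploration
    # tapers off smoothly instead of hitting zero after a fixed trial count.
    return a ** trial
```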
22 | -------------------------------------------------------------------------------- /P4_Smart_Cab/images/car-black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/car-black.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/car-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/car-blue.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/car-cyan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/car-cyan.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/car-green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/car-green.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/car-magenta.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/car-magenta.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/car-orange.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/car-orange.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/car-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/car-red.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/car-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/car-white.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/car-yellow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/car-yellow.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/east-west.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/east-west.png -------------------------------------------------------------------------------- /P4_Smart_Cab/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/logo.png 
-------------------------------------------------------------------------------- /P4_Smart_Cab/images/north-south.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/images/north-south.png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_default-learning (4 Features, Training 20 rounds, Score FA).csv: -------------------------------------------------------------------------------- 1 | trial,testing,parameters,initial_deadline,final_deadline,net_reward,actions,success,actions-0,actions-1,actions-2,actions-3,actions-4 2 | 1,False,"{'a': 0.5, 'e': 0.95}",25,0,-96.02831145637084,"{0: 13, 1: 4, 2: 7, 3: 1, 4: 0}",0,13,4,7,1,0 3 | 2,False,"{'a': 0.5, 'e': 0.8999999999999999}",20,0,-84.86387491786364,"{0: 13, 1: 0, 2: 6, 3: 0, 4: 1}",0,13,0,6,0,1 4 | 3,False,"{'a': 0.5, 'e': 0.8499999999999999}",20,0,-27.315589642672865,"{0: 14, 1: 3, 2: 3, 3: 0, 4: 0}",0,14,3,3,0,0 5 | 4,False,"{'a': 0.5, 'e': 0.7999999999999998}",20,0,-100.11375802248132,"{0: 15, 1: 0, 2: 2, 3: 1, 4: 2}",0,15,0,2,1,2 6 | 5,False,"{'a': 0.5, 'e': 0.7499999999999998}",20,0,-180.14643131327233,"{0: 13, 1: 1, 2: 1, 3: 1, 4: 4}",0,13,1,1,1,4 7 | 6,False,"{'a': 0.5, 'e': 0.6999999999999997}",30,3,-42.36993788242807,"{0: 20, 1: 1, 2: 5, 3: 1, 4: 0}",1,20,1,5,1,0 8 | 7,False,"{'a': 0.5, 'e': 0.6499999999999997}",25,0,-48.943334937870404,"{0: 20, 1: 2, 2: 2, 3: 0, 4: 1}",0,20,2,2,0,1 9 | 8,False,"{'a': 0.5, 'e': 0.5999999999999996}",35,0,-154.57078761046088,"{0: 23, 1: 1, 2: 7, 3: 3, 4: 1}",0,23,1,7,3,1 10 | 9,False,"{'a': 0.5, 'e': 0.5499999999999996}",30,0,-265.7023047992192,"{0: 20, 1: 1, 2: 2, 3: 1, 4: 6}",0,20,1,2,1,6 11 | 10,False,"{'a': 0.5, 'e': 0.4999999999999996}",30,11,20.656241641563895,"{0: 18, 1: 0, 2: 1, 3: 0, 4: 0}",1,18,0,1,0,0 12 | 11,False,"{'a': 0.5, 'e': 0.4499999999999996}",20,0,-38.52429027938823,"{0: 16, 1: 1, 2: 2, 3: 0, 4: 1}",0,16,1,2,0,1 13 | 12,False,"{'a': 0.5, 'e': 0.39999999999999963}",25,0,-63.19821850369701,"{0: 21, 1: 1, 2: 0, 3: 2, 4: 1}",0,21,1,0,2,1 14 | 13,False,"{'a': 0.5, 'e': 0.34999999999999964}",25,4,-69.07705813048233,"{0: 17, 1: 1, 2: 1, 3: 0, 4: 2}",1,17,1,1,0,2 15 | 14,False,"{'a': 0.5, 'e': 0.29999999999999966}",30,12,-4.412135716435037,"{0: 16, 1: 0, 2: 1, 3: 1, 4: 0}",1,16,0,1,1,0 16 | 15,False,"{'a': 0.5, 'e': 0.24999999999999967}",20,0,-0.38039617057815467,"{0: 17, 1: 2, 2: 1, 3: 0, 4: 0}",0,17,2,1,0,0 17 | 16,False,"{'a': 0.5, 'e': 0.19999999999999968}",20,0,-14.60820376616631,"{0: 16, 1: 1, 2: 3, 3: 0, 4: 0}",0,16,1,3,0,0 18 | 17,False,"{'a': 0.5, 'e': 0.1499999999999997}",25,13,2.991632046765857,"{0: 10, 1: 0, 2: 2, 3: 0, 4: 0}",1,10,0,2,0,0 19 | 18,False,"{'a': 0.5, 'e': 0.09999999999999969}",20,9,7.0560613596587265,"{0: 10, 1: 0, 2: 1, 3: 0, 4: 0}",1,10,0,1,0,0 20 | 19,False,"{'a': 0.5, 'e': 0.049999999999999684}",20,7,-17.597563446151902,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 1}",1,12,0,0,0,1 21 | 20,False,"{'a': 0.5, 'e': -3.191891195797325e-16}",25,15,18.616505038195967,"{0: 10, 1: 0, 2: 0, 3: 0, 4: 0}",1,10,0,0,0,0 22 | 1,True,"{'a': 0.5, 'e': -0.05000000000000032}",20,11,3.3218600328551133,"{0: 8, 1: 0, 2: 1, 3: 0, 4: 0}",1,8,0,1,0,0 23 | 2,True,"{'a': 0.5, 'e': -0.10000000000000032}",20,0,32.819753968048644,"{0: 20, 1: 0, 2: 0, 3: 0, 4: 0}",0,20,0,0,0,0 24 | 3,True,"{'a': 0.5, 'e': -0.15000000000000033}",25,0,40.843157046765604,"{0: 25, 1: 0, 2: 0, 3: 0, 4: 0}",0,25,0,0,0,0 25 | 4,True,"{'a': 0.5, 
'e': -0.20000000000000034}",30,0,16.03240594807972,"{0: 28, 1: 0, 2: 2, 3: 0, 4: 0}",0,28,0,2,0,0 26 | 5,True,"{'a': 0.5, 'e': -0.25000000000000033}",20,2,18.861236150835406,"{0: 17, 1: 0, 2: 1, 3: 0, 4: 0}",1,17,0,1,0,0 27 | 6,True,"{'a': 0.5, 'e': -0.3000000000000003}",25,8,29.859436602201413,"{0: 17, 1: 0, 2: 0, 3: 0, 4: 0}",1,17,0,0,0,0 28 | 7,True,"{'a': 0.5, 'e': -0.3500000000000003}",30,16,21.603741975213477,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 29 | 8,True,"{'a': 0.5, 'e': -0.4000000000000003}",30,17,27.398507537539615,"{0: 13, 1: 0, 2: 0, 3: 0, 4: 0}",1,13,0,0,0,0 30 | 9,True,"{'a': 0.5, 'e': -0.4500000000000003}",35,18,33.442087156286846,"{0: 17, 1: 0, 2: 0, 3: 0, 4: 0}",1,17,0,0,0,0 31 | 10,True,"{'a': 0.5, 'e': -0.5000000000000003}",25,14,25.051600645050996,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 32 | -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_default-learning (4 Features, Training 20 rounds, Score FA).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_default-learning (4 Features, Training 20 rounds, Score FA).png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_default-learning (4 Features, Training 20 rounds, Score FA).txt: -------------------------------------------------------------------------------- 1 | /----------------------------------------- 2 | | State-action rewards from Q-Learning 3 | \----------------------------------------- 4 | 5 | ('red', 'forward', 'right', 'right') 6 | -- forward : -9.10 7 | -- None : 0.00 8 | -- right : 0.00 9 | -- left : 0.00 10 | 11 | ('red', None, 'right', 'right') 12 | -- forward : -40.66 13 | -- None : 2.41 14 | -- right : 0.00 15 | -- left : 0.00 16 | 17 | ('green', None, 'left', 'left') 18 | -- forward : 0.69 19 | -- None : 0.00 20 | -- right : 0.00 21 | -- left : 2.40 22 | 23 | ('green', 'forward', 'left', 'right') 24 | -- forward : 0.00 25 | -- None : -5.64 26 | -- right : 0.00 27 | -- left : 0.00 28 | 29 | ('green', 'right', 'right', 'right') 30 | -- forward : 1.03 31 | -- None : 0.00 32 | -- right : 0.00 33 | -- left : 0.00 34 | 35 | ('red', 'left', 'right', 'right') 36 | -- forward : -9.53 37 | -- None : 0.00 38 | -- right : 0.00 39 | -- left : 0.00 40 | 41 | ('red', None, 'forward', 'left') 42 | -- forward : 0.00 43 | -- None : 2.47 44 | -- right : -20.86 45 | -- left : 0.00 46 | 47 | ('green', 'left', 'left', 'forward') 48 | -- forward : 1.56 49 | -- None : 0.00 50 | -- right : 0.00 51 | -- left : -0.03 52 | 53 | ('red', 'left', None, 'left') 54 | -- forward : -40.83 55 | -- None : 2.53 56 | -- right : -0.29 57 | -- left : -10.67 58 | 59 | ('green', 'left', None, 'forward') 60 | -- forward : 1.68 61 | -- None : 0.63 62 | -- right : -0.49 63 | -- left : 0.44 64 | 65 | ('green', 'forward', None, 'forward') 66 | -- forward : 2.40 67 | -- None : -5.98 68 | -- right : 0.00 69 | -- left : -20.83 70 | 71 | ('green', 'forward', 'left', 'left') 72 | -- forward : 0.35 73 | -- None : 0.00 74 | -- right : 0.62 75 | -- left : 0.00 76 | 77 | ('red', 'left', None, 'right') 78 | -- forward : -10.49 79 | -- None : 1.49 80 | -- right : 1.37 81 | -- left : 0.00 82 | 83 | ('green', 'right', None, 'right') 84 | -- forward : -0.11 85 | -- None : 0.00 86 | -- right : 2.05 87 | -- left : -19.80 88 | 89 | ('red', 'left', 'left', 'forward') 90 | -- forward : -9.92 91 | -- None : 
1.83 92 | -- right : 0.00 93 | -- left : 0.00 94 | 95 | ('red', None, None, 'forward') 96 | -- forward : -39.09 97 | -- None : 2.27 98 | -- right : 0.77 99 | -- left : -10.09 100 | 101 | ('red', 'left', 'left', 'left') 102 | -- forward : -10.34 103 | -- None : 2.18 104 | -- right : 0.00 105 | -- left : 0.00 106 | 107 | ('green', 'forward', 'forward', 'forward') 108 | -- forward : 0.00 109 | -- None : 0.00 110 | -- right : 0.33 111 | -- left : 0.00 112 | 113 | ('red', 'left', 'right', 'forward') 114 | -- forward : -9.47 115 | -- None : 2.78 116 | -- right : -0.49 117 | -- left : 0.00 118 | 119 | ('green', None, 'right', 'right') 120 | -- forward : 0.00 121 | -- None : 0.00 122 | -- right : 1.01 123 | -- left : 0.91 124 | 125 | ('green', 'forward', None, 'left') 126 | -- forward : 1.01 127 | -- None : 0.00 128 | -- right : 0.00 129 | -- left : -19.65 130 | 131 | ('green', 'left', 'right', 'forward') 132 | -- forward : 2.27 133 | -- None : 0.00 134 | -- right : 0.00 135 | -- left : 0.00 136 | 137 | ('green', 'right', None, 'forward') 138 | -- forward : 1.75 139 | -- None : 0.00 140 | -- right : 0.00 141 | -- left : 0.00 142 | 143 | ('red', 'right', None, 'forward') 144 | -- forward : -9.54 145 | -- None : 1.52 146 | -- right : 0.00 147 | -- left : 0.00 148 | 149 | ('red', 'left', None, 'forward') 150 | -- forward : -9.44 151 | -- None : 1.92 152 | -- right : 0.10 153 | -- left : -10.87 154 | 155 | ('green', 'forward', 'right', 'forward') 156 | -- forward : 2.91 157 | -- None : -4.21 158 | -- right : 0.00 159 | -- left : -20.69 160 | 161 | ('red', 'forward', 'left', 'right') 162 | -- forward : -9.46 163 | -- None : 2.95 164 | -- right : 0.00 165 | -- left : -39.35 166 | 167 | ('red', 'left', 'left', 'right') 168 | -- forward : 0.00 169 | -- None : 1.42 170 | -- right : 0.00 171 | -- left : 0.00 172 | 173 | ('red', 'forward', None, 'left') 174 | -- forward : -10.21 175 | -- None : 0.00 176 | -- right : 1.36 177 | -- left : 0.00 178 | 179 | ('red', None, 'forward', 'forward') 180 | -- forward : -40.45 181 | -- None : 2.13 182 | -- right : -20.65 183 | -- left : 0.00 184 | 185 | ('green', 'left', None, 'right') 186 | -- forward : 0.44 187 | -- None : 0.36 188 | -- right : 1.94 189 | -- left : 0.35 190 | 191 | ('green', 'right', None, 'left') 192 | -- forward : 0.00 193 | -- None : -5.88 194 | -- right : 0.36 195 | -- left : 0.00 196 | 197 | ('green', 'left', 'forward', 'left') 198 | -- forward : -0.03 199 | -- None : 0.00 200 | -- right : 0.00 201 | -- left : 0.00 202 | 203 | ('green', 'forward', 'left', 'forward') 204 | -- forward : 1.12 205 | -- None : 0.00 206 | -- right : 0.00 207 | -- left : 0.00 208 | 209 | ('red', None, None, 'left') 210 | -- forward : -10.78 211 | -- None : 2.08 212 | -- right : 0.64 213 | -- left : -9.21 214 | 215 | ('red', 'forward', 'right', 'forward') 216 | -- forward : 0.00 217 | -- None : 0.00 218 | -- right : 1.52 219 | -- left : -10.86 220 | 221 | ('red', 'left', 'forward', 'right') 222 | -- forward : -39.54 223 | -- None : 0.00 224 | -- right : 0.00 225 | -- left : 0.00 226 | 227 | ('green', None, None, 'left') 228 | -- forward : -0.19 229 | -- None : -4.06 230 | -- right : 0.02 231 | -- left : 2.55 232 | 233 | ('red', 'forward', 'forward', 'forward') 234 | -- forward : -40.88 235 | -- None : 1.60 236 | -- right : -19.59 237 | -- left : 0.00 238 | 239 | ('green', 'right', 'forward', 'right') 240 | -- forward : 0.00 241 | -- None : 0.00 242 | -- right : 0.00 243 | -- left : -20.82 244 | 245 | ('green', 'left', 'right', 'left') 246 | -- forward : 1.30 247 | -- None 
: 0.00 248 | -- right : 0.00 249 | -- left : 0.00 250 | 251 | ('red', 'right', None, 'right') 252 | -- forward : 0.00 253 | -- None : 0.00 254 | -- right : 0.84 255 | -- left : 0.00 256 | 257 | ('red', 'right', 'left', 'left') 258 | -- forward : -10.99 259 | -- None : 0.00 260 | -- right : 0.00 261 | -- left : 0.00 262 | 263 | ('green', None, 'left', 'right') 264 | -- forward : -0.36 265 | -- None : -5.86 266 | -- right : 2.51 267 | -- left : 0.00 268 | 269 | ('red', None, 'right', 'forward') 270 | -- forward : -39.51 271 | -- None : 1.78 272 | -- right : 0.00 273 | -- left : 0.00 274 | 275 | ('green', None, 'right', 'left') 276 | -- forward : 0.47 277 | -- None : 0.00 278 | -- right : 0.00 279 | -- left : 0.00 280 | 281 | ('red', 'forward', 'forward', 'right') 282 | -- forward : -39.72 283 | -- None : 2.51 284 | -- right : 0.00 285 | -- left : 0.00 286 | 287 | ('green', 'left', 'left', 'right') 288 | -- forward : 0.00 289 | -- None : 0.00 290 | -- right : 0.00 291 | -- left : 1.51 292 | 293 | ('red', 'forward', None, 'forward') 294 | -- forward : -9.70 295 | -- None : 1.76 296 | -- right : 0.00 297 | -- left : -9.50 298 | 299 | ('red', 'forward', 'left', 'left') 300 | -- forward : -10.81 301 | -- None : 0.00 302 | -- right : 0.00 303 | -- left : 0.00 304 | 305 | ('green', None, None, 'forward') 306 | -- forward : 2.64 307 | -- None : -5.32 308 | -- right : -0.13 309 | -- left : 0.40 310 | 311 | ('red', None, 'left', 'left') 312 | -- forward : -9.06 313 | -- None : 1.40 314 | -- right : 0.49 315 | -- left : -9.26 316 | 317 | ('green', None, 'left', 'forward') 318 | -- forward : 1.41 319 | -- None : 0.00 320 | -- right : 0.00 321 | -- left : -0.28 322 | 323 | ('red', None, 'left', 'forward') 324 | -- forward : -40.38 325 | -- None : 2.20 326 | -- right : -0.07 327 | -- left : 0.00 328 | 329 | ('green', 'left', 'forward', 'forward') 330 | -- forward : 2.71 331 | -- None : 0.11 332 | -- right : 0.00 333 | -- left : 0.23 334 | 335 | ('green', 'left', None, 'left') 336 | -- forward : -0.01 337 | -- None : 0.00 338 | -- right : 1.93 339 | -- left : 0.00 340 | 341 | ('green', 'forward', 'forward', 'left') 342 | -- forward : 1.38 343 | -- None : 0.00 344 | -- right : 0.00 345 | -- left : 0.00 346 | 347 | ('red', None, 'right', 'left') 348 | -- forward : -9.17 349 | -- None : 0.00 350 | -- right : 0.00 351 | -- left : 0.00 352 | 353 | ('red', 'forward', 'left', 'forward') 354 | -- forward : -9.40 355 | -- None : 1.78 356 | -- right : -0.05 357 | -- left : 0.00 358 | 359 | ('red', None, 'forward', 'right') 360 | -- forward : 0.00 361 | -- None : 1.59 362 | -- right : 0.00 363 | -- left : -40.07 364 | 365 | ('red', 'right', 'left', 'forward') 366 | -- forward : -9.04 367 | -- None : 1.80 368 | -- right : 0.00 369 | -- left : 0.00 370 | 371 | ('red', 'right', 'left', 'right') 372 | -- forward : -10.39 373 | -- None : 0.00 374 | -- right : 0.00 375 | -- left : 0.00 376 | 377 | ('red', 'right', 'forward', 'right') 378 | -- forward : -39.30 379 | -- None : 0.00 380 | -- right : 0.00 381 | -- left : 0.00 382 | 383 | ('green', 'left', 'left', 'left') 384 | -- forward : -0.43 385 | -- None : -0.02 386 | -- right : 0.00 387 | -- left : 0.00 388 | 389 | ('red', None, 'left', 'right') 390 | -- forward : -9.36 391 | -- None : 2.36 392 | -- right : 0.00 393 | -- left : 0.00 394 | 395 | ('red', 'right', None, 'left') 396 | -- forward : 0.00 397 | -- None : 1.31 398 | -- right : -0.15 399 | -- left : 0.00 400 | 401 | ('green', None, 'forward', 'left') 402 | -- forward : 0.00 403 | -- None : 0.00 404 | -- right : 
0.33 405 | -- left : 0.52 406 | 407 | ('green', 'right', 'left', 'forward') 408 | -- forward : 1.94 409 | -- None : 0.00 410 | -- right : 0.00 411 | -- left : 0.00 412 | 413 | ('green', None, 'forward', 'right') 414 | -- forward : -0.57 415 | -- None : -4.01 416 | -- right : 1.33 417 | -- left : -0.06 418 | 419 | ('red', None, None, 'right') 420 | -- forward : -40.38 421 | -- None : 0.00 422 | -- right : 1.26 423 | -- left : 0.00 424 | 425 | ('green', 'left', 'forward', 'right') 426 | -- forward : 0.00 427 | -- None : -0.45 428 | -- right : 0.00 429 | -- left : 0.00 430 | 431 | ('green', None, None, 'right') 432 | -- forward : 0.47 433 | -- None : -5.49 434 | -- right : 1.64 435 | -- left : -0.11 436 | 437 | ('red', 'left', 'forward', 'left') 438 | -- forward : -39.12 439 | -- None : 1.89 440 | -- right : 0.00 441 | -- left : 0.00 442 | 443 | ('red', 'forward', None, 'right') 444 | -- forward : -10.30 445 | -- None : 1.76 446 | -- right : 1.57 447 | -- left : -9.27 448 | 449 | ('green', 'forward', None, 'right') 450 | -- forward : 1.17 451 | -- None : 0.00 452 | -- right : 1.62 453 | -- left : 0.00 454 | 455 | ('green', None, 'forward', 'forward') 456 | -- forward : 0.00 457 | -- None : -5.57 458 | -- right : 0.02 459 | -- left : -0.32 460 | 461 | ('green', None, 'right', 'forward') 462 | -- forward : 2.58 463 | -- None : 0.00 464 | -- right : 0.00 465 | -- left : 0.00 466 | 467 | ('red', 'right', 'right', 'right') 468 | -- forward : 0.00 469 | -- None : 1.35 470 | -- right : 0.00 471 | -- left : 0.00 472 | 473 | ('red', 'left', 'forward', 'forward') 474 | -- forward : 0.00 475 | -- None : 2.09 476 | -- right : 0.00 477 | -- left : -39.37 478 | 479 | ('red', 'forward', 'right', 'left') 480 | -- forward : -10.39 481 | -- None : 0.00 482 | -- right : 0.00 483 | -- left : 0.00 484 | 485 | ('red', 'left', 'right', 'left') 486 | -- forward : -9.96 487 | -- None : 1.68 488 | -- right : 0.00 489 | -- left : -10.02 490 | 491 | ('red', 'forward', 'forward', 'left') 492 | -- forward : -39.16 493 | -- None : 1.24 494 | -- right : 0.00 495 | -- left : 0.00 496 | 497 | -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_default-learning (5 Features, Training 20 rounds, Score FB).csv: -------------------------------------------------------------------------------- 1 | trial,testing,parameters,initial_deadline,final_deadline,net_reward,actions,success,actions-0,actions-1,actions-2,actions-3,actions-4 2 | 1,False,"{'a': 0.5, 'e': 0.95}",25,0,-79.03032914667898,"{0: 15, 1: 4, 2: 5, 3: 1, 4: 0}",0,15,4,5,1,0 3 | 2,False,"{'a': 0.5, 'e': 0.8999999999999999}",30,0,-179.61194697992235,"{0: 19, 1: 2, 2: 3, 3: 4, 4: 2}",0,19,2,3,4,2 4 | 3,False,"{'a': 0.5, 'e': 0.8499999999999999}",35,0,-113.03668243363182,"{0: 23, 1: 5, 2: 5, 3: 1, 4: 1}",0,23,5,5,1,1 5 | 4,False,"{'a': 0.5, 'e': 0.7999999999999998}",20,0,-112.15765478648744,"{0: 9, 1: 3, 2: 7, 3: 0, 4: 1}",0,9,3,7,0,1 6 | 5,False,"{'a': 0.5, 'e': 0.7499999999999998}",30,0,-212.51685487831114,"{0: 21, 1: 1, 2: 2, 3: 1, 4: 5}",0,21,1,2,1,5 7 | 6,False,"{'a': 0.5, 'e': 0.6999999999999997}",25,8,-91.09833483747305,"{0: 10, 1: 1, 2: 4, 3: 1, 4: 1}",1,10,1,4,1,1 8 | 7,False,"{'a': 0.5, 'e': 0.6499999999999997}",25,0,-123.86543964601438,"{0: 15, 1: 3, 2: 5, 3: 0, 4: 2}",0,15,3,5,0,2 9 | 8,False,"{'a': 0.5, 'e': 0.5999999999999996}",30,19,0.3111623179157077,"{0: 9, 1: 1, 2: 1, 3: 0, 4: 0}",1,9,1,1,0,0 10 | 9,False,"{'a': 0.5, 'e': 0.5499999999999996}",20,0,-124.50108551635076,"{0: 13, 1: 1, 2: 3, 3: 1, 4: 
2}",0,13,1,3,1,2 11 | 10,False,"{'a': 0.5, 'e': 0.4999999999999996}",20,6,-18.61285438564991,"{0: 10, 1: 1, 2: 3, 3: 0, 4: 0}",1,10,1,3,0,0 12 | 11,False,"{'a': 0.5, 'e': 0.4499999999999996}",20,0,-83.35654430440017,"{0: 14, 1: 0, 2: 4, 3: 1, 4: 1}",0,14,0,4,1,1 13 | 12,False,"{'a': 0.5, 'e': 0.39999999999999963}",25,0,-143.6048944525399,"{0: 15, 1: 2, 2: 5, 3: 1, 4: 2}",0,15,2,5,1,2 14 | 13,False,"{'a': 0.5, 'e': 0.34999999999999964}",25,0,-62.875659741343725,"{0: 18, 1: 2, 2: 4, 3: 0, 4: 1}",0,18,2,4,0,1 15 | 14,False,"{'a': 0.5, 'e': 0.29999999999999966}",25,0,-41.05550168087808,"{0: 22, 1: 0, 2: 1, 3: 1, 4: 1}",0,22,0,1,1,1 16 | 15,False,"{'a': 0.5, 'e': 0.24999999999999967}",20,0,-32.614776736367766,"{0: 18, 1: 0, 2: 0, 3: 1, 4: 1}",0,18,0,0,1,1 17 | 16,False,"{'a': 0.5, 'e': 0.19999999999999968}",20,0,-66.31485123382097,"{0: 15, 1: 0, 2: 4, 3: 0, 4: 1}",0,15,0,4,0,1 18 | 17,False,"{'a': 0.5, 'e': 0.1499999999999997}",30,12,-6.612095523948431,"{0: 17, 1: 0, 2: 0, 3: 0, 4: 1}",1,17,0,0,0,1 19 | 18,False,"{'a': 0.5, 'e': 0.09999999999999969}",25,0,-28.385217967029458,"{0: 22, 1: 0, 2: 2, 3: 0, 4: 1}",0,22,0,2,0,1 20 | 19,False,"{'a': 0.5, 'e': 0.049999999999999684}",20,7,-17.66855900046226,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 1}",1,12,0,0,0,1 21 | 20,False,"{'a': 0.5, 'e': -3.191891195797325e-16}",20,0,-5.905057468516429,"{0: 17, 1: 0, 2: 3, 3: 0, 4: 0}",0,17,0,3,0,0 22 | 1,True,"{'a': 0.0, 'e': 0.0}",20,9,16.2740309613349,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 23 | 2,True,"{'a': 0.0, 'e': 0.0}",20,7,25.404623779771395,"{0: 13, 1: 0, 2: 0, 3: 0, 4: 0}",1,13,0,0,0,0 24 | 3,True,"{'a': 0.0, 'e': 0.0}",20,13,12.793533139196752,"{0: 7, 1: 0, 2: 0, 3: 0, 4: 0}",1,7,0,0,0,0 25 | 4,True,"{'a': 0.0, 'e': 0.0}",35,25,0.33081049595258216,"{0: 9, 1: 0, 2: 1, 3: 0, 4: 0}",1,9,0,1,0,0 26 | 5,True,"{'a': 0.0, 'e': 0.0}",20,14,-31.93572458367295,"{0: 5, 1: 0, 2: 0, 3: 0, 4: 1}",1,5,0,0,0,1 27 | 6,True,"{'a': 0.0, 'e': 0.0}",20,0,-73.37927715292714,"{0: 16, 1: 0, 2: 2, 3: 0, 4: 2}",0,16,0,2,0,2 28 | 7,True,"{'a': 0.0, 'e': 0.0}",20,9,4.923049373568943,"{0: 10, 1: 0, 2: 1, 3: 0, 4: 0}",1,10,0,1,0,0 29 | 8,True,"{'a': 0.0, 'e': 0.0}",25,3,-53.03535921048065,"{0: 19, 1: 0, 2: 1, 3: 0, 4: 2}",1,19,0,1,0,2 30 | 9,True,"{'a': 0.0, 'e': 0.0}",20,0,-23.77054075849565,"{0: 18, 1: 0, 2: 1, 3: 0, 4: 1}",0,18,0,1,0,1 31 | 10,True,"{'a': 0.0, 'e': 0.0}",20,4,29.814396009524017,"{0: 16, 1: 0, 2: 0, 3: 0, 4: 0}",1,16,0,0,0,0 32 | -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_default-learning (5 Features, Training 20 rounds, Score FB).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_default-learning (5 Features, Training 20 rounds, Score FB).png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_default-learning (5 Features, Training 20 rounds, Score FD).csv: -------------------------------------------------------------------------------- 1 | trial,testing,parameters,initial_deadline,final_deadline,net_reward,actions,success,actions-0,actions-1,actions-2,actions-3,actions-4 2 | 1,False,"{'a': 0.5, 'e': 0.95}",20,0,-68.50053127395111,"{0: 14, 1: 1, 2: 4, 3: 0, 4: 1}",0,14,1,4,0,1 3 | 2,False,"{'a': 0.5, 'e': 0.8999999999999999}",20,0,-79.81693900019653,"{0: 16, 1: 1, 2: 1, 3: 0, 4: 2}",0,16,1,1,0,2 4 | 3,False,"{'a': 0.5, 'e': 
0.8499999999999999}",30,0,-172.71375949444146,"{0: 19, 1: 2, 2: 6, 3: 0, 4: 3}",0,19,2,6,0,3 5 | 4,False,"{'a': 0.5, 'e': 0.7999999999999998}",20,13,-3.363630317737735,"{0: 6, 1: 0, 2: 1, 3: 0, 4: 0}",1,6,0,1,0,0 6 | 5,False,"{'a': 0.5, 'e': 0.7499999999999998}",25,0,-227.82390890787778,"{0: 13, 1: 1, 2: 6, 3: 1, 4: 4}",1,13,1,6,1,4 7 | 6,False,"{'a': 0.5, 'e': 0.6999999999999997}",20,0,-90.62217575334049,"{0: 12, 1: 0, 2: 7, 3: 0, 4: 1}",0,12,0,7,0,1 8 | 7,False,"{'a': 0.5, 'e': 0.6499999999999997}",30,0,-215.70441776010833,"{0: 19, 1: 1, 2: 5, 3: 1, 4: 4}",0,19,1,5,1,4 9 | 8,False,"{'a': 0.5, 'e': 0.5999999999999996}",20,0,-62.238648532897564,"{0: 15, 1: 0, 2: 4, 3: 0, 4: 1}",0,15,0,4,0,1 10 | 9,False,"{'a': 0.5, 'e': 0.5499999999999996}",25,0,-101.20121839998677,"{0: 18, 1: 1, 2: 4, 3: 0, 4: 2}",0,18,1,4,0,2 11 | 10,False,"{'a': 0.5, 'e': 0.4999999999999996}",20,0,-75.41001907573968,"{0: 15, 1: 0, 2: 3, 3: 1, 4: 1}",0,15,0,3,1,1 12 | 11,False,"{'a': 0.5, 'e': 0.4499999999999996}",20,0,-58.65733065621367,"{0: 15, 1: 1, 2: 3, 3: 0, 4: 1}",0,15,1,3,0,1 13 | 12,False,"{'a': 0.5, 'e': 0.39999999999999963}",25,0,-68.52968987302668,"{0: 22, 1: 0, 2: 0, 3: 1, 4: 2}",0,22,0,0,1,2 14 | 13,False,"{'a': 0.5, 'e': 0.34999999999999964}",35,0,-74.9417497176702,"{0: 27, 1: 2, 2: 4, 3: 1, 4: 1}",0,27,2,4,1,1 15 | 14,False,"{'a': 0.5, 'e': 0.29999999999999966}",20,7,-3.12856256536083,"{0: 11, 1: 0, 2: 2, 3: 0, 4: 0}",1,11,0,2,0,0 16 | 15,False,"{'a': 0.5, 'e': 0.24999999999999967}",20,0,-163.90322247381636,"{0: 11, 1: 1, 2: 4, 3: 1, 4: 3}",1,11,1,4,1,3 17 | 16,False,"{'a': 0.5, 'e': 0.19999999999999968}",20,0,-102.12166779503306,"{0: 14, 1: 0, 2: 4, 3: 0, 4: 2}",0,14,0,4,0,2 18 | 17,False,"{'a': 0.5, 'e': 0.1499999999999997}",25,0,-74.73943891378552,"{0: 21, 1: 0, 2: 2, 3: 0, 4: 2}",0,21,0,2,0,2 19 | 18,False,"{'a': 0.5, 'e': 0.09999999999999969}",25,16,-26.251459098248354,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 1}",1,8,0,0,0,1 20 | 19,False,"{'a': 0.5, 'e': 0.049999999999999684}",20,6,27.062670991760744,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 21 | 20,False,"{'a': 0.5, 'e': -3.191891195797325e-16}",20,12,-50.55814597893853,"{0: 5, 1: 0, 2: 2, 3: 0, 4: 1}",1,5,0,2,0,1 22 | 1,True,"{'a': 0.0, 'e': 0.0}",20,0,-83.64446800738175,"{0: 16, 1: 0, 2: 2, 3: 0, 4: 2}",0,16,0,2,0,2 23 | 2,True,"{'a': 0.0, 'e': 0.0}",20,12,5.1096318373863046,"{0: 7, 1: 0, 2: 1, 3: 0, 4: 0}",1,7,0,1,0,0 24 | 3,True,"{'a': 0.0, 'e': 0.0}",30,12,16.89651557553828,"{0: 17, 1: 0, 2: 1, 3: 0, 4: 0}",1,17,0,1,0,0 25 | 4,True,"{'a': 0.0, 'e': 0.0}",35,10,-70.52009436372944,"{0: 21, 1: 0, 2: 2, 3: 0, 4: 2}",1,21,0,2,0,2 26 | 5,True,"{'a': 0.0, 'e': 0.0}",20,0,-35.52132549390551,"{0: 17, 1: 0, 2: 2, 3: 0, 4: 1}",0,17,0,2,0,1 27 | 6,True,"{'a': 0.0, 'e': 0.0}",30,19,-36.05758717485023,"{0: 9, 1: 0, 2: 1, 3: 0, 4: 1}",1,9,0,1,0,1 28 | 7,True,"{'a': 0.0, 'e': 0.0}",25,3,-15.365992199206618,"{0: 20, 1: 0, 2: 1, 3: 0, 4: 1}",1,20,0,1,0,1 29 | 8,True,"{'a': 0.0, 'e': 0.0}",25,8,-62.156587812942234,"{0: 14, 1: 0, 2: 1, 3: 0, 4: 2}",1,14,0,1,0,2 30 | 9,True,"{'a': 0.0, 'e': 0.0}",25,0,-44.76171765109381,"{0: 21, 1: 0, 2: 3, 3: 0, 4: 1}",0,21,0,3,0,1 31 | 10,True,"{'a': 0.0, 'e': 0.0}",20,0,-23.300449708174384,"{0: 18, 1: 0, 2: 1, 3: 0, 4: 1}",0,18,0,1,0,1 32 | -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_default-learning (5 Features, Training 20 rounds, Score FD).png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_default-learning (5 Features, Training 20 rounds, Score FD).png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_improved-learning - Use decay_exponential (4 Features, 300 rounds, 310 combinations, Score A+A+).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_improved-learning - Use decay_exponential (4 Features, 300 rounds, 310 combinations, Score A+A+).png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_improved-learning - Use decay_exponential (4 Features, 300 rounds, 310 combinations, Score A+A+).txt: -------------------------------------------------------------------------------- 1 | /----------------------------------------- 2 | | State-action rewards from Q-Learning 3 | \----------------------------------------- 4 | 5 | ('green', 'forward', None, 'left') 6 | -- forward : 0.20 7 | -- None : -5.61 8 | -- right : 0.08 9 | -- left : -20.92 10 | 11 | ('green', 'forward', 'left', 'right') 12 | -- forward : 1.53 13 | -- None : 0.00 14 | -- right : 1.16 15 | -- left : -20.38 16 | 17 | ('red', None, 'forward', 'right') 18 | -- forward : -39.41 19 | -- None : 2.07 20 | -- right : -20.71 21 | -- left : -40.47 22 | 23 | ('green', 'forward', 'left', 'left') 24 | -- forward : 1.56 25 | -- None : 0.00 26 | -- right : 0.00 27 | -- left : -19.25 28 | 29 | ('red', None, None, 'forward') 30 | -- forward : -10.90 31 | -- None : 2.32 32 | -- right : 0.15 33 | -- left : -10.09 34 | 35 | ('green', 'left', 'right', 'forward') 36 | -- forward : 1.46 37 | -- None : 0.00 38 | -- right : 0.31 39 | -- left : 0.84 40 | 41 | ('red', 'left', 'right', 'forward') 42 | -- forward : -9.15 43 | -- None : 2.90 44 | -- right : 0.63 45 | -- left : -9.11 46 | 47 | ('red', 'left', 'right', 'right') 48 | -- forward : -10.58 49 | -- None : 2.81 50 | -- right : 0.98 51 | -- left : 0.00 52 | 53 | ('green', 'right', 'left', 'forward') 54 | -- forward : 2.81 55 | -- None : -5.62 56 | -- right : 0.00 57 | -- left : -19.87 58 | 59 | ('red', None, 'forward', 'forward') 60 | -- forward : -40.11 61 | -- None : 1.91 62 | -- right : -20.42 63 | -- left : -40.34 64 | 65 | ('green', 'forward', 'right', 'forward') 66 | -- forward : 2.02 67 | -- None : 0.00 68 | -- right : 0.00 69 | -- left : -20.63 70 | 71 | ('green', None, None, 'left') 72 | -- forward : 0.02 73 | -- None : -5.76 74 | -- right : 0.58 75 | -- left : 1.93 76 | 77 | ('green', 'left', 'right', 'left') 78 | -- forward : 0.00 79 | -- None : 0.41 80 | -- right : -0.29 81 | -- left : 1.93 82 | 83 | ('red', 'right', None, 'right') 84 | -- forward : -40.64 85 | -- None : 1.91 86 | -- right : 0.63 87 | -- left : -39.03 88 | 89 | ('red', None, 'right', 'forward') 90 | -- forward : -39.64 91 | -- None : 2.62 92 | -- right : -0.08 93 | -- left : -10.48 94 | 95 | ('green', 'right', 'forward', 'forward') 96 | -- forward : 2.83 97 | -- None : -5.08 98 | -- right : -0.35 99 | -- left : -19.57 100 | 101 | ('green', None, 'left', 'forward') 102 | -- forward : 2.46 103 | -- None : -5.79 104 | -- right : 0.29 105 | -- left : 0.79 106 | 107 | ('red', 'right', 'forward', 'forward') 108 | -- forward : -39.83 109 | -- None : 1.61 110 | -- right : -19.92 111 | -- left : -39.84 112 | 113 | ('green', 'forward', 'right', 'right') 114 | -- 
forward : 1.09 115 | -- None : 0.00 116 | -- right : 1.70 117 | -- left : 0.00 118 | 119 | ('green', 'right', 'forward', 'left') 120 | -- forward : 1.44 121 | -- None : -5.63 122 | -- right : 0.00 123 | -- left : -20.39 124 | 125 | ('red', None, 'right', 'left') 126 | -- forward : -10.75 127 | -- None : 0.45 128 | -- right : 0.39 129 | -- left : 0.00 130 | 131 | ('green', 'left', 'forward', 'right') 132 | -- forward : 0.31 133 | -- None : 0.10 134 | -- right : 0.00 135 | -- left : 0.00 136 | 137 | ('green', 'forward', 'forward', 'forward') 138 | -- forward : 2.12 139 | -- None : 0.00 140 | -- right : 0.00 141 | -- left : -19.40 142 | 143 | ('green', 'forward', None, 'right') 144 | -- forward : 0.07 145 | -- None : -5.40 146 | -- right : 1.29 147 | -- left : -19.95 148 | 149 | ('green', None, 'forward', 'forward') 150 | -- forward : 1.69 151 | -- None : -5.28 152 | -- right : 0.61 153 | -- left : 0.43 154 | 155 | ('red', 'forward', 'right', 'right') 156 | -- forward : -10.36 157 | -- None : 2.10 158 | -- right : 1.18 159 | -- left : -9.26 160 | 161 | ('red', 'left', 'forward', 'right') 162 | -- forward : -39.17 163 | -- None : 1.49 164 | -- right : -20.52 165 | -- left : -40.39 166 | 167 | ('green', 'left', 'left', 'forward') 168 | -- forward : 1.79 169 | -- None : 0.21 170 | -- right : 0.21 171 | -- left : 0.64 172 | 173 | ('red', 'left', None, 'left') 174 | -- forward : -10.50 175 | -- None : 2.54 176 | -- right : -0.12 177 | -- left : -10.32 178 | 179 | ('green', None, 'forward', 'right') 180 | -- forward : 0.59 181 | -- None : -4.30 182 | -- right : 1.09 183 | -- left : 0.19 184 | 185 | ('green', 'forward', None, 'forward') 186 | -- forward : 2.10 187 | -- None : -5.02 188 | -- right : 1.07 189 | -- left : -20.38 190 | 191 | ('green', None, 'right', 'right') 192 | -- forward : 0.40 193 | -- None : -5.28 194 | -- right : 2.38 195 | -- left : 0.12 196 | 197 | ('red', 'right', 'right', 'forward') 198 | -- forward : -40.07 199 | -- None : 1.32 200 | -- right : 0.00 201 | -- left : 0.00 202 | 203 | ('green', 'right', None, 'forward') 204 | -- forward : 1.52 205 | -- None : -4.06 206 | -- right : 0.00 207 | -- left : -20.79 208 | 209 | ('red', 'left', None, 'forward') 210 | -- forward : -10.89 211 | -- None : 1.08 212 | -- right : 0.05 213 | -- left : -10.01 214 | 215 | ('red', 'forward', None, 'left') 216 | -- forward : -9.06 217 | -- None : 1.28 218 | -- right : 0.28 219 | -- left : -10.82 220 | 221 | ('green', 'left', 'forward', 'left') 222 | -- forward : 0.28 223 | -- None : 0.63 224 | -- right : 0.52 225 | -- left : 1.10 226 | 227 | ('red', 'left', 'left', 'left') 228 | -- forward : -9.54 229 | -- None : 2.05 230 | -- right : 0.33 231 | -- left : -10.14 232 | 233 | ('green', 'right', 'right', 'left') 234 | -- forward : -0.09 235 | -- None : -4.15 236 | -- right : 0.45 237 | -- left : 0.00 238 | 239 | ('green', 'right', 'forward', 'right') 240 | -- forward : -0.15 241 | -- None : -4.87 242 | -- right : 0.35 243 | -- left : 0.00 244 | 245 | ('red', None, 'right', 'right') 246 | -- forward : -9.07 247 | -- None : 2.20 248 | -- right : 1.14 249 | -- left : -10.77 250 | 251 | ('red', 'right', None, 'forward') 252 | -- forward : -41.00 253 | -- None : 1.37 254 | -- right : 0.26 255 | -- left : -39.12 256 | 257 | ('red', 'forward', None, 'forward') 258 | -- forward : -10.52 259 | -- None : 2.48 260 | -- right : 0.70 261 | -- left : -9.55 262 | 263 | ('red', 'forward', 'left', 'left') 264 | -- forward : -39.19 265 | -- None : 1.96 266 | -- right : 0.00 267 | -- left : -9.35 268 | 269 | ('red', 
'right', 'right', 'left') 270 | -- forward : -9.29 271 | -- None : 0.00 272 | -- right : 0.19 273 | -- left : 0.00 274 | 275 | ('green', 'left', 'right', 'right') 276 | -- forward : 0.00 277 | -- None : 0.00 278 | -- right : 1.90 279 | -- left : 0.00 280 | 281 | ('red', 'right', 'right', 'right') 282 | -- forward : -9.23 283 | -- None : 2.20 284 | -- right : 0.99 285 | -- left : -40.49 286 | 287 | ('red', 'forward', 'right', 'left') 288 | -- forward : -10.74 289 | -- None : 1.87 290 | -- right : 0.00 291 | -- left : -10.66 292 | 293 | ('red', 'left', 'forward', 'forward') 294 | -- forward : 0.00 295 | -- None : 1.38 296 | -- right : -20.59 297 | -- left : -40.16 298 | 299 | ('red', None, 'forward', 'left') 300 | -- forward : -40.76 301 | -- None : 2.29 302 | -- right : -20.93 303 | -- left : -40.31 304 | 305 | ('green', 'right', None, 'right') 306 | -- forward : -0.05 307 | -- None : -4.24 308 | -- right : 2.41 309 | -- left : -19.48 310 | 311 | ('green', None, 'right', 'forward') 312 | -- forward : 1.92 313 | -- None : -5.86 314 | -- right : 0.51 315 | -- left : 0.21 316 | 317 | ('green', 'right', 'left', 'right') 318 | -- forward : 0.42 319 | -- None : 0.00 320 | -- right : 0.00 321 | -- left : -20.77 322 | 323 | ('green', None, 'left', 'right') 324 | -- forward : 0.27 325 | -- None : -5.87 326 | -- right : 1.31 327 | -- left : -0.38 328 | 329 | ('green', None, 'right', 'left') 330 | -- forward : 0.24 331 | -- None : -5.14 332 | -- right : 0.10 333 | -- left : 0.76 334 | 335 | ('red', 'forward', 'forward', 'forward') 336 | -- forward : -39.45 337 | -- None : 0.85 338 | -- right : -20.76 339 | -- left : -40.17 340 | 341 | ('green', 'right', 'right', 'forward') 342 | -- forward : 2.83 343 | -- None : 0.00 344 | -- right : 0.00 345 | -- left : -20.92 346 | 347 | ('red', None, 'left', 'left') 348 | -- forward : -10.70 349 | -- None : 1.74 350 | -- right : 0.33 351 | -- left : -40.19 352 | 353 | ('green', 'left', 'forward', 'forward') 354 | -- forward : 1.30 355 | -- None : 0.09 356 | -- right : 0.23 357 | -- left : 0.00 358 | 359 | ('green', 'left', None, 'left') 360 | -- forward : -0.34 361 | -- None : 1.00 362 | -- right : 0.74 363 | -- left : 2.49 364 | 365 | ('green', 'forward', 'forward', 'left') 366 | -- forward : -0.54 367 | -- None : -5.66 368 | -- right : 0.55 369 | -- left : 0.00 370 | 371 | ('red', 'left', 'left', 'forward') 372 | -- forward : -10.80 373 | -- None : 1.08 374 | -- right : 0.00 375 | -- left : -9.21 376 | 377 | ('red', None, None, 'left') 378 | -- forward : -10.79 379 | -- None : 1.79 380 | -- right : 0.02 381 | -- left : -40.95 382 | 383 | ('green', None, 'forward', 'left') 384 | -- forward : 0.84 385 | -- None : -4.31 386 | -- right : -0.08 387 | -- left : 2.66 388 | 389 | ('red', 'right', 'forward', 'left') 390 | -- forward : -39.31 391 | -- None : 1.09 392 | -- right : 0.00 393 | -- left : -40.38 394 | 395 | ('red', None, None, 'right') 396 | -- forward : -10.32 397 | -- None : 1.73 398 | -- right : 0.59 399 | -- left : -9.29 400 | 401 | ('red', 'forward', None, 'right') 402 | -- forward : -10.13 403 | -- None : 1.00 404 | -- right : 1.92 405 | -- left : -9.32 406 | 407 | ('green', 'left', None, 'right') 408 | -- forward : 0.25 409 | -- None : 0.01 410 | -- right : 1.88 411 | -- left : 0.30 412 | 413 | ('green', 'left', 'left', 'left') 414 | -- forward : 0.35 415 | -- None : 0.81 416 | -- right : 0.55 417 | -- left : 1.35 418 | 419 | ('green', None, 'left', 'left') 420 | -- forward : 0.68 421 | -- None : -4.03 422 | -- right : 0.20 423 | -- left : 1.99 424 | 425 
| ('green', 'left', None, 'forward') 426 | -- forward : 0.80 427 | -- None : 0.07 428 | -- right : 0.09 429 | -- left : 0.12 430 | 431 | ('red', 'left', None, 'right') 432 | -- forward : -9.04 433 | -- None : 1.18 434 | -- right : 0.47 435 | -- left : -9.07 436 | 437 | ('red', 'forward', 'left', 'right') 438 | -- forward : -10.28 439 | -- None : 1.07 440 | -- right : 2.55 441 | -- left : -10.21 442 | 443 | ('red', 'left', 'left', 'right') 444 | -- forward : 0.00 445 | -- None : 0.00 446 | -- right : 2.61 447 | -- left : -10.72 448 | 449 | ('green', 'left', 'left', 'right') 450 | -- forward : 2.00 451 | -- None : 0.00 452 | -- right : 0.29 453 | -- left : -0.05 454 | 455 | ('green', 'right', None, 'left') 456 | -- forward : -0.29 457 | -- None : -4.13 458 | -- right : 1.99 459 | -- left : -19.69 460 | 461 | ('green', 'forward', 'left', 'forward') 462 | -- forward : 1.19 463 | -- None : 0.00 464 | -- right : 0.00 465 | -- left : -19.08 466 | 467 | ('red', 'forward', 'right', 'forward') 468 | -- forward : -9.60 469 | -- None : 2.56 470 | -- right : 0.00 471 | -- left : -10.22 472 | 473 | ('green', 'forward', 'right', 'left') 474 | -- forward : 1.34 475 | -- None : -5.06 476 | -- right : 0.78 477 | -- left : -19.01 478 | 479 | ('red', 'forward', 'forward', 'right') 480 | -- forward : -40.44 481 | -- None : 1.31 482 | -- right : -19.12 483 | -- left : -40.93 484 | 485 | ('green', 'forward', 'forward', 'right') 486 | -- forward : 0.56 487 | -- None : 0.00 488 | -- right : 1.80 489 | -- left : 0.00 490 | 491 | ('green', None, None, 'forward') 492 | -- forward : 1.44 493 | -- None : -4.99 494 | -- right : 0.59 495 | -- left : 0.24 496 | 497 | ('red', None, 'left', 'right') 498 | -- forward : -39.77 499 | -- None : 0.52 500 | -- right : 2.31 501 | -- left : -10.30 502 | 503 | ('red', 'forward', 'left', 'forward') 504 | -- forward : -9.99 505 | -- None : 2.42 506 | -- right : -0.03 507 | -- left : -39.96 508 | 509 | ('red', 'right', 'left', 'forward') 510 | -- forward : -9.90 511 | -- None : 1.27 512 | -- right : 0.00 513 | -- left : -39.44 514 | 515 | ('red', 'right', 'left', 'right') 516 | -- forward : 0.00 517 | -- None : 1.22 518 | -- right : 1.90 519 | -- left : 0.00 520 | 521 | ('red', 'right', None, 'left') 522 | -- forward : -39.49 523 | -- None : 2.83 524 | -- right : 0.55 525 | -- left : -39.50 526 | 527 | ('red', 'right', 'forward', 'right') 528 | -- forward : -39.24 529 | -- None : 0.32 530 | -- right : -20.64 531 | -- left : -39.78 532 | 533 | ('red', None, 'left', 'forward') 534 | -- forward : -10.18 535 | -- None : 2.54 536 | -- right : -0.10 537 | -- left : -9.43 538 | 539 | ('red', 'right', 'left', 'left') 540 | -- forward : -10.77 541 | -- None : 1.31 542 | -- right : 0.00 543 | -- left : 0.00 544 | 545 | ('green', None, None, 'right') 546 | -- forward : 0.93 547 | -- None : -5.22 548 | -- right : 1.22 549 | -- left : 0.64 550 | 551 | ('red', 'left', 'forward', 'left') 552 | -- forward : -40.68 553 | -- None : 1.12 554 | -- right : -20.83 555 | -- left : -39.37 556 | 557 | ('green', 'right', 'left', 'left') 558 | -- forward : -0.22 559 | -- None : -4.33 560 | -- right : 0.00 561 | -- left : -19.30 562 | 563 | ('red', 'left', 'right', 'left') 564 | -- forward : -10.25 565 | -- None : 1.99 566 | -- right : 1.10 567 | -- left : -10.53 568 | 569 | ('red', 'forward', 'forward', 'left') 570 | -- forward : -40.10 571 | -- None : 1.49 572 | -- right : 0.00 573 | -- left : -40.31 574 | 575 | -------------------------------------------------------------------------------- 
/P4_Smart_Cab/logs/sim_improved-learning - Use decay_exponential (5 Features, 600 rounds, 1000 combinations, Score A+A).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_improved-learning - Use decay_exponential (5 Features, 600 rounds, 1000 combinations, Score A+A).png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_improved-learning - Use decay_exponential (5 Features, 600 rounds, 1000 combinations, Score A+A+).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_improved-learning - Use decay_exponential (5 Features, 600 rounds, 1000 combinations, Score A+A+).png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_improved-learning - Use decay_step (4 Features, 300 rounds, 370 combinations, Score A+A+).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_improved-learning - Use decay_step (4 Features, 300 rounds, 370 combinations, Score A+A+).png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_improved-learning - Use decay_step (4 Features, 300 rounds, 370 combinations, Score A+A+).txt: -------------------------------------------------------------------------------- 1 | /----------------------------------------- 2 | | State-action rewards from Q-Learning 3 | \----------------------------------------- 4 | 5 | ('green', 'forward', None, 'left') 6 | -- forward : -0.05 7 | -- None : -5.49 8 | -- right : 1.19 9 | -- left : -19.82 10 | 11 | ('green', 'forward', 'left', 'right') 12 | -- forward : 0.04 13 | -- None : -4.71 14 | -- right : 0.74 15 | -- left : -19.51 16 | 17 | ('red', None, 'forward', 'right') 18 | -- forward : -40.45 19 | -- None : 2.01 20 | -- right : -20.02 21 | -- left : -40.09 22 | 23 | ('green', 'forward', 'left', 'left') 24 | -- forward : 0.77 25 | -- None : -4.63 26 | -- right : 0.47 27 | -- left : -19.05 28 | 29 | ('red', None, None, 'forward') 30 | -- forward : -13.39 31 | -- None : 1.76 32 | -- right : 0.60 33 | -- left : -9.93 34 | 35 | ('green', 'left', 'right', 'forward') 36 | -- forward : 1.34 37 | -- None : 0.39 38 | -- right : 0.24 39 | -- left : 0.85 40 | 41 | ('red', 'left', 'right', 'forward') 42 | -- forward : -13.72 43 | -- None : 1.01 44 | -- right : 0.88 45 | -- left : -17.78 46 | 47 | ('red', 'left', 'left', 'left') 48 | -- forward : -10.26 49 | -- None : 1.33 50 | -- right : 1.16 51 | -- left : -9.80 52 | 53 | ('green', 'forward', 'right', 'right') 54 | -- forward : 0.05 55 | -- None : -3.29 56 | -- right : 1.46 57 | -- left : -9.82 58 | 59 | ('red', None, 'forward', 'forward') 60 | -- forward : -39.85 61 | -- None : 2.19 62 | -- right : -19.92 63 | -- left : -40.05 64 | 65 | ('green', None, None, 'left') 66 | -- forward : 1.33 67 | -- None : -5.16 68 | -- right : 0.89 69 | -- left : 1.66 70 | 71 | ('green', 'left', 'right', 'left') 72 | -- forward : 1.18 73 | -- None : -0.51 74 | -- right : 0.69 75 | -- left : 2.22 76 | 77 | ('red', 'right', None, 'right') 78 | -- forward : -9.71 79 | -- None : 0.83 80 | -- right : 0.80 81 | -- left : -39.59 82 | 83 | ('red', None, 
'right', 'forward') 84 | -- forward : -25.36 85 | -- None : 0.78 86 | -- right : 0.94 87 | -- left : -11.17 88 | 89 | ('green', 'right', 'forward', 'forward') 90 | -- forward : 1.15 91 | -- None : -5.25 92 | -- right : 0.74 93 | -- left : -18.30 94 | 95 | ('green', None, 'left', 'forward') 96 | -- forward : 1.60 97 | -- None : -5.36 98 | -- right : 1.16 99 | -- left : 0.68 100 | 101 | ('red', 'right', 'forward', 'forward') 102 | -- forward : -39.28 103 | -- None : 2.12 104 | -- right : 0.00 105 | -- left : -34.91 106 | 107 | ('green', 'right', 'left', 'forward') 108 | -- forward : 1.19 109 | -- None : -5.08 110 | -- right : 1.14 111 | -- left : -9.61 112 | 113 | ('green', 'right', 'forward', 'left') 114 | -- forward : 0.54 115 | -- None : -3.61 116 | -- right : 0.43 117 | -- left : -14.71 118 | 119 | ('red', None, 'right', 'left') 120 | -- forward : -17.89 121 | -- None : 1.07 122 | -- right : 0.73 123 | -- left : -17.71 124 | 125 | ('green', 'left', 'forward', 'right') 126 | -- forward : 0.09 127 | -- None : 0.59 128 | -- right : 1.78 129 | -- left : 1.26 130 | 131 | ('green', 'right', 'right', 'right') 132 | -- forward : 0.21 133 | -- None : -3.68 134 | -- right : 0.00 135 | -- left : -15.60 136 | 137 | ('green', 'forward', 'forward', 'forward') 138 | -- forward : 2.00 139 | -- None : 0.00 140 | -- right : -0.03 141 | -- left : -18.27 142 | 143 | ('green', 'forward', None, 'right') 144 | -- forward : 0.55 145 | -- None : -5.36 146 | -- right : 1.05 147 | -- left : -20.01 148 | 149 | ('green', None, 'forward', 'forward') 150 | -- forward : 2.25 151 | -- None : -4.88 152 | -- right : 0.51 153 | -- left : 0.49 154 | 155 | ('red', 'forward', 'right', 'right') 156 | -- forward : -22.23 157 | -- None : 1.66 158 | -- right : 1.49 159 | -- left : -16.61 160 | 161 | ('red', 'left', 'forward', 'right') 162 | -- forward : -39.75 163 | -- None : 2.12 164 | -- right : -18.59 165 | -- left : -39.85 166 | 167 | ('green', 'left', 'left', 'forward') 168 | -- forward : 1.54 169 | -- None : 0.83 170 | -- right : 0.90 171 | -- left : 0.74 172 | 173 | ('red', 'left', None, 'left') 174 | -- forward : -10.08 175 | -- None : 2.55 176 | -- right : 0.38 177 | -- left : -12.75 178 | 179 | ('red', 'right', 'forward', 'left') 180 | -- forward : -29.53 181 | -- None : 1.28 182 | -- right : -15.64 183 | -- left : -30.43 184 | 185 | ('green', 'forward', None, 'forward') 186 | -- forward : 1.64 187 | -- None : -5.63 188 | -- right : 0.52 189 | -- left : -19.74 190 | 191 | ('green', None, 'left', 'left') 192 | -- forward : 0.34 193 | -- None : -5.57 194 | -- right : 0.45 195 | -- left : 1.79 196 | 197 | ('red', 'right', 'right', 'forward') 198 | -- forward : -4.69 199 | -- None : 0.55 200 | -- right : 0.48 201 | -- left : -20.18 202 | 203 | ('green', 'right', None, 'forward') 204 | -- forward : 0.69 205 | -- None : -5.09 206 | -- right : -0.08 207 | -- left : -20.18 208 | 209 | ('red', 'left', None, 'forward') 210 | -- forward : -10.72 211 | -- None : 1.44 212 | -- right : 0.93 213 | -- left : -10.03 214 | 215 | ('red', 'forward', None, 'left') 216 | -- forward : -10.33 217 | -- None : 1.63 218 | -- right : 1.09 219 | -- left : -28.66 220 | 221 | ('green', 'left', 'forward', 'left') 222 | -- forward : 0.72 223 | -- None : 0.67 224 | -- right : 1.12 225 | -- left : 0.46 226 | 227 | ('red', 'left', 'right', 'right') 228 | -- forward : -9.90 229 | -- None : 0.80 230 | -- right : 1.74 231 | -- left : -9.27 232 | 233 | ('green', 'right', 'right', 'left') 234 | -- forward : 0.00 235 | -- None : -2.85 236 | -- right : 0.44 237 
| -- left : 0.00 238 | 239 | ('green', 'right', 'forward', 'right') 240 | -- forward : 0.89 241 | -- None : -3.98 242 | -- right : 1.05 243 | -- left : -14.82 244 | 245 | ('red', None, 'right', 'right') 246 | -- forward : -24.98 247 | -- None : 1.02 248 | -- right : 1.96 249 | -- left : -9.76 250 | 251 | ('red', 'right', None, 'forward') 252 | -- forward : -13.80 253 | -- None : 2.40 254 | -- right : 0.58 255 | -- left : -39.60 256 | 257 | ('red', 'forward', None, 'forward') 258 | -- forward : -17.93 259 | -- None : 0.95 260 | -- right : 0.55 261 | -- left : -10.21 262 | 263 | ('red', 'forward', 'left', 'left') 264 | -- forward : -6.80 265 | -- None : 1.30 266 | -- right : 0.28 267 | -- left : -29.08 268 | 269 | ('red', 'right', 'right', 'left') 270 | -- forward : -17.19 271 | -- None : 1.55 272 | -- right : 0.05 273 | -- left : -29.72 274 | 275 | ('green', 'left', 'right', 'right') 276 | -- forward : 0.80 277 | -- None : 0.89 278 | -- right : 1.98 279 | -- left : 0.64 280 | 281 | ('red', 'right', 'right', 'right') 282 | -- forward : -15.07 283 | -- None : 0.00 284 | -- right : 1.21 285 | -- left : -30.04 286 | 287 | ('green', 'right', 'left', 'right') 288 | -- forward : 0.73 289 | -- None : -2.28 290 | -- right : 1.47 291 | -- left : -15.15 292 | 293 | ('red', 'left', 'forward', 'forward') 294 | -- forward : -39.02 295 | -- None : 1.84 296 | -- right : -19.96 297 | -- left : -39.57 298 | 299 | ('red', None, 'forward', 'left') 300 | -- forward : -39.83 301 | -- None : 1.27 302 | -- right : -20.27 303 | -- left : -39.68 304 | 305 | ('green', 'right', None, 'right') 306 | -- forward : 0.75 307 | -- None : -5.16 308 | -- right : 1.24 309 | -- left : -20.51 310 | 311 | ('green', None, 'right', 'forward') 312 | -- forward : 1.66 313 | -- None : -4.59 314 | -- right : 1.29 315 | -- left : 0.87 316 | 317 | ('green', 'forward', 'right', 'forward') 318 | -- forward : 1.38 319 | -- None : -4.08 320 | -- right : 0.94 321 | -- left : -17.03 322 | 323 | ('green', None, 'left', 'right') 324 | -- forward : 0.68 325 | -- None : -5.23 326 | -- right : 1.79 327 | -- left : 0.74 328 | 329 | ('green', None, 'right', 'left') 330 | -- forward : 0.57 331 | -- None : -5.10 332 | -- right : 0.53 333 | -- left : 1.77 334 | 335 | ('red', 'forward', 'forward', 'forward') 336 | -- forward : -39.06 337 | -- None : 1.79 338 | -- right : -19.91 339 | -- left : -30.20 340 | 341 | ('green', 'right', 'right', 'forward') 342 | -- forward : 0.00 343 | -- None : -2.63 344 | -- right : 0.68 345 | -- left : 0.00 346 | 347 | ('red', None, 'left', 'left') 348 | -- forward : -17.30 349 | -- None : 2.01 350 | -- right : 0.92 351 | -- left : -12.12 352 | 353 | ('green', 'left', 'forward', 'forward') 354 | -- forward : 2.21 355 | -- None : 0.24 356 | -- right : 0.68 357 | -- left : 1.11 358 | 359 | ('green', 'left', None, 'left') 360 | -- forward : 0.70 361 | -- None : 1.04 362 | -- right : 0.57 363 | -- left : 1.79 364 | 365 | ('green', 'forward', 'forward', 'left') 366 | -- forward : 0.67 367 | -- None : -5.43 368 | -- right : 0.58 369 | -- left : -15.54 370 | 371 | ('red', 'left', 'left', 'forward') 372 | -- forward : -9.49 373 | -- None : 1.44 374 | -- right : 0.42 375 | -- left : -13.58 376 | 377 | ('red', None, None, 'left') 378 | -- forward : -10.75 379 | -- None : 1.71 380 | -- right : 0.40 381 | -- left : -10.15 382 | 383 | ('green', None, 'forward', 'left') 384 | -- forward : 0.71 385 | -- None : -5.10 386 | -- right : 1.33 387 | -- left : 2.12 388 | 389 | ('green', None, 'forward', 'right') 390 | -- forward : 0.63 391 | -- 
None : -5.36 392 | -- right : 2.46 393 | -- left : 0.23 394 | 395 | ('red', None, None, 'right') 396 | -- forward : -10.25 397 | -- None : 1.59 398 | -- right : 2.12 399 | -- left : -11.88 400 | 401 | ('red', 'forward', None, 'right') 402 | -- forward : -13.57 403 | -- None : 1.79 404 | -- right : 2.43 405 | -- left : -14.41 406 | 407 | ('green', 'left', None, 'right') 408 | -- forward : 0.95 409 | -- None : 1.55 410 | -- right : 1.93 411 | -- left : 0.87 412 | 413 | ('green', 'left', 'left', 'left') 414 | -- forward : 0.39 415 | -- None : 0.44 416 | -- right : 0.38 417 | -- left : 1.54 418 | 419 | ('green', None, 'right', 'right') 420 | -- forward : 1.21 421 | -- None : -5.22 422 | -- right : 1.48 423 | -- left : 0.96 424 | 425 | ('green', 'left', None, 'forward') 426 | -- forward : 2.12 427 | -- None : 0.76 428 | -- right : 1.39 429 | -- left : 0.40 430 | 431 | ('red', 'left', None, 'right') 432 | -- forward : -11.79 433 | -- None : 1.58 434 | -- right : 1.77 435 | -- left : -10.06 436 | 437 | ('red', 'forward', 'right', 'left') 438 | -- forward : -8.72 439 | -- None : 1.68 440 | -- right : 0.56 441 | -- left : -26.26 442 | 443 | ('red', 'forward', 'left', 'right') 444 | -- forward : -8.92 445 | -- None : 1.60 446 | -- right : 1.73 447 | -- left : -5.31 448 | 449 | ('red', 'left', 'left', 'right') 450 | -- forward : -14.26 451 | -- None : 2.26 452 | -- right : 1.83 453 | -- left : -9.62 454 | 455 | ('green', 'left', 'left', 'right') 456 | -- forward : 0.60 457 | -- None : 0.15 458 | -- right : 1.62 459 | -- left : 0.41 460 | 461 | ('green', 'right', None, 'left') 462 | -- forward : 0.92 463 | -- None : -4.99 464 | -- right : 0.29 465 | -- left : -20.30 466 | 467 | ('green', 'forward', 'left', 'forward') 468 | -- forward : 1.48 469 | -- None : -3.60 470 | -- right : 1.28 471 | -- left : -17.45 472 | 473 | ('red', 'forward', 'right', 'forward') 474 | -- forward : -17.21 475 | -- None : 0.95 476 | -- right : 0.71 477 | -- left : -9.09 478 | 479 | ('green', 'forward', 'right', 'left') 480 | -- forward : 1.19 481 | -- None : -4.63 482 | -- right : 1.06 483 | -- left : -14.83 484 | 485 | ('red', 'forward', 'forward', 'right') 486 | -- forward : -39.87 487 | -- None : 2.38 488 | -- right : -18.63 489 | -- left : -39.51 490 | 491 | ('green', 'forward', 'forward', 'right') 492 | -- forward : 1.00 493 | -- None : -4.85 494 | -- right : 1.71 495 | -- left : -19.42 496 | 497 | ('green', None, None, 'forward') 498 | -- forward : 1.80 499 | -- None : -5.00 500 | -- right : 1.05 501 | -- left : 0.20 502 | 503 | ('red', None, 'left', 'right') 504 | -- forward : -10.70 505 | -- None : 1.61 506 | -- right : 1.56 507 | -- left : -18.00 508 | 509 | ('red', 'forward', 'left', 'forward') 510 | -- forward : -17.23 511 | -- None : 2.09 512 | -- right : 0.51 513 | -- left : -9.26 514 | 515 | ('red', 'right', 'left', 'forward') 516 | -- forward : -14.87 517 | -- None : 1.27 518 | -- right : 0.33 519 | -- left : -34.78 520 | 521 | ('red', 'right', 'left', 'right') 522 | -- forward : -23.89 523 | -- None : 1.47 524 | -- right : 1.44 525 | -- left : -37.70 526 | 527 | ('red', 'right', None, 'left') 528 | -- forward : -10.51 529 | -- None : 2.19 530 | -- right : 0.32 531 | -- left : -40.14 532 | 533 | ('red', 'right', 'forward', 'right') 534 | -- forward : -37.62 535 | -- None : 1.87 536 | -- right : -9.81 537 | -- left : -19.52 538 | 539 | ('red', None, 'left', 'forward') 540 | -- forward : -10.11 541 | -- None : 1.73 542 | -- right : 0.58 543 | -- left : -10.79 544 | 545 | ('red', 'right', 'left', 'left') 546 | -- 
forward : -23.47 547 | -- None : 1.94 548 | -- right : 0.45 549 | -- left : -20.08 550 | 551 | ('green', None, None, 'right') 552 | -- forward : 0.58 553 | -- None : -4.35 554 | -- right : 1.65 555 | -- left : 0.51 556 | 557 | ('red', 'left', 'forward', 'left') 558 | -- forward : -39.65 559 | -- None : 2.34 560 | -- right : -19.64 561 | -- left : -29.91 562 | 563 | ('green', 'right', 'left', 'left') 564 | -- forward : -0.18 565 | -- None : -4.81 566 | -- right : 0.20 567 | -- left : -16.99 568 | 569 | ('red', 'left', 'right', 'left') 570 | -- forward : -9.73 571 | -- None : 1.28 572 | -- right : 0.83 573 | -- left : -9.19 574 | 575 | ('red', 'forward', 'forward', 'left') 576 | -- forward : -39.16 577 | -- None : 1.90 578 | -- right : -18.90 579 | -- left : -40.04 580 | 581 | -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_improved-learning - Use decay_step (5 Features, 600 rounds, 1260 combinations, Score A+A+).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_improved-learning - Use decay_step (5 Features, 600 rounds, 1260 combinations, Score A+A+).png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_no-learning - Hard_coded (No Training, Score A+A+).csv: -------------------------------------------------------------------------------- 1 | trial,testing,parameters,initial_deadline,final_deadline,net_reward,actions,success,actions-0,actions-1,actions-2,actions-3,actions-4 2 | 1,False,"{'a': 0.5, 'e': 1.0}",30,12,33.06758841260718,"{0: 18, 1: 0, 2: 0, 3: 0, 4: 0}",1,18,0,0,0,0 3 | 2,False,"{'a': 0.5, 'e': 1.0}",30,15,28.82242178315615,"{0: 15, 1: 0, 2: 0, 3: 0, 4: 0}",1,15,0,0,0,0 4 | 3,False,"{'a': 0.5, 'e': 1.0}",35,14,35.797476554997594,"{0: 21, 1: 0, 2: 0, 3: 0, 4: 0}",1,21,0,0,0,0 5 | 4,False,"{'a': 0.5, 'e': 1.0}",20,11,18.342712827742112,"{0: 9, 1: 0, 2: 0, 3: 0, 4: 0}",1,9,0,0,0,0 6 | 5,False,"{'a': 0.5, 'e': 1.0}",25,19,13.384489245576548,"{0: 6, 1: 0, 2: 0, 3: 0, 4: 0}",1,6,0,0,0,0 7 | 6,False,"{'a': 0.5, 'e': 1.0}",30,13,29.51826180137332,"{0: 17, 1: 0, 2: 0, 3: 0, 4: 0}",1,17,0,0,0,0 8 | 7,False,"{'a': 0.5, 'e': 1.0}",20,11,16.69571533359769,"{0: 9, 1: 0, 2: 0, 3: 0, 4: 0}",1,9,0,0,0,0 9 | 8,False,"{'a': 0.5, 'e': 1.0}",35,16,33.243401492367795,"{0: 19, 1: 0, 2: 0, 3: 0, 4: 0}",1,19,0,0,0,0 10 | 9,False,"{'a': 0.5, 'e': 1.0}",25,10,28.473616722294928,"{0: 15, 1: 0, 2: 0, 3: 0, 4: 0}",1,15,0,0,0,0 11 | 10,False,"{'a': 0.5, 'e': 1.0}",25,19,12.852170444047147,"{0: 6, 1: 0, 2: 0, 3: 0, 4: 0}",1,6,0,0,0,0 12 | 11,False,"{'a': 0.5, 'e': 1.0}",30,16,27.682872080545884,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 13 | 12,False,"{'a': 0.5, 'e': 1.0}",40,25,32.86175194939426,"{0: 15, 1: 0, 2: 0, 3: 0, 4: 0}",1,15,0,0,0,0 14 | 13,False,"{'a': 0.5, 'e': 1.0}",35,8,49.07333906357646,"{0: 27, 1: 0, 2: 0, 3: 0, 4: 0}",1,27,0,0,0,0 15 | 14,False,"{'a': 0.5, 'e': 1.0}",30,19,25.426442168011256,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 16 | 15,False,"{'a': 0.5, 'e': 1.0}",40,23,32.63825988758093,"{0: 17, 1: 0, 2: 0, 3: 0, 4: 0}",1,17,0,0,0,0 17 | 16,False,"{'a': 0.5, 'e': 1.0}",25,19,10.773034999904011,"{0: 6, 1: 0, 2: 0, 3: 0, 4: 0}",1,6,0,0,0,0 18 | 17,False,"{'a': 0.5, 'e': 1.0}",40,23,31.549410312019795,"{0: 17, 1: 0, 2: 0, 3: 0, 4: 0}",1,17,0,0,0,0 19 | 18,False,"{'a': 0.5, 'e': 1.0}",20,12,18.211642107785092,"{0: 8, 1: 0, 2: 0, 3: 
0, 4: 0}",1,8,0,0,0,0 20 | 19,False,"{'a': 0.5, 'e': 1.0}",25,7,28.73473028685455,"{0: 18, 1: 0, 2: 0, 3: 0, 4: 0}",1,18,0,0,0,0 21 | 20,False,"{'a': 0.5, 'e': 1.0}",30,9,39.77737531346862,"{0: 21, 1: 0, 2: 0, 3: 0, 4: 0}",1,21,0,0,0,0 22 | 1,True,"{'a': 0.5, 'e': 1.0}",25,15,20.571941335719337,"{0: 10, 1: 0, 2: 0, 3: 0, 4: 0}",1,10,0,0,0,0 23 | 2,True,"{'a': 0.5, 'e': 1.0}",40,20,38.59241742576325,"{0: 20, 1: 0, 2: 0, 3: 0, 4: 0}",1,20,0,0,0,0 24 | 3,True,"{'a': 0.5, 'e': 1.0}",25,15,18.581482818732667,"{0: 10, 1: 0, 2: 0, 3: 0, 4: 0}",1,10,0,0,0,0 25 | 4,True,"{'a': 0.5, 'e': 1.0}",20,14,10.662222641784936,"{0: 6, 1: 0, 2: 0, 3: 0, 4: 0}",1,6,0,0,0,0 26 | 5,True,"{'a': 0.5, 'e': 1.0}",20,11,19.888586368874243,"{0: 9, 1: 0, 2: 0, 3: 0, 4: 0}",1,9,0,0,0,0 27 | 6,True,"{'a': 0.5, 'e': 1.0}",25,14,19.841445498137695,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 28 | 7,True,"{'a': 0.5, 'e': 1.0}",35,13,43.9329844688073,"{0: 22, 1: 0, 2: 0, 3: 0, 4: 0}",1,22,0,0,0,0 29 | 8,True,"{'a': 0.5, 'e': 1.0}",20,13,15.483412859712745,"{0: 7, 1: 0, 2: 0, 3: 0, 4: 0}",1,7,0,0,0,0 30 | 9,True,"{'a': 0.5, 'e': 1.0}",20,12,14.496636412980038,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 0}",1,8,0,0,0,0 31 | 10,True,"{'a': 0.5, 'e': 1.0}",20,16,8.377899003666988,"{0: 4, 1: 0, 2: 0, 3: 0, 4: 0}",1,4,0,0,0,0 32 | 11,True,"{'a': 0.5, 'e': 1.0}",35,26,14.848437819541239,"{0: 9, 1: 0, 2: 0, 3: 0, 4: 0}",1,9,0,0,0,0 33 | 12,True,"{'a': 0.5, 'e': 1.0}",30,18,18.726406136853264,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 34 | 13,True,"{'a': 0.5, 'e': 1.0}",35,18,32.30188096093217,"{0: 17, 1: 0, 2: 0, 3: 0, 4: 0}",1,17,0,0,0,0 35 | 14,True,"{'a': 0.5, 'e': 1.0}",40,22,32.49153249801247,"{0: 18, 1: 0, 2: 0, 3: 0, 4: 0}",1,18,0,0,0,0 36 | 15,True,"{'a': 0.5, 'e': 1.0}",20,8,25.849924696183372,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 37 | 16,True,"{'a': 0.5, 'e': 1.0}",30,12,32.91204537503226,"{0: 18, 1: 0, 2: 0, 3: 0, 4: 0}",1,18,0,0,0,0 38 | 17,True,"{'a': 0.5, 'e': 1.0}",35,21,25.49636251505765,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 39 | 18,True,"{'a': 0.5, 'e': 1.0}",25,14,23.325520538404042,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 40 | 19,True,"{'a': 0.5, 'e': 1.0}",40,18,35.30399984685637,"{0: 22, 1: 0, 2: 0, 3: 0, 4: 0}",1,22,0,0,0,0 41 | 20,True,"{'a': 0.5, 'e': 1.0}",20,4,30.927030801625573,"{0: 16, 1: 0, 2: 0, 3: 0, 4: 0}",1,16,0,0,0,0 42 | 21,True,"{'a': 0.5, 'e': 1.0}",40,14,43.61962579352871,"{0: 26, 1: 0, 2: 0, 3: 0, 4: 0}",1,26,0,0,0,0 43 | 22,True,"{'a': 0.5, 'e': 1.0}",20,12,13.074089240276436,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 0}",1,8,0,0,0,0 44 | 23,True,"{'a': 0.5, 'e': 1.0}",30,15,26.883590071729447,"{0: 15, 1: 0, 2: 0, 3: 0, 4: 0}",1,15,0,0,0,0 45 | 24,True,"{'a': 0.5, 'e': 1.0}",30,10,35.04048202149212,"{0: 20, 1: 0, 2: 0, 3: 0, 4: 0}",1,20,0,0,0,0 46 | 25,True,"{'a': 0.5, 'e': 1.0}",30,11,39.57745699212265,"{0: 19, 1: 0, 2: 0, 3: 0, 4: 0}",1,19,0,0,0,0 47 | 26,True,"{'a': 0.5, 'e': 1.0}",35,22,23.875272811238855,"{0: 13, 1: 0, 2: 0, 3: 0, 4: 0}",1,13,0,0,0,0 48 | 27,True,"{'a': 0.5, 'e': 1.0}",30,15,29.935316503236205,"{0: 15, 1: 0, 2: 0, 3: 0, 4: 0}",1,15,0,0,0,0 49 | 28,True,"{'a': 0.5, 'e': 1.0}",20,13,13.474301495081695,"{0: 7, 1: 0, 2: 0, 3: 0, 4: 0}",1,7,0,0,0,0 50 | 29,True,"{'a': 0.5, 'e': 1.0}",20,13,15.436409891271136,"{0: 7, 1: 0, 2: 0, 3: 0, 4: 0}",1,7,0,0,0,0 51 | 30,True,"{'a': 0.5, 'e': 1.0}",20,16,6.781421527763628,"{0: 4, 1: 0, 2: 0, 3: 0, 4: 0}",1,4,0,0,0,0 52 | 31,True,"{'a': 0.5, 'e': 1.0}",30,16,25.874668309855537,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 53 | 
32,True,"{'a': 0.5, 'e': 1.0}",35,24,22.125050560710534,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 54 | 33,True,"{'a': 0.5, 'e': 1.0}",20,12,15.664630572040444,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 0}",1,8,0,0,0,0 55 | 34,True,"{'a': 0.5, 'e': 1.0}",20,2,30.60534839927367,"{0: 18, 1: 0, 2: 0, 3: 0, 4: 0}",1,18,0,0,0,0 56 | 35,True,"{'a': 0.5, 'e': 1.0}",20,6,27.178371241437656,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 57 | 36,True,"{'a': 0.5, 'e': 1.0}",35,23,22.046052155103737,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 58 | 37,True,"{'a': 0.5, 'e': 1.0}",35,21,25.393428012693978,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 59 | 38,True,"{'a': 0.5, 'e': 1.0}",25,15,19.969523620834295,"{0: 10, 1: 0, 2: 0, 3: 0, 4: 0}",1,10,0,0,0,0 60 | 39,True,"{'a': 0.5, 'e': 1.0}",20,8,20.08618591757202,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 61 | 40,True,"{'a': 0.5, 'e': 1.0}",30,16,27.351258395986772,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 62 | 41,True,"{'a': 0.5, 'e': 1.0}",25,17,16.386420456468937,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 0}",1,8,0,0,0,0 63 | 42,True,"{'a': 0.5, 'e': 1.0}",20,6,23.104984236247223,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 64 | 43,True,"{'a': 0.5, 'e': 1.0}",25,16,18.419123134561318,"{0: 9, 1: 0, 2: 0, 3: 0, 4: 0}",1,9,0,0,0,0 65 | 44,True,"{'a': 0.5, 'e': 1.0}",30,16,30.480150860181674,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 66 | 45,True,"{'a': 0.5, 'e': 1.0}",25,16,16.060063248944125,"{0: 9, 1: 0, 2: 0, 3: 0, 4: 0}",1,9,0,0,0,0 67 | 46,True,"{'a': 0.5, 'e': 1.0}",20,8,23.718803133972944,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 68 | 47,True,"{'a': 0.5, 'e': 1.0}",20,7,23.17722733101669,"{0: 13, 1: 0, 2: 0, 3: 0, 4: 0}",1,13,0,0,0,0 69 | 48,True,"{'a': 0.5, 'e': 1.0}",25,12,23.099903881943742,"{0: 13, 1: 0, 2: 0, 3: 0, 4: 0}",1,13,0,0,0,0 70 | 49,True,"{'a': 0.5, 'e': 1.0}",20,12,18.678924773412273,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 0}",1,8,0,0,0,0 71 | 50,True,"{'a': 0.5, 'e': 1.0}",30,12,31.269530860514863,"{0: 18, 1: 0, 2: 0, 3: 0, 4: 0}",1,18,0,0,0,0 72 | 51,True,"{'a': 0.5, 'e': 1.0}",25,16,20.365140923624438,"{0: 9, 1: 0, 2: 0, 3: 0, 4: 0}",1,9,0,0,0,0 73 | 52,True,"{'a': 0.5, 'e': 1.0}",25,15,15.872453466399037,"{0: 10, 1: 0, 2: 0, 3: 0, 4: 0}",1,10,0,0,0,0 74 | 53,True,"{'a': 0.5, 'e': 1.0}",25,12,23.521080734544014,"{0: 13, 1: 0, 2: 0, 3: 0, 4: 0}",1,13,0,0,0,0 75 | 54,True,"{'a': 0.5, 'e': 1.0}",30,19,24.946499333325015,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 76 | 55,True,"{'a': 0.5, 'e': 1.0}",20,6,25.536202170378935,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 77 | 56,True,"{'a': 0.5, 'e': 1.0}",30,16,24.663112137265774,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 78 | 57,True,"{'a': 0.5, 'e': 1.0}",40,25,27.11468785693544,"{0: 15, 1: 0, 2: 0, 3: 0, 4: 0}",1,15,0,0,0,0 79 | 58,True,"{'a': 0.5, 'e': 1.0}",25,18,14.467861105855654,"{0: 7, 1: 0, 2: 0, 3: 0, 4: 0}",1,7,0,0,0,0 80 | 59,True,"{'a': 0.5, 'e': 1.0}",30,13,28.812281592731814,"{0: 17, 1: 0, 2: 0, 3: 0, 4: 0}",1,17,0,0,0,0 81 | 60,True,"{'a': 0.5, 'e': 1.0}",20,11,16.118420735776674,"{0: 9, 1: 0, 2: 0, 3: 0, 4: 0}",1,9,0,0,0,0 82 | 61,True,"{'a': 0.5, 'e': 1.0}",20,6,29.013109314903122,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 83 | 62,True,"{'a': 0.5, 'e': 1.0}",30,16,27.84274240364565,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 84 | 63,True,"{'a': 0.5, 'e': 1.0}",40,21,36.18893674107318,"{0: 19, 1: 0, 2: 0, 3: 0, 4: 0}",1,19,0,0,0,0 85 | 64,True,"{'a': 0.5, 'e': 1.0}",30,22,13.653481666024817,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 0}",1,8,0,0,0,0 86 | 65,True,"{'a': 0.5, 
'e': 1.0}",30,19,22.224177621352197,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 87 | 66,True,"{'a': 0.5, 'e': 1.0}",20,5,26.96952536202997,"{0: 15, 1: 0, 2: 0, 3: 0, 4: 0}",1,15,0,0,0,0 88 | 67,True,"{'a': 0.5, 'e': 1.0}",25,17,14.014417903958124,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 0}",1,8,0,0,0,0 89 | 68,True,"{'a': 0.5, 'e': 1.0}",25,17,18.54674391187043,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 0}",1,8,0,0,0,0 90 | 69,True,"{'a': 0.5, 'e': 1.0}",35,8,51.072643461837714,"{0: 27, 1: 0, 2: 0, 3: 0, 4: 0}",1,27,0,0,0,0 91 | 70,True,"{'a': 0.5, 'e': 1.0}",20,6,26.474395715369706,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 92 | 71,True,"{'a': 0.5, 'e': 1.0}",20,12,14.25841875629205,"{0: 8, 1: 0, 2: 0, 3: 0, 4: 0}",1,8,0,0,0,0 93 | 72,True,"{'a': 0.5, 'e': 1.0}",40,28,24.168887344554562,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 94 | 73,True,"{'a': 0.5, 'e': 1.0}",35,13,44.80624866153382,"{0: 22, 1: 0, 2: 0, 3: 0, 4: 0}",1,22,0,0,0,0 95 | 74,True,"{'a': 0.5, 'e': 1.0}",20,2,28.719541372970134,"{0: 18, 1: 0, 2: 0, 3: 0, 4: 0}",1,18,0,0,0,0 96 | 75,True,"{'a': 0.5, 'e': 1.0}",25,16,16.04786964892488,"{0: 9, 1: 0, 2: 0, 3: 0, 4: 0}",1,9,0,0,0,0 97 | 76,True,"{'a': 0.5, 'e': 1.0}",20,14,12.303663575976763,"{0: 6, 1: 0, 2: 0, 3: 0, 4: 0}",1,6,0,0,0,0 98 | 77,True,"{'a': 0.5, 'e': 1.0}",20,14,14.99590290591593,"{0: 6, 1: 0, 2: 0, 3: 0, 4: 0}",1,6,0,0,0,0 99 | 78,True,"{'a': 0.5, 'e': 1.0}",30,19,22.506382023542276,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 100 | 79,True,"{'a': 0.5, 'e': 1.0}",25,18,14.67322968809553,"{0: 7, 1: 0, 2: 0, 3: 0, 4: 0}",1,7,0,0,0,0 101 | 80,True,"{'a': 0.5, 'e': 1.0}",35,13,38.83013039255238,"{0: 22, 1: 0, 2: 0, 3: 0, 4: 0}",1,22,0,0,0,0 102 | 81,True,"{'a': 0.5, 'e': 1.0}",20,16,7.922413464558931,"{0: 4, 1: 0, 2: 0, 3: 0, 4: 0}",1,4,0,0,0,0 103 | 82,True,"{'a': 0.5, 'e': 1.0}",20,9,18.55270402113851,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 104 | 83,True,"{'a': 0.5, 'e': 1.0}",25,13,23.535765327470415,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 105 | 84,True,"{'a': 0.5, 'e': 1.0}",40,29,18.128690527387942,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 106 | 85,True,"{'a': 0.5, 'e': 1.0}",25,10,27.80817819919861,"{0: 15, 1: 0, 2: 0, 3: 0, 4: 0}",1,15,0,0,0,0 107 | 86,True,"{'a': 0.5, 'e': 1.0}",25,15,18.39383521134338,"{0: 10, 1: 0, 2: 0, 3: 0, 4: 0}",1,10,0,0,0,0 108 | 87,True,"{'a': 0.5, 'e': 1.0}",20,14,13.24864866738165,"{0: 6, 1: 0, 2: 0, 3: 0, 4: 0}",1,6,0,0,0,0 109 | 88,True,"{'a': 0.5, 'e': 1.0}",45,25,38.431258055516835,"{0: 20, 1: 0, 2: 0, 3: 0, 4: 0}",1,20,0,0,0,0 110 | 89,True,"{'a': 0.5, 'e': 1.0}",30,18,21.244749583256997,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 111 | 90,True,"{'a': 0.5, 'e': 1.0}",35,17,31.663363388857924,"{0: 18, 1: 0, 2: 0, 3: 0, 4: 0}",1,18,0,0,0,0 112 | 91,True,"{'a': 0.5, 'e': 1.0}",30,19,21.228221790733958,"{0: 11, 1: 0, 2: 0, 3: 0, 4: 0}",1,11,0,0,0,0 113 | 92,True,"{'a': 0.5, 'e': 1.0}",30,18,24.087972114640397,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 114 | 93,True,"{'a': 0.5, 'e': 1.0}",20,6,27.787690774021012,"{0: 14, 1: 0, 2: 0, 3: 0, 4: 0}",1,14,0,0,0,0 115 | 94,True,"{'a': 0.5, 'e': 1.0}",40,14,52.56735045993944,"{0: 26, 1: 0, 2: 0, 3: 0, 4: 0}",1,26,0,0,0,0 116 | 95,True,"{'a': 0.5, 'e': 1.0}",20,10,17.19431131489479,"{0: 10, 1: 0, 2: 0, 3: 0, 4: 0}",1,10,0,0,0,0 117 | 96,True,"{'a': 0.5, 'e': 1.0}",30,12,33.1290638986342,"{0: 18, 1: 0, 2: 0, 3: 0, 4: 0}",1,18,0,0,0,0 118 | 97,True,"{'a': 0.5, 'e': 1.0}",30,17,23.284130570663347,"{0: 13, 1: 0, 2: 0, 3: 0, 4: 0}",1,13,0,0,0,0 119 | 98,True,"{'a': 0.5, 'e': 
1.0}",30,18,19.752987008617037,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 120 | 99,True,"{'a': 0.5, 'e': 1.0}",30,18,24.91989490987775,"{0: 12, 1: 0, 2: 0, 3: 0, 4: 0}",1,12,0,0,0,0 121 | 100,True,"{'a': 0.5, 'e': 1.0}",40,23,36.45434100144582,"{0: 17, 1: 0, 2: 0, 3: 0, 4: 0}",1,17,0,0,0,0 122 | -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_no-learning - Hard_coded (No Training, Score A+A+).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_no-learning - Hard_coded (No Training, Score A+A+).png -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_no-learning.csv: -------------------------------------------------------------------------------- 1 | trial,testing,parameters,initial_deadline,final_deadline,net_reward,actions,success,actions-0,actions-1,actions-2,actions-3,actions-4 2 | 1,False,"{'a': 0.5, 'e': 1.0}",20,0,-93.12602506810526,"{0: 10, 1: 4, 2: 5, 3: 0, 4: 1}",0,10,4,5,0,1 3 | 2,False,"{'a': 0.5, 'e': 1.0}",35,0,-224.7867807722711,"{0: 20, 1: 3, 2: 7, 3: 2, 4: 3}",0,20,3,7,2,3 4 | 3,False,"{'a': 0.5, 'e': 1.0}",20,0,-83.16346956193723,"{0: 13, 1: 2, 2: 3, 3: 1, 4: 1}",0,13,2,3,1,1 5 | 4,False,"{'a': 0.5, 'e': 1.0}",20,2,-118.20606306365502,"{0: 10, 1: 2, 2: 2, 3: 3, 4: 1}",1,10,2,2,3,1 6 | 5,False,"{'a': 0.5, 'e': 1.0}",20,0,-50.348155792859174,"{0: 14, 1: 2, 2: 2, 3: 2, 4: 0}",0,14,2,2,2,0 7 | 6,False,"{'a': 0.5, 'e': 1.0}",25,0,-126.19139831340205,"{0: 17, 1: 1, 2: 4, 3: 1, 4: 2}",0,17,1,4,1,2 8 | 7,False,"{'a': 0.5, 'e': 1.0}",30,0,-241.81139942467792,"{0: 16, 1: 2, 2: 5, 3: 4, 4: 3}",0,16,2,5,4,3 9 | 8,False,"{'a': 0.5, 'e': 1.0}",25,18,-23.244144881868444,"{0: 5, 1: 0, 2: 1, 3: 1, 4: 0}",1,5,0,1,1,0 10 | 9,False,"{'a': 0.5, 'e': 1.0}",25,0,-106.32750483038768,"{0: 17, 1: 2, 2: 3, 3: 2, 4: 1}",0,17,2,3,2,1 11 | 10,False,"{'a': 0.5, 'e': 1.0}",25,0,-163.73641463036182,"{0: 16, 1: 1, 2: 3, 3: 3, 4: 2}",0,16,1,3,3,2 12 | 11,False,"{'a': 0.5, 'e': 1.0}",30,0,-219.54027010991666,"{0: 15, 1: 3, 2: 6, 3: 4, 4: 2}",0,15,3,6,4,2 13 | 12,False,"{'a': 0.5, 'e': 1.0}",20,0,-74.22502580169018,"{0: 11, 1: 4, 2: 4, 3: 1, 4: 0}",0,11,4,4,1,0 14 | 13,False,"{'a': 0.5, 'e': 1.0}",30,0,-198.21845502420248,"{0: 18, 1: 5, 2: 3, 3: 0, 4: 4}",0,18,5,3,0,4 15 | 14,False,"{'a': 0.5, 'e': 1.0}",30,0,-183.46491094960774,"{0: 17, 1: 2, 2: 7, 3: 2, 4: 2}",0,17,2,7,2,2 16 | 15,False,"{'a': 0.5, 'e': 1.0}",25,0,-72.02077381522294,"{0: 19, 1: 0, 2: 5, 3: 0, 4: 1}",0,19,0,5,0,1 17 | 16,False,"{'a': 0.5, 'e': 1.0}",30,0,-188.27007595388818,"{0: 15, 1: 3, 2: 7, 3: 4, 4: 1}",0,15,3,7,4,1 18 | 17,False,"{'a': 0.5, 'e': 1.0}",20,0,-157.6823404818581,"{0: 12, 1: 1, 2: 3, 3: 1, 4: 3}",0,12,1,3,1,3 19 | 18,False,"{'a': 0.5, 'e': 1.0}",25,0,-207.03320810169873,"{0: 13, 1: 0, 2: 8, 3: 1, 4: 3}",0,13,0,8,1,3 20 | 19,False,"{'a': 0.5, 'e': 1.0}",20,0,-161.6042455846132,"{0: 9, 1: 2, 2: 6, 3: 1, 4: 2}",0,9,2,6,1,2 21 | 20,False,"{'a': 0.5, 'e': 1.0}",30,0,-119.95064566213982,"{0: 17, 1: 3, 2: 7, 3: 3, 4: 0}",0,17,3,7,3,0 22 | 1,True,"{'a': 0.5, 'e': 1.0}",30,0,-178.88977155876776,"{0: 16, 1: 4, 2: 6, 3: 2, 4: 2}",0,16,4,6,2,2 23 | 2,True,"{'a': 0.5, 'e': 1.0}",25,0,-95.59327962525288,"{0: 17, 1: 1, 2: 5, 3: 1, 4: 1}",0,17,1,5,1,1 24 | 3,True,"{'a': 0.5, 'e': 1.0}",30,0,-148.06170341031682,"{0: 15, 1: 5, 2: 8, 3: 1, 4: 1}",0,15,5,8,1,1 25 | 4,True,"{'a': 0.5, 'e': 
1.0}",30,0,-150.00905412255452,"{0: 19, 1: 3, 2: 4, 3: 2, 4: 2}",0,19,3,4,2,2 26 | 5,True,"{'a': 0.5, 'e': 1.0}",20,0,-149.01708846543167,"{0: 13, 1: 0, 2: 2, 3: 3, 4: 2}",0,13,0,2,3,2 27 | 6,True,"{'a': 0.5, 'e': 1.0}",20,0,-40.865305920984476,"{0: 14, 1: 1, 2: 5, 3: 0, 4: 0}",0,14,1,5,0,0 28 | 7,True,"{'a': 0.5, 'e': 1.0}",30,1,-184.67641413463835,"{0: 16, 1: 1, 2: 8, 3: 2, 4: 2}",1,16,1,8,2,2 29 | 8,True,"{'a': 0.5, 'e': 1.0}",25,0,-62.090328754346146,"{0: 17, 1: 2, 2: 5, 3: 1, 4: 0}",0,17,2,5,1,0 30 | 9,True,"{'a': 0.5, 'e': 1.0}",20,4,-79.63840897954638,"{0: 12, 1: 1, 2: 1, 3: 0, 4: 2}",1,12,1,1,0,2 31 | 10,True,"{'a': 0.5, 'e': 1.0}",20,0,-116.01642945796026,"{0: 9, 1: 2, 2: 6, 3: 3, 4: 0}",0,9,2,6,3,0 32 | -------------------------------------------------------------------------------- /P4_Smart_Cab/logs/sim_no-learning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/logs/sim_no-learning.png -------------------------------------------------------------------------------- /P4_Smart_Cab/project_description.md: -------------------------------------------------------------------------------- 1 | # Content: Reinforcement Learning 2 | ## Project: Train a Smartcab How to Drive 3 | 4 | ## Project Overview 5 | 6 | In this project you will apply reinforcement learning techniques for a self-driving agent in a simplified world to aid it in effectively reaching its destinations in the allotted time. You will first investigate the environment the agent operates in by constructing a very basic driving implementation. Once your agent is successful at operating within the environment, you will then identify each possible state the agent can be in when considering such things as traffic lights and oncoming traffic at each intersection. With states identified, you will then implement a Q-Learning algorithm for the self-driving agent to guide the agent towards its destination within the allotted time. Finally, you will improve upon the Q-Learning algorithm to find the best configuration of learning and exploration factors to ensure the self-driving agent is reaching its destinations with consistently positive results. 7 | 8 | ## Description 9 | In the not-so-distant future, taxicab companies across the United States no longer employ human drivers to operate their fleet of vehicles. Instead, the taxicabs are operated by self-driving agents, known as *smartcabs*, to transport people from one location to another within the cities those companies operate. In major metropolitan areas, such as Chicago, New York City, and San Francisco, an increasing number of people have come to depend on *smartcabs* to get to where they need to go as safely and reliably as possible. Although *smartcabs* have become the transport of choice, concerns have arose that a self-driving agent might not be as safe or reliable as human drivers, particularly when considering city traffic lights and other vehicles. To alleviate these concerns, your task as an employee for a national taxicab company is to use reinforcement learning techniques to construct a demonstration of a *smartcab* operating in real-time to prove that both safety and reliability can be achieved. 
10 | 11 | ## Software Requirements 12 | This project uses the following software and Python libraries: 13 | 14 | - [Python 2.7](https://www.python.org/download/releases/2.7/) 15 | - [NumPy](http://www.numpy.org/) 16 | - [pandas](http://pandas.pydata.org/) 17 | - [matplotlib](http://matplotlib.org/) 18 | - [PyGame](http://pygame.org/) 19 | 20 | If you do not have Python installed yet, it is highly recommended that you install the [Anaconda](http://continuum.io/downloads) distribution of Python, which already has the above packages and more included. Make sure that you select the Python 2.7 installer and not the Python 3.x installer. `pygame` can then be installed using one of the following commands: 21 | 22 | Mac: `conda install -c https://conda.anaconda.org/quasiben pygame` 23 | Windows: `conda install -c https://conda.anaconda.org/tlatorre pygame` 24 | Linux: `conda install -c https://conda.anaconda.org/prkrekel pygame` 25 | 26 | ## Fixing Common PyGame Problems 27 | 28 | The PyGame library can in some cases require a bit of troubleshooting to work correctly for this project. While the PyGame aspect of the project is not required for a successful submission (you can complete the project without a visual simulation, although it is more difficult), it is very helpful to have it working! If you encounter an issue with PyGame, first see the helpful links below, developed by communities of users working with the library: 29 | - [Getting Started](https://www.pygame.org/wiki/GettingStarted) 30 | - [PyGame Information](http://www.pygame.org/wiki/info) 31 | - [Google Group](https://groups.google.com/forum/#!forum/pygame-mirror-on-google-groups) 32 | - [PyGame subreddit](https://www.reddit.com/r/pygame/) 33 | 34 | ### Problems most often reported by students 35 | _"PyGame won't install on my machine; there was an issue with the installation."_ 36 | **Solution:** As recommended for previous projects, Udacity suggests using the Anaconda distribution of Python, which lets you install PyGame through the `conda`-specific commands above. 37 | 38 | _"I'm seeing a black screen when running the code; output says that it can't load car images."_ 39 | **Solution:** The code will not operate correctly unless it is run from the top-level directory for `smartcab`. The top-level directory is the one that contains the **README** and the project notebook. 40 | 41 | If you continue to have problems with the project code in regard to PyGame, you can also [use the discussion forums](https://discussions.udacity.com/c/nd009-reinforcement-learning) to find posts from students who encountered issues that you may be experiencing. Additionally, you can seek help from the many students in the [MLND Student Slack Community](http://mlnd.slack.com). 42 | 43 | ## Starting the Project 44 | 45 | For this assignment, you can find the `smartcab` folder containing the necessary project files on the [Machine Learning projects GitHub](https://github.com/udacity/machine-learning), under the `projects` folder. You may download all of the files for projects we'll use in this Nanodegree program directly from this repo. Please make sure that you use the most recent version of the project files when completing a project! 46 | 47 | This project contains three directories: 48 | 49 | - `/logs/`: This folder will contain all log files generated by the simulation when specific prerequisites are met.
50 | - `/images/`: This folder contains various images of cars to be used in the graphical user interface. You will not need to modify or create any files in this directory. 51 | - `/smartcab/`: This folder contains the Python scripts that create the environment, graphical user interface, the simulation, and the agents. You will not need to modify or create any files in this directory except for `agent.py`. 52 | 53 | It also contains two files: 54 | - `smartcab.ipynb`: This is the main file where you will answer questions and provide an analysis for your work. 55 | - `visuals.py`: This Python script provides supplementary visualizations for the analysis. Do not modify. 56 | 57 | Finally, in `/smartcab/` are the following four files: 58 | - **Modify:** 59 | - `agent.py`: This is the main Python file where you will be performing your work on the project. 60 | - **Do not modify:** 61 | - `environment.py`: This Python file will create the *smartcab* environment. 62 | - `planner.py`: This Python file creates a high-level planner for the agent to follow towards a set goal. 63 | - `simulator.py`: This Python file creates the simulation and graphical user interface. 64 | 65 | ### Running the Code 66 | In a terminal or command window, navigate to the top-level project directory `smartcab/` (the one that contains the three project directories) and run one of the following commands: 67 | 68 | `python smartcab/agent.py` or 69 | `python -m smartcab.agent` 70 | 71 | This will run the `agent.py` file and execute your implemented agent code in the environment. Additionally, use the command `jupyter notebook smartcab.ipynb` from this same directory to open up a browser window or tab to work with your analysis notebook. Alternatively, you can use the command `jupyter notebook` or `ipython notebook` and navigate to the notebook file in the browser window that opens. Follow the instructions in the notebook and answer each question presented to successfully complete the implementation necessary for your `agent.py` agent file. A **README** file has also been provided with the project files, which may contain additional necessary information or instructions for the project. 72 | 73 | ## Definitions 74 | 75 | ### Environment 76 | The *smartcab* operates in an ideal, grid-like city (similar to New York City), with roads going in the North-South and East-West directions. Other vehicles will certainly be present on the road, but there will be no pedestrians to be concerned with. At each intersection there is a traffic light that either allows traffic in the North-South direction or the East-West direction. U.S. Right-of-Way rules apply: 77 | - On a green light, a left turn is permitted if there is no oncoming traffic making a right turn or coming straight through the intersection. 78 | - On a red light, a right turn is permitted if no oncoming traffic is approaching from your left through the intersection. 79 | To understand how to correctly yield to oncoming traffic when turning left, you may refer to [this official drivers' education video](https://www.youtube.com/watch?v=TW0Eq2Q-9Ac), or [this passionate exposition](https://www.youtube.com/watch?v=0EdkxI6NeuA). 80 | 81 | ### Inputs and Outputs 82 | Assume that the *smartcab* is assigned a route plan based on the passengers' starting location and destination. The route is split at each intersection into waypoints, and you may assume that the *smartcab*, at any instant, is at some intersection in the world.
Therefore, the next waypoint to the destination, assuming the destination has not already been reached, is one intersection away in one direction (North, South, East, or West). The *smartcab* has only an egocentric view of the intersection it is at: It can determine the state of the traffic light for its direction of movement, and whether there is a vehicle at the intersection for each of the oncoming directions. For each action, the *smartcab* may either idle at the intersection, or drive to the next intersection to the left, right, or ahead of it. Finally, each trip has a time limit to reach the destination, which decreases with each action taken (the passengers want to get there quickly). If the allotted time becomes zero before reaching the destination, the trip has failed. 83 | 84 | ### Rewards and Goal 85 | The *smartcab* will receive positive or negative rewards based on the action it has taken. As expected, the *smartcab* will receive a small positive reward when making a good action, and a varying amount of negative reward depending on the severity of the traffic violation it would have committed. Based on the rewards and penalties the *smartcab* receives, the self-driving agent implementation should learn an optimal policy for driving on the city roads while obeying traffic rules, avoiding accidents, and reaching passengers' destinations in the allotted time (a minimal sketch of such an update rule appears at the end of this document). 86 | 87 | ## Submitting the Project 88 | 89 | ### Evaluation 90 | Your project will be reviewed by a Udacity reviewer against the **Train a Smartcab to Drive project rubric**. Be sure to review this rubric thoroughly and self-evaluate your project before submission. All criteria found in the rubric must be *meeting specifications* for you to pass. 91 | 92 | ### Submission Files 93 | When you are ready to submit your project, collect the following files and compress them into a single archive for upload. Alternatively, you may supply the following files on your GitHub Repo in a folder named `smartcab` for ease of access: 94 | - The `agent.py` Python file with all code implemented as required in the instructed tasks. 95 | - The `/logs/` folder, which should contain **five** log files produced by your simulation and used in the analysis. 96 | - The `smartcab.ipynb` notebook file with all questions answered and all visualization cells executed and displaying results. 97 | - An **HTML** export of the project notebook with the name **report.html**. This file *must* be present for your project to be evaluated. 98 | 99 | Once you have collected these files and reviewed the project rubric, proceed to the project submission page. 100 | 101 | ### I'm Ready! 102 | When you're ready to submit your project, click on the **Submit Project** button at the bottom of the page. 103 | 104 | If you are having any problems submitting your project or wish to check on the status of your submission, please email us at **machine-support@udacity.com** or visit us in the discussion forums. 105 | 106 | ### What's Next? 107 | You will get an email as soon as your reviewer has feedback for you. In the meantime, review your next project and feel free to get started on it or the courses supporting it!
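As referenced in the Rewards and Goal section above, the following is a minimal sketch of the two pieces the project asks you to implement in `agent.py`: an epsilon-greedy action choice with random tie-breaking, and an alpha-weighted Q-update that uses only the learning rate (no discount factor). Names such as `q_table` are illustrative, not part of the project API; the real implementation lives in `smartcab/agent.py` later in this document.

```python
import random

VALID_ACTIONS = [None, 'forward', 'left', 'right']

def choose_action(q_table, state, epsilon):
    # With probability epsilon, explore by picking any valid action;
    # otherwise exploit, breaking ties between equally good actions at random.
    if random.random() < epsilon:
        return random.choice(VALID_ACTIONS)
    max_q = max(q_table[state].values())
    best = [a for a, q in q_table[state].items() if q == max_q]
    return random.choice(best)

def learn(q_table, state, action, reward, alpha):
    # Running, alpha-weighted average of observed rewards. With no discount
    # factor, the agent learns the expected immediate reward of each
    # state-action pair.
    q_table[state][action] = (1 - alpha) * q_table[state][action] + alpha * reward

# Example: one five-feature state with all Q-values initialised to 0.0.
state = ('green', None, None, None, 'forward')
q_table = {state: dict((a, 0.0) for a in VALID_ACTIONS)}
learn(q_table, state, 'forward', 2.0, alpha=0.5)
print(q_table[state]['forward'])  # 0.5 * 0.0 + 0.5 * 2.0 = 1.0
```

Because each reward already encodes safety (penalties for violations) and reliability (progress toward the waypoint), this immediate-reward update is what the project relies on to learn the policy described above.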
-------------------------------------------------------------------------------- /P4_Smart_Cab/smartcab/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P4_Smart_Cab/smartcab/__init__.py -------------------------------------------------------------------------------- /P4_Smart_Cab/smartcab/agent.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import random 3 | import math 4 | from environment import Agent, Environment 5 | from planner import RoutePlanner 6 | from simulator import Simulator 7 | 8 | 9 | class LearningAgent(Agent): 10 | """ An agent that learns to drive in the Smartcab world. 11 | This is the object you will be modifying. """ 12 | 13 | def __init__(self, env, learning=False, epsilon=1.0, alpha=0.5): 14 | super(LearningAgent, self).__init__(env) # Set the agent in the environment 15 | self.planner = RoutePlanner(self.env, self) # Create a route planner 16 | self.valid_actions = self.env.valid_actions # The set of valid actions 17 | 18 | # Set parameters of the learning agent 19 | self.learning = learning # Whether the agent is expected to learn 20 | self.Q = dict() # Create a Q-table which will be a dictionary of tuples 21 | self.epsilon = epsilon # Random exploration factor 22 | self.alpha = alpha # Learning factor 23 | 24 | ########### 25 | ## TO DO ## 26 | ########### 27 | self.trial_count = 0 # Use as input for epsilon decay functions 28 | 29 | self.overall_states = 384 # Size of Q-Table = States Combination of all features = 2 * 4 * 4 * 4 * 3 30 | self.overall_state_action = 384 * 4 # Combination of all possible state + valid actions = 1536 31 | self.state_action_count = 0 32 | 33 | def reset(self, destination=None, testing=False): 34 | """ The reset function is called at the beginning of each trial. 35 | 'testing' is set to True if testing trials are being used 36 | once training trials have completed. """ 37 | 38 | # Select the destination as the new location to route to 39 | self.planner.route_to(destination) 40 | 41 | ########### 42 | ## TO DO ## 43 | ########### 44 | # Update epsilon using a decay function of your choice 45 | 46 | # Linear: a = 0.05, 20 training trials 47 | def decay_linear(a): 48 | self.epsilon -= a 49 | 50 | # Polynomial: a = 20, 20 training trials 51 | def decay_frac(a): 52 | self.epsilon = 1.0 / a ** 2 53 | 54 | # Exponential: a = 0.995, 600 training trials 55 | def decay_exponential(a): 56 | self.epsilon = a ** self.trial_count 57 | 58 | def decay_exponential_e(a): 59 | self.epsilon = math.e ** (-a * self.trial_count) 60 | 61 | def decay_cosine(a): 62 | self.epsilon = math.cos(a * self.trial_count) 63 | 64 | # Hold epsilon at 1 during all training trials (in order to explore state-action combination more efficiently) 65 | def decay_step(total_trials): 66 | if self.trial_count > total_trials: 67 | self.epsilon = 0.0 68 | 69 | # Update additional class parameters as needed 70 | # If 'testing' is True, set epsilon and alpha to 0, 71 | # make sure retrieve from Q-Table instead of random walk. 72 | if testing: 73 | self.epsilon = 0.0 74 | self.alpha = 0.0 75 | else: 76 | self.trial_count += 1 77 | decay_exponential(0.995) 78 | # decay_linear(0.05) 79 | # decay_step(600) 80 | 81 | return None 82 | 83 | def build_state(self): 84 | """ The build_state function is called when the agent requests data from the 85 | environment. 
The next waypoint, the intersection inputs, and the deadline 86 | are all features available to the agent. """ 87 | 88 | # Collect data about the environment 89 | waypoint = self.planner.next_waypoint() # The next waypoint 90 | inputs = self.env.sense(self) # Visual input - intersection light and traffic 91 | deadline = self.env.get_deadline(self) # Remaining deadline 92 | 93 | ########### 94 | ## TO DO ## 95 | ########### 96 | # Set 'state' as a tuple of relevant data for the agent 97 | # Use 5 base features; the state is the combination of all their values, 384 in total (waypoint has only 3 possible values). 98 | state = (inputs['light'], inputs['oncoming'], inputs['left'], inputs['right'], waypoint) 99 | 100 | return state 101 | 102 | def get_maxQ(self, state): 103 | """ The get_maxQ function is called when the agent is asked to find the 104 | maximum Q-value of all actions based on the 'state' the smartcab is in. """ 105 | 106 | ########### 107 | ## TO DO ## 108 | ########### 109 | # Calculate the maximum Q-value of all actions for a given state 110 | maxQ = max(self.Q[state].values()) 111 | 112 | return maxQ 113 | 114 | def createQ(self, state): 115 | """ The createQ function is called when a state is generated by the agent. """ 116 | 117 | ########### 118 | ## TO DO ## 119 | ########### 120 | # When learning, check if the 'state' is not in the Q-table 121 | # If it is not, create a new dictionary for that state 122 | # Then, for each action available, set the initial Q-value to 0.0 123 | if self.learning: 124 | if state not in self.Q: 125 | self.Q[state] = {None: 0.0, 'forward': 0.0, 'left': 0.0, 'right': 0.0} # Init default Q Score. 126 | return 127 | 128 | def choose_action(self, state): 129 | """ The choose_action function is called when the agent is asked to choose 130 | which action to take, based on the 'state' the smartcab is in. """ 131 | 132 | # Set the agent state and default action 133 | self.state = state 134 | self.next_waypoint = self.planner.next_waypoint() 135 | action = None 136 | 137 | ########### 138 | ## TO DO ## 139 | ########### 140 | # When not learning, choose a random action 141 | # When learning, choose a random action with 'epsilon' probability 142 | # Otherwise, choose an action with the highest Q-value for the current state 143 | 144 | # What "choose a random action with 'epsilon' probability" means in practice: 145 | # there are two options - opt1 picks a random action, opt2 picks the action with the highest Q-value. 146 | # epsilon lies in [0, 1] and gradually shrinks over training, 147 | # so if epsilon is 0.7, opt1 is taken with probability 0.7 and opt2 with probability 0.3. 148 | if not self.learning: 149 | action = random.choice(self.valid_actions) # Pick a random action from All 4 Valid Actions. 150 | else: 151 | if random.random() < self.epsilon: 152 | action = random.choice(self.valid_actions) # Pick a random action from All 4 Valid Actions. 153 | else: 154 | # action = max(self.Q[state], key=self.Q[state].get) # Get Key with max Q Score as action.
155 | # Handle ties: several actions may share the maximum Q-value, so pick among them at random. 156 | max_Q = max(self.Q[state].values()) 157 | max_candidate = [x for x in self.Q[state] if self.Q[state][x] == max_Q] 158 | action = random.choice(max_candidate) 159 | 160 | """Hard Coded Driving Logic For Fun""" 161 | # if inputs['light'] == 'red': 162 | # if self.next_waypoint == 'right' and inputs['oncoming'] != 'left' and inputs['left'] != 'forward': 163 | # action = self.next_waypoint 164 | # else: 165 | # action = None 166 | # else: 167 | # if self.next_waypoint == 'left' and (inputs['oncoming'] == 'forward' or inputs['oncoming'] == 'right'): 168 | # action = 'forward' 169 | # else: 170 | # action = self.next_waypoint 171 | 172 | return action 173 | 174 | def learn(self, state, action, reward): 175 | """ The learn function is called after the agent completes an action and 176 | receives a reward. This function does not consider future rewards 177 | when conducting learning. """ 178 | 179 | ########### 180 | ## TO DO ## 181 | ########### 182 | # When learning, implement the value iteration update rule 183 | # Use only the learning rate 'alpha' (do not use the discount factor 'gamma') 184 | prev_Q = self.Q[state][action] 185 | self.Q[state][action] = prev_Q * (1 - self.alpha) + reward * self.alpha 186 | 187 | # Report the current learning progress: coverage of states and of state-action pairs. 188 | if prev_Q == 0 and reward != 0: 189 | self.state_action_count += 1 190 | print 'Trial Count =', self.trial_count 191 | print 'Q-Table Size = {} / {}'.format(len(self.Q), self.overall_states) 192 | print 'Q-Table Non-zero Item Count = {} / {}'.format(self.state_action_count, self.overall_state_action) 193 | return 194 | 195 | def update(self): 196 | """ The update function is called when a time step is completed in the 197 | environment for a given trial. This function will build the agent 198 | state, choose an action, receive a reward, and learn if enabled. """ 199 | 200 | state = self.build_state() # Get current state 201 | self.createQ(state) # Create 'state' in Q-table 202 | action = self.choose_action(state) # Choose an action 203 | reward = self.env.act(self, action) # Receive a reward 204 | self.learn(state, action, reward) # Q-learn 205 | 206 | return 207 | 208 | 209 | def run(): 210 | """ Driving function for running the simulation. 211 | Press ESC to close the simulation, or [SPACE] to pause the simulation.
""" 212 | 213 | ############## 214 | # Create the environment 215 | # Flags: 216 | # verbose - set to True to display additional output from the simulation 217 | # num_dummies - discrete number of dummy agents in the environment, default is 100 218 | # grid_size - discrete number of intersections (columns, rows), default is (8, 6) 219 | env = Environment() 220 | 221 | ############## 222 | # Create the driving agent 223 | # Flags: 224 | # learning - set to True to force the driving agent to use Q-learning 225 | # * epsilon - continuous value for the exploration factor, default is 1 226 | # * alpha - continuous value for the learning rate, default is 0.5 227 | agent = env.create_agent(LearningAgent, learning=True, alpha=0.5) 228 | 229 | ############## 230 | # Follow the driving agent 231 | # Flags: 232 | # enforce_deadline - set to True to enforce a deadline metric 233 | env.set_primary_agent(agent, enforce_deadline=True) 234 | 235 | ############## 236 | # Create the simulation 237 | # Flags: 238 | # update_delay - continuous time (in seconds) between actions, default is 2.0 seconds 239 | # display - set to False to disable the GUI if PyGame is enabled 240 | # log_metrics - set to True to log trial and simulation results to /logs 241 | # optimized - set to True to change the default log file name 242 | sim = Simulator(env, update_delay=0.001, display=False, log_metrics=True, optimized=True) 243 | 244 | ############## 245 | # Run the simulator 246 | # Flags: 247 | # tolerance - epsilon tolerance before beginning testing, default is 0.05 248 | # n_test - discrete number of testing trials to perform, default is 0 249 | sim.run(n_test=10) 250 | 251 | 252 | if __name__ == '__main__': 253 | run() 254 | -------------------------------------------------------------------------------- /P4_Smart_Cab/smartcab/planner.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | class RoutePlanner(object): 4 | """ Complex route planner that is meant for a perpendicular grid network. """ 5 | 6 | def __init__(self, env, agent): 7 | self.env = env 8 | self.agent = agent 9 | self.destination = None 10 | 11 | def route_to(self, destination=None): 12 | """ Select the destination if one is provided, otherwise choose a random intersection. """ 13 | 14 | self.destination = destination if destination is not None else random.choice(self.env.intersections.keys()) 15 | 16 | def next_waypoint(self): 17 | """ Creates the next waypoint based on current heading, location, 18 | intended destination and L1 distance from destination. 
""" 19 | 20 | # Collect global location details 21 | bounds = self.env.grid_size 22 | location = self.env.agent_states[self.agent]['location'] 23 | heading = self.env.agent_states[self.agent]['heading'] 24 | 25 | delta_a = (self.destination[0] - location[0], self.destination[1] - location[1]) 26 | delta_b = (bounds[0] + delta_a[0] if delta_a[0] <= 0 else delta_a[0] - bounds[0], \ 27 | bounds[1] + delta_a[1] if delta_a[1] <= 0 else delta_a[1] - bounds[1]) 28 | 29 | # Calculate true difference in location based on world-wrap 30 | # This will pre-determine the need for U-turns from improper headings 31 | dx = delta_a[0] if abs(delta_a[0]) < abs(delta_b[0]) else delta_b[0] 32 | dy = delta_a[1] if abs(delta_a[1]) < abs(delta_b[1]) else delta_b[1] 33 | 34 | # First check if destination is at location 35 | if dx == 0 and dy == 0: 36 | return None 37 | 38 | # Next check if destination is cardinally East or West of location 39 | elif dx != 0: 40 | 41 | if dx * heading[0] > 0: # Heading the correct East or West direction 42 | return 'forward' 43 | elif dx * heading[0] < 0 and heading[0] < 0: # Heading West, destination East 44 | if dy > 0: # Destination also to the South 45 | return 'left' 46 | else: 47 | return 'right' 48 | elif dx * heading[0] < 0 and heading[0] > 0: # Heading East, destination West 49 | if dy < 0: # Destination also to the North 50 | return 'left' 51 | else: 52 | return 'right' 53 | elif dx * heading[1] > 0: # Heading North destination West; Heading South destination East 54 | return 'left' 55 | else: 56 | return 'right' 57 | 58 | # Finally, check if destination is cardinally North or South of location 59 | elif dy != 0: 60 | 61 | if dy * heading[1] > 0: # Heading the correct North or South direction 62 | return 'forward' 63 | elif dy * heading[1] < 0 and heading[1] < 0: # Heading North, destination South 64 | if dx < 0: # Destination also to the West 65 | return 'left' 66 | else: 67 | return 'right' 68 | elif dy * heading[1] < 0 and heading[1] > 0: # Heading South, destination North 69 | if dx > 0: # Destination also to the East 70 | return 'left' 71 | else: 72 | return 'right' 73 | elif dy * heading[0] > 0: # Heading West destination North; Heading East destination South 74 | return 'right' 75 | else: 76 | return 'left' -------------------------------------------------------------------------------- /P4_Smart_Cab/visuals.py: -------------------------------------------------------------------------------- 1 | ########################################### 2 | # Suppress matplotlib user warnings 3 | # Necessary for newer version of matplotlib 4 | import warnings 5 | warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib") 6 | ########################################### 7 | # 8 | # Display inline matplotlib plots with IPython 9 | from IPython import get_ipython 10 | get_ipython().run_line_magic('matplotlib', 'inline') 11 | ########################################### 12 | 13 | import matplotlib.pyplot as plt 14 | import numpy as np 15 | import pandas as pd 16 | import os 17 | import ast 18 | 19 | 20 | def calculate_safety(data): 21 | """ Calculates the safety rating of the smartcab during testing. 
""" 22 | 23 | good_ratio = data['good_actions'].sum() * 1.0 / \ 24 | (data['initial_deadline'] - data['final_deadline']).sum() 25 | 26 | if good_ratio == 1: # Perfect driving 27 | return ("A+", "green") 28 | else: # Imperfect driving 29 | if data['actions'].apply(lambda x: ast.literal_eval(x)[4]).sum() > 0: # Major accident 30 | return ("F", "red") 31 | elif data['actions'].apply(lambda x: ast.literal_eval(x)[3]).sum() > 0: # Minor accident 32 | return ("D", "#EEC700") 33 | elif data['actions'].apply(lambda x: ast.literal_eval(x)[2]).sum() > 0: # Major violation 34 | return ("C", "#EEC700") 35 | else: # Minor violation 36 | minor = data['actions'].apply(lambda x: ast.literal_eval(x)[1]).sum() 37 | if minor >= len(data)/2: # Minor violation in at least half of the trials 38 | return ("B", "green") 39 | else: 40 | return ("A", "green") 41 | 42 | 43 | def calculate_reliability(data): 44 | """ Calculates the reliability rating of the smartcab during testing. """ 45 | 46 | success_ratio = data['success'].sum() * 1.0 / len(data) 47 | 48 | if success_ratio == 1: # Always meets deadline 49 | return ("A+", "green") 50 | else: 51 | if success_ratio >= 0.90: 52 | return ("A", "green") 53 | elif success_ratio >= 0.80: 54 | return ("B", "green") 55 | elif success_ratio >= 0.70: 56 | return ("C", "#EEC700") 57 | elif success_ratio >= 0.60: 58 | return ("D", "#EEC700") 59 | else: 60 | return ("F", "red") 61 | 62 | 63 | def plot_trials(csv): 64 | """ Plots the data from logged metrics during a simulation.""" 65 | 66 | data = pd.read_csv(os.path.join("logs", csv)) 67 | 68 | if len(data) < 10: 69 | print "Not enough data collected to create a visualization." 70 | print "At least 20 trials are required." 71 | return 72 | 73 | # Create additional features 74 | data['average_reward'] = (data['net_reward'] / (data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean() 75 | data['reliability_rate'] = (data['success']*100).rolling(window=10, center=False).mean() # compute avg. 
76 | data['good_actions'] = data['actions'].apply(lambda x: ast.literal_eval(x)[0]) 77 | data['good'] = (data['good_actions'] * 1.0 / \ 78 | (data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean() 79 | data['minor'] = (data['actions'].apply(lambda x: ast.literal_eval(x)[1]) * 1.0 / \ 80 | (data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean() 81 | data['major'] = (data['actions'].apply(lambda x: ast.literal_eval(x)[2]) * 1.0 / \ 82 | (data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean() 83 | data['minor_acc'] = (data['actions'].apply(lambda x: ast.literal_eval(x)[3]) * 1.0 / \ 84 | (data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean() 85 | data['major_acc'] = (data['actions'].apply(lambda x: ast.literal_eval(x)[4]) * 1.0 / \ 86 | (data['initial_deadline'] - data['final_deadline'])).rolling(window=10, center=False).mean() 87 | data['epsilon'] = data['parameters'].apply(lambda x: ast.literal_eval(x)['e']) 88 | data['alpha'] = data['parameters'].apply(lambda x: ast.literal_eval(x)['a']) 89 | 90 | 91 | # Create training and testing subsets 92 | training_data = data[data['testing'] == False] 93 | testing_data = data[data['testing'] == True] 94 | 95 | plt.figure(figsize=(12,8)) 96 | 97 | 98 | ############### 99 | ### Average step reward plot 100 | ############### 101 | 102 | ax = plt.subplot2grid((6,6), (0,3), colspan=3, rowspan=2) 103 | ax.set_title("10-Trial Rolling Average Reward per Action") 104 | ax.set_ylabel("Reward per Action") 105 | ax.set_xlabel("Trial Number") 106 | ax.set_xlim((10, len(training_data))) 107 | 108 | # Create plot-specific data 109 | step = training_data[['trial','average_reward']].dropna() 110 | 111 | ax.axhline(xmin = 0, xmax = 1, y = 0, color = 'black', linestyle = 'dashed') 112 | ax.plot(step['trial'], step['average_reward']) 113 | 114 | 115 | ############### 116 | ### Parameters Plot 117 | ############### 118 | 119 | ax = plt.subplot2grid((6,6), (2,3), colspan=3, rowspan=2) 120 | 121 | # Check whether the agent was expected to learn 122 | if csv != 'sim_no-learning.csv': 123 | ax.set_ylabel("Parameter Value") 124 | ax.set_xlabel("Trial Number") 125 | ax.set_xlim((1, len(training_data))) 126 | ax.set_ylim((0, 1.05)) 127 | 128 | ax.plot(training_data['trial'], training_data['epsilon'], color='blue', label='Exploration factor') 129 | ax.plot(training_data['trial'], training_data['alpha'], color='green', label='Learning factor') 130 | 131 | ax.legend(bbox_to_anchor=(0.5,1.19), fancybox=True, ncol=2, loc='upper center', fontsize=10) 132 | 133 | else: 134 | ax.axis('off') 135 | ax.text(0.52, 0.30, "Simulation completed\nwith learning disabled.", fontsize=24, ha='center', style='italic') 136 | 137 | 138 | ############### 139 | ### Bad Actions Plot 140 | ############### 141 | 142 | actions = training_data[['trial','good', 'minor','major','minor_acc','major_acc']].dropna() 143 | maximum = (1 - actions['good']).values.max() 144 | 145 | ax = plt.subplot2grid((6,6), (0,0), colspan=3, rowspan=4) 146 | ax.set_title("10-Trial Rolling Relative Frequency of Bad Actions") 147 | ax.set_ylabel("Relative Frequency") 148 | ax.set_xlabel("Trial Number") 149 | 150 | ax.set_ylim((0, maximum + 0.01)) 151 | ax.set_xlim((10, len(training_data))) 152 | 153 | ax.set_yticks(np.linspace(0, maximum+0.01, 10)) 154 | 155 | ax.plot(actions['trial'], (1 - actions['good']), color='black', label='Total Bad Actions',
linestyle='dotted', linewidth=3) 156 | ax.plot(actions['trial'], actions['minor'], color='orange', label='Minor Violation', linestyle='dashed') 157 | ax.plot(actions['trial'], actions['major'], color='orange', label='Major Violation', linewidth=2) 158 | ax.plot(actions['trial'], actions['minor_acc'], color='red', label='Minor Accident', linestyle='dashed') 159 | ax.plot(actions['trial'], actions['major_acc'], color='red', label='Major Accident', linewidth=2) 160 | 161 | ax.legend(loc='upper right', fancybox=True, fontsize=10) 162 | 163 | 164 | ############### 165 | ### Rolling Success-Rate plot 166 | ############### 167 | 168 | ax = plt.subplot2grid((6,6), (4,0), colspan=4, rowspan=2) 169 | ax.set_title("10-Trial Rolling Rate of Reliability") 170 | ax.set_ylabel("Rate of Reliability") 171 | ax.set_xlabel("Trial Number") 172 | ax.set_xlim((10, len(training_data))) 173 | ax.set_ylim((-5, 105)) 174 | ax.set_yticks(np.arange(0, 101, 20)) 175 | ax.set_yticklabels(['0%', '20%', '40%', '60%', '80%', '100%']) 176 | 177 | # Create plot-specific data 178 | trial = training_data.dropna()['trial'] 179 | rate = training_data.dropna()['reliability_rate'] 180 | 181 | # Rolling success rate 182 | ax.plot(trial, rate, label="Reliability Rate", color='blue') 183 | 184 | 185 | ############### 186 | ### Test results 187 | ############### 188 | 189 | ax = plt.subplot2grid((6,6), (4,4), colspan=2, rowspan=2) 190 | ax.axis('off') 191 | 192 | if len(testing_data) > 0: 193 | safety_rating, safety_color = calculate_safety(testing_data) 194 | reliability_rating, reliability_color = calculate_reliability(testing_data) 195 | 196 | # Write success rate 197 | ax.text(0.40, .9, "{} testing trials simulated.".format(len(testing_data)), fontsize=14, ha='center') 198 | ax.text(0.40, 0.7, "Safety Rating:", fontsize=16, ha='center') 199 | ax.text(0.40, 0.42, "{}".format(safety_rating), fontsize=40, ha='center', color=safety_color) 200 | ax.text(0.40, 0.27, "Reliability Rating:", fontsize=16, ha='center') 201 | ax.text(0.40, 0, "{}".format(reliability_rating), fontsize=40, ha='center', color=reliability_color) 202 | 203 | else: 204 | ax.text(0.36, 0.30, "Simulation completed\nwith testing disabled.", fontsize=20, ha='center', style='italic') 205 | 206 | plt.tight_layout() 207 | plt.show() 208 | -------------------------------------------------------------------------------- /P5_Image_Classification/Intro/image/Learn_Rate_Tune_Image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P5_Image_Classification/Intro/image/Learn_Rate_Tune_Image.png -------------------------------------------------------------------------------- /P5_Image_Classification/Intro/image/Mean_Variance_Image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P5_Image_Classification/Intro/image/Mean_Variance_Image.png -------------------------------------------------------------------------------- /P5_Image_Classification/Intro/image/network_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P5_Image_Classification/Intro/image/network_diagram.png -------------------------------------------------------------------------------- /P5_Image_Classification/Intro/image/notmnist.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P5_Image_Classification/Intro/image/notmnist.png -------------------------------------------------------------------------------- /P5_Image_Classification/MNIST/multilayer_perceptron.py: -------------------------------------------------------------------------------- 1 | from tensorflow.examples.tutorials.mnist import input_data 2 | mnist = input_data.read_data_sets(".", one_hot=True, reshape=False) 3 | 4 | import tensorflow as tf 5 | 6 | # Parameters 7 | learning_rate = 0.001 8 | training_epochs = 20 9 | batch_size = 128 # Decrease batch size if you don't have enough memory 10 | display_step = 1 11 | 12 | n_input = 784 # MNIST data input (img shape: 28*28) 13 | n_classes = 10 # MNIST total classes (0-9 digits) 14 | 15 | n_hidden_layer = 256 # number of units in the hidden layer 16 | 17 | # Store layers weight & bias 18 | weights = { 19 | 'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])), 20 | 'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes])) 21 | } 22 | biases = { 23 | 'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])), 24 | 'out': tf.Variable(tf.random_normal([n_classes])) 25 | } 26 | 27 | # tf Graph input 28 | x = tf.placeholder("float", [None, 28, 28, 1]) 29 | y = tf.placeholder("float", [None, n_classes]) 30 | 31 | x_flat = tf.reshape(x, [-1, n_input]) 32 | 33 | # Hidden layer with RELU activation 34 | layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']), biases['hidden_layer']) 35 | layer_1 = tf.nn.relu(layer_1) 36 | # Output layer with linear activation 37 | logits = tf.matmul(layer_1, weights['out']) + biases['out'] 38 | 39 | # Define loss and optimizer 40 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)) 41 | optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost) 42 | 43 | # Initializing the variables 44 | init = tf.global_variables_initializer() 45 | 46 | config = tf.ConfigProto() 47 | config.gpu_options.allow_growth = True 48 | # Launch the graph 49 | with tf.Session(config=config) as sess: 50 | sess.run(init) 51 | # Training cycle 52 | for epoch in range(training_epochs): 53 | total_batch = int(mnist.train.num_examples/batch_size) 54 | # Loop over all batches 55 | for i in range(total_batch): 56 | batch_x, batch_y = mnist.train.next_batch(batch_size) 57 | # Run optimization op (backprop) and cost op (to get loss value) 58 | sess.run(optimizer, feed_dict={x: batch_x, y: batch_y}) 59 | # Display logs per epoch step 60 | if epoch % display_step == 0: 61 | c = sess.run(cost, feed_dict={x: batch_x, y: batch_y}) 62 | print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c)) 63 | print("Optimization Finished!") 64 | 65 | # Test model 66 | correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)) 67 | # Calculate accuracy 68 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 69 | # Decrease test_size if you don't have enough memory 70 | test_size = 256 71 | print("Accuracy:", accuracy.eval({x: mnist.test.images[:test_size], y: mnist.test.labels[:test_size]})) 72 | -------------------------------------------------------------------------------- /P5_Image_Classification/README.md: -------------------------------------------------------------------------------- 1 | # MLND Project 5 - Image Classification 2 | 3 | ## Introduction 4 | 5 | In this project, you will perform image classification on the **CIFAR-10 dataset**.
6 | 7 | The dataset contains 50,000 images in 10 classes, such as airplanes, dogs, and cats. You will first preprocess the dataset and then train a convolutional neural network on all the samples: normalize the images, one-hot encode the labels, and then build convolutional, max-pooling, and fully connected layers. Finally, you will see the model's predictions on sample images. 8 | 9 | 10 | ## Performance 11 | 12 | - **MNIST** 13 | 14 | | Model | Epochs | Accuracy | 15 | | :--- | :------: | :-------: | 16 | | Fully connected network `FC1024`, `FC1024` | 50 | **98%** 17 | | VGG-style convolutional network | 50 | **99.5%** 18 | 19 | - **CIFAR10** 20 | 21 | | Model | Epochs | Accuracy | 22 | | :--- | :------: | :-------: | 23 | | TensorFlow `CONV16`, `CONV32`, `CONV64`, `FC128` | 100 | **70%** 24 | | TensorFlow `CONV16`, `CONV32`, `CONV64`, `FC128` (Xavier, BN, L2) | 100 | **72%** 25 | | Keras VGG-style `(CONV / CONV / POOL) * 3`, `FC128` | 100 | **80%** 26 | | Keras VGG-style `(CONV / CONV / POOL) * 3`, `FC128` (With Data Augmentation) | 100 | **89%** 27 | 28 | ## Methodology 29 | 30 | - **Preprocessing** 31 | - Normalize 32 | - One-hot Encoding 33 | 34 | - **Model Construction** 35 | - Input Layer 36 | - Convolution + Maxpooling Layer 37 | - Flatten Layer 38 | - Fully Connected Layer 39 | - Dropout 40 | - Output Layer 41 | 42 | - **Parameter Fine Tuning** 43 | - epoch 44 | - batch_size 45 | - stddev 46 | - conv_num_output 47 | - conv_size (width x height) 48 | - conv_stride 49 | - pooling_size (width x height) 50 | - pooling_stride 51 | - full_num_output 52 | - Activation Function (Conv Layer, Fully Connected Layer) 53 | - Padding Style -------------------------------------------------------------------------------- /P5_Image_Classification/castle.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P5_Image_Classification/castle.jpg -------------------------------------------------------------------------------- /P5_Image_Classification/helper.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from sklearn.preprocessing import LabelBinarizer 5 | 6 | 7 | def _load_label_names(): 8 | """ 9 | Load the label names 10 | """ 11 | return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] 12 | 13 | 14 | def load_cfar10_batch(cifar10_dataset_folder_path, batch_id): 15 | """ 16 | Load a batch of the dataset 17 | """ 18 | with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file: 19 | batch = pickle.load(file, encoding='latin1') 20 | 21 | features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1) 22 | labels = batch['labels'] 23 | 24 | return features, labels 25 | 26 | 27 | def display_stats(cifar10_dataset_folder_path, batch_id, sample_id): 28 | """ 29 | Display stats of the dataset 30 | """ 31 | batch_ids = list(range(1, 6)) 32 | 33 | if batch_id not in batch_ids: 34 | print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids)) 35 | return None 36 | 37 | features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id) 38 | 39 | if not (0 <= sample_id < len(features)): 40 | print('{} samples in batch {}.
{} is out of range.'.format(len(features), batch_id, sample_id)) 41 | return None 42 | 43 | print('\nStats of batch {}:'.format(batch_id)) 44 | print('Samples: {}'.format(len(features))) 45 | print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True))))) 46 | print('First 20 Labels: {}'.format(labels[:20])) 47 | 48 | sample_image = features[sample_id] 49 | sample_label = labels[sample_id] 50 | label_names = _load_label_names() 51 | 52 | print('\nExample of Image {}:'.format(sample_id)) 53 | print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max())) 54 | print('Image - Shape: {}'.format(sample_image.shape)) 55 | print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label])) 56 | plt.axis('off') 57 | plt.imshow(sample_image) 58 | 59 | 60 | def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename): 61 | """ 62 | Preprocess data and save it to file 63 | """ 64 | features = normalize(features) 65 | labels = one_hot_encode(labels) 66 | 67 | pickle.dump((features, labels), open(filename, 'wb')) 68 | 69 | 70 | def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode): 71 | """ 72 | Preprocess Training and Validation Data 73 | """ 74 | n_batches = 5 75 | valid_features = [] 76 | valid_labels = [] 77 | 78 | for batch_i in range(1, n_batches + 1): 79 | features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i) 80 | validation_count = int(len(features) * 0.1) 81 | 82 | # Preprocess and save a batch of training data 83 | _preprocess_and_save( 84 | normalize, 85 | one_hot_encode, 86 | features[:-validation_count], 87 | labels[:-validation_count], 88 | 'preprocess_batch_' + str(batch_i) + '.p') 89 | 90 | # Use a portion of training batch for validation 91 | valid_features.extend(features[-validation_count:]) 92 | valid_labels.extend(labels[-validation_count:]) 93 | 94 | # Preprocess and Save all validation data 95 | _preprocess_and_save( 96 | normalize, 97 | one_hot_encode, 98 | np.array(valid_features), 99 | np.array(valid_labels), 100 | 'preprocess_validation.p') 101 | 102 | with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file: 103 | batch = pickle.load(file, encoding='latin1') 104 | 105 | # Load the test data 106 | test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1) 107 | test_labels = batch['labels'] 108 | 109 | # Preprocess and save all test data 110 | _preprocess_and_save( 111 | normalize, 112 | one_hot_encode, 113 | np.array(test_features), 114 | np.array(test_labels), 115 | 'preprocess_training.p') 116 | 117 | 118 | def batch_features_labels(features, labels, batch_size): 119 | """ 120 | Split features and labels into batches 121 | """ 122 | for start in range(0, len(features), batch_size): 123 | end = min(start + batch_size, len(features)) 124 | yield features[start:end], labels[start:end] 125 | 126 | 127 | def load_preprocess_training_batch(batch_id, batch_size): 128 | """ 129 | Load the preprocessed training data and return it in batches of batch_size or less 130 | """ 131 | filename = 'preprocess_batch_' + str(batch_id) + '.p' 132 | features, labels = pickle.load(open(filename, mode='rb')) 133 | 134 | # Return the training data in batches of batch_size or less 135 | return batch_features_labels(features, labels, batch_size) 136 | 137 | 138 | def display_image_predictions(features, labels, predictions): 139 | n_classes = 10 140 | label_names = _load_label_names() 141 | label_binarizer =
LabelBinarizer() 142 | label_binarizer.fit(range(n_classes)) 143 | label_ids = label_binarizer.inverse_transform(np.array(labels)) 144 | 145 | fig, axes = plt.subplots(nrows=4, ncols=2) 146 | fig.tight_layout() 147 | fig.suptitle('Softmax Predictions', fontsize=20, y=1.1) 148 | 149 | n_predictions = 3 150 | margin = 0.05 151 | ind = np.arange(n_predictions) 152 | width = (1. - 2. * margin) / n_predictions 153 | 154 | for image_i, (feature, label_id, pred_indices, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)): 155 | pred_names = [label_names[pred_i] for pred_i in pred_indices] 156 | correct_name = label_names[label_id] 157 | 158 | axes[image_i][0].imshow(feature) 159 | axes[image_i][0].set_title(correct_name) 160 | axes[image_i][0].set_axis_off() 161 | 162 | axes[image_i][1].barh(ind + margin, pred_values[::-1], width) 163 | axes[image_i][1].set_yticks(ind + margin) 164 | axes[image_i][1].set_yticklabels(pred_names[::-1]) 165 | axes[image_i][1].set_xticks([0, 0.5, 1.0]) 166 | -------------------------------------------------------------------------------- /P5_Image_Classification/lsuv_init.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import numpy as np 3 | from keras.models import Model 4 | from keras import backend as K 5 | from keras.layers import Dense, Convolution2D 6 | 7 | # Orthonorm init code is taken from Lasagne 8 | # https://github.com/Lasagne/Lasagne/blob/master/lasagne/init.py 9 | def svd_orthonormal(shape): 10 | if len(shape) < 2: 11 | raise RuntimeError("Only shapes of length 2 or more are supported.") 12 | flat_shape = (shape[0], np.prod(shape[1:])) 13 | a = np.random.standard_normal(flat_shape) 14 | u, _, v = np.linalg.svd(a, full_matrices=False) 15 | q = u if u.shape == flat_shape else v 16 | q = q.reshape(shape) 17 | return q 18 | def get_activations(model, layer, X_batch): 19 | intermediate_layer_model = Model(input=model.get_input_at(0), output=layer.get_output_at(0)) 20 | activations = intermediate_layer_model.predict(X_batch) 21 | return activations 22 | 23 | def LSUVinit(model, batch): 24 | # only these layer classes are considered for LSUV initialization; add more if needed 25 | classes_to_consider = (Dense, Convolution2D) 26 | 27 | margin = 0.1 28 | max_iter = 10 29 | layers_initialized = 0 30 | for layer in model.layers: 31 | print(layer.name) 32 | if not any([type(layer) is class_name for class_name in classes_to_consider]): 33 | continue 34 | # skip small layers whose activation variance is close to zero, especially
for small batches 35 | if np.prod(layer.get_output_shape_at(0)[1:]) < 32: 36 | print(layer.name, 'too small') 37 | continue 38 | print('LSUV initializing', layer.name) 39 | layers_initialized += 1 40 | w_all = layer.get_weights() 41 | weights = np.array(w_all[0]) 42 | weights = svd_orthonormal(weights.shape) 43 | biases = np.array(w_all[1]) 44 | w_all_new = [weights, biases] 45 | layer.set_weights(w_all_new) 46 | acts1 = get_activations(model, layer, batch) 47 | var1 = np.var(acts1) 48 | iter1 = 0 49 | needed_variance = 1.0 50 | print(var1) 51 | while abs(needed_variance - var1) > margin: 52 | w_all = layer.get_weights() 53 | weights = np.array(w_all[0]) 54 | biases = np.array(w_all[1]) 55 | if np.abs(np.sqrt(var1)) < 1e-7: break # avoid zero division 56 | weights /= np.sqrt(var1) / np.sqrt(needed_variance) 57 | w_all_new = [weights, biases] 58 | layer.set_weights(w_all_new) 59 | acts1 = get_activations(model, layer, batch) 60 | var1 = np.var(acts1) 61 | iter1 += 1 62 | print(var1) 63 | if iter1 > max_iter: 64 | break 65 | print('LSUV: total layers initialized', layers_initialized) 66 | return model 67 | -------------------------------------------------------------------------------- /P5_Image_Classification/problem_unittests.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import tensorflow as tf 4 | import random 5 | from unittest.mock import MagicMock 6 | 7 | 8 | def _print_success_message(): 9 | print('Tests Passed') 10 | 11 | 12 | def test_folder_path(cifar10_dataset_folder_path): 13 | assert cifar10_dataset_folder_path is not None,\ 14 | 'Cifar-10 data folder not set.' 15 | assert cifar10_dataset_folder_path[-1] != '/',\ 16 | 'The "/" shouldn\'t be added to the end of the path.' 17 | assert os.path.exists(cifar10_dataset_folder_path),\ 18 | 'Path not found.' 19 | assert os.path.isdir(cifar10_dataset_folder_path),\ 20 | '{} is not a folder.'.format(os.path.basename(cifar10_dataset_folder_path)) 21 | 22 | train_files = [cifar10_dataset_folder_path + '/data_batch_' + str(batch_id) for batch_id in range(1, 6)] 23 | other_files = [cifar10_dataset_folder_path + '/batches.meta', cifar10_dataset_folder_path + '/test_batch'] 24 | missing_files = [path for path in train_files + other_files if not os.path.exists(path)] 25 | 26 | assert not missing_files,\ 27 | 'Missing files in directory: {}'.format(missing_files) 28 | 29 | print('All files found!') 30 | 31 | 32 | def test_normalize(normalize): 33 | test_shape = (np.random.choice(range(1000)), 32, 32, 3) 34 | test_numbers = np.random.choice(range(256), test_shape) 35 | normalize_out = normalize(test_numbers) 36 | 37 | assert type(normalize_out).__module__ == np.__name__,\ 38 | 'Not Numpy Object' 39 | 40 | assert normalize_out.shape == test_shape,\ 41 | 'Incorrect Shape. {} shape found'.format(normalize_out.shape) 42 | 43 | assert normalize_out.max() <= 1 and normalize_out.min() >= 0,\ 44 | 'Incorrect Range. {} to {} found'.format(normalize_out.min(), normalize_out.max()) 45 | 46 | _print_success_message() 47 | 48 | 49 | def test_one_hot_encode(one_hot_encode): 50 | test_shape = np.random.choice(range(1000)) 51 | test_numbers = np.random.choice(range(10), test_shape) 52 | one_hot_out = one_hot_encode(test_numbers) 53 | 54 | assert type(one_hot_out).__module__ == np.__name__,\ 55 | 'Not Numpy Object' 56 | 57 | assert one_hot_out.shape == (test_shape, 10),\ 58 | 'Incorrect Shape.
{} shape found'.format(one_hot_out.shape) 59 | 60 | n_encode_tests = 5 61 | test_pairs = list(zip(test_numbers, one_hot_out)) 62 | test_indices = np.random.choice(len(test_numbers), n_encode_tests) 63 | labels = [test_pairs[test_i][0] for test_i in test_indices] 64 | enc_labels = np.array([test_pairs[test_i][1] for test_i in test_indices]) 65 | new_enc_labels = one_hot_encode(labels) 66 | 67 | assert np.array_equal(enc_labels, new_enc_labels),\ 68 | 'Encodings returned different results for the same numbers.\n' \ 69 | 'For the first call it returned:\n' \ 70 | '{}\n' \ 71 | 'For the second call it returned\n' \ 72 | '{}\n' \ 73 | 'Make sure you save the map of labels to encodings outside of the function.'.format(enc_labels, new_enc_labels) 74 | 75 | _print_success_message() 76 | 77 | 78 | def test_nn_image_inputs(neural_net_image_input): 79 | image_shape = (32, 32, 3) 80 | nn_inputs_out_x = neural_net_image_input(image_shape) 81 | 82 | assert nn_inputs_out_x.get_shape().as_list() == [None, image_shape[0], image_shape[1], image_shape[2]],\ 83 | 'Incorrect Image Shape. Found {} shape'.format(nn_inputs_out_x.get_shape().as_list()) 84 | 85 | assert nn_inputs_out_x.op.type == 'Placeholder',\ 86 | 'Incorrect Image Type. Found {} type'.format(nn_inputs_out_x.op.type) 87 | 88 | assert nn_inputs_out_x.name == 'x:0', \ 89 | 'Incorrect Name. Found {}'.format(nn_inputs_out_x.name) 90 | 91 | print('Image Input Tests Passed.') 92 | 93 | 94 | def test_nn_label_inputs(neural_net_label_input): 95 | n_classes = 10 96 | nn_inputs_out_y = neural_net_label_input(n_classes) 97 | 98 | assert nn_inputs_out_y.get_shape().as_list() == [None, n_classes],\ 99 | 'Incorrect Label Shape. Found {} shape'.format(nn_inputs_out_y.get_shape().as_list()) 100 | 101 | assert nn_inputs_out_y.op.type == 'Placeholder',\ 102 | 'Incorrect Label Type. Found {} type'.format(nn_inputs_out_y.op.type) 103 | 104 | assert nn_inputs_out_y.name == 'y:0', \ 105 | 'Incorrect Name. Found {}'.format(nn_inputs_out_y.name) 106 | 107 | print('Label Input Tests Passed.') 108 | 109 | 110 | def test_nn_keep_prob_inputs(neural_net_keep_prob_input): 111 | nn_inputs_out_k = neural_net_keep_prob_input() 112 | 113 | assert nn_inputs_out_k.get_shape().ndims is None,\ 114 | 'Too many dimensions found for keep prob. Found {} dimensions. It should be a scalar (0-Dimension Tensor).'.format(nn_inputs_out_k.get_shape().ndims) 115 | 116 | assert nn_inputs_out_k.op.type == 'Placeholder',\ 117 | 'Incorrect keep prob Type. Found {} type'.format(nn_inputs_out_k.op.type) 118 | 119 | assert nn_inputs_out_k.name == 'keep_prob:0', \ 120 | 'Incorrect Name. Found {}'.format(nn_inputs_out_k.name) 121 | 122 | print('Keep Prob Tests Passed.') 123 | 124 | 125 | def test_con_pool(conv2d_maxpool): 126 | test_x = tf.placeholder(tf.float32, [None, 32, 32, 5]) 127 | test_num_outputs = 10 128 | test_con_k = (2, 2) 129 | test_con_s = (4, 4) 130 | test_pool_k = (2, 2) 131 | test_pool_s = (2, 2) 132 | 133 | conv2d_maxpool_out = conv2d_maxpool(test_x, test_num_outputs, test_con_k, test_con_s, test_pool_k, test_pool_s) 134 | 135 | assert conv2d_maxpool_out.get_shape().as_list() == [None, 4, 4, 10],\ 136 | 'Incorrect Shape. Found {} shape'.format(conv2d_maxpool_out.get_shape().as_list()) 137 | 138 | _print_success_message() 139 | 140 | 141 | def test_flatten(flatten): 142 | test_x = tf.placeholder(tf.float32, [None, 10, 30, 6]) 143 | flat_out = flatten(test_x) 144 | 145 | assert flat_out.get_shape().as_list() == [None, 10*30*6],\ 146 | 'Incorrect Shape. 
Found {} shape'.format(flat_out.get_shape().as_list()) 147 | 148 | _print_success_message() 149 | 150 | 151 | def test_fully_conn(fully_conn): 152 | test_x = tf.placeholder(tf.float32, [None, 128]) 153 | test_num_outputs = 40 154 | 155 | fc_out = fully_conn(test_x, test_num_outputs) 156 | 157 | assert fc_out.get_shape().as_list() == [None, 40],\ 158 | 'Incorrect Shape. Found {} shape'.format(fc_out.get_shape().as_list()) 159 | 160 | _print_success_message() 161 | 162 | 163 | def test_output(output): 164 | test_x = tf.placeholder(tf.float32, [None, 128]) 165 | test_num_outputs = 40 166 | 167 | output_out = output(test_x, test_num_outputs) 168 | 169 | assert output_out.get_shape().as_list() == [None, 40],\ 170 | 'Incorrect Shape. Found {} shape'.format(output_out.get_shape().as_list()) 171 | 172 | _print_success_message() 173 | 174 | 175 | def test_conv_net(conv_net): 176 | test_x = tf.placeholder(tf.float32, [None, 32, 32, 3]) 177 | test_k = tf.placeholder(tf.float32) 178 | 179 | logits_out = conv_net(test_x, test_k) 180 | 181 | assert logits_out.get_shape().as_list() == [None, 10],\ 182 | 'Incorrect Model Output. Found {}'.format(logits_out.get_shape().as_list()) 183 | 184 | print('Neural Network Built!') 185 | 186 | 187 | def test_train_nn(train_neural_network): 188 | test_x = np.random.rand(128, 32, 32, 3) 189 | test_y = np.random.rand(128, 10) 190 | test_k = np.random.rand(1) 191 | test_optimizer = tf.train.AdamOptimizer() 192 | 193 | with tf.Session() as session: 194 | session.run = MagicMock() 195 | train_neural_network(session, test_optimizer, test_k, test_x, test_y) 196 | assert session.run.called, 'Session not used' 197 | _print_success_message() 198 | -------------------------------------------------------------------------------- /P5_Image_Classification/utils.py: -------------------------------------------------------------------------------- 1 | from keras.layers.merge import Concatenate 2 | from keras.layers.core import Lambda 3 | from keras.models import Model 4 | import tensorflow as tf 5 | import matplotlib.pyplot as plt 6 | 7 | 8 | def make_parallel(model, gpu_count): 9 | 10 | def get_slice(data, idx, parts): 11 | shape = tf.shape(data) 12 | size = tf.concat([shape[:1] // parts, shape[1:]], axis=0) 13 | stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0) 14 | start = stride * idx 15 | return tf.slice(data, start, size) 16 | 17 | outputs_all = [] 18 | for i in range(len(model.outputs)): 19 | outputs_all.append([]) 20 | 21 | # Place a copy of the model on each GPU, each getting a slice of the batch 22 | for i in range(gpu_count): 23 | with tf.device('/gpu:%d' % i): 24 | with tf.name_scope('tower_%d' % i) as scope: 25 | 26 | inputs = [] 27 | # Slice each input into a piece for processing on this GPU 28 | for x in model.inputs: 29 | input_shape = tuple(x.get_shape().as_list())[1:] 30 | slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x) 31 | inputs.append(slice_n) 32 | 33 | outputs = model(inputs) 34 | 35 | if not isinstance(outputs, list): 36 | outputs = [outputs] 37 | 38 | # Save all the outputs for merging back together later 39 | for l in range(len(outputs)): 40 | outputs_all[l].append(outputs[l]) 41 | 42 | # merge outputs on CPU 43 | # (could merge on a GPU instead if the CPU is already under heavy load) 44 | with tf.device('/cpu:0'): 45 | merged = [] 46 | for outputs in outputs_all: 47 | merged.append(Concatenate(axis=0)(outputs)) 48 | return Model(model.inputs, merged) 49 | 50 | 51 | def accuracy_curve(h): 52 | acc, loss, val_acc, val_loss =
h.history['acc'], h.history['loss'], h.history['val_acc'], h.history['val_loss'] 53 | epoch = len(acc) 54 | plt.figure(figsize=(17, 5)) 55 | plt.subplot(121) 56 | plt.plot(range(epoch), acc, label='Train') 57 | plt.plot(range(epoch), val_acc, label='Test') 58 | plt.title('Accuracy over ' + str(epoch) + ' Epochs', size=15) 59 | plt.legend() 60 | plt.grid(True) 61 | plt.subplot(122) 62 | plt.plot(range(epoch), loss, label='Train') 63 | plt.plot(range(epoch), val_loss, label='Test') 64 | plt.title('Loss over ' + str(epoch) + ' Epochs', size=15) 65 | plt.legend() 66 | plt.grid(True) 67 | plt.show() -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/cat1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/cat1.jpg -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/cat2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/cat2.jpg -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/cat3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/cat3.jpg -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/cat4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/cat4.jpg -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/cat5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/cat5.jpg -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/cat6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/cat6.jpg -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/output.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/output.gif -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/output2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/output2.gif -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/utils.py: -------------------------------------------------------------------------------- 1 | from keras.layers.merge import Concatenate 2 | from keras.layers.core import Lambda 3 | from keras.models import Model 4 | import tensorflow as tf 5 | import matplotlib.pyplot as plt 6 | from keras import backend as K 7 | import numpy as np 8 | 9 | 10 | def get_params_count(model): 11 | trainable = int(np.sum([K.count_params(p) for p in 
set(model.trainable_weights)])) 12 | non_trainable = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)])) 13 | return trainable, non_trainable 14 | 15 | 16 | def make_parallel(model, gpu_count): 17 | 18 | def get_slice(data, idx, parts): 19 | shape = tf.shape(data) 20 | size = tf.concat([shape[:1] // parts, shape[1:]], axis=0) 21 | stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0) 22 | start = stride * idx 23 | return tf.slice(data, start, size) 24 | 25 | outputs_all = [] 26 | for i in range(len(model.outputs)): 27 | outputs_all.append([]) 28 | 29 | # Place a copy of the model on each GPU, each getting a slice of the batch 30 | for i in range(gpu_count): 31 | with tf.device('/gpu:%d' % i): 32 | with tf.name_scope('tower_%d' % i) as scope: 33 | 34 | inputs = [] 35 | # Slice each input into a piece for processing on this GPU 36 | for x in model.inputs: 37 | input_shape = tuple(x.get_shape().as_list())[1:] 38 | slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x) 39 | inputs.append(slice_n) 40 | 41 | outputs = model(inputs) 42 | 43 | if not isinstance(outputs, list): 44 | outputs = [outputs] 45 | 46 | # Save all the outputs for merging back together later 47 | for l in range(len(outputs)): 48 | outputs_all[l].append(outputs[l]) 49 | 50 | # merge outputs on CPU 51 | # (could merge on a GPU instead if the CPU is already under heavy load) 52 | with tf.device('/cpu:0'): 53 | merged = [] 54 | for outputs in outputs_all: 55 | merged.append(Concatenate(axis=0)(outputs)) 56 | return Model(model.inputs, merged) 57 | 58 | 59 | def accuracy_curve(h): 60 | acc, loss, val_acc, val_loss = h.history['acc'], h.history['loss'], h.history['val_acc'], h.history['val_loss'] 61 | epoch = len(acc) 62 | plt.figure(figsize=(17, 5)) 63 | plt.subplot(121) 64 | plt.plot(range(epoch), acc, label='Train') 65 | plt.plot(range(epoch), val_acc, label='Test') 66 | plt.title('Accuracy over ' + str(epoch) + ' Epochs', size=15) 67 | plt.legend() 68 | plt.grid(True) 69 | plt.subplot(122) 70 | plt.plot(range(epoch), loss, label='Train') 71 | plt.plot(range(epoch), val_loss, label='Test') 72 | plt.title('Loss over ' + str(epoch) + ' Epochs', size=15) 73 | plt.legend() 74 | plt.grid(True) 75 | plt.show() -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/view.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/view.jpg -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/weights_history.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/weights_history.p -------------------------------------------------------------------------------- /P6_Dogs_VS_Cats/weights_history2.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mtyylx/MLND/bf85f83a45155602c959a5438f05ea337d075b49/P6_Dogs_VS_Cats/weights_history2.p -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MLND - Machine Learning Nanodegree 2 | 3 | Udacity Machine Learning Courses.
4 | 5 | - [x] [**Project 0 - Titanic Survival Exploration**](https://github.com/mtyylx/MLND/blob/master/P0_Titanic/titanic_survival_exploration.ipynb) 6 | 7 | - [x] [**Project 1 - Housing Price Prediction**](https://github.com/mtyylx/MLND/blob/master/P1_Boston_Housing/boston_housing.ipynb) 8 | 9 | - [x] [**Project 2 - Finding Donors For CharityML**](https://github.com/mtyylx/MLND/blob/master/P2_Finding_Donors/finding_donors.ipynb) 10 | 11 | - [x] [**Project 3 - Create Customer Segments**](https://github.com/mtyylx/MLND/blob/master/P3_Create_Customer_Segments/customer_segments.ipynb) 12 | 13 | - [x] [**Project 4 - Smart Cab with Q-Learning**](https://github.com/mtyylx/MLND/blob/master/P4_Smart_Cab/smartcab.ipynb) 14 | 15 | - [x] [**Project 5 - Image Classification of CIFAR-10 using TensorFlow**](https://github.com/mtyylx/MLND/blob/master/P5_Image_Classification/image_classification_ZH-CN.ipynb) 16 | 17 | - [ ] [**Project 6 - Cats VS Dogs using Transfer Learning**](https://github.com/mtyylx/MLND/blob/master/P6_Dogs_VS_Cats/Dog%20VS%20Cat%20-%20Experimenting%20With%20Transfer%20Learning.ipynb) 18 | 19 | # Side Projects 20 | 21 | - [**Class Activation Maps (CAM) - Dynamic Visualization**](https://github.com/mtyylx/MLND/blob/master/P6_Dogs_VS_Cats/Class%20Activation%20Map%20Visualizations.ipynb) 22 | 23 | - [**CIFAR10 Image Classification using Keras**: 89% test accuracy](https://github.com/mtyylx/MLND/blob/master/P5_Image_Classification/CIFAR10%20Image%20Classification%20using%20Keras%20(Concise).ipynb) 24 | 25 | - [**MNIST Image Classification using Keras**: 99.5% test accuracy](https://github.com/mtyylx/MLND/blob/master/P5_Image_Classification/MNIST%20Image%20Classification%20using%20Keras.ipynb) 26 | -------------------------------------------------------------------------------- /Visualizations/Activation Functions.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import numpy as np 3 | import matplotlib 4 | import matplotlib.pyplot as plt 5 | matplotlib.rc('text', usetex=True) 6 | 7 | plt.figure(figsize=(17, 3)) 8 | 9 | x = np.arange(-5, 5, 0.001) 10 | y1 = np.tanh(x) 11 | y2 = 1 / (1 + np.exp(-x)) 12 | y3 = np.zeros(x.shape) 13 | y3 = np.maximum(x, 0) 14 | y4 = [] 15 | for i in x: 16 | if i < 0: 17 | y4.append(0.05 * i) 18 | else: 19 | y4.append(i) 20 | 21 | plt.subplot(141) 22 | plt.plot(x, y1) 23 | plt.plot(np.arange(-5, 6), np.zeros(11), 'black', alpha=0.5) 24 | plt.plot(np.zeros(3), np.arange(-1, 2), 'black', alpha=0.5) 25 | plt.title("$Tanh ~ (-1, 1)$", size=15) 26 | plt.grid(True) 27 | plt.subplot(142) 28 | plt.plot(x, y2) 29 | plt.plot(np.arange(-5, 6), np.zeros(11), 'black', alpha=0.5) 30 | plt.plot(np.zeros(2), np.arange(0, 2), 'black', alpha=0.5) 31 | plt.title("$Sigmoid ~ (0, 1)$", size=15) 32 | plt.grid(True) 33 | plt.subplot(143) 34 | plt.plot(x, y3) 35 | plt.title("$\mathrm{ReLU}$", size=15) 36 | plt.ylim([-0.5, 5]) 37 | plt.grid(True) 38 | plt.subplot(144) 39 | plt.plot(x, y4) 40 | plt.title("$\mathrm{Leaky ~ ReLU}$", size=15) 41 | plt.ylim([-0.5, 5]) 42 | plt.grid(True) 43 | plt.show() -------------------------------------------------------------------------------- /Visualizations/convexity.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | 5 | a = np.arange(-4 * np.pi, 4 * np.pi, 0.001) 6 | convex = a**2 7 | concave = -a**2 8 | nonconvex = a**2 + 20 * np.sin(2*a) 9 | plt.figure(figsize=(17,5)) 10 | plt.subplot(131) 
11 | plt.plot(a, convex, color='r') 12 | plt.title('Convex', size=20) 13 | plt.grid(True) 14 | plt.subplot(132) 15 | plt.plot(a, concave, color = 'g') 16 | plt.title('Concave', size=20) 17 | plt.grid(True) 18 | plt.subplot(133) 19 | plt.plot(a, nonconvex, color = 'b') 20 | plt.title('Non-Convex', size=20) 21 | plt.grid(True) 22 | plt.show() -------------------------------------------------------------------------------- /Visualizations/gaussian.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | x = np.arange(10000) # x is just the sample index 5 | y = np.random.randn(10000) # y is not a function of x 6 | 7 | plt.figure(figsize=(18, 5)) 8 | plt.suptitle("Gaussian Random Variable", size=18) 9 | 10 | plt.subplot(131) 11 | plt.scatter(x, y, s=5, alpha=0.5) 12 | plt.title("Value Distribution", size=15) 13 | plt.xlabel("Sample Index") 14 | plt.ylabel("Sample Value") 15 | 16 | plt.subplot(132) 17 | plt.hist(y, bins=99) 18 | plt.title("Value Histogram (PDF)", size=15) 19 | plt.xlabel("Sample Value") 20 | plt.ylabel("Value Occurrence Count") 21 | 22 | plt.subplot(133) 23 | x = np.arange(-5, 5, 0.01) 24 | y = np.exp(-x**2 / 2.0) / np.sqrt(2 * np.pi) # standard normal PDF, matching the np.random.randn samples above 25 | plt.plot(x, y) 26 | plt.title("Probability Density Curve (PDF)", size=15) 27 | plt.xlabel("Density Input") 28 | plt.ylabel("Density Value") 29 | plt.show() -------------------------------------------------------------------------------- /Visualizations/pca_feature_transformation.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | matplotlib.rc('text', usetex=True) 5 | 6 | x = np.arange(1, 9, 0.1) 7 | y = 0.5 * x + np.random.rand(x.shape[0]) - 0.5 8 | plt.figure(figsize=(10, 5)) 9 | plt.scatter(x, y, s=10) 10 | plt.xlim([-2, 12]) 11 | plt.ylim([-1, 6]) 12 | plt.title('$\mathbf{Principal ~Components ~Analysis}$', size=15) 13 | plt.annotate('$x^{\prime}_1$', xy=(1, 3), xytext=(9.40, 4.41), size=15) 14 | plt.annotate('$x^{\prime}_2$', xy=(1, 3), xytext=(0.74, 5), size=15) 15 | plt.annotate('$x_1$', xy=(1, 3), xytext=(10.28, -0.4), size=15) 16 | plt.annotate('$x_2$', xy=(0, 0), xytext=(-0.64, 5.2), size=15) 17 | ax = plt.axes() 18 | ax.arrow(0, 0, 10, 0, linewidth=2, head_width=0.15, head_length=0.3, fc='k', ec='k') 19 | ax.arrow(0, 0, 0, 5, linewidth=2, head_width=0.15, head_length=0.3, fc='k', ec='k') 20 | ax.arrow(1, 0.5, 8, 4, linewidth=2, head_width=0.15, head_length=0.3, fc='red', ec='red') 21 | ax.arrow(3, 0.5, -2, 4, linewidth=2, head_width=0.15, head_length=0.3, fc='red', ec='red') 22 | plt.grid(True) 23 | plt.show() -------------------------------------------------------------------------------- /Visualizations/regularization.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | matplotlib.rc('text', usetex=True) 5 | 6 | x = np.arange(0, 8, 0.001).reshape((8000, 1)) 7 | y0 = np.cos(x) 8 | y1 = 1 - x**2/2 9 | y2 = y1 + x**4/24 10 | y3 = y2 - x**6/720 11 | y4 = y3 + x**8/40320 12 | y5 = y4 - x**10/3628800 13 | y6 = y5 + x**12/479001600 14 | y7 = y6 - x**14/87178291200 15 | 16 | plt.figure(figsize=(10,6)) 17 | plt.title('$\mathbf{Use ~ Polynomial ~ Function ~ to ~ Fit ~ Cosine}$', size=20) 18 | f0 = plt.plot(x, y0, label='$\cos{x}$', linewidth=3, linestyle='--', color='crimson') 19 | f1 =
plt.plot(x, y1, label='$O(x^2)$', color='C1') 20 | f2 = plt.plot(x, y2, label='$O(x^4)$', color='C2') 21 | f3 = plt.plot(x, y3, label='$O(x^6)$', color='C9') 22 | f4 = plt.plot(x, y4, label='$O(x^8)$', color='darkcyan') 23 | f5 = plt.plot(x, y5, label='$O(x^{10})$', color='purple') 24 | f6 = plt.plot(x, y6, label='$O(x^{12})$', color='blue') 25 | f7 = plt.plot(x, y7, label='$O(x^{14})$', color='midnightblue') 26 | plt.xlabel('$x$', size=15) 27 | plt.ylim([-3, 3]) 28 | plt.grid(True) 29 | plt.legend(fontsize='13') 30 | plt.savefig('series') # save before show(); saving afterwards would write an empty figure 31 | plt.show() -------------------------------------------------------------------------------- /Visualizations/univariate_regression_figure.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | import numpy as np 3 | import matplotlib 4 | import matplotlib.pyplot as plt 5 | matplotlib.rc('text', usetex=True) 6 | 7 | x = np.arange(0, 10).reshape((10, 1)) 8 | # Truth (But in reality unknown to us) 9 | y = x * 0.5 10 | 11 | # Prediction (Univariate Linear Regression) 12 | w = np.arange(-1, 2, 0.1).reshape((30, 1)) 13 | # np.sum with axis=1 sums along each row 14 | # np.multiply is element-wise multiplication (with broadcasting), not matrix multiplication 15 | # x.T transposes the column vector 16 | # **2 squares element-wise 17 | mse = np.sum((np.multiply(w, x.T) - y.T)**2, axis=1) 18 | 19 | plt.figure(figsize=(15, 5)) 20 | 21 | # Hypothesis 22 | plt.subplot(121) 23 | plt.title('$Hypothesis ~ Function ~ \hat{y}(x)$', size=15) 24 | w_sample = np.arange(-0.5, 2, 0.5).reshape((5, 1)) 25 | y_pred = np.multiply(w_sample, x.T) 26 | plt.xlabel('$x$', size=15) 27 | plt.ylabel('$\hat{y}$', size=15, rotation=0) 28 | plt.scatter(x, y_pred[0], color='purple') 29 | plt.scatter(x, y_pred[1], color='orange') 30 | plt.scatter(x, y_pred[2], color='red', marker='x') 31 | plt.scatter(x, y_pred[3], color='green') 32 | plt.scatter(x, y_pred[4], color='blue') 33 | reg = plt.plot(x, y, 'r') 34 | plt.annotate('$w = 1.5$', xy=(1, 3), xytext=(6.91, 11.36), size=10) 35 | plt.annotate('$w = 1.0$', xy=(1, 3), xytext=(6.91, 8.25), size=10) 36 | plt.annotate('$w = 0.5$', xy=(1, 3), xytext=(6.91, 4.28), size=10) 37 | plt.annotate('$w = 0.0$', xy=(1, 3), xytext=(6.91, 1.0), size=10) 38 | plt.annotate('$w = -0.5$', xy=(1, 3), xytext=(6.80, -2.2), size=10) 39 | plt.legend(reg, ('$\hat{y}(x) = wx$',), fontsize=20) 40 | plt.grid(True) 41 | 42 | # Cost Function 43 | plt.subplot(122) 44 | plt.title('$Cost ~ Function ~ J(w)$', size=15) 45 | reg2 = plt.plot(w, mse) 46 | mse_sample = range(5, 30, 5) 47 | plt.xlabel('$w$', size=15) 48 | plt.ylabel('$J$', size=15, rotation=0) 49 | plt.scatter(-0.5, mse[5], color='purple', s=100) 50 | plt.scatter( 0.0, mse[10], color='orange', s=100) 51 | plt.scatter( 0.5, mse[15], color='red', s=100) 52 | plt.scatter( 1.0, mse[20], color='green', s=100) 53 | plt.scatter( 1.5, mse[25], color='blue', s=100) 54 | plt.plot(0.5 * np.ones((10, 1)), np.arange(-100, 900, 100), color='black', linestyle='dotted') 55 | plt.plot(np.arange(-5, 5, 1), np.zeros((10, 1)), color='black') 56 | plt.annotate('$Slope < 0 \leftarrow$', xy=(1, 3), xytext=(0.05, 347), size=10) 57 | plt.annotate('$\\rightarrow Slope > 0 $', xy=(1, 3), xytext=(0.498, 347), size=10) 58 | plt.legend(reg2, ('$J(w) = \sum_{i=1}^m {\left(wx^{(i)} - y^{(i)}\\right)^2}$',), fontsize=20) 59 | plt.xlim([-1.1, 2]) 60 | plt.ylim([-40, 670]) 61 | plt.grid(True) 62 | plt.show() --------------------------------------------------------------------------------