├── .gitignore ├── README.md ├── bankChurnModel.py ├── cart_pole_game_understanding.ipynb ├── data ├── 0.png ├── 1.png ├── 2.png ├── 3.png ├── 4.png ├── 5.png ├── 6.png ├── 7.png ├── 8.png ├── 9.png ├── Churn_Modelling.csv ├── KaggleV2-May-2016.csv ├── character-predictions.csv ├── mnist_data │ ├── train │ │ └── 1 │ │ │ ├── 1_1.png │ │ │ ├── 1_2.png │ │ │ ├── 1_3.png │ │ │ ├── 1_4.png │ │ │ └── 1_5.png │ └── validation │ │ └── 1 │ │ ├── 1_1.png │ │ └── 1_2.png └── pima-indians-diabetes.data.csv ├── diabetiesPredictions.py ├── gotCharactersDeathPredictions.py ├── gotCharactersDeathPredictionsAPI.py ├── mnist-flask ├── README.md ├── app.py ├── model │ └── mnistCNN.h5 ├── requirements.txt └── templates │ └── index.html ├── mnistCNN.py ├── mnistCNNModelPredictions.py ├── mnist_with_own_images.ipynb ├── models ├── gotCharactersDeathPredictions.h5 └── mnistCNN.h5 ├── mountain_car_game_understanding.ipynb └── noShowAppointments.py /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .ipynb_checkpoints 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ai-examples 2 | To run these examples you need Keras, TensorFlow, Python. 3 | 4 | Please follow [this link](https://medium.com/@ashok.tankala/build-your-first-deep-learning-neural-network-model-using-keras-tensorflow-in-python-a3e76a6b3ccb) to run one of the examples. Rest are similar. 5 | 6 | Please follow [this link](https://medium.com/@ashok.tankala/handwritten-digit-prediction-using-convolutional-neural-networks-in-tensorflow-with-keras-and-live-5ebddf46dc8) to run MNIST example. 7 | 8 | Please follow [this link](https://medium.com/@ashok.tankala/deploy-your-first-deep-learning-neural-network-model-using-flask-keras-tensorflow-in-python-f4bb7309fc49) to deploy your deep learning model as an API. 
# bankChurnModel.py — train a small dense neural network to predict bank
# customer churn from data/Churn_Modelling.csv and print a confusion matrix.
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd

# Load the dataset: columns 3..12 are the features, column 13 is the churn label.
dataset = pd.read_csv('data/Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values

# Integer-encode the two categorical feature columns
# (Geography at index 1, Gender at index 2).
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])

# One-hot encode the Geography column. The original used
# OneHotEncoder(categorical_features=[1]), but that argument was deprecated in
# scikit-learn 0.20 and removed in 0.22; ColumnTransformer is the supported
# replacement and yields the same layout (encoded columns first, the rest
# passed through). sparse_threshold=0 forces a dense ndarray, matching the
# original .toarray() call.
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer(
    [('geo', OneHotEncoder(), [1])],
    remainder='passthrough',
    sparse_threshold=0,
)
X = ct.fit_transform(X).astype(float)
X = X[:, 1:]  # drop one dummy column to avoid the dummy-variable trap

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Feature scaling: fit the scaler on training data only, then apply to test data.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Importing the Keras libraries and packages
# (the original imported keras twice; the duplicate import was removed).
import keras
from keras.models import Sequential
from keras.layers import Dense

# Network: 11 input features -> two hidden layers of 6 ReLU units
# -> a single sigmoid unit emitting the churn probability.
classifier = Sequential()
classifier.add(Dense(6, activation='relu', input_dim=11))
classifier.add(Dense(6, activation='relu'))
classifier.add(Dense(1, activation='sigmoid'))

# Binary cross-entropy matches the sigmoid output for 0/1 churn labels.
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Log training curves and the model graph for TensorBoard.
tbCallBack = keras.callbacks.TensorBoard(log_dir='/tmp/keras_logs', write_graph=True)

# Fitting our model
classifier.fit(X_train, y_train, batch_size=10, epochs=100, callbacks=[tbCallBack])

# Predicting the Test set results: threshold the probabilities at 0.5.
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)

# Creating the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", 13 | " from ._conv import register_converters as _register_converters\n", 14 | "Using TensorFlow backend.\n" 15 | ] 16 | } 17 | ], 18 | "source": [ 19 | "import gym\n", 20 | "import random\n", 21 | "import numpy as np\n", 22 | "from keras.models import Sequential\n", 23 | "from keras.layers import Dense\n", 24 | "from keras.optimizers import Adam" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 2, 30 | "metadata": {}, 31 | "outputs": [ 32 | { 33 | "name": "stdout", 34 | "output_type": "stream", 35 | "text": [ 36 | "\u001b[33mWARN: gym.spaces.Box autodetected dtype as . Please provide explicit dtype.\u001b[0m\n" 37 | ] 38 | } 39 | ], 40 | "source": [ 41 | "env = gym.make('CartPole-v1')\n", 42 | "env.reset()\n", 43 | "goal_steps = 500\n", 44 | "score_requirement = 60\n", 45 | "intial_games = 10000" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 3, 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "def play_a_random_game_first():\n", 55 | " for step_index in range(goal_steps):\n", 56 | "# env.render()\n", 57 | " action = env.action_space.sample()\n", 58 | " observation, reward, done, info = env.step(action)\n", 59 | " print(\"Step {}:\".format(step_index))\n", 60 | " print(\"action: {}\".format(action))\n", 61 | " print(\"observation: {}\".format(observation))\n", 62 | " print(\"reward: {}\".format(reward))\n", 63 | " print(\"done: {}\".format(done))\n", 64 | " print(\"info: {}\".format(info))\n", 65 | " if done:\n", 66 | " break\n", 67 | " env.reset()" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 4, 73 | "metadata": {}, 74 | "outputs": [ 75 | { 76 | "name": "stdout", 77 | "output_type": "stream", 78 | "text": [ 79 | "Step 0:\n", 80 | "action: 0\n", 81 | "observation: [ 0.00407904 -0.22263742 0.00922543 0.28504967]\n", 82 | "reward: 1.0\n", 83 | "done: False\n", 84 | "info: {}\n", 85 | "Step 1:\n", 86 | 
"action: 1\n", 87 | "observation: [-0.00037371 -0.02764825 0.01492643 -0.0047094 ]\n", 88 | "reward: 1.0\n", 89 | "done: False\n", 90 | "info: {}\n", 91 | "Step 2:\n", 92 | "action: 1\n", 93 | "observation: [-0.00092668 0.16725649 0.01483224 -0.29264574]\n", 94 | "reward: 1.0\n", 95 | "done: False\n", 96 | "info: {}\n", 97 | "Step 3:\n", 98 | "action: 0\n", 99 | "observation: [ 0.00241845 -0.02807376 0.00897932 0.00467793]\n", 100 | "reward: 1.0\n", 101 | "done: False\n", 102 | "info: {}\n", 103 | "Step 4:\n", 104 | "action: 1\n", 105 | "observation: [ 0.00185698 0.16691827 0.00907288 -0.28515844]\n", 106 | "reward: 1.0\n", 107 | "done: False\n", 108 | "info: {}\n", 109 | "Step 5:\n", 110 | "action: 1\n", 111 | "observation: [ 0.00519534 0.36190966 0.00336971 -0.57496609]\n", 112 | "reward: 1.0\n", 113 | "done: False\n", 114 | "info: {}\n", 115 | "Step 6:\n", 116 | "action: 1\n", 117 | "observation: [ 0.01243354 0.55698421 -0.00812961 -0.86658556]\n", 118 | "reward: 1.0\n", 119 | "done: False\n", 120 | "info: {}\n", 121 | "Step 7:\n", 122 | "action: 1\n", 123 | "observation: [ 0.02357322 0.75221584 -0.02546132 -1.16181341]\n", 124 | "reward: 1.0\n", 125 | "done: False\n", 126 | "info: {}\n", 127 | "Step 8:\n", 128 | "action: 1\n", 129 | "observation: [ 0.03861754 0.94765999 -0.04869759 -1.46236944]\n", 130 | "reward: 1.0\n", 131 | "done: False\n", 132 | "info: {}\n", 133 | "Step 9:\n", 134 | "action: 1\n", 135 | "observation: [ 0.05757074 1.14334375 -0.07794498 -1.76985853]\n", 136 | "reward: 1.0\n", 137 | "done: False\n", 138 | "info: {}\n", 139 | "Step 10:\n", 140 | "action: 1\n", 141 | "observation: [ 0.08043761 1.33925415 -0.11334215 -2.08572453]\n", 142 | "reward: 1.0\n", 143 | "done: False\n", 144 | "info: {}\n", 145 | "Step 11:\n", 146 | "action: 0\n", 147 | "observation: [ 0.1072227 1.14544519 -0.15505664 -1.83012771]\n", 148 | "reward: 1.0\n", 149 | "done: False\n", 150 | "info: {}\n", 151 | "Step 12:\n", 152 | "action: 0\n", 153 | "observation: [ 
0.1301316 0.95234385 -0.19165919 -1.58935492]\n", 154 | "reward: 1.0\n", 155 | "done: False\n", 156 | "info: {}\n", 157 | "Step 13:\n", 158 | "action: 1\n", 159 | "observation: [ 0.14917848 1.14915567 -0.22344629 -1.93517053]\n", 160 | "reward: 1.0\n", 161 | "done: True\n", 162 | "info: {}\n" 163 | ] 164 | } 165 | ], 166 | "source": [ 167 | "play_a_random_game_first()" 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": 5, 173 | "metadata": {}, 174 | "outputs": [], 175 | "source": [ 176 | "def model_data_preparation():\n", 177 | " training_data = []\n", 178 | " accepted_scores = []\n", 179 | " for game_index in range(intial_games):\n", 180 | " score = 0\n", 181 | " game_memory = []\n", 182 | " previous_observation = []\n", 183 | " for step_index in range(goal_steps):\n", 184 | " action = random.randrange(0, 2)\n", 185 | " observation, reward, done, info = env.step(action)\n", 186 | " \n", 187 | " if len(previous_observation) > 0:\n", 188 | " game_memory.append([previous_observation, action])\n", 189 | " \n", 190 | " previous_observation = observation\n", 191 | " score += reward\n", 192 | " if done:\n", 193 | " break\n", 194 | " \n", 195 | " if score >= score_requirement:\n", 196 | " accepted_scores.append(score)\n", 197 | " for data in game_memory:\n", 198 | " if data[1] == 1:\n", 199 | " output = [0, 1]\n", 200 | " elif data[1] == 0:\n", 201 | " output = [1, 0]\n", 202 | " training_data.append([data[0], output])\n", 203 | " \n", 204 | " env.reset()\n", 205 | "\n", 206 | " print(accepted_scores)\n", 207 | " \n", 208 | " return training_data" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": 6, 214 | "metadata": {}, 215 | "outputs": [ 216 | { 217 | "name": "stdout", 218 | "output_type": "stream", 219 | "text": [ 220 | "[63.0, 62.0, 74.0, 66.0, 84.0, 69.0, 65.0, 64.0, 66.0, 63.0, 62.0, 67.0, 62.0, 60.0, 76.0, 65.0, 87.0, 85.0, 76.0, 81.0, 68.0, 63.0, 80.0, 65.0, 63.0, 60.0, 60.0, 61.0, 86.0, 71.0, 72.0, 60.0, 95.0, 
65.0, 68.0, 68.0, 63.0, 95.0, 91.0, 99.0, 86.0, 68.0, 72.0, 69.0, 62.0, 74.0, 76.0, 74.0, 64.0, 77.0, 92.0, 67.0, 67.0, 99.0, 81.0, 81.0, 63.0, 73.0, 70.0, 68.0, 63.0, 77.0, 61.0, 62.0, 78.0, 61.0, 71.0, 77.0, 70.0, 72.0, 80.0, 61.0, 68.0, 61.0, 86.0, 145.0, 74.0, 68.0, 79.0, 61.0, 63.0, 65.0, 62.0, 64.0, 65.0, 80.0, 67.0, 78.0, 76.0, 66.0, 63.0, 110.0, 62.0, 70.0, 72.0, 109.0, 76.0, 75.0, 75.0, 73.0, 75.0, 65.0, 77.0, 64.0, 61.0, 60.0, 66.0, 61.0, 62.0, 71.0, 75.0, 82.0, 95.0, 67.0, 61.0, 66.0, 67.0, 65.0, 61.0, 65.0, 66.0, 62.0, 70.0, 89.0, 96.0, 86.0, 62.0, 61.0, 75.0, 84.0, 63.0, 66.0, 73.0, 68.0, 61.0, 66.0, 144.0, 64.0, 61.0, 62.0, 62.0, 67.0, 66.0, 65.0, 66.0, 71.0, 68.0, 81.0, 73.0, 75.0, 75.0, 79.0, 75.0, 104.0, 69.0, 66.0, 81.0, 73.0, 60.0, 64.0, 78.0, 115.0, 62.0, 91.0, 70.0, 69.0, 64.0, 86.0, 70.0, 70.0, 68.0]\n" 221 | ] 222 | } 223 | ], 224 | "source": [ 225 | "training_data = model_data_preparation()" 226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "execution_count": 7, 231 | "metadata": {}, 232 | "outputs": [], 233 | "source": [ 234 | "def build_model(input_size, output_size):\n", 235 | " model = Sequential()\n", 236 | " model.add(Dense(128, input_dim=input_size, activation='relu'))\n", 237 | " model.add(Dense(52, activation='relu'))\n", 238 | " model.add(Dense(output_size, activation='linear'))\n", 239 | " model.compile(loss='mse', optimizer=Adam())\n", 240 | "\n", 241 | " return model" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": 8, 247 | "metadata": {}, 248 | "outputs": [], 249 | "source": [ 250 | "def train_model(training_data):\n", 251 | " X = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]))\n", 252 | " y = np.array([i[1] for i in training_data]).reshape(-1, len(training_data[0][1]))\n", 253 | " model = build_model(input_size=len(X[0]), output_size=len(y[0]))\n", 254 | " \n", 255 | " model.fit(X, y, epochs=10)\n", 256 | " return model" 257 | ] 258 | }, 259 | { 260 | 
"cell_type": "code", 261 | "execution_count": 9, 262 | "metadata": {}, 263 | "outputs": [ 264 | { 265 | "name": "stdout", 266 | "output_type": "stream", 267 | "text": [ 268 | "Epoch 1/10\n", 269 | "12236/12236 [==============================] - 1s 94us/step - loss: 0.2483\n", 270 | "Epoch 2/10\n", 271 | "12236/12236 [==============================] - 1s 71us/step - loss: 0.2348\n", 272 | "Epoch 3/10\n", 273 | "12236/12236 [==============================] - 1s 67us/step - loss: 0.2333\n", 274 | "Epoch 4/10\n", 275 | "12236/12236 [==============================] - 1s 68us/step - loss: 0.2334\n", 276 | "Epoch 5/10\n", 277 | "12236/12236 [==============================] - 1s 64us/step - loss: 0.2325\n", 278 | "Epoch 6/10\n", 279 | "12236/12236 [==============================] - 1s 63us/step - loss: 0.2324\n", 280 | "Epoch 7/10\n", 281 | "12236/12236 [==============================] - 1s 66us/step - loss: 0.2315\n", 282 | "Epoch 8/10\n", 283 | "12236/12236 [==============================] - 1s 65us/step - loss: 0.2318\n", 284 | "Epoch 9/10\n", 285 | "12236/12236 [==============================] - 1s 65us/step - loss: 0.2317\n", 286 | "Epoch 10/10\n", 287 | "12236/12236 [==============================] - 1s 65us/step - loss: 0.2318\n" 288 | ] 289 | } 290 | ], 291 | "source": [ 292 | "trained_model = train_model(training_data)" 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": 10, 298 | "metadata": {}, 299 | "outputs": [ 300 | { 301 | "name": "stdout", 302 | "output_type": "stream", 303 | "text": [ 304 | "[500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 247.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 259.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 264.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 241.0, 500.0, 500.0, 500.0, 500.0, 
500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 255.0, 500.0, 500.0, 500.0, 245.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0, 500.0]\n", 305 | "Average Score: 485.11\n", 306 | "choice 1:0.5007936344334275 choice 0:0.49920636556657255\n" 307 | ] 308 | } 309 | ], 310 | "source": [ 311 | "scores = []\n", 312 | "choices = []\n", 313 | "for each_game in range(100):\n", 314 | " score = 0\n", 315 | " prev_obs = []\n", 316 | " for step_index in range(goal_steps):\n", 317 | " # Uncomment below line if you want to see how our bot is playing the game.\n", 318 | " # env.render()\n", 319 | " if len(prev_obs)==0:\n", 320 | " action = random.randrange(0,2)\n", 321 | " else:\n", 322 | " action = np.argmax(trained_model.predict(prev_obs.reshape(-1, len(prev_obs)))[0])\n", 323 | " \n", 324 | " choices.append(action)\n", 325 | " new_observation, reward, done, info = env.step(action)\n", 326 | " prev_obs = new_observation\n", 327 | " score+=reward\n", 328 | " if done:\n", 329 | " break\n", 330 | "\n", 331 | " env.reset()\n", 332 | " scores.append(score)\n", 333 | "\n", 334 | "print(scores)\n", 335 | "print('Average Score:',sum(scores)/len(scores))\n", 336 | "print('choice 1:{} choice 0:{}'.format(choices.count(1)/len(choices),choices.count(0)/len(choices)))" 337 | ] 338 | } 339 | ], 340 | "metadata": { 341 | "kernelspec": { 342 | "display_name": "Python 3", 343 | "language": "python", 344 | "name": "python3" 345 | }, 346 | "language_info": { 347 | "codemirror_mode": { 348 | "name": "ipython", 349 | "version": 3 350 | }, 351 | "file_extension": ".py", 352 | "mimetype": "text/x-python", 353 | "name": "python", 354 | "nbconvert_exporter": "python", 355 | "pygments_lexer": "ipython3", 356 | "version": "3.6.4" 357 | } 358 | }, 359 | "nbformat": 4, 360 | "nbformat_minor": 2 361 | } 362 | -------------------------------------------------------------------------------- 
/data/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/0.png -------------------------------------------------------------------------------- /data/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/1.png -------------------------------------------------------------------------------- /data/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/2.png -------------------------------------------------------------------------------- /data/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/3.png -------------------------------------------------------------------------------- /data/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/4.png -------------------------------------------------------------------------------- /data/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/5.png -------------------------------------------------------------------------------- /data/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/6.png -------------------------------------------------------------------------------- /data/7.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/7.png -------------------------------------------------------------------------------- /data/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/8.png -------------------------------------------------------------------------------- /data/9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/9.png -------------------------------------------------------------------------------- /data/mnist_data/train/1/1_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/mnist_data/train/1/1_1.png -------------------------------------------------------------------------------- /data/mnist_data/train/1/1_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/mnist_data/train/1/1_2.png -------------------------------------------------------------------------------- /data/mnist_data/train/1/1_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/mnist_data/train/1/1_3.png -------------------------------------------------------------------------------- /data/mnist_data/train/1/1_4.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/mnist_data/train/1/1_4.png -------------------------------------------------------------------------------- /data/mnist_data/train/1/1_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/mnist_data/train/1/1_5.png -------------------------------------------------------------------------------- /data/mnist_data/validation/1/1_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/mnist_data/validation/1/1_1.png -------------------------------------------------------------------------------- /data/mnist_data/validation/1/1_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/data/mnist_data/validation/1/1_2.png -------------------------------------------------------------------------------- /data/pima-indians-diabetes.data.csv: -------------------------------------------------------------------------------- 1 | 6,148,72,35,0,33.6,0.627,50,1 2 | 1,85,66,29,0,26.6,0.351,31,0 3 | 8,183,64,0,0,23.3,0.672,32,1 4 | 1,89,66,23,94,28.1,0.167,21,0 5 | 0,137,40,35,168,43.1,2.288,33,1 6 | 5,116,74,0,0,25.6,0.201,30,0 7 | 3,78,50,32,88,31.0,0.248,26,1 8 | 10,115,0,0,0,35.3,0.134,29,0 9 | 2,197,70,45,543,30.5,0.158,53,1 10 | 8,125,96,0,0,0.0,0.232,54,1 11 | 4,110,92,0,0,37.6,0.191,30,0 12 | 10,168,74,0,0,38.0,0.537,34,1 13 | 10,139,80,0,0,27.1,1.441,57,0 14 | 1,189,60,23,846,30.1,0.398,59,1 15 | 5,166,72,19,175,25.8,0.587,51,1 16 | 7,100,0,0,0,30.0,0.484,32,1 17 | 0,118,84,47,230,45.8,0.551,31,1 18 | 7,107,74,0,0,29.6,0.254,31,1 19 | 1,103,30,38,83,43.3,0.183,33,0 20 | 
1,115,70,30,96,34.6,0.529,32,1 21 | 3,126,88,41,235,39.3,0.704,27,0 22 | 8,99,84,0,0,35.4,0.388,50,0 23 | 7,196,90,0,0,39.8,0.451,41,1 24 | 9,119,80,35,0,29.0,0.263,29,1 25 | 11,143,94,33,146,36.6,0.254,51,1 26 | 10,125,70,26,115,31.1,0.205,41,1 27 | 7,147,76,0,0,39.4,0.257,43,1 28 | 1,97,66,15,140,23.2,0.487,22,0 29 | 13,145,82,19,110,22.2,0.245,57,0 30 | 5,117,92,0,0,34.1,0.337,38,0 31 | 5,109,75,26,0,36.0,0.546,60,0 32 | 3,158,76,36,245,31.6,0.851,28,1 33 | 3,88,58,11,54,24.8,0.267,22,0 34 | 6,92,92,0,0,19.9,0.188,28,0 35 | 10,122,78,31,0,27.6,0.512,45,0 36 | 4,103,60,33,192,24.0,0.966,33,0 37 | 11,138,76,0,0,33.2,0.420,35,0 38 | 9,102,76,37,0,32.9,0.665,46,1 39 | 2,90,68,42,0,38.2,0.503,27,1 40 | 4,111,72,47,207,37.1,1.390,56,1 41 | 3,180,64,25,70,34.0,0.271,26,0 42 | 7,133,84,0,0,40.2,0.696,37,0 43 | 7,106,92,18,0,22.7,0.235,48,0 44 | 9,171,110,24,240,45.4,0.721,54,1 45 | 7,159,64,0,0,27.4,0.294,40,0 46 | 0,180,66,39,0,42.0,1.893,25,1 47 | 1,146,56,0,0,29.7,0.564,29,0 48 | 2,71,70,27,0,28.0,0.586,22,0 49 | 7,103,66,32,0,39.1,0.344,31,1 50 | 7,105,0,0,0,0.0,0.305,24,0 51 | 1,103,80,11,82,19.4,0.491,22,0 52 | 1,101,50,15,36,24.2,0.526,26,0 53 | 5,88,66,21,23,24.4,0.342,30,0 54 | 8,176,90,34,300,33.7,0.467,58,1 55 | 7,150,66,42,342,34.7,0.718,42,0 56 | 1,73,50,10,0,23.0,0.248,21,0 57 | 7,187,68,39,304,37.7,0.254,41,1 58 | 0,100,88,60,110,46.8,0.962,31,0 59 | 0,146,82,0,0,40.5,1.781,44,0 60 | 0,105,64,41,142,41.5,0.173,22,0 61 | 2,84,0,0,0,0.0,0.304,21,0 62 | 8,133,72,0,0,32.9,0.270,39,1 63 | 5,44,62,0,0,25.0,0.587,36,0 64 | 2,141,58,34,128,25.4,0.699,24,0 65 | 7,114,66,0,0,32.8,0.258,42,1 66 | 5,99,74,27,0,29.0,0.203,32,0 67 | 0,109,88,30,0,32.5,0.855,38,1 68 | 2,109,92,0,0,42.7,0.845,54,0 69 | 1,95,66,13,38,19.6,0.334,25,0 70 | 4,146,85,27,100,28.9,0.189,27,0 71 | 2,100,66,20,90,32.9,0.867,28,1 72 | 5,139,64,35,140,28.6,0.411,26,0 73 | 13,126,90,0,0,43.4,0.583,42,1 74 | 4,129,86,20,270,35.1,0.231,23,0 75 | 1,79,75,30,0,32.0,0.396,22,0 76 | 
1,0,48,20,0,24.7,0.140,22,0 77 | 7,62,78,0,0,32.6,0.391,41,0 78 | 5,95,72,33,0,37.7,0.370,27,0 79 | 0,131,0,0,0,43.2,0.270,26,1 80 | 2,112,66,22,0,25.0,0.307,24,0 81 | 3,113,44,13,0,22.4,0.140,22,0 82 | 2,74,0,0,0,0.0,0.102,22,0 83 | 7,83,78,26,71,29.3,0.767,36,0 84 | 0,101,65,28,0,24.6,0.237,22,0 85 | 5,137,108,0,0,48.8,0.227,37,1 86 | 2,110,74,29,125,32.4,0.698,27,0 87 | 13,106,72,54,0,36.6,0.178,45,0 88 | 2,100,68,25,71,38.5,0.324,26,0 89 | 15,136,70,32,110,37.1,0.153,43,1 90 | 1,107,68,19,0,26.5,0.165,24,0 91 | 1,80,55,0,0,19.1,0.258,21,0 92 | 4,123,80,15,176,32.0,0.443,34,0 93 | 7,81,78,40,48,46.7,0.261,42,0 94 | 4,134,72,0,0,23.8,0.277,60,1 95 | 2,142,82,18,64,24.7,0.761,21,0 96 | 6,144,72,27,228,33.9,0.255,40,0 97 | 2,92,62,28,0,31.6,0.130,24,0 98 | 1,71,48,18,76,20.4,0.323,22,0 99 | 6,93,50,30,64,28.7,0.356,23,0 100 | 1,122,90,51,220,49.7,0.325,31,1 101 | 1,163,72,0,0,39.0,1.222,33,1 102 | 1,151,60,0,0,26.1,0.179,22,0 103 | 0,125,96,0,0,22.5,0.262,21,0 104 | 1,81,72,18,40,26.6,0.283,24,0 105 | 2,85,65,0,0,39.6,0.930,27,0 106 | 1,126,56,29,152,28.7,0.801,21,0 107 | 1,96,122,0,0,22.4,0.207,27,0 108 | 4,144,58,28,140,29.5,0.287,37,0 109 | 3,83,58,31,18,34.3,0.336,25,0 110 | 0,95,85,25,36,37.4,0.247,24,1 111 | 3,171,72,33,135,33.3,0.199,24,1 112 | 8,155,62,26,495,34.0,0.543,46,1 113 | 1,89,76,34,37,31.2,0.192,23,0 114 | 4,76,62,0,0,34.0,0.391,25,0 115 | 7,160,54,32,175,30.5,0.588,39,1 116 | 4,146,92,0,0,31.2,0.539,61,1 117 | 5,124,74,0,0,34.0,0.220,38,1 118 | 5,78,48,0,0,33.7,0.654,25,0 119 | 4,97,60,23,0,28.2,0.443,22,0 120 | 4,99,76,15,51,23.2,0.223,21,0 121 | 0,162,76,56,100,53.2,0.759,25,1 122 | 6,111,64,39,0,34.2,0.260,24,0 123 | 2,107,74,30,100,33.6,0.404,23,0 124 | 5,132,80,0,0,26.8,0.186,69,0 125 | 0,113,76,0,0,33.3,0.278,23,1 126 | 1,88,30,42,99,55.0,0.496,26,1 127 | 3,120,70,30,135,42.9,0.452,30,0 128 | 1,118,58,36,94,33.3,0.261,23,0 129 | 1,117,88,24,145,34.5,0.403,40,1 130 | 0,105,84,0,0,27.9,0.741,62,1 131 | 4,173,70,14,168,29.7,0.361,33,1 132 | 
9,122,56,0,0,33.3,1.114,33,1 133 | 3,170,64,37,225,34.5,0.356,30,1 134 | 8,84,74,31,0,38.3,0.457,39,0 135 | 2,96,68,13,49,21.1,0.647,26,0 136 | 2,125,60,20,140,33.8,0.088,31,0 137 | 0,100,70,26,50,30.8,0.597,21,0 138 | 0,93,60,25,92,28.7,0.532,22,0 139 | 0,129,80,0,0,31.2,0.703,29,0 140 | 5,105,72,29,325,36.9,0.159,28,0 141 | 3,128,78,0,0,21.1,0.268,55,0 142 | 5,106,82,30,0,39.5,0.286,38,0 143 | 2,108,52,26,63,32.5,0.318,22,0 144 | 10,108,66,0,0,32.4,0.272,42,1 145 | 4,154,62,31,284,32.8,0.237,23,0 146 | 0,102,75,23,0,0.0,0.572,21,0 147 | 9,57,80,37,0,32.8,0.096,41,0 148 | 2,106,64,35,119,30.5,1.400,34,0 149 | 5,147,78,0,0,33.7,0.218,65,0 150 | 2,90,70,17,0,27.3,0.085,22,0 151 | 1,136,74,50,204,37.4,0.399,24,0 152 | 4,114,65,0,0,21.9,0.432,37,0 153 | 9,156,86,28,155,34.3,1.189,42,1 154 | 1,153,82,42,485,40.6,0.687,23,0 155 | 8,188,78,0,0,47.9,0.137,43,1 156 | 7,152,88,44,0,50.0,0.337,36,1 157 | 2,99,52,15,94,24.6,0.637,21,0 158 | 1,109,56,21,135,25.2,0.833,23,0 159 | 2,88,74,19,53,29.0,0.229,22,0 160 | 17,163,72,41,114,40.9,0.817,47,1 161 | 4,151,90,38,0,29.7,0.294,36,0 162 | 7,102,74,40,105,37.2,0.204,45,0 163 | 0,114,80,34,285,44.2,0.167,27,0 164 | 2,100,64,23,0,29.7,0.368,21,0 165 | 0,131,88,0,0,31.6,0.743,32,1 166 | 6,104,74,18,156,29.9,0.722,41,1 167 | 3,148,66,25,0,32.5,0.256,22,0 168 | 4,120,68,0,0,29.6,0.709,34,0 169 | 4,110,66,0,0,31.9,0.471,29,0 170 | 3,111,90,12,78,28.4,0.495,29,0 171 | 6,102,82,0,0,30.8,0.180,36,1 172 | 6,134,70,23,130,35.4,0.542,29,1 173 | 2,87,0,23,0,28.9,0.773,25,0 174 | 1,79,60,42,48,43.5,0.678,23,0 175 | 2,75,64,24,55,29.7,0.370,33,0 176 | 8,179,72,42,130,32.7,0.719,36,1 177 | 6,85,78,0,0,31.2,0.382,42,0 178 | 0,129,110,46,130,67.1,0.319,26,1 179 | 5,143,78,0,0,45.0,0.190,47,0 180 | 5,130,82,0,0,39.1,0.956,37,1 181 | 6,87,80,0,0,23.2,0.084,32,0 182 | 0,119,64,18,92,34.9,0.725,23,0 183 | 1,0,74,20,23,27.7,0.299,21,0 184 | 5,73,60,0,0,26.8,0.268,27,0 185 | 4,141,74,0,0,27.6,0.244,40,0 186 | 7,194,68,28,0,35.9,0.745,41,1 187 | 
8,181,68,36,495,30.1,0.615,60,1 188 | 1,128,98,41,58,32.0,1.321,33,1 189 | 8,109,76,39,114,27.9,0.640,31,1 190 | 5,139,80,35,160,31.6,0.361,25,1 191 | 3,111,62,0,0,22.6,0.142,21,0 192 | 9,123,70,44,94,33.1,0.374,40,0 193 | 7,159,66,0,0,30.4,0.383,36,1 194 | 11,135,0,0,0,52.3,0.578,40,1 195 | 8,85,55,20,0,24.4,0.136,42,0 196 | 5,158,84,41,210,39.4,0.395,29,1 197 | 1,105,58,0,0,24.3,0.187,21,0 198 | 3,107,62,13,48,22.9,0.678,23,1 199 | 4,109,64,44,99,34.8,0.905,26,1 200 | 4,148,60,27,318,30.9,0.150,29,1 201 | 0,113,80,16,0,31.0,0.874,21,0 202 | 1,138,82,0,0,40.1,0.236,28,0 203 | 0,108,68,20,0,27.3,0.787,32,0 204 | 2,99,70,16,44,20.4,0.235,27,0 205 | 6,103,72,32,190,37.7,0.324,55,0 206 | 5,111,72,28,0,23.9,0.407,27,0 207 | 8,196,76,29,280,37.5,0.605,57,1 208 | 5,162,104,0,0,37.7,0.151,52,1 209 | 1,96,64,27,87,33.2,0.289,21,0 210 | 7,184,84,33,0,35.5,0.355,41,1 211 | 2,81,60,22,0,27.7,0.290,25,0 212 | 0,147,85,54,0,42.8,0.375,24,0 213 | 7,179,95,31,0,34.2,0.164,60,0 214 | 0,140,65,26,130,42.6,0.431,24,1 215 | 9,112,82,32,175,34.2,0.260,36,1 216 | 12,151,70,40,271,41.8,0.742,38,1 217 | 5,109,62,41,129,35.8,0.514,25,1 218 | 6,125,68,30,120,30.0,0.464,32,0 219 | 5,85,74,22,0,29.0,1.224,32,1 220 | 5,112,66,0,0,37.8,0.261,41,1 221 | 0,177,60,29,478,34.6,1.072,21,1 222 | 2,158,90,0,0,31.6,0.805,66,1 223 | 7,119,0,0,0,25.2,0.209,37,0 224 | 7,142,60,33,190,28.8,0.687,61,0 225 | 1,100,66,15,56,23.6,0.666,26,0 226 | 1,87,78,27,32,34.6,0.101,22,0 227 | 0,101,76,0,0,35.7,0.198,26,0 228 | 3,162,52,38,0,37.2,0.652,24,1 229 | 4,197,70,39,744,36.7,2.329,31,0 230 | 0,117,80,31,53,45.2,0.089,24,0 231 | 4,142,86,0,0,44.0,0.645,22,1 232 | 6,134,80,37,370,46.2,0.238,46,1 233 | 1,79,80,25,37,25.4,0.583,22,0 234 | 4,122,68,0,0,35.0,0.394,29,0 235 | 3,74,68,28,45,29.7,0.293,23,0 236 | 4,171,72,0,0,43.6,0.479,26,1 237 | 7,181,84,21,192,35.9,0.586,51,1 238 | 0,179,90,27,0,44.1,0.686,23,1 239 | 9,164,84,21,0,30.8,0.831,32,1 240 | 0,104,76,0,0,18.4,0.582,27,0 241 | 1,91,64,24,0,29.2,0.192,21,0 
242 | 4,91,70,32,88,33.1,0.446,22,0 243 | 3,139,54,0,0,25.6,0.402,22,1 244 | 6,119,50,22,176,27.1,1.318,33,1 245 | 2,146,76,35,194,38.2,0.329,29,0 246 | 9,184,85,15,0,30.0,1.213,49,1 247 | 10,122,68,0,0,31.2,0.258,41,0 248 | 0,165,90,33,680,52.3,0.427,23,0 249 | 9,124,70,33,402,35.4,0.282,34,0 250 | 1,111,86,19,0,30.1,0.143,23,0 251 | 9,106,52,0,0,31.2,0.380,42,0 252 | 2,129,84,0,0,28.0,0.284,27,0 253 | 2,90,80,14,55,24.4,0.249,24,0 254 | 0,86,68,32,0,35.8,0.238,25,0 255 | 12,92,62,7,258,27.6,0.926,44,1 256 | 1,113,64,35,0,33.6,0.543,21,1 257 | 3,111,56,39,0,30.1,0.557,30,0 258 | 2,114,68,22,0,28.7,0.092,25,0 259 | 1,193,50,16,375,25.9,0.655,24,0 260 | 11,155,76,28,150,33.3,1.353,51,1 261 | 3,191,68,15,130,30.9,0.299,34,0 262 | 3,141,0,0,0,30.0,0.761,27,1 263 | 4,95,70,32,0,32.1,0.612,24,0 264 | 3,142,80,15,0,32.4,0.200,63,0 265 | 4,123,62,0,0,32.0,0.226,35,1 266 | 5,96,74,18,67,33.6,0.997,43,0 267 | 0,138,0,0,0,36.3,0.933,25,1 268 | 2,128,64,42,0,40.0,1.101,24,0 269 | 0,102,52,0,0,25.1,0.078,21,0 270 | 2,146,0,0,0,27.5,0.240,28,1 271 | 10,101,86,37,0,45.6,1.136,38,1 272 | 2,108,62,32,56,25.2,0.128,21,0 273 | 3,122,78,0,0,23.0,0.254,40,0 274 | 1,71,78,50,45,33.2,0.422,21,0 275 | 13,106,70,0,0,34.2,0.251,52,0 276 | 2,100,70,52,57,40.5,0.677,25,0 277 | 7,106,60,24,0,26.5,0.296,29,1 278 | 0,104,64,23,116,27.8,0.454,23,0 279 | 5,114,74,0,0,24.9,0.744,57,0 280 | 2,108,62,10,278,25.3,0.881,22,0 281 | 0,146,70,0,0,37.9,0.334,28,1 282 | 10,129,76,28,122,35.9,0.280,39,0 283 | 7,133,88,15,155,32.4,0.262,37,0 284 | 7,161,86,0,0,30.4,0.165,47,1 285 | 2,108,80,0,0,27.0,0.259,52,1 286 | 7,136,74,26,135,26.0,0.647,51,0 287 | 5,155,84,44,545,38.7,0.619,34,0 288 | 1,119,86,39,220,45.6,0.808,29,1 289 | 4,96,56,17,49,20.8,0.340,26,0 290 | 5,108,72,43,75,36.1,0.263,33,0 291 | 0,78,88,29,40,36.9,0.434,21,0 292 | 0,107,62,30,74,36.6,0.757,25,1 293 | 2,128,78,37,182,43.3,1.224,31,1 294 | 1,128,48,45,194,40.5,0.613,24,1 295 | 0,161,50,0,0,21.9,0.254,65,0 296 | 
6,151,62,31,120,35.5,0.692,28,0 297 | 2,146,70,38,360,28.0,0.337,29,1 298 | 0,126,84,29,215,30.7,0.520,24,0 299 | 14,100,78,25,184,36.6,0.412,46,1 300 | 8,112,72,0,0,23.6,0.840,58,0 301 | 0,167,0,0,0,32.3,0.839,30,1 302 | 2,144,58,33,135,31.6,0.422,25,1 303 | 5,77,82,41,42,35.8,0.156,35,0 304 | 5,115,98,0,0,52.9,0.209,28,1 305 | 3,150,76,0,0,21.0,0.207,37,0 306 | 2,120,76,37,105,39.7,0.215,29,0 307 | 10,161,68,23,132,25.5,0.326,47,1 308 | 0,137,68,14,148,24.8,0.143,21,0 309 | 0,128,68,19,180,30.5,1.391,25,1 310 | 2,124,68,28,205,32.9,0.875,30,1 311 | 6,80,66,30,0,26.2,0.313,41,0 312 | 0,106,70,37,148,39.4,0.605,22,0 313 | 2,155,74,17,96,26.6,0.433,27,1 314 | 3,113,50,10,85,29.5,0.626,25,0 315 | 7,109,80,31,0,35.9,1.127,43,1 316 | 2,112,68,22,94,34.1,0.315,26,0 317 | 3,99,80,11,64,19.3,0.284,30,0 318 | 3,182,74,0,0,30.5,0.345,29,1 319 | 3,115,66,39,140,38.1,0.150,28,0 320 | 6,194,78,0,0,23.5,0.129,59,1 321 | 4,129,60,12,231,27.5,0.527,31,0 322 | 3,112,74,30,0,31.6,0.197,25,1 323 | 0,124,70,20,0,27.4,0.254,36,1 324 | 13,152,90,33,29,26.8,0.731,43,1 325 | 2,112,75,32,0,35.7,0.148,21,0 326 | 1,157,72,21,168,25.6,0.123,24,0 327 | 1,122,64,32,156,35.1,0.692,30,1 328 | 10,179,70,0,0,35.1,0.200,37,0 329 | 2,102,86,36,120,45.5,0.127,23,1 330 | 6,105,70,32,68,30.8,0.122,37,0 331 | 8,118,72,19,0,23.1,1.476,46,0 332 | 2,87,58,16,52,32.7,0.166,25,0 333 | 1,180,0,0,0,43.3,0.282,41,1 334 | 12,106,80,0,0,23.6,0.137,44,0 335 | 1,95,60,18,58,23.9,0.260,22,0 336 | 0,165,76,43,255,47.9,0.259,26,0 337 | 0,117,0,0,0,33.8,0.932,44,0 338 | 5,115,76,0,0,31.2,0.343,44,1 339 | 9,152,78,34,171,34.2,0.893,33,1 340 | 7,178,84,0,0,39.9,0.331,41,1 341 | 1,130,70,13,105,25.9,0.472,22,0 342 | 1,95,74,21,73,25.9,0.673,36,0 343 | 1,0,68,35,0,32.0,0.389,22,0 344 | 5,122,86,0,0,34.7,0.290,33,0 345 | 8,95,72,0,0,36.8,0.485,57,0 346 | 8,126,88,36,108,38.5,0.349,49,0 347 | 1,139,46,19,83,28.7,0.654,22,0 348 | 3,116,0,0,0,23.5,0.187,23,0 349 | 3,99,62,19,74,21.8,0.279,26,0 350 | 5,0,80,32,0,41.0,0.346,37,1 
351 | 4,92,80,0,0,42.2,0.237,29,0 352 | 4,137,84,0,0,31.2,0.252,30,0 353 | 3,61,82,28,0,34.4,0.243,46,0 354 | 1,90,62,12,43,27.2,0.580,24,0 355 | 3,90,78,0,0,42.7,0.559,21,0 356 | 9,165,88,0,0,30.4,0.302,49,1 357 | 1,125,50,40,167,33.3,0.962,28,1 358 | 13,129,0,30,0,39.9,0.569,44,1 359 | 12,88,74,40,54,35.3,0.378,48,0 360 | 1,196,76,36,249,36.5,0.875,29,1 361 | 5,189,64,33,325,31.2,0.583,29,1 362 | 5,158,70,0,0,29.8,0.207,63,0 363 | 5,103,108,37,0,39.2,0.305,65,0 364 | 4,146,78,0,0,38.5,0.520,67,1 365 | 4,147,74,25,293,34.9,0.385,30,0 366 | 5,99,54,28,83,34.0,0.499,30,0 367 | 6,124,72,0,0,27.6,0.368,29,1 368 | 0,101,64,17,0,21.0,0.252,21,0 369 | 3,81,86,16,66,27.5,0.306,22,0 370 | 1,133,102,28,140,32.8,0.234,45,1 371 | 3,173,82,48,465,38.4,2.137,25,1 372 | 0,118,64,23,89,0.0,1.731,21,0 373 | 0,84,64,22,66,35.8,0.545,21,0 374 | 2,105,58,40,94,34.9,0.225,25,0 375 | 2,122,52,43,158,36.2,0.816,28,0 376 | 12,140,82,43,325,39.2,0.528,58,1 377 | 0,98,82,15,84,25.2,0.299,22,0 378 | 1,87,60,37,75,37.2,0.509,22,0 379 | 4,156,75,0,0,48.3,0.238,32,1 380 | 0,93,100,39,72,43.4,1.021,35,0 381 | 1,107,72,30,82,30.8,0.821,24,0 382 | 0,105,68,22,0,20.0,0.236,22,0 383 | 1,109,60,8,182,25.4,0.947,21,0 384 | 1,90,62,18,59,25.1,1.268,25,0 385 | 1,125,70,24,110,24.3,0.221,25,0 386 | 1,119,54,13,50,22.3,0.205,24,0 387 | 5,116,74,29,0,32.3,0.660,35,1 388 | 8,105,100,36,0,43.3,0.239,45,1 389 | 5,144,82,26,285,32.0,0.452,58,1 390 | 3,100,68,23,81,31.6,0.949,28,0 391 | 1,100,66,29,196,32.0,0.444,42,0 392 | 5,166,76,0,0,45.7,0.340,27,1 393 | 1,131,64,14,415,23.7,0.389,21,0 394 | 4,116,72,12,87,22.1,0.463,37,0 395 | 4,158,78,0,0,32.9,0.803,31,1 396 | 2,127,58,24,275,27.7,1.600,25,0 397 | 3,96,56,34,115,24.7,0.944,39,0 398 | 0,131,66,40,0,34.3,0.196,22,1 399 | 3,82,70,0,0,21.1,0.389,25,0 400 | 3,193,70,31,0,34.9,0.241,25,1 401 | 4,95,64,0,0,32.0,0.161,31,1 402 | 6,137,61,0,0,24.2,0.151,55,0 403 | 5,136,84,41,88,35.0,0.286,35,1 404 | 9,72,78,25,0,31.6,0.280,38,0 405 | 5,168,64,0,0,32.9,0.135,41,1 
406 | 2,123,48,32,165,42.1,0.520,26,0 407 | 4,115,72,0,0,28.9,0.376,46,1 408 | 0,101,62,0,0,21.9,0.336,25,0 409 | 8,197,74,0,0,25.9,1.191,39,1 410 | 1,172,68,49,579,42.4,0.702,28,1 411 | 6,102,90,39,0,35.7,0.674,28,0 412 | 1,112,72,30,176,34.4,0.528,25,0 413 | 1,143,84,23,310,42.4,1.076,22,0 414 | 1,143,74,22,61,26.2,0.256,21,0 415 | 0,138,60,35,167,34.6,0.534,21,1 416 | 3,173,84,33,474,35.7,0.258,22,1 417 | 1,97,68,21,0,27.2,1.095,22,0 418 | 4,144,82,32,0,38.5,0.554,37,1 419 | 1,83,68,0,0,18.2,0.624,27,0 420 | 3,129,64,29,115,26.4,0.219,28,1 421 | 1,119,88,41,170,45.3,0.507,26,0 422 | 2,94,68,18,76,26.0,0.561,21,0 423 | 0,102,64,46,78,40.6,0.496,21,0 424 | 2,115,64,22,0,30.8,0.421,21,0 425 | 8,151,78,32,210,42.9,0.516,36,1 426 | 4,184,78,39,277,37.0,0.264,31,1 427 | 0,94,0,0,0,0.0,0.256,25,0 428 | 1,181,64,30,180,34.1,0.328,38,1 429 | 0,135,94,46,145,40.6,0.284,26,0 430 | 1,95,82,25,180,35.0,0.233,43,1 431 | 2,99,0,0,0,22.2,0.108,23,0 432 | 3,89,74,16,85,30.4,0.551,38,0 433 | 1,80,74,11,60,30.0,0.527,22,0 434 | 2,139,75,0,0,25.6,0.167,29,0 435 | 1,90,68,8,0,24.5,1.138,36,0 436 | 0,141,0,0,0,42.4,0.205,29,1 437 | 12,140,85,33,0,37.4,0.244,41,0 438 | 5,147,75,0,0,29.9,0.434,28,0 439 | 1,97,70,15,0,18.2,0.147,21,0 440 | 6,107,88,0,0,36.8,0.727,31,0 441 | 0,189,104,25,0,34.3,0.435,41,1 442 | 2,83,66,23,50,32.2,0.497,22,0 443 | 4,117,64,27,120,33.2,0.230,24,0 444 | 8,108,70,0,0,30.5,0.955,33,1 445 | 4,117,62,12,0,29.7,0.380,30,1 446 | 0,180,78,63,14,59.4,2.420,25,1 447 | 1,100,72,12,70,25.3,0.658,28,0 448 | 0,95,80,45,92,36.5,0.330,26,0 449 | 0,104,64,37,64,33.6,0.510,22,1 450 | 0,120,74,18,63,30.5,0.285,26,0 451 | 1,82,64,13,95,21.2,0.415,23,0 452 | 2,134,70,0,0,28.9,0.542,23,1 453 | 0,91,68,32,210,39.9,0.381,25,0 454 | 2,119,0,0,0,19.6,0.832,72,0 455 | 2,100,54,28,105,37.8,0.498,24,0 456 | 14,175,62,30,0,33.6,0.212,38,1 457 | 1,135,54,0,0,26.7,0.687,62,0 458 | 5,86,68,28,71,30.2,0.364,24,0 459 | 10,148,84,48,237,37.6,1.001,51,1 460 | 9,134,74,33,60,25.9,0.460,81,0 
461 | 9,120,72,22,56,20.8,0.733,48,0 462 | 1,71,62,0,0,21.8,0.416,26,0 463 | 8,74,70,40,49,35.3,0.705,39,0 464 | 5,88,78,30,0,27.6,0.258,37,0 465 | 10,115,98,0,0,24.0,1.022,34,0 466 | 0,124,56,13,105,21.8,0.452,21,0 467 | 0,74,52,10,36,27.8,0.269,22,0 468 | 0,97,64,36,100,36.8,0.600,25,0 469 | 8,120,0,0,0,30.0,0.183,38,1 470 | 6,154,78,41,140,46.1,0.571,27,0 471 | 1,144,82,40,0,41.3,0.607,28,0 472 | 0,137,70,38,0,33.2,0.170,22,0 473 | 0,119,66,27,0,38.8,0.259,22,0 474 | 7,136,90,0,0,29.9,0.210,50,0 475 | 4,114,64,0,0,28.9,0.126,24,0 476 | 0,137,84,27,0,27.3,0.231,59,0 477 | 2,105,80,45,191,33.7,0.711,29,1 478 | 7,114,76,17,110,23.8,0.466,31,0 479 | 8,126,74,38,75,25.9,0.162,39,0 480 | 4,132,86,31,0,28.0,0.419,63,0 481 | 3,158,70,30,328,35.5,0.344,35,1 482 | 0,123,88,37,0,35.2,0.197,29,0 483 | 4,85,58,22,49,27.8,0.306,28,0 484 | 0,84,82,31,125,38.2,0.233,23,0 485 | 0,145,0,0,0,44.2,0.630,31,1 486 | 0,135,68,42,250,42.3,0.365,24,1 487 | 1,139,62,41,480,40.7,0.536,21,0 488 | 0,173,78,32,265,46.5,1.159,58,0 489 | 4,99,72,17,0,25.6,0.294,28,0 490 | 8,194,80,0,0,26.1,0.551,67,0 491 | 2,83,65,28,66,36.8,0.629,24,0 492 | 2,89,90,30,0,33.5,0.292,42,0 493 | 4,99,68,38,0,32.8,0.145,33,0 494 | 4,125,70,18,122,28.9,1.144,45,1 495 | 3,80,0,0,0,0.0,0.174,22,0 496 | 6,166,74,0,0,26.6,0.304,66,0 497 | 5,110,68,0,0,26.0,0.292,30,0 498 | 2,81,72,15,76,30.1,0.547,25,0 499 | 7,195,70,33,145,25.1,0.163,55,1 500 | 6,154,74,32,193,29.3,0.839,39,0 501 | 2,117,90,19,71,25.2,0.313,21,0 502 | 3,84,72,32,0,37.2,0.267,28,0 503 | 6,0,68,41,0,39.0,0.727,41,1 504 | 7,94,64,25,79,33.3,0.738,41,0 505 | 3,96,78,39,0,37.3,0.238,40,0 506 | 10,75,82,0,0,33.3,0.263,38,0 507 | 0,180,90,26,90,36.5,0.314,35,1 508 | 1,130,60,23,170,28.6,0.692,21,0 509 | 2,84,50,23,76,30.4,0.968,21,0 510 | 8,120,78,0,0,25.0,0.409,64,0 511 | 12,84,72,31,0,29.7,0.297,46,1 512 | 0,139,62,17,210,22.1,0.207,21,0 513 | 9,91,68,0,0,24.2,0.200,58,0 514 | 2,91,62,0,0,27.3,0.525,22,0 515 | 3,99,54,19,86,25.6,0.154,24,0 516 | 
3,163,70,18,105,31.6,0.268,28,1 517 | 9,145,88,34,165,30.3,0.771,53,1 518 | 7,125,86,0,0,37.6,0.304,51,0 519 | 13,76,60,0,0,32.8,0.180,41,0 520 | 6,129,90,7,326,19.6,0.582,60,0 521 | 2,68,70,32,66,25.0,0.187,25,0 522 | 3,124,80,33,130,33.2,0.305,26,0 523 | 6,114,0,0,0,0.0,0.189,26,0 524 | 9,130,70,0,0,34.2,0.652,45,1 525 | 3,125,58,0,0,31.6,0.151,24,0 526 | 3,87,60,18,0,21.8,0.444,21,0 527 | 1,97,64,19,82,18.2,0.299,21,0 528 | 3,116,74,15,105,26.3,0.107,24,0 529 | 0,117,66,31,188,30.8,0.493,22,0 530 | 0,111,65,0,0,24.6,0.660,31,0 531 | 2,122,60,18,106,29.8,0.717,22,0 532 | 0,107,76,0,0,45.3,0.686,24,0 533 | 1,86,66,52,65,41.3,0.917,29,0 534 | 6,91,0,0,0,29.8,0.501,31,0 535 | 1,77,56,30,56,33.3,1.251,24,0 536 | 4,132,0,0,0,32.9,0.302,23,1 537 | 0,105,90,0,0,29.6,0.197,46,0 538 | 0,57,60,0,0,21.7,0.735,67,0 539 | 0,127,80,37,210,36.3,0.804,23,0 540 | 3,129,92,49,155,36.4,0.968,32,1 541 | 8,100,74,40,215,39.4,0.661,43,1 542 | 3,128,72,25,190,32.4,0.549,27,1 543 | 10,90,85,32,0,34.9,0.825,56,1 544 | 4,84,90,23,56,39.5,0.159,25,0 545 | 1,88,78,29,76,32.0,0.365,29,0 546 | 8,186,90,35,225,34.5,0.423,37,1 547 | 5,187,76,27,207,43.6,1.034,53,1 548 | 4,131,68,21,166,33.1,0.160,28,0 549 | 1,164,82,43,67,32.8,0.341,50,0 550 | 4,189,110,31,0,28.5,0.680,37,0 551 | 1,116,70,28,0,27.4,0.204,21,0 552 | 3,84,68,30,106,31.9,0.591,25,0 553 | 6,114,88,0,0,27.8,0.247,66,0 554 | 1,88,62,24,44,29.9,0.422,23,0 555 | 1,84,64,23,115,36.9,0.471,28,0 556 | 7,124,70,33,215,25.5,0.161,37,0 557 | 1,97,70,40,0,38.1,0.218,30,0 558 | 8,110,76,0,0,27.8,0.237,58,0 559 | 11,103,68,40,0,46.2,0.126,42,0 560 | 11,85,74,0,0,30.1,0.300,35,0 561 | 6,125,76,0,0,33.8,0.121,54,1 562 | 0,198,66,32,274,41.3,0.502,28,1 563 | 1,87,68,34,77,37.6,0.401,24,0 564 | 6,99,60,19,54,26.9,0.497,32,0 565 | 0,91,80,0,0,32.4,0.601,27,0 566 | 2,95,54,14,88,26.1,0.748,22,0 567 | 1,99,72,30,18,38.6,0.412,21,0 568 | 6,92,62,32,126,32.0,0.085,46,0 569 | 4,154,72,29,126,31.3,0.338,37,0 570 | 0,121,66,30,165,34.3,0.203,33,1 571 | 
3,78,70,0,0,32.5,0.270,39,0 572 | 2,130,96,0,0,22.6,0.268,21,0 573 | 3,111,58,31,44,29.5,0.430,22,0 574 | 2,98,60,17,120,34.7,0.198,22,0 575 | 1,143,86,30,330,30.1,0.892,23,0 576 | 1,119,44,47,63,35.5,0.280,25,0 577 | 6,108,44,20,130,24.0,0.813,35,0 578 | 2,118,80,0,0,42.9,0.693,21,1 579 | 10,133,68,0,0,27.0,0.245,36,0 580 | 2,197,70,99,0,34.7,0.575,62,1 581 | 0,151,90,46,0,42.1,0.371,21,1 582 | 6,109,60,27,0,25.0,0.206,27,0 583 | 12,121,78,17,0,26.5,0.259,62,0 584 | 8,100,76,0,0,38.7,0.190,42,0 585 | 8,124,76,24,600,28.7,0.687,52,1 586 | 1,93,56,11,0,22.5,0.417,22,0 587 | 8,143,66,0,0,34.9,0.129,41,1 588 | 6,103,66,0,0,24.3,0.249,29,0 589 | 3,176,86,27,156,33.3,1.154,52,1 590 | 0,73,0,0,0,21.1,0.342,25,0 591 | 11,111,84,40,0,46.8,0.925,45,1 592 | 2,112,78,50,140,39.4,0.175,24,0 593 | 3,132,80,0,0,34.4,0.402,44,1 594 | 2,82,52,22,115,28.5,1.699,25,0 595 | 6,123,72,45,230,33.6,0.733,34,0 596 | 0,188,82,14,185,32.0,0.682,22,1 597 | 0,67,76,0,0,45.3,0.194,46,0 598 | 1,89,24,19,25,27.8,0.559,21,0 599 | 1,173,74,0,0,36.8,0.088,38,1 600 | 1,109,38,18,120,23.1,0.407,26,0 601 | 1,108,88,19,0,27.1,0.400,24,0 602 | 6,96,0,0,0,23.7,0.190,28,0 603 | 1,124,74,36,0,27.8,0.100,30,0 604 | 7,150,78,29,126,35.2,0.692,54,1 605 | 4,183,0,0,0,28.4,0.212,36,1 606 | 1,124,60,32,0,35.8,0.514,21,0 607 | 1,181,78,42,293,40.0,1.258,22,1 608 | 1,92,62,25,41,19.5,0.482,25,0 609 | 0,152,82,39,272,41.5,0.270,27,0 610 | 1,111,62,13,182,24.0,0.138,23,0 611 | 3,106,54,21,158,30.9,0.292,24,0 612 | 3,174,58,22,194,32.9,0.593,36,1 613 | 7,168,88,42,321,38.2,0.787,40,1 614 | 6,105,80,28,0,32.5,0.878,26,0 615 | 11,138,74,26,144,36.1,0.557,50,1 616 | 3,106,72,0,0,25.8,0.207,27,0 617 | 6,117,96,0,0,28.7,0.157,30,0 618 | 2,68,62,13,15,20.1,0.257,23,0 619 | 9,112,82,24,0,28.2,1.282,50,1 620 | 0,119,0,0,0,32.4,0.141,24,1 621 | 2,112,86,42,160,38.4,0.246,28,0 622 | 2,92,76,20,0,24.2,1.698,28,0 623 | 6,183,94,0,0,40.8,1.461,45,0 624 | 0,94,70,27,115,43.5,0.347,21,0 625 | 2,108,64,0,0,30.8,0.158,21,0 626 | 
4,90,88,47,54,37.7,0.362,29,0 627 | 0,125,68,0,0,24.7,0.206,21,0 628 | 0,132,78,0,0,32.4,0.393,21,0 629 | 5,128,80,0,0,34.6,0.144,45,0 630 | 4,94,65,22,0,24.7,0.148,21,0 631 | 7,114,64,0,0,27.4,0.732,34,1 632 | 0,102,78,40,90,34.5,0.238,24,0 633 | 2,111,60,0,0,26.2,0.343,23,0 634 | 1,128,82,17,183,27.5,0.115,22,0 635 | 10,92,62,0,0,25.9,0.167,31,0 636 | 13,104,72,0,0,31.2,0.465,38,1 637 | 5,104,74,0,0,28.8,0.153,48,0 638 | 2,94,76,18,66,31.6,0.649,23,0 639 | 7,97,76,32,91,40.9,0.871,32,1 640 | 1,100,74,12,46,19.5,0.149,28,0 641 | 0,102,86,17,105,29.3,0.695,27,0 642 | 4,128,70,0,0,34.3,0.303,24,0 643 | 6,147,80,0,0,29.5,0.178,50,1 644 | 4,90,0,0,0,28.0,0.610,31,0 645 | 3,103,72,30,152,27.6,0.730,27,0 646 | 2,157,74,35,440,39.4,0.134,30,0 647 | 1,167,74,17,144,23.4,0.447,33,1 648 | 0,179,50,36,159,37.8,0.455,22,1 649 | 11,136,84,35,130,28.3,0.260,42,1 650 | 0,107,60,25,0,26.4,0.133,23,0 651 | 1,91,54,25,100,25.2,0.234,23,0 652 | 1,117,60,23,106,33.8,0.466,27,0 653 | 5,123,74,40,77,34.1,0.269,28,0 654 | 2,120,54,0,0,26.8,0.455,27,0 655 | 1,106,70,28,135,34.2,0.142,22,0 656 | 2,155,52,27,540,38.7,0.240,25,1 657 | 2,101,58,35,90,21.8,0.155,22,0 658 | 1,120,80,48,200,38.9,1.162,41,0 659 | 11,127,106,0,0,39.0,0.190,51,0 660 | 3,80,82,31,70,34.2,1.292,27,1 661 | 10,162,84,0,0,27.7,0.182,54,0 662 | 1,199,76,43,0,42.9,1.394,22,1 663 | 8,167,106,46,231,37.6,0.165,43,1 664 | 9,145,80,46,130,37.9,0.637,40,1 665 | 6,115,60,39,0,33.7,0.245,40,1 666 | 1,112,80,45,132,34.8,0.217,24,0 667 | 4,145,82,18,0,32.5,0.235,70,1 668 | 10,111,70,27,0,27.5,0.141,40,1 669 | 6,98,58,33,190,34.0,0.430,43,0 670 | 9,154,78,30,100,30.9,0.164,45,0 671 | 6,165,68,26,168,33.6,0.631,49,0 672 | 1,99,58,10,0,25.4,0.551,21,0 673 | 10,68,106,23,49,35.5,0.285,47,0 674 | 3,123,100,35,240,57.3,0.880,22,0 675 | 8,91,82,0,0,35.6,0.587,68,0 676 | 6,195,70,0,0,30.9,0.328,31,1 677 | 9,156,86,0,0,24.8,0.230,53,1 678 | 0,93,60,0,0,35.3,0.263,25,0 679 | 3,121,52,0,0,36.0,0.127,25,1 680 | 
2,101,58,17,265,24.2,0.614,23,0 681 | 2,56,56,28,45,24.2,0.332,22,0 682 | 0,162,76,36,0,49.6,0.364,26,1 683 | 0,95,64,39,105,44.6,0.366,22,0 684 | 4,125,80,0,0,32.3,0.536,27,1 685 | 5,136,82,0,0,0.0,0.640,69,0 686 | 2,129,74,26,205,33.2,0.591,25,0 687 | 3,130,64,0,0,23.1,0.314,22,0 688 | 1,107,50,19,0,28.3,0.181,29,0 689 | 1,140,74,26,180,24.1,0.828,23,0 690 | 1,144,82,46,180,46.1,0.335,46,1 691 | 8,107,80,0,0,24.6,0.856,34,0 692 | 13,158,114,0,0,42.3,0.257,44,1 693 | 2,121,70,32,95,39.1,0.886,23,0 694 | 7,129,68,49,125,38.5,0.439,43,1 695 | 2,90,60,0,0,23.5,0.191,25,0 696 | 7,142,90,24,480,30.4,0.128,43,1 697 | 3,169,74,19,125,29.9,0.268,31,1 698 | 0,99,0,0,0,25.0,0.253,22,0 699 | 4,127,88,11,155,34.5,0.598,28,0 700 | 4,118,70,0,0,44.5,0.904,26,0 701 | 2,122,76,27,200,35.9,0.483,26,0 702 | 6,125,78,31,0,27.6,0.565,49,1 703 | 1,168,88,29,0,35.0,0.905,52,1 704 | 2,129,0,0,0,38.5,0.304,41,0 705 | 4,110,76,20,100,28.4,0.118,27,0 706 | 6,80,80,36,0,39.8,0.177,28,0 707 | 10,115,0,0,0,0.0,0.261,30,1 708 | 2,127,46,21,335,34.4,0.176,22,0 709 | 9,164,78,0,0,32.8,0.148,45,1 710 | 2,93,64,32,160,38.0,0.674,23,1 711 | 3,158,64,13,387,31.2,0.295,24,0 712 | 5,126,78,27,22,29.6,0.439,40,0 713 | 10,129,62,36,0,41.2,0.441,38,1 714 | 0,134,58,20,291,26.4,0.352,21,0 715 | 3,102,74,0,0,29.5,0.121,32,0 716 | 7,187,50,33,392,33.9,0.826,34,1 717 | 3,173,78,39,185,33.8,0.970,31,1 718 | 10,94,72,18,0,23.1,0.595,56,0 719 | 1,108,60,46,178,35.5,0.415,24,0 720 | 5,97,76,27,0,35.6,0.378,52,1 721 | 4,83,86,19,0,29.3,0.317,34,0 722 | 1,114,66,36,200,38.1,0.289,21,0 723 | 1,149,68,29,127,29.3,0.349,42,1 724 | 5,117,86,30,105,39.1,0.251,42,0 725 | 1,111,94,0,0,32.8,0.265,45,0 726 | 4,112,78,40,0,39.4,0.236,38,0 727 | 1,116,78,29,180,36.1,0.496,25,0 728 | 0,141,84,26,0,32.4,0.433,22,0 729 | 2,175,88,0,0,22.9,0.326,22,0 730 | 2,92,52,0,0,30.1,0.141,22,0 731 | 3,130,78,23,79,28.4,0.323,34,1 732 | 8,120,86,0,0,28.4,0.259,22,1 733 | 2,174,88,37,120,44.5,0.646,24,1 734 | 2,106,56,27,165,29.0,0.426,22,0 
"""Train and evaluate a small dense network on the Pima Indians diabetes dataset."""
from keras.models import Sequential
from keras.layers import Dense
import numpy

# Fix the RNG seed so every run is reproducible.
numpy.random.seed(7)

# Each CSV row holds 8 clinical measurements followed by a 0/1 diabetes outcome.
records = numpy.loadtxt("data/pima-indians-diabetes.data.csv", delimiter=",")
features = records[:, 0:8]
outcomes = records[:, 8]

# Three fully-connected layers ending in a sigmoid probability for the binary target.
model = Sequential([
    Dense(12, input_dim=8, activation='relu'),
    Dense(8, activation='relu'),
    Dense(1, activation='sigmoid'),
])

# Binary cross-entropy + Adam is the standard pairing for a 0/1 outcome.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Stream training curves to TensorBoard for later inspection.
import keras
tensorboard_callback = keras.callbacks.TensorBoard(log_dir='/tmp/keras_logs', write_graph=True)

# Train, holding out 30% of the rows for validation.
model.fit(features, outcomes, epochs=150, batch_size=10, verbose=1,
          validation_split=0.3, callbacks=[tensorboard_callback])

# Report accuracy over the full dataset.
scores = model.evaluate(features, outcomes)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
"""Flask API that serves the GOT character death-prediction Keras model.

GET /predict?name=<character>&male=..&book1=..&...&popularity=.. returns
JSON of the form {<character name>: 'Alive' | 'Dead'}.
"""
import flask
import numpy as np
import tensorflow as tf
from keras.models import load_model

# initialize our Flask application and the Keras model
app = flask.Flask(__name__)

# Order matters: must match the 12 feature columns the model was trained on.
FEATURE_ARG_NAMES = ['male', 'book1', 'book2', 'book3', 'book4', 'book5',
                     'isMarried', 'isNoble', 'numDeadRelations',
                     'boolDeadRelations', 'isPopular', 'popularity']

def init():
    """Load the pre-trained model once and capture its TF graph (TF1-style serving)."""
    global model, graph
    # load the pre-trained Keras model
    model = load_model('models/gotCharactersDeathPredictions.h5')
    # predict() runs on worker threads, so the default graph must be captured here.
    graph = tf.get_default_graph()

# API for prediction
@app.route("/predict", methods=["GET"])
def predict():
    """Predict whether the named character is 'Alive' or 'Dead'.

    Returns a CORS-enabled JSON response mapping the character name to the label.
    """
    nameOfTheCharacter = flask.request.args.get('name')
    parameters = getParameters()
    # The model expects a single row of 12 numeric features.
    inputFeature = np.asarray(parameters, dtype=np.float32).reshape(1, 12)
    with graph.as_default():
        raw_prediction = model.predict(inputFeature)[0][0]
    # Sigmoid output: above 0.5 the model leans towards survival.
    if raw_prediction > 0.5:
        prediction = 'Alive'
    else:
        prediction = 'Dead'
    return sendResponse({nameOfTheCharacter: prediction})

# Getting Parameters
def getParameters():
    """Collect the 12 model features from the query string as floats.

    Bug fix: the original appended raw query strings (or None for missing
    arguments), producing a non-numeric/object input array for the model and
    turning malformed requests into opaque 500s inside Keras. Every argument
    is now required and converted; bad requests fail fast with HTTP 400.
    """
    parameters = []
    for argName in FEATURE_ARG_NAMES:
        rawValue = flask.request.args.get(argName)
        if rawValue is None:
            flask.abort(400, description="Missing required parameter: %s" % argName)
        try:
            parameters.append(float(rawValue))
        except ValueError:
            flask.abort(400, description="Parameter %s must be numeric" % argName)
    return parameters

# Cross origin support
def sendResponse(responseObj):
    """Wrap *responseObj* as a JSON response with permissive CORS headers."""
    response = flask.jsonify(responseObj)
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Methods', 'GET')
    response.headers.add('Access-Control-Allow-Headers', 'accept,content-type,Origin,X-Requested-With,Content-Type,access_token,Accept,Authorization,source')
    response.headers.add('Access-Control-Allow-Credentials', True)
    return response

# if this is the main thread of execution first load the model and then start the server
if __name__ == "__main__":
    print(("* Loading Keras model and Flask starting server..."
           "please wait until server has fully started"))
    init()
    app.run(threaded=True)
"""Flask front-end for the MNIST CNN: upload a digit image, get a prediction."""
from flask import Flask, render_template, request
from PIL import Image
import numpy as np
from keras.models import load_model
import tensorflow as tf

app = Flask(__name__, template_folder='templates')

def init():
    """Load the trained CNN once and capture its TF graph (TF1-style serving)."""
    global model, graph
    # load the pre-trained Keras model
    model = load_model('model/mnistCNN.h5')
    graph = tf.get_default_graph()

@app.route('/')
def upload_file():
    """Serve the upload form."""
    return render_template('index.html')

@app.route('/uploader', methods=['POST'])
def upload_image_file():
    """Accept an uploaded image and reply with the predicted digit."""
    if request.method != 'POST':
        return
    # Grayscale 28x28 is the exact input format the CNN was trained on.
    digit_image = Image.open(request.files['file'].stream).convert("L")
    digit_image = digit_image.resize((28, 28))
    # Shape (1, 28, 28, 1): one sample, one gray channel.
    pixels = np.array(digit_image).reshape(1, 28, 28, 1)
    with graph.as_default():
        predicted_digits = model.predict_classes(pixels)
    return 'Predicted Number: ' + str(predicted_digits[0])

if __name__ == '__main__':
    print(("* Loading Keras model and Flask starting server..."
           "please wait until server has fully started"))
    init()
    app.run(debug=True)
5 | 6 | 7 |
"""Train a two-conv-layer CNN on MNIST and save it for later serving."""
import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.optimizers import Adam
from keras.utils import np_utils

# fix random seed for reproducibility
numpy.random.seed(7)

# MNIST ships as 28x28 grayscale digit images with integer labels 0-9.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# The CNN expects (batch, height, width, channels); add the single gray channel.
train_images = train_images.reshape(train_images.shape[0], train_images.shape[1], train_images.shape[2], 1).astype('float32')
test_images = test_images.reshape(test_images.shape[0], test_images.shape[1], test_images.shape[2], 1).astype('float32')

# Scale pixel intensities from 0-255 down to 0-1.
train_images = train_images / 255
test_images = test_images / 255

# One-hot encode the digit labels.
number_of_classes = 10
train_labels = np_utils.to_categorical(train_labels, number_of_classes)
test_labels = np_utils.to_categorical(test_labels, number_of_classes)

# Conv -> pool -> conv -> pool -> dropout -> dense -> softmax classifier.
model = Sequential([
    Conv2D(32, (5, 5), input_shape=(train_images.shape[1], train_images.shape[2], 1), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(32, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.2),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(number_of_classes, activation='softmax'),
])

# Compile model
model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])

# Train for 10 epochs, validating on the held-out test split after each epoch.
model.fit(train_images, train_labels, validation_data=(test_images, test_labels), epochs=10, batch_size=200)

# Save the model
model.save('models/mnistCNN.h5')

# Final evaluation of the model
metrics = model.evaluate(test_images, test_labels, verbose=0)
print("Metrics(Test loss & Test Accuracy): ")
print(metrics)
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", 13 | " from ._conv import register_converters as _register_converters\n", 14 | "Using TensorFlow backend.\n" 15 | ] 16 | } 17 | ], 18 | "source": [ 19 | "from keras.datasets import mnist\n", 20 | "from keras.models import Sequential\n", 21 | "from keras.layers import Dense\n", 22 | "from keras.layers import Dropout\n", 23 | "from keras.layers import Flatten\n", 24 | "from keras.layers.convolutional import Conv2D\n", 25 | "from keras.layers.convolutional import MaxPooling2D\n", 26 | "from keras.optimizers import Adam\n", 27 | "from keras.utils import np_utils\n", 28 | "from PIL import Image\n", 29 | "import numpy as np\n", 30 | "import os" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 2, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# fix random seed for reproducibility\n", 40 | "seed = 7\n", 41 | "np.random.seed(seed)" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 3, 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | "# load data\n", 51 | "(X_train, y_train), (X_test, y_test) = mnist.load_data()" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 4, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "# Reshaping to format which CNN expects (batch, height, width, channels)\n", 61 | "X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], X_train.shape[2], 1).astype('float32')\n", 62 | "X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2], 1).astype('float32')" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": 5, 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [ 71 | "# To load images to features and labels\n", 72 | "def load_images_to_data(image_label, image_directory, features_data, label_data):\n", 73 | " list_of_files = os.listdir(image_directory)\n", 74 | " for file in list_of_files:\n", 75 | " image_file_name = 
os.path.join(image_directory, file)\n", 76 | " if \".png\" in image_file_name:\n", 77 | " img = Image.open(image_file_name).convert(\"L\")\n", 78 | " img = np.resize(img, (28,28,1))\n", 79 | " im2arr = np.array(img)\n", 80 | " im2arr = im2arr.reshape(1,28,28,1)\n", 81 | " features_data = np.append(features_data, im2arr, axis=0)\n", 82 | " label_data = np.append(label_data, [image_label], axis=0)\n", 83 | " return features_data, label_data" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": 6, 89 | "metadata": {}, 90 | "outputs": [], 91 | "source": [ 92 | "# Load your own images to training and test data\n", 93 | "X_train, y_train = load_images_to_data('1', 'data/mnist_data/train/1', X_train, y_train)\n", 94 | "X_test, y_test = load_images_to_data('1', 'data/mnist_data/validation/1', X_test, y_test)" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": 7, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "# normalize inputs from 0-255 to 0-1\n", 104 | "X_train/=255\n", 105 | "X_test/=255" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 8, 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "# one hot encode\n", 115 | "number_of_classes = 10\n", 116 | "y_train = np_utils.to_categorical(y_train, number_of_classes)\n", 117 | "y_test = np_utils.to_categorical(y_test, number_of_classes)" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": 9, 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "# create model\n", 127 | "model = Sequential()\n", 128 | "model.add(Conv2D(32, (5, 5), input_shape=(X_train.shape[1], X_train.shape[2], 1), activation='relu'))\n", 129 | "model.add(MaxPooling2D(pool_size=(2, 2)))\n", 130 | "model.add(Conv2D(32, (3, 3), activation='relu'))\n", 131 | "model.add(MaxPooling2D(pool_size=(2, 2)))\n", 132 | "model.add(Dropout(0.5))\n", 133 | "model.add(Flatten())\n", 134 | "model.add(Dense(128, 
activation='relu'))\n", 135 | "model.add(Dropout(0.5))\n", 136 | "model.add(Dense(number_of_classes, activation='softmax'))" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": 10, 142 | "metadata": {}, 143 | "outputs": [], 144 | "source": [ 145 | "# Compile model\n", 146 | "model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": 11, 152 | "metadata": {}, 153 | "outputs": [ 154 | { 155 | "name": "stdout", 156 | "output_type": "stream", 157 | "text": [ 158 | "Train on 60005 samples, validate on 10002 samples\n", 159 | "Epoch 1/7\n", 160 | "60005/60005 [==============================] - 43s 715us/step - loss: 0.5108 - acc: 0.8353 - val_loss: 0.0791 - val_acc: 0.9747\n", 161 | "Epoch 2/7\n", 162 | "60005/60005 [==============================] - 41s 685us/step - loss: 0.1575 - acc: 0.9529 - val_loss: 0.0510 - val_acc: 0.9836\n", 163 | "Epoch 3/7\n", 164 | "60005/60005 [==============================] - 41s 686us/step - loss: 0.1213 - acc: 0.9641 - val_loss: 0.0429 - val_acc: 0.9870\n", 165 | "Epoch 4/7\n", 166 | "60005/60005 [==============================] - 41s 687us/step - loss: 0.1002 - acc: 0.9702 - val_loss: 0.0376 - val_acc: 0.9883\n", 167 | "Epoch 5/7\n", 168 | "60005/60005 [==============================] - 41s 683us/step - loss: 0.0888 - acc: 0.9733 - val_loss: 0.0309 - val_acc: 0.9895\n", 169 | "Epoch 6/7\n", 170 | "60005/60005 [==============================] - 42s 693us/step - loss: 0.0795 - acc: 0.9761 - val_loss: 0.0290 - val_acc: 0.9904\n", 171 | "Epoch 7/7\n", 172 | "60005/60005 [==============================] - 41s 689us/step - loss: 0.0747 - acc: 0.9780 - val_loss: 0.0270 - val_acc: 0.9917\n" 173 | ] 174 | }, 175 | { 176 | "data": { 177 | "text/plain": [ 178 | "" 179 | ] 180 | }, 181 | "execution_count": 11, 182 | "metadata": {}, 183 | "output_type": "execute_result" 184 | } 185 | ], 186 | "source": [ 187 | "# Fit the 
model\n", 188 | "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=7, batch_size=200)" 189 | ] 190 | }, 191 | { 192 | "cell_type": "code", 193 | "execution_count": 12, 194 | "metadata": {}, 195 | "outputs": [], 196 | "source": [ 197 | "# Save the model\n", 198 | "# model.save('models/mnistCNN.h5')" 199 | ] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": 13, 204 | "metadata": {}, 205 | "outputs": [ 206 | { 207 | "name": "stdout", 208 | "output_type": "stream", 209 | "text": [ 210 | "Metrics(Test loss & Test Accuracy): \n", 211 | "[0.02703550534894785, 0.9917016596680663]\n" 212 | ] 213 | } 214 | ], 215 | "source": [ 216 | "# Final evaluation of the model\n", 217 | "metrics = model.evaluate(X_test, y_test, verbose=0)\n", 218 | "print(\"Metrics(Test loss & Test Accuracy): \")\n", 219 | "print(metrics)" 220 | ] 221 | }, 222 | { 223 | "cell_type": "code", 224 | "execution_count": 16, 225 | "metadata": {}, 226 | "outputs": [], 227 | "source": [ 228 | "img = Image.open('data/mnist_data/validation/1/1_2.png').convert(\"L\")\n", 229 | "img = np.resize(img, (28,28,1))\n", 230 | "im2arr = np.array(img)\n", 231 | "im2arr = im2arr.reshape(1,28,28,1)" 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": 17, 237 | "metadata": {}, 238 | "outputs": [ 239 | { 240 | "name": "stdout", 241 | "output_type": "stream", 242 | "text": [ 243 | "[1]\n" 244 | ] 245 | } 246 | ], 247 | "source": [ 248 | "y_pred = model.predict_classes(im2arr)\n", 249 | "print(y_pred)" 250 | ] 251 | } 252 | ], 253 | "metadata": { 254 | "kernelspec": { 255 | "display_name": "Python 3", 256 | "language": "python", 257 | "name": "python3" 258 | }, 259 | "language_info": { 260 | "codemirror_mode": { 261 | "name": "ipython", 262 | "version": 3 263 | }, 264 | "file_extension": ".py", 265 | "mimetype": "text/x-python", 266 | "name": "python", 267 | "nbconvert_exporter": "python", 268 | "pygments_lexer": "ipython3", 269 | "version": "3.6.4" 270 | } 271 | }, 
272 | "nbformat": 4, 273 | "nbformat_minor": 2 274 | } 275 | -------------------------------------------------------------------------------- /models/gotCharactersDeathPredictions.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/models/gotCharactersDeathPredictions.h5 -------------------------------------------------------------------------------- /models/mnistCNN.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tankala/ai-examples/e359085b0e85addfc1fbfa9daea65315e604ead7/models/mnistCNN.h5 -------------------------------------------------------------------------------- /mountain_car_game_understanding.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stderr", 10 | "output_type": "stream", 11 | "text": [ 12 | "/Users/ashoktankala/tensorflow/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", 13 | " from ._conv import register_converters as _register_converters\n", 14 | "Using TensorFlow backend.\n" 15 | ] 16 | } 17 | ], 18 | "source": [ 19 | "import gym\n", 20 | "import random\n", 21 | "import numpy as np\n", 22 | "from keras.models import Sequential\n", 23 | "from keras.layers import Dense\n", 24 | "from keras.optimizers import Adam" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 2, 30 | "metadata": {}, 31 | "outputs": [ 32 | { 33 | "name": "stdout", 34 | "output_type": "stream", 35 | "text": [ 36 | "\u001b[33mWARN: gym.spaces.Box autodetected dtype as . 
Please provide explicit dtype.\u001b[0m\n" 37 | ] 38 | } 39 | ], 40 | "source": [ 41 | "env = gym.make('MountainCar-v0')\n", 42 | "env.reset()\n", 43 | "goal_steps = 200\n", 44 | "score_requirement = -198\n", 45 | "intial_games = 10000" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 3, 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "def play_a_random_game_first():\n", 55 | " for step_index in range(goal_steps):\n", 56 | "# env.render()\n", 57 | " action = env.action_space.sample()\n", 58 | " observation, reward, done, info = env.step(action)\n", 59 | " print(\"Step {}:\".format(step_index))\n", 60 | " print(\"action: {}\".format(action))\n", 61 | " print(\"observation: {}\".format(observation))\n", 62 | " print(\"reward: {}\".format(reward))\n", 63 | " print(\"done: {}\".format(done))\n", 64 | " print(\"info: {}\".format(info))\n", 65 | " if done:\n", 66 | " break\n", 67 | " env.reset()" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 4, 73 | "metadata": {}, 74 | "outputs": [ 75 | { 76 | "name": "stdout", 77 | "output_type": "stream", 78 | "text": [ 79 | "Step 0:\n", 80 | "action: 0\n", 81 | "observation: [-0.55321127 -0.00078406]\n", 82 | "reward: -1.0\n", 83 | "done: False\n", 84 | "info: {}\n", 85 | "Step 1:\n", 86 | "action: 1\n", 87 | "observation: [-0.55377353 -0.00056225]\n", 88 | "reward: -1.0\n", 89 | "done: False\n", 90 | "info: {}\n", 91 | "Step 2:\n", 92 | "action: 0\n", 93 | "observation: [-0.55510978 -0.00133625]\n", 94 | "reward: -1.0\n", 95 | "done: False\n", 96 | "info: {}\n", 97 | "Step 3:\n", 98 | "action: 1\n", 99 | "observation: [-0.55621005 -0.00110027]\n", 100 | "reward: -1.0\n", 101 | "done: False\n", 102 | "info: {}\n", 103 | "Step 4:\n", 104 | "action: 1\n", 105 | "observation: [-0.55706613 -0.00085608]\n", 106 | "reward: -1.0\n", 107 | "done: False\n", 108 | "info: {}\n", 109 | "Step 5:\n", 110 | "action: 2\n", 111 | "observation: [-5.56671622e-01 3.94506118e-04]\n", 112 | 
"reward: -1.0\n", 113 | "done: False\n", 114 | "info: {}\n", 115 | "Step 6:\n", 116 | "action: 0\n", 117 | "observation: [-5.57029476e-01 -3.57854310e-04]\n", 118 | "reward: -1.0\n", 119 | "done: False\n", 120 | "info: {}\n", 121 | "Step 7:\n", 122 | "action: 2\n", 123 | "observation: [-0.55613702 0.00089246]\n", 124 | "reward: -1.0\n", 125 | "done: False\n", 126 | "info: {}\n", 127 | "Step 8:\n", 128 | "action: 0\n", 129 | "observation: [-5.56000915e-01 1.36105289e-04]\n", 130 | "reward: -1.0\n", 131 | "done: False\n", 132 | "info: {}\n", 133 | "Step 9:\n", 134 | "action: 0\n", 135 | "observation: [-0.55662218 -0.00062126]\n", 136 | "reward: -1.0\n", 137 | "done: False\n", 138 | "info: {}\n", 139 | "Step 10:\n", 140 | "action: 0\n", 141 | "observation: [-0.55799617 -0.00137399]\n", 142 | "reward: -1.0\n", 143 | "done: False\n", 144 | "info: {}\n", 145 | "Step 11:\n", 146 | "action: 2\n", 147 | "observation: [-5.58112635e-01 -1.16467827e-04]\n", 148 | "reward: -1.0\n", 149 | "done: False\n", 150 | "info: {}\n", 151 | "Step 12:\n", 152 | "action: 1\n", 153 | "observation: [-5.57970711e-01 1.41923841e-04]\n", 154 | "reward: -1.0\n", 155 | "done: False\n", 156 | "info: {}\n", 157 | "Step 13:\n", 158 | "action: 2\n", 159 | "observation: [-0.55657145 0.00139926]\n", 160 | "reward: -1.0\n", 161 | "done: False\n", 162 | "info: {}\n", 163 | "Step 14:\n", 164 | "action: 2\n", 165 | "observation: [-0.55392531 0.00264615]\n", 166 | "reward: -1.0\n", 167 | "done: False\n", 168 | "info: {}\n", 169 | "Step 15:\n", 170 | "action: 0\n", 171 | "observation: [-0.55205202 0.00187328]\n", 172 | "reward: -1.0\n", 173 | "done: False\n", 174 | "info: {}\n", 175 | "Step 16:\n", 176 | "action: 1\n", 177 | "observation: [-0.5499656 0.00208642]\n", 178 | "reward: -1.0\n", 179 | "done: False\n", 180 | "info: {}\n", 181 | "Step 17:\n", 182 | "action: 1\n", 183 | "observation: [-0.54768163 0.00228397]\n", 184 | "reward: -1.0\n", 185 | "done: False\n", 186 | "info: {}\n", 187 | "Step 18:\n", 188 
| "action: 1\n", 189 | "observation: [-0.54521719 0.00246443]\n", 190 | "reward: -1.0\n", 191 | "done: False\n", 192 | "info: {}\n", 193 | "Step 19:\n", 194 | "action: 1\n", 195 | "observation: [-0.54259074 0.00262646]\n", 196 | "reward: -1.0\n", 197 | "done: False\n", 198 | "info: {}\n", 199 | "Step 20:\n", 200 | "action: 0\n", 201 | "observation: [-0.54082191 0.00176882]\n", 202 | "reward: -1.0\n", 203 | "done: False\n", 204 | "info: {}\n", 205 | "Step 21:\n", 206 | "action: 1\n", 207 | "observation: [-0.53892398 0.00189794]\n", 208 | "reward: -1.0\n", 209 | "done: False\n", 210 | "info: {}\n", 211 | "Step 22:\n", 212 | "action: 0\n", 213 | "observation: [-0.53791114 0.00101284]\n", 214 | "reward: -1.0\n", 215 | "done: False\n", 216 | "info: {}\n", 217 | "Step 23:\n", 218 | "action: 0\n", 219 | "observation: [-5.37790997e-01 1.20145186e-04]\n", 220 | "reward: -1.0\n", 221 | "done: False\n", 222 | "info: {}\n", 223 | "Step 24:\n", 224 | "action: 1\n", 225 | "observation: [-5.37564443e-01 2.26554693e-04]\n", 226 | "reward: -1.0\n", 227 | "done: False\n", 228 | "info: {}\n", 229 | "Step 25:\n", 230 | "action: 2\n", 231 | "observation: [-0.53623318 0.00133127]\n", 232 | "reward: -1.0\n", 233 | "done: False\n", 234 | "info: {}\n", 235 | "Step 26:\n", 236 | "action: 0\n", 237 | "observation: [-5.35807174e-01 4.26001871e-04]\n", 238 | "reward: -1.0\n", 239 | "done: False\n", 240 | "info: {}\n", 241 | "Step 27:\n", 242 | "action: 2\n", 243 | "observation: [-0.53428963 0.00151754]\n", 244 | "reward: -1.0\n", 245 | "done: False\n", 246 | "info: {}\n", 247 | "Step 28:\n", 248 | "action: 0\n", 249 | "observation: [-0.53369192 0.00059771]\n", 250 | "reward: -1.0\n", 251 | "done: False\n", 252 | "info: {}\n", 253 | "Step 29:\n", 254 | "action: 1\n", 255 | "observation: [-0.53301852 0.0006734 ]\n", 256 | "reward: -1.0\n", 257 | "done: False\n", 258 | "info: {}\n", 259 | "Step 30:\n", 260 | "action: 1\n", 261 | "observation: [-0.53227448 0.00074404]\n", 262 | "reward: -1.0\n", 
263 | "done: False\n", 264 | "info: {}\n", 265 | "Step 31:\n", 266 | "action: 2\n", 267 | "observation: [-0.53046538 0.0018091 ]\n", 268 | "reward: -1.0\n", 269 | "done: False\n", 270 | "info: {}\n", 271 | "Step 32:\n", 272 | "action: 0\n", 273 | "observation: [-0.52960479 0.00086059]\n", 274 | "reward: -1.0\n", 275 | "done: False\n", 276 | "info: {}\n", 277 | "Step 33:\n", 278 | "action: 1\n", 279 | "observation: [-0.52869915 0.00090564]\n", 280 | "reward: -1.0\n", 281 | "done: False\n", 282 | "info: {}\n", 283 | "Step 34:\n", 284 | "action: 1\n", 285 | "observation: [-0.52775526 0.00094389]\n", 286 | "reward: -1.0\n", 287 | "done: False\n", 288 | "info: {}\n", 289 | "Step 35:\n", 290 | "action: 1\n", 291 | "observation: [-0.5267802 0.00097506]\n", 292 | "reward: -1.0\n", 293 | "done: False\n", 294 | "info: {}\n", 295 | "Step 36:\n", 296 | "action: 0\n", 297 | "observation: [-5.26781282e-01 -1.07873350e-06]\n", 298 | "reward: -1.0\n", 299 | "done: False\n", 300 | "info: {}\n", 301 | "Step 37:\n", 302 | "action: 2\n", 303 | "observation: [-0.52575849 0.00102279]\n", 304 | "reward: -1.0\n", 305 | "done: False\n", 306 | "info: {}\n", 307 | "Step 38:\n", 308 | "action: 0\n", 309 | "observation: [-5.25719505e-01 3.89874597e-05]\n", 310 | "reward: -1.0\n", 311 | "done: False\n", 312 | "info: {}\n", 313 | "Step 39:\n", 314 | "action: 2\n", 315 | "observation: [-0.52466461 0.00105489]\n", 316 | "reward: -1.0\n", 317 | "done: False\n", 318 | "info: {}\n", 319 | "Step 40:\n", 320 | "action: 2\n", 321 | "observation: [-0.52260173 0.00206289]\n", 322 | "reward: -1.0\n", 323 | "done: False\n", 324 | "info: {}\n", 325 | "Step 41:\n", 326 | "action: 0\n", 327 | "observation: [-0.52154632 0.00105541]\n", 328 | "reward: -1.0\n", 329 | "done: False\n", 330 | "info: {}\n", 331 | "Step 42:\n", 332 | "action: 2\n", 333 | "observation: [-0.5195063 0.00204002]\n", 334 | "reward: -1.0\n", 335 | "done: False\n", 336 | "info: {}\n", 337 | "Step 43:\n", 338 | "action: 0\n", 339 | 
"observation: [-0.51849698 0.00100932]\n", 340 | "reward: -1.0\n", 341 | "done: False\n", 342 | "info: {}\n", 343 | "Step 44:\n", 344 | "action: 0\n", 345 | "observation: [-5.18525918e-01 -2.89394081e-05]\n", 346 | "reward: -1.0\n", 347 | "done: False\n", 348 | "info: {}\n", 349 | "Step 45:\n", 350 | "action: 0\n", 351 | "observation: [-0.5195929 -0.00106698]\n", 352 | "reward: -1.0\n", 353 | "done: False\n", 354 | "info: {}\n", 355 | "Step 46:\n", 356 | "action: 1\n", 357 | "observation: [-0.52068993 -0.00109703]\n", 358 | "reward: -1.0\n", 359 | "done: False\n", 360 | "info: {}\n", 361 | "Step 47:\n", 362 | "action: 1\n", 363 | "observation: [-0.52180877 -0.00111884]\n", 364 | "reward: -1.0\n", 365 | "done: False\n", 366 | "info: {}\n", 367 | "Step 48:\n", 368 | "action: 2\n", 369 | "observation: [-5.21941042e-01 -1.32268713e-04]\n", 370 | "reward: -1.0\n", 371 | "done: False\n", 372 | "info: {}\n", 373 | "Step 49:\n", 374 | "action: 0\n", 375 | "observation: [-0.52308574 -0.0011447 ]\n", 376 | "reward: -1.0\n", 377 | "done: False\n", 378 | "info: {}\n", 379 | "Step 50:\n", 380 | "action: 0\n", 381 | "observation: [-0.52523429 -0.00214855]\n", 382 | "reward: -1.0\n", 383 | "done: False\n", 384 | "info: {}\n", 385 | "Step 51:\n", 386 | "action: 1\n", 387 | "observation: [-0.52737058 -0.00213628]\n", 388 | "reward: -1.0\n", 389 | "done: False\n", 390 | "info: {}\n", 391 | "Step 52:\n", 392 | "action: 0\n", 393 | "observation: [-0.53047857 -0.003108 ]\n", 394 | "reward: -1.0\n", 395 | "done: False\n", 396 | "info: {}\n", 397 | "Step 53:\n", 398 | "action: 1\n", 399 | "observation: [-0.53353497 -0.0030564 ]\n", 400 | "reward: -1.0\n", 401 | "done: False\n", 402 | "info: {}\n", 403 | "Step 54:\n", 404 | "action: 2\n", 405 | "observation: [-0.53551686 -0.00198189]\n", 406 | "reward: -1.0\n", 407 | "done: False\n", 408 | "info: {}\n", 409 | "Step 55:\n", 410 | "action: 2\n", 411 | "observation: [-0.53640939 -0.00089252]\n", 412 | "reward: -1.0\n", 413 | "done: False\n", 
414 | "info: {}\n", 415 | "Step 56:\n", 416 | "action: 0\n", 417 | "observation: [-0.53820585 -0.00179647]\n", 418 | "reward: -1.0\n", 419 | "done: False\n", 420 | "info: {}\n", 421 | "Step 57:\n", 422 | "action: 1\n", 423 | "observation: [-0.5398928 -0.00168695]\n", 424 | "reward: -1.0\n", 425 | "done: False\n", 426 | "info: {}\n", 427 | "Step 58:\n", 428 | "action: 1\n", 429 | "observation: [-0.54145759 -0.00156479]\n", 430 | "reward: -1.0\n", 431 | "done: False\n", 432 | "info: {}\n", 433 | "Step 59:\n", 434 | "action: 1\n", 435 | "observation: [-0.54288851 -0.00143092]\n", 436 | "reward: -1.0\n", 437 | "done: False\n", 438 | "info: {}\n", 439 | "Step 60:\n", 440 | "action: 1\n", 441 | "observation: [-0.54417483 -0.00128632]\n", 442 | "reward: -1.0\n", 443 | "done: False\n", 444 | "info: {}\n", 445 | "Step 61:\n", 446 | "action: 2\n", 447 | "observation: [-5.44306935e-01 -1.32101018e-04]\n", 448 | "reward: -1.0\n", 449 | "done: False\n", 450 | "info: {}\n", 451 | "Step 62:\n", 452 | "action: 2\n", 453 | "observation: [-0.54328382 0.00102311]\n", 454 | "reward: -1.0\n", 455 | "done: False\n", 456 | "info: {}\n", 457 | "Step 63:\n", 458 | "action: 2\n", 459 | "observation: [-0.54111316 0.00217066]\n", 460 | "reward: -1.0\n", 461 | "done: False\n", 462 | "info: {}\n", 463 | "Step 64:\n", 464 | "action: 0\n", 465 | "observation: [-0.5398112 0.00130196]\n", 466 | "reward: -1.0\n", 467 | "done: False\n", 468 | "info: {}\n", 469 | "Step 65:\n", 470 | "action: 2\n", 471 | "observation: [-0.5373877 0.00242351]\n", 472 | "reward: -1.0\n", 473 | "done: False\n", 474 | "info: {}\n", 475 | "Step 66:\n", 476 | "action: 1\n", 477 | "observation: [-0.53486081 0.00252689]\n", 478 | "reward: -1.0\n", 479 | "done: False\n", 480 | "info: {}\n", 481 | "Step 67:\n", 482 | "action: 0\n", 483 | "observation: [-0.53324946 0.00161134]\n", 484 | "reward: -1.0\n", 485 | "done: False\n", 486 | "info: {}\n", 487 | "Step 68:\n", 488 | "action: 1\n", 489 | "observation: [-0.53156575 
0.00168371]\n", 490 | "reward: -1.0\n", 491 | "done: False\n", 492 | "info: {}\n", 493 | "Step 69:\n", 494 | "action: 2\n", 495 | "observation: [-0.52882229 0.00274346]\n", 496 | "reward: -1.0\n", 497 | "done: False\n", 498 | "info: {}\n", 499 | "Step 70:\n", 500 | "action: 0\n", 501 | "observation: [-0.52703966 0.00178263]\n", 502 | "reward: -1.0\n", 503 | "done: False\n", 504 | "info: {}\n", 505 | "Step 71:\n", 506 | "action: 0\n", 507 | "observation: [-0.52623122 0.00080844]\n", 508 | "reward: -1.0\n", 509 | "done: False\n", 510 | "info: {}\n", 511 | "Step 72:\n", 512 | "action: 2\n", 513 | "observation: [-0.52440304 0.00182818]\n", 514 | "reward: -1.0\n", 515 | "done: False\n", 516 | "info: {}\n", 517 | "Step 73:\n", 518 | "action: 0\n", 519 | "observation: [-0.52356882 0.00083421]\n", 520 | "reward: -1.0\n", 521 | "done: False\n", 522 | "info: {}\n", 523 | "Step 74:\n", 524 | "action: 0\n", 525 | "observation: [-5.23734834e-01 -1.66010231e-04]\n", 526 | "reward: -1.0\n", 527 | "done: False\n", 528 | "info: {}\n", 529 | "Step 75:\n", 530 | "action: 0\n", 531 | "observation: [-0.52489982 -0.00116499]\n", 532 | "reward: -1.0\n", 533 | "done: False\n", 534 | "info: {}\n", 535 | "Step 76:\n", 536 | "action: 0\n", 537 | "observation: [-0.52705506 -0.00215523]\n", 538 | "reward: -1.0\n", 539 | "done: False\n", 540 | "info: {}\n", 541 | "Step 77:\n", 542 | "action: 0\n", 543 | "observation: [-0.53018437 -0.00312931]\n", 544 | "reward: -1.0\n", 545 | "done: False\n", 546 | "info: {}\n", 547 | "Step 78:\n", 548 | "action: 0\n", 549 | "observation: [-0.53426429 -0.00407992]\n", 550 | "reward: -1.0\n", 551 | "done: False\n", 552 | "info: {}\n", 553 | "Step 79:\n", 554 | "action: 2\n", 555 | "observation: [-0.53726423 -0.00299994]\n", 556 | "reward: -1.0\n", 557 | "done: False\n", 558 | "info: {}\n", 559 | "Step 80:\n", 560 | "action: 0\n", 561 | "observation: [-0.54116171 -0.00389748]\n", 562 | "reward: -1.0\n", 563 | "done: False\n", 564 | "info: {}\n", 565 | "Step 
81:\n", 566 | "action: 2\n", 567 | "observation: [-0.54392753 -0.00276582]\n", 568 | "reward: -1.0\n", 569 | "done: False\n", 570 | "info: {}\n", 571 | "Step 82:\n", 572 | "action: 1\n", 573 | "observation: [-0.54654098 -0.00261345]\n", 574 | "reward: -1.0\n", 575 | "done: False\n", 576 | "info: {}\n", 577 | "Step 83:\n", 578 | "action: 1\n", 579 | "observation: [-0.5489825 -0.00244152]\n", 580 | "reward: -1.0\n", 581 | "done: False\n", 582 | "info: {}\n", 583 | "Step 84:\n", 584 | "action: 1\n", 585 | "observation: [-0.55123383 -0.00225132]\n", 586 | "reward: -1.0\n", 587 | "done: False\n", 588 | "info: {}\n", 589 | "Step 85:\n", 590 | "action: 0\n", 591 | "observation: [-0.55427813 -0.0030443 ]\n", 592 | "reward: -1.0\n", 593 | "done: False\n", 594 | "info: {}\n", 595 | "Step 86:\n", 596 | "action: 1\n", 597 | "observation: [-0.55709265 -0.00281453]\n", 598 | "reward: -1.0\n", 599 | "done: False\n", 600 | "info: {}\n", 601 | "Step 87:\n", 602 | "action: 1\n", 603 | "observation: [-0.5596564 -0.00256375]\n", 604 | "reward: -1.0\n", 605 | "done: False\n", 606 | "info: {}\n", 607 | "Step 88:\n", 608 | "action: 1\n", 609 | "observation: [-0.56195024 -0.00229384]\n", 610 | "reward: -1.0\n", 611 | "done: False\n", 612 | "info: {}\n", 613 | "Step 89:\n", 614 | "action: 0\n", 615 | "observation: [-0.56495708 -0.00300684]\n", 616 | "reward: -1.0\n", 617 | "done: False\n", 618 | "info: {}\n", 619 | "Step 90:\n", 620 | "action: 1\n", 621 | "observation: [-0.56765453 -0.00269745]\n", 622 | "reward: -1.0\n", 623 | "done: False\n", 624 | "info: {}\n", 625 | "Step 91:\n", 626 | "action: 2\n", 627 | "observation: [-0.56902252 -0.00136799]\n", 628 | "reward: -1.0\n", 629 | "done: False\n", 630 | "info: {}\n", 631 | "Step 92:\n", 632 | "action: 0\n", 633 | "observation: [-0.57105089 -0.00202837]\n", 634 | "reward: -1.0\n", 635 | "done: False\n", 636 | "info: {}\n", 637 | "Step 93:\n", 638 | "action: 1\n", 639 | "observation: [-0.57272457 -0.00167368]\n", 640 | "reward: -1.0\n", 
641 | "done: False\n", 642 | "info: {}\n", 643 | "Step 94:\n", 644 | "action: 2\n", 645 | "observation: [-5.73031131e-01 -3.06565256e-04]\n", 646 | "reward: -1.0\n", 647 | "done: False\n", 648 | "info: {}\n", 649 | "Step 95:\n", 650 | "action: 0\n", 651 | "observation: [-0.57396831 -0.00093718]\n", 652 | "reward: -1.0\n", 653 | "done: False\n", 654 | "info: {}\n", 655 | "Step 96:\n", 656 | "action: 2\n", 657 | "observation: [-5.73529156e-01 4.39155504e-04]\n", 658 | "reward: -1.0\n", 659 | "done: False\n", 660 | "info: {}\n", 661 | "Step 97:\n", 662 | "action: 0\n", 663 | "observation: [-5.73716921e-01 -1.87765460e-04]\n", 664 | "reward: -1.0\n", 665 | "done: False\n", 666 | "info: {}\n", 667 | "Step 98:\n", 668 | "action: 1\n", 669 | "observation: [-5.73530215e-01 1.86705989e-04]\n", 670 | "reward: -1.0\n", 671 | "done: False\n", 672 | "info: {}\n", 673 | "Step 99:\n", 674 | "action: 2\n", 675 | "observation: [-0.57197042 0.00155979]\n", 676 | "reward: -1.0\n", 677 | "done: False\n", 678 | "info: {}\n", 679 | "Step 100:\n", 680 | "action: 2\n", 681 | "observation: [-0.56904911 0.00292131]\n", 682 | "reward: -1.0\n", 683 | "done: False\n", 684 | "info: {}\n", 685 | "Step 101:\n", 686 | "action: 1\n", 687 | "observation: [-0.56578798 0.00326113]\n", 688 | "reward: -1.0\n", 689 | "done: False\n", 690 | "info: {}\n", 691 | "Step 102:\n", 692 | "action: 0\n", 693 | "observation: [-0.56321128 0.00257671]\n", 694 | "reward: -1.0\n", 695 | "done: False\n", 696 | "info: {}\n", 697 | "Step 103:\n", 698 | "action: 1\n", 699 | "observation: [-0.56033818 0.0028731 ]\n", 700 | "reward: -1.0\n", 701 | "done: False\n", 702 | "info: {}\n", 703 | "Step 104:\n", 704 | "action: 1\n", 705 | "observation: [-0.55719009 0.00314809]\n", 706 | "reward: -1.0\n", 707 | "done: False\n", 708 | "info: {}\n", 709 | "Step 105:\n", 710 | "action: 0\n", 711 | "observation: [-0.55479049 0.0023996 ]\n", 712 | "reward: -1.0\n", 713 | "done: False\n", 714 | "info: {}\n", 715 | "Step 106:\n", 716 | 
"action: 2\n", 717 | "observation: [-0.5511573 0.00363319]\n", 718 | "reward: -1.0\n", 719 | "done: False\n", 720 | "info: {}\n", 721 | "Step 107:\n", 722 | "action: 2\n", 723 | "observation: [-0.54631765 0.00483965]\n", 724 | "reward: -1.0\n", 725 | "done: False\n", 726 | "info: {}\n", 727 | "Step 108:\n", 728 | "action: 2\n", 729 | "observation: [-0.54030774 0.00600991]\n", 730 | "reward: -1.0\n", 731 | "done: False\n", 732 | "info: {}\n", 733 | "Step 109:\n", 734 | "action: 2\n", 735 | "observation: [-0.53317257 0.00713517]\n", 736 | "reward: -1.0\n", 737 | "done: False\n", 738 | "info: {}\n", 739 | "Step 110:\n", 740 | "action: 1\n", 741 | "observation: [-0.52596561 0.00720697]\n", 742 | "reward: -1.0\n", 743 | "done: False\n", 744 | "info: {}\n", 745 | "Step 111:\n", 746 | "action: 2\n", 747 | "observation: [-0.51774089 0.00822472]\n", 748 | "reward: -1.0\n", 749 | "done: False\n", 750 | "info: {}\n", 751 | "Step 112:\n", 752 | "action: 2\n", 753 | "observation: [-0.50856011 0.00918078]\n", 754 | "reward: -1.0\n", 755 | "done: False\n", 756 | "info: {}\n", 757 | "Step 113:\n", 758 | "action: 2\n", 759 | "observation: [-0.49849207 0.01006803]\n", 760 | "reward: -1.0\n", 761 | "done: False\n", 762 | "info: {}\n", 763 | "Step 114:\n", 764 | "action: 2\n", 765 | "observation: [-0.48761216 0.01087991]\n", 766 | "reward: -1.0\n", 767 | "done: False\n", 768 | "info: {}\n", 769 | "Step 115:\n", 770 | "action: 2\n", 771 | "observation: [-0.47600163 0.01161053]\n", 772 | "reward: -1.0\n", 773 | "done: False\n", 774 | "info: {}\n", 775 | "Step 116:\n", 776 | "action: 0\n", 777 | "observation: [-0.46574686 0.01025477]\n", 778 | "reward: -1.0\n", 779 | "done: False\n", 780 | "info: {}\n", 781 | "Step 117:\n", 782 | "action: 1\n", 783 | "observation: [-0.45592381 0.00982305]\n", 784 | "reward: -1.0\n", 785 | "done: False\n", 786 | "info: {}\n", 787 | "Step 118:\n", 788 | "action: 2\n", 789 | "observation: [-0.44560483 0.01031897]\n", 790 | "reward: -1.0\n", 791 | "done: 
False\n", 792 | "info: {}\n", 793 | "Step 119:\n", 794 | "action: 2\n", 795 | "observation: [-0.4348655 0.01073934]\n", 796 | "reward: -1.0\n", 797 | "done: False\n", 798 | "info: {}\n", 799 | "Step 120:\n", 800 | "action: 1\n", 801 | "observation: [-0.42478382 0.01008167]\n", 802 | "reward: -1.0\n", 803 | "done: False\n", 804 | "info: {}\n", 805 | "Step 121:\n", 806 | "action: 2\n", 807 | "observation: [-0.41443246 0.01035137]\n", 808 | "reward: -1.0\n", 809 | "done: False\n", 810 | "info: {}\n", 811 | "Step 122:\n", 812 | "action: 1\n", 813 | "observation: [-0.40488528 0.00954718]\n", 814 | "reward: -1.0\n", 815 | "done: False\n", 816 | "info: {}\n", 817 | "Step 123:\n", 818 | "action: 0\n", 819 | "observation: [-0.39720975 0.00767553]\n", 820 | "reward: -1.0\n", 821 | "done: False\n", 822 | "info: {}\n", 823 | "Step 124:\n", 824 | "action: 2\n", 825 | "observation: [-0.38945959 0.00775016]\n", 826 | "reward: -1.0\n", 827 | "done: False\n", 828 | "info: {}\n", 829 | "Step 125:\n", 830 | "action: 2\n", 831 | "observation: [-0.38168854 0.00777105]\n", 832 | "reward: -1.0\n", 833 | "done: False\n", 834 | "info: {}\n", 835 | "Step 126:\n", 836 | "action: 0\n", 837 | "observation: [-0.37594995 0.00573859]\n", 838 | "reward: -1.0\n", 839 | "done: False\n", 840 | "info: {}\n", 841 | "Step 127:\n", 842 | "action: 2\n", 843 | "observation: [-0.37028288 0.00566708]\n", 844 | "reward: -1.0\n", 845 | "done: False\n", 846 | "info: {}\n", 847 | "Step 128:\n", 848 | "action: 0\n", 849 | "observation: [-0.36672555 0.00355732]\n", 850 | "reward: -1.0\n", 851 | "done: False\n", 852 | "info: {}\n", 853 | "Step 129:\n", 854 | "action: 0\n", 855 | "observation: [-0.36530183 0.00142373]\n", 856 | "reward: -1.0\n", 857 | "done: False\n", 858 | "info: {}\n", 859 | "Step 130:\n", 860 | "action: 2\n", 861 | "observation: [-0.3640212 0.00128062]\n", 862 | "reward: -1.0\n", 863 | "done: False\n", 864 | "info: {}\n", 865 | "Step 131:\n", 866 | "action: 0\n", 867 | "observation: [-0.36489221 
-0.00087101]\n", 868 | "reward: -1.0\n", 869 | "done: False\n", 870 | "info: {}\n", 871 | "Step 132:\n", 872 | "action: 2\n", 873 | "observation: [-0.36590906 -0.00101685]\n", 874 | "reward: -1.0\n", 875 | "done: False\n", 876 | "info: {}\n", 877 | "Step 133:\n", 878 | "action: 2\n", 879 | "observation: [-0.36706496 -0.0011559 ]\n", 880 | "reward: -1.0\n", 881 | "done: False\n", 882 | "info: {}\n", 883 | "Step 134:\n", 884 | "action: 2\n", 885 | "observation: [-0.36835219 -0.00128723]\n", 886 | "reward: -1.0\n", 887 | "done: False\n", 888 | "info: {}\n", 889 | "Step 135:\n", 890 | "action: 0\n", 891 | "observation: [-0.37176212 -0.00340994]\n", 892 | "reward: -1.0\n", 893 | "done: False\n", 894 | "info: {}\n", 895 | "Step 136:\n", 896 | "action: 0\n", 897 | "observation: [-0.37727186 -0.00550974]\n", 898 | "reward: -1.0\n", 899 | "done: False\n", 900 | "info: {}\n", 901 | "Step 137:\n", 902 | "action: 0\n", 903 | "observation: [-0.38484414 -0.00757228]\n", 904 | "reward: -1.0\n", 905 | "done: False\n", 906 | "info: {}\n", 907 | "Step 138:\n", 908 | "action: 1\n", 909 | "observation: [-0.39342729 -0.00858315]\n", 910 | "reward: -1.0\n", 911 | "done: False\n", 912 | "info: {}\n", 913 | "Step 139:\n", 914 | "action: 2\n", 915 | "observation: [-0.40196209 -0.00853481]\n", 916 | "reward: -1.0\n", 917 | "done: False\n", 918 | "info: {}\n", 919 | "Step 140:\n", 920 | "action: 0\n", 921 | "observation: [-0.41238906 -0.01042697]\n", 922 | "reward: -1.0\n", 923 | "done: False\n", 924 | "info: {}\n", 925 | "Step 141:\n", 926 | "action: 1\n", 927 | "observation: [-0.42363472 -0.01124565]\n", 928 | "reward: -1.0\n", 929 | "done: False\n", 930 | "info: {}\n", 931 | "Step 142:\n", 932 | "action: 2\n", 933 | "observation: [-0.43461891 -0.0109842 ]\n", 934 | "reward: -1.0\n", 935 | "done: False\n", 936 | "info: {}\n", 937 | "Step 143:\n", 938 | "action: 2\n", 939 | "observation: [-0.44526256 -0.01064365]\n", 940 | "reward: -1.0\n", 941 | "done: False\n", 942 | "info: {}\n", 943 | 
"Step 144:\n", 944 | "action: 2\n", 945 | "observation: [-0.45548834 -0.01022578]\n", 946 | "reward: -1.0\n", 947 | "done: False\n", 948 | "info: {}\n", 949 | "Step 145:\n", 950 | "action: 1\n", 951 | "observation: [-0.4662214 -0.01073306]\n", 952 | "reward: -1.0\n", 953 | "done: False\n", 954 | "info: {}\n", 955 | "Step 146:\n", 956 | "action: 0\n", 957 | "observation: [-0.47838266 -0.01216127]\n", 958 | "reward: -1.0\n", 959 | "done: False\n", 960 | "info: {}\n", 961 | "Step 147:\n", 962 | "action: 0\n", 963 | "observation: [-0.49188201 -0.01349935]\n", 964 | "reward: -1.0\n", 965 | "done: False\n", 966 | "info: {}\n", 967 | "Step 148:\n", 968 | "action: 0\n", 969 | "observation: [-0.50661888 -0.01473687]\n", 970 | "reward: -1.0\n", 971 | "done: False\n", 972 | "info: {}\n", 973 | "Step 149:\n", 974 | "action: 0\n", 975 | "observation: [-0.52248304 -0.01586416]\n", 976 | "reward: -1.0\n", 977 | "done: False\n", 978 | "info: {}\n", 979 | "Step 150:\n", 980 | "action: 2\n", 981 | "observation: [-0.53735557 -0.01487253]\n", 982 | "reward: -1.0\n", 983 | "done: False\n", 984 | "info: {}\n", 985 | "Step 151:\n", 986 | "action: 2\n", 987 | "observation: [-0.55112495 -0.01376938]\n", 988 | "reward: -1.0\n", 989 | "done: False\n", 990 | "info: {}\n", 991 | "Step 152:\n", 992 | "action: 0\n", 993 | "observation: [-0.56568812 -0.01456317]\n", 994 | "reward: -1.0\n", 995 | "done: False\n", 996 | "info: {}\n", 997 | "Step 153:\n", 998 | "action: 0\n", 999 | "observation: [-0.58093646 -0.01524834]\n", 1000 | "reward: -1.0\n", 1001 | "done: False\n", 1002 | "info: {}\n", 1003 | "Step 154:\n", 1004 | "action: 0\n", 1005 | "observation: [-0.59675688 -0.01582042]\n", 1006 | "reward: -1.0\n", 1007 | "done: False\n", 1008 | "info: {}\n", 1009 | "Step 155:\n", 1010 | "action: 1\n", 1011 | "observation: [-0.61203301 -0.01527613]\n", 1012 | "reward: -1.0\n", 1013 | "done: False\n", 1014 | "info: {}\n", 1015 | "Step 156:\n", 1016 | "action: 2\n", 1017 | "observation: [-0.62565364 
-0.01362063]\n", 1018 | "reward: -1.0\n", 1019 | "done: False\n", 1020 | "info: {}\n", 1021 | "Step 157:\n", 1022 | "action: 0\n", 1023 | "observation: [-0.63952075 -0.01386712]\n", 1024 | "reward: -1.0\n", 1025 | "done: False\n", 1026 | "info: {}\n", 1027 | "Step 158:\n", 1028 | "action: 0\n", 1029 | "observation: [-0.65353588 -0.01401512]\n", 1030 | "reward: -1.0\n", 1031 | "done: False\n", 1032 | "info: {}\n", 1033 | "Step 159:\n", 1034 | "action: 1\n", 1035 | "observation: [-0.66660096 -0.01306509]\n", 1036 | "reward: -1.0\n", 1037 | "done: False\n", 1038 | "info: {}\n", 1039 | "Step 160:\n", 1040 | "action: 0\n", 1041 | "observation: [-0.67962613 -0.01302517]\n", 1042 | "reward: -1.0\n", 1043 | "done: False\n", 1044 | "info: {}\n", 1045 | "Step 161:\n", 1046 | "action: 2\n", 1047 | "observation: [-0.69052336 -0.01089723]\n", 1048 | "reward: -1.0\n", 1049 | "done: False\n", 1050 | "info: {}\n", 1051 | "Step 162:\n", 1052 | "action: 1\n", 1053 | "observation: [-0.70022033 -0.00969697]\n", 1054 | "reward: -1.0\n", 1055 | "done: False\n", 1056 | "info: {}\n", 1057 | "Step 163:\n", 1058 | "action: 1\n", 1059 | "observation: [-0.70865376 -0.00843343]\n", 1060 | "reward: -1.0\n", 1061 | "done: False\n", 1062 | "info: {}\n", 1063 | "Step 164:\n", 1064 | "action: 1\n", 1065 | "observation: [-0.71576948 -0.00711572]\n", 1066 | "reward: -1.0\n", 1067 | "done: False\n", 1068 | "info: {}\n", 1069 | "Step 165:\n", 1070 | "action: 0\n", 1071 | "observation: [-0.72252244 -0.00675296]\n", 1072 | "reward: -1.0\n", 1073 | "done: False\n", 1074 | "info: {}\n", 1075 | "Step 166:\n", 1076 | "action: 0\n", 1077 | "observation: [-0.72887046 -0.00634802]\n", 1078 | "reward: -1.0\n", 1079 | "done: False\n", 1080 | "info: {}\n", 1081 | "Step 167:\n", 1082 | "action: 0\n", 1083 | "observation: [-0.73477443 -0.00590396]\n", 1084 | "reward: -1.0\n", 1085 | "done: False\n", 1086 | "info: {}\n", 1087 | "Step 168:\n", 1088 | "action: 1\n", 1089 | "observation: [-0.73919841 -0.00442399]\n", 
1090 | "reward: -1.0\n", 1091 | "done: False\n", 1092 | "info: {}\n", 1093 | "Step 169:\n", 1094 | "action: 1\n", 1095 | "observation: [-0.74211581 -0.0029174 ]\n", 1096 | "reward: -1.0\n", 1097 | "done: False\n", 1098 | "info: {}\n", 1099 | "Step 170:\n", 1100 | "action: 2\n", 1101 | "observation: [-7.42509223e-01 -3.93409029e-04]\n", 1102 | "reward: -1.0\n", 1103 | "done: False\n", 1104 | "info: {}\n", 1105 | "Step 171:\n", 1106 | "action: 1\n", 1107 | "observation: [-0.7413763 0.00113292]\n", 1108 | "reward: -1.0\n", 1109 | "done: False\n", 1110 | "info: {}\n", 1111 | "Step 172:\n", 1112 | "action: 0\n", 1113 | "observation: [-0.73972379 0.00165251]\n", 1114 | "reward: -1.0\n", 1115 | "done: False\n", 1116 | "info: {}\n", 1117 | "Step 173:\n", 1118 | "action: 0\n", 1119 | "observation: [-0.73756155 0.00216224]\n", 1120 | "reward: -1.0\n", 1121 | "done: False\n", 1122 | "info: {}\n", 1123 | "Step 174:\n", 1124 | "action: 1\n", 1125 | "observation: [-0.73390254 0.00365901]\n", 1126 | "reward: -1.0\n", 1127 | "done: False\n", 1128 | "info: {}\n", 1129 | "Step 175:\n", 1130 | "action: 2\n", 1131 | "observation: [-0.72776883 0.00613371]\n", 1132 | "reward: -1.0\n", 1133 | "done: False\n", 1134 | "info: {}\n", 1135 | "Step 176:\n", 1136 | "action: 1\n", 1137 | "observation: [-0.72019781 0.00757102]\n", 1138 | "reward: -1.0\n", 1139 | "done: False\n", 1140 | "info: {}\n", 1141 | "Step 177:\n", 1142 | "action: 1\n", 1143 | "observation: [-0.7112363 0.0089615]\n", 1144 | "reward: -1.0\n", 1145 | "done: False\n", 1146 | "info: {}\n", 1147 | "Step 178:\n", 1148 | "action: 1\n", 1149 | "observation: [-0.70094067 0.01029563]\n", 1150 | "reward: -1.0\n", 1151 | "done: False\n", 1152 | "info: {}\n", 1153 | "Step 179:\n", 1154 | "action: 0\n", 1155 | "observation: [-0.69037684 0.01056383]\n", 1156 | "reward: -1.0\n", 1157 | "done: False\n", 1158 | "info: {}\n", 1159 | "Step 180:\n", 1160 | "action: 0\n", 1161 | "observation: [-0.67961371 0.01076313]\n", 1162 | "reward: -1.0\n", 
1163 | "done: False\n", 1164 | "info: {}\n", 1165 | "Step 181:\n", 1166 | "action: 0\n", 1167 | "observation: [-0.66872272 0.01089098]\n", 1168 | "reward: -1.0\n", 1169 | "done: False\n", 1170 | "info: {}\n", 1171 | "Step 182:\n", 1172 | "action: 1\n", 1173 | "observation: [-0.65677737 0.01194535]\n", 1174 | "reward: -1.0\n", 1175 | "done: False\n", 1176 | "info: {}\n", 1177 | "Step 183:\n", 1178 | "action: 2\n", 1179 | "observation: [-0.64285954 0.01391783]\n", 1180 | "reward: -1.0\n", 1181 | "done: False\n", 1182 | "info: {}\n", 1183 | "Step 184:\n", 1184 | "action: 1\n", 1185 | "observation: [-0.62806621 0.01479333]\n", 1186 | "reward: -1.0\n", 1187 | "done: False\n", 1188 | "info: {}\n", 1189 | "Step 185:\n", 1190 | "action: 1\n", 1191 | "observation: [-0.61250215 0.01556407]\n", 1192 | "reward: -1.0\n", 1193 | "done: False\n", 1194 | "info: {}\n", 1195 | "Step 186:\n", 1196 | "action: 0\n", 1197 | "observation: [-0.59727918 0.01522297]\n", 1198 | "reward: -1.0\n", 1199 | "done: False\n", 1200 | "info: {}\n", 1201 | "Step 187:\n", 1202 | "action: 0\n", 1203 | "observation: [-0.5825081 0.01477108]\n", 1204 | "reward: -1.0\n", 1205 | "done: False\n", 1206 | "info: {}\n", 1207 | "Step 188:\n", 1208 | "action: 1\n", 1209 | "observation: [-0.5672975 0.0152106]\n", 1210 | "reward: -1.0\n", 1211 | "done: False\n", 1212 | "info: {}\n", 1213 | "Step 189:\n", 1214 | "action: 2\n", 1215 | "observation: [-0.55076009 0.01653741]\n", 1216 | "reward: -1.0\n", 1217 | "done: False\n", 1218 | "info: {}\n", 1219 | "Step 190:\n", 1220 | "action: 0\n", 1221 | "observation: [-0.5350192 0.01574089]\n", 1222 | "reward: -1.0\n", 1223 | "done: False\n", 1224 | "info: {}\n", 1225 | "Step 191:\n", 1226 | "action: 2\n", 1227 | "observation: [-0.51819267 0.01682653]\n", 1228 | "reward: -1.0\n", 1229 | "done: False\n", 1230 | "info: {}\n", 1231 | "Step 192:\n", 1232 | "action: 2\n", 1233 | "observation: [-0.50040669 0.01778598]\n", 1234 | "reward: -1.0\n", 1235 | "done: False\n", 1236 | 
"info: {}\n", 1237 | "Step 193:\n", 1238 | "action: 1\n", 1239 | "observation: [-0.48279451 0.01761218]\n", 1240 | "reward: -1.0\n", 1241 | "done: False\n", 1242 | "info: {}\n", 1243 | "Step 194:\n", 1244 | "action: 1\n", 1245 | "observation: [-0.46548759 0.01730691]\n", 1246 | "reward: -1.0\n", 1247 | "done: False\n", 1248 | "info: {}\n", 1249 | "Step 195:\n", 1250 | "action: 1\n", 1251 | "observation: [-0.44861431 0.01687328]\n", 1252 | "reward: -1.0\n", 1253 | "done: False\n", 1254 | "info: {}\n", 1255 | "Step 196:\n", 1256 | "action: 2\n", 1257 | "observation: [-0.43129868 0.01731563]\n", 1258 | "reward: -1.0\n", 1259 | "done: False\n", 1260 | "info: {}\n", 1261 | "Step 197:\n", 1262 | "action: 0\n", 1263 | "observation: [-0.41566648 0.01563219]\n", 1264 | "reward: -1.0\n", 1265 | "done: False\n", 1266 | "info: {}\n", 1267 | "Step 198:\n", 1268 | "action: 0\n", 1269 | "observation: [-0.40182971 0.01383677]\n", 1270 | "reward: -1.0\n", 1271 | "done: False\n", 1272 | "info: {}\n" 1273 | ] 1274 | }, 1275 | { 1276 | "name": "stdout", 1277 | "output_type": "stream", 1278 | "text": [ 1279 | "Step 199:\n", 1280 | "action: 1\n", 1281 | "observation: [-0.38888603 0.01294368]\n", 1282 | "reward: -1.0\n", 1283 | "done: True\n", 1284 | "info: {}\n" 1285 | ] 1286 | } 1287 | ], 1288 | "source": [ 1289 | "play_a_random_game_first()" 1290 | ] 1291 | }, 1292 | { 1293 | "cell_type": "code", 1294 | "execution_count": 7, 1295 | "metadata": {}, 1296 | "outputs": [], 1297 | "source": [ 1298 | "def model_data_preparation():\n", 1299 | " training_data = []\n", 1300 | " accepted_scores = []\n", 1301 | " for game_index in range(intial_games):\n", 1302 | " score = 0\n", 1303 | " game_memory = []\n", 1304 | " previous_observation = []\n", 1305 | " for step_index in range(goal_steps):\n", 1306 | " action = random.randrange(0, 3)\n", 1307 | " observation, reward, done, info = env.step(action)\n", 1308 | " \n", 1309 | " if len(previous_observation) > 0:\n", 1310 | " 
game_memory.append([previous_observation, action])\n", 1311 | " \n", 1312 | " previous_observation = observation\n", 1313 | " if observation[0] > -0.2:\n", 1314 | " reward = 1\n", 1315 | " \n", 1316 | " score += reward\n", 1317 | " if done:\n", 1318 | " break\n", 1319 | " \n", 1320 | " if score >= score_requirement:\n", 1321 | " accepted_scores.append(score)\n", 1322 | " for data in game_memory:\n", 1323 | " if data[1] == 1:\n", 1324 | " output = [0, 1, 0]\n", 1325 | " elif data[1] == 0:\n", 1326 | " output = [1, 0, 0]\n", 1327 | " elif data[1] == 2:\n", 1328 | " output = [0, 0, 1]\n", 1329 | " training_data.append([data[0], output])\n", 1330 | " \n", 1331 | " env.reset()\n", 1332 | " \n", 1333 | " print(accepted_scores)\n", 1334 | " \n", 1335 | " return training_data" 1336 | ] 1337 | }, 1338 | { 1339 | "cell_type": "code", 1340 | "execution_count": 9, 1341 | "metadata": {}, 1342 | "outputs": [ 1343 | { 1344 | "name": "stdout", 1345 | "output_type": "stream", 1346 | "text": [ 1347 | "[-158.0, -172.0, -188.0, -196.0, -168.0, -182.0, -180.0, -184.0, -184.0, -184.0, -168.0, -184.0, -176.0, -182.0, -182.0, -196.0, -184.0, -194.0, -178.0, -176.0, -170.0, -190.0, -182.0, -184.0, -184.0, -188.0, -184.0, -192.0, -172.0, -186.0, -174.0, -166.0, -188.0, -186.0, -174.0, -190.0, -178.0, -170.0, -164.0, -180.0, -184.0, -172.0, -168.0, -174.0, -172.0, -174.0, -186.0]\n" 1348 | ] 1349 | } 1350 | ], 1351 | "source": [ 1352 | "training_data = model_data_preparation()" 1353 | ] 1354 | }, 1355 | { 1356 | "cell_type": "code", 1357 | "execution_count": 10, 1358 | "metadata": {}, 1359 | "outputs": [], 1360 | "source": [ 1361 | "def build_model(input_size, output_size):\n", 1362 | " model = Sequential()\n", 1363 | " model.add(Dense(128, input_dim=input_size, activation='relu'))\n", 1364 | " model.add(Dense(52, activation='relu'))\n", 1365 | " model.add(Dense(output_size, activation='linear'))\n", 1366 | " model.compile(loss='mse', optimizer=Adam())\n", 1367 | "\n", 1368 | " return model" 
1369 | ] 1370 | }, 1371 | { 1372 | "cell_type": "code", 1373 | "execution_count": 11, 1374 | "metadata": {}, 1375 | "outputs": [], 1376 | "source": [ 1377 | "def train_model(training_data):\n", 1378 | " X = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]))\n", 1379 | " y = np.array([i[1] for i in training_data]).reshape(-1, len(training_data[0][1]))\n", 1380 | " model = build_model(input_size=len(X[0]), output_size=len(y[0]))\n", 1381 | " \n", 1382 | " model.fit(X, y, epochs=5)\n", 1383 | " return model" 1384 | ] 1385 | }, 1386 | { 1387 | "cell_type": "code", 1388 | "execution_count": 12, 1389 | "metadata": {}, 1390 | "outputs": [ 1391 | { 1392 | "name": "stdout", 1393 | "output_type": "stream", 1394 | "text": [ 1395 | "Epoch 1/5\n", 1396 | "9353/9353 [==============================] - 1s 90us/step - loss: 0.2262\n", 1397 | "Epoch 2/5\n", 1398 | "9353/9353 [==============================] - 1s 66us/step - loss: 0.2217\n", 1399 | "Epoch 3/5\n", 1400 | "9353/9353 [==============================] - 1s 65us/step - loss: 0.2209\n", 1401 | "Epoch 4/5\n", 1402 | "9353/9353 [==============================] - 1s 64us/step - loss: 0.2201\n", 1403 | "Epoch 5/5\n", 1404 | "9353/9353 [==============================] - 1s 61us/step - loss: 0.2199\n" 1405 | ] 1406 | } 1407 | ], 1408 | "source": [ 1409 | "trained_model = train_model(training_data)" 1410 | ] 1411 | }, 1412 | { 1413 | "cell_type": "code", 1414 | "execution_count": 13, 1415 | "metadata": {}, 1416 | "outputs": [ 1417 | { 1418 | "name": "stdout", 1419 | "output_type": "stream", 1420 | "text": [ 1421 | "[-164.0, -92.0, -162.0, -107.0, -105.0, -93.0, -97.0, -90.0, -96.0, -170.0, -99.0, -200.0, -164.0, -91.0, -200.0, -92.0, -195.0, -166.0, -104.0, -93.0, -164.0, -200.0, -200.0, -164.0, -179.0, -176.0, -122.0, -101.0, -91.0, -162.0, -99.0, -164.0, -190.0, -199.0, -101.0, -200.0, -186.0, -185.0, -170.0, -128.0, -164.0, -164.0, -166.0, -101.0, -167.0, -89.0, -105.0, -168.0, -166.0, -100.0, -100.0, 
-91.0, -90.0, -163.0, -165.0, -167.0, -165.0, -105.0, -88.0, -134.0, -95.0, -90.0, -166.0, -166.0, -89.0, -167.0, -162.0, -165.0, -164.0, -171.0, -163.0, -127.0, -95.0, -159.0, -89.0, -89.0, -96.0, -168.0, -96.0, -163.0, -89.0, -90.0, -183.0, -166.0, -164.0, -163.0, -171.0, -167.0, -163.0, -97.0, -171.0, -166.0, -89.0, -200.0, -162.0, -175.0, -198.0, -93.0, -200.0, -106.0]\n", 1422 | "Average Score: -141.12\n", 1423 | "choice 1:0.007936507936507936 choice 0:0.5136054421768708 choice 2:0.47845804988662133\n" 1424 | ] 1425 | } 1426 | ], 1427 | "source": [ 1428 | "scores = []\n", 1429 | "choices = []\n", 1430 | "for each_game in range(100):\n", 1431 | "    score = 0\n", 1432 | "    prev_obs = []\n", 1433 | "    for step_index in range(goal_steps):\n", 1434 | "        # Uncomment this line if you want to see how our bot is playing\n", 1435 | "        # env.render()\n", 1436 | "        if len(prev_obs)==0:\n", 1437 | "            action = random.randrange(0,3)\n", 1438 | "        else:\n", 1439 | "            action = np.argmax(trained_model.predict(prev_obs.reshape(-1, len(prev_obs)))[0])\n", 1440 | "        \n", 1441 | "        choices.append(action)\n", 1442 | "        new_observation, reward, done, info = env.step(action)\n", 1443 | "        prev_obs = new_observation\n", 1444 | "        score+=reward\n", 1445 | "        if done:\n", 1446 | "            break\n", 1447 | "\n", 1448 | "    env.reset()\n", 1449 | "    scores.append(score)\n", 1450 | "\n", 1451 | "print(scores)\n", 1452 | "print('Average Score:',sum(scores)/len(scores))\n", 1453 | "print('choice 1:{} choice 0:{} choice 2:{}'.format(choices.count(1)/len(choices),choices.count(0)/len(choices),choices.count(2)/len(choices)))" 1454 | ] 1455 | } 1456 | ], 1457 | "metadata": { 1458 | "kernelspec": { 1459 | "display_name": "Python 3", 1460 | "language": "python", 1461 | "name": "python3" 1462 | }, 1463 | "language_info": { 1464 | "codemirror_mode": { 1465 | "name": "ipython", 1466 | "version": 3 1467 | }, 1468 | "file_extension": ".py", 1469 | "mimetype": "text/x-python", 1470 | "name": "python", 1471 | "nbconvert_exporter": 
"python", 1472 | "pygments_lexer": "ipython3", 1473 | "version": "3.6.4" 1474 | } 1475 | }, 1476 | "nbformat": 4, 1477 | "nbformat_minor": 2 1478 | } 1479 | -------------------------------------------------------------------------------- /noShowAppointments.py: -------------------------------------------------------------------------------- 1 | import pandas as pds 2 | 3 | dataframeX = pds.read_csv('data/KaggleV2-May-2016.csv', usecols= [2, 5, 7, 8, 9, 10, 11, 12]) 4 | dataframeY = pds.read_csv('data/KaggleV2-May-2016.csv', usecols=[13]) 5 | 6 | def genderToInt(gender): 7 | if gender == 'M': 8 | return 0 9 | else: 10 | return 1 11 | 12 | def statusToInt(status): 13 | if status == 'No': 14 | return 0 15 | else: 16 | return 1 17 | 18 | dataframeX.Gender = dataframeX.Gender.apply(genderToInt) 19 | dataframeY.Noshow = dataframeY.Noshow.apply(statusToInt) 20 | 21 | print(dataframeX.head()) 22 | print(dataframeY.head()) 23 | 24 | # 1 25 | import numpy as np 26 | seed = 7 27 | np.random.seed(seed) 28 | 29 | # 2 30 | from keras.models import Sequential 31 | from keras.layers import Dense 32 | model = Sequential() 33 | model.add(Dense(9, input_shape=(8,), init='uniform', activation='sigmoid')) 34 | model.add(Dense(9, init='uniform', activation='sigmoid')) 35 | model.add(Dense(9, init='uniform', activation='sigmoid')) 36 | model.add(Dense(1, init='uniform', activation='sigmoid')) 37 | model.summary() 38 | 39 | # 3 40 | import keras 41 | tbCallBack = keras.callbacks.TensorBoard(log_dir='/tmp/keras_logs', write_graph=True) 42 | 43 | # 4 44 | model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) 45 | model.fit(dataframeX.values, dataframeY.values, epochs=9, batch_size=50, verbose=1, validation_split=0.3, callbacks=[tbCallBack]) --------------------------------------------------------------------------------