├── README.md ├── architecture.jpg ├── color_net_training.ipynb ├── color_net_training.py └── confusion_matrix.jpg /README.md: -------------------------------------------------------------------------------- 1 | # Color-Classification-CNN 2 | Color recognition implemented in Keras. 3 | This is an Keras implementation of CNN. The implementation supports TensorFlow backends. 4 | 5 | The objective of the original model is to detect vehicle colors from traffic cameras. 6 | In the paper, they presented the vehicle color recognition system using CNN.The model successfully captured vehicle color in very high accuracy, 94.47%. 7 | To know more about how the model works, please refer to the [original paper](https://arxiv.org/pdf/1510.07391.pdf) 8 | (Vehicle Color Recognition Using Convolutional Neural Network , Reza Fuad Rachmadi and I Ketut Eddy Purnama) 9 | 10 | 11 | ## Model Architecture 12 | ![architecture](https://raw.githubusercontent.com/beerboaa/Color-Classification-CNN/master/architecture.jpg) 13 | 14 | 15 | ## Confusion Matrix of the model 16 | ![architecture](https://raw.githubusercontent.com/beerboaa/Color-Classification-CNN/master/confusion_matrix.jpg) 17 | 18 | 19 | -------------------------------------------------------------------------------- /architecture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/beerboaa/Color-Classification-CNN/0d1ef11f7a8eec6a586609c9af8872f167ea45b3/architecture.jpg -------------------------------------------------------------------------------- /color_net_training.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stderr", 10 | "output_type": "stream", 11 | "text": [ 12 | "Using TensorFlow backend.\n" 13 | ] 14 | } 15 | ], 16 | "source": [ 17 | "from keras.models import 
Sequential,Model,load_model\n", 18 | "from keras.optimizers import SGD\n", 19 | "from keras.layers import BatchNormalization, Lambda, Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation\n", 20 | "from keras.layers.merge import Concatenate\n", 21 | "from keras.preprocessing.image import ImageDataGenerator\n", 22 | "from keras.callbacks import ModelCheckpoint\n", 23 | "import numpy as np\n", 24 | "import keras.backend as K" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 1, 30 | "metadata": { 31 | "collapsed": true 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "def color_net(num_classes):\n", 36 | " # placeholder for input image\n", 37 | " input_image = Input(shape=(224,224,3))\n", 38 | " # ============================================= TOP BRANCH ===================================================\n", 39 | " # first top convolution layer\n", 40 | " top_conv1 = Convolution2D(filters=48,kernel_size=(11,11),strides=(4,4),\n", 41 | " input_shape=(224,224,3),activation='relu')(input_image)\n", 42 | " top_conv1 = BatchNormalization()(top_conv1)\n", 43 | " top_conv1 = MaxPooling2D(pool_size=(3,3),strides=(2,2))(top_conv1)\n", 44 | "\n", 45 | " # second top convolution layer\n", 46 | " # split feature map by half\n", 47 | " top_top_conv2 = Lambda(lambda x : x[:,:,:,:24])(top_conv1)\n", 48 | " top_bot_conv2 = Lambda(lambda x : x[:,:,:,24:])(top_conv1)\n", 49 | "\n", 50 | " top_top_conv2 = Convolution2D(filters=64,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(top_top_conv2)\n", 51 | " top_top_conv2 = BatchNormalization()(top_top_conv2)\n", 52 | " top_top_conv2 = MaxPooling2D(pool_size=(3,3),strides=(2,2))(top_top_conv2)\n", 53 | "\n", 54 | " top_bot_conv2 = Convolution2D(filters=64,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(top_bot_conv2)\n", 55 | " top_bot_conv2 = BatchNormalization()(top_bot_conv2)\n", 56 | " top_bot_conv2 = 
MaxPooling2D(pool_size=(3,3),strides=(2,2))(top_bot_conv2)\n", 57 | "\n", 58 | " # third top convolution layer\n", 59 | " # concat 2 feature map\n", 60 | " top_conv3 = Concatenate()([top_top_conv2,top_bot_conv2])\n", 61 | " top_conv3 = Convolution2D(filters=192,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(top_conv3)\n", 62 | "\n", 63 | " # fourth top convolution layer\n", 64 | " # split feature map by half\n", 65 | " top_top_conv4 = Lambda(lambda x : x[:,:,:,:96])(top_conv3)\n", 66 | " top_bot_conv4 = Lambda(lambda x : x[:,:,:,96:])(top_conv3)\n", 67 | "\n", 68 | " top_top_conv4 = Convolution2D(filters=96,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(top_top_conv4)\n", 69 | " top_bot_conv4 = Convolution2D(filters=96,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(top_bot_conv4)\n", 70 | "\n", 71 | " # fifth top convolution layer\n", 72 | " top_top_conv5 = Convolution2D(filters=64,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(top_top_conv4)\n", 73 | " top_top_conv5 = MaxPooling2D(pool_size=(3,3),strides=(2,2))(top_top_conv5) \n", 74 | "\n", 75 | " top_bot_conv5 = Convolution2D(filters=64,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(top_bot_conv4)\n", 76 | " top_bot_conv5 = MaxPooling2D(pool_size=(3,3),strides=(2,2))(top_bot_conv5)\n", 77 | "\n", 78 | " # ============================================= TOP BOTTOM ===================================================\n", 79 | " # first bottom convolution layer\n", 80 | " bottom_conv1 = Convolution2D(filters=48,kernel_size=(11,11),strides=(4,4),\n", 81 | " input_shape=(227,227,3),activation='relu')(input_image)\n", 82 | " bottom_conv1 = BatchNormalization()(bottom_conv1)\n", 83 | " bottom_conv1 = MaxPooling2D(pool_size=(3,3),strides=(2,2))(bottom_conv1)\n", 84 | "\n", 85 | " # second bottom convolution layer\n", 86 | " # split feature map by half\n", 87 | " bottom_top_conv2 = Lambda(lambda x : x[:,:,:,:24])(bottom_conv1)\n", 
88 | " bottom_bot_conv2 = Lambda(lambda x : x[:,:,:,24:])(bottom_conv1)\n", 89 | "\n", 90 | " bottom_top_conv2 = Convolution2D(filters=64,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(bottom_top_conv2)\n", 91 | " bottom_top_conv2 = BatchNormalization()(bottom_top_conv2)\n", 92 | " bottom_top_conv2 = MaxPooling2D(pool_size=(3,3),strides=(2,2))(bottom_top_conv2)\n", 93 | "\n", 94 | " bottom_bot_conv2 = Convolution2D(filters=64,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(bottom_bot_conv2)\n", 95 | " bottom_bot_conv2 = BatchNormalization()(bottom_bot_conv2)\n", 96 | " bottom_bot_conv2 = MaxPooling2D(pool_size=(3,3),strides=(2,2))(bottom_bot_conv2)\n", 97 | "\n", 98 | " # third bottom convolution layer\n", 99 | " # concat 2 feature map\n", 100 | " bottom_conv3 = Concatenate()([bottom_top_conv2,bottom_bot_conv2])\n", 101 | " bottom_conv3 = Convolution2D(filters=192,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(bottom_conv3)\n", 102 | "\n", 103 | " # fourth bottom convolution layer\n", 104 | " # split feature map by half\n", 105 | " bottom_top_conv4 = Lambda(lambda x : x[:,:,:,:96])(bottom_conv3)\n", 106 | " bottom_bot_conv4 = Lambda(lambda x : x[:,:,:,96:])(bottom_conv3)\n", 107 | "\n", 108 | " bottom_top_conv4 = Convolution2D(filters=96,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(bottom_top_conv4)\n", 109 | " bottom_bot_conv4 = Convolution2D(filters=96,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(bottom_bot_conv4)\n", 110 | "\n", 111 | " # fifth bottom convolution layer\n", 112 | " bottom_top_conv5 = Convolution2D(filters=64,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(bottom_top_conv4)\n", 113 | " bottom_top_conv5 = MaxPooling2D(pool_size=(3,3),strides=(2,2))(bottom_top_conv5) \n", 114 | "\n", 115 | " bottom_bot_conv5 = Convolution2D(filters=64,kernel_size=(3,3),strides=(1,1),activation='relu',padding='same')(bottom_bot_conv4)\n", 116 | " 
bottom_bot_conv5 = MaxPooling2D(pool_size=(3,3),strides=(2,2))(bottom_bot_conv5)\n", 117 | "\n", 118 | " # ======================================== CONCATENATE TOP AND BOTTOM BRANCH =================================\n", 119 | " conv_output = Concatenate()([top_top_conv5,top_bot_conv5,bottom_top_conv5,bottom_bot_conv5])\n", 120 | "\n", 121 | " # Flatten\n", 122 | " flatten = Flatten()(conv_output)\n", 123 | "\n", 124 | " # Fully-connected layer\n", 125 | " FC_1 = Dense(units=4096, activation='relu')(flatten)\n", 126 | " FC_1 = Dropout(0.6)(FC_1)\n", 127 | " FC_2 = Dense(units=4096, activation='relu')(FC_1)\n", 128 | " FC_2 = Dropout(0.6)(FC_2)\n", 129 | " output = Dense(units=num_classes, activation='softmax')(FC_2)\n", 130 | " \n", 131 | " model = Model(inputs=input_image,outputs=output)\n", 132 | " sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)\n", 133 | " # sgd = SGD(lr=0.01, momentum=0.9, decay=0.0005, nesterov=True)\n", 134 | " model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])\n", 135 | " \n", 136 | " return model" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": 11, 142 | "metadata": {}, 143 | "outputs": [ 144 | { 145 | "name": "stdout", 146 | "output_type": "stream", 147 | "text": [ 148 | "Found 12483 images belonging to 8 classes.\n", 149 | "Found 3118 images belonging to 8 classes.\n", 150 | "Epoch 1/5\n", 151 | "11999/12000 [============================>.] - ETA: 0s - loss: 0.2570 - acc: 0.9071Epoch 00000: val_acc improved from -inf to 0.86160, saving model to beer_net_color_weights.hdf5\n", 152 | "12000/12000 [==============================] - 3404s - loss: 0.2570 - acc: 0.9071 - val_loss: 0.3841 - val_acc: 0.8616\n", 153 | "Epoch 2/5\n", 154 | "11999/12000 [============================>.] 
- ETA: 0s - loss: 0.1378 - acc: 0.9483Epoch 00001: val_acc did not improve\n", 155 | "12000/12000 [==============================] - 3357s - loss: 0.1378 - acc: 0.9483 - val_loss: 0.5655 - val_acc: 0.8428\n", 156 | "Epoch 3/5\n", 157 | "11999/12000 [============================>.] - ETA: 0s - loss: 0.1019 - acc: 0.9609Epoch 00002: val_acc improved from 0.86160 to 0.86803, saving model to beer_net_color_weights.hdf5\n", 158 | "12000/12000 [==============================] - 3361s - loss: 0.1019 - acc: 0.9609 - val_loss: 0.4238 - val_acc: 0.8680\n", 159 | "Epoch 4/5\n", 160 | "11999/12000 [============================>.] - ETA: 0s - loss: 0.0781 - acc: 0.9697Epoch 00003: val_acc did not improve\n", 161 | "12000/12000 [==============================] - 3350s - loss: 0.0781 - acc: 0.9697 - val_loss: 0.7076 - val_acc: 0.8491\n", 162 | "Epoch 5/5\n", 163 | "11999/12000 [============================>.] - ETA: 0s - loss: 0.0636 - acc: 0.9754Epoch 00004: val_acc improved from 0.86803 to 0.88031, saving model to beer_net_color_weights.hdf5\n", 164 | "12000/12000 [==============================] - 3355s - loss: 0.0636 - acc: 0.9754 - val_loss: 0.5297 - val_acc: 0.8803\n" 165 | ] 166 | } 167 | ], 168 | "source": [ 169 | "img_rows , img_cols = 227,227\n", 170 | "num_classes = 8\n", 171 | "batch_size = 32\n", 172 | "nb_epoch = 5\n", 173 | "\n", 174 | "# initialise model\n", 175 | "model = color_net(num_classes)\n", 176 | "\n", 177 | "filepath = 'color_weights.hdf5'\n", 178 | "checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n", 179 | "callbacks_list = [checkpoint]\n", 180 | "\n", 181 | "train_datagen = ImageDataGenerator(\n", 182 | " rescale=1./255,\n", 183 | " shear_range=0.2,\n", 184 | " zoom_range=0.3,\n", 185 | " horizontal_flip=True)\n", 186 | "test_datagen = ImageDataGenerator(rescale=1./255)\n", 187 | "\n", 188 | "training_set = train_datagen.flow_from_directory(\n", 189 | " 'train/',\n", 190 | " target_size=(img_rows, 
from keras.models import Sequential,Model,load_model
from keras.optimizers import SGD
from keras.layers import BatchNormalization, Lambda, Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.merge import Concatenate
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
import numpy as np
import keras.backend as K


def _branch(input_image):
    """Build one AlexNet-style pathway over ``input_image``.

    The original file duplicated this ~40-line pathway verbatim for the
    "top" and "bottom" branches; it is factored out here so the two
    branches cannot drift apart. Inside the pathway the feature maps are
    split in half channel-wise, convolved separately, and re-concatenated,
    mimicking AlexNet's two-GPU layout.

    Args:
        input_image: 4-D image tensor, expected shape (batch, 224, 224, 3).

    Returns:
        Tuple ``(top5, bot5)`` of the two pooled conv5 half-tensors, so the
        caller can concatenate all four halves from both branches.
    """
    # first convolution layer (input_shape kwarg removed: it is ignored on
    # functional-API layers that already receive a tensor)
    conv1 = Convolution2D(filters=48, kernel_size=(11, 11), strides=(4, 4),
                          activation='relu')(input_image)
    conv1 = BatchNormalization()(conv1)
    conv1 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(conv1)

    # second convolution layer: split the 48 feature maps in half
    top2 = Lambda(lambda t: t[:, :, :, :24])(conv1)
    bot2 = Lambda(lambda t: t[:, :, :, 24:])(conv1)

    top2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                         activation='relu', padding='same')(top2)
    top2 = BatchNormalization()(top2)
    top2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top2)

    bot2 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                         activation='relu', padding='same')(bot2)
    bot2 = BatchNormalization()(bot2)
    bot2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bot2)

    # third convolution layer: re-concatenate the two halves
    conv3 = Concatenate()([top2, bot2])
    conv3 = Convolution2D(filters=192, kernel_size=(3, 3), strides=(1, 1),
                          activation='relu', padding='same')(conv3)

    # fourth convolution layer: split the 192 feature maps in half again
    top4 = Lambda(lambda t: t[:, :, :, :96])(conv3)
    bot4 = Lambda(lambda t: t[:, :, :, 96:])(conv3)

    top4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1),
                         activation='relu', padding='same')(top4)
    bot4 = Convolution2D(filters=96, kernel_size=(3, 3), strides=(1, 1),
                         activation='relu', padding='same')(bot4)

    # fifth convolution layer
    top5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                         activation='relu', padding='same')(top4)
    top5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(top5)

    bot5 = Convolution2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                         activation='relu', padding='same')(bot4)
    bot5 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(bot5)

    return top5, bot5


def beer_net(num_classes):
    """Build and compile the two-branch color-classification CNN.

    Architecture follows Rachmadi & Purnama, "Vehicle Color Recognition
    Using Convolutional Neural Network" (arXiv:1510.07391): two parallel
    AlexNet-style branches process the same 224x224 RGB input; their four
    pooled conv5 half-tensors are concatenated and fed to two 4096-unit
    fully-connected layers and a softmax head.

    Args:
        num_classes: number of output color classes (softmax units).

    Returns:
        A compiled ``keras.models.Model`` (SGD with Nesterov momentum,
        categorical cross-entropy loss, accuracy metric).
    """
    # placeholder for input image
    input_image = Input(shape=(224, 224, 3))

    # ================= TOP and BOTTOM BRANCH (identical pathways) ========
    # (the second section was mislabelled "TOP BOTTOM" in the original)
    top_top_conv5, top_bot_conv5 = _branch(input_image)
    bottom_top_conv5, bottom_bot_conv5 = _branch(input_image)

    # ============ CONCATENATE TOP AND BOTTOM BRANCH =======================
    conv_output = Concatenate()([top_top_conv5, top_bot_conv5,
                                 bottom_top_conv5, bottom_bot_conv5])

    # Flatten
    flatten = Flatten()(conv_output)

    # Fully-connected classifier head
    FC_1 = Dense(units=4096, activation='relu')(flatten)
    FC_1 = Dropout(0.6)(FC_1)
    FC_2 = Dense(units=4096, activation='relu')(FC_1)
    FC_2 = Dropout(0.6)(FC_2)
    output = Dense(units=num_classes, activation='softmax')(FC_2)

    model = Model(inputs=input_image, outputs=output)
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    # sgd = SGD(lr=0.01, momentum=0.9, decay=0.0005, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model


def main():
    """Train the color net on ``train/`` and ``test/`` directory trees."""
    img_rows, img_cols = 224, 224
    # FIX: was 9, but the dataset (see the notebook's run output:
    # "Found 12483 images belonging to 8 classes") has 8 color classes;
    # a 9-unit softmax would leave one permanently dead class.
    num_classes = 8
    batch_size = 32
    nb_epoch = 5

    # initialise model
    model = beer_net(num_classes)

    filepath = 'color_weights.hdf5'
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                                 save_best_only=True, mode='max')
    callbacks_list = [checkpoint]

    train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.3,
        horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)

    training_set = train_datagen.flow_from_directory(
        'train/',
        target_size=(img_rows, img_cols),
        batch_size=batch_size,
        class_mode='categorical')
    test_set = test_datagen.flow_from_directory(
        'test/',
        target_size=(img_rows, img_cols),
        batch_size=batch_size,
        class_mode='categorical')

    # FIX: steps were hard-coded to 100000 / 30000, i.e. roughly 255 passes
    # over the 12483-image training set per "epoch". Derive one full pass
    # per epoch from the generator sizes instead (ceil division).
    steps_per_epoch = (training_set.samples + batch_size - 1) // batch_size
    validation_steps = (test_set.samples + batch_size - 1) // batch_size

    model.fit_generator(
        training_set,
        steps_per_epoch=steps_per_epoch,
        epochs=nb_epoch,
        validation_data=test_set,
        validation_steps=validation_steps,
        callbacks=callbacks_list)

    model.save('color_model.h5')


if __name__ == '__main__':
    main()