├── Accuracy.png
├── LiveClassifier.py
├── README.md
├── activity.ipynb
├── ezgif-2-bdba792399.gif
└── main.py
/Accuracy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JithLord/Activity_Detection/7fd5606558644129d8864be0f47f254a3f4abece/Accuracy.png
--------------------------------------------------------------------------------
/LiveClassifier.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | from PIL import Image, ImageOps
4 | from tensorflow.keras import models
5 | import tensorflow as tf
6 | from datetime import datetime
7 |
8 | # Load the saved model
9 | model = models.load_model('activity4.h5')
10 | video = cv2.VideoCapture(0)
11 | class_names = ["Control", "Sitting", "Walking on Stairs", "Standing", "Sleeping", "Walking"]
12 |
13 | while True:
14 |     _, frame = video.read()
15 |     image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))  # OpenCV frames are BGR; convert to RGB first
16 |     size = (150, 150)
17 |     data = np.ndarray(shape=(1, 150, 150, 3), dtype=np.float32)
18 |     image = ImageOps.fit(image, size, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
19 |
20 | image_array = np.asarray(image)
21 |     normalized_image_array = image_array.astype(np.float32) / 255.0  # match the 1./255 rescaling used at training time in main.py
22 | data[0] = normalized_image_array
23 |
24 |     prediction = model.predict(data)
25 |     score = prediction[0]  # the final layer is already softmax, so an extra tf.nn.softmax is unnecessary
26 |     print(datetime.now(), class_names[np.argmax(score)])
27 |
28 | cv2.imshow("Capturing", frame)
29 |     key = cv2.waitKey(1)  # press 'q' to quit
30 |
31 | if key == ord('q'):
32 | break
33 | video.release()
34 | cv2.destroyAllWindows()
35 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | # Activity_Detection
This repository uses TensorFlow to train on 4268 non-copyrighted images, obtained by scraping DuckDuckGo searches, and classifies live camera input into 6 categories: Sitting, Standing, Walking, Walking on Stairs, Sleeping, and Control.
7 |
8 | Issues? Email Me: 308rwe3oa@relay.firefox.com
9 |
10 | ## IMPORTANT
Feel free to use the code, but make sure you give us credit every time you use it.
12 | ## Dataset
The dataset is divided into 6 categories of 150x150x3 images, i.e. 150x150 pixels with 3 channels (RGB). The Control category prevents random images from being forced into one of the remaining 5 categories. Augmentation is used to increase the number of training images (see the sketch below). The dataset includes humans of various backgrounds, ages, and skin tones, photographed at different times of day.
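
Augmentation is done with Keras' `ImageDataGenerator`; the sketch below condenses the settings used in `main.py` (the path is the one hard-coded there, so point `train_dir` at your own copy of the dataset):

```
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = "/content/drive/MyDrive/DatasetNew/DatasetNew"  # path used in main.py

# Augment only the training images: flips, brightness shifts, small rotations/translations
train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=10,
                                   width_shift_range=0.3,
                                   height_shift_range=0.3,
                                   brightness_range=(0.2, 0.5),
                                   horizontal_flip=True,
                                   fill_mode='nearest')

# flow_from_directory expects one sub-folder per category (Control, Sitting, ...)
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    batch_size=20,
                                                    shuffle=True,
                                                    class_mode='categorical',
                                                    target_size=(150, 150))
```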
14 |
15 |
16 |
*My dataset is now available to download for free on Kaggle: https://www.kaggle.com/jithinnambiarj/human-activity-detection-dataset?rvi=1*
**Please give me credit every time you use it.**
19 |
20 |
21 | ## Code
The model uses InceptionV3 for transfer learning: the output of its `mixed7` layer feeds a 1024-neuron dense layer with SELU activation, 0.1 dropout, a 512-neuron dense layer with SELU activation, 0.1 dropout, and a final 6-neuron dense layer with softmax activation.
Training uses the RMSprop optimizer with a learning rate of 0.0001 and categorical cross-entropy loss.
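
This is built in `main.py`; a condensed sketch (here `weights='imagenet'` downloads the same no-top InceptionV3 weights that `main.py` fetches manually with wget):

```
from tensorflow.keras import layers, Model
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.optimizers import RMSprop

base = InceptionV3(input_shape=(150, 150, 3), include_top=False, weights='imagenet')
base.trainable = False  # freeze the pre-trained convolutional base

x = layers.Flatten()(base.get_layer('mixed7').output)
x = layers.Dense(1024, activation='selu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(512, activation='selu')(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(6, activation='softmax')(x)  # one output per category

model = Model(base.input, x)
model.compile(optimizer=RMSprop(learning_rate=0.0001),
              loss='categorical_crossentropy',
              metrics=['acc'])
```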
24 |
25 |
26 |
27 |
28 |
29 |
*Block Diagram of the Project*
31 |
32 |
33 |
34 |
35 |
*Training Accuracy (left) and Training Loss (right)*
37 |
38 | ## Important
You can run this program on colab.research.google.com with a TPU or GPU for faster processing.
The code is attached here in .ipynb format; just copy it into the Colab website.
Click Runtime and change the runtime type to GPU or TPU. You can confirm the accelerator is active with the snippet below.
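
This check is not part of the original notebook, but `tf.config.list_physical_devices` is standard TensorFlow 2 and is a quick way to verify the GPU runtime actually kicked in:

```
import tensorflow as tf

# A non-empty list means a GPU is visible to TensorFlow
print(tf.config.list_physical_devices('GPU'))
```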
42 |
43 | ## Download the .h5 model
44 | https://we.tl/t-HBscIiHwov
45 |
46 | ## For loading images from your drive:
47 | ```
48 | from google.colab import drive
49 | drive.mount('/content/drive')
50 | ```
Or you can mount it from the Colab UI and point the code at the exact folder to read from:
52 | 1. Runtime -> Change runtime type -> GPU or TPU
53 | 2. Click on Files from the left toolbar
54 | 3. Click Mount drive
4. Copy the path of the dataset folder (and assign it to `train_dir` and `validation_dir`, as shown below)
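
For example, with the paths hard-coded in `main.py` (swap in wherever your copy of the dataset lives):

```
train_dir = r"/content/drive/MyDrive/DatasetNew/DatasetNew"
validation_dir = r"/content/drive/MyDrive/Validation"
```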
56 |
57 |
58 | ### Please feel free to open any issues
59 |
60 | ## Some resources you can use to improve your model:
61 | 1. https://www.tensorflow.org/tutorials/keras/overfit_and_underfit
62 | 1. https://www.tensorflow.org/tutorials/keras/save_and_load
63 | 1. https://www.tensorflow.org/tutorials/images/classification (Super useful)
64 | 1. https://www.tensorflow.org/tutorials/images/transfer_learning
65 | 1. https://www.tensorflow.org/api_docs/python/tf/keras/losses
66 | 1. https://www.tensorflow.org/api_docs/python/tf/keras/layers
67 |
68 | ## The LiveClassifier.py file can be used to classify images from a webcam or any camera.
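
Run it with the downloaded `activity4.h5` in the same folder; press `q` to close the capture window. To try the model on a single still image instead, here is a minimal sketch (`example.jpg` is a placeholder path; the 1./255 scaling matches the training pipeline in `main.py`):

```
import numpy as np
from PIL import Image, ImageOps
from tensorflow.keras import models

model = models.load_model('activity4.h5')
class_names = ["Control", "Sitting", "Walking on Stairs", "Standing", "Sleeping", "Walking"]

# Crop/resize to the 150x150 input the network expects, then scale to [0, 1]
image = ImageOps.fit(Image.open('example.jpg').convert('RGB'), (150, 150))
data = np.asarray(image, dtype=np.float32)[np.newaxis] / 255.0

print(class_names[int(np.argmax(model.predict(data)[0]))])
```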
69 |
--------------------------------------------------------------------------------
/activity.ipynb:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from tensorflow.keras import layers
4 | from tensorflow.keras import Model
5 | import tensorflow as tf
6 |
7 | class myCallback(tf.keras.callbacks.Callback):
8 |     def on_epoch_end(self, epoch, logs={}):
9 |         if logs.get('acc', 0) > 0.8 and logs.get('val_acc', 0) > 0.8:  # stop once both accuracies clear 80%
10 |             self.model.stop_training = True
11 |
12 | callbacks = myCallback()
13 | img_height = 300
14 | img_width = 300
15 | from tensorflow.keras.preprocessing.image import ImageDataGenerator
16 |
17 |
18 | TRAINING_DIR = r"/content/drive/My Drive/Dataset/Images"
19 | training_datagen = ImageDataGenerator(
20 | rescale = 1./255,
21 | # rotation_range=10,
22 | # width_shift_range=0.3,
23 | # height_shift_range=0.3,
24 |     # featurewise_std_normalization would need datagen.fit() on sample data, and samplewise
25 |     # normalization would have to be mirrored in the validation generator, so both are left off
26 | brightness_range=(0.2,0.5),
27 | # shear_range=0.2,
28 | # zoom_range=0.2,
29 | horizontal_flip=True,
30 | fill_mode='nearest')
31 |
32 | VALIDATION_DIR = "/content/drive/My Drive/Dataset/Validation"
33 | validation_datagen = ImageDataGenerator(rescale = 1./255)
34 |
35 | train_generator = training_datagen.flow_from_directory(
36 | TRAINING_DIR,
37 |     target_size=(img_height, img_width),
38 | class_mode='categorical',
39 | batch_size=2
40 | )
41 |
42 | validation_generator = validation_datagen.flow_from_directory(
43 | VALIDATION_DIR,
44 |     target_size=(img_height, img_width),
45 | class_mode='categorical',
46 | batch_size=2
47 | )
48 |
49 | # model.save("activity2.h5")
50 |
51 | from tensorflow.keras import layers
52 | model = tf.keras.models.Sequential([
53 |     tf.keras.layers.InputLayer(input_shape=(img_height, img_width, 3)),  # the generators already rescale by 1./255, so no Rescaling layer
54 |     tf.keras.layers.Conv2D(16, (3,3), padding='same', activation='relu'),
55 |     tf.keras.layers.MaxPooling2D(2, 2),
56 |     tf.keras.layers.BatchNormalization(),
57 |     tf.keras.layers.Conv2D(32, (3,3), padding='same', activation='relu'),
58 |     tf.keras.layers.MaxPooling2D(2, 2),
59 |     tf.keras.layers.BatchNormalization(),
60 |     tf.keras.layers.Conv2D(64, (3,3), padding='same', activation='relu'),
61 |     tf.keras.layers.MaxPooling2D(2, 2),
62 |     tf.keras.layers.BatchNormalization(),
63 |     tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
64 |     tf.keras.layers.MaxPooling2D(2, 2),
65 |     tf.keras.layers.BatchNormalization(),
66 |     # tf.keras.layers.Conv2D(64, (3,3)),
67 |     # tf.keras.layers.MaxPooling2D(2, 2),
68 |     # tf.keras.layers.BatchNormalization(),
69 |     layers.Dropout(0.2),
70 |     tf.keras.layers.Flatten(),
71 |     tf.keras.layers.Dense(2048, activation='relu'),
72 |     tf.keras.layers.Dense(6, activation='softmax')  # one unit per category; must match the number of class folders
73 | ])
74 |
75 |
76 | model.summary()
77 |
78 | model.compile(loss = tf.keras.losses.CategoricalCrossentropy(), optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.00010), metrics=['acc'])
79 |
80 | history = model.fit(train_generator, epochs=1000, steps_per_epoch=10, validation_data = validation_generator, verbose = 1, validation_steps=1, callbacks=[callbacks])
81 |
82 |
83 | import matplotlib.pyplot as plt
84 | acc = history.history['acc']
85 | val_acc = history.history['val_acc']
86 | loss = history.history['loss']
87 | val_loss = history.history['val_loss']
88 |
89 | epochs = range(len(acc))
90 | epochs1 = range(len(val_acc))
91 | print("val_loss",val_loss)
92 | plt.subplot(1, 2, 1)
93 | plt.plot(epochs, acc, 'r', label='Training accuracy')
94 | plt.plot(epochs1, val_acc, 'b', label='Validation accuracy')
95 | plt.title('Training and validation accuracy')
96 | plt.legend(loc=0)
97 |
98 | plt.subplot(1, 2, 2)
99 | plt.plot(epochs, loss, 'r', label='Training loss')
100 | plt.plot(epochs1, val_loss, 'b', label='Validation loss')
101 | plt.legend(loc=0)
102 |
--------------------------------------------------------------------------------
/ezgif-2-bdba792399.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JithLord/Activity_Detection/7fd5606558644129d8864be0f47f254a3f4abece/ezgif-2-bdba792399.gif
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | # Meant for Colab/Jupyter: the "!wget" below is an IPython shell escape, not plain Python
3 | from tensorflow.keras import layers
4 | from tensorflow.keras import Model
5 | !wget --no-check-certificate \
6 | https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \
7 | -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5
8 |
9 | from tensorflow.keras.applications.inception_v3 import InceptionV3
10 |
11 | local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
12 |
13 | pre_trained_model = InceptionV3(input_shape = (150, 150, 3),
14 | include_top = False,
15 | weights = None)
16 |
17 | pre_trained_model.load_weights(local_weights_file)
18 |
19 | for layer in pre_trained_model.layers:
20 | layer.trainable = False
21 |
22 | # pre_trained_model.summary()
23 |
24 | last_layer = pre_trained_model.get_layer('mixed7')
25 | print('last layer output shape: ', last_layer.output_shape)
26 | last_output = last_layer.output
27 |
28 | from tensorflow.keras.optimizers import RMSprop
29 |
30 | # Flatten the output layer to 1 dimension
31 | x = layers.Flatten()(last_output)
32 | x = layers.Dense(1024, activation='selu')(x)
33 | x = layers.Dropout(0.1)(x)
34 | x = layers.Dense(512, activation='selu')(x)
35 | x = layers.Dropout(0.1)(x)
36 | # Final Softmax layer for classification
37 | x = layers.Dense(6, activation='softmax')(x)
38 |
39 | model = Model( pre_trained_model.input, x)
40 |
41 | model.compile(optimizer = RMSprop(learning_rate=0.0001),  # the "lr" alias is deprecated in newer Keras
42 | loss = 'categorical_crossentropy',
43 | metrics = ['acc'])
44 |
45 | from tensorflow.keras.preprocessing.image import ImageDataGenerator
46 |
47 | train_dir = r"/content/drive/MyDrive/DatasetNew/DatasetNew"
48 | validation_dir = r"/content/drive/MyDrive/Validation"
49 |
50 | # Add our data-augmentation parameters to ImageDataGenerator
51 | train_datagen = ImageDataGenerator(rescale = 1./255,
52 | rotation_range=10,
53 | width_shift_range=0.3,
54 | height_shift_range=0.3,
55 | # featurewise_std_normalization=True,
56 | # samplewise_std_normalization=True,
57 | brightness_range=(0.2,0.5),
58 | # shear_range=0.2,
59 | # zoom_range=0.2,
60 | horizontal_flip=True,
61 | fill_mode='nearest')
62 |
63 | # Note that the validation data should not be augmented!
64 | test_datagen = ImageDataGenerator( rescale = 1.0/255. )
65 |
66 | # Flow training images in batches of 20 using train_datagen generator
67 | train_generator = train_datagen.flow_from_directory(train_dir,
68 | batch_size = 20,
69 |                                                     shuffle=True,
70 | class_mode = 'categorical',
71 | target_size = (150, 150))
72 |
73 | # Flow validation images in batches of 20 using the un-augmented test_datagen generator
74 | validation_generator = test_datagen.flow_from_directory(validation_dir,
75 |                                                         batch_size = 20,
76 |                                                         shuffle=False,  # no need to shuffle validation data
77 |                                                         class_mode = 'categorical',
78 |                                                         target_size = (150, 150))
79 |
80 |
81 | import tensorflow as tf
82 | from PIL import ImageFile
83 | ImageFile.LOAD_TRUNCATED_IMAGES = True
84 | class myCallback(tf.keras.callbacks.Callback):
85 |     def on_epoch_end(self, epoch, logs={}):
86 |         if logs.get('acc', 0) > 0.85:  # stop once training accuracy clears 85%
87 |             self.model.stop_training = True
88 | callbacks = myCallback()
89 | history = model.fit(
90 | train_generator,
91 | validation_data = validation_generator,
92 | steps_per_epoch = 20,
93 | epochs = 500,
94 | validation_steps = 20,
95 | verbose = 1,callbacks=[callbacks])
96 | model.save("activity4.h5")  # the filename LiveClassifier.py loads
97 | import matplotlib.pyplot as plt
98 | acc = history.history['acc']
99 | # val_acc = history.history['val_acc']
100 | loss = history.history['loss']
101 | # val_loss = history.history['val_loss']
102 |
103 | epochs = range(len(acc))
104 |
105 |
106 | plt.plot(epochs, acc, 'r', label='Training accuracy')
107 | plt.title('Training Accuracy')
108 | plt.legend(loc=0)
109 | plt.show()
110 | plt.figure()  # start a fresh figure for the loss plot
111 |
112 |
113 | plt.plot(epochs, loss, 'r', label='Training Loss')
114 | plt.title('Training Loss')
115 | plt.legend(loc=0)
116 | plt.show()
117 |
118 |
119 |
--------------------------------------------------------------------------------