├── AI_Train.py
├── Output Examples
│   ├── Explosion008_x264.gif
│   └── Explosion023_x264.gif
├── README.md
├── __pycache__
│   ├── c3d.cpython-36.pyc
│   ├── classifier.cpython-36.pyc
│   ├── configuration.cpython-36.pyc
│   └── parameters.cpython-36.pyc
├── c3d.py
├── c3d.pyc
├── classifier.py
├── classifier.pyc
├── configuration.py
├── configuration.pyc
├── input
│   └── Robbery056_x264.mp4
├── output
│   └── Robbery056_x264.gif
├── parameters.py
├── parameters.pyc
├── test_detect.py
├── trained_models
│   ├── model.json
│   ├── weights.mat
│   └── weights_L1L2.mat
└── utils
    ├── array_util.py
    ├── array_util.pyc
    ├── video_util.py
    ├── video_util.pyc
    ├── visualization_util.py
    └── visualization_util.pyc

/AI_Train.py:
--------------------------------------------------------------------------------
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.regularizers import l2
from keras.optimizers import Adagrad
from scipy.io import savemat
from keras.models import model_from_json
import theano.tensor as T
import theano
import os
from os import listdir
import numpy as np
from datetime import datetime


def save_model(model, json_path, weight_path):
    # Serialise the architecture to JSON and the per-layer weights to a .mat file.
    json_string = model.to_json()
    with open(json_path, 'w') as f:
        f.write(json_string)
    weights_dict = {}
    for i, layer in enumerate(model.layers):
        weights = layer.get_weights()
        my_list = np.zeros(len(weights), dtype=object)
        my_list[:] = weights
        weights_dict[str(i)] = my_list
    savemat(weight_path, weights_dict)


def load_model(json_path):
    model = model_from_json(open(json_path).read())
    return model


def load_dataset_Train_batch(AbnormalPath, NormalPath):
    # Build one training batch of 30 abnormal and 30 normal videos, each
    # represented by 32 C3D feature vectors of dimension 4096.
    batchsize = 60
    n_exp = int(batchsize / 2)

    Num_abnormal = 900
    Num_Normal = 792

    Abnor_list_iter = np.random.permutation(Num_abnormal)
    Abnor_list_iter = Abnor_list_iter[Num_abnormal - n_exp:]
    Norm_list_iter = np.random.permutation(Num_Normal)
    Norm_list_iter = Norm_list_iter[Num_Normal - n_exp:]

    All_Videos = []
    with open(AbnormalPath + "anomaly.txt", 'r') as f1:  # lists the abnormal-video feature files, one per line
        for line in f1:
            All_Videos.append(line.strip())
    AllFeatures = []
    print("Loading Anomaly videos Features...")

    Video_count = -1
    for iv in Abnor_list_iter:
        Video_count = Video_count + 1
        VideoPath = os.path.join(AbnormalPath, All_Videos[iv])
        f = open(VideoPath, "r")
        words = f.read().split()
        num_feat = len(words) // 4096

        count = -1
        VideoFeatues = []
        for feat in range(0, num_feat):
            feat_row1 = np.float32(words[feat * 4096:feat * 4096 + 4096])
            count = count + 1
            if count == 0:
                VideoFeatues = feat_row1
            if count > 0:
                VideoFeatues = np.vstack((VideoFeatues, feat_row1))

        if Video_count == 0:
            AllFeatures = VideoFeatues
        if Video_count > 0:
            AllFeatures = np.vstack((AllFeatures, VideoFeatues))
    print("Abnormal Features loaded")

    All_Videos = []
    with open(NormalPath + "normal.txt", 'r') as f1:  # lists the normal-video feature files, one per line
        for line in f1:
            All_Videos.append(line.strip())

    print("Loading Normal videos...")

    for iv in Norm_list_iter:
        VideoPath = os.path.join(NormalPath, All_Videos[iv])
        f = open(VideoPath, "r")
        words = f.read().split()
        num_feat = len(words) // 4096
        count = -1
        VideoFeatues = []
        for feat in range(0, num_feat):
            feat_row1 = np.float32(words[feat * 4096:feat * 4096 + 4096])
            count = count + 1
            if count == 0:
                VideoFeatues = feat_row1
            if count > 0:
                VideoFeatues = np.vstack((VideoFeatues, feat_row1))
        AllFeatures = np.vstack((AllFeatures, VideoFeatues))

    print("Features loaded")

    # Segment labels: the abnormal videos come first and are labelled 0, the
    # normal videos are labelled 1. custom_objective relies on this ordering.
    AllLabels = np.zeros(32 * batchsize, dtype='uint8')
    th_loop1 = n_exp * 32
    th_loop2 = n_exp * 32 - 1

    for iv in range(0, 32 * batchsize):
        if iv < th_loop1:
            AllLabels[iv] = 0
        if iv > th_loop2:
            AllLabels[iv] = 1

    return AllFeatures, AllLabels


def custom_objective(y_true, y_pred):
    # MIL ranking loss of Sultani et al.: a hinge loss between the maximum
    # segment score of each abnormal bag and each normal bag, plus sparsity and
    # temporal-smoothness terms. Written against the Theano backend (see
    # trained_models/model.json), so it will not run unchanged on TensorFlow.
    y_true = T.flatten(y_true)
    y_pred = T.flatten(y_pred)

    n_seg = 32   # segments (instances) per video (bag)
    nvid = 60    # videos per batch
    n_exp = nvid // 2
    Num_d = 32 * nvid

    sub_max = T.ones_like(y_pred)
    sub_sum_labels = T.ones_like(y_true)
    sub_sum_l1 = T.ones_like(y_true)
    sub_l2 = T.ones_like(y_true)

    for ii in range(0, nvid, 1):

        mm = y_true[ii * n_seg:ii * n_seg + n_seg]
        sub_sum_labels = T.concatenate([sub_sum_labels, T.stack(T.sum(mm))])

        Feat_Score = y_pred[ii * n_seg:ii * n_seg + n_seg]
        sub_max = T.concatenate([sub_max, T.stack(T.max(Feat_Score))])
        sub_sum_l1 = T.concatenate([sub_sum_l1, T.stack(T.sum(Feat_Score))])

        # Temporal smoothness: sum of squared differences between consecutive
        # segment scores within the bag.
        z1 = T.ones_like(Feat_Score)
        z2 = T.concatenate([z1, Feat_Score])
        z3 = T.concatenate([Feat_Score, z1])
        z_22 = z2[31:]
        z_44 = z3[:33]
        z = z_22 - z_44
        z = z[1:32]
        z = T.sum(T.sqr(z))
        sub_l2 = T.concatenate([sub_l2, T.stack(z)])

    sub_score = sub_max[Num_d:]
    F_labels = sub_sum_labels[Num_d:]

    sub_sum_l1 = sub_sum_l1[Num_d:]
    sub_sum_l1 = sub_sum_l1[:n_exp]
    sub_l2 = sub_l2[Num_d:]
    sub_l2 = sub_l2[:n_exp]

    # A label sum of 32 means every segment was labelled 1 (a normal video);
    # a label sum of 0 means an abnormal video.
    indx_nor = theano.tensor.eq(F_labels, 32).nonzero()[0]
    indx_abn = theano.tensor.eq(F_labels, 0).nonzero()[0]

    n_Nor = n_exp

    Sub_Nor = sub_score[indx_nor]
    Sub_Abn = sub_score[indx_abn]

    z = T.ones_like(y_true)
    for ii in range(0, n_Nor, 1):
        sub_z = T.maximum(1 - Sub_Abn + Sub_Nor[ii], 0)
        z = T.concatenate([z, T.stack(T.sum(sub_z))])

    z = z[Num_d:]
    z = T.mean(z, axis=-1) + 0.00008 * T.sum(sub_sum_l1) + 0.00008 * T.sum(sub_l2)

    return z


# Path contains C3D features (.txt file) of each video.
# Each text file contains 32 features, each of 4096 dimension.
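# A hypothetical sketch of the layout expected under AllClassPath, inferred
# from load_dataset_Train_batch above (file names below are placeholders):
#
#   out/
#   ├── anomaly.txt       # 900 lines, one abnormal feature-file name per line
#   ├── normal.txt        # 792 lines, one normal feature-file name per line
#   ├── Video001_x264.txt # 32 rows of 4096 whitespace-separated values
#   └── ...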
AllClassPath = 'C:\\Users\\Var\\Downloads\\Compressed\\anomaly-detection-master\\anomaly-detection-master\\out\\'

# output_dir is where the trained weights and the model JSON are saved.
output_dir = 'C:\\Users\\Var\\Downloads\\Compressed\\anomaly-detection-master\\anomaly-detection-master\\trained_model\\'

weights_path = output_dir + 'weights.mat'

model_path = output_dir + 'model.json'


# Create the fully connected classifier model
model = Sequential()
model.add(Dense(512, input_dim=4096, kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001), activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(32, kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001)))
model.add(Dropout(0.6))
model.add(Dense(1, kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001), activation='sigmoid'))

adagrad = Adagrad(lr=0.01, epsilon=1e-08)

model.compile(loss=custom_objective, optimizer=adagrad)

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

All_class_files = listdir(AllClassPath)
All_class_files.sort()
loss_graph = []
num_iters = 6
total_iterations = 0
batchsize = 60
time_before = datetime.now()

for it_num in range(num_iters):
    inputs, targets = load_dataset_Train_batch(AllClassPath, AllClassPath)
    batch_loss = model.train_on_batch(inputs, targets)
    loss_graph = np.hstack((loss_graph, batch_loss))
    total_iterations += 1
    if total_iterations % 20 == 1:
        print("Iteration=" + str(total_iterations) + " took: " + str(datetime.now() - time_before) + ", with loss of " + str(batch_loss))

save_model(model, model_path, weights_path)
print("Train Successful - Model saved")
--------------------------------------------------------------------------------
/Output Examples/Explosion008_x264.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/Output Examples/Explosion008_x264.gif
--------------------------------------------------------------------------------
/Output Examples/Explosion023_x264.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/Output Examples/Explosion023_x264.gif
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

Implementation of the CVPR 2018 paper [Real-world Anomaly Detection in Surveillance Videos](https://arxiv.org/pdf/1801.04264.pdf).

Download the C3D Sports-1M weights from [here](https://github.com/adamcasson/c3d/releases/download/v0.1/sports1M_weights_tf.h5) and save them to the 'trained_models' folder as 'c3d_sports1m.h5'.

#### Libraries Used

* Keras : 2.2.0
* Tensorflow : 1.10.1
* Numpy : 1.14.5
* OpenCV : 3.3.0.10
* Scipy : 0.19.1
* Matplotlib : 2.0.2
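
#### Running the demo

A minimal way to run the bundled demo, assuming the library versions above are installed and the C3D weights are in place. The input video and output folder are taken from `configuration.py`; the script extracts C3D features per clip, scores a 32-segment bag with the trained classifier, and writes an annotated GIF to `./output`:

```
python test_detect.py
```

To score a different video, copy it into `./input` and point `sample_video_path` in `configuration.py` at it.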
--------------------------------------------------------------------------------
/__pycache__/c3d.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/__pycache__/c3d.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/classifier.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/__pycache__/classifier.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/configuration.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/__pycache__/configuration.cpython-36.pyc
--------------------------------------------------------------------------------
/__pycache__/parameters.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/__pycache__/parameters.cpython-36.pyc
--------------------------------------------------------------------------------
/c3d.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-

import keras.backend as K
from keras.models import Sequential
from keras.models import Model
from keras.layers.core import Dense, Dropout, Flatten
import configuration as cfg
from keras.layers.convolutional import Conv3D, MaxPooling3D, ZeroPadding3D
import numpy as np
from scipy.misc import imresize  # needs the pinned scipy (0.19.1); imresize was removed in scipy >= 1.3
from keras.utils.data_utils import get_file

C3D_MEAN_PATH = 'https://github.com/adamcasson/c3d/releases/download/v0.1/c3d_mean.npy'


def preprocess_input(video):
    # Sample 16 evenly spaced frames from the clip.
    intervals = np.ceil(np.linspace(0, video.shape[0] - 1, 16)).astype(int)
    frames = video[intervals]

    # Reshape to 128x171
    reshape_frames = np.zeros((frames.shape[0], 128, 171, frames.shape[3]))
    for i, img in enumerate(frames):
        img = imresize(img, (128, 171), 'bicubic')
        reshape_frames[i, :, :, :] = img

    mean_path = get_file('c3d_mean.npy',
                         C3D_MEAN_PATH,
                         cache_subdir='models',
                         md5_hash='08a07d9761e76097985124d9e8b2fe34')

    mean = np.load(mean_path)
    reshape_frames -= mean
    # Crop to 112x112
    reshape_frames = reshape_frames[:, 8:120, 30:142, :]
    # Add extra dimension for samples
    reshape_frames = np.expand_dims(reshape_frames, axis=0)

    return reshape_frames


def C3D(weights='sports1M'):

    if weights not in {'sports1M', None}:
        raise ValueError('weights should either be sports1M or None')

    if K.image_data_format() == 'channels_last':
        shape = (16, 112, 112, 3)
    else:
        shape = (3, 16, 112, 112)

    model = Sequential()
    model.add(Conv3D(64, 3, activation='relu', padding='same', name='conv1', input_shape=shape))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='same', name='pool1'))

    model.add(Conv3D(128, 3, activation='relu', padding='same', name='conv2'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool2'))

    model.add(Conv3D(256, 3, activation='relu', padding='same', name='conv3a'))
    model.add(Conv3D(256, 3, activation='relu', padding='same', name='conv3b'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool3'))

    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv4a'))
    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv4b'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool4'))

    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv5a'))
    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv5b'))
    model.add(ZeroPadding3D(padding=(0, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='pool5'))

    model.add(Flatten())

    model.add(Dense(4096, activation='relu', name='fc6'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(0.5))
    model.add(Dense(487, activation='softmax', name='fc8'))

    if weights == 'sports1M':
        model.load_weights(cfg.c3d_model_weights)

    return model


def c3d_feature_extractor():
    # The fc6 activations are used as the 4096-d clip features.
    model = C3D()
    layer_name = 'fc6'
    feature_extractor_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    return feature_extractor_model
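
# Hypothetical usage sketch (not part of the original file): extracting an fc6
# feature for a single 16-frame clip, assuming the Sports-1M weights from the
# README are present at cfg.c3d_model_weights.
#
#     clip = np.random.randint(0, 255, size=(16, 240, 320, 3)).astype(np.float32)
#     extractor = c3d_feature_extractor()
#     feature = extractor.predict(preprocess_input(clip))   # shape (1, 4096)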
--------------------------------------------------------------------------------
/c3d.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/c3d.pyc
--------------------------------------------------------------------------------
/classifier.py:
--------------------------------------------------------------------------------
import keras
import scipy.io as sio
from keras import Sequential
from keras.layers import Dense, Dropout
from keras.regularizers import l2

import configuration as cfg


def classifier_model():
    model = Sequential()
    model.add(Dense(512, input_dim=4096, kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001), activation='relu'))
    model.add(Dropout(0.6))
    model.add(Dense(32, kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001)))
    model.add(Dropout(0.6))
    model.add(Dense(1, kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001), activation='sigmoid'))
    return model


def build_classifier_model():
    model = classifier_model()
    model = load_weights(model, cfg.classifier_model_weigts)
    return model


def conv_dict(dict2):
    # Unwrap the per-layer arrays saved by scipy.io.savemat back into the
    # list-of-arrays format expected by layer.set_weights().
    converted = {}
    for i in range(len(dict2)):
        if str(i) in dict2:
            if dict2[str(i)].shape == (0, 0):
                converted[str(i)] = dict2[str(i)]
            else:
                weights = dict2[str(i)][0]
                weights2 = []
                for weight in weights:
                    if weight.shape in [(1, x) for x in range(0, 5000)]:
                        weights2.append(weight[0])
                    else:
                        weights2.append(weight)
                converted[str(i)] = weights2
    return converted


def load_weights(model, weights_file):
    dict2 = sio.loadmat(weights_file)
    converted = conv_dict(dict2)
    i = 0
    for layer in model.layers:
        weights = converted[str(i)]
        layer.set_weights(weights)
        i += 1
    return model


if __name__ == '__main__':
    model = build_classifier_model()
    model.summary()
--------------------------------------------------------------------------------
/classifier.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/classifier.pyc
--------------------------------------------------------------------------------
/configuration.py:
--------------------------------------------------------------------------------
c3d_model_weights = './trained_models/c3d_sports1m.h5'
classifier_model_weigts = './trained_models/weights_L1L2.mat'
classifier_model_json = './trained_models/model.json'

input_folder = './input'
output_folder = './output'

# Sample video for the demo; the repository ships ./input/Robbery056_x264.mp4.
sample_video_path = './input/Robbery056_x264.mp4'
--------------------------------------------------------------------------------
/configuration.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/configuration.pyc
--------------------------------------------------------------------------------
/input/Robbery056_x264.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/input/Robbery056_x264.mp4
--------------------------------------------------------------------------------
/output/Robbery056_x264.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/output/Robbery056_x264.gif
--------------------------------------------------------------------------------
/parameters.py:
--------------------------------------------------------------------------------
# Frame size expected by the pipeline
frame_height = 240
frame_width = 320
channels = 3

# Frames per C3D clip
frame_count = 16

# Segments (instances) per video bag
features_per_bag = 32
--------------------------------------------------------------------------------
/parameters.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/parameters.pyc
--------------------------------------------------------------------------------
/test_detect.py:
--------------------------------------------------------------------------------
import os
from c3d import *
from classifier import *
from utils.visualization_util import *


def run_demo():

    video_name = os.path.basename(cfg.sample_video_path).split('.')[0]

    # read video
    video_clips, num_frames = get_video_clips(cfg.sample_video_path)

    print("Number of clips in the video : ", len(video_clips))

    # build models
    feature_extractor = c3d_feature_extractor()
    classifier_model = build_classifier_model()

    print("Models initialized")

    # extract features
    rgb_features = []
    for i, clip in enumerate(video_clips):
        clip = np.array(clip)
        if len(clip) < params.frame_count:
            continue

        clip = preprocess_input(clip)
        rgb_feature = feature_extractor.predict(clip)[0]
        rgb_features.append(rgb_feature)

        print("Processed clip : ", i)

    rgb_features = np.array(rgb_features)

    # bag features
    rgb_feature_bag = interpolate(rgb_features, params.features_per_bag)

    # classify using the trained classifier model
    predictions = classifier_model.predict(rgb_feature_bag)

    predictions = np.array(predictions).squeeze()

    predictions = extrapolate(predictions, num_frames)

    save_path = os.path.join(cfg.output_folder, video_name + '.gif')
    # visualize predictions
    print('Executed Successfully - ' + video_name + '.gif saved')
    visualize_predictions(cfg.sample_video_path, predictions, save_path)


if __name__ == '__main__':
    run_demo()
--------------------------------------------------------------------------------
/trained_models/model.json:
--------------------------------------------------------------------------------
{"class_name": "Sequential", "keras_version": "2.2.3", "config": {"layers": [{"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "normal", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_4", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "dtype": "float32", "activation": "relu", "trainable": true, "kernel_regularizer": {"class_name": "L1L2", "config": {"l2": 0.0010000000474974513, "l1": 0.0}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 512, "batch_input_shape": [null, 4096], "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dropout", "config": {"rate": 0.6, "noise_shape": null, "trainable": true, "seed": null, "name": "dropout_3"}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "normal", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_5", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "linear", "trainable": true, "kernel_regularizer": {"class_name": "L1L2", "config": {"l2": 0.0010000000474974513, "l1": 0.0}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 32, "use_bias": true, "activity_regularizer": null}}, {"class_name": "Dropout", "config": {"rate": 0.6, "noise_shape": null, "trainable": true, "seed": null, "name": "dropout_4"}}, {"class_name": "Dense", "config": {"kernel_initializer": {"class_name": "VarianceScaling", "config": {"distribution": "normal", "scale": 1.0, "seed": null, "mode": "fan_avg"}}, "name": "dense_6", "kernel_constraint": null, "bias_regularizer": null, "bias_constraint": null, "activation": "sigmoid", "trainable": true, "kernel_regularizer": {"class_name": "L1L2", "config": {"l2": 0.0010000000474974513, "l1": 0.0}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "units": 1, "use_bias": true, "activity_regularizer": null}}], "name": "sequential_2"}, "backend": "theano"}
--------------------------------------------------------------------------------
/trained_models/weights.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/trained_models/weights.mat
--------------------------------------------------------------------------------
/trained_models/weights_L1L2.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/trained_models/weights_L1L2.mat
--------------------------------------------------------------------------------
/utils/array_util.py:
--------------------------------------------------------------------------------
import numpy as np


def sliding_window(arr, size, stride):
    num_chunks = int((len(arr) - size) / stride) + 2
    result = []
    for i in range(0, num_chunks * stride, stride):
        if len(arr[i:i + size]) > 0:
            result.append(arr[i:i + size])
    return np.array(result)


def chunks(l, n):
    for i in range(0, len(l), n):
        yield l[i:i + n]


def interpolate(features, features_per_bag):
    # Average the per-clip features into a fixed number of temporal segments
    # (the 32-instance bag used by the classifier) and L2-normalise each segment.
    feature_size = np.array(features).shape[1]
    interpolated_features = np.zeros((features_per_bag, feature_size))
    interpolation_indicies = np.round(np.linspace(0, len(features) - 1, num=features_per_bag + 1))
    count = 0
    for index in range(0, len(interpolation_indicies) - 1):
        start = int(interpolation_indicies[index])
        end = int(interpolation_indicies[index + 1])

        assert end >= start

        if start == end:
            temp_vect = features[start, :]
        else:
            temp_vect = np.mean(features[start:end + 1, :], axis=0)

        # Guard against a zero-norm segment before normalising.
        if np.linalg.norm(temp_vect) == 0:
            print("Error")
        else:
            temp_vect = temp_vect / np.linalg.norm(temp_vect)

        interpolated_features[count, :] = temp_vect
        count = count + 1

    return np.array(interpolated_features)


def extrapolate(outputs, num_frames):
    # Spread the per-segment scores back out so that every frame gets a score.
    extrapolated_outputs = []
    extrapolation_indicies = np.round(np.linspace(0, len(outputs) - 1, num=num_frames))
    for index in extrapolation_indicies:
        extrapolated_outputs.append(outputs[int(index)])
    return np.array(extrapolated_outputs)


def test_interpolate():
    test_case1 = np.random.randn(24, 2048)
    output_case1 = interpolate(test_case1, 32)
    assert output_case1.shape == (32, 2048)

    test_case2 = np.random.randn(32, 2048)
    output_case2 = interpolate(test_case2, 32)
    assert output_case2.shape == (32, 2048)

    test_case3 = np.random.randn(42, 2048)
    output_case3 = interpolate(test_case3, 32)
    assert output_case3.shape == (32, 2048)
--------------------------------------------------------------------------------
/utils/array_util.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/utils/array_util.pyc
--------------------------------------------------------------------------------
/utils/video_util.py:
--------------------------------------------------------------------------------
from utils.array_util import *
import parameters as params
import cv2


def get_video_clips(video_path):
    frames = get_video_frames(video_path)
    clips = sliding_window(frames, params.frame_count, params.frame_count)
    return clips, len(frames)


def get_video_frames(video_path):
    cap = cv2.VideoCapture(video_path)
    frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        else:
            break
    cap.release()  # free the capture handle once all frames are read
    return frames
--------------------------------------------------------------------------------
/utils/video_util.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/utils/video_util.pyc
--------------------------------------------------------------------------------
/utils/visualization_util.py:
--------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from utils.video_util import *


def visualize_clip(clip, convert_bgr=False, save_gif=False, file_path=None):
    num_frames = len(clip)
    fig, ax = plt.subplots()
    fig.set_tight_layout(True)

    def update(i):
        if convert_bgr:
            frame = cv2.cvtColor(clip[i], cv2.COLOR_BGR2RGB)
        else:
            frame = clip[i]
        plt.imshow(frame)
        return plt

    # FuncAnimation calls 'update' once for every frame of the clip.
    anim = FuncAnimation(fig, update, frames=np.arange(0, num_frames), interval=1)
    if save_gif:
        anim.save(file_path, dpi=80, writer='imagemagick')
    else:
        # plt.show() will just loop the animation forever.
        plt.show()


def visualize_predictions(video_path, predictions, save_path):
    frames = get_video_frames(video_path)
    assert len(frames) == len(predictions)

    fig, ax = plt.subplots(figsize=(5, 5))
    fig.set_tight_layout(True)

    fig_frame = plt.subplot(2, 1, 1)
    fig_prediction = plt.subplot(2, 1, 2)
    fig_prediction.set_xlim(0, len(frames))
    fig_prediction.set_ylim(0, 1.15)

    def update(i):
        frame = frames[i]
        x = range(0, i)
        y = predictions[0:i]
        fig_prediction.plot(x, y, '-')
        fig_frame.imshow(frame)
        return plt

    # FuncAnimation calls 'update' for every 10th frame, drawing the frame and
    # the anomaly-score curve up to that point.
    anim = FuncAnimation(fig, update, frames=np.arange(0, len(frames), 10), interval=1, repeat=False)

    if save_path:
        anim.save(save_path, dpi=200, writer='imagemagick')
    else:
        plt.show()

    return
--------------------------------------------------------------------------------
/utils/visualization_util.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ShreyasArthur/AbnormalEventDetection/9400d98574f014592b44c1f0c0ef2024f9c17dee/utils/visualization_util.pyc
--------------------------------------------------------------------------------