├── requirements.txt ├── README.md ├── LICENSE ├── .gitignore └── main.py /requirements.txt: -------------------------------------------------------------------------------- 1 | opencv-contrib-python==4.1.1 2 | numpy==1.17.0 3 | scikit-learn==0.21.3 4 | scipy==1.3.1 5 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Fall-Detection 2 | #### This is a program to detect fall and not-fall events using SVM. 3 | 4 | For running successfully: 5 | 6 | 1. Define the path to save the extracted features of fall and not-fall events from train videos. 7 | 8 | 2. Define the path to the train videos. 9 | 10 | 3. Define the paths to all extracted fall and not-fall features (new and old). 11 | 12 | 4. Define the path to save the trained SVM. 13 | 14 | #### Now we can use the trained svm to detect fall and not-fall #### 15 | 16 | 5. Define the path to the trained svm. 17 | 18 | 6. Define the path to a test video. 19 | 20 | To extract the features of fall and not-fall events, press the 'z' key. 21 | 22 | #### Result #### 23 | In the video below we can see one output of our method. The dataset to train and test the model has been collected by Mr. Farahnezhad, as we see him in the video. Moreover, he has contributed to the development of the code. 24 | 25 | Thanks Mr. Farahnezhad... 
26 | 27 | https://user-images.githubusercontent.com/62461020/122333347-3eebbc00-cf4d-11eb-95a9-a6d4cd8a05d8.mp4 28 | 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 hassancpu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 
92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | """" 2 | The main file for the fall detection task 3 | 4 | Written by Hassan Keshvari Khojasteh 5 | 6 | """ 7 | 8 | """ Fist let import the necessary library""" 9 | 10 | import math 11 | import numpy as np 12 | import cv2.cv2 as cv2 13 | from sklearn.externals import joblib 14 | from scipy.ndimage.interpolation import shift 15 | from matplotlib import pyplot as plt 16 | from sklearn.metrics import classification_report, confusion_matrix 17 | from sklearn.model_selection import KFold 18 | from sklearn.model_selection import cross_val_score 19 | from sklearn.model_selection import train_test_split 20 | from sklearn.svm import SVC 21 | 22 | 23 | 24 | class falldetection(): 25 | """ 26 | Feature Extracting for training SVM 27 | 28 | """ 29 | 30 | def features_extractor(self,path_to_video, path_to_save): 31 | """ 32 | This Function save Extracted features from desired video with shape of (number_of_fall or not_fall ,3*25) 33 | 34 | path_to_video---the path to desired video for extracting features 35 | path_to_save---the path to save extracted features 36 | 37 | return 38 | """ 39 | 40 | 41 | """ define the necessary parameters""" 42 | count = 0 43 | framenumber = 
0 44 | mat3 = 0 45 | cnt = 30 46 | mat1 = np.zeros(10) 47 | mat2 = np.zeros(10) 48 | features = np.zeros([3, 25]) 49 | features_temp = [] 50 | features_all = [] 51 | font = cv2.FONT_HERSHEY_SIMPLEX 52 | 53 | """capturing video from Webcam or from Computer""" 54 | 55 | cap = cv2.VideoCapture(path_to_video) 56 | fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=5000, nmixtures=5, backgroundRatio=0.1) 57 | 58 | while True: 59 | # Reading the famres of Video 60 | ret, frame = cap.read() 61 | if ret == False: 62 | break 63 | if cnt < 25: 64 | cv2.putText(frame, 'feature extraction', (150, 50), font, 1, (0, 0, 255), 3, cv2.LINE_AA) 65 | cv2.imshow('frame', frame) 66 | 67 | # Blur the readed frame with lowpass median filter 68 | frame1 = cv2.medianBlur(frame, 11) 69 | # apply MOG background subtractor 70 | fgmask = fgbg.apply(frame1) 71 | kernel = np.ones((5, 5), np.uint8) 72 | # apply dilation filter 73 | dilation = cv2.dilate(fgmask, kernel, iterations=14) 74 | a = cv2.dilate(fgmask, kernel, iterations=14) 75 | framenumber = framenumber + 1 76 | ret, threshed_img = cv2.threshold(a, 127, 255, 77 | cv2.THRESH_BINARY) 78 | # find contours in the frame 79 | contours, hierarchy = cv2.findContours(threshed_img, cv2.RETR_TREE, 80 | cv2.CHAIN_APPROX_SIMPLE) 81 | 82 | # if the z key pressed, start feature extraction 83 | p = cv2.waitKey(1) & 0xff 84 | if p == ord('z'): 85 | cnt = 0 86 | 87 | if len(contours) != 0 and cnt < 25: 88 | 89 | # find the biggest area 90 | c = max(contours, key=cv2.contourArea) 91 | # Extract the height to width ratio and append the value to features_temp 92 | x, y, w, h = cv2.boundingRect(c) 93 | 94 | cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) 95 | hw = h / w 96 | features_temp.append(hw) 97 | # Extract the angle and append absolute value of it to features_temp 98 | rows, cols = a.shape[:2] 99 | [vx, vy, x, y] = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01) 100 | lefty = int((-x * vy / vx) + y) 101 | righty = int(((cols - x) * vy / 
vx) + y) 102 | 103 | if (lefty - righty) != 0: 104 | x = math.atan(1 / ((cols - 1) / (lefty - righty))) 105 | x1 = 180 * x / math.pi 106 | features_temp.append(abs(x1)) 107 | # Extract the momentum and add it's value to features_temp 108 | M = cv2.moments(c) 109 | 110 | cx = int(M['m10'] / M['m00']) 111 | cy = int(M['m01'] / M['m00']) 112 | 113 | mat1[framenumber % 10] = cx 114 | mat2[framenumber % 10] = cy 115 | mat4 = mat2[5] - mat2[0] 116 | for i in range(9): 117 | mat3 = mat3 + np.sqrt(((mat1[i + 1] - mat1[i]) * (mat1[i + 1] - mat1[i])) + ( 118 | (mat2[i + 1] - mat2[i]) * (mat2[i + 1] - mat2[i]))) 119 | 120 | features_temp.append(mat3) 121 | # put features_temp in features columns until the 25'th frame 122 | if cnt < 25 and len(features_temp) != 0: 123 | features[:, cnt] = np.reshape(np.asarray(features_temp), [3]) 124 | cnt += 1 125 | # append the extracted features of 25 frames to features_all 126 | if cnt == 25: 127 | features_all.append(features) 128 | features = np.zeros([3, 25]) 129 | cnt = 30 130 | 131 | # back the value of features_temp and mat3 to their initial values 132 | features_temp = [] 133 | mat3 = 0 134 | 135 | cap.release() 136 | cv2.destroyAllWindows() 137 | 138 | # Convert the list to numpy array and the final array will have the shape of (number_of_fall,3,25) and is suitable to train SVM 139 | features_all = np.asarray(features_all) 140 | features_all = features_all.reshape([len(features_all), 3 * 25]) 141 | # Saving the extracted features 142 | np.savetxt(path_to_save, features_all, delimiter=',') 143 | return 144 | 145 | 146 | """" 147 | Training svm with the extracted features of our dataset that is created by Mr.Farahnejad 148 | 149 | """ 150 | 151 | def train_svm(self,path_to_old_fall_features, path_to_old_not_fall_features, path_to_new_fall_features, 152 | path_to_new_not_fall_features, path_to_save_trained_SVM): 153 | """ 154 | path_to_old_fall_features---the path to old fall features extracted 155 | path_to_old_not_fall_features--- 
the path to old not fall features extracted 156 | path_to_new_fall_features---the path to new fall features extracted 157 | path_to_new_not_fall_features--- the path to new not fall features extracted 158 | path_to_save_trained_SVM---the path to save trained svm 159 | return 160 | this function will print the accuracy in Train set data and the accuracy in Test set data and finally 161 | the cross_validation_accuracy 162 | """ 163 | 164 | 165 | # loading the new extracted features of fall and not_fall windows 166 | 167 | fall_new = np.loadtxt(path_to_new_fall_features, delimiter=',') 168 | label_new_fall = np.ones([np.shape(fall_new)[0], 1], dtype=np.int16) 169 | 170 | not_fall_new = np.loadtxt(path_to_new_not_fall_features, delimiter=',') 171 | label_new_not_fall = np.zeros([np.shape(not_fall_new)[0], 1], dtype=np.int16) 172 | 173 | # loading the old extracted features of fall and not_fall windows 174 | 175 | fall_old = np.loadtxt(path_to_old_fall_features, delimiter=',') 176 | label_old_fall = np.ones([np.shape(fall_old)[0], 1], dtype=np.int16) 177 | 178 | not_fall_old = np.loadtxt(path_to_old_not_fall_features, delimiter=',') 179 | label_old_not_fall = np.zeros([np.shape(not_fall_old)[0], 1], dtype=np.int16) 180 | 181 | # Concatenate the new and old features to train svm 182 | var = np.concatenate([fall_old, not_fall_old, fall_new, not_fall_new], axis=0) 183 | targ = np.concatenate([label_old_fall, label_old_not_fall, label_new_fall, label_new_not_fall], axis=0) 184 | 185 | # split data to train and test 186 | var_train, var_test, targ_train, targ_test = train_test_split(var, targ, train_size=0.7, 187 | random_state=0, shuffle=True) 188 | 189 | # hard margin,linearkernel 190 | 191 | svclassifier = SVC(kernel='linear', C=10000) 192 | svclassifier.fit(var_train, targ_train) 193 | y_pred = svclassifier.predict(var_train) 194 | print(classification_report(targ_train, y_pred)) 195 | 196 | # test error 197 | y_pred_test_li = svclassifier.predict(var_test) 198 | 
print(classification_report(targ_test, y_pred_test_li)) 199 | 200 | # cross validation score 201 | 202 | svclassifier_c = SVC(kernel='linear', C=10000) 203 | print(np.mean(cross_val_score(svclassifier_c, var, targ, cv=2))) 204 | 205 | # save the trained svm 206 | _ = joblib.dump(svclassifier, path_to_save_trained_SVM, compress=9) 207 | return 208 | 209 | 210 | """ 211 | Fall detection using the trained SVM 212 | 213 | """ 214 | 215 | def fall_webcam(self,path_svm, path_video): 216 | """ 217 | inputs 218 | 219 | path_svm---the path to trained svm 220 | path_video---the path to the video 221 | 222 | return 223 | """ 224 | 225 | 226 | """ import the trained SVM""" 227 | svm = joblib.load(path_svm) 228 | """ define the necessary parameters""" 229 | count = 0 230 | features_temp = [] 231 | labels = [] 232 | features = np.zeros([3, 25]) 233 | framenumber = 0 234 | mat3 = 0 235 | cnt = 0 236 | h = 0 237 | mat1 = np.zeros(10) 238 | mat2 = np.zeros(10) 239 | 240 | """capturing video from from Computer""" 241 | 242 | cap = cv2.VideoCapture(path_video) 243 | 244 | fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=5000, nmixtures=5, backgroundRatio=0.1) 245 | font = cv2.FONT_HERSHEY_SIMPLEX 246 | 247 | while True: 248 | # Reading the famres of Video 249 | ret, frame = cap.read() 250 | 251 | if ret == False: 252 | break 253 | # Blur the readed frame with lowpass median filter 254 | frame1 = cv2.medianBlur(frame, 11) 255 | if h == 1: 256 | cv2.putText(frame, 'FALL', (180, 80), font, 2, (0, 0, 255), 10, cv2.LINE_AA) 257 | h = 0 258 | # apply MOG background subtractor 259 | fgmask = fgbg.apply(frame1) 260 | kernel = np.ones((5, 5), np.uint8) 261 | # apply dilation filter 262 | dilation = cv2.dilate(fgmask, kernel, iterations=14) 263 | a = cv2.dilate(fgmask, kernel, iterations=14) 264 | framenumber = framenumber + 1 265 | ret, threshed_img = cv2.threshold(a, 127, 255, 266 | cv2.THRESH_BINARY) 267 | # find contours in the frame 268 | contours, hierarchy = 
cv2.findContours(threshed_img, cv2.RETR_TREE, 269 | cv2.CHAIN_APPROX_SIMPLE) 270 | 271 | if len(contours) != 0: 272 | 273 | # find the biggest area 274 | c = max(contours, key=cv2.contourArea) 275 | # Extract the height to eidth ratio and append the value to features_temp 276 | x, y, w, h = cv2.boundingRect(c) 277 | 278 | cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) 279 | hw = h / w 280 | features_temp.append(hw) 281 | # Extract the angle and append absolute value of it to features_temp 282 | rows, cols = a.shape[:2] 283 | [vx, vy, x, y] = cv2.fitLine(c, cv2.DIST_L2, 0, 0.01, 0.01) 284 | lefty = int((-x * vy / vx) + y) 285 | righty = int(((cols - x) * vy / vx) + y) 286 | 287 | if (lefty - righty) != 0: 288 | x = math.atan(1 / ((cols - 1) / (lefty - righty))) 289 | x1 = 180 * x / math.pi 290 | features_temp.append(abs(x1)) 291 | else: 292 | features_temp.append(0) 293 | 294 | # Extract the momentum and add it's value to features_temp 295 | M = cv2.moments(c) 296 | 297 | cx = int(M['m10'] / M['m00']) 298 | cy = int(M['m01'] / M['m00']) 299 | 300 | mat1[framenumber % 10] = cx 301 | mat2[framenumber % 10] = cy 302 | mat4 = mat2[5] - mat2[0] 303 | for i in range(9): 304 | mat3 = mat3 + np.sqrt(((mat1[i + 1] - mat1[i]) * (mat1[i + 1] - mat1[i])) + ( 305 | (mat2[i + 1] - mat2[i]) * (mat2[i + 1] - mat2[i]))) 306 | 307 | features_temp.append(mat3) 308 | # put features_temp in features columns until the 25'th frame 309 | if cnt < 25 and len(features_temp) != 0: 310 | features[:, cnt] = np.reshape(np.asarray(features_temp), [3]) 311 | cnt += 1 312 | # shift the features to left and add the new features_temp in last colum of features 313 | if cnt > 25 and len(features_temp) != 0: 314 | features = shift(features, [0, -1]) 315 | features[:, 24] = np.reshape(np.asarray(features_temp), [3]) 316 | # predict the label of 25 frames by trained svm 317 | if cnt > 24 and len(contours) != 0: 318 | features_input = np.reshape(features, [1, 3 * 25]) 319 | label = 
svm.predict(features_input) 320 | labels.append(label) 321 | # if predicted label is 1(fall) the put text fall in the current frame 322 | if label == 1: 323 | cv2.putText(frame, 'FALL', (180, 80), font, 2, (0, 0, 255), 10, cv2.LINE_AA) 324 | h = label 325 | # show the current frame 326 | cv2.imshow('frame1', frame) 327 | # out.write(frame) 328 | # back the value of features_temp and mat3 to their initial values 329 | features_temp = [] 330 | mat3 = 0 331 | k = cv2.waitKey(10) & 0xff 332 | if k == 27: 333 | break 334 | 335 | cap.release() 336 | cv2.destroyAllWindows() 337 | return 338 | 339 | 340 | 341 | def main(): 342 | 343 | fall = falldetection() 344 | 345 | ############## First Extract the defined features of video ############################################ 346 | path_to_save = './etracted_new_fall.txt' 347 | path_to_video = './fall1.mp4' 348 | fall.features_extractor(path_to_video, path_to_save) 349 | 350 | path_to_save = './etracted_new_not_fall.txt' 351 | path_to_video = './fall1.mp4' 352 | fall.features_extractor(path_to_video, path_to_save) 353 | 354 | ############## Train the SVM with extracted features ################################################## 355 | path_to_new_fall_features = './etracted_new_fall.txt' 356 | path_to_new_not_fall_features = './etracted_new_not_fall.txt' 357 | path_to_old_fall_features = './etracted_old_fall.txt' 358 | path_to_old_not_fall_features = './etracted_old_not_fall.txt' 359 | path_to_save_trained_SVM = './trained_svm.joblib1.pkl' 360 | 361 | fall.train_svm(path_to_old_fall_features, path_to_old_not_fall_features, path_to_new_fall_features, 362 | path_to_new_not_fall_features, path_to_save_trained_SVM) 363 | 364 | ############## Detect the Fall with trined SVM ################################################## 365 | path_svm = './trained_svm.joblib.pkl' 366 | path_video = './fall2.mp4' 367 | 368 | fall.fall_webcam(path_svm, path_video) 369 | 370 | if __name__=='__main__': 371 | main () 372 | 373 | 374 | 375 | 376 | 377 
| 378 | 379 | --------------------------------------------------------------------------------