├── Codes
│   ├── etc
│   │   ├── [Prep]_ch_05_Save_C3DFeatures_32Segments.py
│   │   ├── [Prep]_sy_03_TrainTest_Code.ipynb
│   │   └── anomaly.ipynb
│   ├── 모델학습
│   │   └── [Prep]_sb_07_TrainTest_Code.ipynb
│   └── 전처리
│       ├── [Prep]_ch_01_Save_C3DFeatures_32Segments.py
│       ├── [Prep]_mj_01_check_video_size.ipynb
│       ├── [Prep]_mj_02_C3DV0_exe.ipynb
│       ├── [Prep]_mj_03_Save_C3DFeatures_32Segments.ipynb
│       ├── [Prep]_mj_04_코드비교_Save_C3DFeatures_32Segments.ipynb
│       ├── [Prep]_sb_06_c3dfeature.ipynb
│       ├── [Prep]_sy_01_C3D_with_colab.ipynb
│       └── [Prep]_sy_02_C3DV0.ipynb
├── Outputs
│   └── c3d
│       ├── abnormal
│       │   ├── abnormal_video_001_C.txt
│       │   ├── abnormal_video_002_C.txt
│       │   ├── abnormal_video_003_C.txt
│       │   ├── abnormal_video_004_C.txt
│       │   ├── abnormal_video_005_C.txt
│       │   ├── abnormal_video_006_C.txt
│       │   ├── abnormal_video_007_C.txt
│       │   ├── abnormal_video_008_C.txt
│       │   ├── abnormal_video_009_C.txt
│       │   ├── abnormal_video_010_C.txt
│       │   ├── abnormal_video_011_C.txt
│       │   ├── abnormal_video_012_C.txt
│       │   ├── abnormal_video_013_C.txt
│       │   ├── abnormal_video_014_C.txt
│       │   ├── abnormal_video_015_C.txt
│       │   ├── abnormal_video_016_C.txt
│       │   ├── abnormal_video_017_C.txt
│       │   ├── abnormal_video_018_C.txt
│       │   ├── abnormal_video_019_C.txt
│       │   ├── abnormal_video_020_C.txt
│       │   ├── abnormal_video_021_C.txt
│       │   ├── abnormal_video_022_C.txt
│       │   ├── abnormal_video_023_C.txt
│       │   ├── abnormal_video_024_C.txt
│       │   ├── abnormal_video_025_C.txt
│       │   ├── abnormal_video_026_C.txt
│       │   ├── abnormal_video_027_C.txt
│       │   ├── abnormal_video_028_C.txt
│       │   ├── abnormal_video_029_C.txt
│       │   ├── abnormal_video_030_C.txt
│       │   ├── abnormal_video_031_C.txt
│       │   ├── abnormal_video_032_C.txt
│       │   ├── abnormal_video_035_C.txt
│       │   ├── abnormal_video_036_C.txt
│       │   ├── abnormal_video_037_C.txt
│       │   ├── abnormal_video_038_C.txt
│       │   ├── abnormal_video_039_C.txt
│       │   ├── abnormal_video_040_C.txt
│       │   ├── abnormal_video_041_C.txt
│       │   ├── abnormal_video_042_C.txt
│       │   ├── abnormal_video_043_C.txt
│       │   └── abnormal_video_044_C.txt
│       ├── normal
│       │   ├── normal_video_001_C.txt
│       │   ├── normal_video_002_C.txt
│       │   ├── normal_video_003_C.txt
│       │   ├── normal_video_004_C.txt
│       │   ├── normal_video_005_C.txt
│       │   ├── normal_video_006_C.txt
│       │   ├── normal_video_007_C.txt
│       │   ├── normal_video_008_C.txt
│       │   ├── normal_video_009_C.txt
│       │   ├── normal_video_010_C.txt
│       │   ├── normal_video_011_C.txt
│       │   ├── normal_video_012_C.txt
│       │   ├── normal_video_013_C.txt
│       │   ├── normal_video_014_C.txt
│       │   ├── normal_video_015_C.txt
│       │   ├── normal_video_016_C.txt
│       │   ├── normal_video_017_C.txt
│       │   ├── normal_video_018_C.txt
│       │   ├── normal_video_019_C.txt
│       │   ├── normal_video_020_C.txt
│       │   ├── normal_video_021_C.txt
│       │   ├── normal_video_022_C.txt
│       │   ├── normal_video_023_C.txt
│       │   ├── normal_video_024_C.txt
│       │   ├── normal_video_033_C.txt
│       │   ├── normal_video_034_C.txt
│       │   ├── normal_video_035_C.txt
│       │   ├── normal_video_036_C.txt
│       │   ├── normal_video_037_C.txt
│       │   ├── normal_video_038_C.txt
│       │   ├── normal_video_039_C.txt
│       │   ├── normal_video_040_C.txt
│       │   ├── normal_video_041_C.txt
│       │   └── normal_video_042_C.txt
│       ├── sultani_abnormal
│       │   ├── Abuse001_x264_C.txt
│       │   ├── Abuse002_x264_C.txt
│       │   ├── Abuse003_x264_C.txt
│       │   ├── Abuse004_x264_C.txt
│       │   ├── Abuse005_x264_C.txt
│       │   ├── Abuse006_x264_C.txt
│       │   ├── Abuse007_x264_C.txt
│       │   ├── Abuse008_x264_C.txt
│       │   ├── Abuse009_x264_C.txt
│       │   ├── Abuse010_x264_C.txt
│       │   ├── Abuse011_x264_C.txt
│       │   ├── Abuse012_x264_C.txt
│       │   ├── Abuse013_x264_C.txt
│       │   ├── Abuse014_x264_C.txt
│       │   ├── Abuse015_x264_C.txt
│       │   ├── Abuse016_x264_C.txt
│       │   ├── Abuse017_x264_C.txt
│       │   ├── Abuse018_x264_C.txt
│       │   ├── Abuse019_x264_C.txt
│       │   ├── Abuse020_x264_C.txt
│       │   ├── Abuse021_x264_C.txt
│       │   ├── Abuse022_x264_C.txt
│       │   ├── Abuse023_x264_C.txt
│       │   ├── Abuse024_x264_C.txt
│       │   ├── Abuse025_x264_C.txt
│       │   ├── Abuse026_x264_C.txt
│       │   ├── Abuse027_x264_C.txt
│       │   ├── Abuse028_x264_C.txt
│       │   ├── Abuse029_x264_C.txt
│       │   ├── Abuse030_x264_C.txt
│       │   ├── Abuse031_x264_C.txt
│       │   ├── Abuse032_x264_C.txt
│       │   ├── Abuse033_x264_C.txt
│       │   ├── Abuse034_x264_C.txt
│       │   ├── Abuse035_x264_C.txt
│       │   ├── Abuse036_x264_C.txt
│       │   ├── Abuse037_x264_C.txt
│       │   ├── Abuse038_x264_C.txt
│       │   ├── Abuse039_x264_C.txt
│       │   ├── Abuse040_x264_C.txt
│       │   ├── Abuse041_x264_C.txt
│       │   ├── Abuse042_x264_C.txt
│       │   ├── Abuse043_x264_C.txt
│       │   ├── Abuse044_x264_C.txt
│       │   ├── Abuse045_x264_C.txt
│       │   ├── Abuse046_x264_C.txt
│       │   ├── Abuse047_x264_C.txt
│       │   ├── Abuse048_x264_C.txt
│       │   ├── Abuse049_x264_C.txt
│       │   └── Abuse050_x264_C.txt
│       └── sultani_normal
│           ├── Normal_Videos008_x264_C.txt
│           ├── Normal_Videos009_x264_C.txt
│           ├── Normal_Videos011_x264_C.txt
│           ├── Normal_Videos012_x264_C.txt
│           ├── Normal_Videos013_x264_C.txt
│           ├── Normal_Videos016_x264_C.txt
│           ├── Normal_Videos017_x264_C.txt
│           ├── Normal_Videos020_x264_C.txt
│           ├── Normal_Videos021_x264_C.txt
│           ├── Normal_Videos022_x264_C.txt
│           ├── Normal_Videos023_x264_C.txt
│           ├── Normal_Videos026_x264_C.txt
│           ├── Normal_Videos028_x264_C.txt
│           ├── Normal_Videos029_x264_C.txt
│           ├── Normal_Videos030_x264_C.txt
│           ├── Normal_Videos031_x264_C.txt
│           ├── Normal_Videos032_x264_C.txt
│           ├── Normal_Videos035_x264_C.txt
│           ├── Normal_Videos036_x264_C.txt
│           ├── Normal_Videos037_x264_C.txt
│           ├── Normal_Videos038_x264_C.txt
│           ├── Normal_Videos039_x264_C.txt
│           ├── Normal_Videos040_x264_C.txt
│           ├── Normal_Videos043_x264_C.txt
│           ├── Normal_Videos044_x264_C.txt
│           ├── Normal_Videos045_x264_C.txt
│           ├── Normal_Videos046_x264_C.txt
│           ├── Normal_Videos047_x264_C.txt
│           ├── Normal_Videos049_x264_C.txt
│           ├── Normal_Videos052_x264_C.txt
│           ├── Normal_Videos053_x264_C.txt
│           ├── Normal_Videos054_x264_C.txt
│           ├── Normal_Videos055_x264_C.txt
│           ├── Normal_Videos057_x264_C.txt
│           ├── Normal_Videos058_x264_C.txt
│           ├── Normal_Videos060_x264_C.txt
│           ├── Normal_Videos061_x264_C.txt
│           ├── Normal_Videos062_x264_C.txt
│           ├── Normal_Videos064_x264_C.txt
│           ├── Normal_Videos065_x264_C.txt
│           ├── Normal_Videos066_x264_C.txt
│           ├── Normal_Videos068_x264_C.txt
│           ├── Normal_Videos069_x264_C.txt
│           └── Normal_Videos071_x264_C.txt
├── README.md
└── web
    ├── Pipfile
    ├── Pipfile.lock
    └── yonomAIproject
        ├── Pipfile
        ├── Pipfile.lock
        ├── db.sqlite3
        ├── manage.py
        ├── post
        │   ├── __init__.py
        │   ├── admin.py
        │   ├── apps.py
        │   ├── migrations
        │   │   ├── 0001_initial.py
        │   │   ├── 0002_auto_20200911_1157.py
        │   │   ├── 0003_auto_20200917_1208.py
        │   │   ├── 0004_auto_20200917_1214.py
        │   │   ├── 0005_auto_20200917_1216.py
        │   │   ├── 0006_mat.py
        │   │   ├── 0007_mat_title.py
        │   │   ├── 0008_video_content_title.py
        │   │   └── __init__.py
        │   ├── models.py
        │   ├── static
        │   │   └── post
        │   │       ├── about2.css
        │   │       ├── home.css
        │   │       ├── home2.css
        │   │       ├── home3.css
        │   │       ├── home4.css
        │   │       ├── image
        │   │       │   ├── backimage.png
        │   │       │   ├── ch.jpg
        │   │       │   ├── howtoc3d.jpg
        │   │       │   ├── howtowork.jpg
        │   │       │   ├── mj.jpg
        │   │       │   ├── sb.jpg
        │   │       │   └── sy.jpg
        │   │       ├── made_by.css
        │   │       └── result.css
        │   ├── templates
        │   │   └── post
        │   │       ├── about.html
        │   │       ├── about2.html
        │   │       ├── about3.html
        │   │       ├── error.html
        │   │       ├── home.html
        │   │       ├── home2.html
        │   │       ├── home3.html
        │   │       ├── home4.html
        │   │       ├── inserted.html
        │   │       ├── made_by.html
        │   │       └── result.html
        │   ├── tests.py
        │   ├── urls.py
        │   └── views.py
        ├── static
        │   └── cover.css
        ├── templates
        │   └── base.html
        └── yonomAIproject
            ├── __init__.py
            ├── asgi.py
            ├── settings.py
            ├── urls.py
            └── wsgi.py

--------------------------------------------------------------------------------
/Codes/etc/[Prep]_ch_05_Save_C3DFeatures_32Segments.py:
--------------------------------------------------------------------------------
1 | # clc()  ## MATLAB leftover: clears the command window
2 | #-*- encoding: utf8 -*-
3 | import os
4 | import numpy as np
5 | import struct
6 | # clear(mstring('all'))  # MATLAB leftover
7 | # close(mstring('all'))  # MATLAB leftover
8 | 
9 | # This code saves already computed C3D features as 32 segments per video.
10 | # We assume that C3D features for a video are already computed. We use the
11 | # default settings for computing C3D features, i.e., we compute C3D features
12 | # for every 16 frames and take the features from the fc6 layer.
13 | 
14 | 
15 | 
16 | def read_bin(input_file):
17 |     # input_file = open(r'.\normal_video_001\000032.fc6-1','rb')
18 |     input_file = open(input_file, 'rb')
19 |     try:
20 |         sizes = [struct.unpack('i', input_file.read(4))[0] for i in range(5)]  # header: five int32 blob dimensions
21 |         m = np.prod(sizes)
22 |         data = [struct.unpack('f', input_file.read(4))[0] for i in range(m)]  # then m float32 feature values
23 |     finally:
24 |         input_file.close()
25 |     feature_vector = np.array(data)
26 | 
27 |     return feature_vector, feature_vector.shape
28 | 
29 | C3D_Path = r'C:\Users\seong\Desktop\32segment\normal_output'
30 | C3D_Path_Seg = r'C:\Users\seong\Desktop\32segment\output'
31 | 
32 | if not os.path.isdir(C3D_Path_Seg):
33 |     os.mkdir(C3D_Path_Seg)
34 | 
35 | print('DONE')
36 | 
37 | All_Folder = os.listdir(C3D_Path)
38 | # All_Folder = All_Folder[3:end]  # MATLAB leftover
39 | subscript = '_C.txt'
40 | 
41 | for ifolder in All_Folder:
42 |     #% START 1 LOOP FOR 1 FC FOLDER, e.g. Abuse028 has N=1392 frames
43 | 
44 |     Folder_Path = str(C3D_Path) + "\\" + str(ifolder)
45 |     # Folder_Path is the path of a folder containing the C3D features (one per 16 frames) of a particular video.
46 |     # N=1392 frames --> it has [1392/16] = 88 fc6-1 files
47 | 
48 |     AllFiles = os.listdir(Folder_Path)  ## returns every *.fc6-1 file in the folder as a list
49 |     # fc6-1 files in the feature directory; each file = one clip of the video
50 |     # one clip = 16 frames
51 | 
52 |     if len(AllFiles) == 0:
53 |         print("no fc6-1 file in path")
54 |         continue
55 | 
56 |     feature_vector = np.zeros((len(AllFiles), 4096))
57 |     # each fc6-1 file = 1 clip of 16 frames = one 4096-d vector ==> in total [N/16] = 88 such clips
58 |     #% Iterate over the fc6-1 files (16 frames each)
59 |     for ifile in range(0, len(AllFiles)):
60 |         FilePath = Folder_Path + '\\' + AllFiles[ifile]
61 | 
62 |         data, _ = read_bin(FilePath)
63 | 
64 |         feature_vector[ifile] = data  #% row ifile of the 88x4096 matrix is assigned this clip's 4096-d feature
65 | 
66 |         # clear(mstring('data'))  # MATLAB leftover: deletes the variable 'data'
67 | 
68 |     #% At this point, feature_vector is filled with the actual data of
69 |     # all 16-frame clips in the video; each clip is 4096-d, so the
70 |     # 88x4096 matrix is now filled with actual data
71 |     # if sum(sum(feature_vector, [])) == 0:  ## TODO: should be the column-wise sums as a single-row matrix
72 |     #     print('error1')
73 | 
74 |     # Write the C3D features to a text file to be loaded in
75 |     # Training_AnomalyDetector_public (you can directly use the .mat format if you want).
76 |     txt_file_name = C3D_Path_Seg + '/' + ifolder + subscript
77 |     # feature txt name, e.g. Abuse028_x264_C.txt
78 | 
79 |     # if exist(txt_file_name, 'file'):  # MATLAB leftover
80 |     #     continue
81 | 
82 |     fid1 = open(txt_file_name, 'w')
83 |     ## sum(x,1) = sum vertically (per column)
84 |     ## sum(x,2) = sum horizontally (per row)
85 |     # if not isempty(find(sum(Feature_vect, 2) == 0)):  # row sums --> 88x4096 gives 88 rows
86 |     #     # k = find(X,n) returns the first n indices of the nonzero elements of X
87 |     #     print('error2')
88 | 
89 | 
90 |     # if not isempty(find(isnan(Feature_vect(mslice[:])))):
91 |     #     print('error3')
92 | 
93 |     # if not isempty(find(Feature_vect(mslice[:]) == Inf)):
94 |     #     print('error4')
95 | 
96 |     #% 32 Segments
97 | 
98 |     Segments_Features = np.zeros((32, 4096))  # 32 rows, 4096 columns
99 |     thirty2_shots = np.linspace(1, len(AllFiles), 33).round(0)
100 |     # thirty2_shots = 33 boundaries dividing the 88 clips into 32 segments, running from 1 to 88
101 |     # SO: thirty2_shots = [1, 4, 6, 10, ..., 83, 85, 88], 33 elements in total,
102 |     # a 1x33 vector (1-based boundaries carried over from the MATLAB original)
103 |     count = -1
104 |     #% REDUCE 88x4096 TO 32x4096
105 |     for ishots in range(0, len(thirty2_shots) - 1):  # ishots runs over the 32 segments
106 |         ss = int(thirty2_shots[ishots])  # start clip index in 88x4096
107 |         ee = int(thirty2_shots[ishots + 1] - 1)  # end clip index in 88x4096
108 | 
109 |         # print(ss,ee,'llllll')
110 |         if ishots == len(thirty2_shots) - 2:  # last segment: include the final clip
111 |             ee = int(thirty2_shots[ishots + 1])
112 |         #% THIS IS THE FEATURE OF 1 SEGMENT
113 |         # In all cases below, temp_vect is a 4096-d vector determined by the start index ss and end index ee
114 |         if ss == ee:
115 |             temp_vect = feature_vector[ss]  ## segment spans a single clip --> take row ss of 88x4096
116 | 
117 |         elif ee < ss:
118 |             temp_vect = feature_vector[ss]  # ee < ss (empty segment) --> also take row ss of 88x4096
119 | 
120 |         else:
121 |             temp_vect = feature_vector[ss:ee].mean(axis=0)  ## column-wise mean --> one 1x4096 vector
122 |             # for i in range(ss,ee):
123 |             #     feature_vector
124 |             # ss < ee --> take all clip vectors from ss to ee (e.g. 3 vectors) from the
125 |             # original 88x4096 features, then take the mean of those vectors to
126 |             # get a new 4096-d vector (the shape of the result is the row shape
127 |             # when taking the mean of a matrix)
128 |             # the mean over axis 0 = mean of each column = column sum / number of rows -->
129 |             # the result has one entry per column (=4096) after this mean operation
130 |             # print(temp_vect.shape)
131 |         #% ONCE THE SEGMENT FEATURE IS BUILT, NORMALIZE IT TO UNIT L2 NORM
132 |         temp_vect = temp_vect / np.linalg.norm(temp_vect)
133 |         # divide by the L2 (Euclidean) norm of the vector = sqrt(sum(x[i]**2))
134 | 
135 |         # if np.linalg.norm(temp_vect) == 0:
136 |         #     print('error5')
137 | 
138 |         count = count + 1  # next segment (max = 32)
139 |         Segments_Features[count] = temp_vect  # push this segment's feature into the final 32-segment feature matrix
140 | 
141 |     # verify
142 | 
143 |     # if not isempty(find(sum(Segments_Features, 2) == 0)):
144 |     #     print('error6')
145 | 
146 |     # if not isempty(find(isnan(Segments_Features(mslice[:])))):
147 |     #     print('error7')
148 | 
149 | 
150 |     # if not isempty(find(Segments_Features(mslice[:]) == Inf)):
151 |     #     print('error8')
152 | 
153 | 
154 |     # save the 32 segment features to a text file (you can directly save and load .mat files in Python as well).
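    # A minimal sketch of the .mat route mentioned in the comment above, as an
    # assumption rather than part of the original script ('seg_feat' is a
    # made-up key; scipy.io is what the training notebook itself imports):
    #
    #     from scipy.io import savemat, loadmat
    #     savemat(txt_file_name.replace('.txt', '.mat'), {'seg_feat': Segments_Features})
    #     seg_feat = loadmat(txt_file_name.replace('.txt', '.mat'))['seg_feat']  # back to 32x4096
    #
    # Note that the training notebook below parses these .txt files with
    # f.read().split() and np.float32 per token, i.e. it expects
    # whitespace-separated floats.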
155 | print(Segments_Features) 156 | print(Segments_Features.shape) 157 | 158 | for i in range(0,Segments_Features.shape[0]): 159 | feat_text = str(Segments_Features[i].tolist()) 160 | fid1.write(feat_text) 161 | fid1.write('\n') 162 | 163 | fid1.close() 164 | 165 | -------------------------------------------------------------------------------- /Codes/etc/[Prep]_sy_03_TrainTest_Code.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"TrainTest_Code.ipynb","provenance":[],"collapsed_sections":[],"toc_visible":true,"authorship_tag":"ABX9TyPJJ21m7sFE5k+waXw6mPth"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"1k7wc_fQK9bj","colab_type":"text"},"source":["# 도롱빈 Training Code\n","https://github.com/dolongbien/HumanBehaviorBKU/blob/master/TrainTest_Code.ipynb"]},{"cell_type":"code","metadata":{"id":"WrGXP34iLmnk","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":497},"executionInfo":{"status":"ok","timestamp":1600523151911,"user_tz":-540,"elapsed":8868,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"8379d214-873e-45fe-983e-3de026efae8e"},"source":["from tensorflow.python.client import device_lib \n","device_lib.list_local_devices()"],"execution_count":1,"outputs":[{"output_type":"execute_result","data":{"text/plain":["[name: \"/device:CPU:0\"\n"," device_type: \"CPU\"\n"," memory_limit: 268435456\n"," locality {\n"," }\n"," incarnation: 6909644792455160536, name: \"/device:XLA_CPU:0\"\n"," device_type: \"XLA_CPU\"\n"," memory_limit: 17179869184\n"," locality {\n"," }\n"," incarnation: 274237312469959737\n"," physical_device_desc: \"device: XLA_CPU device\", name: \"/device:XLA_GPU:0\"\n"," device_type: \"XLA_GPU\"\n"," memory_limit: 17179869184\n"," locality {\n"," }\n"," incarnation: 3759078092548666511\n"," physical_device_desc: \"device: XLA_GPU device\", name: \"/device:GPU:0\"\n"," device_type: \"GPU\"\n"," memory_limit: 11146783616\n"," locality {\n"," bus_id: 1\n"," links {\n"," }\n"," }\n"," incarnation: 5779681983101651734\n"," physical_device_desc: \"device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7\"]"]},"metadata":{"tags":[]},"execution_count":1}]},{"cell_type":"code","metadata":{"id":"-OiuI5SWK0Tk","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":35},"executionInfo":{"status":"ok","timestamp":1600523175964,"user_tz":-540,"elapsed":21907,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"dab14880-6ae7-4d7b-816c-e2257e5bc0fb"},"source":["from google.colab import drive\n","drive.mount('/content/gdrive')"],"execution_count":2,"outputs":[{"output_type":"stream","text":["Mounted at /content/gdrive\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"nUUkr8eLLRVB","colab_type":"text"},"source":["# Install dependencies\n","Note: turn on GPU mode to start using CUDA, change runtime type to Python 3.\n","\n","1. Keras 1.1.0\n","2. Theano 0.9.0\n","3. By default, latest CUDA installed. 
Downgrade to CUDA 8.0 for compatible purpose, modify GCC version accordingly."]},{"cell_type":"code","metadata":{"id":"fp-BbkcgLS02","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":408},"executionInfo":{"status":"ok","timestamp":1600523272216,"user_tz":-540,"elapsed":12861,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"73bf82eb-b899-43db-dc38-38774dfc7088"},"source":["!sudo apt-get install python-yaml -y\n","!sudo apt-get install libhdf5-serial-dev -y\n","!sudo pip install keras==1.1.0\n","!pip install theano==0.9.0\n","!pip install path.py"],"execution_count":4,"outputs":[{"output_type":"stream","text":["Reading package lists... Done\n","Building dependency tree \n","Reading state information... Done\n","python-yaml is already the newest version (3.12-1build2).\n","0 upgraded, 0 newly installed, 0 to remove and 11 not upgraded.\n","Reading package lists... Done\n","Building dependency tree \n","Reading state information... Done\n","libhdf5-serial-dev is already the newest version (1.10.0-patch1+docs-4).\n","0 upgraded, 0 newly installed, 0 to remove and 11 not upgraded.\n","Requirement already satisfied: keras==1.1.0 in /usr/local/lib/python3.6/dist-packages (1.1.0)\n","Requirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from keras==1.1.0) (3.13)\n","Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from keras==1.1.0) (1.15.0)\n","Requirement already satisfied: theano in /usr/local/lib/python3.6/dist-packages (from keras==1.1.0) (0.9.0)\n","Requirement already satisfied: scipy>=0.14 in /usr/local/lib/python3.6/dist-packages (from theano->keras==1.1.0) (1.4.1)\n","Requirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python3.6/dist-packages (from theano->keras==1.1.0) (1.18.5)\n","Requirement already satisfied: theano==0.9.0 in /usr/local/lib/python3.6/dist-packages (0.9.0)\n","Requirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python3.6/dist-packages (from theano==0.9.0) (1.18.5)\n","Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from theano==0.9.0) (1.15.0)\n","Requirement already satisfied: scipy>=0.14 in /usr/local/lib/python3.6/dist-packages (from theano==0.9.0) (1.4.1)\n","Requirement already satisfied: path.py in /usr/local/lib/python3.6/dist-packages (12.5.0)\n","Requirement already satisfied: path in /usr/local/lib/python3.6/dist-packages (from path.py) (15.0.0)\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"OcjAAk-1LqBS","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":552},"executionInfo":{"status":"ok","timestamp":1600523351143,"user_tz":-540,"elapsed":86924,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"28144d60-939a-401c-a01c-d46d5492c28d"},"source":["!apt update -qq;\n","!wget https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb;\n","!dpkg -i cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb;\n","!apt-key add /var/cuda-repo-8-0-local-ga2/7fa2af80.pub;\n","!apt-get update -qq;"],"execution_count":5,"outputs":[{"output_type":"stream","text":["36 packages can be upgraded. 
Run 'apt list --upgradable' to see them.\n","--2020-09-19 13:47:55-- https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb\n","Resolving developer.nvidia.com (developer.nvidia.com)... 152.199.16.29\n","Connecting to developer.nvidia.com (developer.nvidia.com)|152.199.16.29|:443... connected.\n","HTTP request sent, awaiting response... 302 Found\n","Location: https://developer.download.nvidia.com/compute/cuda/8.0/secure/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64.deb?IwDJnLssOkFx5KMVP0qr4sDf1ODDa_0qYFnAkHN0NQ8PBbYL_g44R5x_obQ3WC0KaN05HH1cGLpxgwfhtHVG2rAzwSFKKYl9aR8SLuOTafp3OmDUnjfsY60SOVTzwKxgAVsCLnAsAJoCmD1aAWMbCqUNpBi2YQJ6NqFYQUitLoNM9P5fBi6AbyYww9IVFJmeeD4c1yisMQsa65PJccjb1BgzWg [following]\n","--2020-09-19 13:47:56-- https://developer.download.nvidia.com/compute/cuda/8.0/secure/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64.deb?IwDJnLssOkFx5KMVP0qr4sDf1ODDa_0qYFnAkHN0NQ8PBbYL_g44R5x_obQ3WC0KaN05HH1cGLpxgwfhtHVG2rAzwSFKKYl9aR8SLuOTafp3OmDUnjfsY60SOVTzwKxgAVsCLnAsAJoCmD1aAWMbCqUNpBi2YQJ6NqFYQUitLoNM9P5fBi6AbyYww9IVFJmeeD4c1yisMQsa65PJccjb1BgzWg\n","Resolving developer.download.nvidia.com (developer.download.nvidia.com)... 152.199.20.126\n","Connecting to developer.download.nvidia.com (developer.download.nvidia.com)|152.199.20.126|:443... connected.\n","HTTP request sent, awaiting response... 200 OK\n","Length: 1913589814 (1.8G) [application/x-deb]\n","Saving to: ‘cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb’\n","\n","cuda-repo-ubuntu160 100%[===================>] 1.78G 56.1MB/s in 15s \n","\n","2020-09-19 13:48:11 (120 MB/s) - ‘cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb’ saved [1913589814/1913589814]\n","\n","Selecting previously unselected package cuda-repo-ubuntu1604-8-0-local-ga2.\n","(Reading database ... 
144711 files and directories currently installed.)\n","Preparing to unpack cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb ...\n","Unpacking cuda-repo-ubuntu1604-8-0-local-ga2 (8.0.61-1) ...\n","Setting up cuda-repo-ubuntu1604-8-0-local-ga2 (8.0.61-1) ...\n","Warning: The postinst maintainerscript of the package cuda-repo-ubuntu1604-8-0-local-ga2\n","Warning: seems to use apt-key (provided by apt) without depending on gnupg or gnupg2.\n","Warning: This will BREAK in the future and should be fixed by the package maintainer(s).\n","Note: Check first if apt-key functionality is needed at all - it probably isn't!\n","Warning: apt-key should not be used in scripts (called from postinst maintainerscript of the package cuda-repo-ubuntu1604-8-0-local-ga2)\n","OK\n","OK\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"xqgpSX25LqoO","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":1000},"executionInfo":{"status":"ok","timestamp":1600523418795,"user_tz":-540,"elapsed":151195,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"972419c5-ce33-40ad-e42b-50a57d6d60dd"},"source":["!apt install g++-5\n","!update-alternatives --remove-all gcc \n","!update-alternatives --remove-all g++\n","\n","!update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 20\n","!update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-5 20\n","\n","!update-alternatives --install /usr/bin/cc cc /usr/bin/gcc 30\n","!update-alternatives --set cc /usr/bin/gcc\n","\n","!update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++ 30\n","!update-alternatives --set c++ /usr/bin/g++\n","\n","!sudo apt install cuda-8.0;"],"execution_count":6,"outputs":[{"output_type":"stream","text":["Reading package lists... Done\n","Building dependency tree \n","Reading state information... 
Done\n","The following additional packages will be installed:\n"," cpp-5 gcc-5 gcc-5-base libasan2 libgcc-5-dev libisl15 libmpx0\n"," libstdc++-5-dev\n","Suggested packages:\n"," gcc-5-locales g++-5-multilib gcc-5-doc libstdc++6-5-dbg gcc-5-multilib\n"," libgcc1-dbg libgomp1-dbg libitm1-dbg libatomic1-dbg libasan2-dbg\n"," liblsan0-dbg libtsan0-dbg libubsan0-dbg libcilkrts5-dbg libmpx0-dbg\n"," libquadmath0-dbg libstdc++-5-doc\n","The following NEW packages will be installed:\n"," cpp-5 g++-5 gcc-5 gcc-5-base libasan2 libgcc-5-dev libisl15 libmpx0\n"," libstdc++-5-dev\n","0 upgraded, 9 newly installed, 0 to remove and 36 not upgraded.\n","Need to get 29.1 MB of archives.\n","After this operation, 100 MB of additional disk space will be used.\n","Get:1 http://archive.ubuntu.com/ubuntu bionic/universe amd64 gcc-5-base amd64 5.5.0-12ubuntu1 [17.1 kB]\n","Get:2 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libisl15 amd64 0.18-4 [548 kB]\n","Get:3 http://archive.ubuntu.com/ubuntu bionic/universe amd64 cpp-5 amd64 5.5.0-12ubuntu1 [7,785 kB]\n","Get:4 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libasan2 amd64 5.5.0-12ubuntu1 [264 kB]\n","Get:5 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libmpx0 amd64 5.5.0-12ubuntu1 [9,888 B]\n","Get:6 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libgcc-5-dev amd64 5.5.0-12ubuntu1 [2,224 kB]\n","Get:7 http://archive.ubuntu.com/ubuntu bionic/universe amd64 gcc-5 amd64 5.5.0-12ubuntu1 [8,357 kB]\n","Get:8 http://archive.ubuntu.com/ubuntu bionic/universe amd64 libstdc++-5-dev amd64 5.5.0-12ubuntu1 [1,415 kB]\n","Get:9 http://archive.ubuntu.com/ubuntu bionic/universe amd64 g++-5 amd64 5.5.0-12ubuntu1 [8,450 kB]\n","Fetched 29.1 MB in 0s (59.7 MB/s)\n","Selecting previously unselected package gcc-5-base:amd64.\n","(Reading database ... 
144805 files and directories currently installed.)\n","Preparing to unpack .../0-gcc-5-base_5.5.0-12ubuntu1_amd64.deb ...\n","Unpacking gcc-5-base:amd64 (5.5.0-12ubuntu1) ...\n","Selecting previously unselected package libisl15:amd64.\n","Preparing to unpack .../1-libisl15_0.18-4_amd64.deb ...\n","Unpacking libisl15:amd64 (0.18-4) ...\n","Selecting previously unselected package cpp-5.\n","Preparing to unpack .../2-cpp-5_5.5.0-12ubuntu1_amd64.deb ...\n","Unpacking cpp-5 (5.5.0-12ubuntu1) ...\n","Selecting previously unselected package libasan2:amd64.\n","Preparing to unpack .../3-libasan2_5.5.0-12ubuntu1_amd64.deb ...\n","Unpacking libasan2:amd64 (5.5.0-12ubuntu1) ...\n","Selecting previously unselected package libmpx0:amd64.\n","Preparing to unpack .../4-libmpx0_5.5.0-12ubuntu1_amd64.deb ...\n","Unpacking libmpx0:amd64 (5.5.0-12ubuntu1) ...\n","Selecting previously unselected package libgcc-5-dev:amd64.\n","Preparing to unpack .../5-libgcc-5-dev_5.5.0-12ubuntu1_amd64.deb ...\n","Unpacking libgcc-5-dev:amd64 (5.5.0-12ubuntu1) ...\n","Selecting previously unselected package gcc-5.\n","Preparing to unpack .../6-gcc-5_5.5.0-12ubuntu1_amd64.deb ...\n","Unpacking gcc-5 (5.5.0-12ubuntu1) ...\n","Selecting previously unselected package libstdc++-5-dev:amd64.\n","Preparing to unpack .../7-libstdc++-5-dev_5.5.0-12ubuntu1_amd64.deb ...\n","Unpacking libstdc++-5-dev:amd64 (5.5.0-12ubuntu1) ...\n","Selecting previously unselected package g++-5.\n","Preparing to unpack .../8-g++-5_5.5.0-12ubuntu1_amd64.deb ...\n","Unpacking g++-5 (5.5.0-12ubuntu1) ...\n","Setting up libisl15:amd64 (0.18-4) ...\n","Setting up gcc-5-base:amd64 (5.5.0-12ubuntu1) ...\n","Setting up libmpx0:amd64 (5.5.0-12ubuntu1) ...\n","Setting up libasan2:amd64 (5.5.0-12ubuntu1) ...\n","Setting up libgcc-5-dev:amd64 (5.5.0-12ubuntu1) ...\n","Setting up cpp-5 (5.5.0-12ubuntu1) ...\n","Setting up libstdc++-5-dev:amd64 (5.5.0-12ubuntu1) ...\n","Setting up gcc-5 (5.5.0-12ubuntu1) ...\n","Setting up g++-5 (5.5.0-12ubuntu1) ...\n","Processing triggers for man-db (2.8.3-2ubuntu0.1) ...\n","Processing triggers for libc-bin (2.27-3ubuntu1.2) ...\n","/sbin/ldconfig.real: /usr/local/lib/python3.6/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n","\n","update-alternatives: error: no alternatives for gcc\n","update-alternatives: error: no alternatives for g++\n","update-alternatives: using /usr/bin/gcc-5 to provide /usr/bin/gcc (gcc) in auto mode\n","update-alternatives: using /usr/bin/g++-5 to provide /usr/bin/g++ (g++) in auto mode\n","Reading package lists... Done\n","Building dependency tree \n","Reading state information... 
Done\n","Note, selecting 'cuda-8-0' for regex 'cuda-8.0'\n","Note, selecting 'libcuda-8.0-1' for regex 'cuda-8.0'\n","The following additional packages will be installed:\n"," cuda-command-line-tools-8-0 cuda-core-8-0 cuda-cublas-8-0\n"," cuda-cublas-dev-8-0 cuda-cudart-8-0 cuda-cudart-dev-8-0 cuda-cufft-8-0\n"," cuda-cufft-dev-8-0 cuda-curand-8-0 cuda-curand-dev-8-0 cuda-cusolver-8-0\n"," cuda-cusolver-dev-8-0 cuda-cusparse-8-0 cuda-cusparse-dev-8-0\n"," cuda-demo-suite-8-0 cuda-documentation-8-0 cuda-driver-dev-8-0\n"," cuda-license-8-0 cuda-misc-headers-8-0 cuda-npp-8-0 cuda-npp-dev-8-0\n"," cuda-nvgraph-8-0 cuda-nvgraph-dev-8-0 cuda-nvml-dev-8-0 cuda-nvrtc-8-0\n"," cuda-nvrtc-dev-8-0 cuda-runtime-8-0 cuda-samples-8-0 cuda-toolkit-8-0\n"," cuda-visual-tools-8-0\n","The following NEW packages will be installed:\n"," cuda-8-0 cuda-command-line-tools-8-0 cuda-core-8-0 cuda-cublas-8-0\n"," cuda-cublas-dev-8-0 cuda-cudart-8-0 cuda-cudart-dev-8-0 cuda-cufft-8-0\n"," cuda-cufft-dev-8-0 cuda-curand-8-0 cuda-curand-dev-8-0 cuda-cusolver-8-0\n"," cuda-cusolver-dev-8-0 cuda-cusparse-8-0 cuda-cusparse-dev-8-0\n"," cuda-demo-suite-8-0 cuda-documentation-8-0 cuda-driver-dev-8-0\n"," cuda-license-8-0 cuda-misc-headers-8-0 cuda-npp-8-0 cuda-npp-dev-8-0\n"," cuda-nvgraph-8-0 cuda-nvgraph-dev-8-0 cuda-nvml-dev-8-0 cuda-nvrtc-8-0\n"," cuda-nvrtc-dev-8-0 cuda-runtime-8-0 cuda-samples-8-0 cuda-toolkit-8-0\n"," cuda-visual-tools-8-0\n","0 upgraded, 31 newly installed, 0 to remove and 36 not upgraded.\n","Need to get 0 B/1,312 MB of archives.\n","After this operation, 2,079 MB of additional disk space will be used.\n","Get:1 file:/var/cuda-repo-8-0-local-ga2 cuda-license-8-0 8.0.61-1 [27.6 kB]\n","Get:2 file:/var/cuda-repo-8-0-local-ga2 cuda-misc-headers-8-0 8.0.61-1 [1,077 kB]\n","Get:3 file:/var/cuda-repo-8-0-local-ga2 cuda-core-8-0 8.0.61-1 [20.0 MB]\n","Get:4 file:/var/cuda-repo-8-0-local-ga2 cuda-cudart-8-0 8.0.61-1 [135 kB]\n","Get:5 file:/var/cuda-repo-8-0-local-ga2 cuda-driver-dev-8-0 8.0.61-1 [14.1 kB]\n","Get:6 file:/var/cuda-repo-8-0-local-ga2 cuda-cudart-dev-8-0 8.0.61-1 [1,071 kB]\n","Get:7 file:/var/cuda-repo-8-0-local-ga2 cuda-command-line-tools-8-0 8.0.61-1 [26.1 MB]\n","Get:8 file:/var/cuda-repo-8-0-local-ga2 cuda-nvrtc-8-0 8.0.61-1 [9,585 kB]\n","Get:9 file:/var/cuda-repo-8-0-local-ga2 cuda-nvrtc-dev-8-0 8.0.61-1 [10.8 kB]\n","Get:10 file:/var/cuda-repo-8-0-local-ga2 cuda-cusolver-8-0 8.0.61-1 [29.3 MB]\n","Get:11 file:/var/cuda-repo-8-0-local-ga2 cuda-cusolver-dev-8-0 8.0.61-1 [6,816 kB]\n","Get:12 file:/var/cuda-repo-8-0-local-ga2 cuda-cublas-8-0 8.0.61-1 [27.2 MB]\n","Get:13 file:/var/cuda-repo-8-0-local-ga2 cuda-cublas-dev-8-0 8.0.61-1 [57.4 MB]\n","Get:14 file:/var/cuda-repo-8-0-local-ga2 cuda-cufft-8-0 8.0.61-1 [117 MB]\n","Get:15 file:/var/cuda-repo-8-0-local-ga2 cuda-cufft-dev-8-0 8.0.61-1 [94.8 MB]\n","Get:16 file:/var/cuda-repo-8-0-local-ga2 cuda-curand-8-0 8.0.61-1 [43.7 MB]\n","Get:17 file:/var/cuda-repo-8-0-local-ga2 cuda-curand-dev-8-0 8.0.61-1 [67.7 MB]\n","Get:18 file:/var/cuda-repo-8-0-local-ga2 cuda-cusparse-8-0 8.0.61-1 [28.8 MB]\n","Get:19 file:/var/cuda-repo-8-0-local-ga2 cuda-cusparse-dev-8-0 8.0.61-1 [29.6 MB]\n","Get:20 file:/var/cuda-repo-8-0-local-ga2 cuda-npp-8-0 8.0.61-1 [157 MB]\n","Get:21 file:/var/cuda-repo-8-0-local-ga2 cuda-npp-dev-8-0 8.0.61-1 [82.3 MB]\n","Get:22 file:/var/cuda-repo-8-0-local-ga2 cuda-samples-8-0 8.0.61-1 [101 MB]\n","Get:23 file:/var/cuda-repo-8-0-local-ga2 cuda-documentation-8-0 8.0.61-1 [113 MB]\n","Get:24 
file:/var/cuda-repo-8-0-local-ga2 cuda-nvml-dev-8-0 8.0.61-1 [48.4 kB]\n","Get:25 file:/var/cuda-repo-8-0-local-ga2 cuda-nvgraph-8-0 8.0.61-1 [2,948 kB]\n","Get:26 file:/var/cuda-repo-8-0-local-ga2 cuda-nvgraph-dev-8-0 8.0.61-1 [3,028 kB]\n","Get:27 file:/var/cuda-repo-8-0-local-ga2 cuda-visual-tools-8-0 8.0.61-1 [286 MB]\n","Get:28 file:/var/cuda-repo-8-0-local-ga2 cuda-toolkit-8-0 8.0.61-1 [2,892 B]\n","Get:29 file:/var/cuda-repo-8-0-local-ga2 cuda-runtime-8-0 8.0.61-1 [2,574 B]\n","Get:30 file:/var/cuda-repo-8-0-local-ga2 cuda-demo-suite-8-0 8.0.61-1 [4,988 kB]\n","Get:31 file:/var/cuda-repo-8-0-local-ga2 cuda-8-0 8.0.61-1 [2,556 B]\n","debconf: unable to initialize frontend: Dialog\n","debconf: (No usable dialog-like program is installed, so the dialog based frontend cannot be used. at /usr/share/perl5/Debconf/FrontEnd/Dialog.pm line 76, <> line 31.)\n","debconf: falling back to frontend: Readline\n","debconf: unable to initialize frontend: Readline\n","debconf: (This frontend requires a controlling tty.)\n","debconf: falling back to frontend: Teletype\n","dpkg-preconfigure: unable to re-open stdin: \n","Selecting previously unselected package cuda-license-8-0.\n","(Reading database ... 145805 files and directories currently installed.)\n","Preparing to unpack .../00-cuda-license-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-license-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-misc-headers-8-0.\n","Preparing to unpack .../01-cuda-misc-headers-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-misc-headers-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-core-8-0.\n","Preparing to unpack .../02-cuda-core-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-core-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cudart-8-0.\n","Preparing to unpack .../03-cuda-cudart-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-cudart-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-driver-dev-8-0.\n","Preparing to unpack .../04-cuda-driver-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-driver-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cudart-dev-8-0.\n","Preparing to unpack .../05-cuda-cudart-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-cudart-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-command-line-tools-8-0.\n","Preparing to unpack .../06-cuda-command-line-tools-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-command-line-tools-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-nvrtc-8-0.\n","Preparing to unpack .../07-cuda-nvrtc-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-nvrtc-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-nvrtc-dev-8-0.\n","Preparing to unpack .../08-cuda-nvrtc-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-nvrtc-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cusolver-8-0.\n","Preparing to unpack .../09-cuda-cusolver-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-cusolver-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cusolver-dev-8-0.\n","Preparing to unpack .../10-cuda-cusolver-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-cusolver-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cublas-8-0.\n","Preparing to unpack .../11-cuda-cublas-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-cublas-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cublas-dev-8-0.\n","Preparing to unpack .../12-cuda-cublas-dev-8-0_8.0.61-1_amd64.deb 
...\n","Unpacking cuda-cublas-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cufft-8-0.\n","Preparing to unpack .../13-cuda-cufft-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-cufft-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cufft-dev-8-0.\n","Preparing to unpack .../14-cuda-cufft-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-cufft-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-curand-8-0.\n","Preparing to unpack .../15-cuda-curand-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-curand-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-curand-dev-8-0.\n","Preparing to unpack .../16-cuda-curand-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-curand-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cusparse-8-0.\n","Preparing to unpack .../17-cuda-cusparse-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-cusparse-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-cusparse-dev-8-0.\n","Preparing to unpack .../18-cuda-cusparse-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-cusparse-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-npp-8-0.\n","Preparing to unpack .../19-cuda-npp-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-npp-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-npp-dev-8-0.\n","Preparing to unpack .../20-cuda-npp-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-npp-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-samples-8-0.\n","Preparing to unpack .../21-cuda-samples-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-samples-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-documentation-8-0.\n","Preparing to unpack .../22-cuda-documentation-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-documentation-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-nvml-dev-8-0.\n","Preparing to unpack .../23-cuda-nvml-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-nvml-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-nvgraph-8-0.\n","Preparing to unpack .../24-cuda-nvgraph-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-nvgraph-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-nvgraph-dev-8-0.\n","Preparing to unpack .../25-cuda-nvgraph-dev-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-nvgraph-dev-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-visual-tools-8-0.\n","Preparing to unpack .../26-cuda-visual-tools-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-visual-tools-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-toolkit-8-0.\n","Preparing to unpack .../27-cuda-toolkit-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-toolkit-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-runtime-8-0.\n","Preparing to unpack .../28-cuda-runtime-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-runtime-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-demo-suite-8-0.\n","Preparing to unpack .../29-cuda-demo-suite-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-demo-suite-8-0 (8.0.61-1) ...\n","Selecting previously unselected package cuda-8-0.\n","Preparing to unpack .../30-cuda-8-0_8.0.61-1_amd64.deb ...\n","Unpacking cuda-8-0 (8.0.61-1) ...\n","Setting up cuda-license-8-0 (8.0.61-1) ...\n","*** LICENSE AGREEMENT ***\n","By using this software you agree to fully comply with the terms and \n","conditions of the EULA (End User License Agreement). 
The EULA is located\n","at /usr/local/cuda-8.0/doc/EULA.txt. The EULA can also be found at\n","http://docs.nvidia.com/cuda/eula/index.html. If you do not agree to the\n","terms and conditions of the EULA, do not use the software.\n","\n","Setting up cuda-nvgraph-8-0 (8.0.61-1) ...\n","Setting up cuda-cufft-8-0 (8.0.61-1) ...\n","Setting up cuda-npp-8-0 (8.0.61-1) ...\n","Setting up cuda-nvgraph-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-cudart-8-0 (8.0.61-1) ...\n","Setting up cuda-driver-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-cusolver-8-0 (8.0.61-1) ...\n","Setting up cuda-nvml-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-cufft-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-misc-headers-8-0 (8.0.61-1) ...\n","Setting up cuda-cusparse-8-0 (8.0.61-1) ...\n","Setting up cuda-nvrtc-8-0 (8.0.61-1) ...\n","Setting up cuda-nvrtc-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-curand-8-0 (8.0.61-1) ...\n","Setting up cuda-cublas-8-0 (8.0.61-1) ...\n","Setting up cuda-cusolver-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-core-8-0 (8.0.61-1) ...\n","Setting up cuda-curand-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-npp-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-cudart-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-cublas-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-runtime-8-0 (8.0.61-1) ...\n","Setting up cuda-cusparse-dev-8-0 (8.0.61-1) ...\n","Setting up cuda-command-line-tools-8-0 (8.0.61-1) ...\n","Setting up cuda-demo-suite-8-0 (8.0.61-1) ...\n","Setting up cuda-samples-8-0 (8.0.61-1) ...\n","Setting up cuda-visual-tools-8-0 (8.0.61-1) ...\n","Setting up cuda-documentation-8-0 (8.0.61-1) ...\n","Setting up cuda-toolkit-8-0 (8.0.61-1) ...\n","Setting up cuda-8-0 (8.0.61-1) ...\n","Processing triggers for libc-bin (2.27-3ubuntu1.2) ...\n","/sbin/ldconfig.real: /usr/local/lib/python3.6/dist-packages/ideep4py/lib/libmkldnn.so.0 is not a symbolic link\n","\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"pRpXtW_zL0r1","colab_type":"text"},"source":["**cuDNN (v5.1): a GPU-accelerated library of primitives for deep neural networks**"]},{"cell_type":"code","metadata":{"id":"FwI4v5eSLqgK","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":319},"executionInfo":{"status":"ok","timestamp":1600523422849,"user_tz":-540,"elapsed":113725,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"270488c8-08c7-481a-ef00-2c5fae1f5eea"},"source":["%cd /content\n","!wget http://developer.download.nvidia.com/compute/redist/cudnn/v5.1/cudnn-8.0-linux-x64-v5.1.tgz\n","!tar -xzvf cudnn-8.0-linux-x64-v5.1.tgz\n","!sudo cp -P cuda/include/cudnn.h /usr/local/cuda-8.0/include\n","!sudo cp -P cuda/lib64/libcudnn* /usr/local/cuda-8.0/lib64/\n","!sudo chmod a+r /usr/local/cuda-8.0/lib64/libcudnn*"],"execution_count":7,"outputs":[{"output_type":"stream","text":["/content\n","--2020-09-19 13:50:18-- http://developer.download.nvidia.com/compute/redist/cudnn/v5.1/cudnn-8.0-linux-x64-v5.1.tgz\n","Resolving developer.download.nvidia.com (developer.download.nvidia.com)... 152.199.20.126\n","Connecting to developer.download.nvidia.com (developer.download.nvidia.com)|152.199.20.126|:80... connected.\n","HTTP request sent, awaiting response... 
200 OK\n","Length: 103174002 (98M) [application/x-compressed]\n","Saving to: ‘cudnn-8.0-linux-x64-v5.1.tgz’\n","\n","cudnn-8.0-linux-x64 100%[===================>] 98.39M 288MB/s in 0.3s \n","\n","2020-09-19 13:50:18 (288 MB/s) - ‘cudnn-8.0-linux-x64-v5.1.tgz’ saved [103174002/103174002]\n","\n","cuda/include/cudnn.h\n","cuda/lib64/libcudnn.so\n","cuda/lib64/libcudnn.so.5\n","cuda/lib64/libcudnn.so.5.1.10\n","cuda/lib64/libcudnn_static.a\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"NzK19IyCMC0r","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":177},"executionInfo":{"status":"ok","timestamp":1600506459779,"user_tz":-540,"elapsed":921,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"94d79418-26ad-4c0b-b3a4-d86ef35c5e62"},"source":["!gcc --version\n","!nvcc -V"],"execution_count":null,"outputs":[{"output_type":"stream","text":["gcc (Ubuntu 5.5.0-12ubuntu1) 5.5.0 20171010\n","Copyright (C) 2015 Free Software Foundation, Inc.\n","This is free software; see the source for copying conditions. There is NO\n","warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n","\n","nvcc: NVIDIA (R) Cuda compiler driver\n","Copyright (c) 2005-2016 NVIDIA Corporation\n","Built on Tue_Jan_10_13:22:03_CST_2017\n","Cuda compilation tools, release 8.0, V8.0.61\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"-vx7jfW3MCqu","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":372},"executionInfo":{"status":"ok","timestamp":1600506461311,"user_tz":-540,"elapsed":768,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"111f0b8f-ec5b-4f1e-da0c-9f666b9fd3d9"},"source":["!nvidia-smi"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Sat Sep 19 09:07:42 2020 \n","+-----------------------------------------------------------------------------+\n","| NVIDIA-SMI 450.66 Driver Version: 418.67 CUDA Version: 10.1 |\n","|-------------------------------+----------------------+----------------------+\n","| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n","| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n","| | | MIG M. |\n","|===============================+======================+======================|\n","| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |\n","| N/A 46C P0 33W / 250W | 359MiB / 16280MiB | 0% Default |\n","| | | ERR! 
|\n","+-------------------------------+----------------------+----------------------+\n"," \n","+-----------------------------------------------------------------------------+\n","| Processes: |\n","| GPU GI CI PID Type Process name GPU Memory |\n","| ID ID Usage |\n","|=============================================================================|\n","| No running processes found |\n","+-----------------------------------------------------------------------------+\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"KDwlHYudMTky","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":569},"executionInfo":{"status":"ok","timestamp":1600506473560,"user_tz":-540,"elapsed":9350,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"f939a2e5-6c69-4ed9-99db-d17d9fae7181"},"source":["!pip install numpy==1.14.6 # downgrade to match the Theano version (if needed)"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Collecting numpy==1.14.6\n","\u001b[?25l Downloading https://files.pythonhosted.org/packages/e5/c4/395ebb218053ba44d64935b3729bc88241ec279915e72100c5979db10945/numpy-1.14.6-cp36-cp36m-manylinux1_x86_64.whl (13.8MB)\n","\u001b[K |████████████████████████████████| 13.8MB 238kB/s \n","\u001b[31mERROR: xarray 0.15.1 has requirement numpy>=1.15, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: umap-learn 0.4.6 has requirement numpy>=1.17, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: tifffile 2020.9.3 has requirement numpy>=1.15.1, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: tensorflow 2.3.0 has requirement numpy<1.19.0,>=1.16.0, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: spacy 2.2.4 has requirement numpy>=1.15.0, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: pymc3 3.7 has requirement theano>=1.0.4, but you'll have theano 0.9.0 which is incompatible.\u001b[0m\n","\u001b[31mERROR: plotnine 0.6.0 has requirement numpy>=1.16.0, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: numba 0.48.0 has requirement numpy>=1.15, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: kapre 0.1.3.1 has requirement keras>=2.0.0, but you'll have keras 1.1.0 which is incompatible.\u001b[0m\n","\u001b[31mERROR: imgaug 0.2.9 has requirement numpy>=1.15.0, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: fbprophet 0.7.1 has requirement numpy>=1.15.4, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: fastai 1.0.61 has requirement numpy>=1.15, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: fancyimpute 0.4.3 has requirement keras>=2.0.0, but you'll have keras 1.1.0 which is incompatible.\u001b[0m\n","\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\n","\u001b[31mERROR: cvxpy 1.0.31 has requirement numpy>=1.15, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: blis 0.4.1 has requirement numpy>=1.15.0, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: astropy 4.0.1.post1 has requirement numpy>=1.16, but you'll have numpy 1.14.6 which is incompatible.\u001b[0m\n","\u001b[31mERROR: 
albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\n","\u001b[?25hInstalling collected packages: numpy\n"," Found existing installation: numpy 1.18.5\n"," Uninstalling numpy-1.18.5:\n"," Successfully uninstalled numpy-1.18.5\n","Successfully installed numpy-1.14.6\n"],"name":"stdout"},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["numpy"]}}},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"_UhsUqt-MVTf","colab_type":"text"},"source":["# Initialization"]},{"cell_type":"code","metadata":{"id":"KN3VWPUVMWAM","colab_type":"code","colab":{}},"source":["import os\n","os.environ[\"THEANO_FLAGS\"] = \"mode=FAST_RUN,device=gpu0,floatX=float32\"\n","os.environ[\"KERAS_BACKEND\"] = \"theano\""],"execution_count":null,"outputs":[]},{"cell_type":"code","metadata":{"id":"MvAaxPnmMYFL","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":179},"executionInfo":{"status":"ok","timestamp":1600506499430,"user_tz":-540,"elapsed":13342,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"d4e78245-adc2-4824-8bc7-3b6352a1c366"},"source":["from keras.models import Sequential\n","from keras.layers import Dense, Dropout, Activation, TimeDistributedDense ,LSTM,Reshape\n","from keras.regularizers import l2\n","from keras.optimizers import SGD,Adam, Adagrad\n","from scipy.io import loadmat, savemat\n","from keras.models import model_from_json\n","import theano.tensor as T\n","import theano\n","import csv\n","import configparser\n","import collections\n","import time\n","import csv\n","import os\n","from os import listdir\n","import skimage.transform\n","from skimage import color\n","from os.path import isfile, join\n","import numpy as np\n","import numpy\n","from datetime import datetime\n","import path\n","from os.path import basename\n","import glob\n","import matplotlib.pyplot as plt\n","import theano.sandbox\n","theano.sandbox.cuda.use('gpu0')\n","from IPython.display import clear_output\n","import math"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Using Theano backend.\n","WARNING (theano.sandbox.cuda): The cuda backend is deprecated and will be removed in the next release (v0.10). Please switch to the gpuarray backend. You can get more information about how to switch at this URL:\n"," https://github.com/Theano/Theano/wiki/Converting-to-the-new-gpu-back-end%28gpuarray%29\n","\n","Using gpu device 0: Tesla P100-PCIE-16GB (CNMeM is disabled, cuDNN 5110)\n","WARNING (theano.sandbox.cuda): The cuda backend is deprecated and will be removed in the next release (v0.10). Please switch to the gpuarray backend. 
You can get more information about how to switch at this URL:\n"," https://github.com/Theano/Theano/wiki/Converting-to-the-new-gpu-back-end%28gpuarray%29\n","\n"],"name":"stderr"}]},{"cell_type":"markdown","metadata":{"id":"wjumzz1CM6Pq","colab_type":"text"},"source":["3 FC Layers"]},{"cell_type":"code","metadata":{"id":"jE_Udv6cMcdw","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":319},"executionInfo":{"status":"ok","timestamp":1600506527590,"user_tz":-540,"elapsed":5677,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"785aa79f-9cbc-4a33-9f1b-b8a5f7562fe0"},"source":["print(\"Create Model\")\n","model = Sequential()\n","model.add(Dense(512, input_dim=4096,init='glorot_normal',W_regularizer=l2(0.001),activation='relu'))\n","model.add(Dropout(0.6))\n","model.add(Dense(32,init='glorot_normal',W_regularizer=l2(0.001)))\n","model.add(Dropout(0.6))\n","model.add(Dense(1,init='glorot_normal',W_regularizer=l2(0.001),activation='sigmoid'))\n","print(model.summary())"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Create Model\n","____________________________________________________________________________________________________\n","Layer (type) Output Shape Param # Connected to \n","====================================================================================================\n","dense_1 (Dense) (None, 512) 2097664 dense_input_1[0][0] \n","____________________________________________________________________________________________________\n","dropout_1 (Dropout) (None, 512) 0 dense_1[0][0] \n","____________________________________________________________________________________________________\n","dense_2 (Dense) (None, 32) 16416 dropout_1[0][0] \n","____________________________________________________________________________________________________\n","dropout_2 (Dropout) (None, 32) 0 dense_2[0][0] \n","____________________________________________________________________________________________________\n","dense_3 (Dense) (None, 1) 33 dropout_2[0][0] \n","====================================================================================================\n","Total params: 2114113\n","____________________________________________________________________________________________________\n","None\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"vpnUXaYhM8qr","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":52},"executionInfo":{"status":"ok","timestamp":1600507782806,"user_tz":-540,"elapsed":803,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"61882505-baa0-46b9-fb8f-132434293f97"},"source":["# COUNT THE NUMBER OF VIDEO\n","\n","# !ls -l \"/content/gdrive/My Drive/HumanBehaviors/C3D_Feature_txt/Train_RoadAccidents/Normal\" | egrep -c '^-'\n","# !ls -l \"/content/gdrive/My Drive/HumanBehaviors/C3D_Feature_txt/Train_RoadAccidents/Abnormal\" | egrep -c '^-'\n","# !ls -l \"/content/gdrive/My Drive/HumanBehaviors/C3D_Feature_txt/Test_RoadAccidents\" | egrep -c '^-'\n","\n","!ls -l \"/content/gdrive/My Drive/32segment/normal_output\" | egrep -c '^-'\n","!ls -l \"/content/gdrive/My Drive/32segment/abnormal_output\" | egrep -c 
'^-'\n"],"execution_count":null,"outputs":[{"output_type":"stream","text":["42\n","44\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"hPOgc9gnNBvt","colab_type":"text"},"source":["# Load and label video (0/1), implement objective function\n","1. Utility functions: load/save model/weight, load video feature and make label (0/1) in batch size of 60.\n","2. Objective function"]},{"cell_type":"code","metadata":{"id":"3SSTvyPIM8sz","colab_type":"code","colab":{}},"source":["def load_model(json_path):\n"," model = model_from_json(open(json_path).read())\n"," return model\n","\n","def load_weights(model, weight_path): # Function to load the model weights\n"," dict2 = loadmat(weight_path)\n"," dict = conv_dict(dict2)\n"," i = 0\n"," for layer in model.layers:\n"," weights = dict[str(i)]\n"," layer.set_weights(weights)\n"," i += 1\n"," return model\n","\n","def conv_dict(dict2):\n"," i = 0\n"," dict = {}\n"," for i in range(len(dict2)):\n"," if str(i) in dict2:\n"," if dict2[str(i)].shape == (0, 0):\n"," dict[str(i)] = dict2[str(i)]\n"," else:\n"," weights = dict2[str(i)][0]\n"," weights2 = []\n"," for weight in weights:\n"," if weight.shape in [(1, x) for x in range(0, 5000)]:\n"," weights2.append(weight[0])\n"," else:\n"," weights2.append(weight)\n"," dict[str(i)] = weights2\n"," return dict\n","\n","def save_model(model, json_path, weight_path):\n"," json_string = model.to_json()\n"," open(json_path, 'w').write(json_string)\n"," dict = {}\n"," i = 0\n"," for layer in model.layers:\n"," weights = layer.get_weights()\n"," my_list = np.zeros(len(weights), dtype=np.object)\n"," my_list[:] = weights\n"," dict[str(i)] = my_list\n"," i += 1\n"," savemat(weight_path, dict)\n","\n","\n","# Load Training Dataset and label training videos\n","\n","def load_dataset_Train_batch(AbnormalPath, NormalPath):\n","\n"," batchsize=60 # Each batch contain 60 videos.\n"," n_exp=int(batchsize/2) # 30 normal and 30 road accident videos\n","\n"," Num_abnormal = 366 # number of road accident videos in Training set.\n"," Num_Normal = 330 # number of normal videos in Training set.\n","\n","\n"," # We assume the features of abnormal videos and normal videos are located in two different folders.\n"," Abnor_list_iter = np.random.permutation(Num_abnormal)\n"," Abnor_list_iter = Abnor_list_iter[Num_abnormal-n_exp:] # Indexes for randomly selected Abnormal Videos\n"," Norm_list_iter = np.random.permutation(Num_Normal)\n"," Norm_list_iter = Norm_list_iter[Num_Normal-n_exp:] # Indexes for randomly selected Normal Videos\n","\n"," AllVideos_Path = AbnormalPath\n"," def listdir_nohidden(AllVideos_Path): # To ignore hidden files\n"," file_dir_extension = os.path.join(AllVideos_Path, '*_C.txt')\n"," for f in glob.glob(file_dir_extension):\n"," if not f.startswith('.'):\n"," yield os.path.basename(f)\n","\n"," All_Videos=sorted(listdir_nohidden(AllVideos_Path))\n"," All_Videos.sort()\n"," AllFeatures = [] # To store C3D features of a batch\n"," print(\"Loading Abnormal videos Features...\")\n","\n"," Video_count=-1\n"," \n"," ###################### READ RANDOMLY ABNORMAL FEATURE 32x4096 #################\n"," for iv in Abnor_list_iter:\n"," Video_count=Video_count+1\n"," VideoPath = os.path.join(AllVideos_Path, All_Videos[iv])\n"," f = open(VideoPath, \"r\")\n"," words = f.read().split()\n"," num_feat = int(len(words) / 4096)\n"," # Number of features per video to be loaded.\n"," # In our case num_feat=32, as we divide the video into 32 segments. 
\n","\n"," count = -1;\n"," VideoFeatues = []\n"," for feat in range(0, num_feat):\n"," feat_row1 = np.float32(words[feat * 4096:feat * 4096 + 4096])\n"," count = count + 1\n"," if count == 0:\n"," VideoFeatues = feat_row1\n"," if count > 0:\n"," VideoFeatues = np.vstack((VideoFeatues, feat_row1))\n","\n"," if Video_count == 0:\n"," AllFeatures = VideoFeatues\n"," if Video_count > 0:\n"," AllFeatures = np.vstack((AllFeatures, VideoFeatues))\n","\n"," \n"," \n"," print(\"Loading Normal videos...\")\n"," AllVideos_Path = NormalPath\n","\n"," def listdir_nohidden(AllVideos_Path): # To ignore hidden files\n"," file_dir_extension = os.path.join(AllVideos_Path, '*_C.txt')\n"," for f in glob.glob(file_dir_extension):\n"," if not f.startswith('.'):\n"," yield os.path.basename(f)\n","\n"," All_Videos = sorted(listdir_nohidden(AllVideos_Path))\n"," All_Videos.sort()\n","\n"," for iv in Norm_list_iter:\n"," VideoPath = os.path.join(AllVideos_Path, All_Videos[iv])\n"," f = open(VideoPath, \"r\")\n"," words = f.read().split()\n"," feat_row1 = np.array([])\n"," num_feat = int(len(words) /4096) \n"," # Number of features to be loaded. \n"," # In our case num_feat=32, as we divide the video into 32 segments.\n","\n"," count = -1;\n"," VideoFeatues = []\n"," for feat in range(0, num_feat):\n","\n","\n"," feat_row1 = np.float32(words[feat * 4096:feat * 4096 + 4096])\n"," count = count + 1\n"," if count == 0:\n"," VideoFeatues = feat_row1\n"," if count > 0:\n"," VideoFeatues = np.vstack((VideoFeatues, feat_row1))\n"," feat_row1 = []\n"," AllFeatures = np.vstack((AllFeatures, VideoFeatues))\n","\n"," print(\"Features loaded\")\n","\n","\n"," AllLabels = np.zeros(32*batchsize, dtype='uint8') \n"," # 60*32 = 1920 label, 0/1 (INTEGER, please remember, regression init)\n"," \n"," th_loop1=n_exp*32\n"," th_loop2=n_exp*32-1\n"," \n"," # mini-batch size = 60, 30 normals, 30 abnormals\n"," # --> 30*32= 640 feature vectors for 32 segments and 30 videos\n","\n","\n"," # Load Abnormal path fist--> index 0-639 is assigned value 0,\n"," # remains 640 from th_loop2 = 640-1920 to label 1\n"," \n"," for iv in range(0, 32*batchsize): # 1920\n"," if iv< th_loop1:\n"," AllLabels[iv] = int(0) # All instances of abnormal videos are labeled 0. This will be used in custom_objective to keep track of normal and abnormal videos indexes.\n"," if iv > th_loop2:\n"," AllLabels[iv] = int(1) # All instances of Normal videos are labeled 1. 
This will be used in custom_objective to keep track of normal and abnormal videos indexes.\n","\n"," # There are 1920 labels in total (all instances 32segment*60video a batch)\n"," \n"," return AllFeatures,AllLabels"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"3xh7XI23NeBK","colab_type":"text"},"source":["Objective function"]},{"cell_type":"code","metadata":{"id":"MCWwJM2PM8oT","colab_type":"code","colab":{}},"source":["def custom_objective(y_true, y_pred):\n"," \n"," y_true = T.flatten(y_true)\n"," y_pred = T.flatten(y_pred)\n"," n_seg = 32 \n"," nvid = 60\n"," n_exp = int(nvid / 2)\n"," Num_d=32*nvid\n","\n","\n"," sub_max = T.ones_like(y_pred) # sub_max represents the highest scoring instants in bags (videos).\n"," sub_sum_labels = T.ones_like(y_true) # It is used to sum the labels in order to distinguish between normal and abnormal videos.\n"," sub_sum_l1=T.ones_like(y_true) # For holding the concatenation of summation of scores in the bag.\n"," sub_l2 = T.ones_like(y_true) # For holding the concatenation of L2 of score in the bag.\n"," \n"," \n"," for ii in range(0, nvid, 1):\n"," # For Labels\n"," mm = y_true[ii * n_seg:ii * n_seg + n_seg]\n"," sub_sum_labels = T.concatenate([sub_sum_labels, T.stack(T.sum(mm))]) # Just to keep track of abnormal and normal vidoes\n"," \n"," # For Features scores\n"," Feat_Score = y_pred[ii * n_seg:ii * n_seg + n_seg]\n"," sub_max = T.concatenate([sub_max, T.stack(T.max(Feat_Score))]) # Keep the maximum score of scores of all instances in a Bag (video)\n"," sub_sum_l1 = T.concatenate([sub_sum_l1, T.stack(T.sum(Feat_Score))]) # Keep the sum of scores of all instances in a Bag (video)\n","\n"," z1 = T.ones_like(Feat_Score)\n"," z2 = T.concatenate([z1, Feat_Score])\n"," z3 = T.concatenate([Feat_Score, z1])\n"," z_22 = z2[31:]\n"," z_44 = z3[:33]\n"," z = z_22 - z_44\n"," z = z[1:32]\n"," z = T.sum(T.sqr(z))\n"," sub_l2 = T.concatenate([sub_l2, T.stack(z)])\n","\n"," # sub_max[Num_d:] means include all elements after Num_d.\n"," # AllLabels =[2 , 4, 3 ,9 ,6 ,12,7 ,18 ,9 ,14]\n"," # z=x[4:]\n"," #[ 6. 12. 7. 18. 9. 14.]\n","\n"," sub_score = sub_max[Num_d:] # We need this step since we have used T.ones_like\n"," F_labels = sub_sum_labels[Num_d:] # We need this step since we have used T.ones_like\n"," # F_labels contains integer 32 for normal video and 0 for abnormal videos. This because of labeling done at the end of \"load_dataset_Train_batch\"\n","\n","\n","\n"," # AllLabels =[2 , 4, 3 ,9 ,6 ,12,7 ,18 ,9 ,14]\n"," # z=x[:4]\n"," # [ 2 4 3 9]... 
This shows elements 0 to 3\n","\n","    sub_sum_l1 = sub_sum_l1[Num_d:] # We need this step since we have used T.ones_like\n","    sub_sum_l1 = sub_sum_l1[:n_exp]\n","    sub_l2 = sub_l2[Num_d:]         # We need this step since we have used T.ones_like\n","    sub_l2 = sub_l2[:n_exp]\n","\n","\n","    indx_nor = theano.tensor.eq(F_labels, 32).nonzero()[0]  # Indices of normal videos: since each of the 32 segments of a normal video is labeled 1, F_labels=32 for a normal video\n","    indx_abn = theano.tensor.eq(F_labels, 0).nonzero()[0]\n","    n_Nor=n_exp\n","\n","    Sub_Nor = sub_score[indx_nor] # Maximum score of each normal video\n","    Sub_Abn = sub_score[indx_abn] # Maximum score of each abnormal video\n","\n","    z = T.ones_like(y_true)\n","    for ii in range(0, n_Nor, 1):\n","        sub_z = T.maximum(1 - Sub_Abn + Sub_Nor[ii], 0) + T.maximum(0, Sub_Nor[ii] - T.log2(Sub_Abn))\n","        z = T.concatenate([z, T.stack(T.sum(sub_z))])\n","\n","    z = z[Num_d:]  # We need this step since we have used T.ones_like\n","    z = T.mean(z, axis=-1) + 0.00008*T.sum(sub_sum_l1) + 0.00008*T.sum(sub_l2) # Final loss: ranking hinge plus the sparsity and smoothness terms\n","    \n","    return z # z is the scalar loss value returned by this loss function"],"execution_count":null,"outputs":[]},
{"cell_type":"markdown","metadata":{"id":"x82EXeqpNsKr","colab_type":"text"},"source":["# Training code\n","Train on batches of size 60; save the model's weights after every 1000 iterations.\n","\n","1. Input: the video-feature (txt file) directories (Normal and Abnormal) and the directory in which to save the weights;\n","2. Output: the model's weights (*.mat file). In addition, the loss over the batches is plotted every 20 iterations."]},
{"cell_type":"code","metadata":{"id":"KzvDlWr4NvKm","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":401},"executionInfo":{"status":"error","timestamp":1600510165818,"user_tz":-540,"elapsed":2515,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"835163a0-73bb-44c8-c979-13dcaa2e6e6a"},"source":["adagrad=Adagrad(lr=0.01, epsilon=1e-08)\n","\n","model.compile(loss=custom_objective, optimizer=adagrad)\n","\n","print(\"Starting training...\")\n","\n","# AllClassPath='/content/gdrive/My Drive/HumanBehaviors/C3D_Feature_txt/Train_RoadAccidents/'\n","AllClassPath='/content/gdrive/My Drive/32segment'\n","# AllClassPath contains C3D features (.txt file) of each video. 
Each text file contains 32 features, each of 4096 dimension\n","\n","# output_dir='/content/gdrive/My Drive/HumanBehaviors/Result/Weight/'\n","output_dir='/content/gdrive/My Drive/TrainingOutput'\n","# Output_dir is the directory where you want to save trained weights\n","\n","# loss_dir = 'content/gdrive/My Drive/HumanBehaviors/Result/Loss' # to save figure\n","loss_dir = '/content/gdrive/My Drive/TrainingLoss'\n","weights_path = output_dir + 'weights.mat'\n","# weights.mat are the model weights that you will get after (or during) that training\n","model_path = output_dir + 'model.json'\n","\n","if not os.path.exists(output_dir):\n"," os.makedirs(output_dir)\n"," \n","if not os.path.exists(loss_dir):\n"," os.makedirs(loss_dir)\n"," \n","All_class_files= listdir(AllClassPath)\n","All_class_files.sort()\n","loss_graph =[]\n","num_iters = 10000\n","total_iterations = 0\n","batchsize=60\n","time_before = datetime.now()\n","\n","for it_num in range(num_iters):\n","\n"," AbnormalPath = os.path.join(AllClassPath, All_class_files[0]) # Path of abnormal already computed C3D features\n"," NormalPath = os.path.join(AllClassPath, All_class_files[1]) # Path of Normal already computed C3D features\n"," inputs, targets=load_dataset_Train_batch(AbnormalPath, NormalPath) # Load normal and abnormal video C3D features\n"," # INPUTS: (1920x4096)\n"," # Targets (1920,) (32*60 video)\n"," # INPUT: \n"," # 1/ A BATCH of 1920 feature vector (4096d) for each segment, 810 abnormal, 798 normal\n"," # 2/ LABEL of 1920 segments, integer value 0/1 (regression)\n"," \n"," print(\"------------------- TRAIN ON BATCH- iteration \", it_num)\n"," batch_loss =model.train_on_batch(inputs, targets)\n"," loss_graph = np.hstack((loss_graph, batch_loss)) #put to stack of numpy array\n"," total_iterations += 1\n"," \n"," # PLOT THE LOSS\n"," plt.plot(loss_graph, label='loss')\n"," plt.title('Loss')\n"," plt.legend()\n"," plt.xlabel('Number of iteration')\n"," plt.ylabel('Loss')\n"," \n"," if total_iterations % 20 == 1:\n"," plt.savefig(loss_dir + 'loss_' + str(total_iterations) +'.png')\n"," print(\"These iteration=\" + str(total_iterations) + \") took: \" + str(datetime.now() - time_before) + \", with loss of \" + str(batch_loss))\n"," plt.show()\n"," \n"," if total_iterations % 20 == 1:\n"," iteration_path = output_dir + 'Iterations_graph_' + str(total_iterations) + '.mat'\n"," savemat(iteration_path, dict(loss_graph=loss_graph))\n"," clear_output()\n"," if total_iterations % 1000 == 0: # Save the model at every 1000th iterations.\n"," weights_path = output_dir + 'weights_AllLoss2_L1L2_' + str(total_iterations) + '.mat'\n"," save_model(model, model_path, weights_path)\n","\n","save_model(model, model_path, weights_path)"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Starting training...\n","Loading Abnormal videos Features...\n"],"name":"stdout"},{"output_type":"error","ename":"IndexError","evalue":"ignored","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)","\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[0mAbnormalPath\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mAllClassPath\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mAll_class_files\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Path of abnormal already computed C3D features\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 38\u001b[0m \u001b[0mNormalPath\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mAllClassPath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mAll_class_files\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Path of Normal already computed C3D features\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 39\u001b[0;31m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtargets\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mload_dataset_Train_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mAbnormalPath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mNormalPath\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# Load normal and abnormal video C3D features\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 40\u001b[0m \u001b[0;31m# INPUTS: (1920x4096)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 41\u001b[0m \u001b[0;31m# Targets (1920,) (32*60 video)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m\u001b[0m in \u001b[0;36mload_dataset_Train_batch\u001b[0;34m(AbnormalPath, NormalPath)\u001b[0m\n\u001b[1;32m 79\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0miv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mAbnor_list_iter\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 80\u001b[0m \u001b[0mVideo_count\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mVideo_count\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 81\u001b[0;31m \u001b[0mVideoPath\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mAllVideos_Path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mAll_Videos\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0miv\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 82\u001b[0m \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mVideoPath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"r\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 83\u001b[0m \u001b[0mwords\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mIndexError\u001b[0m: list index out of range"]}]},{"cell_type":"markdown","metadata":{"id":"5UomHWz3N9SC","colab_type":"text"},"source":["# Testing\n","Predict the score for each segment within a video on Testing set\n","\n","1. Input: model's weight (*.mat file), specify the directory of testing video feauture (100 videos)\n","2. Output: .*mat file, each file corresponds to each video score. 
A video score is a vector of 32-d, each value is abnormal score of each video segments."]},{"cell_type":"code","metadata":{"id":"m8rNPi0vOCRm","colab_type":"code","colab":{}},"source":["numpy.random.seed(seed)\n","\n","\n","def load_model(json_path): # Function to load the model\n"," model = model_from_json(open(json_path).read())\n"," return model\n","\n","def load_weights(model, weight_path): # Function to load the model weights\n"," dict2 = loadmat(weight_path)\n"," dict = conv_dict(dict2)\n"," i = 0\n"," for layer in model.layers:\n"," weights = dict[str(i)]\n"," layer.set_weights(weights)\n"," i += 1\n"," return model\n","\n","def conv_dict(dict2):\n"," i = 0\n"," dict = {}\n"," for i in range(len(dict2)):\n"," if str(i) in dict2:\n"," if dict2[str(i)].shape == (0, 0):\n"," dict[str(i)] = dict2[str(i)]\n"," else:\n"," weights = dict2[str(i)][0]\n"," weights2 = []\n"," for weight in weights:\n"," if weight.shape in [(1, x) for x in range(0, 5000)]:\n"," weights2.append(weight[0])\n"," else:\n"," weights2.append(weight)\n"," dict[str(i)] = weights2\n"," return dict\n","\n","# Load Video\n","\n","def load_dataset_One_Video_Features(Test_Video_Path):\n","\n"," VideoPath =Test_Video_Path\n"," f = open(VideoPath, \"r\")\n"," words = f.read().split()\n"," num_feat = int(len(words) / 4096)\n"," # Number of features per video to be loaded. In our case num_feat=32, as we divide the video into 32 segments. Note that\n"," # we have already computed C3D features for the whole video and divided the video features into 32 segments.\n","\n"," count = -1;\n"," VideoFeatues = []\n"," for feat in range(0, num_feat):\n"," feat_row1 = np.float32(words[feat * 4096:feat * 4096 + 4096])\n"," count = count + 1\n"," if count == 0:\n"," VideoFeatues = feat_row1\n"," if count > 0:\n"," VideoFeatues = np.vstack((VideoFeatues, feat_row1))\n"," AllFeatures = VideoFeatues\n","\n"," return AllFeatures\n","\n","\n","\n","print(\"Starting testing...\")\n","\n","\n","AllTest_Video_Path = '/content/gdrive/My Drive/HumanBehaviors/C3D_Feature_txt/Test_RoadAccidents/'\n","# AllTest_Video_Path contains C3D features (txt file)of each video.\n","\n","Results_Path = '/content/gdrive/My Drive/HumanBehaviors/Result/PredictedScore/'\n","# Results_Path is the folder where you can save your results\n","\n","Model_dir='/content/gdrive/My Drive/HumanBehaviors/Result/Weight/'\n","# Model_dir is the folder where we have placed our trained weights\n","\n","weights_path = Model_dir + 'weights_ALL_L1L2_10000.mat'\n","# weights_path is Trained model weights\n","\n","model_path = Model_dir + 'model.json'\n","\n","if not os.path.exists(Results_Path):\n"," os.makedirs(Results_Path)\n","\n","All_Test_files= listdir(AllTest_Video_Path)\n","All_Test_files.sort()\n","\n","model=load_model(model_path)\n","load_weights(model, weights_path)\n","nVideos=len(All_Test_files)\n","time_before = datetime.now()\n","\n","for iv in range(nVideos):\n","\n"," Test_Video_Path = os.path.join(AllTest_Video_Path, All_Test_files[iv])\n"," inputs=load_dataset_One_Video_Features(Test_Video_Path) # 32 segments features for one testing video\n"," predictions = model.predict_on_batch(inputs) # Get anomaly prediction for each of 32 video segments.\n"," aa=All_Test_files[iv]\n"," aa=aa[0:-6]\n"," A_predictions_path = Results_Path + aa + '.mat' # Save array of 1*32, containing anomaly score for each segment. 
Please see Evaluate Anomaly Detector to compute ROC.\n","    savemat(A_predictions_path, dict(predictions = predictions))\n","    print (A_predictions_path)\n","    print (\"Total Time took: \" + str(datetime.now() - time_before))"],"execution_count":null,"outputs":[]}]}
--------------------------------------------------------------------------------
/Codes/전처리/[Prep]_ch_01_Save_C3DFeatures_32Segments.py:
--------------------------------------------------------------------------------
1 | # clc()  # (MATLAB remnant) clears the code shown in the command window
2 | #-*- encoding: utf8 -*-
3 | import os
4 | import numpy as np
5 | import struct
6 | # clear(mstring('all'))
7 | # close(mstring('all'))
8 | 
9 | # This code saves already-computed C3D features as 32 (video feature) segments.
10 | # We assume that the C3D features for a video are already computed. We use the
11 | # default settings for computing C3D features, i.e., we compute C3D features for
12 | # every 16 frames and obtain the features from fc6.
13 | 
14 | 
15 | 
16 | def read_bin(input_file):
17 |     # e.g. input_file = r'.\normal_video_001\000032.fc6-1'
18 |     input_file = open(input_file, 'rb')
19 |     try:
20 |         sizes = [struct.unpack('i', input_file.read(4))[0] for i in range(5)]  # a .fc6-1 binary starts with five int32 sizes
21 |         m = np.prod(sizes)
22 |         data = [struct.unpack('f', input_file.read(4))[0] for i in range(m)]
23 |     finally:
24 |         input_file.close()  # the bare "input_file" here used to do nothing, leaking the handle
25 |     feature_vector = np.array(data)
26 | 
27 |     return feature_vector, feature_vector.shape
28 | 
29 | C3D_Path = r'C:\Users\cahca\OneDrive\바탕 화면\Warmingup_proj\TEST_FC'
30 | C3D_Path_Seg = r'C:\Users\cahca\OneDrive\바탕 화면\Warmingup_proj\OUTPUT'
31 | 
32 | if not os.path.isdir(C3D_Path_Seg):
33 |     os.mkdir(C3D_Path_Seg)
34 | 
35 | print('DONE')
36 | 
37 | All_Folder = os.listdir(C3D_Path)
38 | # All_Folder = All_Folder[3:end]
39 | subcript = '_C.txt'
40 | 
41 | for ifolder in All_Folder:
42 |     # START ONE LOOP WITH ONE FC FOLDER, e.g. Abuse028 has N=1392 frames
43 | 
44 |     Folder_Path = str(C3D_Path) + "\\" + str(ifolder)
45 |     # Folder_Path is the path of a folder that contains the C3D features (one per 16 frames) for a particular video.
46 |     # N=1392 frames --> it has [1392/16] = 88 fc6-1 files
47 | 
48 |     AllFiles = os.listdir(Folder_Path)  ## returns every *.fc6-1 file in the folder as a list
49 |     # fc6-1 files in the feature directory; each file = one clip of the video
50 |     # one clip = 16 frames
51 | 
52 |     if len(AllFiles) == 0:
53 |         print("no fc6-1 file in path")
54 |         continue
55 | 
56 |     feature_vector = np.zeros((len(AllFiles), 4096))
57 |     # each fc6-1 = 1 clip of 16 frames = 4096-d ==> in total there are [N/16]=88 such clips
58 |     # Iterate over each fc6-1 file (16 frames each)
59 |     for ifile in range(0, len(AllFiles)):
60 |         FilePath = Folder_Path + '\\' + AllFiles[ifile]
61 | 
62 |         data, s = read_bin(FilePath)  # one call returns both data and shape; reading the file twice is unnecessary
63 | 
64 |         feature_vector[ifile] = data  # one row (4096-d) of the 88x4096 matrix is assigned one clip feature
65 | 
66 |     # clear(mstring('data'))  # (MATLAB remnant) deletes the variable "data" inside MATLAB
67 | 
68 |     # At this point, feature_vector is filled with the actual data of all
69 |     # 16-frame clips in the video; each clip is 4096-d, so the 88x4096 matrix
70 |     # is now filled with actual data
71 |     # if sum(sum(feature_vector, [])) == 0:  ## TODO: should become a one-row matrix of per-column sums
72 |     #     print('error1')
73 | 
74 |     # Write the C3D features to a text file to load in
75 |     # Training_AnomalyDetector_public (you can directly use the .mat format if you want).
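    # ---- Editor's sketch (not part of the original script): the .mat alternative
    # ---- mentioned above could look like this, assuming scipy.io is available and
    # ---- using Segments_Features, the 32x4096 matrix built further below:
    #
    #   from scipy.io import savemat, loadmat
    #   mat_path = os.path.join(C3D_Path_Seg, ifolder + '_C.mat')
    #   savemat(mat_path, {'segments': Segments_Features})   # write once per video
    #   segments = loadmat(mat_path)['segments']             # reload -> (32, 4096)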
76 |     txt_file_name = C3D_Path_Seg + '/' + ifolder + subcript
77 |     # feature txt name, e.g. Abuse028_x264_C.txt
78 | 
79 |     # if os.path.exists(txt_file_name):  # skip videos that were already processed
80 |     #     continue
81 | 
82 |     fid1 = open(txt_file_name, 'w')
83 |     ## sum(x,1) = sum vertically (per column)
84 |     ## sum(x,2) = sum horizontally (per row)
85 |     # (MATLAB-remnant sanity checks, rewritten as runnable Python;
86 |     # in MATLAB, k = find(X,n) returns the first n indices of the nonzero elements of X)
87 |     if not feature_vector.any(axis=1).all():   # some clip row is all zero
88 |         print('error2')
89 | 
90 |     if np.isnan(feature_vector).any():         # NaN somewhere in the features
91 |         print('error3')
92 | 
93 |     if np.isinf(feature_vector).any():         # Inf somewhere in the features
94 |         print('error4')
95 | 
96 |     # 32 Segments
97 | 
98 |     Segments_Features = np.zeros((32, 4096))  # 32 rows, 4096 columns
99 |     thirty2_shots = np.linspace(0, len(AllFiles) - 1, 33).round(0).astype(int)
100 |     # thirty2_shots divides the 88 clip indices into 32 segments with 33 boundary
101 |     # points, e.g. [0, 3, 5, 9, ..., 82, 84, 87]; a 1x33 vector. The boundaries are
102 |     # 0-based and inclusive, so clip 0 and the final clip are both used.
103 |     count = -1
104 |     # WRITE 88x4096 TO 32x4096
105 |     for ishots in range(0, len(thirty2_shots) - 1):  # ishots runs from 0 to 31
106 |         ss = int(thirty2_shots[ishots])          # start clip index in the 88x4096 matrix
107 |         ee = int(thirty2_shots[ishots + 1] - 1)  # end clip index (inclusive) in the 88x4096 matrix
108 | 
109 |         # print(ss, ee)
110 | 
111 |         # THIS IS THE FEATURE FOR ONE SEGMENT.
112 |         # In every case below, temp_vect is a single 4096-d vector chosen by the
113 |         # start index ss and end index ee
114 |         if ss == ee:
115 |             temp_vect = feature_vector[ss]  # extract row ss: the segment covers exactly one clip
116 | 
117 |         elif ee < ss:
118 |             temp_vect = feature_vector[ss]  # degenerate segment (more segments than clips): reuse clip ss
119 | 
120 |         else:
121 |             temp_vect = feature_vector[ss:ee + 1].mean(axis=0)  # one 1x4096 row holding the mean of each column
122 |             # ss < ee --> take all clip vectors from ss to ee (e.g. 3 vectors) out of
123 |             # the 88x4096 matrix, then take their mean to get a new 4096-d vector
124 |             # (the mean of a matrix over axis 0 has the shape of one row);
125 |             # note the slice end is ee + 1 because a Python slice excludes its
126 |             # end index, otherwise clip ee would be dropped from every segment.
127 | 
128 | 
129 |         # AFTER OBTAINING ONE SEGMENT FEATURE, NORMALIZE IT TO UNIT LENGTH (L2)
130 |         if np.linalg.norm(temp_vect) == 0:     # guard before dividing
131 |             print('error5')
132 |         temp_vect = temp_vect / np.linalg.norm(temp_vect)
133 |         # divide by the norm-2 (L2, Euclidean norm) of the vector = sqrt(sum(x[i]^2))
134 | 
135 | 
136 | 
137 | 
138 |         count = count + 1  # next segment (max = 32)
139 |         Segments_Features[count] = temp_vect  # push each segment feature into the final 32-segment video feature
140 | 
141 |     # verify
142 | 
143 |     if not Segments_Features.any(axis=1).all():  # some segment row is all zero
144 |         print('error6')
145 | 
146 |     if np.isnan(Segments_Features).any():
147 |         print('error7')
148 | 
149 | 
150 |     if np.isinf(Segments_Features).any():
151 |         print('error8')
152 | 
153 | 
154 |     # save the 32 segment features in a text file (you can directly save and load a .mat file in Python as well).
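    # ---- Editor's sketch (not part of the original script): the 32-segment pooling
    # ---- loop above, condensed into one helper; assumes feature_vector has shape
    # ---- (n_clips, 4096) and partitions the clips as [edges[i], edges[i+1]):
    #
    #   def to_32_segments(feature_vector, n_seg=32):
    #       edges = np.linspace(0, len(feature_vector), n_seg + 1).astype(int)
    #       segs = np.zeros((n_seg, feature_vector.shape[1]))
    #       for i in range(n_seg):
    #           s, e = edges[i], max(edges[i + 1], edges[i] + 1)  # never an empty slice
    #           v = feature_vector[s:e].mean(axis=0)
    #           segs[i] = v / np.linalg.norm(v)
    #       return segs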
155 |     print(Segments_Features)
156 |     print(Segments_Features.shape)
157 | 
158 |     for i in range(0, Segments_Features.shape[0]):
159 |         feat_text = ' '.join(str(v) for v in Segments_Features[i])  # space-separated floats; str(list) would emit brackets and commas that the training loader's read().split() cannot parse as floats
160 |         fid1.write(feat_text)
161 |         fid1.write('\n')
162 | 
163 |     fid1.close()
164 | 
165 | 
--------------------------------------------------------------------------------
/Codes/전처리/[Prep]_mj_01_check_video_size.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |     "nbformat": 4,
3 |     "nbformat_minor": 0,
4 |     "metadata": {
5 |         "colab": {
6 |             "name": "check_video_size.ipynb",
7 |             "provenance": [],
8 |             "collapsed_sections": []
9 |         },
10 |         "kernelspec": {
11 |             "name": "python3",
12 |             "display_name": "Python 3"
13 |         }
14 |     },
15 |     "cells": [
16 |         {
17 |             "cell_type": "code",
18 |             "metadata": {
19 |                 "id": "qLNXcA4k4GoY",
20 |                 "colab_type": "code",
21 |                 "colab": {}
22 |             },
23 |             "source": [
24 |                 "from google.colab import drive\n",
25 |                 "drive.mount('/content/gdrive')"
26 |             ],
27 |             "execution_count": null,
28 |             "outputs": []
29 |         },
30 |         {
31 |             "cell_type": "code",
32 |             "metadata": {
33 |                 "id": "U6wThuC-4NxN",
34 |                 "colab_type": "code",
35 |                 "colab": {
36 |                     "base_uri": "https://localhost:8080/",
37 |                     "height": 161
38 |                 },
39 |                 "outputId": "c1fd3c44-bf9e-46f9-e189-b72ad161907b"
40 |             },
41 |             "source": [
42 |                 "# check Ubuntu version\n",
43 |                 "!lsb_release -a\n",
44 |                 "# by default, CUDA is enabled under GPU mode of Google Colab\n",
45 |                 "!ls /usr/local"
46 |             ],
47 |             "execution_count": null,
48 |             "outputs": [
49 |                 {
50 |                     "output_type": "stream",
51 |                     "text": [
52 |                         "No LSB modules are available.\n",
53 |                         "Distributor ID:\tUbuntu\n",
54 |                         "Description:\tUbuntu 18.04.5 LTS\n",
55 |                         "Release:\t18.04\n",
56 |                         "Codename:\tbionic\n",
57 |                         "bin\t cuda-10.1 _gcs_config_ops.so LICENSE.txt setup.cfg xgboost\n",
58 |                         "cuda\t etc\t include\t\t man\t share\n",
59 |                         "cuda-10.0 games lib\t\t sbin\t src\n"
60 |                     ],
61 |                     "name": "stdout"
62 |                 }
63 |             ]
64 |         },
65 |         {
66 |             "cell_type": "code",
67 |             "metadata": {
68 |                 "id": "iWCLAg8g4N77",
69 |                 "colab_type": "code",
70 |                 "colab": {
71 |                     "base_uri": "https://localhost:8080/",
72 |                     "height": 53
73 |                 },
74 |                 "outputId": "79cbf80a-1924-40cb-efde-b6318bb27cb6"
75 |             },
76 |             "source": [
77 |                 "# move to the video directory\n",
78 |                 "%cd /content/drive/My Drive/warming-up project/yOnOm/raw_videos/\n",
79 |                 "!ls"
80 |             ],
81 |             "execution_count": null,
82 |             "outputs": [
83 |                 {
84 |                     "output_type": "stream",
85 |                     "text": [
86 |                         "/content/drive/My Drive/warming-up project/yOnOm/raw_videos\n",
87 |                         "videos_abnormal videos_normal\n"
88 |                     ],
89 |                     "name": "stdout"
90 |                 }
91 |             ]
92 |         },
93 |         {
94 |             "cell_type": "code",
95 |             "metadata": {
96 |                 "id": "DzohTswmLpOs",
97 |                 "colab_type": "code",
98 |                 "colab": {
99 |                     "base_uri": "https://localhost:8080/",
100 |                     "height": 73
101 |                 },
102 |                 "outputId": "8d656b64-ccee-4388-ccc3-cdb19d55af54"
103 |             },
104 |             "source": [
105 |                 "import glob\n",
106 |                 "abnormal = './videos_abnormal'\n",
107 |                 "normal = './videos_normal'\n",
108 |                 "\n",
109 |                 "file_ab = sorted(glob.glob(abnormal + \"/*.mp4\"))\n",
110 |                 "file_no = sorted(glob.glob(normal + '/*.mp4'))\n",
111 |                 "print(file_ab)\n",
112 |                 "print(file_no)"
113 |             ],
114 |             "execution_count": null,
115 |             "outputs": [
116 |                 {
117 |                     "output_type": "stream",
118 |                     "text": [
119 |                         "['./videos_abnormal/abnormal_video_001.mp4', './videos_abnormal/abnormal_video_002.mp4', './videos_abnormal/abnormal_video_003.mp4', './videos_abnormal/abnormal_video_004.mp4', './videos_abnormal/abnormal_video_005.mp4', 
'./videos_abnormal/abnormal_video_006.mp4', './videos_abnormal/abnormal_video_007.mp4', './videos_abnormal/abnormal_video_008.mp4', './videos_abnormal/abnormal_video_009.mp4', './videos_abnormal/abnormal_video_010.mp4', './videos_abnormal/abnormal_video_011.mp4', './videos_abnormal/abnormal_video_012.mp4', './videos_abnormal/abnormal_video_013.mp4', './videos_abnormal/abnormal_video_014.mp4', './videos_abnormal/abnormal_video_015.mp4', './videos_abnormal/abnormal_video_016.mp4', './videos_abnormal/abnormal_video_017.mp4', './videos_abnormal/abnormal_video_018.mp4', './videos_abnormal/abnormal_video_019.mp4', './videos_abnormal/abnormal_video_020.mp4', './videos_abnormal/abnormal_video_021.mp4', './videos_abnormal/abnormal_video_022.mp4', './videos_abnormal/abnormal_video_023.mp4', './videos_abnormal/abnormal_video_024.mp4', './videos_abnormal/abnormal_video_025.mp4', './videos_abnormal/abnormal_video_026.mp4', './videos_abnormal/abnormal_video_027.mp4', './videos_abnormal/abnormal_video_028.mp4', './videos_abnormal/abnormal_video_029.mp4', './videos_abnormal/abnormal_video_030.mp4', './videos_abnormal/abnormal_video_031.mp4', './videos_abnormal/abnormal_video_032.mp4', './videos_abnormal/abnormal_video_033.mp4', './videos_abnormal/abnormal_video_034.mp4', './videos_abnormal/abnormal_video_035.mp4', './videos_abnormal/abnormal_video_036.mp4', './videos_abnormal/abnormal_video_037.mp4', './videos_abnormal/abnormal_video_038.mp4', './videos_abnormal/abnormal_video_039.mp4', './videos_abnormal/abnormal_video_040.mp4', './videos_abnormal/abnormal_video_041.mp4', './videos_abnormal/abnormal_video_042.mp4', './videos_abnormal/abnormal_video_043.mp4', './videos_abnormal/abnormal_video_044.mp4']\n", 120 | "['./videos_normal/normal_video_001.mp4', './videos_normal/normal_video_002.mp4', './videos_normal/normal_video_003.mp4', './videos_normal/normal_video_004.mp4', './videos_normal/normal_video_005.mp4', './videos_normal/normal_video_006.mp4', './videos_normal/normal_video_007.mp4', './videos_normal/normal_video_008.mp4', './videos_normal/normal_video_009.mp4', './videos_normal/normal_video_010.mp4', './videos_normal/normal_video_011.mp4', './videos_normal/normal_video_012.mp4', './videos_normal/normal_video_013.mp4', './videos_normal/normal_video_014.mp4', './videos_normal/normal_video_015.mp4', './videos_normal/normal_video_016.mp4', './videos_normal/normal_video_017.mp4', './videos_normal/normal_video_018.mp4', './videos_normal/normal_video_019.mp4', './videos_normal/normal_video_020.mp4', './videos_normal/normal_video_021.mp4', './videos_normal/normal_video_022.mp4', './videos_normal/normal_video_023.mp4', './videos_normal/normal_video_024.mp4', './videos_normal/normal_video_025.mp4', './videos_normal/normal_video_026.mp4', './videos_normal/normal_video_027.mp4', './videos_normal/normal_video_028.mp4', './videos_normal/normal_video_029.mp4', './videos_normal/normal_video_030.mp4', './videos_normal/normal_video_031.mp4', './videos_normal/normal_video_032.mp4', './videos_normal/normal_video_033.mp4', './videos_normal/normal_video_034.mp4', './videos_normal/normal_video_035.mp4', './videos_normal/normal_video_036.mp4', './videos_normal/normal_video_037.mp4', './videos_normal/normal_video_038.mp4', './videos_normal/normal_video_039.mp4', './videos_normal/normal_video_040.mp4', './videos_normal/normal_video_041.mp4', './videos_normal/normal_video_042.mp4']\n" 121 | ], 122 | "name": "stdout" 123 | } 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "metadata": { 129 | "id": "CQPQ3jMT4OAn", 130 
| "colab_type": "code", 131 | "colab": { 132 | "base_uri": "https://localhost:8080/", 133 | "height": 35 134 | }, 135 | "outputId": "b1d3e874-5e8e-4fad-ffa0-a54257b6dab3" 136 | }, 137 | "source": [ 138 | "# total frames\n", 139 | " ## input.mp4에 영상 파일 이름 넣기\n", 140 | "!ffprobe -v error -select_streams v:0 -show_entries stream=nb_frames -of default=nokey=1:noprint_wrappers=1 ./videos_abnormal/abnormal_video_001.mp4" 141 | ], 142 | "execution_count": null, 143 | "outputs": [ 144 | { 145 | "output_type": "stream", 146 | "text": [ 147 | "689\n" 148 | ], 149 | "name": "stdout" 150 | } 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "metadata": { 156 | "id": "2Ig_DdUB4OEU", 157 | "colab_type": "code", 158 | "colab": { 159 | "base_uri": "https://localhost:8080/", 160 | "height": 1000 161 | }, 162 | "outputId": "21ce7932-c896-4486-a1d8-14236af731b7" 163 | }, 164 | "source": [ 165 | "# Get dimensions of a video file\n", 166 | " ## 우리 파일\n", 167 | "import cv2\n", 168 | "\n", 169 | "for path in [file_ab, file_no]:\n", 170 | " for file_path in path: \n", 171 | " vid = cv2.VideoCapture(file_path)\n", 172 | " height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n", 173 | " width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n", 174 | " print(\"{} : {} x {}\".format(file_path.split('/')[-1], int(height), int(width)))" 175 | ], 176 | "execution_count": null, 177 | "outputs": [ 178 | { 179 | "output_type": "stream", 180 | "text": [ 181 | "abnormal_video_001.mp4 : 2160 x 3840\n", 182 | "abnormal_video_002.mp4 : 2160 x 3840\n", 183 | "abnormal_video_003.mp4 : 2160 x 3840\n", 184 | "abnormal_video_004.mp4 : 2160 x 3840\n", 185 | "abnormal_video_005.mp4 : 2160 x 3840\n", 186 | "abnormal_video_006.mp4 : 2160 x 3840\n", 187 | "abnormal_video_007.mp4 : 2160 x 3840\n", 188 | "abnormal_video_008.mp4 : 2160 x 3840\n", 189 | "abnormal_video_009.mp4 : 2160 x 3840\n", 190 | "abnormal_video_010.mp4 : 2160 x 3840\n", 191 | "abnormal_video_011.mp4 : 2160 x 3840\n", 192 | "abnormal_video_012.mp4 : 2160 x 3840\n", 193 | "abnormal_video_013.mp4 : 2160 x 3840\n", 194 | "abnormal_video_014.mp4 : 2160 x 3840\n", 195 | "abnormal_video_015.mp4 : 2160 x 3840\n", 196 | "abnormal_video_016.mp4 : 2160 x 3840\n", 197 | "abnormal_video_017.mp4 : 2160 x 3840\n", 198 | "abnormal_video_018.mp4 : 2160 x 3840\n", 199 | "abnormal_video_019.mp4 : 2160 x 3840\n", 200 | "abnormal_video_020.mp4 : 2160 x 3840\n", 201 | "abnormal_video_021.mp4 : 2160 x 3840\n", 202 | "abnormal_video_022.mp4 : 2160 x 3840\n", 203 | "abnormal_video_023.mp4 : 2160 x 3840\n", 204 | "abnormal_video_024.mp4 : 2160 x 3840\n", 205 | "abnormal_video_025.mp4 : 2160 x 3840\n", 206 | "abnormal_video_026.mp4 : 2160 x 3840\n", 207 | "abnormal_video_027.mp4 : 2160 x 3840\n", 208 | "abnormal_video_028.mp4 : 2160 x 3840\n", 209 | "abnormal_video_029.mp4 : 2160 x 3840\n", 210 | "abnormal_video_030.mp4 : 2160 x 3840\n", 211 | "abnormal_video_031.mp4 : 2160 x 3840\n", 212 | "abnormal_video_032.mp4 : 2160 x 3840\n", 213 | "abnormal_video_033.mp4 : 2160 x 3840\n", 214 | "abnormal_video_034.mp4 : 2160 x 3840\n", 215 | "abnormal_video_035.mp4 : 2160 x 3840\n", 216 | "abnormal_video_036.mp4 : 2160 x 3840\n", 217 | "abnormal_video_037.mp4 : 2160 x 3840\n", 218 | "abnormal_video_038.mp4 : 2160 x 3840\n", 219 | "abnormal_video_039.mp4 : 2160 x 3840\n", 220 | "abnormal_video_040.mp4 : 2160 x 3840\n", 221 | "abnormal_video_041.mp4 : 2160 x 3840\n", 222 | "abnormal_video_042.mp4 : 2160 x 3840\n", 223 | "abnormal_video_043.mp4 : 2160 x 3840\n", 224 | "abnormal_video_044.mp4 : 2160 x 3840\n", 225 | 
"normal_video_001.mp4 : 2160 x 3840\n", 226 | "normal_video_002.mp4 : 2160 x 3840\n", 227 | "normal_video_003.mp4 : 2160 x 3840\n", 228 | "normal_video_004.mp4 : 2160 x 3840\n", 229 | "normal_video_005.mp4 : 2160 x 3840\n", 230 | "normal_video_006.mp4 : 2160 x 3840\n", 231 | "normal_video_007.mp4 : 2160 x 3840\n", 232 | "normal_video_008.mp4 : 2160 x 3840\n", 233 | "normal_video_009.mp4 : 2160 x 3840\n", 234 | "normal_video_010.mp4 : 2160 x 3840\n", 235 | "normal_video_011.mp4 : 2160 x 3840\n", 236 | "normal_video_012.mp4 : 2160 x 3840\n", 237 | "normal_video_013.mp4 : 2160 x 3840\n", 238 | "normal_video_014.mp4 : 2160 x 3840\n", 239 | "normal_video_015.mp4 : 2160 x 3840\n", 240 | "normal_video_016.mp4 : 2160 x 3840\n", 241 | "normal_video_017.mp4 : 2160 x 3840\n", 242 | "normal_video_018.mp4 : 2160 x 3840\n", 243 | "normal_video_019.mp4 : 2160 x 3840\n", 244 | "normal_video_020.mp4 : 2160 x 3840\n", 245 | "normal_video_021.mp4 : 2160 x 3840\n", 246 | "normal_video_022.mp4 : 2160 x 3840\n", 247 | "normal_video_023.mp4 : 2160 x 3840\n", 248 | "normal_video_024.mp4 : 2160 x 3840\n", 249 | "normal_video_025.mp4 : 2160 x 3840\n", 250 | "normal_video_026.mp4 : 2160 x 3840\n", 251 | "normal_video_027.mp4 : 2160 x 3840\n", 252 | "normal_video_028.mp4 : 2160 x 3840\n", 253 | "normal_video_029.mp4 : 2160 x 3840\n", 254 | "normal_video_030.mp4 : 2160 x 3840\n", 255 | "normal_video_031.mp4 : 2160 x 3840\n", 256 | "normal_video_032.mp4 : 2160 x 3840\n", 257 | "normal_video_033.mp4 : 2160 x 3840\n", 258 | "normal_video_034.mp4 : 2160 x 3840\n", 259 | "normal_video_035.mp4 : 2160 x 3840\n", 260 | "normal_video_036.mp4 : 2160 x 3840\n", 261 | "normal_video_037.mp4 : 2160 x 3840\n", 262 | "normal_video_038.mp4 : 2160 x 3840\n", 263 | "normal_video_039.mp4 : 2160 x 3840\n", 264 | "normal_video_040.mp4 : 2160 x 3840\n", 265 | "normal_video_041.mp4 : 2160 x 3840\n", 266 | "normal_video_042.mp4 : 2160 x 3840\n" 267 | ], 268 | "name": "stdout" 269 | } 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "metadata": { 275 | "id": "LjRIPeV24OHR", 276 | "colab_type": "code", 277 | "colab": { 278 | "base_uri": "https://localhost:8080/", 279 | "height": 71 280 | }, 281 | "outputId": "f0ae6d33-1d37-46c8-a16b-578c6e3c6d54" 282 | }, 283 | "source": [ 284 | "# Get dimensions of a video file\n", 285 | " ## 제공된 파일 \n", 286 | "% cd /content/drive/My Drive/warming-up project/c3d_ex/input/Train_Normal/\n", 287 | "\n", 288 | "path = glob.glob('*.avi')\n", 289 | "for file_path in path: \n", 290 | " vid = cv2.VideoCapture(file_path)\n", 291 | " height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n", 292 | " width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n", 293 | " print(\"{} : {} x {}\".format(file_path.split('/')[-1], int(height), int(width)))" 294 | ], 295 | "execution_count": null, 296 | "outputs": [ 297 | { 298 | "output_type": "stream", 299 | "text": [ 300 | "/content/drive/My Drive/warming-up project/c3d_ex/input/Train_Normal\n", 301 | "v_BaseballPitch_g01_c01.avi : 240 x 320\n", 302 | "v_ApplyEyeMakeup_g01_c01.avi : 240 x 320\n" 303 | ], 304 | "name": "stdout" 305 | } 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "metadata": { 311 | "id": "2yjZ46bg4OKk", 312 | "colab_type": "code", 313 | "colab": {} 314 | }, 315 | "source": [ 316 | "# Get dimensions of a video file\n", 317 | " ## 제공된 파일 UCF101_samples\n", 318 | "import cv2\n", 319 | "import glob\n", 320 | "% cd /content/drive/My Drive/warming-up project/UCF101_samples/\n", 321 | "!ls\n", 322 | "\n", 323 | "cates = ['./Abuse', './Arrest', './Arson', './Assault']\n", 
324 | "\n", 325 | "for cate in cates: \n", 326 | " path = glob.glob(cate + '/*.mp4')\n", 327 | " for file_path in path: \n", 328 | " vid = cv2.VideoCapture(file_path)\n", 329 | " height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n", 330 | " width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n", 331 | " print(\"{} : {} x {}\".format(file_path.split('/')[-1], int(height), int(width)))\n", 332 | "\n", 333 | "\n", 334 | "# Arson002_x264.mp4 : 240 x 320\n", 335 | "# Arson001_x264.mp4 : 240 x 320\n", 336 | "# Arson003_x264.mp4 : 240 x 320\n", 337 | "# Arson005_x264.mp4 : 240 x 320\n", 338 | "# Arson006_x264.mp4 : 240 x 320\n", 339 | "# Arson007_x264.mp4 : 240 x 320\n", 340 | "# Assault052_x264.mp4 : 240 x 320\n", 341 | "# Assault051_x264.mp4 : 240 x 320\n", 342 | "# Assault050_x264.mp4 : 240 x 320\n", 343 | "# Assault049_x264.mp4 : 240 x 320\n", 344 | "# Assault048_x264.mp4 : 240 x 320\n", 345 | "# Assault001_x264.mp4 : 240 x 320\n", 346 | "# Assault002_x264.mp4 : 240 x 320\n", 347 | "# Assault003_x264.mp4 : 240 x 320\n", 348 | "# Assault004_x264.mp4 : 240 x 320\n", 349 | "# Assault005_x264.mp4 : 240 x 320\n", 350 | "# Assault006_x264.mp4 : 240 x 320\n", 351 | "# Assault007_x264.mp4 : 240 x 320\n", 352 | "# Assault008_x264.mp4 : 240 x 320\n", 353 | "# Assault009_x264.mp4 : 240 x 320\n", 354 | "# Assault010_x264.mp4 : 240 x 320\n", 355 | "# Assault011_x264.mp4 : 240 x 320 " 356 | ], 357 | "execution_count": null, 358 | "outputs": [] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "metadata": { 363 | "id": "4PJdYKHv4ONg", 364 | "colab_type": "code", 365 | "colab": {} 366 | }, 367 | "source": [ 368 | "" 369 | ], 370 | "execution_count": null, 371 | "outputs": [] 372 | }, 373 | { 374 | "cell_type": "code", 375 | "metadata": { 376 | "id": "CfuxhtWG4OQo", 377 | "colab_type": "code", 378 | "colab": {} 379 | }, 380 | "source": [ 381 | "" 382 | ], 383 | "execution_count": null, 384 | "outputs": [] 385 | }, 386 | { 387 | "cell_type": "code", 388 | "metadata": { 389 | "id": "oM4AJSFO4OWS", 390 | "colab_type": "code", 391 | "colab": {} 392 | }, 393 | "source": [ 394 | "" 395 | ], 396 | "execution_count": null, 397 | "outputs": [] 398 | } 399 | ] 400 | } -------------------------------------------------------------------------------- /Codes/전처리/[Prep]_sy_01_C3D_with_colab.ipynb: -------------------------------------------------------------------------------- 1 | {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"C3D_with_colab.ipynb","provenance":[],"collapsed_sections":[],"toc_visible":true,"mount_file_id":"1HofU29uPnYklMA0GkVGfOEXqMiM4rtSn","authorship_tag":"ABX9TyN65h90wnQZQyIn80CyNqRs"},"kernelspec":{"name":"python3","display_name":"Python 3"},"accelerator":"GPU"},"cells":[{"cell_type":"markdown","metadata":{"id":"GkwKki9ctzVp","colab_type":"text"},"source":["# caffe2를 GPU 버전으로 설치\n","https://stackoverflow.com/questions/48500120/may-i-install-caffe-or-caffe2-on-google-colaboratory"]},{"cell_type":"code","metadata":{"id":"TPGlONa4AYH5","colab_type":"code","colab":{}},"source":["!apt install -y caffe-cuda"],"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"xmRWfb6NuaCK","colab_type":"text"},"source":["# 깃허브 보고 따라하기 1\n","https://github.com/rutviz/anomaly-detection\n","- training안의 ipynb 
따라함"]},{"cell_type":"code","metadata":{"id":"weMqUtm1Anub","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":35},"executionInfo":{"status":"ok","timestamp":1600089687744,"user_tz":-540,"elapsed":938,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"4fadebc9-6bc2-4b80-ab03-e1952afb381b"},"source":["##### required\n","from google.colab import drive\n","drive.mount('/content/drive/')"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Drive already mounted at /content/drive/; to attempt to forcibly remount, call drive.mount(\"/content/drive/\", force_remount=True).\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"Wt5AURi9uoKv","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":198},"executionInfo":{"status":"ok","timestamp":1600089724741,"user_tz":-540,"elapsed":4975,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"881fae67-d9df-449d-c53e-d464107c59e2"},"source":["#### required\n","!pip install configparser"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Collecting configparser\n"," Downloading https://files.pythonhosted.org/packages/4b/6b/01baa293090240cf0562cc5eccb69c6f5006282127f2b846fad011305c79/configparser-5.0.0-py3-none-any.whl\n","Installing collected packages: configparser\n","Successfully installed configparser-5.0.0\n"],"name":"stdout"},{"output_type":"display_data","data":{"application/vnd.colab-display-data+json":{"pip_warning":{"packages":["configparser"]}}},"metadata":{"tags":[]}}]},{"cell_type":"code","metadata":{"id":"ddWesojhuoHY","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":410},"executionInfo":{"status":"ok","timestamp":1600089737852,"user_tz":-540,"elapsed":5659,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"9c474416-b378-4cc1-c118-b205db8a8a94"},"source":["##### required\n","!pip install keras==1.1.0"],"execution_count":null,"outputs":[{"output_type":"stream","text":["Collecting keras==1.1.0\n","\u001b[?25l Downloading https://files.pythonhosted.org/packages/a4/5e/7e64f15f0e5ae65a29c738fc261ce1e0a72d92acfc45f06ef906c6e84bf2/Keras-1.1.0.tar.gz (150kB)\n","\u001b[K |████████████████████████████████| 153kB 2.8MB/s \n","\u001b[?25hRequirement already satisfied: theano in /usr/local/lib/python3.6/dist-packages (from keras==1.1.0) (1.0.5)\n","Requirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from keras==1.1.0) (3.13)\n","Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from keras==1.1.0) (1.15.0)\n","Requirement already satisfied: scipy>=0.14 in /usr/local/lib/python3.6/dist-packages (from theano->keras==1.1.0) (1.4.1)\n","Requirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python3.6/dist-packages (from theano->keras==1.1.0) (1.18.5)\n","Building wheels for collected packages: keras\n"," Building wheel for keras (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n"," Created wheel for keras: filename=Keras-1.1.0-cp36-none-any.whl size=178685 sha256=ede9d5d45df0bc35256e52afd78d3791df913c78196d78341380e735b692e2dc\n"," Stored in directory: /root/.cache/pip/wheels/ae/83/3e/c42ce0672e537640ee706143ebdd1dd691b7693b4ca50f72a8\n","Successfully built keras\n","\u001b[31mERROR: textgenrnn 1.4.1 has requirement keras>=2.1.5, but you'll have keras 1.1.0 which is incompatible.\u001b[0m\n","\u001b[31mERROR: kapre 0.1.3.1 has requirement keras>=2.0.0, but you'll have keras 1.1.0 which is incompatible.\u001b[0m\n","\u001b[31mERROR: fancyimpute 0.4.3 has requirement keras>=2.0.0, but you'll have keras 1.1.0 which is incompatible.\u001b[0m\n","Installing collected packages: keras\n"," Found existing installation: Keras 2.4.3\n"," Uninstalling Keras-2.4.3:\n"," Successfully uninstalled Keras-2.4.3\n","Successfully installed keras-1.1.0\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"Yg3IEL23uoFr","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":124},"executionInfo":{"status":"ok","timestamp":1600161087202,"user_tz":-540,"elapsed":4059,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"ddee3d9d-6489-4b06-ac8d-e20536ee54c2"},"source":["##### required\n","# 이유는 모르겠는데 런타임할 때마다 install해야함\n","!pip install path.py"],"execution_count":1,"outputs":[{"output_type":"stream","text":["Collecting path.py\n"," Downloading https://files.pythonhosted.org/packages/8f/04/130b7a538c25693c85c4dee7e25d126ebf5511b1eb7320e64906687b159e/path.py-12.5.0-py3-none-any.whl\n","Collecting path\n"," Downloading https://files.pythonhosted.org/packages/cb/81/b9090d24e60369fd9413b92fcd87e13a37bf43dad3427d35e09915f788ac/path-15.0.0-py3-none-any.whl\n","Installing collected packages: path, path.py\n","Successfully installed path-15.0.0 path.py-12.5.0\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"0JcXQ80duv_h","colab_type":"code","colab":{},"executionInfo":{"status":"ok","timestamp":1600161479783,"user_tz":-540,"elapsed":4462,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}}},"source":["# from keras.models import Sequential 에서 모든 keras앞에 tensorflow.을 붙임\n","from tensorflow.keras import Sequential\n","from tensorflow.keras.models import Model\n","from tensorflow.keras.layers import Dense, Dropout, Activation, LSTM, Reshape\n","from tensorflow.keras.regularizers import l2\n","from tensorflow.keras.optimizers import SGD, Adam, Adagrad\n","from scipy.io import loadmat, savemat\n","from tensorflow.keras.models import model_from_json\n","import theano.tensor as T\n","import tensorflow as tf\n","import theano\n","import configparser\n","import collections\n","import time\n","import path\n","import os\n","from os import listdir\n","import skimage.transform\n","from skimage import color\n","import numpy as np\n","import numpy\n","from datetime import datetime"],"execution_count":2,"outputs":[]},{"cell_type":"code","metadata":{"id":"0cWchmIRuv3Y","colab_type":"code","colab":{},"executionInfo":{"status":"ok","timestamp":1600161481843,"user_tz":-540,"elapsed":732,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}}},"source":["def save_model(model, json_path, weight_path):\n"," json_string = 
model.to_json()\n"," open(json_path, 'w').write(json_string)\n"," dict = {}\n"," i = 0\n"," for layer in model.layers:\n"," weights = layer.get_weights()\n"," my_list = np.zeros(len(weights), dtype=np.object)\n"," my_list[:] = weights\n"," dict[str(i)] = my_list\n"," i += 1\n"," savemat(weight_path, dict)"],"execution_count":3,"outputs":[]},{"cell_type":"code","metadata":{"id":"i5YJXB5AyLPR","colab_type":"code","colab":{},"executionInfo":{"status":"ok","timestamp":1600161483222,"user_tz":-540,"elapsed":762,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}}},"source":["def load_model(json_path): # Function to load the model\n"," model = model_from_json(open(json_path).read()) # encoding=\"UTF-8\"을 넣으려고 했지만 실패\n"," return model"],"execution_count":4,"outputs":[]},{"cell_type":"code","metadata":{"id":"xqbBMXXuyLT2","colab_type":"code","colab":{},"executionInfo":{"status":"ok","timestamp":1600161486321,"user_tz":-540,"elapsed":739,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}}},"source":["def load_dataset_Train_batch(AbnormalPath, NormalPath):\n"," \n"," batchsize=60\n"," n_exp= int(batchsize/2)\n","\n"," Num_abnormal = 900\n"," Num_Normal = 792.\n","\n","\n"," Abnor_list_iter = np.random.permutation(Num_abnormal)\n"," Abnor_list_iter = Abnor_list_iter[Num_abnormal-n_exp:]\n"," Norm_list_iter = np.random.permutation(Num_Normal)\n"," Norm_list_iter = Norm_list_iter[Num_Normal-n_exp:]\n"," \n"," All_Videos=[]\n"," with open(AbnormalPath+\"/anomaly.txt\", 'r') as f1: #file contain path to anomaly video file.\n"," for line in f1:\n"," All_Videos.append(line.strip())\n"," AllFeatures = []\n"," print(\"Loading Anomaly videos Features...\")\n","\n"," Video_count=-1\n"," for iv in Abnor_list_iter:\n"," Video_count=Video_count+1\n"," VideoPath = os.path.join(AbnormalPath, All_Videos[iv])\n"," f = open(VideoPath, \"r\")\n"," words = f.read().split()\n"," num_feat = len(words) / 4096\n"," \n"," count = -1;\n"," VideoFeatues = []\n"," for feat in range(0, int(num_feat)):\n"," feat_row1 = np.float32(words[feat * 4096:feat * 4096 + 4096])\n"," count = count + 1\n"," if count == 0:\n"," VideoFeatues = feat_row1\n"," if count > 0:\n"," VideoFeatues = np.vstack((VideoFeatues, feat_row1))\n","\n"," if Video_count == 0:\n"," AllFeatures = VideoFeatues\n"," if Video_count > 0:\n"," AllFeatures = np.vstack((AllFeatures, VideoFeatues))\n"," print(\" Abnormal Features loaded\")\n","\n"," All_Videos=[]\n"," with open(NormalPath+\"/normal.txt\", 'r') as f1: #file contain path to normal video file.\n"," for line in f1:\n"," All_Videos.append(line.strip())\n"," \n"," print(\"Loading Normal videos...\")\n"," \n"," for iv in Norm_list_iter:\n"," VideoPath = os.path.join(NormalPath, All_Videos[iv])\n"," f = open(VideoPath, \"r\")\n"," words = f.read().split()\n"," feat_row1 = np.array([])\n"," num_feat = len(words) /4096\n"," count = -1;\n"," VideoFeatues = []\n"," for feat in range(0, int(num_feat)):\n"," feat_row1 = np.float32(words[feat * 4096:feat * 4096 + 4096])\n"," count = count + 1\n"," if count == 0:\n"," VideoFeatues = feat_row1\n"," if count > 0:\n"," VideoFeatues = np.vstack((VideoFeatues, feat_row1))\n"," feat_row1 = []\n"," AllFeatures = np.vstack((AllFeatures, VideoFeatues))\n","\n"," print(\"Features loaded\")\n","\n"," AllLabels = np.zeros(32*batchsize, dtype='uint8')\n"," 
th_loop1=n_exp*32\n"," th_loop2=n_exp*32-1\n","\n"," for iv in range(0, 32*batchsize):\n"," if iv< th_loop1:\n"," AllLabels[iv] = int(0)\n"," if iv > th_loop2:\n"," AllLabels[iv] = int(1)\n","\n"," return AllFeatures,AllLabels"],"execution_count":5,"outputs":[]},{"cell_type":"code","metadata":{"id":"jvBSLOADyLG_","colab_type":"code","colab":{},"executionInfo":{"status":"ok","timestamp":1600161490921,"user_tz":-540,"elapsed":749,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}}},"source":["#For custom loss function - ref = https://towardsdatascience.com/advanced-keras-constructing-complex-custom-losses-and-metrics-c07ca130a618. \n","\n","def custom_objective(y_true, y_pred):\n","\n"," y_true = T.flatten(y_true)\n"," y_pred = T.flatten(y_pred)\n"," \n"," n_seg = 32\n"," nvid = 60\n"," n_exp = nvid / 2\n"," Num_d=32*nvid\n","\n"," sub_max = T.ones_like(y_pred)\n"," sub_sum_labels = T.ones_like(y_true)\n"," sub_sum_l1=T.ones_like(y_true) \n"," sub_l2 = T.ones_like(y_true)\n","\n"," for ii in range(0, nvid, 1):\n"," \n"," mm = y_true[ii * n_seg:ii * n_seg + n_seg]\n"," sub_sum_labels = T.concatenate([sub_sum_labels, T.stack(T.sum(mm))])\n","\n"," Feat_Score = y_pred[ii * n_seg:ii * n_seg + n_seg]\n"," sub_max = T.concatenate([sub_max, T.stack(T.max(Feat_Score))]) \n"," sub_sum_l1 = T.concatenate([sub_sum_l1, T.stack(T.sum(Feat_Score))])\n","\n"," z1 = T.ones_like(Feat_Score)\n"," z2 = T.concatenate([z1, Feat_Score])\n"," z3 = T.concatenate([Feat_Score, z1])\n"," z_22 = z2[31:]\n"," z_44 = z3[:33]\n"," z = z_22 - z_44\n"," z = z[1:32]\n"," z = T.sum(T.sqr(z))\n"," sub_l2 = T.concatenate([sub_l2, T.stack(z)])\n","\n","\n"," sub_score = sub_max[Num_d:]\n"," F_labels = sub_sum_labels[Num_d:]\n"," \n","\n"," sub_sum_l1 = sub_sum_l1[Num_d:]\n"," sub_sum_l1 = sub_sum_l1[:n_exp]\n"," sub_l2 = sub_l2[Num_d:]\n"," sub_l2 = sub_l2[:n_exp]\n","\n"," indx_nor = theano.tensor.eq(F_labels, 32).nonzero()[0]\n"," indx_abn = theano.tensor.eq(F_labels, 0).nonzero()[0]\n","\n"," n_Nor=n_exp\n","\n"," Sub_Nor = sub_score[indx_nor]\n"," Sub_Abn = sub_score[indx_abn]\n","\n"," z = T.ones_like(y_true)\n"," for ii in range(0, n_Nor, 1):\n"," sub_z = T.maximum(1 - Sub_Abn + Sub_Nor[ii], 0)\n"," z = T.concatenate([z, T.stack(T.sum(sub_z))])\n","\n"," z = z[Num_d:]\n"," z = T.mean(z, axis=-1) + 0.00008*T.sum(sub_sum_l1) + 0.00008*T.sum(sub_l2)\n","\n"," return z"],"execution_count":6,"outputs":[]},{"cell_type":"code","metadata":{"id":"G2K99WQ2ymoI","colab_type":"code","colab":{"base_uri":"https://localhost:8080/","height":401},"executionInfo":{"status":"error","timestamp":1600161565892,"user_tz":-540,"elapsed":6771,"user":{"displayName":"최서윤","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgwZGkUZqiYw3xHdV1cCS1S_q0Rlh15mLgIMRwm=s64","userId":"00430026924922124027"}},"outputId":"d98e99fb-c7fa-43ce-b6dd-1c7f0c17dd70"},"source":["# Path contains C3D features (.txt file) of each video.\n","# Each text file contains 32 features, each of 4096 dimension\n","\n","\n","# AllClassPath='/content/drive/My Drive/C3D/C3D-v1.0/examples/c3d_feature_extraction/out' 를 아래로 변경\n","AllClassPath='/content/drive/My Drive/Colab Notebooks/C3D' # 각 비디오의 C3D features(txt file)을 가지고 있는 경로\n","\n","# output_dir='/content/' 를 아래로 변경\n","output_dir='/content/drive/My Drive/Colab Notebooks/C3D' # 결과를 저장할 경로\n","\n","# Output_dir save trained weights and model.\n","weights_path = output_dir + 'weights.mat' # 훈련된 모델의 가중치\n","model_path = 
output_dir + 'model.json' # 훈련된 가중치를 보관하는 폴더\n","\n","# model=load_model(\"/content/drive/My Drive/C3D/C3D-v1.0/examples/c3d_feature_extraction/out/\"+model_path) 를 아래로 변경\n","# model=load_model(\"/content/drive/My Drive/Colab Notebooks/C3D\"+model_path) 했는데 오류나서 visual코드보니까 없길래 다시 주석처리함\n","\n","\n","# .py에는 이렇게 되어있음\n","# model=load_model(model_path)\n","# load_weights(model, weights_path)\n","# nVideos=len(All_Test_files)\n","# time_before = datetime.now()\n","\n","\n","# Create Full connected Model\n","model = Sequential()\n","model.add(Dense(512, input_dim=4096,kernel_initializer='glorot_normal',kernel_regularizer=l2(0.001),activation='relu'))\n","model.add(Dropout(0.6))\n","model.add(Dense(32,kernel_initializer='glorot_normal',kernel_regularizer=l2(0.001)))\n","model.add(Dropout(0.6))\n","model.add(Dense(1,kernel_initializer='glorot_normal',kernel_regularizer=l2(0.001),activation='sigmoid'))\n","\n","adagrad=Adagrad(lr=0.01, epsilon=1e-08)\n","\n","model.compile(loss=custom_objective, optimizer=adagrad)\n","\n","if not os.path.exists(output_dir):\n"," os.makedirs(output_dir)\n","\n","All_class_files= listdir(AllClassPath)\n","All_class_files.sort()\n","loss_graph =[]\n","num_iters = 20000\n","total_iterations = 0\n","batchsize=60\n","time_before = datetime.now()\n","\n","for it_num in range(num_iters):\n"," inputs, targets=load_dataset_Train_batch(AllClassPath, AllClassPath)\n"," batch_loss =model.train_on_batch(inputs, targets)\n"," loss_graph = np.hstack((loss_graph, batch_loss))\n"," total_iterations += 1\n"," if total_iterations % 20 == 1:\n"," print (\"These iteration=\" + str(total_iterations) + \") took: \" + str(datetime.now() - time_before) + \", with loss of \" + str(batch_loss))\n","\n","save_model(model, model_path, weights_path)"],"execution_count":7,"outputs":[{"output_type":"error","ename":"IndexError","evalue":"ignored","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)","\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mit_num\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnum_iters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtargets\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mload_dataset_Train_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mAllClassPath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mAllClassPath\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0mbatch_loss\u001b[0m \u001b[0;34m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain_on_batch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtargets\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0mloss_graph\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhstack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_loss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m\u001b[0m in 
\u001b[0;36mload_dataset_Train_batch\u001b[0;34m(AbnormalPath, NormalPath)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0mAbnor_list_iter\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpermutation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mNum_abnormal\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0mAbnor_list_iter\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mAbnor_list_iter\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mNum_abnormal\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mn_exp\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0mNorm_list_iter\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpermutation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mNum_Normal\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 13\u001b[0m \u001b[0mNorm_list_iter\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mNorm_list_iter\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mNum_Normal\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mn_exp\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32mmtrand.pyx\u001b[0m in \u001b[0;36mnumpy.random.mtrand.RandomState.permutation\u001b[0;34m()\u001b[0m\n","\u001b[0;31mIndexError\u001b[0m: x must be an integer or at least 1-dimensional"]}]},{"cell_type":"code","metadata":{"id":"8LXpk6M3BBwq","colab_type":"code","colab":{}},"source":[""],"execution_count":null,"outputs":[]}]} -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AI 프로젝트 2 | 3 | ## CCTV 이상행동 감지 서비스 만들기 4 | ### 개요 및 개발 필요성 5 | - 인공지능이 CCTV 의 영상을 분석하고 , 이상행동을 감지하는 서비스 6 | - 지능형 CCTV로 효율적으로 관리인력을 분배, 범죄상황에 대한 분석 및 대응을 자동화할 수 있다. 7 | - - - 8 | ### 데이터 9 | - 수원시 이상행동 CCTV 데이터 10 | - https://aihub.or.kr/aidata/139/tool 11 | - UCF 이상, 정상 영상 데이터 12 | - https://visionlab.uncc.edu/download/summary/60-data/477-ucf-anomaly-detection-dataset 13 | - - - 14 | ### 전처리 15 | ![image](https://user-images.githubusercontent.com/66463059/102057232-f84bc680-3e30-11eb-847b-7d19dbc4122d.png) 16 | - 각 영상(.mp4) 에서 16프레임단위로 fc6-1 데이터를 생성한다. 17 | - 만들어진 데이터를 평균 내고 정규화 하여 C3D feature 텍스트를 추출한다. 18 | - - - 19 | ### 모델 학습 20 | ##### 논문 참조 Real world Anomaly Detection in Surveillance VideosVideos(2018, Waqas Sultani Chen Chen) 21 | ![image](https://user-images.githubusercontent.com/66463059/102055494-4f03d100-3e2e-11eb-9679-1691b4f70c99.png) 22 | - Real-world Anomaly Detection in Surveillance Videos(2018, Waqas Sultani, Chen Chen)의 모델 구조를 사용한다. 23 | - 하나의 영상을 32개의 segment c3d feature로 나눈 결과를 bag에 넣으면 이런 세그먼트 하나하나가 bag instance가 된다. 24 | - 해당 instance는 mean, normalization을 통해 (1,4096)형태가 되고, 이를 모델에 넣어 각 bag instance의 이상행동 score를 정한다. 25 | - positive bag과 negative bag 안의 가장 높은 score를 가진 instance를 비교하여 positive bag instance의 score가 더 크다면 맞게 판단했고, negative bag instance의 score가 더 크다면 틀린 판단을 했다고 정의한다. 26 | - 손실함수를 통해 반복 학습하여 전체 손실을 줄이고 분류 정확성을 높인다. 
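The NumPy sketch below is an illustration of this objective, not code from this repo: for one abnormal/normal bag pair it computes the hinge ranking term on the two highest segment scores, plus the sparsity and temporal-smoothness penalties that the training notebook's `custom_objective` weights with 0.00008. The names `scores_abn` / `scores_nor` are assumptions, standing for the model's sigmoid outputs over each bag's 32 segments.

```python
import numpy as np

def mil_ranking_loss(scores_abn, scores_nor, lam1=8e-5, lam2=8e-5):
    """Ranking loss for one (abnormal, normal) bag pair of segment scores."""
    # hinge between the most anomalous-looking segment of each bag
    hinge = max(0.0, 1.0 - scores_abn.max() + scores_nor.max())
    sparsity = scores_abn.sum()                # only a few segments should fire
    smooth = np.sum(np.diff(scores_abn) ** 2)  # adjacent segments should agree
    return hinge + lam1 * sparsity + lam2 * smooth

# toy check: a well-separated pair leaves mostly the two small penalty terms
print(mil_ranking_loss(np.array([0.1, 0.9, 0.2]), np.array([0.2, 0.1, 0.3])))
```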
27 | - - - 28 | ### 요약 및 결론 29 | ![image](https://user-images.githubusercontent.com/66463059/102055902-e23d0680-3e2e-11eb-8044-e2367c153197.png) 30 | - CCTV 영상에서 이상행동을 분류해 내는 기술을 웹에 서비스하고자 웹에서 영상을 입력했을 때, 해당 영상에서 이상행동을 탐지하고 그 발생시기를 시각화하여 영상을 분석할 수 있게 웹을 구현했다. 배포는 진행하지 않았다. 31 | - - - 32 | ### ref 33 | 1. [https://github.com/WaqasSultani/AnomalyDetectionCVPR2018](https://github.com/WaqasSultani/AnomalyDetectionCVPR2018) 34 | 2. [https://github.com/dolongbien/HumanBehaviorBKU](https://github.com/dolongbien/HumanBehaviorBKU) 35 | 36 | 37 | -------------------------------------------------------------------------------- /web/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | 8 | [packages] 9 | django = "*" 10 | pillow = "*" 11 | scipy = "*" 12 | matplotlib = "*" 13 | opencv-python = "*" 14 | 15 | [requires] 16 | python_version = "3.8" 17 | -------------------------------------------------------------------------------- /web/Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "99e05df5068db6033d426c0729dc669c96e5fcca6c2ed441eceda68a3fb2d472" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "3.8" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "asgiref": { 20 | "hashes": [ 21 | "sha256:7e51911ee147dd685c3c8b805c0ad0cb58d360987b56953878f8c06d2d1c6f1a", 22 | "sha256:9fc6fb5d39b8af147ba40765234fa822b39818b12cc80b35ad9b0cef3a476aed" 23 | ], 24 | "markers": "python_version >= '3.5'", 25 | "version": "==3.2.10" 26 | }, 27 | "certifi": { 28 | "hashes": [ 29 | "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3", 30 | "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41" 31 | ], 32 | "version": "==2020.6.20" 33 | }, 34 | "cycler": { 35 | "hashes": [ 36 | "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d", 37 | "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8" 38 | ], 39 | "version": "==0.10.0" 40 | }, 41 | "django": { 42 | "hashes": [ 43 | "sha256:59c8125ca873ed3bdae9c12b146fbbd6ed8d0f743e4cf5f5817af50c51f1fc2f", 44 | "sha256:b5fbb818e751f660fa2d576d9f40c34a4c615c8b48dd383f5216e609f383371f" 45 | ], 46 | "index": "pypi", 47 | "version": "==3.1.1" 48 | }, 49 | "kiwisolver": { 50 | "hashes": [ 51 | "sha256:03662cbd3e6729f341a97dd2690b271e51a67a68322affab12a5b011344b973c", 52 | "sha256:18d749f3e56c0480dccd1714230da0f328e6e4accf188dd4e6884bdd06bf02dd", 53 | "sha256:247800260cd38160c362d211dcaf4ed0f7816afb5efe56544748b21d6ad6d17f", 54 | "sha256:38d05c9ecb24eee1246391820ed7137ac42a50209c203c908154782fced90e44", 55 | "sha256:443c2320520eda0a5b930b2725b26f6175ca4453c61f739fef7a5847bd262f74", 56 | "sha256:4eadb361baf3069f278b055e3bb53fa189cea2fd02cb2c353b7a99ebb4477ef1", 57 | "sha256:556da0a5f60f6486ec4969abbc1dd83cf9b5c2deadc8288508e55c0f5f87d29c", 58 | "sha256:603162139684ee56bcd57acc74035fceed7dd8d732f38c0959c8bd157f913fec", 59 | "sha256:60a78858580761fe611d22127868f3dc9f98871e6fdf0a15cc4203ed9ba6179b", 60 | "sha256:63f55f490b958b6299e4e5bdac66ac988c3d11b7fafa522800359075d4fa56d1", 61 | "sha256:7cc095a4661bdd8a5742aaf7c10ea9fac142d76ff1770a0f84394038126d8fc7", 62 | "sha256:be046da49fbc3aa9491cc7296db7e8d27bcf0c3d5d1a40259c10471b014e4e0c", 63 | 
"sha256:c31bc3c8e903d60a1ea31a754c72559398d91b5929fcb329b1c3a3d3f6e72113", 64 | "sha256:c955791d80e464da3b471ab41eb65cf5a40c15ce9b001fdc5bbc241170de58ec", 65 | "sha256:d069ef4b20b1e6b19f790d00097a5d5d2c50871b66d10075dab78938dc2ee2cf", 66 | "sha256:d52b989dc23cdaa92582ceb4af8d5bcc94d74b2c3e64cd6785558ec6a879793e", 67 | "sha256:e586b28354d7b6584d8973656a7954b1c69c93f708c0c07b77884f91640b7657", 68 | "sha256:efcf3397ae1e3c3a4a0a0636542bcad5adad3b1dd3e8e629d0b6e201347176c8", 69 | "sha256:fccefc0d36a38c57b7bd233a9b485e2f1eb71903ca7ad7adacad6c28a56d62d2" 70 | ], 71 | "markers": "python_version >= '3.6'", 72 | "version": "==1.2.0" 73 | }, 74 | "matplotlib": { 75 | "hashes": [ 76 | "sha256:06866c138d81a593b535d037b2727bec9b0818cadfe6a81f6ec5715b8dd38a89", 77 | "sha256:16b241c3d17be786966495229714de37de04472da472277869b8d5b456a8df00", 78 | "sha256:27f9de4784ae6fb97679556c5542cf36c0751dccb4d6407f7c62517fa2078868", 79 | "sha256:2f5eefc17dc2a71318d5a3496313be5c351c0731e8c4c6182c9ac3782cfc4076", 80 | "sha256:371518c769d84af8ec9b7dcb871ac44f7a67ef126dd3a15c88c25458e6b6d205", 81 | "sha256:3d2edbf59367f03cd9daf42939ca06383a7d7803e3993eb5ff1bee8e8a3fbb6b", 82 | "sha256:3fb0409754b26f48045bacd6818e44e38ca9338089f8ba689e2f9344ff2847c7", 83 | "sha256:548cfe81476dbac44db96e9c0b074b6fb333b4d1f12b1ae68dbed47e45166384", 84 | "sha256:57be9e21073fc367237b03ecac0d9e4b8ddbe38e86ec4a316857d8d93ac9286c", 85 | "sha256:5ccecb5f78b51b885f0028b646786889f49c54883e554fca41a2a05998063f23", 86 | "sha256:69cf76d673682140f46c6cb5e073332c1f1b2853c748dc1cb04f7d00023567f7", 87 | "sha256:793e061054662aa27acaff9201cdd510a698541c6e8659eeceb31d66c16facc6", 88 | "sha256:799c421bc245a0749c1515b6dea6dc02db0a8c1f42446a0f03b3b82a60a900dc", 89 | "sha256:8bc1d3284dee001f41ec98f59675f4d723683e1cc082830b440b5f081d8e0ade", 90 | "sha256:a522de31e07ed7d6f954cda3fbd5ca4b8edbfc592a821a7b00291be6f843292e", 91 | "sha256:be2f0ec62e0939a9dcfd3638c140c5a74fc929ee3fd1f31408ab8633db6e1523", 92 | "sha256:c5d0c2ae3e3ed4e9f46b7c03b40d443601012ffe8eb8dfbb2bd6b2d00509f797", 93 | "sha256:f0268613073df055bcc6a490de733012f2cf4fe191c1adb74e41cec8add1a165" 94 | ], 95 | "index": "pypi", 96 | "version": "==3.3.2" 97 | }, 98 | "numpy": { 99 | "hashes": [ 100 | "sha256:04c7d4ebc5ff93d9822075ddb1751ff392a4375e5885299445fcebf877f179d5", 101 | "sha256:0bfd85053d1e9f60234f28f63d4a5147ada7f432943c113a11afcf3e65d9d4c8", 102 | "sha256:0c66da1d202c52051625e55a249da35b31f65a81cb56e4c69af0dfb8fb0125bf", 103 | "sha256:0d310730e1e793527065ad7dde736197b705d0e4c9999775f212b03c44a8484c", 104 | "sha256:1669ec8e42f169ff715a904c9b2105b6640f3f2a4c4c2cb4920ae8b2785dac65", 105 | "sha256:2117536e968abb7357d34d754e3733b0d7113d4c9f1d921f21a3d96dec5ff716", 106 | "sha256:3733640466733441295b0d6d3dcbf8e1ffa7e897d4d82903169529fd3386919a", 107 | "sha256:4339741994c775396e1a274dba3609c69ab0f16056c1077f18979bec2a2c2e6e", 108 | "sha256:51ee93e1fac3fe08ef54ff1c7f329db64d8a9c5557e6c8e908be9497ac76374b", 109 | "sha256:54045b198aebf41bf6bf4088012777c1d11703bf74461d70cd350c0af2182e45", 110 | "sha256:58d66a6b3b55178a1f8a5fe98df26ace76260a70de694d99577ddeab7eaa9a9d", 111 | "sha256:59f3d687faea7a4f7f93bd9665e5b102f32f3fa28514f15b126f099b7997203d", 112 | "sha256:62139af94728d22350a571b7c82795b9d59be77fc162414ada6c8b6a10ef5d02", 113 | "sha256:7118f0a9f2f617f921ec7d278d981244ba83c85eea197be7c5a4f84af80a9c3c", 114 | "sha256:7c6646314291d8f5ea900a7ea9c4261f834b5b62159ba2abe3836f4fa6705526", 115 | "sha256:967c92435f0b3ba37a4257c48b8715b76741410467e2bdb1097e8391fccfae15", 116 | 
"sha256:9a3001248b9231ed73894c773142658bab914645261275f675d86c290c37f66d", 117 | "sha256:aba1d5daf1144b956bc87ffb87966791f5e9f3e1f6fab3d7f581db1f5b598f7a", 118 | "sha256:addaa551b298052c16885fc70408d3848d4e2e7352de4e7a1e13e691abc734c1", 119 | "sha256:b594f76771bc7fc8a044c5ba303427ee67c17a09b36e1fa32bde82f5c419d17a", 120 | "sha256:c35a01777f81e7333bcf276b605f39c872e28295441c265cd0c860f4b40148c1", 121 | "sha256:cebd4f4e64cfe87f2039e4725781f6326a61f095bc77b3716502bed812b385a9", 122 | "sha256:d526fa58ae4aead839161535d59ea9565863bb0b0bdb3cc63214613fb16aced4", 123 | "sha256:d7ac33585e1f09e7345aa902c281bd777fdb792432d27fca857f39b70e5dd31c", 124 | "sha256:e6ddbdc5113628f15de7e4911c02aed74a4ccff531842c583e5032f6e5a179bd", 125 | "sha256:eb25c381d168daf351147713f49c626030dcff7a393d5caa62515d415a6071d8" 126 | ], 127 | "markers": "python_version >= '3.6'", 128 | "version": "==1.19.2" 129 | }, 130 | "opencv-python": { 131 | "hashes": [ 132 | "sha256:0039506845d7076e6871c0075227881a84de69799d70ed37c8704d203b740911", 133 | "sha256:02f7e31c710a7c82229fc4ad98e7e4cf265d19ab52b4451cbe7e33a840fe6595", 134 | "sha256:093c1bfa6da24a9d4dde2d54a22b9acfb46f5cb2c50d7387356cf897f0db0ab9", 135 | "sha256:17663f0469b2944b7d4051d4b1c425235d153777f17310c6990370bbb4d12695", 136 | "sha256:177f14625ea164f38b5b6f5c2b316f8ff8163e996cc0432de90f475956a9069a", 137 | "sha256:324a2c680caae9edbd843a355a2e03792cbd23faf6c24c20dd594fa9aac80765", 138 | "sha256:34d0d2c9a80c02d55f83a67c29fc4145a9dcf1fe3ddef0535d0b0d9c7b89b8d2", 139 | "sha256:505bd984aae24c489910bbd168e515580d62bc1dbdd5ee36f2c2d42803c4b795", 140 | "sha256:608dae0444065669fc26fa6bf1653072e40735b33dfa514c74a6165563a99e97", 141 | "sha256:78a0796ec15d1b41f5a87c41f339356eb04858749c8845936be532cb3436f898", 142 | "sha256:80a51a797f71ee4a401d281749bb096370007202204bbcd1ecfc9ead58bd3b0b", 143 | "sha256:a35b3a3540623090ba5fdad7ed97d0d75ca80ee55f5d7c1cecddda723665c0f8", 144 | "sha256:a6e1d065a45ec1bf466f47bdf767e0505b244c9470140cf8bab1dd8835f0d3ee", 145 | "sha256:b3ae62990faebefbc3cbc5430f7b6de57bafdcf297134113a9c6d6ccfce4438f", 146 | "sha256:bcb24c4f82fa79f049db4bfd0da1d18a315da66a55aa3d4cde81d1ec18f0a7ff", 147 | "sha256:cb00bbd41268f5fa0fa327ca30f7621a8ece983e0d8ae472e2ffe7ab1617606f", 148 | "sha256:ccd92a126d253c7bd65b36184fe097a0eea77da4d72d427e1630633bc586233e", 149 | "sha256:d19cbbcdc05caf7b41e28898f05c076c94b07647b4556c8327663a40acd4e3bd", 150 | "sha256:f5b82cd49b560e004608ca53ce625e5167b41f0fdc610758d6989083e26b5a03", 151 | "sha256:fae421571a7709ae0baa9bfd08177165bc1d56d7c79c806d12627d58a6faf2d1", 152 | "sha256:fec63240ea3179a2b4176a3256a99682129d75450a15bf2807904600ec64b45a" 153 | ], 154 | "index": "pypi", 155 | "version": "==4.4.0.42" 156 | }, 157 | "pillow": { 158 | "hashes": [ 159 | "sha256:0295442429645fa16d05bd567ef5cff178482439c9aad0411d3f0ce9b88b3a6f", 160 | "sha256:06aba4169e78c439d528fdeb34762c3b61a70813527a2c57f0540541e9f433a8", 161 | "sha256:09d7f9e64289cb40c2c8d7ad674b2ed6105f55dc3b09aa8e4918e20a0311e7ad", 162 | "sha256:0a80dd307a5d8440b0a08bd7b81617e04d870e40a3e46a32d9c246e54705e86f", 163 | "sha256:1ca594126d3c4def54babee699c055a913efb01e106c309fa6b04405d474d5ae", 164 | "sha256:25930fadde8019f374400f7986e8404c8b781ce519da27792cbe46eabec00c4d", 165 | "sha256:431b15cffbf949e89df2f7b48528be18b78bfa5177cb3036284a5508159492b5", 166 | "sha256:52125833b070791fcb5710fabc640fc1df07d087fc0c0f02d3661f76c23c5b8b", 167 | "sha256:5e51ee2b8114def244384eda1c82b10e307ad9778dac5c83fb0943775a653cd8", 168 | "sha256:612cfda94e9c8346f239bf1a4b082fdd5c8143cf82d685ba2dba76e7adeeb233", 
169 | "sha256:6d7741e65835716ceea0fd13a7d0192961212fd59e741a46bbed7a473c634ed6", 170 | "sha256:6edb5446f44d901e8683ffb25ebdfc26988ee813da3bf91e12252b57ac163727", 171 | "sha256:725aa6cfc66ce2857d585f06e9519a1cc0ef6d13f186ff3447ab6dff0a09bc7f", 172 | "sha256:8dad18b69f710bf3a001d2bf3afab7c432785d94fcf819c16b5207b1cfd17d38", 173 | "sha256:94cf49723928eb6070a892cb39d6c156f7b5a2db4e8971cb958f7b6b104fb4c4", 174 | "sha256:97f9e7953a77d5a70f49b9a48da7776dc51e9b738151b22dacf101641594a626", 175 | "sha256:9ad7f865eebde135d526bb3163d0b23ffff365cf87e767c649550964ad72785d", 176 | "sha256:9c87ef410a58dd54b92424ffd7e28fd2ec65d2f7fc02b76f5e9b2067e355ebf6", 177 | "sha256:a060cf8aa332052df2158e5a119303965be92c3da6f2d93b6878f0ebca80b2f6", 178 | "sha256:c79f9c5fb846285f943aafeafda3358992d64f0ef58566e23484132ecd8d7d63", 179 | "sha256:c92302a33138409e8f1ad16731568c55c9053eee71bb05b6b744067e1b62380f", 180 | "sha256:d08b23fdb388c0715990cbc06866db554e1822c4bdcf6d4166cf30ac82df8c41", 181 | "sha256:d350f0f2c2421e65fbc62690f26b59b0bcda1b614beb318c81e38647e0f673a1", 182 | "sha256:e901964262a56d9ea3c2693df68bc9860b8bdda2b04768821e4c44ae797de117", 183 | "sha256:ec29604081f10f16a7aea809ad42e27764188fc258b02259a03a8ff7ded3808d", 184 | "sha256:edf31f1150778abd4322444c393ab9c7bd2af271dd4dafb4208fb613b1f3cdc9", 185 | "sha256:f7e30c27477dffc3e85c2463b3e649f751789e0f6c8456099eea7ddd53be4a8a", 186 | "sha256:ffe538682dc19cc542ae7c3e504fdf54ca7f86fb8a135e59dd6bc8627eae6cce" 187 | ], 188 | "index": "pypi", 189 | "version": "==7.2.0" 190 | }, 191 | "pyparsing": { 192 | "hashes": [ 193 | "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", 194 | "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" 195 | ], 196 | "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", 197 | "version": "==2.4.7" 198 | }, 199 | "python-dateutil": { 200 | "hashes": [ 201 | "sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c", 202 | "sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a" 203 | ], 204 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 205 | "version": "==2.8.1" 206 | }, 207 | "pytz": { 208 | "hashes": [ 209 | "sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed", 210 | "sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048" 211 | ], 212 | "version": "==2020.1" 213 | }, 214 | "scipy": { 215 | "hashes": [ 216 | "sha256:066c513d90eb3fd7567a9e150828d39111ebd88d3e924cdfc9f8ce19ab6f90c9", 217 | "sha256:07e52b316b40a4f001667d1ad4eb5f2318738de34597bd91537851365b6c61f1", 218 | "sha256:0a0e9a4e58a4734c2eba917f834b25b7e3b6dc333901ce7784fd31aefbd37b2f", 219 | "sha256:1c7564a4810c1cd77fcdee7fa726d7d39d4e2695ad252d7c86c3ea9d85b7fb8f", 220 | "sha256:315aa2165aca31375f4e26c230188db192ed901761390be908c9b21d8b07df62", 221 | "sha256:6e86c873fe1335d88b7a4bfa09d021f27a9e753758fd75f3f92d714aa4093768", 222 | "sha256:8e28e74b97fc8d6aa0454989db3b5d36fc27e69cef39a7ee5eaf8174ca1123cb", 223 | "sha256:92eb04041d371fea828858e4fff182453c25ae3eaa8782d9b6c32b25857d23bc", 224 | "sha256:a0afbb967fd2c98efad5f4c24439a640d39463282040a88e8e928db647d8ac3d", 225 | "sha256:a785409c0fa51764766840185a34f96a0a93527a0ff0230484d33a8ed085c8f8", 226 | "sha256:cca9fce15109a36a0a9f9cfc64f870f1c140cb235ddf27fe0328e6afb44dfed0", 227 | "sha256:d56b10d8ed72ec1be76bf10508446df60954f08a41c2d40778bc29a3a9ad9bce", 228 | "sha256:dac09281a0eacd59974e24525a3bc90fa39b4e95177e638a31b14db60d3fa806", 229 | 
"sha256:ec5fe57e46828d034775b00cd625c4a7b5c7d2e354c3b258d820c6c72212a6ec", 230 | "sha256:eecf40fa87eeda53e8e11d265ff2254729d04000cd40bae648e76ff268885d66", 231 | "sha256:fc98f3eac993b9bfdd392e675dfe19850cc8c7246a8fd2b42443e506344be7d9" 232 | ], 233 | "index": "pypi", 234 | "version": "==1.5.2" 235 | }, 236 | "six": { 237 | "hashes": [ 238 | "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259", 239 | "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced" 240 | ], 241 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 242 | "version": "==1.15.0" 243 | }, 244 | "sqlparse": { 245 | "hashes": [ 246 | "sha256:022fb9c87b524d1f7862b3037e541f68597a730a8843245c349fc93e1643dc4e", 247 | "sha256:e162203737712307dfe78860cc56c8da8a852ab2ee33750e33aeadf38d12c548" 248 | ], 249 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 250 | "version": "==0.3.1" 251 | } 252 | }, 253 | "develop": {} 254 | } 255 | -------------------------------------------------------------------------------- /web/yonomAIproject/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | 8 | [packages] 9 | opencv-python = "*" 10 | 11 | [requires] 12 | python_version = "3.8" 13 | -------------------------------------------------------------------------------- /web/yonomAIproject/Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "deb7ff6e53178a7b1d23d45423b203c79de5c981efbd40522721cf7698ee14b3" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "3.8" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "numpy": { 20 | "hashes": [ 21 | "sha256:04c7d4ebc5ff93d9822075ddb1751ff392a4375e5885299445fcebf877f179d5", 22 | "sha256:0bfd85053d1e9f60234f28f63d4a5147ada7f432943c113a11afcf3e65d9d4c8", 23 | "sha256:0c66da1d202c52051625e55a249da35b31f65a81cb56e4c69af0dfb8fb0125bf", 24 | "sha256:0d310730e1e793527065ad7dde736197b705d0e4c9999775f212b03c44a8484c", 25 | "sha256:1669ec8e42f169ff715a904c9b2105b6640f3f2a4c4c2cb4920ae8b2785dac65", 26 | "sha256:2117536e968abb7357d34d754e3733b0d7113d4c9f1d921f21a3d96dec5ff716", 27 | "sha256:3733640466733441295b0d6d3dcbf8e1ffa7e897d4d82903169529fd3386919a", 28 | "sha256:4339741994c775396e1a274dba3609c69ab0f16056c1077f18979bec2a2c2e6e", 29 | "sha256:51ee93e1fac3fe08ef54ff1c7f329db64d8a9c5557e6c8e908be9497ac76374b", 30 | "sha256:54045b198aebf41bf6bf4088012777c1d11703bf74461d70cd350c0af2182e45", 31 | "sha256:58d66a6b3b55178a1f8a5fe98df26ace76260a70de694d99577ddeab7eaa9a9d", 32 | "sha256:59f3d687faea7a4f7f93bd9665e5b102f32f3fa28514f15b126f099b7997203d", 33 | "sha256:62139af94728d22350a571b7c82795b9d59be77fc162414ada6c8b6a10ef5d02", 34 | "sha256:7118f0a9f2f617f921ec7d278d981244ba83c85eea197be7c5a4f84af80a9c3c", 35 | "sha256:7c6646314291d8f5ea900a7ea9c4261f834b5b62159ba2abe3836f4fa6705526", 36 | "sha256:967c92435f0b3ba37a4257c48b8715b76741410467e2bdb1097e8391fccfae15", 37 | "sha256:9a3001248b9231ed73894c773142658bab914645261275f675d86c290c37f66d", 38 | "sha256:aba1d5daf1144b956bc87ffb87966791f5e9f3e1f6fab3d7f581db1f5b598f7a", 39 | "sha256:addaa551b298052c16885fc70408d3848d4e2e7352de4e7a1e13e691abc734c1", 40 | 
"sha256:b594f76771bc7fc8a044c5ba303427ee67c17a09b36e1fa32bde82f5c419d17a", 41 | "sha256:c35a01777f81e7333bcf276b605f39c872e28295441c265cd0c860f4b40148c1", 42 | "sha256:cebd4f4e64cfe87f2039e4725781f6326a61f095bc77b3716502bed812b385a9", 43 | "sha256:d526fa58ae4aead839161535d59ea9565863bb0b0bdb3cc63214613fb16aced4", 44 | "sha256:d7ac33585e1f09e7345aa902c281bd777fdb792432d27fca857f39b70e5dd31c", 45 | "sha256:e6ddbdc5113628f15de7e4911c02aed74a4ccff531842c583e5032f6e5a179bd", 46 | "sha256:eb25c381d168daf351147713f49c626030dcff7a393d5caa62515d415a6071d8" 47 | ], 48 | "markers": "python_version >= '3.6'", 49 | "version": "==1.19.2" 50 | }, 51 | "opencv-python": { 52 | "hashes": [ 53 | "sha256:0039506845d7076e6871c0075227881a84de69799d70ed37c8704d203b740911", 54 | "sha256:02f7e31c710a7c82229fc4ad98e7e4cf265d19ab52b4451cbe7e33a840fe6595", 55 | "sha256:093c1bfa6da24a9d4dde2d54a22b9acfb46f5cb2c50d7387356cf897f0db0ab9", 56 | "sha256:17663f0469b2944b7d4051d4b1c425235d153777f17310c6990370bbb4d12695", 57 | "sha256:177f14625ea164f38b5b6f5c2b316f8ff8163e996cc0432de90f475956a9069a", 58 | "sha256:324a2c680caae9edbd843a355a2e03792cbd23faf6c24c20dd594fa9aac80765", 59 | "sha256:34d0d2c9a80c02d55f83a67c29fc4145a9dcf1fe3ddef0535d0b0d9c7b89b8d2", 60 | "sha256:505bd984aae24c489910bbd168e515580d62bc1dbdd5ee36f2c2d42803c4b795", 61 | "sha256:608dae0444065669fc26fa6bf1653072e40735b33dfa514c74a6165563a99e97", 62 | "sha256:78a0796ec15d1b41f5a87c41f339356eb04858749c8845936be532cb3436f898", 63 | "sha256:80a51a797f71ee4a401d281749bb096370007202204bbcd1ecfc9ead58bd3b0b", 64 | "sha256:a35b3a3540623090ba5fdad7ed97d0d75ca80ee55f5d7c1cecddda723665c0f8", 65 | "sha256:a6e1d065a45ec1bf466f47bdf767e0505b244c9470140cf8bab1dd8835f0d3ee", 66 | "sha256:b3ae62990faebefbc3cbc5430f7b6de57bafdcf297134113a9c6d6ccfce4438f", 67 | "sha256:bcb24c4f82fa79f049db4bfd0da1d18a315da66a55aa3d4cde81d1ec18f0a7ff", 68 | "sha256:cb00bbd41268f5fa0fa327ca30f7621a8ece983e0d8ae472e2ffe7ab1617606f", 69 | "sha256:ccd92a126d253c7bd65b36184fe097a0eea77da4d72d427e1630633bc586233e", 70 | "sha256:d19cbbcdc05caf7b41e28898f05c076c94b07647b4556c8327663a40acd4e3bd", 71 | "sha256:f5b82cd49b560e004608ca53ce625e5167b41f0fdc610758d6989083e26b5a03", 72 | "sha256:fae421571a7709ae0baa9bfd08177165bc1d56d7c79c806d12627d58a6faf2d1", 73 | "sha256:fec63240ea3179a2b4176a3256a99682129d75450a15bf2807904600ec64b45a" 74 | ], 75 | "index": "pypi", 76 | "version": "==4.4.0.42" 77 | } 78 | }, 79 | "develop": {} 80 | } 81 | -------------------------------------------------------------------------------- /web/yonomAIproject/db.sqlite3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/db.sqlite3 -------------------------------------------------------------------------------- /web/yonomAIproject/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | """Run administrative tasks.""" 9 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yonomAIproject.settings') 10 | try: 11 | from django.core.management import execute_from_command_line 12 | except ImportError as exc: 13 | raise ImportError( 14 | "Couldn't import Django. Are you sure it's installed and " 15 | "available on your PYTHONPATH environment variable? 
Did you " 16 | "forget to activate a virtual environment?" 17 | ) from exc 18 | execute_from_command_line(sys.argv) 19 | 20 | 21 | if __name__ == '__main__': 22 | main() 23 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/post/__init__.py -------------------------------------------------------------------------------- /web/yonomAIproject/post/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | from .models import video_content, mat 3 | # Register your models here. 4 | admin.site.register(video_content) 5 | admin.site.register(mat) -------------------------------------------------------------------------------- /web/yonomAIproject/post/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | 4 | class PostConfig(AppConfig): 5 | name = 'post' 6 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.1.1 on 2020-09-11 02:00 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | initial = True 9 | 10 | dependencies = [ 11 | ] 12 | 13 | operations = [ 14 | migrations.CreateModel( 15 | name='video_content', 16 | fields=[ 17 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 18 | ('dummi_videos', models.FileField(null=True, upload_to='post')), 19 | ], 20 | ), 21 | ] 22 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/migrations/0002_auto_20200911_1157.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.1.1 on 2020-09-11 02:57 2 | 3 | from django.db import migrations 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('post', '0001_initial'), 10 | ] 11 | 12 | operations = [ 13 | migrations.RenameField( 14 | model_name='video_content', 15 | old_name='dummi_videos', 16 | new_name='video', 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/migrations/0003_auto_20200917_1208.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.1.1 on 2020-09-17 03:08 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('post', '0002_auto_20200911_1157'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AlterField( 14 | model_name='video_content', 15 | name='video', 16 | field=models.FileField(null=True, upload_to=''), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/migrations/0004_auto_20200917_1214.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.1.1 on 2020-09-17 03:14 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('post', 
'0003_auto_20200917_1208'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AlterField( 14 | model_name='video_content', 15 | name='video', 16 | field=models.FileField(null=True, upload_to=None), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/migrations/0005_auto_20200917_1216.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.1.1 on 2020-09-17 03:16 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('post', '0004_auto_20200917_1214'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AlterField( 14 | model_name='video_content', 15 | name='video', 16 | field=models.FileField(null=True, upload_to='post'), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/migrations/0006_mat.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.1.1 on 2020-09-20 15:03 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('post', '0005_auto_20200917_1216'), 10 | ] 11 | 12 | operations = [ 13 | migrations.CreateModel( 14 | name='mat', 15 | fields=[ 16 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 17 | ('mat', models.FileField(null=True, upload_to='mat')), 18 | ], 19 | ), 20 | ] 21 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/migrations/0007_mat_title.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.1.1 on 2020-09-20 15:58 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('post', '0006_mat'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AddField( 14 | model_name='mat', 15 | name='title', 16 | field=models.CharField(max_length=50, null=True), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/migrations/0008_video_content_title.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.1.1 on 2020-09-21 06:16 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('post', '0007_mat_title'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AddField( 14 | model_name='video_content', 15 | name='title', 16 | field=models.CharField(max_length=50, null=True), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/migrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/post/migrations/__init__.py -------------------------------------------------------------------------------- /web/yonomAIproject/post/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | 3 | # Create your models here. 
4 | 5 | class video_content(models.Model): 6 | video = models.FileField(upload_to='post', null=True) 7 | title = models.CharField(max_length=50, null=True) 8 | 9 | def __str__(self): 10 | return self.title 11 | 12 | 13 | class mat(models.Model): 14 | title = models.CharField(max_length=50, null=True) 15 | mat = models.FileField(upload_to='mat', null=True) 16 | 17 | def __str__(self): 18 | return self.title 19 | 20 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/about2.css: -------------------------------------------------------------------------------- 1 | #model_text { 2 | margin-left: 3%; 3 | } 4 | 5 | #c3d_text{ 6 | margin-left: 3%; 7 | } -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/home.css: -------------------------------------------------------------------------------- 1 | body{ 2 | background: url('./image/backimage.png') no-repeat fixed 50% 50%/100% 100% 3 | } 4 | 5 | .main_home_content{ 6 | 7 | /* border: 5px; 8 | border-style: solid; 9 | border-color: red; */ 10 | padding: 10px; 11 | 12 | width: 100%; 13 | height : 720px; 14 | 15 | /* background-color: palevioletred; */ 16 | display : flex; 17 | flex-direction: column; 18 | justify-content: center; 19 | align-items: space-between; 20 | } 21 | 22 | .main_header{ 23 | 24 | width: 100%; 25 | height: 20%; 26 | 27 | /* background-color: #111111; */ 28 | margin-top: -10px; 29 | 30 | display: flex; 31 | flex-direction: column; 32 | /* justify-content: center; */ 33 | /* align-items: center; */ 34 | } 35 | 36 | 37 | .yonom{ 38 | font-size: 60px; 39 | color: rgb(216, 214, 214); 40 | margin-left: 15%; 41 | margin-right: 60%; 42 | margin-top: 5px; 43 | /* background-color: red; */ 44 | } 45 | 46 | .cctvabnormal{ 47 | font-size: 20px; 48 | color: rgba(120, 120, 120, 0.7); 49 | margin-left: 15%; 50 | margin-right: 60%; 51 | margin-bottom: 10px; 52 | /* background-color: yellow; */ 53 | } 54 | 55 | .btn_btn-primary2{ 56 | background-color: rgba(170, 170, 170, 0.5); 57 | color: whitesmoke; 58 | padding: 10px; 59 | margin-left: 75%; 60 | margin-right: 15%; 61 | margin-top: -80px; 62 | 63 | border: 1px; 64 | border-style: solid; 65 | border-radius: 30px; 66 | border-color: rgba(170, 170, 170, 0.5); 67 | } 68 | 69 | /* .madin_home1_container{ 70 | border: 5px; 71 | border-style: solid; 72 | border-color: blue; 73 | 74 | width: 100%; 75 | height : 700px; 76 | 77 | 78 | display : flex; 79 | flex-direction: column; 80 | justify-content: center; 81 | align-items: space-between; 82 | 83 | } */ 84 | 85 | .main_form{ 86 | /* border: 5px; 87 | border-style: solid; 88 | border-color: red; */ 89 | 90 | width: 100%; 91 | height : 80%; 92 | 93 | display : flex; 94 | flex-direction: column; 95 | justify-content: center; 96 | align-items: center; 97 | } 98 | 99 | .form-group1{ 100 | width: 450px; 101 | height : 400px; 102 | 103 | /* background-color: #292929; */ 104 | /* opacity: 0.5; */ 105 | 106 | margin-top: 180px; 107 | padding: 30px; 108 | 109 | display : flex; 110 | flex-direction: column; 111 | /* justify-content: flex-end; */ 112 | /* align-items: center; */ 113 | 114 | } 115 | 116 | /* .form-group1::after { 117 | width: 100%; 118 | height : 80%; 119 | content: ""; 120 | background-color: #292929; 121 | position: absolute; 122 | top: 0; 123 | left: 0; 124 | z-index: -1; 125 | opacity: 0.8; 126 | } */ 127 | 128 | .label1{ 129 | /* background-color: pink; */ 130 | color:rgb(216, 214, 214); 131 | font-size: 30px; 
132 | text-align: center; 133 | margin-top: -10px; 134 | } 135 | 136 | .form-control1{ 137 | background-color: rgba(170, 170, 170, 0.5); 138 | border: 1px; 139 | border-style: solid; 140 | border-color: rgba(170, 170, 170, 0.5); 141 | border-radius: 3px; 142 | padding: 10px; 143 | margin-bottom: 10px; 144 | } 145 | 146 | .findID{ 147 | color: gray; 148 | text-align: right; 149 | margin-top: 5px; 150 | margin-bottom: 10px; 151 | font-size: 15px; 152 | } 153 | 154 | #ex_chk{ 155 | background-color: tomato; 156 | margin-right: 30px; 157 | margin-top: 10px; 158 | } 159 | 160 | .btn_btn-primary1{ 161 | background-color: rgba(170, 170, 170, 0.5); 162 | color: whitesmoke; 163 | padding: 10px; 164 | margin-top: 7px; 165 | 166 | border: 1px; 167 | border-radius: 3px; 168 | } -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/home2.css: -------------------------------------------------------------------------------- 1 | 2 | .home2_container{ 3 | /* border: 1px; 4 | border-style: solid; 5 | border-color: #7A8A8A; */ 6 | 7 | height: 70%; 8 | 9 | display: flex; 10 | flex-direction: column; 11 | justify-content: center; 12 | align-items: center; 13 | } 14 | 15 | .home2_content1{ 16 | /* border: 1px; 17 | border-style: solid; 18 | border-color: #7A8A8A; */ 19 | 20 | width: 100%; 21 | height: 90%; 22 | 23 | display: flex; 24 | justify-content: center; 25 | align-items: center; 26 | 27 | } 28 | .home2_content2{ 29 | /* border: 1px; 30 | border-style: solid; 31 | border-color: #7A8A8A; */ 32 | 33 | width: 100%; 34 | height: 10%; 35 | 36 | display: flex; 37 | 38 | justify-content: space-between ; 39 | align-items: center; 40 | 41 | 42 | } 43 | .home2_content2_1{ 44 | border: 1px; 45 | border-style: solid; 46 | border-color: #7A8A8A; 47 | 48 | width: 80%; 49 | height: 100%; 50 | 51 | display: flex; 52 | flex-direction: column; 53 | justify-content: space-around; 54 | 55 | 56 | } 57 | 58 | /* home2 content2 */ 59 | 60 | .home2_content2_2{ 61 | border: 1px; 62 | border-style: solid; 63 | border-color: #7A8A8A; 64 | 65 | width: 20%; 66 | height: 100%; 67 | 68 | display: flex; 69 | flex-direction: column; 70 | justify-content: center; 71 | align-items: center; 72 | 73 | } 74 | .home2_content2_2_video_num{ 75 | width: 100%; 76 | height: 50%; 77 | 78 | display: flex; 79 | flex-direction: column; 80 | justify-content: center; 81 | align-items: center; 82 | } 83 | 84 | .home2_content2_2_button{ 85 | width: 100%; 86 | height: 50%; 87 | 88 | display: flex; 89 | flex-direction: column; 90 | justify-content: center; 91 | align-items: center; 92 | 93 | } 94 | 95 | .content2_video{ 96 | /* border: 1px; 97 | border-style: solid; 98 | border-color: red; */ 99 | 100 | width: 100%; 101 | height: 100%; 102 | 103 | 104 | display: flex; 105 | justify-content: space-between; 106 | align-items: center; 107 | 108 | } 109 | 110 | 111 | 112 | .home2_video{ 113 | border: 1px; 114 | border-style: solid; 115 | border-color: #7A8A8A; 116 | 117 | 118 | width: 70%; 119 | height: 100%; 120 | 121 | display: flex; 122 | justify-content: center; 123 | align-items: center; 124 | 125 | } 126 | 127 | 128 | .home2_video_1{ 129 | /* border: 1px; 130 | border-style: solid; 131 | border-color: #7A8A8A; */ 132 | 133 | 134 | margin:5px; 135 | 136 | width: 100%; 137 | height : 100%; 138 | 139 | display: flex; 140 | justify-content: center; 141 | align-items: center; 142 | } 143 | 144 | 145 | .home2_event{ 146 | /* border: 1px; 147 | border-style: solid; 148 | border-color: #7A8A8A; */ 149 
| 150 | 151 | width: 30%; 152 | height: 100%; 153 | 154 | 155 | } 156 | 157 | .home2_log_event{ 158 | border: 1px; 159 | border-style: solid; 160 | border-color: #7A8A8A; 161 | 162 | 163 | width:100%; 164 | height: 90%; 165 | 166 | display: flex; 167 | justify-content: center; 168 | align-items: flex-end; 169 | } 170 | 171 | 172 | .home2_result_event{ 173 | border: 1px; 174 | border-style: solid; 175 | border-color: #7A8A8A; 176 | 177 | width:100%; 178 | height: 10%; 179 | 180 | 181 | display: flex; 182 | justify-content: center; 183 | } 184 | 185 | 186 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/home3.css: -------------------------------------------------------------------------------- 1 | 2 | .home3_container{ 3 | border: 1px; 4 | border-style: solid; 5 | border-color: red; 6 | 7 | height: 70%; 8 | 9 | display: flex; 10 | flex-direction: column; 11 | justify-content: center; 12 | align-items: center; 13 | } 14 | 15 | .home3_content1{ 16 | border: 1px; 17 | border-style: solid; 18 | border-color: red; 19 | 20 | width: 100%; 21 | height: 80%; 22 | 23 | display: flex; 24 | justify-content: center; 25 | align-items: center; 26 | 27 | } 28 | .home3_content2{ 29 | border: 1px; 30 | border-style: solid; 31 | border-color: red; 32 | 33 | width: 100%; 34 | height: 20%; 35 | 36 | display: flex; 37 | justify-content: space-between ; 38 | align-items: center; 39 | 40 | 41 | } 42 | .content3_video{ 43 | border: 1px; 44 | border-style: solid; 45 | border-color: red; 46 | 47 | width: 100%; 48 | height: 100%; 49 | 50 | 51 | display: flex; 52 | justify-content: space-between; 53 | align-items: center; 54 | 55 | } 56 | 57 | .home3_content2_1{ 58 | border: 1px; 59 | border-style: solid; 60 | border-color: red; 61 | 62 | width: 80%; 63 | height: 100%; 64 | 65 | display: flex; 66 | flex-direction: column; 67 | justify-content: space-around; 68 | 69 | 70 | } 71 | 72 | .home3_content2_2{ 73 | border: 1px; 74 | border-style: solid; 75 | border-color: red; 76 | 77 | width: 20%; 78 | height: 100%; 79 | 80 | display: flex; 81 | flex-direction: column; 82 | justify-content: center; 83 | align-items: center; 84 | 85 | } 86 | .home3_content2_2_video_num{ 87 | width: 100%; 88 | height: 50%; 89 | 90 | display: flex; 91 | flex-direction: column; 92 | justify-content: center; 93 | align-items: center; 94 | } 95 | 96 | .home3_content2_2_button{ 97 | width: 100%; 98 | height: 50%; 99 | 100 | display: flex; 101 | flex-direction: column; 102 | justify-content: center; 103 | align-items: center; 104 | 105 | } 106 | .home3_video{ 107 | border: 1px; 108 | border-style: solid; 109 | border-color: red; 110 | 111 | width: 70%; 112 | height: 100%; 113 | 114 | display: flex; 115 | justify-content: center; 116 | align-items: center; 117 | 118 | } 119 | 120 | 121 | .home3_video_1{ 122 | border: 1px; 123 | border-style: solid; 124 | border-color: red; 125 | 126 | margin:5px; 127 | 128 | width: 30%; 129 | height : 60%; 130 | 131 | display: flex; 132 | justify-content: center; 133 | align-items: center; 134 | } 135 | 136 | .home3_video_2{ 137 | border: 1px; 138 | border-style: solid; 139 | border-color: red; 140 | 141 | margin:5px; 142 | 143 | width : 30%; 144 | height: 60%; 145 | 146 | display: flex; 147 | justify-content: center; 148 | align-items: center; 149 | } 150 | 151 | .home3_video_3{ 152 | border: 1px; 153 | border-style: solid; 154 | border-color: red; 155 | 156 | margin:5px; 157 | 158 | width : 30%; 159 | height: 60%; 160 | 161 | display: flex; 162 | 
justify-content: center; 163 | align-items: center; 164 | } 165 | 166 | 167 | .home3_event{ 168 | border: 1px; 169 | border-style: solid; 170 | border-color: red; 171 | 172 | width: 30%; 173 | height: 100%; 174 | 175 | 176 | } 177 | 178 | .home3_log_event{ 179 | border: 1px; 180 | border-style: solid; 181 | border-color: red; 182 | 183 | width:100%; 184 | height: 90%; 185 | 186 | display: flex; 187 | justify-content: center; 188 | align-items: flex-end; 189 | } 190 | 191 | 192 | .home3_result_event{ 193 | border: 1px; 194 | border-style: solid; 195 | border-color: red; 196 | 197 | width:100%; 198 | height: 10%; 199 | 200 | 201 | display: flex; 202 | justify-content: center; 203 | } 204 | 205 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/home4.css: -------------------------------------------------------------------------------- 1 | 2 | .home4_container{ 3 | border: 1px; 4 | border-style: solid; 5 | border-color: red; 6 | 7 | height: 75%; 8 | 9 | display: flex; 10 | flex-direction: column; 11 | justify-content: center; 12 | align-items: center; 13 | } 14 | 15 | .home4_content1{ 16 | border: 1px; 17 | border-style: solid; 18 | border-color: red; 19 | 20 | 21 | width: 100%; 22 | height: 77%; 23 | 24 | display: flex; 25 | justify-content: center; 26 | align-items: center; 27 | 28 | } 29 | .home4_content2{ 30 | border: 1px; 31 | border-style: solid; 32 | border-color: red; 33 | 34 | 35 | 36 | width: 100%; 37 | height: 23%; 38 | 39 | display: flex; 40 | 41 | } 42 | .content4_video{ 43 | border: 1px; 44 | border-style: solid; 45 | border-color: red; 46 | 47 | width: 100%; 48 | height: 100%; 49 | 50 | 51 | display: flex; 52 | justify-content: space-between; 53 | align-items: center; 54 | 55 | } 56 | 57 | .home4_content2_1{ 58 | border: 1px; 59 | border-style: solid; 60 | border-color: red; 61 | 62 | width: 80%; 63 | height: 100%; 64 | 65 | display: flex; 66 | flex-direction: column; 67 | justify-content: space-around; 68 | 69 | 70 | } 71 | .home4_content2_2{ 72 | border: 1px; 73 | border-style: solid; 74 | border-color: red; 75 | 76 | width: 20%; 77 | height: 100%; 78 | 79 | display: flex; 80 | flex-direction: column; 81 | justify-content: center; 82 | align-items: center; 83 | 84 | } 85 | .home4_content2_2_video_num{ 86 | width: 100%; 87 | height: 50%; 88 | 89 | display: flex; 90 | flex-direction: column; 91 | justify-content: center; 92 | align-items: center; 93 | } 94 | 95 | .home4_content2_2_button{ 96 | width: 100%; 97 | height: 50%; 98 | 99 | display: flex; 100 | flex-direction: column; 101 | justify-content: center; 102 | align-items: center; 103 | 104 | } 105 | 106 | .home4_video{ 107 | border: 1px; 108 | border-style: solid; 109 | border-color: red; 110 | 111 | width: 70%; 112 | height: 100%; 113 | 114 | display: flex; 115 | flex-direction : column; 116 | justify-content: center; 117 | align-items: center; 118 | 119 | } 120 | 121 | .home4_video_12{ 122 | border: 1px; 123 | border-style: solid; 124 | border-color: red; 125 | 126 | width: 100%; 127 | height: 50%; 128 | 129 | 130 | display: flex; 131 | justify-content: center; 132 | align-items: center; 133 | } 134 | 135 | .home4_video_34{ 136 | border: 1px; 137 | border-style: solid; 138 | border-color: red; 139 | 140 | width: 100%; 141 | height: 50%; 142 | 143 | display: flex; 144 | justify-content: center; 145 | align-items: center; 146 | } 147 | 148 | .home4_video_1{ 149 | border: 1px; 150 | border-style: solid; 151 | border-color: red; 152 | 153 | margin:5px; 154 | 
155 | width: 45%; 156 | height : 95%; 157 | 158 | display: flex; 159 | justify-content: center; 160 | align-items: center; 161 | } 162 | 163 | .home4_video_2{ 164 | border: 1px; 165 | border-style: solid; 166 | border-color: red; 167 | 168 | margin:5px; 169 | 170 | width : 45%; 171 | height: 95%; 172 | 173 | display: flex; 174 | justify-content: center; 175 | align-items: center; 176 | } 177 | 178 | .home4_video_3{ 179 | border: 1px; 180 | border-style: solid; 181 | border-color: red; 182 | 183 | margin:5px; 184 | 185 | width : 45%; 186 | height: 95%; 187 | 188 | display: flex; 189 | justify-content: center; 190 | align-items: center; 191 | } 192 | .home4_video_4{ 193 | border: 1px; 194 | border-style: solid; 195 | border-color: red; 196 | 197 | margin:5px; 198 | 199 | width : 45%; 200 | height: 95%; 201 | 202 | display: flex; 203 | justify-content: center; 204 | align-items: center; 205 | } 206 | 207 | 208 | .home4_event{ 209 | border: 1px; 210 | border-style: solid; 211 | border-color: red; 212 | 213 | width: 30%; 214 | height: 100%; 215 | 216 | 217 | } 218 | 219 | .home4_log_event{ 220 | border: 1px; 221 | border-style: solid; 222 | border-color: red; 223 | 224 | width:100%; 225 | height: 90%; 226 | 227 | display: flex; 228 | justify-content: center; 229 | align-items: flex-end; 230 | } 231 | 232 | 233 | .home4_result_event{ 234 | border: 1px; 235 | border-style: solid; 236 | border-color: red; 237 | 238 | width:100%; 239 | height: 10%; 240 | 241 | 242 | display: flex; 243 | justify-content: center; 244 | } 245 | 246 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/image/backimage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/post/static/post/image/backimage.png -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/image/ch.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/post/static/post/image/ch.jpg -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/image/howtoc3d.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/post/static/post/image/howtoc3d.jpg -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/image/howtowork.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/post/static/post/image/howtowork.jpg -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/image/mj.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/post/static/post/image/mj.jpg -------------------------------------------------------------------------------- 
/web/yonomAIproject/post/static/post/image/sb.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/post/static/post/image/sb.jpg -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/image/sy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/post/static/post/image/sy.jpg -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/made_by.css: -------------------------------------------------------------------------------- 1 | #team_intro1{ 2 | font-size: 15px; 3 | 4 | display: flex; 5 | justify-content: center; 6 | align-items: space-between; 7 | } 8 | #team_intro2{ 9 | font-size: 15px; 10 | 11 | display: flex; 12 | justify-content: center; 13 | align-items: space-between; 14 | } 15 | section{ 16 | margin-right : 5%; 17 | margin-left : 5%; 18 | text-align: center; 19 | } 20 | h1{ 21 | text-align: center; 22 | } 23 | 24 | .made_by_container{ 25 | height: 70%; 26 | 27 | display: flex; 28 | flex-direction: column; 29 | justify-content: center; 30 | align-items: center; 31 | } 32 | 33 | .team_name{ 34 | width: 100%; 35 | height: 10%; 36 | 37 | } 38 | 39 | .team_info{ 40 | width: 100%; 41 | height: 90%; 42 | 43 | display: flex; 44 | flex-direction: column; 45 | justify-content: center; 46 | 47 | 48 | } -------------------------------------------------------------------------------- /web/yonomAIproject/post/static/post/result.css: -------------------------------------------------------------------------------- 1 | 2 | .result_container{ 3 | border: 1px; 4 | border-style: solid; 5 | border-color: #7A8A8A; 6 | 7 | height: 70%; 8 | 9 | display: flex; 10 | flex-direction: column; 11 | justify-content: center; 12 | align-items: center; 13 | } 14 | 15 | .result_content{ 16 | border: 1px; 17 | border-style: solid; 18 | border-color: #7A8A8A; 19 | 20 | width: 100%; 21 | height: 90%; 22 | 23 | display: flex; 24 | /* justify-content: center; 25 | align-items: center; */ 26 | } 27 | 28 | .result_video_graph{ 29 | border: 1px; 30 | border-style: solid; 31 | border-color: #7A8A8A; 32 | 33 | width: 80%; 34 | height: 100%; 35 | 36 | display: flex; 37 | flex-direction:column; 38 | justify-content: center; 39 | align-items: center; 40 | } 41 | .result_video{ 42 | border: 1px; 43 | border-style: solid; 44 | border-color: #7A8A8A; 45 | 46 | width: 100%; 47 | height: 50%; 48 | 49 | display: flex; 50 | justify-content: center; 51 | align-items: center; 52 | 53 | } 54 | .result_score_graph{ 55 | border: 1px; 56 | border-style: solid; 57 | border-color: #7A8A8A; 58 | 59 | width: 100%; 60 | height: 50%; 61 | 62 | display: flex; 63 | justify-content: center; 64 | align-items: center; 65 | } 66 | .result_image{ 67 | width: 100%; 68 | height: 100%; 69 | 70 | } 71 | .result_score{ 72 | border: 1px; 73 | border-style: solid; 74 | border-color: #7A8A8A; 75 | 76 | width: 20%; 77 | height: 100%; 78 | 79 | display: flex; 80 | flex-direction: column; 81 | justify-content: space-around; 82 | /* align-items: center; */ 83 | 84 | 85 | } 86 | 87 | #score { 88 | font-size :12px; 89 | 90 | } 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | .result_video_title{ 103 | border: 1px; 104 | 
border-style: solid; 105 | border-color: #7A8A8A; 106 | 107 | width: 100%; 108 | height: 10%; 109 | 110 | display: flex; 111 | /* justify-content: center; */ 112 | /* align-items: center; */ 113 | } 114 | 115 | .result_title{ 116 | border: 1px; 117 | border-style: solid; 118 | border-color: #7A8A8A; 119 | 120 | width: 80%; 121 | height: 100%; 122 | 123 | display: flex; 124 | justify-content: center; 125 | align-items: center; 126 | } 127 | 128 | .result_event{ 129 | border: 1px; 130 | border-style: solid; 131 | border-color: #7A8A8A; 132 | 133 | width: 20%; 134 | height: 100%; 135 | 136 | display: flex; 137 | justify-content: center; 138 | align-items: center; 139 | } 140 | 141 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/about.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% load static %} 3 | {% block style %} 4 | 5 | {% endblock %} 6 | 7 | {% block content%} 8 |

[How yOnOm CCTV Detection Works]

9 |
10 |
11 | c3d_원리 12 |

13 |
14 |
15 | To use C3D's feature extraction capability, fc6-1 data are generated from each video (mp4)
in 16-frame units. 16 |
The generated data are then averaged and normalized to extract each video's C3D feature (see the sketch below).
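A rough Python sketch of this step (names and shapes are assumptions, not this repo's code; it presumes the per-clip fc6-1 activations for one video are already stacked into a `(num_clips, 4096)` array with at least 32 clips):

```python
import numpy as np

def to_32_segments(clip_feats, n_seg=32):
    """Average 16-frame clip features into 32 segments, then L2-normalize each."""
    # split the clip axis into 32 roughly equal chunks
    chunks = np.array_split(clip_feats, n_seg, axis=0)
    segments = np.stack([c.mean(axis=0) for c in chunks])    # (32, 4096)
    norms = np.linalg.norm(segments, axis=1, keepdims=True)
    return segments / np.maximum(norms, 1e-8)                # guard against /0

# e.g. ~3 minutes of 30 fps video -> 337 sixteen-frame clips -> (32, 4096)
segs = to_32_segments(np.random.rand(337, 4096).astype(np.float32))
```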

17 |
18 | next 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 |
27 | {% endblock%} 28 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/about2.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% load static %} 3 | {% block style %} 4 | 5 | {% endblock %} 6 | 7 | {% block content%} 8 | 9 |
10 |
11 | 모델_원리 12 |

13 |
14 | The videos are preprocessed through the C3D and segment conversion steps above to extract features.
15 | Each extracted (1,4096) feature is fed into the model, which assigns an abnormal-behavior score to each bag instance.
16 |
17 | Comparing the highest-scoring instances in the positive bag and the negative bag,
the model's judgment is defined as correct if the positive bag instance's score is higher,
18 | and as incorrect if the negative bag instance's score is higher.
This definition is applied in the loss function, and iterative training drives the loss down.

19 | Because the criteria for abnormal behavior are subjective, conventional classification struggled with this problem;
the model above resolves it through this iterative ranking approach and shows a high level of performance (see the sketch below). 20 |
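A hedged Keras sketch of that scoring step: it rebuilds the same fully connected scorer the training notebook defines (4096 → 512 → 32 → 1, dropout 0.6, sigmoid output) and scores one video's 32 segments. A real run would load the trained weights first; the random input here only demonstrates the shapes.

```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.regularizers import l2

# same architecture as the training notebook (untrained in this sketch)
model = Sequential([
    Dense(512, input_dim=4096, activation='relu',
          kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001)),
    Dropout(0.6),
    Dense(32, kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001)),
    Dropout(0.6),
    Dense(1, activation='sigmoid',
          kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001)),
])

segments = np.random.rand(32, 4096).astype(np.float32)  # one video's C3D bag
scores = model.predict(segments).ravel()                # anomaly score per segment
print(scores.argmax(), scores.max())                    # most anomalous segment
```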
21 |

22 |
23 | 38 | {% endblock%} 39 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/about3.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% load static %} 3 | {% block style %} 4 | {% endblock %} 5 | {% block content%} 6 |
7 |

References

8 |
9 | 10 | - Real-world Anomaly Detection in Surveillance Videos(2018) 11 |
12 | 13 | - https://github.com/dolongbien/HumanBehaviorBKU 14 |
15 | 16 | - https://github.com/WaqasSultani/AnomalyDetectionCVPR2018 17 |
18 |
19 |
20 |

Dataset Sources

21 |
22 | 23 | - https://aihub.or.kr/aidata/139/tool 24 |
25 | 26 | - UCF-Crime Dataset 27 |
28 |
29 | 30 | 31 | 32 | 33 | before 34 | 35 | {% endblock%} -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/error.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block style %} 4 | {% endblock %} 5 | 6 | {% block content %} 7 |

This service is coming soon!!

8 |
9 |
10 |

Go back :)

11 | {%endblock%} -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/home.html: -------------------------------------------------------------------------------- 1 | {% comment %} {% extends 'base.html' %} 2 | {% load static %} 3 | 4 | {% block style %} 5 | 6 | {% endblock %} 7 | 8 | 9 | 10 | {% block content %} 11 |
12 | 13 |
14 | {% csrf_token %} 15 |







16 |
17 | 18 | 19 | If you don't have an ID, please click non-member login 20 |
21 |
22 | 23 | 24 |
25 |
26 | 27 |
28 | 29 | 30 |
31 |
32 | {%endblock%} 33 | 34 | {% endcomment %} 35 | 36 | {% load static %} 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | yOnOm AI project 46 | 47 | 48 | 49 | 50 | 51 |
52 |
53 | 54 |
55 |
yOnOm
56 |
CCTV Abnormal Behavior Detection
57 | 58 |
59 | 60 | 61 |
62 | {% csrf_token %} 63 |
64 | 65 | 66 | {% comment %} {% endcomment %} 67 | 68 | {% comment %} If you don't have an ID, please click non-member login
{% endcomment %} 69 | 70 | 71 | {% comment %} 72 | {% endcomment %} 73 | {% comment %} Find ID / Password
{% endcomment %} 74 | Find Email / Password 75 |
76 | 77 | 78 |
79 |
80 | 81 |
82 |
83 | 84 | 85 | 86 | 87 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/home2.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% load static %} 3 | 4 | {% block style %} 5 | 6 | 7 | 8 | {% endblock %} 9 | 10 | {% block content %} 11 |
12 |
13 |
14 |
15 | 16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 | 24 |
25 |
26 | 27 |
28 | {% csrf_token %} 29 |
30 | 31 |
32 |
33 |
34 |
35 |
36 | 37 |
38 |
39 | {% endblock%} -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/home3.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% load static %} 3 | 4 | {% block style %} 5 | 6 | {% endblock %} 7 | 8 | {% block content %} 9 | 10 |
11 |
12 |
13 |
14 | 15 |
16 |
17 | 18 |
19 |
20 | 21 |
22 |
23 |
24 |
event log
25 |
event result
26 |
27 | 28 |
29 |
30 |
31 | {% csrf_token %} 32 | 33 |
34 | 35 | 36 | 37 |
38 | 39 |
40 |
41 |
42 |
43 |
44 | 45 |
46 |
47 | 48 | 49 | {% endblock%} -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/home4.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% load static %} 3 | 4 | {% block style %} 5 | 6 | {% endblock %} 7 | 8 | {% block content %} 9 |
10 |
11 |
12 |
13 |
14 | 15 |
16 |
17 | 18 |
19 |
20 | 21 |
22 |
23 | 24 |
25 |
26 | 27 |
28 |
29 |
30 |
31 |
event log
32 |
event result
33 |
34 | 35 |
36 | 37 |
38 |
39 | {% csrf_token %} 40 |
41 | 42 | 43 | 44 | 45 |
46 |
47 |
48 |
49 |
50 |
51 | 52 |
53 |
54 | 55 | {% endblock%} -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/inserted.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% load static %} 3 | 4 | {% block style %} 5 | 6 | 7 | 8 | {% endblock %} 9 | 10 | {% block content %} 11 |
12 |
13 |
14 |
15 | 16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 | 24 |
25 |
26 | 27 |
28 | {% csrf_token %} 29 |
30 |

{{video.title}}

31 |
32 |
33 |
34 |
35 |
36 | 37 |
38 |
39 | {% endblock%} -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/made_by.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% load static %} 3 | 4 | {% block style %} 5 | 6 | 7 | {% endblock %} 8 | 9 | {% block content %} 10 |
11 |
12 | {% comment %}

yOnOm Team Member {% endcomment %} 13 | 14 | 15 | 16 |

17 |
18 |
19 |
20 |
21 |
22 | 23 | 24 |
25 | 김 26 |

27 |
28 | 강민지
29 | email : 5622kmj@likelion.org
30 | Role : data preprocessing, modeling 31 |
32 |

33 |
34 |
35 | 김 36 |

37 |
38 | 김채현
39 | email : 9808gus@likelion.org
40 | Role : data preprocessing, modeling, web implementation 41 |
42 |

43 |
44 |
45 | 46 |
47 | 김 48 |

49 |
50 | 배성빈
51 | email : seongbin5962@likelion.org
52 | Role : data preprocessing, modeling, web implementation
53 |
54 |

55 |
56 |
57 | 김 58 |

59 |
60 | 최서윤
61 | email : ab3492@likelion.org
62 | Role : data preprocessing, modeling, web implementation 63 |
64 |

65 |
66 |
67 |
68 |
69 |
70 | {% endblock%} -------------------------------------------------------------------------------- /web/yonomAIproject/post/templates/post/result.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | {% load static %} 3 | 4 | {% block style %} 5 | 6 | 7 | 8 | {% endblock %} 9 | 10 | {% block content %} 11 |
12 |
13 |
14 |
15 | 16 |
17 |
18 | result_image 19 |
20 |
21 | 22 |
23 | {% for i in format_score_str %} 24 | {{i}} 25 | {% endfor %} 26 |
27 |
28 |
29 |
30 | {{video.title}} 31 |
32 |
33 | {{isnormal}} 34 |
35 |
36 |
37 | {% endblock%} -------------------------------------------------------------------------------- /web/yonomAIproject/post/tests.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | # Create your tests here. 4 | -------------------------------------------------------------------------------- /web/yonomAIproject/post/urls.py: -------------------------------------------------------------------------------- 1 | from django.urls import path 2 | from . import views 3 | 4 | app_name = "post" 5 | urlpatterns = [ 6 | path('', views.home, name='home'), 7 | path('about/', views.about, name='about'), 8 | path('made_by/', views.made_by, name='made_by'), 9 | path('video_content/', views.video_content, name='input_video'), 10 | path('home_number/', views.home_number, name='home_number'), 11 | path('main/', views.main, name='main'), 12 | path('error/', views.error, name='error'), 13 | path('insert_video/', views.insert_video, name='insert_video'), 14 | path('about2/', views.about2, name='about2'), 15 | path('about3/', views.about3, name='about3'), 16 | path('home_video/', views.home_video, name='home_video'), 17 | path('result/', views.result, name='result'), 18 | ] -------------------------------------------------------------------------------- /web/yonomAIproject/post/views.py: -------------------------------------------------------------------------------- 1 | from django.shortcuts import render, redirect 2 | from .models import video_content, mat 3 | from scipy.io import loadmat 4 | import matplotlib.pyplot as plt 5 | import io 6 | import urllib, base64 7 | import cv2 8 | import numpy as np 9 | import os 10 | import urllib, base64 11 | 12 | ################## 반복 실행 시키기 #################### 13 | # import time 14 | # import threading 15 | 16 | # def thread_run(): 17 | # print("test") 18 | 19 | # rel = 0 20 | # count = 5 21 | # while count: 22 | # threading.Timer(5+rel, thread_run).start() 23 | # rel = rel + 5 24 | # count = count - 1 25 | 26 | ####################################################### 27 | 28 | # import chart_studio.plotly as py 29 | # import plotly.graph_objects as go 30 | # from plotly.offline import plot 31 | # import plotly.graph_objs as go 32 | # import chart_studio.plotly as py 33 | # Create your views here. 
34 | 35 | def home(request): 36 | video = video_content.objects.all() 37 | video.delete() 38 | # context = { 39 | # 'video' : video, 40 | # } 41 | # video = video_content.objects.all() 42 | # video.delete() 43 | 44 | return render(request, 'post/home.html') 45 | 46 | def about(request): 47 | return render(request, 'post/about.html') 48 | 49 | def made_by(request): 50 | return render(request, 'post/made_by.html') 51 | 52 | # def video_content(request): 53 | 54 | # if 'image' in request.FILES: 55 | # video = request.FILES['image'] 56 | # video = video_content(dummi_videos = video) 57 | # video.save() 58 | 59 | # return redirect('post:home') 60 | def insert_video(request): 61 | video_delete = video_content.objects.all() 62 | video_delete.delete() 63 | 64 | input_video = request.FILES['input_video_1'] 65 | str_input_video = str(input_video) 66 | str_input_video = str_input_video.split('.')[0] 67 | video = video_content(video = input_video, title=str_input_video) 68 | video.save() 69 | 70 | # print(video) 71 | script_mat = '_C' 72 | mat_name = str_input_video.split('.')[0] 73 | 74 | mat_check = mat.objects.all() 75 | 76 | 77 | 78 | # print(mat_score) 79 | video = video_content.objects.all() 80 | url = "../media/post/" 81 | video_path = url + str(input_video) 82 | print(video_path) 83 | 84 | context = {} 85 | context['video'] = video[0] 86 | 87 | 88 | 89 | return render(request, 'post/inserted.html', context) 90 | 91 | 92 | def home_number(request): 93 | if 'non_member_login' in request.POST.keys(): 94 | video = video_content.objects.all() 95 | video.delete() 96 | 97 | return render(request, 'post/home2.html') 98 | 99 | else: 100 | return render(request, 'post/error.html') 101 | 102 | def main(request): 103 | return render(request, 'post/home2.html') 104 | 105 | def error(request): 106 | 107 | return render(request, 'post/error.html') 108 | 109 | def about2(request): 110 | return render(request, 'post/about2.html') 111 | 112 | def about3(request): 113 | return render(request, 'post/about3.html') 114 | 115 | 116 | def home_video(request): 117 | video = video_content.objects.all() 118 | video.delete() 119 | 120 | 121 | return render(request, 'post/home2.html') 122 | 123 | 124 | def result(request): 125 | video = video_content.objects.all() 126 | 127 | isnormal = "normal" 128 | script = '_C' 129 | mat_name = video[0].title 130 | print(str(mat_name)) 131 | str_mat_name = str(mat_name) 132 | 133 | getmat = mat.objects.get(title = str_mat_name + ".mat") 134 | load_mat = loadmat(getmat.mat) 135 | mat_score = load_mat['predictions'] 136 | format_score = [] 137 | format_score_str = {} 138 | count = 1 139 | for i in mat_score: 140 | 141 | formatting = format(i[0], '.4f') 142 | float_formatting = float(formatting) 143 | count_to_dic = str(count) + "/32" 144 | if float_formatting > 0.4: 145 | format_score_str[count] = count_to_dic+ " " + formatting + " abnormal" 146 | else: 147 | pass 148 | 149 | format_score.append(float_formatting) 150 | count = count + 1 151 | 152 | # 12개 153 | 154 | if len(format_score_str.values()) > 3: 155 | isnormal = "abnormal" 156 | else: 157 | pass 158 | 159 | ###### plt ######## 160 | ##### matplot ##### 161 | # plt.plot(range(10)) 162 | # fig = plt.gcf() 163 | # buf = io.BytesIO() 164 | # fig.savefig(buf, format='png') 165 | # buf.seek(0) 166 | # string = base64.b64encode(buf.read()) 167 | # url = urllib.parse.quote(string) 168 | 169 | # context['data'] = url 170 | # data2 = np.linspace(1, len(data1), 32) 171 | # data3 = pd.DataFrame(data1, data2) 172 | 173 | # plt.plot(data3) 
174 | # plt.savefig() 175 | 176 | plt.figure(figsize=(5.1,2.2)) 177 | plt.plot(np.linspace(1, 32, 32), format_score, c='r', label="abnormal_score") 178 | plt.plot(np.linspace(1, 32, 32), [0.4 for x in range(32)], 'b--') 179 | plt.yticks(np.linspace(0, 1, 11)) 180 | plt.xticks(np.linspace(0, 32, 9)) 181 | plt.legend() 182 | 183 | fig = plt.gcf() 184 | buf = io.BytesIO() 185 | fig.savefig(buf, format='png') 186 | buf.seek(0) 187 | string = base64.b64encode(buf.read()) 188 | uri = urllib.parse.quote(string) 189 | 190 | ##################### 191 | 192 | context = { 'video' : video[0], 193 | 'isnormal' : isnormal, 194 | 'format_score_str' : format_score_str.values(), 195 | 'data' : uri 196 | } 197 | return render(request, 'post/result.html', context) -------------------------------------------------------------------------------- /web/yonomAIproject/static/cover.css: -------------------------------------------------------------------------------- 1 | /* 2 | * Globals 3 | */ 4 | 5 | /* Links */ 6 | a, 7 | a:focus, 8 | a:hover { 9 | color: #fff; 10 | } 11 | 12 | /* Custom default button */ 13 | .btn-secondary, 14 | .btn-secondary:hover, 15 | .btn-secondary:focus { 16 | color: #333; 17 | text-shadow: none; /* Prevent inheritance from `body` */ 18 | background-color: #fff; 19 | border: .05rem solid #fff; 20 | } 21 | 22 | 23 | /* 24 | * Base structure 25 | */ 26 | 27 | html, 28 | body { 29 | height: 100%; 30 | background-color: #333; 31 | } 32 | 33 | body { 34 | display: -ms-flexbox; 35 | display: flex; 36 | color: #fff; 37 | text-shadow: 0 .05rem .1rem rgba(0, 0, 0, .5); 38 | box-shadow: inset 0 0 5rem rgba(0, 0, 0, .5); 39 | } 40 | 41 | .cover-container { 42 | max-width: 42em; 43 | 44 | } 45 | 46 | 47 | /* 48 | * Header 49 | */ 50 | .masthead { 51 | margin-bottom: 2rem; 52 | } 53 | 54 | .masthead-brand { 55 | margin-bottom: 0; 56 | } 57 | 58 | .nav-masthead .nav-link { 59 | padding: .25rem 0; 60 | font-weight: 700; 61 | color: rgba(255, 255, 255, .5); 62 | background-color: transparent; 63 | border-bottom: .25rem solid transparent; 64 | } 65 | 66 | .nav-masthead .nav-link:hover, 67 | .nav-masthead .nav-link:focus { 68 | border-bottom-color: rgba(255, 255, 255, 0.25); 69 | } 70 | 71 | .nav-masthead .nav-link + .nav-link { 72 | margin-left: 1rem; 73 | } 74 | 75 | .nav-masthead .active { 76 | color: #fff; 77 | border-bottom-color: #fff; 78 | } 79 | 80 | @media (min-width: 42em) { 81 | .masthead-brand { 82 | float: left; 83 | } 84 | .nav-masthead { 85 | float: right; 86 | } 87 | } 88 | 89 | 90 | /* 91 | * Cover 92 | */ 93 | /* .mystyle { 94 | border: 1px; 95 | border-style: solid; 96 | border-color: lightgray; 97 | 98 | display:flex; 99 | flex-direction: column; 100 | justify-content: center; 101 | align-items: center; 102 | 103 | height: 75%; 104 | } 105 | .block_videos_1_2{ 106 | /* border: 1px; 107 | border-style: solid; 108 | border-color: lightgray; */ 109 | 110 | 111 | /* width : 100%; 112 | height: 45%; 113 | 114 | display:flex; 115 | justify-content: center; 116 | align-items: center; 117 | 118 | } */ 119 | 120 | 121 | /* .block_event{ 122 | border: 1px; 123 | border-style: solid; 124 | border-color: lightgray; 125 | 126 | width : 100%; 127 | height: 20%; 128 | 129 | display:flex; 130 | flex-direction: column; 131 | justify-content: center; 132 | align-items: center; 133 | 134 | } 135 | 136 | 137 | .content_video{ 138 | border: 1px; 139 | border-style: solid; 140 | border-color: lightgray; 141 | 142 | width: 100%; 143 | height: 50%; 144 | } 145 | .result_event{ 146 | border: 1px; 147 | 
border-style: solid; 148 | border-color: lightgray; 149 | 150 | width: 100%; 151 | height: 50%; 152 | } */ 153 | /* .block_videos{ 154 | border: 1px; 155 | border-style: solid; 156 | border-color: lightgray; 157 | 158 | display:flex; 159 | justify-content: center; 160 | align-items: center; 161 | 162 | width:50%; 163 | height:100%; 164 | } 165 | 166 | .block_event{ 167 | border: 1px; 168 | border-style: solid; 169 | border-color: lightgray; 170 | 171 | display:flex; 172 | justify-content: center; 173 | align-items: center; 174 | 175 | width:50%; 176 | height:100%; 177 | } */ 178 | 179 | /* .cover { 180 | border: 1px; 181 | border-style: solid; 182 | border-color: lightgray; 183 | 184 | padding: 0 1.5rem; 185 | } 186 | .cover .btn-lg { 187 | 188 | 189 | padding: .75rem 1.25rem; 190 | font-weight: 700; 191 | } */ 192 | 193 | 194 | /* 195 | * Footer 196 | */ 197 | .mastfoot { 198 | color: rgba(255, 255, 255, .5); 199 | } 200 | -------------------------------------------------------------------------------- /web/yonomAIproject/templates/base.html: -------------------------------------------------------------------------------- 1 | {% load static %} 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | yOnOm AI project 11 | 12 | 13 | 14 | 15 | {% comment %} {% endcomment %} 16 | 17 | 33 | 34 | 35 | {% block style %} 36 | {% endblock %} 37 | 38 | 39 |
40 |
41 |
42 |

yOnOm

43 | 48 |
49 | {% comment %}
{% endcomment %} 50 | 51 | {%block content%} 52 | {%endblock%} 53 |
54 |
55 | {% comment %}

Cover template for Bootstrap, by @mdo.

{% endcomment %} 56 |
57 |
58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /web/yonomAIproject/yonomAIproject/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yOnOmproj/CCTV_anomaly_detection/1879e9bac9d845017e2f3645d6731d9d1d28c19c/web/yonomAIproject/yonomAIproject/__init__.py -------------------------------------------------------------------------------- /web/yonomAIproject/yonomAIproject/asgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | ASGI config for yonomAIproject project. 3 | 4 | It exposes the ASGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.asgi import get_asgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yonomAIproject.settings') 15 | 16 | application = get_asgi_application() 17 | -------------------------------------------------------------------------------- /web/yonomAIproject/yonomAIproject/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for yonomAIproject project. 3 | 4 | Generated by 'django-admin startproject' using Django 3.1.1. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.1/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/3.1/ref/settings/ 11 | """ 12 | 13 | from pathlib import Path 14 | import os 15 | 16 | # Build paths inside the project like this: BASE_DIR / 'subdir'. 17 | BASE_DIR = Path(__file__).resolve().parent.parent 18 | 19 | 20 | # Quick-start development settings - unsuitable for production 21 | # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ 22 | 23 | # SECURITY WARNING: keep the secret key used in production secret! 24 | SECRET_KEY = '9532in-grz7ff-aju+xfq6babp&)j9b1g^xgnozrqesrlk$h*^' 25 | 26 | # SECURITY WARNING: don't run with debug turned on in production! 
27 | DEBUG = True 28 | 29 | ALLOWED_HOSTS = [] 30 | 31 | 32 | # Application definition 33 | 34 | INSTALLED_APPS = [ 35 | 'django.contrib.admin', 36 | 'django.contrib.auth', 37 | 'django.contrib.contenttypes', 38 | 'django.contrib.sessions', 39 | 'django.contrib.messages', 40 | 'django.contrib.staticfiles', 41 | 'post' 42 | ] 43 | 44 | MIDDLEWARE = [ 45 | 'django.middleware.security.SecurityMiddleware', 46 | 'django.contrib.sessions.middleware.SessionMiddleware', 47 | 'django.middleware.common.CommonMiddleware', 48 | 'django.middleware.csrf.CsrfViewMiddleware', 49 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 50 | 'django.contrib.messages.middleware.MessageMiddleware', 51 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 52 | ] 53 | 54 | ROOT_URLCONF = 'yonomAIproject.urls' 55 | 56 | TEMPLATES = [ 57 | { 58 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 59 | 'DIRS': [os.path.join(BASE_DIR, 'templates')], 60 | 'APP_DIRS': True, 61 | 'OPTIONS': { 62 | 'context_processors': [ 63 | 'django.template.context_processors.debug', 64 | 'django.template.context_processors.request', 65 | 'django.contrib.auth.context_processors.auth', 66 | 'django.contrib.messages.context_processors.messages', 67 | ], 68 | }, 69 | }, 70 | ] 71 | 72 | WSGI_APPLICATION = 'yonomAIproject.wsgi.application' 73 | 74 | 75 | # Database 76 | # https://docs.djangoproject.com/en/3.1/ref/settings/#databases 77 | 78 | DATABASES = { 79 | 'default': { 80 | 'ENGINE': 'django.db.backends.sqlite3', 81 | 'NAME': BASE_DIR / 'db.sqlite3', 82 | } 83 | } 84 | 85 | 86 | # Password validation 87 | # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators 88 | 89 | AUTH_PASSWORD_VALIDATORS = [ 90 | { 91 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 92 | }, 93 | { 94 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 95 | }, 96 | { 97 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 98 | }, 99 | { 100 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 101 | }, 102 | ] 103 | 104 | 105 | # Internationalization 106 | # https://docs.djangoproject.com/en/3.1/topics/i18n/ 107 | 108 | LANGUAGE_CODE = 'en-us' 109 | 110 | TIME_ZONE = 'Asia/Seoul' 111 | 112 | USE_I18N = True 113 | 114 | USE_L10N = True 115 | 116 | USE_TZ = True 117 | 118 | 119 | # Static files (CSS, JavaScript, Images) 120 | # https://docs.djangoproject.com/en/3.1/howto/static-files/ 121 | 122 | STATIC_URL = '/static/' 123 | 124 | STATICFILES_DIRS = [ 125 | os.path.join(BASE_DIR, "static"), 126 | ] 127 | 128 | MEDIA_ROOT = os.path.join(BASE_DIR, 'media') 129 | MEDIA_URL = '/media/' -------------------------------------------------------------------------------- /web/yonomAIproject/yonomAIproject/urls.py: -------------------------------------------------------------------------------- 1 | """yonomAIproject URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/3.1/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | from django.contrib import admin 17 | from django.urls import include, path 18 | from post import views 19 | from django.conf import settings 20 | from django.conf.urls.static import static 21 | 22 | urlpatterns = [ 23 | path('admin/', admin.site.urls), 24 | path('', include('post.urls')), 25 | path('about/', views.about, name='about'), 26 | 27 | ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) 28 | 29 | -------------------------------------------------------------------------------- /web/yonomAIproject/yonomAIproject/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for yonomAIproject project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yonomAIproject.settings') 15 | 16 | application = get_wsgi_application() 17 | --------------------------------------------------------------------------------