├── Test_Phase
│   ├── readme.md
│   ├── TEST_PHASE_Functions.py
│   ├── TEST_PHASE.py
│   └── test_performance.prototxt
├── Installation
│   ├── images
│   │   ├── readme.md
│   │   └── Select Target Platform.png
│   └── readme.md
├── Create LMDB
│   ├── create LMDB using numpy
│   │   ├── readme.md
│   │   └── CreateLMDB.py
│   └── readme.md
└── README.md

--------------------------------------------------------------------------------
/Test_Phase/readme.md:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/Installation/images/readme.md:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/Create LMDB/create LMDB using numpy/readme.md:
--------------------------------------------------------------------------------
1 | Python file to create an LMDB from numpy files.
2 | 
--------------------------------------------------------------------------------
/Installation/images/Select Target Platform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/astorfi/Caffe_Deep_Learning/HEAD/Installation/images/Select Target Platform.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Caffe_Deep_Learning
2 | [Caffe](http://caffe.berkeleyvision.org/) is a deep learning framework developed by the Berkeley Vision and Learning Center ([BVLC](http://bair.berkeley.edu/)).
3 | This repository is dedicated to working with Caffe, from installation to implementing architectures.
4 | 
5 | You are more than welcome to contribute to this repository. If you'd like to contribute to Caffe_Deep_Learning, be sure to create a side branch first, because pull requests to the master branch are locked.
6 | 
7 | ## Caffe Installation
8 | 
9 | As many users will tell you, `Caffe installation` can be a challenging task. Please refer to [this installation guide](https://github.com/astorfi/Caffe_Deep_Learning/tree/master/Installation) for a step-by-step process. The installation targets CUDA 8.0 and NVIDIA TITAN X (Powered by Pascal) GPUs.
10 | 
--------------------------------------------------------------------------------
/Create LMDB/readme.md:
--------------------------------------------------------------------------------
1 | # Create LMDB
2 | 
3 | This part provides instructions on how to create the LMDB file format, which is supported by Caffe. To the best of our knowledge,
4 | it is the most compatible file format with Caffe and provides the fastest processing among the alternatives, e.g., HDF5.
5 | 
6 | There are two general ways to create an LMDB file. One is to use the [numpy](http://www.numpy.org/) file format as the input and Python for creating the LMDB. The alternative and faster way is to use a shell script and raw images (in .png or .jpg format) as the input.
7 | 
8 | ## Using the Numpy file format
9 | 
10 | When creating an LMDB from numpy files, all the files, whether they contain raw image pixels, features, or any other elements to be stored in the LMDB, must be in numpy (.npy) format. The Python file for creating an LMDB from numpy files is provided here; a minimal sketch of its core write loop follows.
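The sketch below assumes pycaffe and the `lmdb` Python package are available; the array shape, label values, and output path are placeholders. Each sample is wrapped in a Caffe `Datum` and written under a zero-padded key, following the same calls used in `CreateLMDB.py` in this folder.

```
import lmdb
import numpy as np
import caffe

# Placeholder inputs: X holds N samples in Caffe's (channels, height, width) layout,
# y holds their integer labels.
X = np.zeros((10, 3, 224, 448), dtype=np.uint8)
y = np.zeros(10, dtype=np.int64)

# map_size is an upper bound (in bytes) on the database size; oversize it generously.
env = lmdb.open('example_lmdb', map_size=int(1e9))
txn = env.begin(write=True)
for i in range(X.shape[0]):
    datum = caffe.io.array_to_datum(X[i], int(y[i]))  # wrap one sample in a Datum
    txn.put('{:0>8d}'.format(i), datum.SerializeToString())
txn.commit()
env.close()
```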
11 | -------------------------------------------------------------------------------- /Test_Phase/TEST_PHASE_Functions.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import sys 4 | import multiprocessing 5 | from sklearn import preprocessing 6 | import lmdb 7 | import numpy as np 8 | import caffe 9 | import cv2 10 | from caffe.proto import caffe_pb2 11 | from sklearn import metrics 12 | import scipy.io as sio 13 | import matplotlib.pyplot as plt 14 | import random 15 | 16 | def K_Fold_Validation(label, distance, k = 5): 17 | 18 | # Flipping the distance to get the dissimilarity 19 | similarity_output = - distance 20 | 21 | """ 22 | K-Fold validation on data 23 | """ 24 | 25 | # Get the fold lenth 26 | fold_lenth = int(len(label) / k) 27 | EER = np.zeros((k, 1)) 28 | AUC = np.zeros((k, 1)) 29 | 30 | EER_stat = [] 31 | AUC_stat = [] 32 | 33 | # Looping over all folds 34 | for itr in range(k): 35 | # Calculating the ROC curve 36 | fpr, tpr, thresholds = metrics.roc_curve(label[itr*fold_lenth:(itr+1)*fold_lenth], similarity_output[itr*fold_lenth:(itr+1)*fold_lenth], pos_label=1) 37 | 38 | # Calculating EER 39 | intersect_x = fpr[np.abs(fpr - (1 - tpr)).argmin(0)] 40 | EER[itr, 0] = intersect_x 41 | 42 | # AUC(area under the curve) calculation 43 | AUC[itr, 0] = np.trapz(tpr, fpr) 44 | 45 | EER_stat.append(np.mean(EER,axis=0)) 46 | EER_stat.append(np.std(EER, axis=0)) 47 | 48 | AUC_stat.append(np.mean(AUC, axis=0)) 49 | AUC_stat.append(np.std(AUC, axis=0)) 50 | 51 | return EER_stat, AUC_stat 52 | 53 | def Plot_HIST_Fn(label, distance, choice_phase, save_name): 54 | 55 | # Plot histogram of output 56 | gen_dissimilarity = [] 57 | imp_dissimilarity = [] 58 | for i in range(len(label)): 59 | if label[i] == 1: 60 | gen_dissimilarity.append(distance[i][0]) 61 | else: 62 | imp_dissimilarity.append(distance[i][0]) 63 | 64 | bins = np.linspace(0, np.amax(distance), 50) 65 | fig = plt.figure() 66 | plt.hist(gen_dissimilarity, bins, alpha=0.5, facecolor='blue', normed=False, label='gen_dist') 67 | plt.hist(imp_dissimilarity, bins, alpha=0.5, facecolor='red', normed=False, label='imp_dist') 68 | plt.legend(loc='upper right') 69 | plt.show() 70 | fig.savefig(choice_phase + '_' + save_name) 71 | 72 | 73 | def Plot_ROC_Fn(label, distance, choice_phase, save_name): 74 | 75 | similarity_output = - distance 76 | 77 | # Calculating the ROC curve for the whole data. 
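    # metrics.roc_curve sweeps a decision threshold over similarity_output and returns
    # the false positive rate (fpr) and true positive rate (tpr) at every threshold.
    # Below, the EER is approximated as the fpr at the point where fpr is closest to
    # 1 - tpr (i.e., where the false positive and false negative rates cross), and the
    # AUC is obtained by trapezoidal integration of the curve via np.trapz.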
78 | fpr, tpr, thresholds = metrics.roc_curve(label, similarity_output, pos_label=1) 79 | 80 | # for itr in range(k): 81 | 82 | # Calculating EER 83 | intersect_x = fpr[np.abs(fpr - (1 - tpr)).argmin(0)] 84 | EER = intersect_x 85 | # print("EER = ", float(("{0:.%ie}" % 1).format(intersect_x))) 86 | 87 | # AUC(area under the curve) calculation 88 | AUC = np.trapz(tpr, fpr) 89 | # print("AUC = ", float(("{0:.%ie}" % 1).format(AUC))) 90 | 91 | # Save .mat files 92 | sio.savemat('roc_data/fpr.mat', {'fpr': fpr}) 93 | sio.savemat('roc_data/tpr.mat', {'tpr': tpr}) 94 | sio.savemat('roc_data/label.mat', {'label': label}) 95 | sio.savemat('roc_data/distance.mat', {'distance': distance}) 96 | 97 | # Plot the ROC 98 | fig = plt.figure() 99 | ax = fig.gca() 100 | lines = plt.plot(fpr, tpr, label='ROC Curve') 101 | plt.setp(lines, linewidth=3, color='r') 102 | ax.set_xticks(np.arange(0, 1, 0.1)) 103 | ax.set_yticks(np.arange(0, 1, 0.1)) 104 | plt.title('ROC Curve') 105 | plt.xlabel('False Positive Rate') 106 | plt.ylabel('True Positive Rate') 107 | 108 | # Cutting the floating number 109 | AUC = '%.2f' % AUC 110 | EER = '%.2f' % EER 111 | 112 | # Setting text to plot 113 | plt.text(0.5, 0.5, 'AUC = ' + str(AUC), fontdict=None) 114 | plt.text(0.5, 0.4, 'EER = ' + str(EER), fontdict=None) 115 | plt.grid() 116 | plt.show() 117 | fig.savefig(choice_phase + '_' + save_name) 118 | 119 | 120 | -------------------------------------------------------------------------------- /Test_Phase/TEST_PHASE.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import sys 4 | import multiprocessing 5 | from sklearn import preprocessing 6 | import lmdb 7 | import numpy as np 8 | import caffe 9 | import cv2 10 | from caffe.proto import caffe_pb2 11 | from sklearn import metrics 12 | import scipy.io as sio 13 | import matplotlib.pyplot as plt 14 | import random 15 | from PyQt4.QtGui import * 16 | from PySide import QtGui, QtCore 17 | from easygui import * 18 | from TEST_PHASE_Functions import * 19 | 20 | """ 21 | GUI PART 22 | """ 23 | 24 | 25 | class MyButtons(QtGui.QDialog): 26 | """""" 27 | 28 | def __init__(self, choices, title): 29 | # Initialized and super call. 
30 | super(MyButtons, self).__init__() 31 | self.initUI(choices, title) 32 | self.choice = choices 33 | 34 | def initUI(self, choices, title): 35 | option1Button = QtGui.QPushButton(choices[0]) 36 | option1Button.clicked.connect(self.onOption1) 37 | option2Button = QtGui.QPushButton(choices[1]) 38 | option2Button.clicked.connect(self.onOption2) 39 | option3Button = QtGui.QPushButton(choices[2]) 40 | option3Button.clicked.connect(self.onOption3) 41 | option4Button = QtGui.QPushButton(choices[3]) 42 | option4Button.clicked.connect(self.onOption4) 43 | 44 | buttonBox = QtGui.QDialogButtonBox() 45 | buttonBox = QtGui.QDialogButtonBox(QtCore.Qt.Horizontal) 46 | buttonBox.addButton(option1Button, QtGui.QDialogButtonBox.ActionRole) 47 | buttonBox.addButton(option2Button, QtGui.QDialogButtonBox.ActionRole) 48 | buttonBox.addButton(option3Button, QtGui.QDialogButtonBox.ActionRole) 49 | buttonBox.addButton(option4Button, QtGui.QDialogButtonBox.ActionRole) 50 | # 51 | mainLayout = QtGui.QVBoxLayout() 52 | mainLayout.addWidget(buttonBox) 53 | 54 | self.setLayout(mainLayout) 55 | # define window xLoc,yLoc,xDim,yDim 56 | self.setGeometry(250, 250, 100, 100) 57 | self.setWindowTitle(title) 58 | self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) 59 | 60 | def onOption1(self): 61 | self.retStatus = 1 62 | self.close() 63 | self.choice = self.choice[0] 64 | 65 | def onOption2(self): 66 | self.retStatus = 2 67 | self.close() 68 | self.choice = self.choice[1] 69 | 70 | def onOption3(self): 71 | self.retStatus = 3 72 | self.close() 73 | self.choice = self.choice[2] 74 | 75 | def onOption4(self): 76 | self.retStatus = 4 77 | self.close() 78 | self.choice = self.choice[3] 79 | 80 | """ 81 | GUI for training or testing phase. 82 | """ 83 | app = QtGui.QApplication(sys.argv) 84 | user_options = ['TRAIN', 'TEST', 'Cancel', 'Continue'] 85 | task_title = 'You want to see the results for training or testing?!' 86 | form = MyButtons(choices=user_options, title=task_title) 87 | form.exec_() 88 | choice_phase = form.choice 89 | 90 | if choice_phase == "TRAIN": 91 | LMDB_FILE_NAME = 'PATH/to?LMDB/train' 92 | elif choice_phase == "TEST": 93 | LMDB_FILE_NAME = 'PATH/to?LMDB/test' 94 | # If user canceled the operation. 95 | elif choice_phase == 'Cancel': 96 | sys.exit("Canceled by the user") 97 | 98 | # Forward passing mean if you want to feed the data to network and see the output of any layer! 99 | user_options = ['YES', 'NO', 'Cancel', 'Continue'] 100 | task_title = 'Do want to perform the forward passing to network??!' 101 | form = MyButtons(choices=user_options, title=task_title) 102 | form.exec_() 103 | choice_passing = form.choice 104 | 105 | if choice_passing == "YES": 106 | choice_netwrok = 'forward_passing' 107 | elif choice_passing == "NO": 108 | choice_netwrok = 'input_visualization' 109 | # If user canceled the operation. 110 | elif choice_passing == 'Cancel': 111 | sys.exit("Canceled by the user") 112 | 113 | 114 | # Forward passing mean if you want to feed the data to network and see the output of any layer! 115 | user_options = ['YES', 'NO', 'Cancel', 'Continue'] 116 | task_title = 'Do want to Visualize the ROC curve and histogram??!' 
117 | form = MyButtons(choices=user_options, title=task_title) 118 | form.exec_() 119 | choice_visualize = form.choice 120 | 121 | """ 122 | Calling the structure 123 | """ 124 | np.set_printoptions(threshold=np.nan) 125 | caffe.set_device(1) 126 | caffe.set_mode_gpu() 127 | 128 | # Necessary file calling 129 | MODEL_FILE = 'test_performance.prototxt' 130 | PRETRAINED_MODEL = 'Path/to/CAFFEMODEL' 131 | 132 | 133 | # Activating the fowrad pass towards the network. 134 | if choice_netwrok == 'forward_passing': 135 | # Creating net 136 | net = caffe.Net(MODEL_FILE, PRETRAINED_MODEL, caffe.TEST) 137 | 138 | # Opening LMDB file 139 | lmdb_env = lmdb.open(LMDB_FILE_NAME) 140 | lmdb_txn = lmdb_env.begin() 141 | lmdb_cursor = lmdb_txn.cursor() 142 | datum = caffe_pb2.Datum() 143 | 144 | # Calculate number of files. 145 | num_img = 0 146 | for key, value in lmdb_cursor: 147 | num_img += 1 148 | label = [] 149 | distance = np.zeros((num_img, 1)) 150 | loss = np.zeros((num_img, 1)) 151 | similarity = np.zeros((num_img, 1)) 152 | print "Number of pairs: %d ", num_img 153 | 154 | # Going through all files in LMDB file 155 | original_features = [] 156 | counter = 0 157 | for key, value in lmdb_cursor: 158 | datum.ParseFromString(value) 159 | 160 | # Getting the label and data 161 | label.append(int(datum.label)) 162 | pair_features = caffe.io.datum_to_array(datum) 163 | original_features.append(pair_features) 164 | 165 | if choice_netwrok == 'forward_passing': 166 | 167 | # Feeding the data. 168 | net.blobs['data'].data[...] = pair_features 169 | 170 | # Fowrard passing 171 | out = net.forward() 172 | loss[counter] = out['loss'] # Calculated by the caffe: (np.sum(np.square(out_1 - out_2))) / 2 173 | 174 | out_1 = net.blobs['fc7'].data 175 | out_2 = net.blobs['fc7_p'].data 176 | 177 | # # Calculate output distance metric Manually(Warning!! This is the same implementation by the caffe for batch_size = 1) 178 | distance[counter] = np.sqrt((np.sum(np.square(out_1 - out_2)))) 179 | 180 | 181 | counter = counter + 1 182 | if counter % 100 == 0: 183 | print("The %d - th data passed!" % counter) 184 | 185 | 186 | """ 187 | PART1: Output of the Network. 188 | """ 189 | 190 | if choice_netwrok == 'forward_passing': 191 | 192 | # TODO: K-Flod validation. 193 | EER_VALIDATION, AUC_VALIDATION = K_Fold_Validation(label, distance, k=5) 194 | 195 | if choice_visualize == 'YES': 196 | 197 | # Plot histogram. 198 | Plot_HIST_Fn(label, distance, choice_phase, 'Siamese_Output_Histogram.jpg') 199 | 200 | # Plot ROC 201 | Plot_ROC_Fn(label, distance, choice_phase, 'Siamese_Output_ROC.jpg') 202 | 203 | """ 204 | PART2: Plotting the histogram of original features and the ROC curve 205 | """ 206 | 207 | ## Calculation of the original input distances. 
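# Baseline on the raw inputs: for every stored pair, take the L2 (Euclidean) distance
# between the two slices along the last axis (index 0 vs. index 1), i.e., the distance
# between the pair's features before any network processing.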
208 | distance_original = np.zeros([len(original_features), 1])
209 | for i in range(len(original_features)):
210 |     # Euclidean (L2) distance between the two elements of the pair
211 |     distance_original[i] = np.sqrt(np.sum(np.square(original_features[i][:, :, 0] - original_features[i][:, :, 1])))  # L2 - norm
212 | 
213 | if choice_visualize == 'YES':
214 | 
215 |     # Plot input histogram
216 |     Plot_HIST_Fn(label, distance_original, choice_phase, 'OriginalFeatures_Histogram.jpg')
217 | 
218 |     # Plot input ROC
219 |     Plot_ROC_Fn(label, distance_original, choice_phase, 'Siamese_Input_ROC.jpg')
220 | 
--------------------------------------------------------------------------------
/Installation/readme.md:
--------------------------------------------------------------------------------
1 | # Caffe Installation on Ubuntu with GPU support
2 | 
3 | A step-by-step process for installing Caffe on `Ubuntu (14.04)` is provided here.
4 | 
5 | ### Hardware & CUDA version
6 | The following installation has been implemented and successfully tested with [CUDA 8.0](http://on-demand.gputechconf.com/gtc/2016/webinar/cuda-8-features-overview.pdf) and an [NVIDIA TITAN X (Powered by Pascal) GPU](http://www.geforce.com/hardware/10series/titan-x-pascal). However, the method can also be used with older versions of `CUDA` and older `GPU architectures`. The assumption is that `CUDA 8.0` is already installed; a complete `CUDA` installation guide is provided [here](https://github.com/astorfi/CUDA-Installation).
7 | 
8 | ## Caffe Installation Using Python
9 | Caffe has a number of dependencies required by its structure. The following subsections give a concise list of these dependencies and the commands for installing them. Depending on the packages already installed on the system, more or fewer dependencies might be required.
10 | 
11 | The Caffe installation in this documentation uses the built-in Python of `Ubuntu Trusty (14.04)`. An `Anaconda-based` installation can also be performed, but it is not recommended for now because it has more incompatibilities and may make the installation more complicated.
12 | 
13 | **WARNING:** make sure the `Python` recognized by the system is the `default built-in Python` of Ubuntu and that *Anaconda does not own the path* for Python. You can check that with the following command, which returns the path of the Python executable:
14 | 
15 | ```
16 | which python
17 | ```
18 | Basically, you need to check that either Anaconda is not installed or that the `default Python` does not belong to the `Anaconda path`. With this check you can make sure that the `Caffe` installation does not point to the wrong path. This step is crucial for using the `Python interface` of `Caffe`.
19 | 
20 | ### Installing git, BLAS and unzip
21 | `BLAS` can be used as the backend for Caffe's matrix and vector computations. There are different implementations of this library; [OpenBLAS](http://www.openblas.net/) has been chosen here.
22 | ```
23 | sudo apt-get install libopenblas-dev git unzip
24 | ```
25 | Alternatively, you can refer to the [OpenBLAS repository](https://github.com/xianyi/OpenBLAS).
26 | 
27 | ### Install OpenCV
28 | [OpenCV](https://help.ubuntu.com/community/OpenCV) is a well-known open-source computer vision library.
29 | Refer to [this repository](https://github.com/astorfi/Install-OpenCV) for OpenCV installation.
30 | 
31 | ### Install other dependencies (Boost, ...)
32 | ```
33 | sudo apt-get update
34 | sudo apt-get install python-skimage python-opencv
35 | sudo apt-get install libboost-all-dev
36 | sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libboost-all-dev libhdf5-serial-dev
37 | sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev protobuf-compiler
38 | sudo apt-get install libatlas-base-dev
39 | ```
40 | 
41 | ### Install protobuf
42 | 
43 | For protobuf installation, a simple `pip` installation is recommended.
44 | ```
45 | sudo pip install protobuf
46 | ```
47 | 
48 | You may need to install `pip` before installing `protobuf`.
49 | 
50 | ### Clone and Install Caffe from Source
51 | In this phase, the Caffe repository must be cloned and installed:
52 | 
53 | ```
54 | git clone https://github.com/BVLC/caffe
55 | ```
56 | 
57 | After entering the code directory, a copy of the `Makefile.config.example` file must be made under the new name `Makefile.config`, to be modified if necessary.
58 | ```
59 | cd caffe
60 | cp Makefile.config.example Makefile.config
61 | ```
62 | To make any modification, edit `Makefile.config`. Here are a few possible modifications:
63 | 
64 | * cuDNN can be activated using the corresponding flag.
65 | * Instead of the system Python, Anaconda can be used by changing the associated paths.
66 | * The default is to use the GPU, but if `CPU_ONLY := 1` is activated, there is no GPU support!
67 | 
68 | In the end, we can compile Caffe and build all the test files:
69 | ```
70 | make all
71 | make test
72 | ```
73 | It is worth mentioning that the `-jX` flag can be added to the above commands to speed up the process, where `X` is the
74 | number of available CPU cores.
75 | 
76 | ### Installing Pycaffe
77 | To get a Python interface for Caffe, use the following:
78 | ```
79 | cd python
80 | for req in $(cat requirements.txt); do sudo pip install $req; done
81 | cd ..
82 | make pycaffe
83 | ```
84 | In the above terminal commands, the assumption is that we are in `$CAFFE_ROOT`. The `sudo` part is there to overcome
85 | `permission denied` issues while installing dependencies. Although adding `sudo` is not mentioned in the
86 | documentation provided by the [official Caffe installation](http://caffe.berkeleyvision.org/installation.html#prerequisites), omitting `sudo` led to incompatibilities in our experience.
87 | 
88 | WARNING: The above requirements must be installed into the default (system) Python; otherwise they cannot be recognized by `pycaffe`. However, having already run `sudo apt-get install python-skimage` likely covers the last part of the installation, namely installing the dependencies defined in the `requirements.txt` file.
89 | 
90 | In the end, we can run all the tests:
91 | ```
92 | make runtest
93 | ```
94 | 
95 | ### Alternative Method
96 | 
97 | Another way to carry out the installation is to do the following:
98 | ```
99 | cd python
100 | for req in $(cat requirements.txt); do sudo pip install $req; done
101 | cd ..
102 | ```
103 | 
104 | Then build all the necessary targets:
105 | ```
106 | make all
107 | make pycaffe
108 | make test
109 | make runtest
110 | ```
111 | 
112 | Now add the following to the `source bash file` (e.g., `~/.bashrc`):
113 | ```
114 | export CAFFE_ROOT=/path/to/caffe (ex: /home/username/caffe)
115 | export PYTHONPATH=$CAFFE_ROOT/python:$PYTHONPATH
116 | ```
117 | 
118 | ### Installation check
119 | 
120 | Use the following commands to check whether `Caffe` is installed and can be loaded through `Pycaffe`:
121 | ```
122 | python
123 | import caffe
124 | ```
125 | 
126 | **CAVEAT:** If you cannot import `caffe`, that does not mean `Caffe` is not installed! Passing all the tests in the previous phase
127 | guarantees a successful `Caffe` installation. Not being able to import caffe after running python is instead an issue with the pycaffe interface.
128 | 
129 | ### Reported Issue
130 | It might be necessary to copy the appropriate files in order to prevent [this issue](https://github.com/BVLC/caffe/issues/1463).
131 | ```
132 | sudo cp libhdf5_hl.so.7 libhdf5_hl.so.8
133 | sudo cp libhdf5.so.7 libhdf5.so.8
134 | ```
135 | 
136 | 
137 | ## Caffe Installation Using Anaconda
138 | 
139 | Here the assumption is that the user wants to install `Anaconda` and use it with `Caffe`. After the `Python Installation Procedure` above, a few modifications must be made.
140 | 
141 | ### Download
142 | 
143 | Anaconda must be downloaded from its [website](https://www.continuum.io/downloads). `Anaconda Python 2.7` is recommended.
144 | 
145 | ### Editing the bash file
146 | 
147 | The following two commands should be added to the end of the `source bash file`. However, based on our experiments, the necessity of the second one has not been proven!
148 | ```
149 | export PATH="/home/username/anaconda/bin:$PATH"
150 | export LD_LIBRARY_PATH=/home/username/anaconda/lib:$LD_LIBRARY_PATH
151 | 
152 | ```
153 | 
154 | ### Installing protobuf
155 | This time, protobuf should be installed using `conda`:
156 | ```
157 | conda install protobuf
158 | ```
159 | 
160 | ## Using PyCharm or other IDEs
161 | 
162 | In order to use an IDE and import Caffe from it, the IDE (e.g., PyCharm) *must be run from the terminal*.
163 | 
164 | 
165 | 
--------------------------------------------------------------------------------
/Create LMDB/create LMDB using numpy/CreateLMDB.py:
--------------------------------------------------------------------------------
1 | 
2 | import os
3 | import glob
4 | import sys
5 | import multiprocessing
6 | import random
7 | import dircache
8 | import lmdb
9 | import numpy as np
10 | import caffe
11 | from caffe.proto import caffe_pb2
12 | from PyQt4.QtGui import *
13 | from PySide import QtGui, QtCore
14 | import cv2
15 | 
16 | """
17 | This file creates an LMDB file from numpy files. The application is face verification, in which the
18 | LMDB file will be created from pairs of images. The genuine pairs get label "1" and the impostor
19 | pairs get label "0". The genuine files contain "gen" in their file name.
20 | 
21 | For this process, the numpy files are features,
22 | but they can basically be anything saved in numpy files.
23 | 
24 | This file does the following:
25 | 1 - Load all the ".npy" data from a folder and create a big numpy array.
26 |     * The numpy files initially have the format (width, height, num_channels, 2), where the 2 is due to having pairs.
27 |       Change the dimensions throughout this code as required by your specific file shape.
28 | 2 - Create an LMDB file from that numpy array 29 | """ 30 | 31 | """ 32 | GUI Class definition 33 | """ 34 | 35 | 36 | class MyButtons(QtGui.QDialog): 37 | """""" 38 | 39 | def __init__(self, choices, title): 40 | # Initialized and super call. 41 | super(MyButtons, self).__init__() 42 | self.initUI(choices, title) 43 | self.choice = choices 44 | 45 | def initUI(self, choices, title): 46 | option1Button = QtGui.QPushButton(choices[0]) 47 | option1Button.clicked.connect(self.onOption1) 48 | option2Button = QtGui.QPushButton(choices[1]) 49 | option2Button.clicked.connect(self.onOption2) 50 | option3Button = QtGui.QPushButton(choices[2]) 51 | option3Button.clicked.connect(self.onOption3) 52 | option4Button = QtGui.QPushButton(choices[3]) 53 | option4Button.clicked.connect(self.onOption4) 54 | 55 | buttonBox = QtGui.QDialogButtonBox() 56 | buttonBox = QtGui.QDialogButtonBox(QtCore.Qt.Horizontal) 57 | buttonBox.addButton(option1Button, QtGui.QDialogButtonBox.ActionRole) 58 | buttonBox.addButton(option2Button, QtGui.QDialogButtonBox.ActionRole) 59 | buttonBox.addButton(option3Button, QtGui.QDialogButtonBox.ActionRole) 60 | buttonBox.addButton(option4Button, QtGui.QDialogButtonBox.ActionRole) 61 | # 62 | mainLayout = QtGui.QVBoxLayout() 63 | mainLayout.addWidget(buttonBox) 64 | 65 | self.setLayout(mainLayout) 66 | # define window xLoc,yLoc,xDim,yDim 67 | self.setGeometry(250, 250, 100, 100) 68 | self.setWindowTitle(title) 69 | self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) 70 | 71 | def onOption1(self): 72 | self.retStatus = 1 73 | self.close() 74 | self.choice = self.choice[0] 75 | 76 | def onOption2(self): 77 | self.retStatus = 2 78 | self.close() 79 | self.choice = self.choice[1] 80 | 81 | def onOption3(self): 82 | self.retStatus = 3 83 | self.close() 84 | self.choice = self.choice[2] 85 | 86 | def onOption4(self): 87 | self.retStatus = 4 88 | self.close() 89 | self.choice = self.choice[3] 90 | 91 | 92 | """ 93 | GUI for training or testing phase. 94 | """ 95 | app = QtGui.QApplication(sys.argv) 96 | user_options = ['TRAIN', 'TEST', 'Cancel', 'Continue'] 97 | task_title = 'Are you intended to create testing or training pairs?!' 98 | form = MyButtons(choices=user_options, title=task_title) 99 | form.exec_() 100 | choice_phase = form.choice 101 | 102 | # If user canceled the operation. 103 | if choice_phase == 'Cancel': 104 | sys.exit("Canceled by the user") 105 | 106 | """ 107 | GUI for getting the type of features. 108 | """ 109 | user_options = ['Image', 'HOG', 'LBP', 'none'] 110 | task_title = 'From which kind of features you want to create pairs?!' 111 | form = MyButtons(choices=user_options, title=task_title) 112 | form.exec_() 113 | choice_feature = form.choice 114 | 115 | """ 116 | GUI for getting the type of features. 117 | """ 118 | user_options = ['Yes', 'No', 'DC', 'none'] 119 | task_title = 'mean subtraction by channel?!' 120 | form = MyButtons(choices=user_options, title=task_title) 121 | form.exec_() 122 | choice_mean = form.choice 123 | 124 | # Source and destination paths(both with absolute path). 125 | this_path = os.path.dirname(os.path.abspath(__file__)) 126 | # This path definition is specific. Change it as needed. 
127 | src_folder_path = 'Path/to/root/source' + choice_feature + '/' + choice_phase 128 | dst_folder_path = choice_feature + '_' + choice_phase + '_' + 'train-2010to2013-unique' 129 | 130 | # # Getting the number of cores for parallel processing 131 | # num_cores = multiprocessing.cpu_count() 132 | # print('Total number of cores', num_cores) 133 | 134 | # Getting the number of files in the folder 135 | num_files = (len([name for name in os.listdir(src_folder_path) if os.path.isfile(os.path.join(src_folder_path, name))])) 136 | print("Number of files = ", num_files) 137 | 138 | # Read a random file for getting the shapes. 139 | RandFile = random.choice(dircache.listdir(src_folder_path)) 140 | FileShape = np.load(os.path.join(src_folder_path,RandFile)).shape 141 | print("File shape: ", FileShape) 142 | 143 | 144 | # This part is specific for the feature cube of pairs that been generated. 145 | N = num_files 146 | X = np.zeros((N, FileShape[2], FileShape[0], 2 * FileShape[1]), dtype=np.int64) # the number 2 is because the hog features of a pairs should be considered separately 147 | y = np.zeros(N, dtype=np.int64) 148 | FileNum = np.zeros(N, dtype=np.int64) 149 | 150 | # Initialize a counter. 151 | counter = 0 152 | 153 | # We should shuffle the order to save the files in LMDB format. 154 | Rand_idx = np.random.permutation(range(N)) 155 | mean_image = np.load('mean_image.npy') 156 | mean_image = np.transpose(mean_image, (2, 0, 1)) 157 | mean_image = np.mean(mean_image, axis=(1,2)) 158 | 159 | 160 | # Reading all the numpy files 161 | for f in glob.glob(os.path.join(src_folder_path, "*.npy")): 162 | # Load numpy file 163 | numpy_array = np.load(f) 164 | 165 | # # Uncomment if the files have naming order and you want to save the naming order too. 166 | # file_num = os.path.basename(os.path.basename(f).split('_')[1]).split('.')[0] # This gets the number of file 167 | # FileNum[Rand_idx[counter]] = file_num 168 | 169 | # Save to new big vector X.(since numpy reshape does not preserve the order, we put all by hand!!) 170 | # FOr caffianeting the data, the second dimension in the data blob should be the channel! 171 | 172 | # Initial format 173 | left = numpy_array[:, :, :, 0] 174 | right = numpy_array[:, :, :, 1] 175 | 176 | # Change format 177 | # The format supported by Caffe is (Num_Samples, Num_Channels, Width, Height). 178 | # Comment this part if you already have the numpy files with the correct format. 179 | left = np.transpose(left, (2, 0, 1)) 180 | right = np.transpose(right, (2, 0, 1)) 181 | 182 | if choice_mean == 'Yes': 183 | left[0, :, :] = left[0, :, :] - mean_image[0] 184 | left[1, :, :] = left[1, :, :] - mean_image[1] 185 | left[2, :, :] = left[2, :, :] - mean_image[2] 186 | 187 | right[0, :, :] = right[0, :, :] - mean_image[0] 188 | right[1, :, :] = right[1, :, :] - mean_image[1] 189 | right[2, :, :] = right[2, :, :] - mean_image[2] 190 | 191 | X[Rand_idx[counter], :, :, :224] = left 192 | X[Rand_idx[counter], :, :, 224:] = right 193 | 194 | 195 | # If the pairs is genuine, then it has a "gen" in its name. 196 | if 'gen' in f: 197 | y[Rand_idx[counter]] = 1 198 | if counter % 100 == 0: 199 | # print("Processing file: {}".format(f)) 200 | print("Processing %d pairs" % counter) 201 | counter = counter + 1 202 | 203 | # # This part is commented due to memory problem for large data. 
204 | # # Create a big array in order to turn into LMDB 205 | # X = np.delete(X, np.s_[counter:X.shape[0]],0) # delete extra pre-allocated space 206 | 207 | # LMDB GENERATION 208 | ExtraMem = 100000 # Rule of thumb: the bigger the better if not out of memory!! 209 | env = lmdb.open(dst_folder_path, map_size=ExtraMem * counter) 210 | 211 | lmdb_file = dst_folder_path 212 | batch_size = 256 213 | number_of_batches = np.ceil(num_files/batch_size).astype(int) 214 | 215 | lmdb_env = lmdb.open(lmdb_file, map_size=int(1e12)) 216 | lmdb_file = lmdb_env.begin(write=True) 217 | datum = caffe_pb2.Datum() 218 | 219 | batch_num = 1 220 | file_to_LMDB_num = 0 221 | for i in range(N): 222 | 223 | # prepare the data and label 224 | # .astype(np.uint8) is for creating int instead of float for 225 | # faster postprocessing if we don't want any mean subtraction or etc. 226 | data = X[i,:,:,:].astype(np.uint8) 227 | label = int(y[i]) 228 | 229 | # save in datum 230 | datum = caffe.io.array_to_datum(data, label) 231 | key = '{:0>8d}'.format(file_to_LMDB_num) 232 | lmdb_file.put(key, datum.SerializeToString()) 233 | 234 | # write batch(batch size is flexible) 235 | if file_to_LMDB_num % batch_size == 0 and file_to_LMDB_num > 0: 236 | lmdb_file.commit() 237 | lmdb_file = lmdb_env.begin(write=True) 238 | print ("Generating batch {} of {}".format(batch_num,number_of_batches)) 239 | batch_num += 1 240 | 241 | # Increasing the counter 242 | file_to_LMDB_num += 1 243 | 244 | # write last batch(because the number of files cannot be necessary divisive by the batch size) 245 | if (file_to_LMDB_num) % batch_size != 0: 246 | lmdb_file.commit() 247 | print('generating last batch ...') 248 | print(file_to_LMDB_num) 249 | -------------------------------------------------------------------------------- /Test_Phase/test_performance.prototxt: -------------------------------------------------------------------------------- 1 | name: "test_performance" 2 | 3 | 4 | input: "data" 5 | input_shape { 6 | dim: 1 7 | dim: 3 8 | dim: 224 9 | dim: 448 10 | } 11 | 12 | 13 | layer { 14 | name: "slice_pair" 15 | type: "Slice" 16 | bottom: "data" 17 | top: "data_q" 18 | top: "data_p" 19 | slice_param { 20 | axis: 3 21 | slice_point: 224 22 | } 23 | } 24 | ########### Branch Q ########################### 25 | 26 | layer { 27 | name: "conv1_1" 28 | type: "Convolution" 29 | bottom: "data_q" 30 | top: "conv1_1" 31 | param{ 32 | name: "conv1_1_w" 33 | lr_mult: 0 34 | } 35 | param{ 36 | name: "conv1_1_b" 37 | lr_mult: 0 38 | } 39 | convolution_param { 40 | num_output: 64 41 | pad: 1 42 | kernel_size: 3 43 | } 44 | 45 | } 46 | 47 | layer { 48 | bottom: "conv1_1" 49 | top: "conv1_1" 50 | name: "relu1_1" 51 | type: "ReLU" 52 | } 53 | layer { 54 | bottom: "conv1_1" 55 | top: "conv1_2" 56 | name: "conv1_2" 57 | type: "Convolution" 58 | convolution_param { 59 | num_output: 64 60 | pad: 1 61 | kernel_size: 3 62 | } 63 | param { 64 | name: "conv1_2_w" 65 | lr_mult: 0 66 | } 67 | param { 68 | name: "conv1_2_b" 69 | lr_mult: 0 70 | } 71 | } 72 | layer { 73 | bottom: "conv1_2" 74 | top: "conv1_2" 75 | name: "relu1_2" 76 | type: "ReLU" 77 | } 78 | layer { 79 | bottom: "conv1_2" 80 | top: "pool1" 81 | name: "pool1" 82 | type: "Pooling" 83 | pooling_param { 84 | pool: MAX 85 | kernel_size: 2 86 | stride: 2 87 | } 88 | } 89 | layer { 90 | bottom: "pool1" 91 | top: "conv2_1" 92 | name: "conv2_1" 93 | type: "Convolution" 94 | convolution_param { 95 | num_output: 128 96 | pad: 1 97 | kernel_size: 3 98 | } 99 | param { 100 | name: "conv2_1_w" 101 | lr_mult: 0 102 | } 
103 | param { 104 | name: "conv2_1_b" 105 | lr_mult: 0 106 | } 107 | } 108 | layer { 109 | bottom: "conv2_1" 110 | top: "conv2_1" 111 | name: "relu2_1" 112 | type: "ReLU" 113 | } 114 | layer { 115 | bottom: "conv2_1" 116 | top: "conv2_2" 117 | name: "conv2_2" 118 | type: "Convolution" 119 | convolution_param { 120 | num_output: 128 121 | pad: 1 122 | kernel_size: 3 123 | } 124 | param { 125 | name: "conv2_2_w" 126 | lr_mult: 0 127 | } 128 | param { 129 | name: "conv2_2_b" 130 | lr_mult: 0 131 | } 132 | } 133 | layer { 134 | bottom: "conv2_2" 135 | top: "conv2_2" 136 | name: "relu2_2" 137 | type: "ReLU" 138 | } 139 | layer { 140 | bottom: "conv2_2" 141 | top: "pool2" 142 | name: "pool2" 143 | type: "Pooling" 144 | pooling_param { 145 | pool: MAX 146 | kernel_size: 2 147 | stride: 2 148 | } 149 | } 150 | layer { 151 | bottom: "pool2" 152 | top: "conv3_1" 153 | name: "conv3_1" 154 | type: "Convolution" 155 | convolution_param { 156 | num_output: 256 157 | pad: 1 158 | kernel_size: 3 159 | } 160 | param { 161 | name: "conv3_1_w" 162 | lr_mult: 0 163 | } 164 | param { 165 | name: "conv3_1_b" 166 | lr_mult: 0 167 | } 168 | } 169 | layer { 170 | bottom: "conv3_1" 171 | top: "conv3_1" 172 | name: "relu3_1" 173 | type: "ReLU" 174 | } 175 | layer { 176 | bottom: "conv3_1" 177 | top: "conv3_2" 178 | name: "conv3_2" 179 | type: "Convolution" 180 | convolution_param { 181 | num_output: 256 182 | pad: 1 183 | kernel_size: 3 184 | } 185 | param { 186 | name: "conv3_2_w" 187 | lr_mult: 0 188 | } 189 | param { 190 | name: "conv3_2_b" 191 | lr_mult: 0 192 | } 193 | } 194 | layer { 195 | bottom: "conv3_2" 196 | top: "conv3_2" 197 | name: "relu3_2" 198 | type: "ReLU" 199 | } 200 | layer { 201 | bottom: "conv3_2" 202 | top: "conv3_3" 203 | name: "conv3_3" 204 | type: "Convolution" 205 | convolution_param { 206 | num_output: 256 207 | pad: 1 208 | kernel_size: 3 209 | } 210 | param { 211 | name: "conv3_3_w" 212 | lr_mult: 0 213 | } 214 | param { 215 | name: "conv3_3_b" 216 | lr_mult: 0 217 | } 218 | } 219 | layer { 220 | bottom: "conv3_3" 221 | top: "conv3_3" 222 | name: "relu3_3" 223 | type: "ReLU" 224 | } 225 | layer { 226 | bottom: "conv3_3" 227 | top: "pool3" 228 | name: "pool3" 229 | type: "Pooling" 230 | pooling_param { 231 | pool: MAX 232 | kernel_size: 2 233 | stride: 2 234 | } 235 | } 236 | layer { 237 | bottom: "pool3" 238 | top: "conv4_1" 239 | name: "conv4_1" 240 | type: "Convolution" 241 | convolution_param { 242 | num_output: 512 243 | pad: 1 244 | kernel_size: 3 245 | } 246 | param { 247 | name: "conv4_1_w" 248 | lr_mult: 0 249 | } 250 | param { 251 | name: "conv4_1_b" 252 | lr_mult: 0 253 | } 254 | } 255 | layer { 256 | bottom: "conv4_1" 257 | top: "conv4_1" 258 | name: "relu4_1" 259 | type: "ReLU" 260 | } 261 | layer { 262 | bottom: "conv4_1" 263 | top: "conv4_2" 264 | name: "conv4_2" 265 | type: "Convolution" 266 | convolution_param { 267 | num_output: 512 268 | pad: 1 269 | kernel_size: 3 270 | } 271 | param { 272 | name: "conv4_2_w" 273 | lr_mult: 0 274 | } 275 | param { 276 | name: "conv4_2_b" 277 | lr_mult: 0 278 | } 279 | } 280 | layer { 281 | bottom: "conv4_2" 282 | top: "conv4_2" 283 | name: "relu4_2" 284 | type: "ReLU" 285 | } 286 | layer { 287 | bottom: "conv4_2" 288 | top: "conv4_3" 289 | name: "conv4_3" 290 | type: "Convolution" 291 | convolution_param { 292 | num_output: 512 293 | pad: 1 294 | kernel_size: 3 295 | } 296 | param { 297 | name: "conv4_3_w" 298 | lr_mult: 0 299 | } 300 | param { 301 | name: "conv4_3_b" 302 | lr_mult: 0 303 | } 304 | } 305 | layer { 306 | bottom: 
"conv4_3" 307 | top: "conv4_3" 308 | name: "relu4_3" 309 | type: "ReLU" 310 | } 311 | layer { 312 | bottom: "conv4_3" 313 | top: "pool4" 314 | name: "pool4" 315 | type: "Pooling" 316 | pooling_param { 317 | pool: MAX 318 | kernel_size: 2 319 | stride: 2 320 | } 321 | } 322 | layer { 323 | bottom: "pool4" 324 | top: "conv5_1" 325 | name: "conv5_1" 326 | type: "Convolution" 327 | convolution_param { 328 | num_output: 512 329 | pad: 1 330 | kernel_size: 3 331 | } 332 | param { 333 | name: "conv5_1_w" 334 | lr_mult: 1 335 | } 336 | param { 337 | name: "conv5_1_b" 338 | lr_mult: 1 339 | } 340 | } 341 | layer { 342 | bottom: "conv5_1" 343 | top: "conv5_1" 344 | name: "relu5_1" 345 | type: "ReLU" 346 | } 347 | layer { 348 | bottom: "conv5_1" 349 | top: "conv5_2" 350 | name: "conv5_2" 351 | type: "Convolution" 352 | convolution_param { 353 | num_output: 512 354 | pad: 1 355 | kernel_size: 3 356 | } 357 | param { 358 | name: "conv5_2_w" 359 | lr_mult: 1 360 | } 361 | param { 362 | name: "conv5_2_b" 363 | lr_mult: 1 364 | } 365 | } 366 | layer { 367 | bottom: "conv5_2" 368 | top: "conv5_2" 369 | name: "relu5_2" 370 | type: "ReLU" 371 | } 372 | layer { 373 | bottom: "conv5_2" 374 | top: "conv5_3" 375 | name: "conv5_3" 376 | type: "Convolution" 377 | convolution_param { 378 | num_output: 512 379 | pad: 1 380 | kernel_size: 3 381 | } 382 | param { 383 | name: "conv5_3_w" 384 | lr_mult: 1 385 | } 386 | param { 387 | name: "conv5_3_b" 388 | lr_mult: 1 389 | } 390 | } 391 | layer { 392 | bottom: "conv5_3" 393 | top: "conv5_3" 394 | name: "relu5_3" 395 | type: "ReLU" 396 | } 397 | layer { 398 | bottom: "conv5_3" 399 | top: "pool5" 400 | name: "pool5" 401 | type: "Pooling" 402 | pooling_param { 403 | pool: MAX 404 | kernel_size: 2 405 | stride: 2 406 | } 407 | } 408 | layer { 409 | bottom: "pool5" 410 | top: "fc6" 411 | name: "fc6" 412 | type: "InnerProduct" 413 | inner_product_param { 414 | num_output: 4096 415 | } 416 | param { 417 | name: "fc6_w" 418 | lr_mult: 0 419 | } 420 | param { 421 | name: "fc6_b" 422 | lr_mult: 0 423 | } 424 | } 425 | 426 | layer { 427 | bottom: "fc6" 428 | top: "fc6" 429 | name: "relu6" 430 | type: "ReLU" 431 | } 432 | layer { 433 | bottom: "fc6" 434 | top: "fc7" 435 | name: "fc7" 436 | type: "InnerProduct" 437 | inner_product_param { 438 | num_output: 4096 439 | } 440 | param { 441 | name: "fc7_w" 442 | lr_mult: 0 443 | } 444 | param { 445 | name: "fc7_b" 446 | lr_mult: 0 447 | } 448 | } 449 | 450 | ########### Branch P ########################### 451 | 452 | layer { 453 | name: "conv1_1_p" 454 | type: "Convolution" 455 | bottom: "data_p" 456 | top: "conv1_1_p" 457 | param{ 458 | name: "conv1_1_w" 459 | lr_mult: 0 460 | } 461 | param{ 462 | name: "conv1_1_b" 463 | lr_mult: 0 464 | } 465 | convolution_param { 466 | num_output: 64 467 | pad: 1 468 | kernel_size: 3 469 | } 470 | 471 | } 472 | 473 | layer { 474 | bottom: "conv1_1_p" 475 | top: "conv1_1_p" 476 | name: "relu1_1_p" 477 | type: "ReLU" 478 | } 479 | layer { 480 | bottom: "conv1_1_p" 481 | top: "conv1_2_p" 482 | name: "conv1_2_p" 483 | type: "Convolution" 484 | convolution_param { 485 | num_output: 64 486 | pad: 1 487 | kernel_size: 3 488 | } 489 | param { 490 | name: "conv1_2_w" 491 | lr_mult: 0 492 | } 493 | param { 494 | name: "conv1_2_b" 495 | lr_mult: 0 496 | } 497 | } 498 | layer { 499 | bottom: "conv1_2_p" 500 | top: "conv1_2_p" 501 | name: "relu1_2_p" 502 | type: "ReLU" 503 | } 504 | layer { 505 | bottom: "conv1_2_p" 506 | top: "pool1_p" 507 | name: "pool1_p" 508 | type: "Pooling" 509 | pooling_param { 510 | 
pool: MAX 511 | kernel_size: 2 512 | stride: 2 513 | } 514 | } 515 | layer { 516 | bottom: "pool1_p" 517 | top: "conv2_1_p" 518 | name: "conv2_1_p" 519 | type: "Convolution" 520 | convolution_param { 521 | num_output: 128 522 | pad: 1 523 | kernel_size: 3 524 | } 525 | param { 526 | name: "conv2_1_w" 527 | lr_mult: 0 528 | } 529 | param { 530 | name: "conv2_1_b" 531 | lr_mult: 0 532 | } 533 | } 534 | layer { 535 | bottom: "conv2_1_p" 536 | top: "conv2_1_p" 537 | name: "relu2_1_p" 538 | type: "ReLU" 539 | } 540 | layer { 541 | bottom: "conv2_1_p" 542 | top: "conv2_2_p" 543 | name: "conv2_2_p" 544 | type: "Convolution" 545 | convolution_param { 546 | num_output: 128 547 | pad: 1 548 | kernel_size: 3 549 | } 550 | param { 551 | name: "conv2_2_w" 552 | lr_mult: 0 553 | } 554 | param { 555 | name: "conv2_2_b" 556 | lr_mult: 0 557 | } 558 | } 559 | layer { 560 | bottom: "conv2_2_p" 561 | top: "conv2_2_p" 562 | name: "relu2_2_p" 563 | type: "ReLU" 564 | } 565 | layer { 566 | bottom: "conv2_2_p" 567 | top: "pool2_p" 568 | name: "pool2_p" 569 | type: "Pooling" 570 | pooling_param { 571 | pool: MAX 572 | kernel_size: 2 573 | stride: 2 574 | } 575 | } 576 | layer { 577 | bottom: "pool2_p" 578 | top: "conv3_1_p" 579 | name: "conv3_1_p" 580 | type: "Convolution" 581 | convolution_param { 582 | num_output: 256 583 | pad: 1 584 | kernel_size: 3 585 | } 586 | param { 587 | name: "conv3_1_w" 588 | lr_mult: 0 589 | } 590 | param { 591 | name: "conv3_1_b" 592 | lr_mult: 0 593 | } 594 | } 595 | layer { 596 | bottom: "conv3_1_p" 597 | top: "conv3_1_p" 598 | name: "relu3_1_p" 599 | type: "ReLU" 600 | } 601 | layer { 602 | bottom: "conv3_1_p" 603 | top: "conv3_2_p" 604 | name: "conv3_2_p" 605 | type: "Convolution" 606 | convolution_param { 607 | num_output: 256 608 | pad: 1 609 | kernel_size: 3 610 | } 611 | param { 612 | name: "conv3_2_w" 613 | lr_mult: 0 614 | } 615 | param { 616 | name: "conv3_2_b" 617 | lr_mult: 0 618 | } 619 | } 620 | layer { 621 | bottom: "conv3_2_p" 622 | top: "conv3_2_p" 623 | name: "relu3_2_p" 624 | type: "ReLU" 625 | } 626 | layer { 627 | bottom: "conv3_2_p" 628 | top: "conv3_3_p" 629 | name: "conv3_3_p" 630 | type: "Convolution" 631 | convolution_param { 632 | num_output: 256 633 | pad: 1 634 | kernel_size: 3 635 | } 636 | param { 637 | name: "conv3_3_w" 638 | lr_mult: 0 639 | } 640 | param { 641 | name: "conv3_3_b" 642 | lr_mult: 0 643 | } 644 | } 645 | layer { 646 | bottom: "conv3_3_p" 647 | top: "conv3_3_p" 648 | name: "relu3_3_p" 649 | type: "ReLU" 650 | } 651 | layer { 652 | bottom: "conv3_3_p" 653 | top: "pool3_p" 654 | name: "pool3_p" 655 | type: "Pooling" 656 | pooling_param { 657 | pool: MAX 658 | kernel_size: 2 659 | stride: 2 660 | } 661 | } 662 | layer { 663 | bottom: "pool3_p" 664 | top: "conv4_1_p" 665 | name: "conv4_1_p" 666 | type: "Convolution" 667 | convolution_param { 668 | num_output: 512 669 | pad: 1 670 | kernel_size: 3 671 | } 672 | param { 673 | name: "conv4_1_w" 674 | lr_mult: 0 675 | } 676 | param { 677 | name: "conv4_1_b" 678 | lr_mult: 0 679 | } 680 | } 681 | layer { 682 | bottom: "conv4_1_p" 683 | top: "conv4_1_p" 684 | name: "relu4_1_p" 685 | type: "ReLU" 686 | } 687 | layer { 688 | bottom: "conv4_1_p" 689 | top: "conv4_2_p" 690 | name: "conv4_2_p" 691 | type: "Convolution" 692 | convolution_param { 693 | num_output: 512 694 | pad: 1 695 | kernel_size: 3 696 | } 697 | param { 698 | name: "conv4_2_w" 699 | lr_mult: 0 700 | } 701 | param { 702 | name: "conv4_2_b" 703 | lr_mult: 0 704 | } 705 | } 706 | layer { 707 | bottom: "conv4_2_p" 708 | top: "conv4_2_p" 
709 | name: "relu4_2_p" 710 | type: "ReLU" 711 | } 712 | layer { 713 | bottom: "conv4_2_p" 714 | top: "conv4_3_p" 715 | name: "conv4_3_p" 716 | type: "Convolution" 717 | convolution_param { 718 | num_output: 512 719 | pad: 1 720 | kernel_size: 3 721 | } 722 | param { 723 | name: "conv4_3_w" 724 | lr_mult: 0 725 | } 726 | param { 727 | name: "conv4_3_b" 728 | lr_mult: 0 729 | } 730 | } 731 | layer { 732 | bottom: "conv4_3_p" 733 | top: "conv4_3_p" 734 | name: "relu4_3_p" 735 | type: "ReLU" 736 | } 737 | layer { 738 | bottom: "conv4_3_p" 739 | top: "pool4_p" 740 | name: "pool4_p" 741 | type: "Pooling" 742 | pooling_param { 743 | pool: MAX 744 | kernel_size: 2 745 | stride: 2 746 | } 747 | } 748 | layer { 749 | bottom: "pool4_p" 750 | top: "conv5_1_p" 751 | name: "conv5_1_p" 752 | type: "Convolution" 753 | convolution_param { 754 | num_output: 512 755 | pad: 1 756 | kernel_size: 3 757 | } 758 | param { 759 | name: "conv5_1_w" 760 | lr_mult: 1 761 | } 762 | param { 763 | name: "conv5_1_b" 764 | lr_mult: 1 765 | } 766 | } 767 | layer { 768 | bottom: "conv5_1_p" 769 | top: "conv5_1_p" 770 | name: "relu5_1_p" 771 | type: "ReLU" 772 | } 773 | layer { 774 | bottom: "conv5_1_p" 775 | top: "conv5_2_p" 776 | name: "conv5_2_p" 777 | type: "Convolution" 778 | convolution_param { 779 | num_output: 512 780 | pad: 1 781 | kernel_size: 3 782 | } 783 | param { 784 | name: "conv5_2_w" 785 | lr_mult: 1 786 | } 787 | param { 788 | name: "conv5_2_b" 789 | lr_mult: 1 790 | } 791 | } 792 | layer { 793 | bottom: "conv5_2_p" 794 | top: "conv5_2_p" 795 | name: "relu5_2_p" 796 | type: "ReLU" 797 | } 798 | layer { 799 | bottom: "conv5_2_p" 800 | top: "conv5_3_p" 801 | name: "conv5_3_p" 802 | type: "Convolution" 803 | convolution_param { 804 | num_output: 512 805 | pad: 1 806 | kernel_size: 3 807 | } 808 | param { 809 | name: "conv5_3_w" 810 | lr_mult: 1 811 | } 812 | param { 813 | name: "conv5_3_b" 814 | lr_mult: 1 815 | } 816 | } 817 | layer { 818 | bottom: "conv5_3_p" 819 | top: "conv5_3_p" 820 | name: "relu5_3_p" 821 | type: "ReLU" 822 | } 823 | layer { 824 | bottom: "conv5_3_p" 825 | top: "pool5_p" 826 | name: "pool5_p" 827 | type: "Pooling" 828 | pooling_param { 829 | pool: MAX 830 | kernel_size: 2 831 | stride: 2 832 | } 833 | } 834 | layer { 835 | bottom: "pool5_p" 836 | top: "fc6_p" 837 | name: "fc6_p" 838 | type: "InnerProduct" 839 | inner_product_param { 840 | num_output: 4096 841 | } 842 | param { 843 | name: "fc6_w" 844 | lr_mult: 0 845 | } 846 | param { 847 | name: "fc6_b" 848 | lr_mult: 0 849 | } 850 | } 851 | layer { 852 | bottom: "fc6_p" 853 | top: "fc6_p" 854 | name: "relu6_p" 855 | type: "ReLU" 856 | } 857 | layer { 858 | bottom: "fc6_p" 859 | top: "fc7_p" 860 | name: "fc7_p" 861 | type: "InnerProduct" 862 | inner_product_param { 863 | num_output: 4096 864 | } 865 | param { 866 | name: "fc7_w" 867 | lr_mult: 0 868 | } 869 | param { 870 | name: "fc7_b" 871 | lr_mult: 0 872 | } 873 | } 874 | 875 | 876 | 877 | #################### Output ############################ 878 | 879 | layer { 880 | name: "loss" 881 | type: "EuclideanLoss" 882 | bottom: "fc6" 883 | bottom: "fc6_p" 884 | top: "loss" 885 | } 886 | 887 | 888 | --------------------------------------------------------------------------------