├── .idea
│   ├── deployment.xml
│   └── vcs.xml
└── HOG+SVM classifer
    ├── classifier.py
    ├── config.py
    ├── data
    │   └── config
    │       └── config.cfg
    ├── extractFeat.py
    └── readme.txt
/HOG+SVM classifer/classifier.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# encoding: utf-8
"""
@author:
@time: 2017/3/19 11:08
Train a linear SVM on the HOG features dumped by extractFeat.py and
evaluate it on the test features.
"""
from sklearn.svm import LinearSVC
import joblib  # sklearn.externals.joblib was removed from recent scikit-learn; use joblib directly
import glob
import os
import time
from config import *

if __name__ == "__main__":
    t0 = time.time()
    clf_type = 'LIN_SVM'
    fds = []      # training feature vectors
    labels = []   # training class labels
    num = 0       # correctly classified test samples
    total = 0     # total test samples

    # Each .feat file holds the HOG descriptor with the label appended as the last element.
    for feat_path in glob.glob(os.path.join(train_feat_path, '*.feat')):
        data = joblib.load(feat_path)
        fds.append(data[:-1])
        labels.append(data[-1])

    if clf_type == 'LIN_SVM':
        clf = LinearSVC()
        print("Training a Linear SVM Classifier.")
        clf.fit(fds, labels)

        # If the model directory doesn't exist, create it, then save the classifier.
        if not os.path.isdir(os.path.split(model_path)[0]):
            os.makedirs(os.path.split(model_path)[0])
        joblib.dump(clf, model_path)
        print("Classifier saved to {}".format(model_path))

        for feat_path in glob.glob(os.path.join(test_feat_path, '*.feat')):
            total += 1
            data_test = joblib.load(feat_path)
            data_test_feat = data_test[:-1].reshape((1, -1))
            result = clf.predict(data_test_feat)
            if int(result) == int(data_test[-1]):
                num += 1

        rate = float(num) / total
        t1 = time.time()
        print('The classification accuracy is %f' % rate)
        print('The cost of time is: %f' % (t1 - t0))
--------------------------------------------------------------------------------
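classifier.py trains and evaluates in a single pass, but since it also dumps the fitted model to model_path, the model can be reloaded later without retraining. A minimal sketch of that reuse, assuming classifier.py has already been run once so the saved model and the test .feat files exist:

    import glob, os
    import joblib
    from config import model_path, test_feat_path

    clf = joblib.load(model_path)                                      # fitted LinearSVC saved by classifier.py
    some_feat = glob.glob(os.path.join(test_feat_path, '*.feat'))[0]   # any dumped test sample
    data = joblib.load(some_feat)                                      # HOG descriptor + label
    print("predicted:", int(clf.predict(data[:-1].reshape(1, -1))), "true:", int(data[-1]))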
/HOG+SVM classifer/config.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# encoding: utf-8
"""
@author:
@time: 2017/3/18 21:03
Set the config variables.
"""
import configparser as cp
import json

config = cp.RawConfigParser()
config.read('./data/config/config.cfg')

# HOG parameters; json.loads turns the cfg strings into Python ints/lists.
orientations = json.loads(config.get("hog", "orientations"))
pixels_per_cell = json.loads(config.get("hog", "pixels_per_cell"))
cells_per_block = json.loads(config.get("hog", "cells_per_block"))
visualize = config.getboolean("hog", "visualize")
normalize = config.getboolean("hog", "normalize")

# Paths for the dumped feature files and the trained SVM model.
train_feat_path = config.get("path", "train_feat_path")
test_feat_path = config.get("path", "test_feat_path")
model_path = config.get("path", "model_path")
--------------------------------------------------------------------------------
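For reference, json.loads is what lets the .cfg values come back as real Python types rather than raw strings. A quick check of that parsing, using the same values that appear in config.cfg:

    import json

    # The raw strings as configparser returns them.
    assert json.loads("9") == 9              # orientations -> int
    assert json.loads("[8, 8]") == [8, 8]    # pixels_per_cell -> list of ints
    assert json.loads("[2, 2]") == [2, 2]    # cells_per_block -> list of ints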
/HOG+SVM classifer/data/config/config.cfg:
--------------------------------------------------------------------------------
[hog]
orientations: 9
pixels_per_cell: [8, 8]
cells_per_block: [2, 2]
visualize: False
normalize: True

[path]
train_feat_path: ./data/features/train
test_feat_path: ./data/features/test
model_path: ./data/models/svm.model
--------------------------------------------------------------------------------
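These settings fix the length of each HOG descriptor on a 32x32 CIFAR image: 32/8 = 4 cells per side, 4 - 2 + 1 = 3 block positions per side, so 3*3 blocks x 2*2 cells x 9 orientations = 324 values. Since extractFeat.py appends the label, every .feat file holds 325 numbers and classifier.py sees 324-dimensional features. A quick sanity check of that arithmetic (a sketch, assuming skimage's hog with its default feature_vector=True):

    import numpy as np
    from skimage.feature import hog

    fd = hog(np.zeros((32, 32)), orientations=9,
             pixels_per_cell=(8, 8), cells_per_block=(2, 2))
    # 3 x 3 block positions x (2 x 2 cells) x 9 orientations = 324 values
    assert fd.shape == (324,)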
/HOG+SVM classifer/extractFeat.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# encoding: utf-8
"""
@author:
@time: 2017/3/18 14:33
Extract HOG features from the CIFAR-10 python batches and dump one .feat
file per image (HOG descriptor with the label appended).
"""
# Import the function that computes HOG feature descriptors
from skimage.feature import hog
import numpy as np
import joblib  # sklearn.externals.joblib was removed from recent scikit-learn; use joblib directly
# To read the batch files and save the image feature descriptors
import os
import time
import pickle
from config import *


def unpickle(file):
    """Load one CIFAR-10 batch file (a pickled dict with bytes keys)."""
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    return batch


def rgb2gray(im):
    """Convert an HxWx3 RGB image to grayscale with the ITU-R 601 weights."""
    gray = im[:, :, 0] * 0.2989 + im[:, :, 1] * 0.5870 + im[:, :, 2] * 0.1140
    return gray


def getData(filePath):
    TrainData = []
    TestData = []
    for childDir in os.listdir(filePath):
        if 'data_batch' in childDir:
            f = os.path.join(filePath, childDir)
            data = unpickle(f)
            # Each batch holds 10000 images stored as 3072 bytes (3 channels of 32*32).
            # If your Python version does not accept bytes-literal keys like b'data',
            # use str.encode('data') instead.
            train = np.reshape(data[b'data'], (10000, 3, 32 * 32))
            labels = np.reshape(data[b'labels'], (10000, 1))
            fileNames = np.reshape(data[b'filenames'], (10000, 1))
            datalabels = zip(train, labels, fileNames)
            TrainData.extend(datalabels)
        if childDir == "test_batch":
            f = os.path.join(filePath, childDir)
            data = unpickle(f)
            test = np.reshape(data[b'data'], (10000, 3, 32 * 32))
            labels = np.reshape(data[b'labels'], (10000, 1))
            fileNames = np.reshape(data[b'filenames'], (10000, 1))
            TestData = list(zip(test, labels, fileNames))
    return TrainData, TestData


def getFeat(TrainData, TestData):
    # Make sure the feature directories exist before dumping into them.
    os.makedirs(test_feat_path, exist_ok=True)
    os.makedirs(train_feat_path, exist_ok=True)
    for data in TestData:
        # (3, 1024) -> (32, 32, 3): channel-last image, then grayscale in [0, 1]
        image = np.reshape(data[0].T, (32, 32, 3))
        gray = rgb2gray(image) / 255.0
        fd = hog(gray, orientations, pixels_per_cell, cells_per_block)
        # Append the label so classifier.py can split it off again.
        fd = np.concatenate((fd, data[1]))
        filename = list(data[2])
        fd_name = str(filename[0], encoding="utf-8").split('.')[0] + '.feat'
        fd_path = os.path.join(test_feat_path, fd_name)
        joblib.dump(fd, fd_path)
    print("Test features are extracted and saved.")
    for data in TrainData:
        image = np.reshape(data[0].T, (32, 32, 3))
        gray = rgb2gray(image) / 255.0
        fd = hog(gray, orientations, pixels_per_cell, cells_per_block)
        fd = np.concatenate((fd, data[1]))
        filename = list(data[2])
        fd_name = str(filename[0], encoding="utf-8").split('.')[0] + '.feat'
        fd_path = os.path.join(train_feat_path, fd_name)
        joblib.dump(fd, fd_path)
    print("Train features are extracted and saved.")


if __name__ == '__main__':
    t0 = time.time()
    filePath = r'E:\dataset\cifar-10\cifar-10-batches-py'
    TrainData, TestData = getData(filePath)
    getFeat(TrainData, TestData)
    t1 = time.time()
    print("Features are extracted and saved.")
    print('The cost of time is: %f' % (t1 - t0))
--------------------------------------------------------------------------------
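The reshape in getFeat relies on CIFAR-10's storage order: each row is 3072 values, the first 1024 red, the next 1024 green, the last 1024 blue, each plane in row-major 32x32 order. Transposing the (3, 1024) array and reshaping to (32, 32, 3) therefore interleaves one value from each plane per pixel. A small check of that layout on synthetic data:

    import numpy as np

    # Stand-in for one CIFAR-10 row: red plane 0..1023, green 1024..2047, blue 2048..3071.
    row = np.arange(3 * 32 * 32).reshape(3, 32 * 32)
    image = np.reshape(row.T, (32, 32, 3))          # same transform as getFeat
    assert list(image[0, 0]) == [0, 1024, 2048]     # pixel (0, 0) takes one value from each plane
    assert list(image[0, 1]) == [1, 1025, 2049]     # next pixel along the first row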
/HOG+SVM classifer/readme.txt:
--------------------------------------------------------------------------------
step1: Install the required packages; this part is straightforward.
step2: Download the training and test image data. This program uses the CIFAR-10 dataset:
       http://www.cs.toronto.edu/~kriz/cifar.html
step3: Change filePath = r'D:\ObjectClassify\cifar-10-batches-py' to the path of your own data.
######################
The earlier version was written for Python 2.7; the recently uploaded one targets Python 3.5. If you still hit version-related errors, please open an issue.
--------------------------------------------------------------------------------
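Putting the steps together, the assumed run order is: extract features first (this fills ./data/features/train and ./data/features/test), then train and evaluate. A sketch, assuming both scripts are launched from the "HOG+SVM classifer" directory so the relative ./data paths in config.cfg resolve:

    import subprocess

    # Step 1: dump one .feat file per CIFAR-10 image (HOG descriptor + label).
    subprocess.run(["python", "extractFeat.py"], check=True)
    # Step 2: train the linear SVM on the train features and report test accuracy.
    subprocess.run(["python", "classifier.py"], check=True)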