├── .gitignore
├── CMakeLists.txt
├── README.md
├── classification.bat
├── cpp
│   ├── LenetClassifier.cpp
│   ├── LenetClassifier.h
│   ├── evaluation.cpp
│   └── evaluationcpp.vcxproj
├── cpp4caffe
│   ├── cnnpredictor.h
│   ├── cpp4caffe.vcxproj
│   └── evaluation.cpp
├── models
│   ├── deploy.prototxt
│   ├── labels.txt
│   ├── mean.binaryproto
│   ├── mean.npy
│   ├── plate_fromimg.prototxt
│   ├── plate_lenet.prototxt
│   └── solver.prototxt
├── train.bat
├── train.py
├── train.sh
└── util
    ├── deploy.prototxt
    ├── evaluation.py
    ├── meanfilebinartynpy.py
    ├── plotaccuracy.py
    ├── preprocess.py
    ├── solver.prototxt
    ├── test.prototxt
    ├── train.prototxt
    ├── train.txt
    └── val.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | build
2 | data
3 | output
4 | error
5 | error.txt
6 | *.solverstate
7 | *.caffemodel
8 | *lmdb
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8)
2 | set(ProjName caffe-onclick)
3 | project(${ProjName})
4 | set(CMAKE_CXX_STANDARD 11)
5 | find_package(OpenCV REQUIRED)
6 | include_directories(${OpenCV_INCLUDE_DIRS})
7 |
8 | find_package(Caffe REQUIRED)
9 | include_directories(${Caffe_INCLUDE_DIRS})
10 |
11 | add_executable(evaluation cpp/evaluation.cpp cpp/LenetClassifier.cpp)
12 | target_link_libraries(evaluation ${OpenCV_LIBS} ${Caffe_LIBRARIES})
13 |
14 | add_executable(cpp4caffe cpp4caffe/evaluation.cpp)
15 | target_link_libraries(cpp4caffe ${OpenCV_LIBS} ${Caffe_LIBRARIES})
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Caffe one-click training and evaluation environment
2 | ====================================
3 | **Last Update 2020.07.08**
4 |
5 | ## Overview
6 |
7 | This project provides an integrated environment: with Caffe already configured, just place your prepared images under the data directory and a single command generates the lmdb data files, mean file and label file, trains and evaluates the model, collects the misclassified samples, and produces a deployable model. Better still, it is cross-platform and switches seamlessly between Windows and Linux.
8 |
9 | Completing a specific task with deep learning, such as character recognition or face recognition, roughly breaks down into data preparation, model definition, training, evaluation and deployment.
10 |
11 | ### Configuring Caffe
12 |
13 | Configuring Caffe is now quite easy and takes only a few commands, provided the required dependencies are installed. Only the key steps are quoted here; see the reference links for the full details.
14 |
15 | **Windows**
16 |
17 | ::To avoid unnecessary trouble later, VS2015, CUDA 8.0, cuDNN 5.1 or above, and Python 2.7 are recommended
18 | git clone https://github.com/BVLC/caffe
19 | cd caffe
20 | git checkout windows
21 | scripts\build_win.cmd
22 |
23 | **Linux**
24 |
25 | git clone https://github.com/BVLC/caffe
26 | cd caffe
27 | mkdir build
28 | cd build
29 | cmake ..
30 | make -j8
31 |
32 | ### 1. Data preparation
33 |
34 | First collect data for the task at hand. A license-plate character dataset is provided (just the 10 digits 0-9); extract [data.zip](https://github.com/imistyrain/caffe-oneclick/releases/download/1.0/data.zip) into the current folder. As shown in the figure below, each class of images gets its own folder, and all class folders sit under a single data folder. Keep the format consistent (all .jpg or all .png files) and screen the folders carefully so they contain no non-image files. You can also replace these plate characters with your own data.
35 |
36 | 
37 |
38 | Caffe uses the lmdb memory-mapped database (among others) to speed up reading data during training. For this, the bundled tools provide a converter (built from convert_imageset.cpp) whose input is a file of image-path and label pairs, and generating that file by hand every time is tiresome.
39 |
40 | What we want is to read the folders automatically, so util/preprocess.py collects all file paths and matching labels from the folder layout shown above, producing the training and validation lists util/train.txt and util/val.txt as well as the label-mapping file models/labels.txt.
41 |
42 | You can also download the already converted [lmdb.tar.gz](https://github.com/imistyrain/caffe-oneclick/releases/download/1.0/lmdb.tar.gz) and use it directly.
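
A rough illustration of the generated lists (the paths here are made up): each line of util/train.txt and util/val.txt holds a relative image path and a numeric class id, which is exactly the pair format convert_imageset expects, while models/labels.txt maps each id back to its folder name.

```
0/0.jpg 0
0/190.jpg 0
9/42.jpg 9
```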
43 |
44 | ### 2. Defining the model
45 |
46 | The training definition lives in models/plate_lenet.prototxt (an ImageData variant is in models/plate_fromimg.prototxt) and the deployment definition in models/deploy.prototxt. You can visualize these networks with the [Netscope editor](http://ethereon.github.io/netscope/#/editor) to understand them more clearly.
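
As a quick sanity check (a minimal sketch, assuming pycaffe is importable), you can load the deploy definition and print every blob's shape to confirm the 1x3x20x20 input flows through the LeNet layers:

```
import caffe

# Load only the architecture; no weights are needed for a shape check.
net = caffe.Net("models/deploy.prototxt", caffe.TEST)
for name, blob in net.blobs.items():
    print(name, blob.data.shape)
```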
47 |
48 | ### 3. Training the model
49 |
50 | ```
51 | ./train.sh
52 | ```
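
If training stops midway, it can be resumed from a saved solver state (a sketch, assuming a snapshot such as output/plate_iter_5000.solverstate exists; the prefix comes from snapshot_prefix in models/solver.prototxt):

```
import caffe

caffe.set_mode_gpu()
solver = caffe.get_solver("models/solver.prototxt")
# restore() reloads both the weights and the optimizer state.
solver.restore("output/plate_iter_5000.solverstate")
solver.solve()
```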
53 |
54 | ### 4. Evaluating the model
55 |
56 | [evaluation.py](util/evaluation.py) evaluates the model on the data folder: it reports the error rate of the model at iteration 10000, prints the ground truth and prediction for every misrecognized image, and saves those images under the error folder, named character-folder/index-in-folder_groundtruth_prediction (for example 0/190_0_4.jpg means 0/190.jpg was misrecognized as 4). These misclassified samples deserve careful analysis; keep tuning the parameters until you reach the result you expect.
57 |
58 | A pretrained [model file](https://github.com/imistyrain/caffe-oneclick/releases/download/1.0/plate999.caffemodel) is provided; its error rate is below 0.1%, which means it reaches over 99.9% accuracy.
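
For a quick spot check outside the full loop, a single image can be classified the same way util/evaluation.py does internally (a minimal sketch; the image path is only an example):

```
import caffe

caffe.set_mode_gpu()
classifier = caffe.Classifier("util/deploy.prototxt", "models/plate999.caffemodel",
                              image_dims=(20, 20))
inputs = [caffe.io.load_image("data/0/0.jpg")]
pred = classifier.predict(inputs, oversample=False)
print(pred[0].argmax(), pred[0].max())  # predicted class id and its confidence
```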
59 |
60 | ### 5. Deploying the model
61 |
62 | For speed, real-world deployment usually uses C++ rather than Python, so the cpp folder provides the evaluationcpp project. It uses the singleton pattern so the model is not reloaded on every prediction; the two lines below are all it takes to use the CNN from your own code. The project also includes model evaluation.
63 |
64 | ```
65 | cv::Mat img=cv::imread("imagepath.jpg");
66 | string result=CnnPredictor::getInstance()->predict(img);
67 | ```
68 |
69 | Of course, you can also run classification.bat to have Caffe itself perform the classification
70 |
71 | ```
72 | "../build/examples/cpp_classification/classification" "modeldef/deploy.prototxt" "trainedmodels/platerecognition_iter_1000.caffemodel" "modeldef/mean.binaryproto" "modeldef/labels.txt" "data/0/4-3.jpg"
73 | ```
74 |
75 |
76 |
77 |
78 | It returns the similarity scores of the five highest-ranked classes; the trained network assigns data/0/0.jpg a probability as high as 93% of being the character 0, which is a very satisfying result.
79 |
80 | ## References
81 |
82 | * [Caffe setup and sample runs](http://blog.csdn.net/guoyk1990/article/details/52909864)
83 |
84 | * [Caffe source code explained with diagrams](http://blog.csdn.net/mounty_fsc/article/category/6136645)
85 |
86 | * [Caffe source code analysis](http://blog.csdn.net/qq_16055159)
87 |
88 | * [Reimplementing Caffe from scratch: why is Caffe designed this way?](http://www.cnblogs.com/neopenx/)
89 |
90 | * [A guided tour of the Caffe code, blog of the author of "Caffe in 21 Days"](http://blog.csdn.net/kkk584520/article/category/2620891/2)
91 |
92 | * [Derivation and implementation of convolutional neural networks](http://blog.csdn.net/zouxy09/article/details/9993371)
93 |
94 | * [Notes on reading the Caffe convolution layer code](http://blog.csdn.net/tangwei2014/article/details/47730797)
95 |
96 | * [Tutorial on adding a new layer to Caffe](http://blog.csdn.net/shuzfan/article/details/51322976)
97 |
98 | * [Preprocessing correspondences across Caffe language bindings](http://blog.csdn.net/minstyrain/article/details/78373914)
99 |
100 | * [Training your own data with mxnet](https://github.com/imistyrain/mxnet-mr)
101 |
102 | * [Training your own data with MatconvNet](https://github.com/imistyrain/MatConvNet-mr)
--------------------------------------------------------------------------------
/classification.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | set CAFFE_DIR=..
3 | set eval_iter=10000
4 | set imagepath=data/0/0.jpg
5 | set trainedmodel=output/plate_iter_%eval_iter%.caffemodel
6 | ::set trainedmodel=platere996.caffemodel
7 | echo %imagepath% %eval_iter%
8 | "%CAFFE_DIR%/build/examples/cpp_classification/classification" "models/deploy.prototxt" "%trainedmodel%" "models/mean.binaryproto" "models/labels.txt" "%imagepath%"
9 | pause
--------------------------------------------------------------------------------
/cpp/LenetClassifier.cpp:
--------------------------------------------------------------------------------
1 | #include "LenetClassifier.h"
2 | std::pairCLenetClassifier::predict(const cv::Mat &img)
3 | {
4 | std::pairp;
5 | if (!bloaded)
6 | {
7 | load();
8 | }
9 | else
10 | {
11 | cv::Mat input;
12 | cv::resize(img, input, cv::Size(20, 20));
13 | cv::Mat inputBlob = blobFromImage(input);// , 255.0f, cv::Size(20, 20), _mean, false);
14 | cv::Mat prob;
15 | _net.setInput(inputBlob, "data");
16 | prob = _net.forward("prob");
17 | cv::Mat probMat = prob.reshape(1, 1);
18 | cv::Point classNumber;
19 | cv::minMaxLoc(probMat, NULL, &p.second, NULL, &classNumber);
20 | p.first = classNumber.x;
21 | }
22 |
23 | return p;
24 | }
25 |
26 | bool CLenetClassifier::load(cv::String modelTxt, cv::String modelBin)
27 | {
28 | _net = cv::dnn::readNetFromCaffe(modelTxt, modelBin);
29 | // _mean = cv::Scalar(66, 66, 66);
30 | bloaded = !_net.empty();
31 | return bloaded;
32 | }
--------------------------------------------------------------------------------
/cpp/LenetClassifier.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <string>
3 | #include "opencv2/opencv.hpp"
4 | #include <opencv2/dnn.hpp>
5 | using namespace cv::dnn;
6 | using namespace std;
7 | const string caffeplatedir = "../";
8 | const string model_file = caffeplatedir + "/models/deploy.prototxt";
9 | const string trained_file = caffeplatedir + "/models/plate999.caffemodel";
10 | const string mean_file = caffeplatedir + "/models/mean.binaryproto";
11 |
12 | class CLenetClassifier
13 | {
14 | public:
15 |     static CLenetClassifier *getInstance()
16 |     {
17 |         // Meyers singleton: the model is loaded once and shared afterwards.
18 |         static CLenetClassifier instance;
19 |         return &instance;
20 |     }
21 |     std::pair<int, double> predict(const cv::Mat &img);
22 |     bool load(cv::String modelTxt = model_file, cv::String modelBin = trained_file);
23 | private:
24 |     bool bloaded = false;
25 |     Net _net;
26 |     cv::Scalar _mean;
27 |     CLenetClassifier() {
28 |     }
29 | };
--------------------------------------------------------------------------------
/cpp/evaluation.cpp:
--------------------------------------------------------------------------------
1 | #include "mrdir.h"
2 | #include "mrutil.h"
3 | #include "mropencv.h"
4 | #include "LenetClassifier.h"
5 | using namespace std;
6 | const string errordir = caffeplatedir + "/error";
7 | const string platedatadir = caffeplatedir + "data";
8 |
9 | void cleardir(const string dir)
10 | {
11 | vectorfiles=getAllFilesinDir(dir);
12 | for (int i = 0; i < files.size(); i++)
13 | {
14 | string filepath = dir + "/" + files[i];
15 | remove(filepath.c_str());
16 | }
17 | }
18 |
19 | void clearerror(const string dir)
20 | {
21 | cout << "clearing" << dir << endl;
22 | vectorsubdirs=getAllSubdirs(dir);
23 | for (int i = 0; i < subdirs.size(); i++)
24 | {
25 | string subdir = dir + "/" + subdirs[i];
26 | cout << subdirs[i]<subdirs=getAllSubdirs(platedatadir);
44 | for (auto sub : subdirs)
45 | {
46 | string subdir = platedatadir + "/" + sub;
47 | vectorfiles=getAllFilesinDir(subdir);
48 | for (auto file : files)
49 | {
50 | string fileapth = subdir + "/" + file;
51 | cv::Mat img = cv::imread(fileapth);
52 | auto ret=CLenetClassifier::getInstance()->predict(img).first;
53 | if (ret == string2int(sub))
54 | rightcount++;
55 | else
56 | {
57 | cout << sub + "/" + file.substr(0, file.size() - 4) + ":" + int2string(ret) << endl;
58 | errorcount++;
59 | string errorlabeldir = errordir;
60 | errorlabeldir = errorlabeldir + "/" + sub;
61 | if (!EXISTS(errorlabeldir.c_str()))
62 | {
63 | MKDIR(errorlabeldir.c_str());
64 | }
65 | string errorfilepath = errorlabeldir + "/" + file.substr(0,file.size()-4) + "_" + sub + "_" + int2string(ret) + ".png";
66 | //imshow("error", img);
67 | imwrite(errorfilepath, img);
68 | //cv::waitKey(1);
69 | }
70 | total++;
71 | }
72 | }
73 | cout << "acc:" << rightcount << "/" << total << endl;
74 | cout << rightcount*1.0 / total << endl;
75 | return 0;
76 | }
77 |
78 | int testimg(const std::string imgpath = "data/0/0.jpg")
79 | {
80 | cv::Mat img = imread(imgpath);
81 | TickMeter tm;
82 | tm.start();
83 | auto p = CLenetClassifier::getInstance()->predict(img);
84 | tm.stop();
85 | std::cout << p.first << std::endl;// " " << p.second << endl;
86 | std::cout << tm.getTimeMilli() << "ms" << std::endl;
87 | return 0;
88 | }
89 |
90 | int testdir(const std::string dir = "img")
91 | {
92 | auto files = getAllFilesinDir(dir);
93 | for (int i = 0; i < files.size(); i++)
94 | {
95 | std::string imgpath = dir + "/" + files[i];
96 | std::cout << files[i] << ":";
97 | testimg(imgpath);
98 | }
99 | return 0;
100 | }
101 |
102 | int main(int argc,char*argv[])
103 | {
104 | if (argc==1)
105 | evaluation();
106 | else
107 | {
108 | testimg();
109 | testdir();
110 | }
111 | return 0;
112 | }
--------------------------------------------------------------------------------
/cpp/evaluationcpp.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | Win32
7 |
8 |
9 | Debug
10 | x64
11 |
12 |
13 | Release
14 | Win32
15 |
16 |
17 | Release
18 | x64
19 |
20 |
21 |
22 | {0A2F0DB8-57C5-4380-93D9-E45110DE3719}
23 | Win32Proj
24 | charsrecog
25 | 8.1
26 |
27 |
28 |
29 | Application
30 | true
31 | v140
32 | Unicode
33 |
34 |
35 | Application
36 | true
37 | v140
38 | Unicode
39 |
40 |
41 | Application
42 | false
43 | v140
44 | true
45 | Unicode
46 |
47 |
48 | Application
49 | false
50 | v140
51 | true
52 | Unicode
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 | true
73 |
74 |
75 | true
76 | D:\opencv33\build\include;$(IncludePath)
77 | D:\opencv33\build\x64\vc14\lib;$(LibraryPath)
78 |
79 |
80 | false
81 |
82 |
83 | false
84 | D:\opencv33\build\include;$(IncludePath)
85 | D:\opencv33\build\x64\vc14\lib;$(LibraryPath)
86 |
87 |
88 |
89 |
90 |
91 | Level3
92 | Disabled
93 | WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)
94 | true
95 |
96 |
97 | Console
98 | true
99 |
100 |
101 |
102 |
103 |
104 |
105 | Level3
106 | Disabled
107 | WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)
108 | true
109 | $(SolutionDir)include;%(AdditionalIncludeDirectories)
110 | /D_CRT_SECURE_NO_WARNINGS %(AdditionalOptions)
111 |
112 |
113 | Console
114 | true
115 | kernel32.lib;user32.lib;gdi32.lib;winspool.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;comdlg32.lib;advapi32.lib;cudart.lib;cublas.lib;curand.lib;libprotobufd.lib;hdf5_tools.lib;hdf5_hl_fortran.lib;hdf5_fortran.lib;hdf5_hl_f90cstub.lib;hdf5_f90cstub.lib;hdf5_cpp.lib;hdf5_hl_cpp.lib;hdf5_hl.lib;hdf5.lib;zlib.lib;szip.lib;caffelibd.lib;opencv_world300d.lib;shlwapi.lib;leveldbd.lib;cuda.lib;libglog.lib;lmdb.lib;cudnn.lib;libopenblas.dll.a;libgflags.lib;cublas_device.lib;%(AdditionalDependencies)
116 |
117 |
118 | compute_20,sm_20;compute_30,sm_30;compute_35,sm_35;compute_50,sm_50;
119 |
120 |
121 |
122 |
123 | Level3
124 |
125 |
126 | MaxSpeed
127 | true
128 | true
129 | WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)
130 | true
131 |
132 |
133 | Console
134 | true
135 | true
136 | true
137 |
138 |
139 |
140 |
141 | Level3
142 |
143 |
144 | Disabled
145 | true
146 | true
147 | WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)
148 | true
149 | $(SolutionDir)include;%(AdditionalIncludeDirectories)
150 | true
151 | /D_CRT_SECURE_NO_WARNINGS %(AdditionalOptions)
152 |
153 |
154 | Console
155 | true
156 | true
157 | true
158 | kernel32.lib;user32.lib;gdi32.lib;winspool.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;comdlg32.lib;advapi32.lib;%(AdditionalDependencies)
159 |
160 |
161 |
162 |
163 |
164 |
165 | Shared
166 |
167 |
168 | 64
169 | compute_20,sm_20;compute_30,sm_30;compute_35,sm_35;compute_50,sm_50;
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
--------------------------------------------------------------------------------
/cpp4caffe/cnnpredictor.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <caffe/caffe.hpp>
3 | #include "mrdir.h"
4 | #include "opencv2/opencv.hpp"
5 | #include <algorithm>
6 | #include <iosfwd>
7 | #include <memory>
8 | #include <string>
9 | #include <utility>
10 | #include <vector>
11 | using namespace std;
12 | const string caffeplatedir = "../";
13 | const string model_file = caffeplatedir + "models/deploy.prototxt";
14 | const string trained_file = caffeplatedir + "models/plate999.caffemodel";
15 | const string mean_file = caffeplatedir + "models/mean.binaryproto";
16 | const string label_file = caffeplatedir + "models/labels.txt";
17 | using namespace caffe; // NOLINT(build/namespaces)
18 | using std::string;
19 |
20 | /* Pair (label, confidence) representing a prediction. */
21 | typedef std::pair<string, float> Prediction;
22 |
23 | class Classifier {
24 | public:
25 |     Classifier(const string& model_file,
26 |         const string& trained_file,
27 |         const string& mean_file,
28 |         const string& label_file);
29 |
30 |     std::vector<Prediction> Classify(const cv::Mat& img, int N = 5);
31 |
32 | private:
33 |     void SetMean(const string& mean_file);
34 |
35 |     std::vector<float> Predict(const cv::Mat& img);
36 |
37 |     void WrapInputLayer(std::vector<cv::Mat>* input_channels);
38 |
39 |     void Preprocess(const cv::Mat& img,
40 |         std::vector<cv::Mat>* input_channels);
41 |
42 | private:
43 |     std::shared_ptr<Net<float> > net_;
44 |     cv::Size input_geometry_;
45 |     int num_channels_;
46 |     cv::Mat mean_;
47 |     std::vector<string> labels_;
48 | };
49 |
50 | Classifier::Classifier(const string& model_file,
51 |     const string& trained_file,
52 |     const string& mean_file,
53 |     const string& label_file) {
54 | #ifdef CPU_ONLY
55 |     Caffe::set_mode(Caffe::CPU);
56 | #else
57 |     Caffe::set_mode(Caffe::GPU);
58 | #endif
59 |
60 |     /* Load the network. */
61 |     net_.reset(new Net<float>(model_file, TEST));
62 |     net_->CopyTrainedLayersFrom(trained_file);
63 |
64 |     CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input.";
65 |     CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output.";
66 |
67 |     Blob<float>* input_layer = net_->input_blobs()[0];
68 |     num_channels_ = input_layer->channels();
69 |     CHECK(num_channels_ == 3 || num_channels_ == 1)
70 |         << "Input layer should have 1 or 3 channels.";
71 |     input_geometry_ = cv::Size(input_layer->width(), input_layer->height());
72 |
73 |     /* Load the binaryproto mean file. */
74 |     SetMean(mean_file);
75 |
76 |     /* Load labels. */
77 |     std::ifstream labels(label_file.c_str());
78 |     CHECK(labels) << "Unable to open labels file " << label_file;
79 |     string line;
80 |     while (std::getline(labels, line))
81 |         labels_.push_back(string(line));
82 |
83 |     Blob<float>* output_layer = net_->output_blobs()[0];
84 |     CHECK_EQ(labels_.size(), output_layer->channels())
85 |         << "Number of labels is different from the output layer dimension.";
86 | }
87 |
88 | static bool PairCompare(const std::pair<float, int>& lhs,
89 |     const std::pair<float, int>& rhs) {
90 |     return lhs.first > rhs.first;
91 | }
92 |
93 | /* Return the indices of the top N values of vector v. */
94 | static std::vector<int> Argmax(const std::vector<float>& v, int N) {
95 |     std::vector<std::pair<float, int> > pairs;
96 |     for (size_t i = 0; i < v.size(); ++i)
97 |         pairs.push_back(std::make_pair(v[i], i));
98 |     std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(), PairCompare);
99 |
100 |     std::vector<int> result;
101 |     for (int i = 0; i < N; ++i)
102 |         result.push_back(pairs[i].second);
103 |     return result;
104 | }
105 |
106 | /* Return the top N predictions. */
107 | std::vector<Prediction> Classifier::Classify(const cv::Mat& img, int N) {
108 |     std::vector<float> output = Predict(img);
109 |
110 |     N = std::min<int>(labels_.size(), N);
111 |     std::vector<int> maxN = Argmax(output, N);
112 |     std::vector<Prediction> predictions;
113 |     for (int i = 0; i < N; ++i) {
114 |         int idx = maxN[i];
115 |         predictions.push_back(std::make_pair(labels_[idx], output[idx]));
116 |     }
117 |
118 |     return predictions;
119 | }
120 |
121 | /* Load the mean file in binaryproto format. */
122 | void Classifier::SetMean(const string& mean_file) {
123 |     BlobProto blob_proto;
124 |     ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);
125 |
126 |     /* Convert from BlobProto to Blob<float> */
127 |     Blob<float> mean_blob;
128 |     mean_blob.FromProto(blob_proto);
129 |     CHECK_EQ(mean_blob.channels(), num_channels_)
130 |         << "Number of channels of mean file doesn't match input layer.";
131 |
132 |     /* The format of the mean file is planar 32-bit float BGR or grayscale. */
133 |     std::vector<cv::Mat> channels;
134 |     float* data = mean_blob.mutable_cpu_data();
135 |     for (int i = 0; i < num_channels_; ++i) {
136 |         /* Extract an individual channel. */
137 |         cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data);
138 |         channels.push_back(channel);
139 |         data += mean_blob.height() * mean_blob.width();
140 |     }
141 |
142 |     /* Merge the separate channels into a single image. */
143 |     cv::Mat mean;
144 |     cv::merge(channels, mean);
145 |
146 |     /* Compute the global mean pixel value and create a mean image
147 |      * filled with this value. */
148 |     cv::Scalar channel_mean = cv::mean(mean);
149 |     mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean);
150 | }
151 |
152 | std::vector<float> Classifier::Predict(const cv::Mat& img) {
153 |     Blob<float>* input_layer = net_->input_blobs()[0];
154 |     input_layer->Reshape(1, num_channels_,
155 |         input_geometry_.height, input_geometry_.width);
156 |     /* Forward dimension change to all layers. */
157 |     net_->Reshape();
158 |
159 |     std::vector<cv::Mat> input_channels;
160 |     WrapInputLayer(&input_channels);
161 |
162 |     Preprocess(img, &input_channels);
163 |
164 |     net_->Forward();
165 |
166 |     /* Copy the output layer to a std::vector */
167 |     Blob<float>* output_layer = net_->output_blobs()[0];
168 |     const float* begin = output_layer->cpu_data();
169 |     const float* end = begin + output_layer->channels();
170 |     return std::vector<float>(begin, end);
171 | }
172 |
173 | /* Wrap the input layer of the network in separate cv::Mat objects
174 |  * (one per channel). This way we save one memcpy operation and we
175 |  * don't need to rely on cudaMemcpy2D. The last preprocessing
176 |  * operation will write the separate channels directly to the input
177 |  * layer. */
178 | void Classifier::WrapInputLayer(std::vector<cv::Mat>* input_channels) {
179 |     Blob<float>* input_layer = net_->input_blobs()[0];
180 |
181 |     int width = input_layer->width();
182 |     int height = input_layer->height();
183 |     float* input_data = input_layer->mutable_cpu_data();
184 |     for (int i = 0; i < input_layer->channels(); ++i) {
185 |         cv::Mat channel(height, width, CV_32FC1, input_data);
186 |         input_channels->push_back(channel);
187 |         input_data += width * height;
188 |     }
189 | }
190 |
191 | void Classifier::Preprocess(const cv::Mat& img,
192 |     std::vector<cv::Mat>* input_channels) {
193 |     /* Convert the input image to the input image format of the network. */
194 |     cv::Mat sample;
195 |     if (img.channels() == 3 && num_channels_ == 1)
196 |         cv::cvtColor(img, sample, CV_BGR2GRAY);
197 |     else if (img.channels() == 4 && num_channels_ == 1)
198 |         cv::cvtColor(img, sample, CV_BGRA2GRAY);
199 |     else if (img.channels() == 4 && num_channels_ == 3)
200 |         cv::cvtColor(img, sample, CV_BGRA2BGR);
201 |     else if (img.channels() == 1 && num_channels_ == 3)
202 |         cv::cvtColor(img, sample, CV_GRAY2BGR);
203 |     else
204 |         sample = img;
205 |
206 |     cv::Mat sample_resized;
207 |     if (sample.size() != input_geometry_)
208 |         cv::resize(sample, sample_resized, input_geometry_);
209 |     else
210 |         sample_resized = sample;
211 |
212 |     cv::Mat sample_float;
213 |     if (num_channels_ == 3)
214 |         sample_resized.convertTo(sample_float, CV_32FC3);
215 |     else
216 |         sample_resized.convertTo(sample_float, CV_32FC1);
217 |
218 |     cv::Mat sample_normalized;
219 |     // cv::subtract(sample_float, mean_, sample_normalized);
220 |     sample_normalized = sample_float;
221 |     /* This operation will write the separate BGR planes directly to the
222 |      * input layer of the network because it is wrapped by the cv::Mat
223 |      * objects in input_channels. */
224 |     cv::split(sample_normalized, *input_channels);
225 |
226 |     CHECK(reinterpret_cast<float*>(input_channels->at(0).data)
227 |         == net_->input_blobs()[0]->cpu_data())
228 |         << "Input channels are not wrapping the input layer of the network.";
229 | }
230 |
231 | class CnnPredictor
232 | {
233 | public:
234 |     static CnnPredictor *getInstance()
235 |     {
236 |         // Meyers singleton: the Classifier (and the model it loads) is
237 |         // constructed once on first use and shared afterwards.
238 |         static CnnPredictor instance;
239 |         return &instance;
240 |     }
241 |     const string predict(cv::Mat &img)
242 |     {
243 |         std::vector<Prediction> predictions = pclassifier->Classify(img);
244 |         return predictions[0].first;
245 |     }
246 | private:
247 |     CnnPredictor()
248 |     {
249 |         pclassifier = new Classifier(model_file, trained_file, mean_file, label_file);
250 |     };
251 |     ~CnnPredictor()
252 |     {
253 |         if (pclassifier)
254 |             delete pclassifier;
255 |     };
256 |     Classifier *pclassifier;
257 | };
--------------------------------------------------------------------------------
/cpp4caffe/cpp4caffe.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | x64
7 |
8 |
9 | Release
10 | x64
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 | {D9C1D98F-F412-44F6-A819-D1AFB27D31E7}
21 | Win32Proj
22 | x64
23 | cpp4caffe
24 | NoUpgrade
25 | 8.1
26 |
27 |
28 |
29 | Application
30 | false
31 | MultiByte
32 | v140
33 |
34 |
35 | Application
36 | false
37 | MultiByte
38 | v140
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 | <_ProjectFileVersion>10.0.20506.1
49 | Debug\
50 | Debug\
51 | $(ProjectName)-d
52 | .exe
53 | true
54 | true
55 | ..\bin\
56 | Release\
57 | $(ProjectName)
58 | .exe
59 | false
60 | true
61 |
62 |
63 |
64 | Debug/
65 | EnableFastChecks
66 | CompileAsCpp
67 | ProgramDatabase
68 | Sync
69 | Disabled
70 | MaxSpeed
71 | NotUsing
72 | MultiThreadedDebugDLL
73 | true
74 | Level3
75 | WIN32;_WINDOWS;CAFFE_VERSION=1.0.0;BOOST_ALL_NO_LIB;USE_LMDB;USE_LEVELDB;USE_CUDNN;USE_OPENCV;CMAKE_WINDOWS_BUILD;GLOG_NO_ABBREVIATED_SEVERITIES;GOOGLE_GLOG_DLL_DECL=__declspec(dllimport);GOOGLE_GLOG_DLL_DECL_FOR_UNITTESTS=__declspec(dllimport);H5_BUILT_AS_DYNAMIC_LIB=1;CMAKE_INTDIR="Debug";%(PreprocessorDefinitions)
76 | $(IntDir)
77 |
78 |
79 | WIN32;_DEBUG;_WINDOWS;CAFFE_VERSION=1.0.0;BOOST_ALL_NO_LIB;USE_LMDB;USE_LEVELDB;USE_CUDNN;USE_OPENCV;CMAKE_WINDOWS_BUILD;GLOG_NO_ABBREVIATED_SEVERITIES;GOOGLE_GLOG_DLL_DECL=__declspec(dllimport);GOOGLE_GLOG_DLL_DECL_FOR_UNITTESTS=__declspec(dllimport);H5_BUILT_AS_DYNAMIC_LIB=1;CMAKE_INTDIR=\"Debug\";%(PreprocessorDefinitions)
80 |
81 |
82 | $(ProjectDir)/$(IntDir)
83 | %(Filename).h
84 | %(Filename).tlb
85 | %(Filename)_i.c
86 | %(Filename)_p.c
87 |
88 |
89 | %(AdditionalOptions) /machine:x64
90 | kernel32.lib;user32.lib;gdi32.lib;winspool.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;comdlg32.lib;advapi32.lib;caffe-d.lib;caffeproto-d.lib;boost_system-vc140-mt-gd-1_61.lib;boost_thread-vc140-mt-gd-1_61.lib;boost_filesystem-vc140-mt-gd-1_61.lib;boost_chrono-vc140-mt-gd-1_61.lib;boost_date_time-vc140-mt-gd-1_61.lib;boost_atomic-vc140-mt-gd-1_61.lib;glogd.lib;gflagsd.lib;shlwapi.lib;libprotobufd.lib;caffehdf5_hl_D.lib;caffehdf5_D.lib;caffezlibd.lib;lmdbd.lib;ntdll.lib;leveldbd.lib;snappy_staticd.lib;caffezlibd.lib;cudart.lib;curand.lib;cublas.lib;cublas_device.lib;cudnn.lib;opencv_highgui310d.lib;opencv_videoio310d.lib;opencv_imgcodecs310d.lib;opencv_imgproc310d.lib;opencv_core310d.lib;libopenblas.dll.a;python27.lib;boost_python-vc140-mt-gd-1_61.lib
91 | %(AdditionalLibraryDirectories)
92 | Debug
93 | %(IgnoreSpecificDefaultLibraries)
94 | MTCNN-d.pdb
95 | Console
96 |
97 |
98 |
99 |
100 | false
101 |
102 |
103 |
104 |
105 | %(AdditionalIncludeDirectories)
106 | Release/
107 | CompileAsCpp
108 | Sync
109 | AnySuitable
110 | MaxSpeed
111 | NotUsing
112 | MultiThreadedDLL
113 | true
114 | Level3
115 | WIN32;_WINDOWS;NDEBUG;CAFFE_VERSION=1.0.0;BOOST_ALL_NO_LIB;USE_LMDB;USE_LEVELDB;USE_CUDNN;USE_OPENCV;CMAKE_WINDOWS_BUILD;GLOG_NO_ABBREVIATED_SEVERITIES;GOOGLE_GLOG_DLL_DECL=__declspec(dllimport);GOOGLE_GLOG_DLL_DECL_FOR_UNITTESTS=__declspec(dllimport);H5_BUILT_AS_DYNAMIC_LIB=1;CMAKE_INTDIR="Release";%(PreprocessorDefinitions)
116 | $(IntDir)
117 | ProgramDatabase
118 |
119 |
120 | WIN32;_WINDOWS;NDEBUG;CAFFE_VERSION=1.0.0;BOOST_ALL_NO_LIB;USE_LMDB;USE_LEVELDB;USE_CUDNN;USE_OPENCV;CMAKE_WINDOWS_BUILD;GLOG_NO_ABBREVIATED_SEVERITIES;GOOGLE_GLOG_DLL_DECL=__declspec(dllimport);GOOGLE_GLOG_DLL_DECL_FOR_UNITTESTS=__declspec(dllimport);H5_BUILT_AS_DYNAMIC_LIB=1;CMAKE_INTDIR=\"Release\";%(PreprocessorDefinitions)
121 |
122 |
123 | $(ProjectDir)/$(IntDir)
124 | %(Filename).h
125 | %(Filename).tlb
126 | %(Filename)_i.c
127 | %(Filename)_p.c
128 |
129 |
130 | %(AdditionalOptions) /machine:x64
131 | kernel32.lib;user32.lib;gdi32.lib;winspool.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;comdlg32.lib;advapi32.lib;boost_system-vc140-mt-1_61.lib;boost_thread-vc140-mt-1_61.lib;boost_filesystem-vc140-mt-1_61.lib;boost_chrono-vc140-mt-1_61.lib;boost_date_time-vc140-mt-1_61.lib;boost_atomic-vc140-mt-1_61.lib;glog.lib;gflags.lib;shlwapi.lib;libprotobuf.lib;caffehdf5_hl.lib;caffehdf5.lib;caffezlib.lib;lmdb.lib;ntdll.lib;leveldb.lib;snappy_static.lib;caffe.lib;caffeproto.lib;cudart.lib;curand.lib;cublas.lib;cublas_device.lib;cudnn.lib;opencv_highgui310.lib;opencv_imgcodecs310.lib;opencv_imgproc310.lib;opencv_core310.lib;libopenblas.dll.a;python27.lib;boost_python-vc140-mt-1_61.lib
132 | %(AdditionalLibraryDirectories)
133 | true
134 | %(IgnoreSpecificDefaultLibraries)
135 | MTCNN.pdb
136 | Console
137 |
138 |
139 |
140 |
141 | false
142 |
143 |
144 |
145 |
146 |
147 |
--------------------------------------------------------------------------------
/cpp4caffe/evaluation.cpp:
--------------------------------------------------------------------------------
1 | #define REG_USE_CNN 1
2 | #pragma warning(disable:4996)
3 | #include "mrdir.h"
4 | #include "mrutil.h"
5 | #include "cnnpredictor.h"
6 | const string errordir = caffeplatedir + "error";
7 | const string platedatadir = caffeplatedir + "data";
8 |
9 | void cleardir(const string dir)
10 | {
11 |     vector<string> files = getAllFilesinDir(dir);
12 |     for (int i = 0; i < files.size(); i++)
13 |     {
14 |         string filepath = dir + "/" + files[i];
15 |         remove(filepath.c_str());
16 |     }
17 | }
18 |
19 | void clearerror(const string dir)
20 | {
21 |     vector<string> subdirs = getAllSubdirs(dir);
22 |     for (int i = 0; i < subdirs.size(); i++)
23 |     {
24 |         string subdir = dir + "/" + subdirs[i];
25 |         cleardir(subdir);
26 |     }
27 | }
28 |
29 | int evaluation()
30 | {
31 |     string line;
32 |     string label;
33 |     int rightcount = 0, errorcount = 0, total = 0;
34 |     if (!EXISTS(errordir.c_str()))
35 |     {
36 |         cout << "Error dir not exist" << endl;
37 |         MKDIR(errordir.c_str());
38 |     }
39 |     clearerror(errordir);
40 |     vector<string> subdirs = getAllSubdirs(platedatadir);
41 |     for (auto sub : subdirs)
42 |     {
43 |         string subdir = platedatadir + "/" + sub;
44 |         vector<string> files = getAllFilesinDir(subdir);
45 |         for (auto file : files)
46 |         {
47 |             string filepath = subdir + "/" + file;
48 |             cv::Mat img = cv::imread(filepath);
49 |             // labels.txt lines are "<id> <name>"; take the name part of the prediction.
50 |             auto ret = split(CnnPredictor::getInstance()->predict(img), " ")[1];
51 |             if (ret == sub)
52 |                 rightcount++;
53 |             else
54 |             {
55 |                 errorcount++;
56 |                 string errorlabeldir = errordir;
57 |                 errorlabeldir = errorlabeldir + "/" + sub;
58 |                 if (!EXISTS(errorlabeldir.c_str()))
59 |                 {
60 |                     MKDIR(errorlabeldir.c_str());
61 |                 }
62 |                 string errorfilepath = errorlabeldir + "/" + file.substr(0, file.size() - 4) + "_" + sub + "_" + ret + ".png";
63 |                 cout << sub + "/" + file.substr(0, file.size() - 4) + ":" + ret << endl;
64 |                 //imshow("error", img);
65 |                 imwrite(errorfilepath, img);
66 |                 //cv::waitKey(1);
67 |             }
68 |             total++;
69 |         }
70 |     }
71 |     cout << "acc:" << rightcount << "/" << total << endl;
72 |     cout << rightcount * 1.0 / total << endl;
73 |     return 0;
74 | }
75 |
76 | int main(int argc, char *argv[])
77 | {
78 |     if (argc == 1)
79 |         evaluation();
80 |     else
81 |     {
82 |         cv::Mat img = cv::imread(argv[1]);
83 |         cout << CnnPredictor::getInstance()->predict(img) << endl;
84 |     }
85 |     return 0;
86 | }
--------------------------------------------------------------------------------
/models/deploy.prototxt:
--------------------------------------------------------------------------------
1 | name: "LeNet"
2 | layer{
3 | name:"data"
4 | type:"Input"
5 | top: "data"
6 | input_param { shape: {dim:1 dim:3 dim:20 dim:20 } }
7 | }
8 | layer {
9 | name: "conv1"
10 | type: "Convolution"
11 | bottom: "data"
12 | top: "conv1"
13 | param {
14 | lr_mult: 1
15 | }
16 | param {
17 | lr_mult: 2
18 | }
19 | convolution_param {
20 | num_output: 20
21 | kernel_size: 5
22 | stride: 1
23 | weight_filler {
24 | type: "xavier"
25 | }
26 | bias_filler {
27 | type: "constant"
28 | }
29 | }
30 | }
31 | layer {
32 | name: "pool1"
33 | type: "Pooling"
34 | bottom: "conv1"
35 | top: "pool1"
36 | pooling_param {
37 | pool: MAX
38 | kernel_size: 2
39 | stride: 2
40 | }
41 | }
42 | layer {
43 | name: "conv2"
44 | type: "Convolution"
45 | bottom: "pool1"
46 | top: "conv2"
47 | param {
48 | lr_mult: 1
49 | }
50 | param {
51 | lr_mult: 2
52 | }
53 | convolution_param {
54 | num_output: 50
55 | kernel_size: 5
56 | stride: 1
57 | weight_filler {
58 | type: "xavier"
59 | }
60 | bias_filler {
61 | type: "constant"
62 | }
63 | }
64 | }
65 | layer {
66 | name: "pool2"
67 | type: "Pooling"
68 | bottom: "conv2"
69 | top: "pool2"
70 | pooling_param {
71 | pool: MAX
72 | kernel_size: 2
73 | stride: 2
74 | }
75 | }
76 |
77 | layer {
78 | name: "fc1"
79 | type: "InnerProduct"
80 | bottom: "pool2"
81 | top: "fc1"
82 | param {
83 | lr_mult: 1
84 | }
85 | param {
86 | lr_mult: 2
87 | }
88 | inner_product_param {
89 | num_output: 500
90 | weight_filler {
91 | type: "xavier"
92 | }
93 | bias_filler {
94 | type: "constant"
95 | }
96 | }
97 | }
98 | layer {
99 | name: "relu1"
100 | type: "ReLU"
101 | bottom: "fc1"
102 | top: "fc1"
103 | }
104 | layer {
105 | name: "fc2"
106 | type: "InnerProduct"
107 | bottom: "fc1"
108 | top: "fc2"
109 | param {
110 | lr_mult: 1
111 | }
112 | param {
113 | lr_mult: 2
114 | }
115 | inner_product_param {
116 | num_output: 10
117 | weight_filler {
118 | type: "xavier"
119 | }
120 | bias_filler {
121 | type: "constant"
122 | }
123 | }
124 | }
125 | layer {
126 | name: "prob"
127 | type: "Softmax"
128 | bottom: "fc2"
129 | top: "prob"
130 | }
--------------------------------------------------------------------------------
/models/labels.txt:
--------------------------------------------------------------------------------
1 | 0 0
2 | 1 1
3 | 2 2
4 | 3 3
5 | 4 4
6 | 5 5
7 | 6 6
8 | 7 7
9 | 8 8
10 | 9 9
11 |
--------------------------------------------------------------------------------
/models/mean.binaryproto:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/imistyrain/caffe-oneclick/2f4d0ace52b5c94a281376de706842cc3245775d/models/mean.binaryproto
--------------------------------------------------------------------------------
/models/mean.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/imistyrain/caffe-oneclick/2f4d0ace52b5c94a281376de706842cc3245775d/models/mean.npy
--------------------------------------------------------------------------------
/models/plate_fromimg.prototxt:
--------------------------------------------------------------------------------
1 | name: "LeNet"
2 | layer {
3 | name: "mnist"
4 | type: "ImageData"
5 | top: "data"
6 | top: "label"
7 | image_data_param {
8 | source: "util/train.txt"
9 | root_folder: "data/"
10 | new_height: 20
11 | new_width: 20
12 | batch_size: 64
13 | shuffle: true
14 | }
15 | include: { phase: TRAIN }
16 | }
17 | layer {
18 | name: "mnist"
19 | type: "ImageData"
20 | top: "data"
21 | top: "label"
22 | image_data_param {
23 | source: "util/val.txt"
24 | root_folder: "data/"
25 | new_height: 20
26 | new_width: 20
27 | batch_size: 64
28 | }
29 | include: { phase: TEST }
30 | }
31 | layer {
32 | name: "conv1"
33 | type: "Convolution"
34 | bottom: "data"
35 | top: "conv1"
36 | param {
37 | lr_mult: 1
38 | }
39 | param {
40 | lr_mult: 2
41 | }
42 | convolution_param {
43 | num_output: 20
44 | kernel_size: 5
45 | stride: 1
46 | weight_filler {
47 | type: "xavier"
48 | }
49 | bias_filler {
50 | type: "constant"
51 | }
52 | }
53 | }
54 | layer {
55 | name: "pool1"
56 | type: "Pooling"
57 | bottom: "conv1"
58 | top: "pool1"
59 | pooling_param {
60 | pool: MAX
61 | kernel_size: 2
62 | stride: 2
63 | }
64 | }
65 | layer {
66 | name: "conv2"
67 | type: "Convolution"
68 | bottom: "pool1"
69 | top: "conv2"
70 | param {
71 | lr_mult: 1
72 | }
73 | param {
74 | lr_mult: 2
75 | }
76 | convolution_param {
77 | num_output: 50
78 | kernel_size: 5
79 | stride: 1
80 | weight_filler {
81 | type: "xavier"
82 | }
83 | bias_filler {
84 | type: "constant"
85 | }
86 | }
87 | }
88 | layer {
89 | name: "pool2"
90 | type: "Pooling"
91 | bottom: "conv2"
92 | top: "pool2"
93 | pooling_param {
94 | pool: MAX
95 | kernel_size: 2
96 | stride: 2
97 | }
98 | }
99 | layer {
100 | name: "fc1"
101 | type: "InnerProduct"
102 | bottom: "pool2"
103 | top: "fc1"
104 | param {
105 | lr_mult: 1
106 | }
107 | param {
108 | lr_mult: 2
109 | }
110 | inner_product_param {
111 | num_output: 500
112 | weight_filler {
113 | type: "xavier"
114 | }
115 | bias_filler {
116 | type: "constant"
117 | }
118 | }
119 | }
120 | layer {
121 | name: "relu1"
122 | type: "ReLU"
123 | bottom: "fc1"
124 | top: "fc1"
125 | }
126 | layer {
127 | name: "fc2"
128 | type: "InnerProduct"
129 | bottom: "fc1"
130 | top: "fc2"
131 | param {
132 | lr_mult: 1
133 | }
134 | param {
135 | lr_mult: 2
136 | }
137 | inner_product_param {
138 | num_output: 10
139 | weight_filler {
140 | type: "xavier"
141 | }
142 | bias_filler {
143 | type: "constant"
144 | }
145 | }
146 | }
147 | layer {
148 | name: "accuracy"
149 | type: "Accuracy"
150 | bottom: "fc2"
151 | bottom: "label"
152 | top: "accuracy"
153 | include {
154 | phase: TEST
155 | }
156 | }
157 | layer {
158 | name: "loss"
159 | type: "SoftmaxWithLoss"
160 | bottom: "fc2"
161 | bottom: "label"
162 | top: "loss"
163 | }
--------------------------------------------------------------------------------
/models/plate_lenet.prototxt:
--------------------------------------------------------------------------------
1 | name: "Lenet"
2 | layer {
3 | name: "Lenet"
4 | type: "Data"
5 | top: "data"
6 | top: "label"
7 | transform_param {
8 | scale: 0.00390625
9 | }
10 | data_param {
11 | source: "lmdb/train_lmdb"
12 | backend: LMDB
13 | batch_size: 64
14 | }
15 | include: { phase: TRAIN }
16 | }
17 | layer {
18 | name: "Lenet"
19 | type: "Data"
20 | top: "data"
21 | top: "label"
22 | transform_param {
23 | scale: 0.00390625
24 | }
25 | data_param {
26 | source: "lmdb/val_lmdb"
27 | backend: LMDB
28 | batch_size: 64
29 | }
30 | include: { phase: TEST }
31 | }
32 | layer {
33 | name: "conv1"
34 | type: "Convolution"
35 | bottom: "data"
36 | top: "conv1"
37 | param {
38 | lr_mult: 1
39 | }
40 | param {
41 | lr_mult: 2
42 | }
43 | convolution_param {
44 | num_output: 20
45 | kernel_size: 5
46 | stride: 1
47 | weight_filler {
48 | type: "xavier"
49 | }
50 | bias_filler {
51 | type: "constant"
52 | }
53 | }
54 | }
55 | layer {
56 | name: "pool1"
57 | type: "Pooling"
58 | bottom: "conv1"
59 | top: "pool1"
60 | pooling_param {
61 | pool: MAX
62 | kernel_size: 2
63 | stride: 2
64 | }
65 | }
66 | layer {
67 | name: "conv2"
68 | type: "Convolution"
69 | bottom: "pool1"
70 | top: "conv2"
71 | param {
72 | lr_mult: 1
73 | }
74 | param {
75 | lr_mult: 2
76 | }
77 | convolution_param {
78 | num_output: 50
79 | kernel_size: 5
80 | stride: 1
81 | weight_filler {
82 | type: "xavier"
83 | }
84 | bias_filler {
85 | type: "constant"
86 | }
87 | }
88 | }
89 | layer {
90 | name: "pool2"
91 | type: "Pooling"
92 | bottom: "conv2"
93 | top: "pool2"
94 | pooling_param {
95 | pool: MAX
96 | kernel_size: 2
97 | stride: 2
98 | }
99 | }
100 |
101 | layer {
102 | name: "fc1"
103 | type: "InnerProduct"
104 | bottom: "pool2"
105 | top: "fc1"
106 | param {
107 | lr_mult: 1
108 | }
109 | param {
110 | lr_mult: 2
111 | }
112 | inner_product_param {
113 | num_output: 500
114 | weight_filler {
115 | type: "xavier"
116 | }
117 | bias_filler {
118 | type: "constant"
119 | }
120 | }
121 | }
122 | layer {
123 | name: "relu1"
124 | type: "ReLU"
125 | bottom: "fc1"
126 | top: "fc1"
127 | }
128 | layer {
129 | name: "fc2"
130 | type: "InnerProduct"
131 | bottom: "fc1"
132 | top: "fc2"
133 | param {
134 | lr_mult: 1
135 | }
136 | param {
137 | lr_mult: 2
138 | }
139 | inner_product_param {
140 | num_output: 10
141 | weight_filler {
142 | type: "xavier"
143 | }
144 | bias_filler {
145 | type: "constant"
146 | }
147 | }
148 | }
149 | layer {
150 | name: "accuracy"
151 | type: "Accuracy"
152 | bottom: "fc2"
153 | bottom: "label"
154 | top: "accuracy"
155 | include {
156 | phase: TEST
157 | }
158 | }
159 | layer {
160 | name: "loss"
161 | type: "SoftmaxWithLoss"
162 | bottom: "fc2"
163 | bottom: "label"
164 | top: "loss"
165 | }
--------------------------------------------------------------------------------
/models/solver.prototxt:
--------------------------------------------------------------------------------
1 | # The train/test net protocol buffer definition
2 | net: "models/plate_lenet.prototxt"
3 | #net: "models/plate_fromimg.prototxt"
4 | # test_iter specifies how many forward passes the test should carry out.
5 | # With a validation batch size of 64 (see plate_lenet.prototxt),
6 | # 24 iterations cover the validation set.
7 | test_iter: 24
8 | # Carry out testing every 1000 training iterations.
9 | test_interval: 1000
10 | # The base learning rate, momentum and the weight decay of the network.
11 | base_lr: 0.01
12 | momentum: 0.9
13 | #solver_type: ADAGRAD
14 | weight_decay: 0.0005
15 | #weight_decay: 0.0005
16 | # The learning rate policy
17 | lr_policy: "inv"
18 | gamma: 0.0001
19 | power: 0.75
20 | # Display every 1000 iterations
21 | display: 1000
22 | # The maximum number of iterations
23 | max_iter: 10000
24 | # snapshot intermediate results
25 | snapshot: 5000
26 | snapshot_prefix: "output/plate"
27 | # solver mode: CPU or GPU
28 | solver_mode: GPU
29 |
--------------------------------------------------------------------------------
/train.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 | set CAFFE_DIR=..
3 | set DATA=data
4 | set RESIZE_DIM=20
5 | set converttool=%CAFFE_DIR%/build/tools/convert_imageset
6 | set finetunemodel=models/plate999.caffemodel
7 | if exist "lmdb/train_lmdb" (set regenlmdb=0) else (set regenlmdb=1)
8 | ::set regenlmdb=1
9 |
10 | if %regenlmdb% equ 1 goto regeneratelmdb
11 |
12 | goto train
13 | :regeneratelmdb
14 | echo "Creating train lmdb..."
15 | del "lmdb/train_lmdb\*.*" /f /s /q
16 | del "lmdb/val_lmdb\*.*" /f /s /q
17 | rd /s /q "lmdb/train_lmdb"
18 | rd /s /q "lmdb/val_lmdb"
19 | rd /s /q lmdb
20 | mkdir lmdb
21 | "%converttool%" --resize_height=%REISZE_DIM% --resize_width=%REISZE_DIM% --shuffle "%DATA%/" "util/train.txt" "lmdb/train_lmdb"
22 | echo "Creating val lmdb..."
23 | "%converttool%" --resize_height=%REISZE_DIM% --resize_width=%REISZE_DIM% --shuffle "%DATA%/" "util/val.txt" "lmdb/val_lmdb"
24 |
25 | echo "Computing mean:"
26 | "%CAFFE_DIR%/build/tools/compute_image_mean" "lmdb/train_lmdb" "models/mean.binaryproto"
27 |
28 | :train
29 | rem if exist %finetunemodel% (set extra_cmd="--weights=%finetunemodel%")
30 | rem "%CAFFE_DIR%/build/tools/caffe" train --solver=models/solver.prototxt %extra_cmd%
31 | python3 train.py
32 |
33 | :evaluation
34 | python util/evaluation.py
35 | echo "Done"
36 | pause
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import shutil
4 | sys.path.insert(0, '../python')
5 | import caffe
6 | from caffe import layers as L, params as P
7 | from caffe.proto import caffe_pb2
8 | import lmdb
9 | from tqdm import tqdm
10 | import logging
11 | import cv2
12 | import numpy as np
13 |
14 | root_folder = "data/"
15 | batch_size = 64
16 | test_batch_size = 100
17 | input_size = [20, 20]
18 | G4 = 4*1024*1024*1024
19 |
20 | def remove_if_exists(db):
21 |     if os.path.exists(db):
22 |         logging.info('remove %s' % db)
23 |         shutil.rmtree(db)
24 |
25 | def get_test_num(valpath="util/val.txt"):
26 |     with open(valpath) as f:
27 |         lines = f.readlines()
28 |     return len(lines)
29 |
30 | def make_datum(img, label):
31 |     return caffe_pb2.Datum(channels=3, width=input_size[0], height=input_size[1], label=label,
32 |                            data=np.rollaxis(img, 2).tobytes())
33 |
34 | def gen_data_layer(phase="train", batch_size=64, uselmdb=True):
35 |     if uselmdb:
36 |         source = "lmdb/"+phase+"_lmdb"
37 |         if not os.path.exists(source):
38 |             print("creating "+source)
39 |             os.makedirs(source)
40 |             db = lmdb.open(source, map_size=G4)
41 |             txn = db.begin(write=True)
42 |             txtfile = "util/"+phase+".txt"
43 |             with open(txtfile) as f:
44 |                 lines = f.readlines()
45 |             for i, line in tqdm(enumerate(lines)):
46 |                 items = line.split()
47 |                 imgpath = root_folder+"/"+items[0]
48 |                 img = cv2.imread(imgpath)
49 |                 if img is None:
50 |                     logging.info("cannot read "+imgpath)
51 |                     continue
52 |                 key = "%08d_data" % (i)
53 |                 label = int(items[1])
54 |                 # lmdb keys must be bytes under Python 3.
55 |                 txn.put(key.encode("ascii"), make_datum(img, label).SerializeToString())
56 |                 if i % 1000 == 0:
57 |                     txn.commit()
58 |                     txn = db.begin(write=True)
59 |             # Commit the remaining records before closing the database.
60 |             txn.commit()
61 |             db.close()
62 |         data, label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=source, transform_param=dict(scale=1./255), ntop=2)
63 |     else:
64 |         txtfile = "util/"+phase+".txt"
65 |         data, label = L.ImageData(image_data_param=dict(source=txtfile, root_folder=root_folder, batch_size=batch_size, shuffle=phase == "train", new_width=20, new_height=20), ntop=2, transform_param=dict(scale=1./255))
66 |     return data, label
67 |
68 | def lenet(phase="train", batch_size=64):
69 |     n = caffe.NetSpec()
70 |     n.data, n.label = gen_data_layer(phase, batch_size)
71 |     n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
72 |     n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
73 |     n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
74 |     n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
75 |     n.fc1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
76 |     n.relu1 = L.ReLU(n.fc1, in_place=True)
77 |     n.fc2 = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
78 |     n.acc = L.Accuracy(n.fc2, n.label)
79 |     n.loss = L.SoftmaxWithLoss(n.fc2, n.label)
80 |     return n
81 |
82 | def lenet_deploy(net, deploy_net_file="util/deploy.prototxt"):
83 |     deploy_net = net
84 |     with open(deploy_net_file, 'w') as f:
85 |         net_param = deploy_net.to_proto()
86 |         # Drop the data layer and the accuracy/loss heads, then declare a
87 |         # plain input blob for deployment.
88 |         del net_param.layer[0]
89 |         del net_param.layer[-1]
90 |         del net_param.layer[-1]
91 |         net_param.name = 'lenet'
92 |         net_param.input.extend(['data'])
93 |         net_param.input_shape.extend([
94 |             caffe_pb2.BlobShape(dim=[1, 3, input_size[0], input_size[1]])])
95 |         f.write(str(net_param))
96 |
97 | def gen_solver_txt(train_net_path, test_net_path):
98 |     s = caffe_pb2.SolverParameter()
99 |     s.train_net = train_net_path
100 |     s.test_net.append(test_net_path)
101 |     s.test_interval = 500
102 |     s.test_iter.append(int(get_test_num()/test_batch_size))
103 |     s.max_iter = 10000
104 |     s.base_lr = 0.01
105 |     s.lr_policy = 'step'
106 |     s.gamma = 0.1
107 |     s.power = 0.75
108 |     s.stepsize = 5000
109 |     s.momentum = 0.9
110 |     s.weight_decay = 5e-4
111 |     s.display = 1000
112 |     s.snapshot = 5000
113 |     s.snapshot_prefix = 'output/plate'
114 |     s.solver_mode = caffe_pb2.SolverParameter.GPU
115 |     return s
116 |
117 | def main():
118 |     train_net_path = 'util/train.prototxt'
119 |     net = lenet('train', batch_size)
120 |     with open(train_net_path, 'w') as f:
121 |         f.write(str(net.to_proto()))
122 |     test_net_path = 'util/test.prototxt'
123 |     net = lenet('val', test_batch_size)
124 |     with open(test_net_path, 'w') as f:
125 |         f.write(str(net.to_proto()))
126 |     lenet_deploy(net)
127 |     solver_path = 'util/solver.prototxt'
128 |     with open(solver_path, 'w') as f:
129 |         f.write(str(gen_solver_txt(train_net_path, test_net_path)))
130 |     caffe.set_mode_gpu()
131 |     solver = caffe.get_solver(solver_path)
132 |     solver.solve()
133 |
134 | if __name__ == "__main__":
135 |     main()
--------------------------------------------------------------------------------
/train.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | set -e
3 | DATA=data
4 | TOOLS=../build/tools
5 | regeneratelmdb=0
6 |
7 | convert_lmdb(){
8 | RESIZE_HEIGHT=20
9 | RESIZE_WIDTH=20
10 | echo "Creating train lmdb..."
11 | GLOG_logtostderr=1 $TOOLS/convert_imageset \
12 | --resize_height=$RESIZE_HEIGHT \
13 | --resize_width=$RESIZE_WIDTH \
14 | --shuffle $DATA/ \
15 | util/train.txt \
16 | lmdb/train_lmdb
17 | echo "Creating val lmdb..."
18 | GLOG_logtostderr=1 $TOOLS/convert_imageset \
19 | --resize_height=$RESIZE_HEIGHT \
20 | --resize_width=$RESIZE_WIDTH \
21 | --shuffle $DATA/ \
22 | util/val.txt \
23 | lmdb/val_lmdb
24 | }
25 | create_lmdb(){
26 | if [ -d lmdb ] ; then
27 | if [ $regeneratelmdb -eq 1 ] ; then
28 | rm lmdb -rf
29 | mkdir lmdb
30 | convert_lmdb
31 | fi
32 | else
33 | mkdir lmdb
34 | convert_lmdb
35 | fi
36 | }
37 | train(){
38 | if [ ! -d output ]; then
39 | mkdir output
40 | fi
41 | latest=$(ls -t models/*.caffemodel 2>/dev/null | head -n 1)
42 | if [ -f "${latest}" ]; then
43 | echo "Resume training from ${latest}"
44 | $TOOLS/caffe train --solver=models/solver.prototxt --weights=$latest
45 | else
46 | echo "Start Training"
47 | $TOOLS/caffe train --solver=models/solver.prototxt
48 | fi
49 | echo "Done"
50 | }
51 | # python evaluate
52 | evaluate(){
53 | latest=$(ls -t output/*.caffemodel | head -n 1)
54 | echo "Evaluating "${latest}
55 | python util/evaluation.py --weights=${latest}
56 | cd ..
57 | }
58 | # c++ evaluate
59 | run(){
60 | if [ ! -d build ]; then
61 | mkdir build
62 | fi
63 | cd build
64 | cmake ..
65 | make -j8
66 | ./evaluation
67 | #./cpp4caffe
68 | }
69 | # generate util/train.txt and val.txt for training
70 | # python3 util/preprocess.py
71 | #create_lmdb
72 | # train
73 | python train.py
74 | evaluate
75 | #run
--------------------------------------------------------------------------------
/util/deploy.prototxt:
--------------------------------------------------------------------------------
1 | name: "lenet"
2 | input: "data"
3 | input_shape {
4 | dim: 1
5 | dim: 3
6 | dim: 20
7 | dim: 20
8 | }
9 | layer {
10 | name: "conv1"
11 | type: "Convolution"
12 | bottom: "data"
13 | top: "conv1"
14 | convolution_param {
15 | num_output: 20
16 | kernel_size: 5
17 | weight_filler {
18 | type: "xavier"
19 | }
20 | }
21 | }
22 | layer {
23 | name: "pool1"
24 | type: "Pooling"
25 | bottom: "conv1"
26 | top: "pool1"
27 | pooling_param {
28 | pool: MAX
29 | kernel_size: 2
30 | stride: 2
31 | }
32 | }
33 | layer {
34 | name: "conv2"
35 | type: "Convolution"
36 | bottom: "pool1"
37 | top: "conv2"
38 | convolution_param {
39 | num_output: 50
40 | kernel_size: 5
41 | weight_filler {
42 | type: "xavier"
43 | }
44 | }
45 | }
46 | layer {
47 | name: "pool2"
48 | type: "Pooling"
49 | bottom: "conv2"
50 | top: "pool2"
51 | pooling_param {
52 | pool: MAX
53 | kernel_size: 2
54 | stride: 2
55 | }
56 | }
57 | layer {
58 | name: "fc1"
59 | type: "InnerProduct"
60 | bottom: "pool2"
61 | top: "fc1"
62 | inner_product_param {
63 | num_output: 500
64 | weight_filler {
65 | type: "xavier"
66 | }
67 | }
68 | }
69 | layer {
70 | name: "relu1"
71 | type: "ReLU"
72 | bottom: "fc1"
73 | top: "fc1"
74 | }
75 | layer {
76 | name: "fc2"
77 | type: "InnerProduct"
78 | bottom: "fc1"
79 | top: "fc2"
80 | inner_product_param {
81 | num_output: 10
82 | weight_filler {
83 | type: "xavier"
84 | }
85 | }
86 | }
87 |
--------------------------------------------------------------------------------
/util/evaluation.py:
--------------------------------------------------------------------------------
1 | import os,argparse,sys,time,shutil
2 | import numpy as np
3 | sys.path.append('../python')
4 | import caffe
5 | import logging
6 | from tqdm import tqdm
7 |
8 | def create_logger(logdir="output"):
9 |     time_str = time.strftime('%Y%m%d-%H%M%S')
10 |     log_file = '{}/{}.log'.format(logdir, time_str)
11 |     head = '%(asctime)-15s %(message)s'
12 |     logging.basicConfig(filename=str(log_file), format=head)
13 |     logger = logging.getLogger()
14 |     logger.setLevel(logging.INFO)
15 |     console = logging.StreamHandler()
16 |     logging.getLogger('').addHandler(console)
17 |
18 | def clearlasterrors(args):
19 |     if os.path.exists(args.errordir):
20 |         subdirs = os.listdir(args.errordir)
21 |         for subdir in subdirs:
22 |             files = os.listdir(args.errordir+"/"+subdir)
23 |             for file in files:
24 |                 os.remove(args.errordir+"/"+subdir+"/"+file)
25 |             os.rmdir(args.errordir+"/"+subdir)
26 |
27 | def loadmean(meanprotopath):
28 |     blob = caffe.proto.caffe_pb2.BlobProto()
29 |     blob.ParseFromString(open(meanprotopath, 'rb').read())
30 |     return np.array(caffe.io.blobproto_to_array(blob))[0]
31 |
32 | def getclassifier(args):
33 |     classifier = caffe.Classifier(args.modeldef, args.weights, image_dims=args.image_dims)
34 |     ##mean=loadmean(args.meanfile).mean(1).mean(1),#raw_scale=255,channel_swap=[2,1,0]
35 |     caffe.set_mode_gpu()
36 |     return classifier
37 |
38 | class EvalStatic:
39 |     total = 0
40 |     error = 0
41 |     def __str__(self):
42 |         return str(self.error)+","+str(self.total)+","+str(self.error*1.0/self.total)
43 |
44 | def evaluationonebyone(args):
45 |     labels = [w.split()[1] for w in open(args.labelfile).readlines()]
46 |     classifier = getclassifier(args)
47 |     start = time.time()
48 |     if not os.path.exists(args.errordir):
49 |         os.mkdir(args.errordir)
50 |     subdirs = os.listdir(args.datadir)
51 |     evalstatics = []
52 |     for subdir in subdirs:
53 |         print(subdir+":")
54 |         evalstatic = EvalStatic()
55 |         files = os.listdir(args.datadir+'/'+subdir)
56 |         evalstatic.total = len(files)
57 |         for file in tqdm(files):
58 |             imgpath = args.datadir+'/'+subdir+'/'+file
59 |             inputs = [caffe.io.load_image(imgpath)]
60 |             try:
61 |                 predictions = classifier.predict(inputs, oversample=False)
62 |             except Exception as e:
63 |                 print(e)
64 |                 continue
65 |             p = predictions[0, :].argmax()
66 |             label = labels[p]
67 |             if subdir != label:
68 |                 logging.info(subdir+" "+file+":"+str(label))
69 |                 evalstatic.error = evalstatic.error+1
70 |                 if not os.path.exists(args.errordir+'/'+subdir):
71 |                     os.mkdir(args.errordir+'/'+subdir)
72 |                 errorfilepath = args.errordir+'/'+subdir+'/'+file[:-4]+"_"+subdir+'_'+label+'.jpg'
73 |                 shutil.copy(imgpath, errorfilepath)
74 |         evalstatics.append(evalstatic)
75 |     logging.info("Done in %.2f s." % (time.time() - start))
76 |     totalcount = 0
77 |     error = 0
78 |     for i, evalstatic in enumerate(evalstatics):
79 |         error = error+evalstatic.error
80 |         totalcount = totalcount+evalstatic.total
81 |         logging.info(subdirs[i]+":"+str(evalstatic))
82 |     logging.info("Total error")
83 |     logging.info(str(error)+" "+str(totalcount)+" "+str(error*1.0/totalcount))
84 |
85 | def evaluation_batch(args):
86 |     labels = [w.split()[1] for w in open(args.labelfile).readlines()]
87 |     classifier = getclassifier(args)
88 |     start = time.time()
89 |     if not os.path.exists(args.errordir):
90 |         os.mkdir(args.errordir)
91 |     subdirs = os.listdir(args.datadir)
92 |     evalstatics = []
93 |     for subdir in subdirs:
94 |         print(subdir)
95 |         evalstatic = EvalStatic()
96 |         files = os.listdir(args.datadir+'/'+subdir)
97 |         evalstatic.total = len(files)
98 |         inputs = [caffe.io.load_image(args.datadir+'/'+subdir+'/'+file) for file in files]
99 |         try:
100 |             predictions = classifier.predict(inputs, oversample=False)
101 |         except Exception as e:
102 |             print(e)
103 |             continue
104 |         for i in tqdm(range(len(files))):
105 |             p = predictions[i, :].argmax()
106 |             label = labels[p]
107 |             if subdir != label:
108 |                 logging.info(subdir+" "+files[i]+":"+str(label))
109 |                 evalstatic.error = evalstatic.error+1
110 |                 if not os.path.exists(args.errordir+'/'+subdir):
111 |                     os.mkdir(args.errordir+'/'+subdir)
112 |                 imgpath = args.datadir+"/"+subdir+"/"+files[i]
113 |                 errorfilepath = args.errordir+'/'+subdir+'/'+files[i][:-4]+"_"+subdir+'_'+label+'.jpg'
114 |                 shutil.copy(imgpath, errorfilepath)
115 |         evalstatics.append(evalstatic)
116 |     logging.info("Done in %.2f s." % (time.time() - start))
117 |     totalcount = 0
118 |     error = 0
119 |     for i, evalstatic in enumerate(evalstatics):
120 |         error = error+evalstatic.error
121 |         totalcount = totalcount+evalstatic.total
122 |         logging.info(subdirs[i]+":"+str(evalstatic))
123 |     logging.info("Total error")
124 |     logging.info(str(error)+" "+str(totalcount)+" "+str(error*1.0/totalcount))
125 |
126 | def get_args():
127 |     parser = argparse.ArgumentParser()
128 |     parser.add_argument("--iter", default=10000, help="caffemodel iter to evaluate")
129 |     parser.add_argument("--datadir", default="data", help="datadir")
130 |     parser.add_argument("--image_dims", default=[20, 20], help="image_dims")
131 |     parser.add_argument("--modeldef", default="util/deploy.prototxt", help="deploy file")
132 |     parser.add_argument("--weights", default="models/plate999.caffemodel", help="caffemodel")
133 |     parser.add_argument("--labelfile", default="models/labels.txt", help="label file")
134 |     parser.add_argument("--meanfile", default="models/mean.binaryproto", help="meanfile")
135 |     parser.add_argument("--errordir", default="error", help="errordir")
136 |     parser.add_argument("--logfile", default="error.txt", help="log txt")
137 |     parser.add_argument("--evaluationonebyone", default=True, help="evaluate images one by one")
138 |     parser.add_argument("--imgpath", default="data/0/0.jpg", help="image path")
139 |     args = parser.parse_args()
140 |     return args
141 |
142 | def classification():
143 |     args = get_args()
144 |     labels = [w.split()[1] for w in open(args.labelfile).readlines()]
145 |     classifier = getclassifier(args)
146 |     inputs = [caffe.io.load_image(args.imgpath)]
147 |     predictions = classifier.predict(inputs, oversample=False)
148 |     p = predictions[0, :].argmax()
149 |     label = labels[p]
150 |     print(label, predictions[0, p])
151 |     top_inds = predictions[0, :].argsort()[::-1][:5]
152 |
153 | def evaluation():
154 |     args = get_args()
155 |     clearlasterrors(args)
156 |     create_logger()
157 |     if args.evaluationonebyone:
158 |         evaluationonebyone(args)
159 |     else:
160 |         evaluation_batch(args)
161 |
162 | if __name__ == '__main__':
163 |     evaluation()
164 |     #classification()
165 |
--------------------------------------------------------------------------------
/util/meanfilebinartynpy.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | sys.path.append('../../python')
4 |
5 | MEAN_BIN = "../models/mean.binaryproto"
6 | MEAN_NPY = "../models/mean.npy"
7 |
8 | from caffe.proto import caffe_pb2
9 | from caffe.io import blobproto_to_array
10 | print('generating mean file...')
11 | mean_blob = caffe_pb2.BlobProto()
12 | mean_blob.ParseFromString(open(MEAN_BIN, 'rb').read())
13 |
14 | import numpy as np
15 | mean_npy = blobproto_to_array(mean_blob)
16 | mean_npy_shape = mean_npy.shape
17 | mean_npy = mean_npy.reshape(mean_npy_shape[1], mean_npy_shape[2], mean_npy_shape[3])
18 |
19 | np.save(open(MEAN_NPY, 'wb'), mean_npy)
20 | print('done...')
--------------------------------------------------------------------------------
/util/plotaccuracy.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | # 迷若烟雨@Baidu
3 | import re
4 | import matplotlib.pyplot as plt
5 | import os
6 | import sys
7 | import datetime
8 |
9 | def getlastesttraininfofilefromdir(logdir):
10 |     logfiles = os.listdir(logdir)
11 |     infologfiles = list(filter(lambda s: s.startswith('INFO'), logfiles))
12 |     infologfiles = list(filter(lambda s: s.endswith('.txt'), infologfiles))
13 |     if infologfiles:
14 |         lastestfile = os.path.join(logdir, infologfiles[0])
15 |         maxtm = 0
16 |         # Pick the most recently modified training log.
17 |         for logf in infologfiles:
18 |             path = os.path.join(logdir, logf)
19 |             timestamp = os.path.getmtime(path)
20 |             if timestamp > maxtm:
21 |                 maxtm = timestamp
22 |                 lastestfile = path
23 |         return lastestfile
24 |     else:
25 |         return None
26 |
27 | def plotaccuarcy():
28 |     logdir = 'log'
29 |     infologfile = getlastesttraininfofilefromdir(logdir)
30 |     print(infologfile)
31 |     if infologfile:
32 |         # infologfile='../log/INFO2015-11-19T19-45-15.txt'
33 |         f = open(infologfile)
34 |         lines = f.read()
35 |         iterations = re.findall('Iteration \d*', lines)
36 |         accuracysstrings = re.findall('accuracy = \d*.\d*', lines)
37 |         trainlosstrings = re.findall('Train net output #0: loss = \d*.\d*', lines)
38 |         testlossstrings = re.findall('Test net output #1: loss = \d*.\d*', lines)
39 |         f.close()
40 |         # Strip the prefixes and convert to float so matplotlib plots numbers, not strings.
41 |         accuracys = [float(ac.split('=')[-1]) for ac in accuracysstrings]
42 |         trainlosses = [float(loss.split('=')[-1]) for loss in trainlosstrings]
43 |         testlosses = [float(loss.split('=')[-1]) for loss in testlossstrings]
44 |         plt.plot(range(len(accuracys)), accuracys)
45 |         #plt.plot(range(len(trainlosses)), trainlosses)
46 |         #plt.plot(range(len(testlosses)), testlosses)
47 |         plt.show()
48 |
49 | if __name__ == "__main__":
50 |     plotaccuarcy()
--------------------------------------------------------------------------------
/util/preprocess.py:
--------------------------------------------------------------------------------
1 | import os,argparse,random,shutil
2 |
3 | def main(args):
4 |     datadir = args.rootdir+"/"+args.dataname
5 |     print("loading data from "+datadir+":")
6 |     trainfile = open("util/train.txt", "w")
7 |     valfile = open("util/val.txt", "w")
8 |     categoryfile = open("models/labels.txt", 'w')
9 |     paths = os.listdir(datadir)
10 |     classindex = 0
11 |     trainpaths = []
12 |     valpaths = []
13 |     categorys = []
14 |     for subdir in paths:
15 |         if(os.path.isdir(datadir+"/"+subdir)):
16 |             categorys.append(str(classindex)+" "+subdir+"\n")
17 |             files = os.listdir(datadir+"/"+subdir)
18 |             files = [file for file in files]
19 |             random.shuffle(files)
20 |             print(subdir, len(files))
21 |             num2train = len(files)*args.trainratio
22 |             for fileindex, file in enumerate(files):
23 |                 # The first trainratio portion of the shuffled files goes to the
24 |                 # training list, the rest to validation ("<path> <label>" per line).
25 |                 if fileindex < num2train:
26 |                     trainpaths.append(subdir+"/"+file+" "+str(classindex)+"\n")
27 |                 else:
28 |                     valpaths.append(subdir+"/"+file+" "+str(classindex)+"\n")
29 |             classindex = classindex+1
30 |     trainfile.writelines(trainpaths)
31 |     valfile.writelines(valpaths)
32 |     categoryfile.writelines(categorys)
33 |     trainfile.close()
34 |     valfile.close()
35 |     categoryfile.close()
36 |
37 | if __name__ == "__main__":
38 |     parser = argparse.ArgumentParser()
39 |     parser.add_argument("--rootdir", default=".", help="root directory")
40 |     parser.add_argument("--dataname", default="data", help="data folder name")
41 |     # Split-ratio default assumed; the truncated original did not show it.
42 |     parser.add_argument("--trainratio", type=float, default=0.9, help="fraction of images used for training")
43 |     args = parser.parse_args()
44 |     main(args)
--------------------------------------------------------------------------------