├── README.md
├── bpnn.cpp
├── testdata.txt
└── traindata.txt

/README.md:
--------------------------------------------------------------------------------
# A C++ Implementation of a BP Neural Network

Video: [Fully Understand BP Neural Networks: Theory Derivation + Code Implementation (C++)](https://www.bilibili.com/video/BV1Y64y1z7jM)

### Note

The code in this project has been refactored and is no longer recommended for use. Please use the project below instead:

[![GavinTechStudio/Back-Propagation-Neural-Network - GitHub](https://gh-card.dev/repos/GavinTechStudio/Back-Propagation-Neural-Network.svg)](https://github.com/GavinTechStudio/Back-Propagation-Neural-Network)

### Possible Issues

#### Convergence

The `traindata.txt` and `testdata.txt` shipped with this project use `\r` as the line separator, so on Windows an extra blank line may be read in, which keeps the program from converging.

[BP network fails to converge · Issue #1 · GavinTechStudio/bpnn_with_cpp (github.com)](https://github.com/GavinTechStudio/bpnn_with_cpp/issues/1)

#### Data Loading

If the program prints `Error in reading traindata.txt`, the data files are in the wrong place: `traindata.txt` and `testdata.txt` must sit in the same directory as the executable.
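#### A Robust Read Loop, Sketched

One way to sidestep line-ending trouble entirely is to let `operator>>` drive the read loop: extraction skips all whitespace (spaces, `\n`, and stray `\r`) and fails cleanly at end of file, so neither a blank line nor a stale trailing value ever enters the data. A minimal sketch, with a hypothetical `readDoubles` helper for illustration (`getFileData` in `bpnn.cpp` follows the same pattern):

```cpp
#include <fstream>
#include <string>
#include <vector>

// Read every whitespace-separated double from a file.
// The extraction in the loop condition skips '\r' and blank
// lines, and the loop exits on EOF or malformed input without
// pushing a leftover value.
std::vector<double> readDoubles(const std::string &filename) {
    std::vector<double> values;
    std::ifstream in(filename);
    double v;
    while (in >> v) {
        values.push_back(v);
    }
    return values;
}
```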
--------------------------------------------------------------------------------
/bpnn.cpp:
--------------------------------------------------------------------------------
#include <iostream>
#include <fstream>
#include <cmath>
#include <string>
#include <vector>
#include <random>
#include <algorithm>

#define INNODE 2
#define HIDENODE 4
#define OUTNODE 1

double rate = 0.8;
double threshold = 1e-4;
size_t mosttimes = 1e6;

struct Sample {
    std::vector<double> in, out;
};

struct Node {
    double value{}, bias{}, bias_delta{};
    std::vector<double> weight, weight_delta;
};

namespace utils {

    inline double sigmoid(double x) {
        double res = 1.0 / (1.0 + std::exp(-x));
        return res;
    }

    std::vector<double> getFileData(std::string filename) {
        std::vector<double> res;

        std::ifstream in(filename);
        if (in.is_open()) {
            double buffer;
            // Extraction drives the loop: it skips whitespace (including '\r')
            // and fails at EOF, so no stale value is pushed for a trailing
            // blank line -- the cause of the convergence issue in the README.
            while (in >> buffer) {
                res.push_back(buffer);
            }
            in.close();
        } else {
            std::cout << "Error in reading " << filename << std::endl;
        }

        return res;
    }

    std::vector<Sample> getTrainData(std::string filename) {
        std::vector<Sample> res;

        std::vector<double> buffer = getFileData(filename);

        // Each training sample is INNODE inputs followed by OUTNODE outputs.
        for (size_t i = 0; i < buffer.size(); i += INNODE + OUTNODE) {
            Sample tmp;
            for (size_t t = 0; t < INNODE; t++) {
                tmp.in.push_back(buffer[i + t]);
            }
            for (size_t t = 0; t < OUTNODE; t++) {
                tmp.out.push_back(buffer[i + INNODE + t]);
            }
            res.push_back(tmp);
        }

        return res;
    }

    std::vector<Sample> getTestData(std::string filename) {
        std::vector<Sample> res;

        std::vector<double> buffer = getFileData(filename);

        // Test samples carry inputs only; outputs are filled in by the network.
        for (size_t i = 0; i < buffer.size(); i += INNODE) {
            Sample tmp;
            for (size_t t = 0; t < INNODE; t++) {
                tmp.in.push_back(buffer[i + t]);
            }
            res.push_back(tmp);
        }

        return res;
    }

}

Node *inputLayer[INNODE], *hideLayer[HIDENODE], *outLayer[OUTNODE];

inline void init() {
    std::mt19937 rd;
    rd.seed(std::random_device()());

    std::uniform_real_distribution<double> distribution(-1, 1);

    for (size_t i = 0; i < INNODE; i++) {
        ::inputLayer[i] = new Node();
        for (size_t j = 0; j < HIDENODE; j++) {
            ::inputLayer[i]->weight.push_back(distribution(rd));
            ::inputLayer[i]->weight_delta.push_back(0.f);
        }
    }

    for (size_t i = 0; i < HIDENODE; i++) {
        ::hideLayer[i] = new Node();
        ::hideLayer[i]->bias = distribution(rd);
        for (size_t j = 0; j < OUTNODE; j++) {
            ::hideLayer[i]->weight.push_back(distribution(rd));
            ::hideLayer[i]->weight_delta.push_back(0.f);
        }
    }

    for (size_t i = 0; i < OUTNODE; i++) {
        ::outLayer[i] = new Node();
        ::outLayer[i]->bias = distribution(rd);
    }

}

inline void reset_delta() {

    for (size_t i = 0; i < INNODE; i++) {
        ::inputLayer[i]->weight_delta.assign(::inputLayer[i]->weight_delta.size(), 0.f);
    }

    for (size_t i = 0; i < HIDENODE; i++) {
        ::hideLayer[i]->bias_delta = 0.f;
        ::hideLayer[i]->weight_delta.assign(::hideLayer[i]->weight_delta.size(), 0.f);
    }

    for (size_t i = 0; i < OUTNODE; i++) {
        ::outLayer[i]->bias_delta = 0.f;
    }

}

int main(int argc, char *argv[]) {

    init();

    std::vector<Sample> train_data = utils::getTrainData("traindata.txt");

    // training
    for (size_t times = 0; times < mosttimes; times++) {

        reset_delta();

        double error_max = 0.f;

        for (auto &idx : train_data) {

            for (size_t i = 0; i < INNODE; i++) {
                ::inputLayer[i]->value = idx.in[i];
            }

            // forward propagation
            for (size_t j = 0; j < HIDENODE; j++) {
                double sum = 0;
                for (size_t i = 0; i < INNODE; i++) {
                    sum += ::inputLayer[i]->value * ::inputLayer[i]->weight[j];
                }
                sum -= ::hideLayer[j]->bias;

                ::hideLayer[j]->value = utils::sigmoid(sum);
            }

            for (size_t j = 0; j < OUTNODE; j++) {
                double sum = 0;
                for (size_t i = 0; i < HIDENODE; i++) {
                    sum += ::hideLayer[i]->value * ::hideLayer[i]->weight[j];
                }
                sum -= ::outLayer[j]->bias;

                ::outLayer[j]->value = utils::sigmoid(sum);
            }

            // compute the squared error for this sample
            double error = 0.f;
            for (size_t i = 0; i < OUTNODE; i++) {
                double tmp = std::fabs(::outLayer[i]->value - idx.out[i]);
                error += tmp * tmp / 2;
            }

            error_max = std::max(error_max, error);
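            /*
             * Gradient bookkeeping below (the standard expressions for squared
             * error E = (y - o)^2 / 2 with sigmoid activations, where
             * sigmoid'(net) = o * (1 - o) for o = sigmoid(net)):
             *
             *   output delta:           d_o = (y - o) * o * (1 - o)
             *   hidden->output weight:  delta_w = d_o * h           (h: hidden value)
             *   hidden delta:           d_h = (sum_j d_o_j * w_j) * h * (1 - h)
             *   input->hidden weight:   delta_w = d_h * x           (x: input value)
             *
             * The accumulated deltas are the *negative* gradient, which is why
             * the update step later adds them. Since each bias is subtracted
             * from its weighted sum, the bias deltas carry the opposite sign
             * of the corresponding weight deltas.
             */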
            // backpropagation

            for (size_t i = 0; i < OUTNODE; i++) {
                double bias_delta = -(idx.out[i] - ::outLayer[i]->value) *
                        ::outLayer[i]->value * (1.0 - ::outLayer[i]->value);
                ::outLayer[i]->bias_delta += bias_delta;
            }

            for (size_t i = 0; i < HIDENODE; i++) {
                for (size_t j = 0; j < OUTNODE; j++) {
                    double weight_delta = (idx.out[j] - ::outLayer[j]->value) *
                            ::outLayer[j]->value * (1.0 - ::outLayer[j]->value) *
                            ::hideLayer[i]->value;
                    ::hideLayer[i]->weight_delta[j] += weight_delta;
                }
            }

            for (size_t i = 0; i < HIDENODE; i++) {
                double sum = 0;
                for (size_t j = 0; j < OUTNODE; j++) {
                    sum += -(idx.out[j] - ::outLayer[j]->value) *
                            ::outLayer[j]->value * (1.0 - ::outLayer[j]->value) *
                            ::hideLayer[i]->weight[j];
                }
                ::hideLayer[i]->bias_delta +=
                        sum * ::hideLayer[i]->value * (1.0 - ::hideLayer[i]->value);
            }

            for (size_t i = 0; i < INNODE; i++) {
                for (size_t j = 0; j < HIDENODE; j++) {
                    double sum = 0.f;
                    for (size_t k = 0; k < OUTNODE; k++) {
                        sum += (idx.out[k] - ::outLayer[k]->value) *
                                ::outLayer[k]->value * (1.0 - ::outLayer[k]->value) *
                                ::hideLayer[j]->weight[k];
                    }
                    ::inputLayer[i]->weight_delta[j] +=
                            sum *
                            ::hideLayer[j]->value * (1.0 - ::hideLayer[j]->value) *
                            ::inputLayer[i]->value;
                }
            }

        }

        if (error_max < ::threshold) {
            std::cout << "Success with " << times + 1 << " times training." << std::endl;
            std::cout << "Maximum error: " << error_max << std::endl;
            break;
        }

        // apply the averaged (batch) deltas
        auto train_data_size = double(train_data.size());

        for (size_t i = 0; i < INNODE; i++) {
            for (size_t j = 0; j < HIDENODE; j++) {
                ::inputLayer[i]->weight[j] +=
                        rate * ::inputLayer[i]->weight_delta[j] / train_data_size;
            }
        }

        for (size_t i = 0; i < HIDENODE; i++) {
            ::hideLayer[i]->bias +=
                    rate * ::hideLayer[i]->bias_delta / train_data_size;
            for (size_t j = 0; j < OUTNODE; j++) {
                ::hideLayer[i]->weight[j] +=
                        rate * ::hideLayer[i]->weight_delta[j] / train_data_size;
            }
        }

        for (size_t i = 0; i < OUTNODE; i++) {
            ::outLayer[i]->bias +=
                    rate * ::outLayer[i]->bias_delta / train_data_size;
        }

    }

    std::vector<Sample> test_data = utils::getTestData("testdata.txt");

    // predict
    for (auto &idx : test_data) {

        for (size_t i = 0; i < INNODE; i++) {
            ::inputLayer[i]->value = idx.in[i];
        }

        for (size_t j = 0; j < HIDENODE; j++) {
            double sum = 0;
            for (size_t i = 0; i < INNODE; i++) {
                sum += ::inputLayer[i]->value * ::inputLayer[i]->weight[j];
            }
            sum -= ::hideLayer[j]->bias;

            ::hideLayer[j]->value = utils::sigmoid(sum);
        }

        for (size_t j = 0; j < OUTNODE; j++) {
            double sum = 0;
            for (size_t i = 0; i < HIDENODE; i++) {
                sum += ::hideLayer[i]->value * ::hideLayer[i]->weight[j];
            }
            sum -= ::outLayer[j]->bias;

            ::outLayer[j]->value = utils::sigmoid(sum);

            idx.out.push_back(::outLayer[j]->value);
        }

        // print each sample once, after all of its outputs are computed
        for (auto &tmp : idx.in) {
            std::cout << tmp << " ";
        }
        for (auto &tmp : idx.out) {
            std::cout << tmp << " ";
        }
        std::cout << std::endl;

    }

    return 0;
}
--------------------------------------------------------------------------------
/testdata.txt:
--------------------------------------------------------------------------------
0.111 0.112
0.001 0.999
0.123 0.345
0.123 0.456
0.123 0.789
0.234 0.567
0.234 0.678
0.387 0.401
0.616 0.717
0.701 0.919
--------------------------------------------------------------------------------
/traindata.txt:
--------------------------------------------------------------------------------
0 0 0
0 1 1
1 0 1
1 1 0
0.8 0.8 0
0.6 0.6 0
0.4 0.4 0
0.2 0.2 0
1.0 0.8 1
1.0 0.6 1
1.0 0.4 1
1.0 0.2 1
0.8 0.6 1
0.6 0.4 1
0.4 0.2 1
0.2 0 1
0.999 0.666 1
0.666 0.333 1
0.333 0 1
0.8 0.4 1
0.4 0 1
0 0.123 1
0.12 0.23 1
0.23 0.34 1
0.34 0.45 1
0.45 0.56 1
0.56 0.67 1
0.67 0.78 1
0.78 0.89 1
0.89 0.99 1
--------------------------------------------------------------------------------