├── 技术报告.pdf
├── cifar-10-torch
├── figures
│ └── code.png
├── cifar-source
│ ├── res
│ │ └── distilled_lenet5_best.pt
│ ├── save_params.py
│ ├── test.py
│ ├── train.py
│ ├── distill.py
│ └── models.py
└── README.md
├── quantification
├── distilled_lenet5_best.pt
├── quantification_img.py
├── input_pic2_lable8.txt
└── quantification_para.py
├── CNN-FPGA-Vivado
├── CNN-FPGA-Vivado.srcs
│ ├── sim_1
│ │ └── new
│ │ │ └── lenet_TB.v
│ └── sources_1
│ │ ├── new
│ │ ├── Lenet.v
│ │ ├── FindMax.v
│ │ ├── MaxPoolMulti.v
│ │ ├── UsingTheRelu16.v
│ │ ├── max.v
│ │ ├── UsingTheRelu.v
│ │ ├── MaxPoolSingle.v
│ │ └── IEEE162IEEE32.v
│ │ └── imports
│ │ └── Integration first part
│ │ ├── layer.v
│ │ ├── convUnit.v
│ │ ├── floatAdd.v
│ │ ├── floatMult.v
│ │ ├── floatAdd16.v
│ │ ├── floatMult16.v
│ │ ├── weightMemory.v
│ │ ├── convLayerMulti.v
│ │ ├── convLayerSingle.v
│ │ ├── processingElement.v
│ │ ├── activationFunction.v
│ │ ├── processingElement16.v
│ │ ├── RFselector.v
│ │ ├── softmax_tb.v
│ │ ├── IntegrationConvPart.v
│ │ └── ANNfull.v
├── README.md
└── CNN-FPGA-Vivado.xpr
├── weight
├── layer1.txt
├── layer2.txt
└── classifier.txt
├── LICENSE
└── README.md
/技术报告.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/技术报告.pdf
--------------------------------------------------------------------------------
/cifar-10-torch/figures/code.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/cifar-10-torch/figures/code.png
--------------------------------------------------------------------------------
/quantification/distilled_lenet5_best.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/quantification/distilled_lenet5_best.pt
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sim_1/new/lenet_TB.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sim_1/new/lenet_TB.v
--------------------------------------------------------------------------------
/cifar-10-torch/cifar-source/res/distilled_lenet5_best.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/cifar-10-torch/cifar-source/res/distilled_lenet5_best.pt
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/Lenet.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/Lenet.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/FindMax.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/FindMax.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/MaxPoolMulti.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/MaxPoolMulti.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/UsingTheRelu16.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/UsingTheRelu16.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/layer.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/layer.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/convUnit.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/convUnit.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/floatAdd.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/floatAdd.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/floatMult.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/floatMult.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/floatAdd16.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/floatAdd16.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/floatMult16.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/floatMult16.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/weightMemory.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/weightMemory.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/convLayerMulti.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/convLayerMulti.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/convLayerSingle.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/convLayerSingle.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/processingElement.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/processingElement.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/activationFunction.v:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Robin-WZQ/CNN-FPGA/HEAD/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/activationFunction.v
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/README.md:
--------------------------------------------------------------------------------
1 | # CNN-FPGA-Vivado
2 |
3 | ### 运行方法
4 |
5 | - 打开项目
6 |
7 | 打开CNN-FPGA-Vivado.xpr
8 |
9 | - 修改路径
10 |
11 | 1. lenet_TB.v
12 | 修改33行,41行,将路径改为权重所在绝对路径
13 |
14 | 2. ANNfull.v
15 | 修改34行,44行,54行,将路径改为权重所在绝对路径
16 |
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/max.v:
--------------------------------------------------------------------------------
1 | module max (n1,n2,n3,n4,max); // combinational 4-input maximum selector, used by the 2x2 max-pooling layers
2 |
3 | parameter DATA_WIDTH = 16; // operand width in bits
4 |
5 | input [DATA_WIDTH-1:0]n1; // candidate value 1
6 | input [DATA_WIDTH-1:0]n2; // candidate value 2
7 | input [DATA_WIDTH-1:0]n3; // candidate value 3
8 | input [DATA_WIDTH-1:0]n4; // candidate value 4
9 | output [DATA_WIDTH-1:0]max; // largest of the four inputs
10 | wire [DATA_WIDTH-1:0]max12; // max(n1,n2)
11 | wire [DATA_WIDTH-1:0]max34; // max(n3,n4)
12 |
13 | assign max12=(n1>n2)?n1:n2; // NOTE(review): '>' compares raw bit patterns as unsigned; if inputs are IEEE-754 half floats, a negative value (sign bit set) compares as largest -- confirm inputs are non-negative (e.g. post-ReLU)
14 | assign max34=(n3>n4)?n3:n4;
15 | assign max = (max12>=max34) ? max12 : max34; // two-level tournament yields the overall maximum
16 | endmodule
--------------------------------------------------------------------------------
/cifar-10-torch/cifar-source/save_params.py:
--------------------------------------------------------------------------------
1 | from models import Lenet5, NormalCNN  # NormalCNN is imported but unused in this script
2 | import torch
3 | import numpy as np
4 |
5 | np.set_printoptions(threshold=np.inf, linewidth=100)  # print arrays in full, no "..." truncation, so the dump is complete
6 |
7 | model = Lenet5()
8 | model.load_state_dict(torch.load('./res/distilled_lenet5_best.pt'))  # distilled weights produced by distill.py
9 |
10 | f = open('lenet5_parameters_cifar.txt', 'w', encoding='utf-8')  # NOTE(review): file is never closed explicitly; relies on interpreter exit -- a 'with' block would be safer
11 | for name, params in model.named_parameters():
12 | print(name, params.shape)
13 | f.write(name + ':' + str(params.shape) + '\n')  # one header line per tensor: "<name>:<shape>"
14 | f.write(str(params.detach().numpy()) + '\n')  # full weight values as a numpy text dump
15 |
--------------------------------------------------------------------------------
/weight/layer1.txt:
--------------------------------------------------------------------------------
1 | 2461aafe2b65a524a71ca0932b6ba816a91d28ccb17935442a19b6592ff8b0d13882b4aab605358faeb536bdb14fb6883304
2 | 2c45246b2dccb066ad3db0ffb2a326b0300a2f3dab85b449af982e1333f8a4c7b2ecb4762b57333c309caf89b138b06f306c
3 | 32b82ce6afd7b172a95a2565b5b9b6282c432e45b543b52630e532eb26302257346334f3321ba5cf2d662e692d55ab92b11e
4 | 2d93b267aab62d6927a7aed93116ac9da5ad221231d4b1c4a8b2ab8e99d833383552ac3aafacb36eb431332335d22cf3aefc
5 | 2f62a89431da2b6d3460250cae9731072cf330d1b337b0f7ab3ab419b104b16d34ed14a733113403144b3194b02a2ec7b230
6 | ac952ee22cf62ed528c5a5b7b33c345dafb136042b19b3e8b5afb5e8b51c2ff332982d3a356fb1b0aed5301fb0d333f5a459
7 |
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/UsingTheRelu.v:
--------------------------------------------------------------------------------
1 | `timescale 1ns / 1ps
2 | //////////////////////////////////////////////////////////////////////////////////
3 | // Company:
4 | // Engineer:
5 | //
6 | // Create Date: 2022/08/29 16:30:03
7 | // Design Name:
8 | // Module Name: UsingTheRelu
9 | // Project Name:
10 | // Target Devices:
11 | // Tool Versions:
12 | // Description:
13 | //
14 | // Dependencies:
15 | //
16 | // Revision:
17 | // Revision 0.01 - File Created
18 | // Additional Comments:
19 | //
20 | //////////////////////////////////////////////////////////////////////////////////
21 |
22 |
23 | module UsingTheRelu( // NOTE(review): empty placeholder module -- no ports, no logic; the active ReLU implementation appears to live in UsingTheRelu16.v
24 |
25 | );
26 | endmodule
27 |
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/processingElement16.v:
--------------------------------------------------------------------------------
1 | `timescale 100 ns / 10 ps
2 |
3 | module processingElement16(clk,reset,floatA,floatB,result); // 16-bit multiply-accumulate cell: result += floatA * floatB on every clock
4 |
5 | parameter DATA_WIDTH = 16; // word width of operands and accumulator
6 |
7 | input clk, reset;
8 | input [DATA_WIDTH-1:0] floatA, floatB; // multiplicands (half-precision, per floatMult16/floatAdd16 naming)
9 | output reg [DATA_WIDTH-1:0] result; // running accumulator, cleared to 0 by reset
10 |
11 | wire [DATA_WIDTH-1:0] multResult; // floatA * floatB
12 | wire [DATA_WIDTH-1:0] addResult; // multResult + result (accumulator feedback path)
13 |
14 | floatMult16 FM (floatA,floatB,multResult);
15 | floatAdd16 FADD (multResult,result,addResult); // feeds the registered result back into the adder
16 |
17 | always @ (posedge clk or posedge reset) begin
18 | if (reset == 1'b1) begin
19 | result = 0; // NOTE(review): blocking assignment in a clocked block; non-blocking (<=) is the usual idiom -- confirm synthesis/timing intent
20 | end else begin
21 | result = addResult; // latch the new accumulated sum
22 | end
23 | end
24 |
25 | endmodule
26 |
--------------------------------------------------------------------------------
/cifar-10-torch/README.md:
--------------------------------------------------------------------------------
1 | # 文件说明
2 |
3 | **cifar-source:**源代码文件夹
4 |
5 | - cifar-10-python:数据集文件夹
6 | - res:模型存储文件夹
7 | - distilled_lenet5_best.pt:蒸馏得到的Lenet5模型参数文件
8 | - train.py:Teacher model训练代码
9 | - distill.py:模型蒸馏训练代码
10 | - test.py:验证评估代码
11 | - save_params.py:参数导出代码
12 | - models.py:模型结构代码
13 | - lenet5_parameters_cifar.txt:导出模型参数文件
14 |
15 | # 实验环境
16 |
17 | Python==3.9
18 |
19 | torch==1.9.0+cu111
20 |
21 | torchvision==0.10.0+cu111
22 |
23 | numpy==1.20.3
24 |
25 | # 代码说明
26 |
27 |

28 |
29 |
30 | # 运行说明
31 |
32 | 以cifar-source目录为源根
33 |
34 | 训练Teacher model
35 |
36 | ```
37 | python train.py
38 | ```
39 |
40 | 模型蒸馏
41 |
42 | ```
43 | python distill.py
44 | ```
45 |
46 | 参数导出
47 |
48 | ```
49 | python save_params.py
50 | ```
51 |
52 |
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/MaxPoolSingle.v:
--------------------------------------------------------------------------------
1 | `timescale 1 ns / 10 ps
2 |
3 | module MaxPoolSingle(aPoolIn,aPoolOut); // combinational 2x2, stride-2 max pooling over a single channel
4 |
5 | parameter DATA_WIDTH = 16;
6 | parameter InputH = 28; // input feature-map height
7 | parameter InputW = 28; // input feature-map width
8 | parameter Depth = 1;
9 |
10 | input [0:InputH*InputW*Depth*DATA_WIDTH-1] aPoolIn; // row-major packed input feature map
11 | output [0:(InputH/2)*(InputW/2)*Depth*DATA_WIDTH-1] aPoolOut; // pooled output, half size in each dimension
12 |
13 | genvar i,j;
14 |
15 | generate
16 | for (i=0; i<(InputH); i=i+2) begin
17 | for (j=0; j<(InputW); j=j+2) begin
18 | max
19 | #(
20 | .DATA_WIDTH(DATA_WIDTH)
21 | ) max1
22 | (
23 | .n1(aPoolIn[(i*InputH+j)*DATA_WIDTH+:DATA_WIDTH]), // NOTE(review): row stride uses InputH rather than InputW -- correct only while InputH == InputW (square maps); confirm
24 | .n2(aPoolIn[(i*InputH+j+1)*DATA_WIDTH+:DATA_WIDTH]),
25 | .n3(aPoolIn[((i+1)*InputH+j)*DATA_WIDTH+:DATA_WIDTH]),
26 | .n4(aPoolIn[((i+1)*InputH+j+1)*DATA_WIDTH+:DATA_WIDTH]),
27 | .max(aPoolOut[(i/2*InputH/2+j/2)*DATA_WIDTH+:DATA_WIDTH])
28 | );
29 | end
30 | end
31 | endgenerate
32 |
33 | endmodule
34 |
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/new/IEEE162IEEE32.v:
--------------------------------------------------------------------------------
1 | module IEEE162IEEE32(clk,reset,input_fc,output_fc); // widens NODES packed FP16 values to FP32 in a single clock
2 |
3 | parameter DATA_WIDTH_1 = 16; // source word width (half precision)
4 | parameter DATA_WIDTH_2 = 32; // destination word width (single precision)
5 | parameter NODES = 400; // number of packed values converted per cycle
6 |
7 | input clk, reset;
8 | input [DATA_WIDTH_1*NODES-1:0] input_fc;
9 | output reg [DATA_WIDTH_2*NODES-1:0] output_fc;
10 |
11 | reg [7:0] temp; // zero-extended 5-bit FP16 exponent (upper 3 bits stay 0 after the initial clear)
12 | integer i;
13 |
14 | always @ (negedge clk or posedge reset) begin // NOTE(review): clocked on the falling edge -- presumably to interleave with posedge stages; confirm
15 | if (reset == 1'b1) begin
16 | output_fc = 0;
17 | end else begin
18 | temp = 8'b00000000;
19 | for (i = 0; i < NODES; i = i + 1) begin
20 | output_fc[DATA_WIDTH_2*(i+1)-1] = input_fc[DATA_WIDTH_1*(i+1)-1]; // copy sign bit
21 | temp[0+:5] = input_fc[(DATA_WIDTH_1*i+10)+:5]; // extract FP16 exponent (bias 15)
22 | output_fc[(DATA_WIDTH_2*i+23)+:8] = temp + 8'b01110000; // rebias: +112 = 127 - 15; NOTE(review): subnormals/Inf/NaN are not remapped
23 | output_fc[(DATA_WIDTH_2*i+13)+:10] = input_fc[(DATA_WIDTH_1*i+0)+:10]; // 10-bit mantissa into FP32 mantissa MSBs
24 | output_fc[(DATA_WIDTH_2*i)+:13] = 0; // zero-fill remaining 13 mantissa LSBs
25 | end
26 |
27 | end
28 | end
29 |
30 | endmodule
31 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Robin-WZQ
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/RFselector.v:
--------------------------------------------------------------------------------
1 | `timescale 100 ns / 10 ps
2 |
3 | module RFselector(image,rowNumber, column,receptiveField); // slices half a row of FxF receptive fields out of the packed image for the conv engine
4 |
5 | parameter DATA_WIDTH = 16;
6 | parameter D = 1; //Depth of the filter
7 | parameter H = 32; //Height of the image
8 | parameter W = 32; //Width of the image
9 | parameter F = 5; //Size of the filter
10 |
11 | input [0:D*H*W*DATA_WIDTH-1] image; // row-major, channel-major packed input
12 | input [5:0] rowNumber, column; // column selects which half of the (W-F+1) window positions: 0 = left half, otherwise right half
13 | output reg [0:(((W-F+1)/2)*D*F*F*DATA_WIDTH)-1] receptiveField; // (W-F+1)/2 windows, each D*F*F values
14 |
15 | integer address, c, k, i;
16 |
17 | always @ (image or rowNumber or column) begin
18 | address = 0; // counts F-pixel row segments already emitted (output is indexed in F*DATA_WIDTH chunks)
19 | if (column == 0) begin
20 | for (c = 0; c < (W-F+1)/2; c = c + 1) begin // window position within the left half
21 | for (k = 0; k < D; k = k + 1) begin // channel
22 | for (i = 0; i < F; i = i + 1) begin // row inside the FxF window
23 | receptiveField[address*F*DATA_WIDTH+:F*DATA_WIDTH] = image[rowNumber*W*DATA_WIDTH+c*DATA_WIDTH+k*H*W*DATA_WIDTH+i*W*DATA_WIDTH+:F*DATA_WIDTH]; // copy F consecutive pixels of one window row
24 | address = address + 1;
25 | end
26 | end
27 | end
28 | end else begin
29 | for (c = (W-F+1)/2; c < (W-F+1); c = c + 1) begin // same walk over the right-half window positions
30 | for (k = 0; k < D; k = k + 1) begin
31 | for (i = 0; i < F; i = i + 1) begin
32 | receptiveField[address*F*DATA_WIDTH+:F*DATA_WIDTH] = image[rowNumber*W*DATA_WIDTH+c*DATA_WIDTH+k*H*W*DATA_WIDTH+i*W*DATA_WIDTH+:F*DATA_WIDTH];
33 | address = address + 1;
34 | end
35 | end
36 | end
37 | end
38 |
39 | end
40 |
41 | endmodule
42 |
43 |
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/softmax_tb.v:
--------------------------------------------------------------------------------
1 | `timescale 1ps/1ps
2 | module softmax_tb(); // testbench: drives softmax1 with two 10-value FP32 vectors and waits for ackSoft after each
3 | localparam DATA_WIDTH=32;
4 | localparam inputNum=10;
5 | reg [DATA_WIDTH*inputNum-1:0] inputs; // 10 packed single-precision logits
6 | reg clk;
7 | reg enable; // pulsed low-then-high to start each softmax run
8 | wire [DATA_WIDTH*inputNum-1:0] outputs; // softmax probabilities (see expected-value comments below)
9 | wire ackSoft; // asserted by the DUT when the result is ready
10 | softmax1 #(.DATA_WIDTH(DATA_WIDTH)) soft(inputs,clk,enable,outputs,ackSoft);
11 |
12 | localparam PERIOD = 100;
13 | integer count; // clock periods elapsed until ackSoft -- latency counter for waveform inspection
14 | always
15 | #(PERIOD/2) clk = ~clk;
16 |
17 | initial begin
18 | clk=1'b1;
19 | inputs=320'b00111110010011001100110011001101_10111110010011001100110011001101_00111111100110011001100110011010_00111111101001100110011001100110_10111111011001100110011001100110_00111110100110011001100110011010_01000000010001100110011001100110_10111100101000111101011100001010_00111111100011100001010001111011_00111110101001010110000001000010;
20 | //inputs are 0.2 -0.2 1.2 1.3 -0.9 0.3 3.1 -0.02 1.11 0.323
21 | count=1;
22 | enable=1'b0;
23 | #(PERIOD);
24 | enable=1'b1; // start run 1
25 |
26 | while(ackSoft!=1'b1) begin
27 | count=count+1;
28 | #(PERIOD);
29 | end
30 | //outputs are 0.03255, 0.02182, 0.08847, 0.09776, 0.0108, 0.0359, 0.5687, 0.02612, 0.0808, 0.03681
31 |
32 | inputs=320'b00111111001100001010001111010111_10111110010011001100110011001101_00111111100110011001100110011010_00111111101001100110011001100110_10111111011001100110011001100110_00111110100110011001100110011010_01000000010001100110011001100110_10111100101000111101011100001010_00111111100011100001010001111011_00111110101001010110000001000010;
33 | //inputs are 0.69 -0.2 1.2 1.3 -0.9 0.3 3.1 -0.02 1.11 0.323
34 | count=1;
35 | enable=1'b0;
36 | #(PERIOD);
37 | enable=1'b1; // start run 2
38 | while(ackSoft!=1'b1) begin
39 | count=count+1;
40 | #(PERIOD);
41 |
42 | end
43 | //outputs are 0.05207118 0.0213835 0.0866926 0.09579553 0.01062096 0.03525543 0.5572659 0.0256007 0.07923851 0.0360757
44 |
45 | end
46 |
47 | endmodule
48 |
48 |
--------------------------------------------------------------------------------
/cifar-10-torch/cifar-source/test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.utils.data import DataLoader
3 | from models import NormalCNN, Lenet5
4 | from torchvision.models import resnet152
5 | from torchvision.datasets import CIFAR10
6 | from torchvision.transforms import transforms
7 | import torch.nn as nn
8 |
9 | device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
10 |
11 |
12 | def evaluate(model, data_loader):  # NOTE(review): unused here; expects dict batches ('img'/'label') unlike the tuple batches CIFAR10 yields in __main__ -- dead or for another caller
13 | model.eval()
14 | pred_tags = []
15 | true_tags = []
16 | with torch.no_grad():
17 | for batch in data_loader:
18 | batch_data = batch['img'].to(device)
19 | batch_label = batch['label'].to(device)
20 |
21 | logits = model(batch_data)
22 |
23 | pred = torch.argmax(logits, dim=1).cpu().numpy()  # predicted class per sample
24 | tags = batch_label.cpu().numpy()
25 |
26 | pred_tags.extend(pred)
27 | true_tags.extend(tags)
28 |
29 | assert len(pred_tags) == len(true_tags)
30 | correct_num = sum(int(x == y) for (x, y) in zip(pred_tags, true_tags))
31 | accuracy = correct_num / len(pred_tags)
32 |
33 | return accuracy
34 |
35 |
36 | if __name__ == '__main__':
37 | transform = transforms.Compose([
38 | transforms.Grayscale(),
39 | transforms.ToTensor(),
40 | transforms.Normalize((0.4734), (0.2507))  # single-channel stats; parentheses make these scalars, not tuples -- accepted by Normalize
41 | ])
42 | test_dataloader = DataLoader(CIFAR10(root='./cifar-10-python/', train=False, transform=transform),
43 | batch_size=128)
44 | model = Lenet5()
45 | # model.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
46 | # model.fc = nn.Linear(2048, 10)
47 | model.to(device)
48 | model.load_state_dict(torch.load('./res/distilled_lenet5_best.pt'))  # NOTE(review): loaded after .to(device) -- presumably CPU tensors map fine onto the moved model; verify on CUDA
49 |
50 | model.eval()  # NOTE(review): the loop below duplicates evaluate() with tuple-style batch unpacking
51 | pred_tags = []
52 | true_tags = []
53 | with torch.no_grad():
54 | for batch in test_dataloader:
55 | batch_data = batch[0].to(device)
56 | batch_label = batch[1].to(device)
57 |
58 | logits = model(batch_data)
59 |
60 | pred = torch.argmax(logits, dim=1).cpu().numpy()
61 | tags = batch_label.cpu().numpy()
62 |
63 | pred_tags.extend(pred)
64 | true_tags.extend(tags)
65 |
66 | assert len(pred_tags) == len(true_tags)
67 | correct_num = sum(int(x == y) for (x, y) in zip(pred_tags, true_tags))
68 | accuracy = correct_num / len(pred_tags)  # fraction of correct predictions over the test set
69 |
70 | print(accuracy * 100)
71 |
--------------------------------------------------------------------------------
/quantification/quantification_img.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | from torchvision import transforms
4 | import torch
5 |
6 | def dec2bin(x):  # binary expansion of the fractional part of x; terminates because binary floats are dyadic rationals
7 | x -= int(x)
8 | bins = []
9 | while x:
10 | x *= 2
11 | bins.append("1" if x>=1. else "0")
12 | x -= int(x)
13 | return "".join(bins)
14 |
15 | def float2IEEE16(x):  # pack a float into IEEE-754 half precision, returned as 4 hex characters (mantissa is truncated, not rounded)
16 | ms = "0" if x > 0 else "1"  # NOTE(review): x == 0.0 takes the negative branch, so zero encodes with sign 1 and the E=15 fallback below -- confirm zeros cannot occur after normalization
17 | x = abs(x)
18 | x0 = int(x) # integer part
19 | x1 = x - x0 # fractional part
20 | x0 = bin(x0).replace("0b", "")
21 | x1 = dec2bin(x1)
22 | if x0[0] == "0":  # integer part is zero, i.e. |x| < 1: exponent comes from the first set fraction bit
23 | E = 15 - x1.find("1") - 1
24 | m = x1[x1.find("1"):]
25 | if E < 0:  # NOTE(review): underflow fallback stores biased E=15 with zero mantissa (which decodes as 1.0) instead of flushing to zero -- verify intent
26 | E = 15
27 | m = "00000000000"
28 | else:  # |x| >= 1: exponent from the integer-part bit length
29 | E = 15 + len(x0) - 1
30 | m = x0 + x1
31 | E = bin(E).replace("0b", "")
32 | if len(E)>5:
33 | E = E[:5]
34 | else:
35 | for i in range(5 - len(E)):
36 | E = "0" + E  # left-pad exponent to 5 bits
37 | m = m[1:]  # drop the implicit leading 1
38 | if len(m)>10:
39 | m = m[:10]
40 | else:
41 | for i in range(10 - len(m)):
42 | m += "0"  # right-pad mantissa to 10 bits
43 | y = ms+E+m
44 | y1 = ""
45 | for i in range(len(y)//4):
46 | y1 += hex(int(y[4*i:4*(i+1)], 2)).replace("0x", "")  # 16 bits -> 4 hex digits
47 | return y1
48 |
49 | # print(halfpre2spre('0011110000000000')) # 0.1
50 | # print(uint82float16(255)) #0101101111111000/5bf8
51 |
52 | def unpickle(file):  # load one CIFAR-10 batch file (pickled dict with b"data"/b"labels" keys)
53 | import pickle
54 | with open(file, 'rb') as fo:
55 | dict = pickle.load(fo, encoding='bytes')
56 | return dict
57 |
58 | if __name__ == '__main__':
59 | transform = transforms.Compose([
60 | transforms.ToPILImage(),
61 | transforms.Grayscale(),
62 | transforms.ToTensor(),
63 | transforms.Normalize((0.4734), (0.2507))  # same grayscale stats used by test.py
64 | ])
65 |
66 | filename = "./cifar-10-python/cifar-10-batches-py/test_batch"
67 | dataset = unpickle(filename)
68 | data = dataset[b"data"]
69 | labels = dataset[b"labels"]
70 |
71 | id = 2  # index of the test image to export (shadows builtin id(); kept for compatibility)
72 |
73 | one_data = np.zeros((3, 1024), dtype=np.float32)
74 | one_data[0], one_data[1], one_data[2] = data[id][:1024], data[id][1024:2048], data[id][2048:]  # split flat RGB rows into 3 channel planes
75 | one_data = one_data.reshape((3, 32, 32))
76 | one_data = torch.tensor(one_data, dtype=torch.float32)
77 | one_data = transform(one_data)
78 |
79 | one_label = labels[id]
80 | print(f"图像的标签为 {one_label}")
81 |
82 | result = ""
83 |
84 | for i in range(1):  # single (grayscale) channel after the transform
85 | for j in range(32):
86 | for k in range(32):
87 | result += float2IEEE16(one_data[i][j][k])  # concatenate 4 hex chars per pixel, row-major
88 |
89 | f = open(f"input_pic{id}_lable{one_label}.txt", 'w', encoding="utf-8")  # NOTE(review): never closed; 'lable' spelling kept -- downstream files reference this exact name
90 | f.write(result)
91 |
92 | img = np.reshape(data[id], (3, 32, 32))
93 | plt.imshow(img.transpose((1, 2, 0)))  # show the original RGB image for a visual sanity check
94 | plt.show()
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/IntegrationConvPart.v:
--------------------------------------------------------------------------------
1 | module integrationConv (clk,reset,CNNinput,Conv1F,Conv2F,iConvOutput); // conv front-end: conv1 (6@28x28) -> ReLU -> pool (14x14) -> conv2 (16@10x10) -> ReLU -> pool (5x5)
2 |
3 | parameter DATA_WIDTH = 16;
4 | parameter ImgInW = 32; // input image width
5 | parameter ImgInH = 32; // input image height
6 | parameter Conv1Out = 28; // conv1 output side (32-5+1)
7 | parameter MvgP1out = 14; // pool1 output side
8 | parameter Conv2Out = 10; // conv2 output side (14-5+1)
9 | parameter Kernel = 5;
10 | parameter MvgP2out = 5; // pool2 output side
11 | parameter DepthC1 = 6; // conv1 output channels
12 | parameter DepthC2 = 16; // conv2 output channels
13 |
14 | integer counter; // cycle counter that sequences the per-stage resets in the always block below
15 |
16 | input clk, reset;
17 | input [ImgInW*ImgInH*DATA_WIDTH-1:0] CNNinput;
18 | input [Kernel*Kernel*DepthC1*DATA_WIDTH-1:0] Conv1F; // conv1 filter weights
19 | input [DepthC2*Kernel*Kernel*DepthC1*DATA_WIDTH-1:0] Conv2F; // conv2 filter weights
20 | output [MvgP2out*MvgP2out*DepthC2*DATA_WIDTH-1:0] iConvOutput; // 5x5x16 feature map fed to the FC head
21 |
22 | reg C1rst,C2rst,MP1rst,MP2rst,Relu1Reset,Relu2Reset,enRelu; // per-stage reset/enable controls
23 | //wire Tanh1Flag,Tanh2Flag,Tanh3Flag;
24 |
25 | wire [Conv1Out*Conv1Out*DepthC1*DATA_WIDTH-1:0] C1out;
26 | wire [Conv1Out*Conv1Out*DepthC1*DATA_WIDTH-1:0] C1outRelu;
27 |
28 | wire [MvgP1out*MvgP1out*DepthC1*DATA_WIDTH-1:0] MP1out;
29 |
30 | wire [Conv2Out*Conv2Out*DepthC2*DATA_WIDTH-1:0] C2out;
31 | wire [Conv2Out*Conv2Out*DepthC2*DATA_WIDTH-1:0] C2outRelu;
32 |
33 | wire [MvgP2out*MvgP2out*DepthC2*DATA_WIDTH-1:0] MP2out; // NOTE(review): declared but unused -- MP2 drives iConvOutput directly below
34 |
35 | convLayerMulti C1
36 | (
37 | .clk(clk),
38 | .reset(reset),
39 | .image(CNNinput),
40 | .filters(Conv1F),
41 | .outputConv(C1out)
42 | );
43 |
44 | UsingTheRelu16
45 | #(.OUTPUT_NODES(Conv1Out*Conv1Out*DepthC1))
46 | relu_1
47 | (
48 | .clk(clk),
49 | .reset(Relu1Reset),
50 | .en(enRelu),
51 | .input_fc(C1out),
52 | .output_fc(C1outRelu)
53 | );
54 |
55 | MaxPoolMulti MP1
56 | (
57 | .clk(clk),
58 | .reset(MP1rst),
59 | .apInput(C1outRelu),
60 | .apOutput(MP1out)
61 | );
62 |
63 | convLayerMulti
64 | #(
65 | .DATA_WIDTH(16),
66 | .D(6),
67 | .H(14),
68 | .W(14),
69 | .F(5),
70 | .K(16)
71 | ) C2
72 | (
73 | .clk(clk),
74 | .reset(C2rst),
75 | .image(MP1out),
76 | .filters(Conv2F),
77 | .outputConv(C2out)
78 | );
79 |
80 | UsingTheRelu16
81 | #(.OUTPUT_NODES(Conv2Out*Conv2Out*DepthC2))
82 | relu_2
83 | (
84 | .clk(clk),
85 | .reset(Relu2Reset),
86 | .en(enRelu),
87 | .input_fc(C2out),
88 | .output_fc(C2outRelu)
89 | );
90 |
91 | MaxPoolMulti
92 | #(
93 | .D(16),
94 | .H(10),
95 | .W(10)
96 | ) MP2
97 | (
98 | .clk(clk),
99 | .reset(MP2rst),
100 | .apInput(C2outRelu),
101 | .apOutput(iConvOutput)
102 | );
103 |
104 | always @(posedge clk or posedge reset) begin // stage scheduler: holds every stage in reset, then releases each during a fixed cycle window
105 | if (reset == 1'b1) begin
106 | C1rst = 1'b1;
107 | C2rst = 1'b1;
108 | MP1rst = 1'b1;
109 | MP2rst = 1'b1;
110 | Relu1Reset = 1'b1;
111 | Relu2Reset = 1'b1;
112 | enRelu = 1'b1;
113 | counter = 0;
114 | end
115 | else begin
116 | counter = counter + 1;
117 | if (counter > 0 && counter < 7*1457) begin // NOTE(review): window bounds are hand-tuned magic cycle counts -- confirm against the submodules' latencies
118 | C1rst = 1'b0; // run conv1
119 | end
120 | else if (counter > 7*1457 && counter < 7*1457+6*784*6) begin
121 | Relu1Reset = 1'b0; // run ReLU over conv1 output
122 | end
123 | else if (counter > 7*1457+6*784*6 && counter < 7*1457+6*784*6+8) begin
124 | MP1rst = 1'b0; // run pool1
125 | end
126 | else if (counter > 7*1457+6*784*6+8 && counter < 7*1457+6*784*6+8+18*22*152) begin
127 | C2rst = 1'b0; // run conv2
128 | end
129 | else if (counter > 7*1457+6*784*6+8+18*22*152 && counter < 7*1457+6*784*6+8+18*22*152 + 6*1600) begin
130 | Relu2Reset = 1'b0; // run ReLU over conv2 output
131 | end
132 | else if (counter > 7*1457+6*784*6+8+18*22*152 + 6*1600 && counter < 7*1457+6*784*6+8+18*22*152 + 6*1600 + 20) begin
133 | MP2rst = 1'b0; // run pool2, producing iConvOutput
134 | end
135 | end
136 | end
137 |
138 | endmodule
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.srcs/sources_1/imports/Integration first part/ANNfull.v:
--------------------------------------------------------------------------------
1 | module ANNfull(clk,reset,input_ANN,output_ANN); // fully-connected head: 400 -> 120 -> 84 -> 10, then argmax to a 4-bit class id
2 |
3 | parameter DATA_WIDTH = 32;
4 | parameter INPUT_NODES_L1 = 400; // flattened 5x5x16 conv output
5 | parameter INPUT_NODES_L2 = 120;
6 | parameter INPUT_NODES_L3 = 84;
7 | parameter OUTPUT_NODES = 10;
8 |
9 | input clk, reset;
10 | input [DATA_WIDTH*INPUT_NODES_L1-1:0] input_ANN;
11 | output [3:0] output_ANN; // predicted class index (0..9)
12 |
13 | reg rstLayer;
14 | reg rstRelu;
15 | reg enRelu;
16 |
17 | reg [8:0] address; // shared weight-row address swept 0..INPUT_NODES_L1+2; -1 wraps to 511 as the idle value
18 |
19 | wire [DATA_WIDTH*INPUT_NODES_L2-1:0] output_L1;
20 | wire [DATA_WIDTH*INPUT_NODES_L2-1:0] output_L1_relu;
21 |
22 | wire [DATA_WIDTH*INPUT_NODES_L3-1:0] output_L2;
23 | wire [DATA_WIDTH*INPUT_NODES_L3-1:0] output_L2_relu;
24 |
25 | wire [DATA_WIDTH*OUTPUT_NODES-1:0] output_L3;
26 |
27 | wire [DATA_WIDTH*INPUT_NODES_L2-1:0] WL1; // one row of layer-1 weights per address
28 | wire [DATA_WIDTH*INPUT_NODES_L3-1:0] WL2;
29 | wire [DATA_WIDTH*OUTPUT_NODES-1:0] WL3;
30 |
31 | weightMemory
32 | #(.INPUT_NODES(INPUT_NODES_L1),
33 | .OUTPUT_NODES(INPUT_NODES_L2),
34 | .file("C:/Users/24857/Desktop/try/para/fc1.txt")) // NOTE(review): machine-specific absolute path; must be edited per install (see repo README)
35 | W1(
36 | .clk(clk),
37 | .address(address),
38 | .weights(WL1)
39 | );
40 |
41 | weightMemory
42 | #(.INPUT_NODES(INPUT_NODES_L2),
43 | .OUTPUT_NODES(INPUT_NODES_L3),
44 | .file("C:/Users/24857/Desktop/try/para/fc2.txt")) // NOTE(review): machine-specific absolute path
45 | W2(
46 | .clk(clk),
47 | .address(address),
48 | .weights(WL2)
49 | );
50 |
51 | weightMemory
52 | #(.INPUT_NODES(INPUT_NODES_L3),
53 | .OUTPUT_NODES(OUTPUT_NODES),
54 | .file("C:/Users/24857/Desktop/try/para/classifier.txt")) // NOTE(review): machine-specific absolute path
55 | W3(
56 | .clk(clk),
57 | .address(address),
58 | .weights(WL3)
59 | );
60 |
61 | layer
62 | #(.INPUT_NODES(INPUT_NODES_L1),
63 | .OUTPUT_NODES(INPUT_NODES_L2))
64 | L1(
65 | .clk(clk),
66 | .reset(rstLayer),
67 | .input_fc(input_ANN),
68 | .weights(WL1),
69 | .output_fc(output_L1)
70 | );
71 |
72 | activationFunction #(.OUTPUT_NODES(INPUT_NODES_L2)) relu_1
73 | (
74 | .clk(clk),
75 | .reset(rstRelu),
76 | .en(enRelu),
77 | .input_fc(output_L1),
78 | .output_fc(output_L1_relu)
79 | );
80 |
81 | layer
82 | #(.INPUT_NODES(INPUT_NODES_L2),
83 | .OUTPUT_NODES(INPUT_NODES_L3))
84 | L2(
85 | .clk(clk),
86 | .reset(rstLayer),
87 | .input_fc(output_L1_relu),
88 | .weights(WL2),
89 | .output_fc(output_L2)
90 | );
91 |
92 | activationFunction #(.OUTPUT_NODES(INPUT_NODES_L3)) relu_2
93 | (
94 | .clk(clk),
95 | .reset(rstRelu),
96 | .en(enRelu),
97 | .input_fc(output_L2),
98 | .output_fc(output_L2_relu)
99 | );
100 |
101 | layer
102 | #(.INPUT_NODES(INPUT_NODES_L3),
103 | .OUTPUT_NODES(OUTPUT_NODES))
104 | L3(
105 | .clk(clk),
106 | .reset(rstLayer),
107 | .input_fc(output_L2_relu),
108 | .weights(WL3),
109 | .output_fc(output_L3)
110 | );
111 |
112 | FindMax findmax1
113 | (
114 | .n(output_L3), // 10 packed logits from the final layer
115 | .max(output_ANN) // index of the largest logit
116 | );
117 |
118 |
119 | always @(posedge clk or posedge reset) begin // control FSM: sweep address through every weight row, then pulse ReLU enable and restart the sweep
120 | if (reset == 1'b1) begin
121 | rstRelu = 1'b1;
122 | rstLayer = 1'b1;
123 | address = -1; // wraps to 511 in the 9-bit reg; first increment brings it to 0
124 | enRelu = 1'b0;
125 | end
126 | else begin
127 | rstRelu = 1'b0;
128 | rstLayer = 1'b0;
129 | if (address == INPUT_NODES_L1+1) begin // all 400 rows consumed: latch activations through the ReLUs
130 | address = address + 1;
131 | enRelu = 1'b1;
132 | end
133 | else if (address == INPUT_NODES_L1+2) begin // end of pass: idle the address and re-arm the layers
134 | address = -1;
135 | enRelu = 1'b0;
136 | rstLayer = 1'b1;
137 | end
138 | else begin
139 | address = address + 1; // normal sweep through weight rows
140 | end
141 | end
142 | end
143 |
144 | endmodule
--------------------------------------------------------------------------------
/cifar-10-torch/cifar-source/train.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 | from torch.utils.data import DataLoader
5 | from models import Lenet5, NormalCNN
6 | from torchvision import transforms
7 | from torchvision.models import vgg19_bn, resnet152
8 | from torchvision.datasets import CIFAR10
9 | from torch.optim import AdamW, Adam, SGD
10 | import os
11 |
12 | device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
13 |
14 |
15 | def train(EPOCHS, lr, batch_size):
16 | transform_train = transforms.Compose([
17 | transforms.Grayscale(),
18 | transforms.Resize((224, 224)),
19 | transforms.RandomHorizontalFlip(0.5),
20 | transforms.ToTensor(),
21 | transforms.Normalize((0.449), (0.226))
22 | ])
23 | transform_test = transforms.Compose([
24 | transforms.Grayscale(),
25 | transforms.Resize((224, 224)),
26 | transforms.ToTensor(),
27 | transforms.Normalize((0.449), (0.226))
28 | ])
29 |
30 | train_dataloader = DataLoader(CIFAR10(root='./cifar-10-python/', train=True, transform=transform_train), shuffle=True, batch_size=batch_size, num_workers=4)
31 | test_dataloader = DataLoader(CIFAR10(root='./cifar-10-python/', train=False, transform=transform_test), batch_size=batch_size, num_workers=4)
32 |
33 | model = resnet152(pretrained=True)
34 | model.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
35 | model.fc = nn.Linear(2048, 10)
36 | # model = Lenet5()
37 | model.to(device)
38 | optimizer = Adam(model.parameters(), lr=lr)
39 | loss_fn = nn.CrossEntropyLoss()
40 |
41 | max_test_acc = 0.
42 | for epoch in range(EPOCHS):
43 | model.train()
44 | for step, batch in enumerate(train_dataloader):
45 | batch_train = batch[0].to(device)
46 | batch_label = batch[1].to(device)
47 |
48 | model.zero_grad()
49 | logits = model(batch_train)
50 | loss = loss_fn(logits, batch_label)
51 |
52 | loss.backward()
53 | # nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.)
54 | optimizer.step()
55 |
56 | if step % (len(train_dataloader) // 9) == 0:
57 | print("epoch: {} step: {}/{}".format(epoch, step, len(train_dataloader)))
58 |
59 | torch.save(model.state_dict(), './res/lenet5_last_1c.pt')
60 |
61 | # train_acc = evaluate(model, train_dataloader)
62 | test_acc = evaluate(model, test_dataloader)
63 | if test_acc > max_test_acc:
64 | max_test_acc = test_acc
65 | torch.save(model.state_dict(), './res/lenet5_best_1c.pt')
66 | print("Best model saved!")
67 |
68 | print("epoch: {} test_acc: {:.2f}%".format(epoch, test_acc * 100))
69 |
70 |
71 | def evaluate(model, data_loader):
72 | model.eval()
73 | pred_tags = []
74 | true_tags = []
75 | with torch.no_grad():
76 | for batch in data_loader:
77 | batch_data = batch[0].to(device)
78 | batch_label = batch[1].to(device)
79 |
80 | logits = model(batch_data)
81 |
82 | pred = torch.argmax(logits, dim=1).cpu().numpy()
83 | tags = batch_label.cpu().numpy()
84 |
85 | pred_tags.extend(pred)
86 | true_tags.extend(tags)
87 |
88 | assert len(pred_tags) == len(true_tags)
89 | correct_num = sum(int(x == y) for (x, y) in zip(pred_tags, true_tags))
90 | accuracy = correct_num / len(pred_tags)
91 |
92 | return accuracy
93 |
94 |
95 | if __name__ == '__main__':
96 | if not os.path.exists('./res'):
97 | os.mkdir('./res')
98 | train(50, 0.001, 128)
99 |
--------------------------------------------------------------------------------
/quantification/input_pic2_lable8.txt:
--------------------------------------------------------------------------------
1 | ba18b9d8b66fb46fba98bc3cbc7cbcccbd0cbd3cbd5dbd0cbdbdbdedbe1dbe4dbd3cbe0dbe5dbe7dbe8dbe5dbe7dbe7dbe9dbdedbe2dbeadbe4dbdedbe7dbe9dbb59bb59b878b52fbb59bcccbcecbcfcbd4cbdbdbdbdbd2cbe0dbe5dbe6dbeadbd8dbe1dbe5dbeddbefdbe4dbe6dbecdbecdbe0dbe6dbefdbe4dbe0dbeedbefdbb99bb79b8f8b5afbbb9bd0cbd4cbcbcbd3cbdfdbd9dbd2cbdcdbe1dbe1dbebdbd9dbe0dbe5dbeadbe2dbcecbe2dbe9dbefdbe1dbe8dbf2dbe6dbdedbf2dbeddbbd9bb79b8f8b66fbbd9bcecbd6dbcfcbd5dbe1dbdadbd1cbd5dbe0dbdadbebdbdbdbdedbe4dbe1db9d8b8d8bdddbeadbf0dbe0dbe6dbf2dbe5dbdbdbf0dbebdbc3cbc0cb958b5afbbd9bcccbd7dbcccbcecbe3dbd9dbcfcbd2cbdedbcccbe4dbdbdbd9dbe3dbd5db7f0b8b8bd9dbe5dbefdbdfdbe3dbeedbdfdbe0dbeedbe8dbc5cbc1cb998b42fbbf9bc9cbd8dbcecbd1cbe4dbd6dbcfcbcfcbdcdbbf9bc5cbc8cbc7cbe6dbc1c3326b770bdadbe0dbeddbdddbd9dbe7dbdadbdedbeadbe1dbc3cbbb9b958a970bb39bc2cbd5dbcecbd3cbe0dbd0cbcccbcfcbdcdba98b918bab8bad8bcacbaf83025b1ddb998ba98bd6dbd7dbcecbdedbd4cbd7dbe4dbdadbc5cbbd9b8f8acb8bb99bc8cbcbcbcacbd2cbdadbcacbcccbcecbd0cbbf9bc5cb918b15d3653398a3a8a3b6b3b6b3c453714b9f8bc6cbd7dbcacbc0cbd9dbd1cb8d8b838b52f1c88b838b938b770ba18bb79bbd9bb39bbf9bc2cbb39bb18bb99b35d345337d43794384a386a37143b4b3aca32a6b62fb938b770b42fbb39b97834d334933513379436d4365335d33025a1beab71afb9b2ddb46fb0ddb52fb5af2c4934d334d334d337d437d4388a398aaeb92f4ab42fb1dd35d337142d4933263b6b3a4a3a6a3b2b390a382a392a38aa388a369337543693359334932e4a3225379437943754382a398a3a8a375434132c493613b05dbb1833263693386a382a3c253aca398a394a3125341338ea3b0b382a30a53693365330251c882e4a345334d3361335d335933714388ab52fbab8b9b8b1dd3693bbd9b2dd384a38ea38ea3b6b380a369339ca359335d339aa388a2523b25db0dd30a531252a9233a636d4382a382a36d43493359331a5b8d8bb59bc6cbad833a6b72fb8383aeb390a390a394a3754380a39ca390a3aca3a2a38ea375433a61c882d4932a6302530a534133453332633a62d492d49b3deb9b8b9b8ba58ba38b46f3125b6ef3b2b3c253a8a38ca38aa388a394a394a3a2a3a2a37d43025ab71b1ddb46fb5afb5efb56fb42fb7b0b818b770b770b5efb818b938b8f8b958b858b7b0388a39ea3c053ca53c053453355331a533a631a5b15db35db56fb72fb898b8f8b8f8b958b958b998b838b838b8d8b898
b56f30a52a92afb9b6efb878b72fb7f036d43c1538ea39ea3b0b2e4a32a62523b25db0ddb42fb6efb72fb7f0b878b8f8b938b978b958b8d8b770b1ddb2ddb3dea1be386a3413b05db52fb7b0b6efb6ef1c88b0ddba98b46f371434132d49a970302535933613b2ddb7b0b878b898b898b818b62fb42fb15db05dab71b15db770b56fa970b3deb72fb5afb6afb4efb52fb3deb5afb5efb46fb1dd31253653394a39ca37542c49b05db62fb770b46fb05dacb8aeb9b1ddb46fb5efb770b52fb72fb838b5efb6efb818b72fb5efb56fb56fb15d2a9233263125b35d36d43aca396a2c49b3deb66fb42fafb9a9702523afb9b4afb6afb838b878b7f0b6efb3deb5afb770b66fb62fb6efb7f0b56fb2dd2e4a365338ca3aeb398aa6e02f4a30252d49a970a970a970b0dda9702523b52fb6efb770b6efb62fb25d2c4935d3380ab05db66fb66fb66fb5afb3de31253794388a39ea3bcb3b4b3b0b3c0530a52523a970a6e0a970aeb9b15db15d1c88afb9b05db2ddb42fb35d37543b8b3c053c152f4ab62fb35dacb831a5375438aa390a3a4a3c053c053aca3cf53e7639ea2d49aeb9acb8acb8acb831a53613382a38aa390a2d49b42fb2dd361339aa38ca386a332633a63693384a38ca38ea3a8a3bab3c753cd53c853d363e363e963aeb34132e4a2f4a2c4930253a0a3beb3b2b3a6a3a0a3025b35db1dd2e4a35d3382a394a396a394a392a38ea39ea3beb3cf53ca53cf53d263db63e363e663e963a4a361331a530a52c492a92386a396a37d43653341332a63613392a3aca3b0b3a4a39ea392a38ea3a0a3c353cb53c953c053c753d663db63de63e363e663e863a0a37143125322533a634d3375438aa394a3a4a3b8b3beb3c053b6b3aeb3a2a39ca3a6a3b4b3c353cb53cb53c953ce53c553cf53df63e163e063e363e663e363beb3b8b3aaa3bab3c153c653cb53cc53ca53c653c253beb3b6b3b2b3b4b3bab3c453d153d463c853c953cb53d363d863d463dd63e263e163e163e363e263e363dd63df63e263e063da63d863d763d263c853c553c453c653cb53d153d263d153d463cd53aca3c253d263d763da63da63de63e263e363e263e163e063df63eb63de63e163e563e363df63dc63db63da63d763d863db63db63d763d153ce53ce53d363c6539ca3c953de63db63d963e063e263e363e263e063e063db63e863f663e163e263e663e563e463e263e263e063de63d963d463d263cf53cf53d053d363db63cf53aeb3d563e263db63e363e463e463e463e263e063de63df63fb6400b3e863e463e663e263df63dc63da63d763d563d363d153d363d363d563d963de63e863da63bcb3d863e063e363e463e463e563e563e663e163dc63f364013400b3e163df63dd63db63db63da63da6
3d963d863d763d963dd63e163e763f063f763ff63ee63cc53dd63e763e663e663e563e663e863e863dd63e96400b401b3ff6
--------------------------------------------------------------------------------
/cifar-10-torch/cifar-source/distill.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from models import Lenet5
3 | from torchvision.models import resnet152
4 | from torch.optim import Adam
5 | from torch.utils.data import DataLoader
6 | import torch.nn as nn
7 | import torch.nn.functional as F
8 | import torch
9 | from torchvision.transforms import transforms
10 | from torchvision.datasets import CIFAR10
11 |
12 | device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
13 |
14 |
15 | def run(EPOCHS, lr, batch_size, alpha, T, resume=True):
16 | teacher_model = resnet152()
17 | teacher_model.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
18 | teacher_model.fc = nn.Linear(2048, 10)
19 | teacher_model.to(device)
20 | teacher_model.load_state_dict(torch.load('./res/resnet152_best_1c.pt'))
21 | student_model = Lenet5().to(device)
22 | if resume:
23 | student_model.load_state_dict(torch.load('./res/distilled_lenet5_best.pt'))
24 |
25 | transform_teacher = transforms.Compose([
26 | transforms.Grayscale(),
27 | transforms.Resize((224, 224)),
28 | transforms.ToTensor(),
29 | transforms.Normalize((0.449), (0.226)),
30 | ])
31 | transform_student = transforms.Compose([
32 | transforms.Grayscale(),
33 | transforms.RandomHorizontalFlip(0.5),
34 | transforms.ToTensor(),
35 | transforms.Normalize((0.4734), (0.2507)),
36 | ])
37 | transform_test = transforms.Compose([
38 | transforms.Grayscale(),
39 | transforms.ToTensor(),
40 | transforms.Normalize((0.4734), (0.2507))
41 | ])
42 | torch.manual_seed(0)
43 | g = torch.Generator()
44 | train_dataloader_teacher = DataLoader(CIFAR10(root='./cifar-10-python/', train=True, transform=transform_teacher),
45 | shuffle=True, generator=g, batch_size=batch_size, num_workers=4)
46 | torch.manual_seed(0)
47 | g = torch.Generator()
48 | train_dataloader_student = DataLoader(CIFAR10(root='./cifar-10-python/', train=True, transform=transform_student),
49 | shuffle=True, generator=g, batch_size=batch_size, num_workers=4)
50 | test_dataloader = DataLoader(CIFAR10(root='./cifar-10-python/', train=False, transform=transform_test),
51 | batch_size=batch_size, num_workers=4)
52 |
53 | optimizer = Adam(student_model.parameters(), lr=lr)
54 | student_loss = nn.CrossEntropyLoss()
55 | distill_loss = nn.KLDivLoss(reduction='batchmean')
56 |
57 | teacher_model.eval()
58 | max_test_acc = 0.
59 | for epoch in range(EPOCHS):
60 | student_model.train()
61 | for step, (batch_tea, batch_stu) in enumerate(zip(train_dataloader_teacher, train_dataloader_student)):
62 | tea_data = batch_tea[0].to(device)
63 | stu_data = batch_stu[0].to(device)
64 | stu_label = batch_stu[1].to(device)
65 |
66 | with torch.no_grad():
67 | teacher_logits = teacher_model(tea_data)
68 |
69 | student_model.zero_grad()
70 | logits = student_model(stu_data)
71 | loss = alpha * student_loss(logits, stu_label) + (1 - alpha) * distill_loss(
72 | F.log_softmax(logits / T, dim=1), F.softmax(teacher_logits / T, dim=1))
73 |
74 | loss.backward()
75 | optimizer.step()
76 |
77 | if step % (len(train_dataloader_student) // 9) == 0:
78 | print("epoch: {} step: {}/{}".format(epoch, step, len(train_dataloader_student)))
79 |
80 | torch.save(student_model.state_dict(), './res/distilled_lenet5_last.pt')
81 |
82 | test_acc = evaluate(student_model, test_dataloader)
83 | if test_acc > max_test_acc:
84 | max_test_acc = test_acc
85 | torch.save(student_model.state_dict(), './res/distilled_lenet5_best.pt')
86 | print("Best model saved!")
87 |
88 | print("epoch: {} test_acc: {:.2f}%".format(epoch, test_acc * 100))
89 |
90 |
91 | def evaluate(model, data_loader):
92 | model.eval()
93 | pred_tags = []
94 | true_tags = []
95 | with torch.no_grad():
96 | for batch in data_loader:
97 | batch_data = batch[0].to(device)
98 | batch_label = batch[1].to(device)
99 |
100 | logits = model(batch_data)
101 |
102 | pred = torch.argmax(logits, dim=1).cpu().numpy()
103 | tags = batch_label.cpu().numpy()
104 |
105 | pred_tags.extend(pred)
106 | true_tags.extend(tags)
107 |
108 | assert len(pred_tags) == len(true_tags)
109 | correct_num = sum(int(x == y) for (x, y) in zip(pred_tags, true_tags))
110 | accuracy = correct_num / len(pred_tags)
111 |
112 | return accuracy
113 |
114 |
115 | if __name__ == '__main__':
116 | run(EPOCHS=100, lr=0.001, batch_size=128, alpha=0.3, T=7, resume=True)
117 |
--------------------------------------------------------------------------------
/quantification/quantification_para.py:
--------------------------------------------------------------------------------
1 | from multiprocessing.spawn import import_main_path
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | import struct
6 | import ctypes
7 |
8 |
9 | class Lenet5(nn.Module):
10 | '''
11 | 所用的卷积神经网络
12 | '''
13 |
14 | def __init__(self):
15 | super(Lenet5, self).__init__()
16 | # 32, 32, 1
17 | self.layer1 = nn.Sequential(
18 | nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),
19 | nn.ReLU(),
20 | nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
21 | )
22 | # 14, 14, 6
23 | self.layer2 = nn.Sequential(
24 | nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
25 | nn.ReLU(),
26 | nn.MaxPool2d(kernel_size=2, stride=2)
27 | )
28 | # 5, 5, 16
29 | self.fc1 = nn.Sequential(
30 | nn.Linear(400, 120),
31 | nn.ReLU()
32 | )
33 | self.fc2 = nn.Sequential(
34 | nn.Linear(120, 84),
35 | nn.ReLU()
36 | )
37 | self.classifier = nn.Linear(84, 10)
38 |
39 | def forward(self, input):
40 | x = self.layer1(input)
41 | x = self.layer2(x)
42 | x = self.fc1(x.view(x.shape[0], -1))
43 | x = self.fc2(x)
44 | logits = self.classifier(x)
45 |
46 | return logits
47 |
48 |
49 | def dec2bin(x):
50 | '''
51 | 将十进制小数x转为对应的二进制小数
52 | '''
53 | x -= int(x)
54 | bins = []
55 | while x:
56 | x *= 2
57 | bins.append("1" if x >= 1. else "0")
58 | x -= int(x)
59 | return "".join(bins)
60 |
61 |
62 | def float2IEEE16(x):
63 | '''
64 | float转IEEE754的半精度浮点数
65 | '''
66 | ms = "0" if x > 0 else "1"
67 | x = abs(x)
68 | x0 = int(x) # 整数部分
69 | x1 = x - x0 # 小数部分
70 | x0 = bin(x0).replace("0b", "")
71 | x1 = dec2bin(x1)
72 | if x0[0] == "0":
73 | E = 15 - x1.find("1") - 1
74 | m = x1[x1.find("1"):]
75 | if E < 0:
76 | E = 15
77 | m = "00000000000"
78 | else:
79 | E = 15 + len(x0) - 1
80 | m = x0 + x1
81 | E = bin(E).replace("0b", "")
82 | if len(E) > 5:
83 | E = E[:5]
84 | else:
85 | for i in range(5 - len(E)):
86 | E = "0" + E
87 | m = m[1:]
88 | if len(m) > 10:
89 | m = m[:10]
90 | else:
91 | for i in range(10 - len(m)):
92 | m += "0"
93 | y = ms+E+m
94 | y1 = ""
95 | for i in range(len(y)//4):
96 | y1 += hex(int(y[4*i:4*(i+1)], 2)).replace("0x", "")
97 | return y1
98 |
99 |
100 | def float2IEEE32(x):
101 | '''
102 | float转IEEE754的单精度浮点数
103 | '''
104 | ms = "0" if x > 0 else "1"
105 | x = abs(x)
106 | x0 = int(x) # 整数部分
107 | x1 = x - x0 # 小数部分
108 | x0 = bin(x0).replace("0b", "")
109 | x1 = dec2bin(x1)
110 | if x0[0] == "0":
111 | E = 127 - x1.find("1") - 1
112 | m = x1[x1.find("1"):]
113 | if E < 0:
114 | E = 127
115 | m = "000000000000000000000000"
116 | else:
117 | E = 127 + len(x0) - 1
118 | m = x0 + x1
119 | E = bin(E).replace("0b", "")
120 | if len(E) > 8:
121 | E = E[:8]
122 | else:
123 | for i in range(8 - len(E)):
124 | E = "0" + E
125 | m = m[1:]
126 | if len(m) > 23:
127 | m = m[:23]
128 | else:
129 | for i in range(23 - len(m)):
130 | m += "0"
131 | y = ms+E+m
132 | y1 = ""
133 | for i in range(len(y)//4):
134 | y1 += hex(int(y[4*i:4*(i+1)], 2)).replace("0x", "")
135 | return y1
136 |
137 |
138 | if __name__ == '__main__':
139 | model = Lenet5()
140 | model.load_state_dict(torch.load("distilled_lenet5_best.pt"))
141 | for name in model.state_dict():
142 | # print(name, '\t', model.state_dict()[name].shape)
143 | # print(model.state_dict()[name])
144 |
145 | # 卷积层权重量化
146 | if name in ["layer1.0.weight", "layer2.0.weight"]:
147 | fname = name.split(".")[0]
148 | Tensor = model.state_dict()[name]
149 | s1, s2, s3, s4 = Tensor.shape
150 | with open("parameters/"+fname+".txt", "w", encoding="utf-8") as f:
151 | for i in range(s1):
152 | for j in range(s2):
153 | for k in range(s3):
154 | for t in range(s4):
155 | f.write(float2IEEE16(Tensor[i][j][k][t]))
156 | f.write("\n")
157 | # 全连接层权重量化
158 | if name in ["fc1.0.weight", "fc2.0.weight", "classifier.weight"]:
159 | fname = name.split(".")[0]
160 | Matrix = model.state_dict()[name].T
161 | with open("parameters/"+fname+".txt", "w", encoding="utf-8") as f:
162 | for i in range(Matrix.shape[0]):
163 | for j in range(Matrix.shape[1]):
164 | f.write(float2IEEE32(Matrix[i][j])+"\n")
165 |
--------------------------------------------------------------------------------
/cifar-10-torch/cifar-source/models.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 |
3 |
4 | class Lenet5(nn.Module):
5 | def __init__(self):
6 | super(Lenet5, self).__init__()
7 | # 32, 32, 1
8 | self.layer1 = nn.Sequential(
9 | nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),
10 | nn.ReLU(),
11 | nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
12 | )
13 | # 14, 14, 6
14 | self.layer2 = nn.Sequential(
15 | nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
16 | nn.ReLU(),
17 | nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
18 | )
19 | # 5, 5, 16
20 | self.fc1 = nn.Sequential(
21 | nn.Linear(400, 120),
22 | nn.ReLU(),
23 | nn.Dropout(0.2)
24 | )
25 | self.fc2 = nn.Sequential(
26 | nn.Linear(120, 84),
27 | nn.ReLU(),
28 | nn.Dropout(0.2)
29 | )
30 | self.classifier = nn.Linear(84, 10)
31 |
32 | def forward(self, input):
33 | x = self.layer1(input)
34 | x = self.layer2(x)
35 | x = self.fc1(x.view(x.shape[0], -1))
36 | x = self.fc2(x)
37 | logits = self.classifier(x)
38 |
39 | return logits
40 |
41 |
42 | class NormalCNN(nn.Module):
43 | def __init__(self):
44 | super(NormalCNN, self).__init__()
45 | # (bs, 32, 32, 1)
46 | self.layer1 = nn.Sequential(
47 | nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=0),
48 | nn.BatchNorm2d(32),
49 | nn.ReLU()
50 | )
51 | # (bs, 28, 28, 32)
52 | self.layer2 = nn.Sequential(
53 | nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=0),
54 | nn.BatchNorm2d(32),
55 | nn.ReLU(),
56 | nn.MaxPool2d(kernel_size=2, stride=2),
57 | nn.Dropout(0.2)
58 | )
59 | # (bs, 12, 12, 32)
60 | self.layer3 = nn.Sequential(
61 | nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=0),
62 | nn.BatchNorm2d(64),
63 | nn.ReLU(),
64 | )
65 | # (bs, 10, 10, 64)
66 | self.layer4 = nn.Sequential(
67 | nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),
68 | nn.BatchNorm2d(64),
69 | nn.ReLU(),
70 | nn.MaxPool2d(kernel_size=2, stride=2),
71 | nn.Dropout(0.2)
72 | )
73 | # bs, 4, 4, 64 -> bs, 1024
74 | self.layer5 = nn.Sequential(
75 | nn.Linear(1024, 256),
76 | nn.ReLU()
77 | )
78 | self.classifier = nn.Linear(256, 10)
79 |
80 | def forward(self, input):
81 | x = self.layer1(input)
82 | x = self.layer2(x)
83 | x = self.layer3(x)
84 | x = self.layer4(x)
85 | x = x.view(x.shape[0], -1)
86 | x = self.layer5(x)
87 | logits = self.classifier(x)
88 |
89 | return logits
90 |
91 |
92 | class NormalCNN2(nn.Module):
93 | def __init__(self):
94 | super(NormalCNN2, self).__init__()
95 | # (bs, 32, 32, 1)
96 | self.layer1 = nn.Sequential(
97 | nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
98 | nn.ReLU(),
99 | # nn.BatchNorm2d(32),
100 | nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
101 | nn.ReLU(),
102 | nn.MaxPool2d(kernel_size=2, stride=2)
103 | )
104 | # (bs, 28, 28, 32)
105 | self.layer2 = nn.Sequential(
106 | nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
107 | # nn.BatchNorm2d(32),
108 | nn.ReLU(),
109 | nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
110 | nn.ReLU(),
111 | nn.MaxPool2d(kernel_size=2, stride=2)
112 | # nn.Dropout(0.2)
113 | )
114 | # (bs, 12, 12, 32)
115 | self.layer3 = nn.Sequential(
116 | nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
117 | # nn.BatchNorm2d(64),
118 | nn.ReLU(),
119 | nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
120 | nn.ReLU(),
121 | nn.MaxPool2d(kernel_size=2, stride=2)
122 | )
123 | # (bs, 10, 10, 64)
124 | # self.layer4 = nn.Sequential(
125 | # nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),
126 | # nn.BatchNorm2d(64),
127 | # nn.ReLU(),
128 | # nn.MaxPool2d(kernel_size=2, stride=2),
129 | # nn.Dropout(0.2)
130 | # )
131 | # bs, 4, 4, 64 -> bs, 1024
132 | # self.layer5 = nn.Sequential(
133 | # nn.Linear(1024, 256),
134 | # nn.ReLU()
135 | # )
136 | self.classifier = nn.Linear(4096, 10)
137 |
138 | def forward(self, input):
139 | x = self.layer1(input)
140 | x = self.layer2(x)
141 | x = self.layer3(x)
142 | # x = self.layer4(x)
143 | x = x.view(x.shape[0], -1)
144 | # x = self.layer5(x)
145 | logits = self.classifier(x)
146 |
147 | return logits
148 |
--------------------------------------------------------------------------------
/weight/layer2.txt:
--------------------------------------------------------------------------------
1 | 2e3f9a68ac152e1aacaf2fcf2bbd28972d9eae9533a625512d4a2acd2600355331f22fa9a19825b035aa2f613202a9321afeaeac25d4303997fe1b61b0852d1b2d132a4e29dfb1262fa92435af30ad87202928fc1ec2acbba54d2da92e271d762bd02ceb31dfa819270b27892a19b447b728b7afb6c1b4dab05ba8e81bdb2f442df7b14eaa4d2682a1042e5421710d352834aa89abf22b8723be9da42e25a88d2af3af261d352cd521faa415aaec2958a3892a9c1d55aea7b09c2966a64c2b1baec0ac402da0a56730392b3a272b2c9630fe2e77a4ee2eb02d852ff5a59ca78c2c14a8fea47caece2da02a81ae3eaf82ac2230552fb32c79a506b1f9b102b097278f2d55b3b4b6e7b58aae3aa855b417b28523842dea3313b061a7142fa62dfa2bd5ac4ea8ea261629ed24b8
2 | 2d03281b281bae1d34ca302d33ae33ed32b534fb3418337c3448345834f53021ad65315f2c2cae3db450b506b68db999ba2b2498a1d7ac212d561af72c5c2b7f26582a27209f2cb82e652d8ba482273e28f82c4929b8aa4c2f72b025b106b375af27b1972df0af2d242caa29a755a368ae54aff0aca6accb1f5ca910ae65ad49230121dda4b398e6a4f32f1cac061c412d79312a1ce6acceac4cae63a88ea678ae832bc62da6a7bda5fdaa4a2a392aa928f5aec1a6c22f292e33a87eb1242167acc4b0b0a886b1bb26032b90aca6294baff92a5f2c8723102662aba21099a97c2ecf2fb0af24addfa7c28b0620c5aae7b23eb122abd2adb39eda2dd7a0152d3d24e8298d2f942871a8d9aca62b82302e1f96ad5ba6072edb30da28492e462c0dae73342f325630d52f23b183
3 | af4fb2f8a7a1218229deb071af9628c22eb42e7fb025aef22865339c302bb36fb4412fc82879266dae9727ecaa55a803a5e82e3b23dd2119ad87b6b0317e2a6c329cb06cb0d425fc2e182c1ab09bab75305d2e10a5cdad4aaa412a9e2708a9bf2218ad39a874ae2baaf92a2f2849a914a6882cff1c61ad49a38524ed2ca5a917b5b52dd52ee22deeb4cfb05d2d1031ad2bbdac7e2f0fb55db294a87c308b3204b51ab610a42a2f013094adecb263a0da2c823218b088b35eace13115321625c2af36273f2f9b2cedb3afac79a50b2d242f06b25eabee2d4d31ce2cbbb3deaa6231ed309ca9eeadf02a7a31111ebf311cb1162dcd22602cfa2fae300a304d27d4ac7cae4832dcb0a5b251b253af3c2916b135abaeb485b5abaa86a7f4b11cb07cacc5a8daa5d7aac6b0baa178
4 | 2baeaf31300b28acadd42cf7ac9baddd2a86ac37298ca8a9addfa26da9f9a7afad3baa42b06eab1c291e30eb2eb8293c2e51a9fdaa3e2c90287fa09322fca2b6ac4818eaa969284dafdbac02b079aa25adedacc0a995b053acaaa74a2cf5a2632a26ad4026b4a8f6a4fbac1f261daaaaaf5ea63dafa813c428b2897f26fdacbcabad1c5ea4b929949cce2cfbaa65a4e8ac17a50529082714ad89ac7ea81daaffad21ac8b9d48a5b721782ca02b3fb002a279a2c727d7ac9e9c8f2063a484aa8caf87220faf88ae772a4eb14ea5e2a8d62c832d2fa8812c862b4524e127cbab8da79cad2fa5fe2747308629e130372ccb2ae9a852ae9ea191abd633ce33e0312e3296336235ee353a33c335263525338b31e431c3327934c5343930ce34ae31a434ac34363440330831d8347c
5 | a93d2d5b2ed42c16a2272d7f28b22ddb2cf12d86ab911b542641254628852ef82a7396eca37e2d4fa438b149b17fb340ad2caff0aa5eb024af7fb290b12cae37ac5b26caa52aacfdacb32015ac172ed4b13020f62a252e9f34072f30326b31c0345b319621c8a8ca259d2cc4a4531d832e7a30592a742e7a2e79305c30d92d012dc632992f2132c12d64a6da291730a92e1832c035362a4d2ec81d8da845ad3b30609f07296598fbb004307c2ec42d64afc8b055306eadc9b065b238b184aee0b1d7b054a48cafa42fe22cfb2c7caacaa81f28ef315d2d5a2c9ab20c305b31232f192d54b2d131ff307d2d220fd4b47e2890243fad9ca7a8b37ab0b3a64029f09805a596aaf6a9ccac202b79a0eaae65ad16a54b2836ab9c27ed2e54001eb05bb4a3a5071da4add9af16b0d1
6 | b443b705b4e7b12129b4b445b514b42dab279cc2affcb2ac25712a201c03b001a81b2c772c05a721a8d32c792ece2a5baded2d5ca1a7a191ac679495b127a60c0c2aacc43026a5c12a51a90ca517a350249e284b256f1d3b25cbad0122a7a9d1a45dacf6327c3283335033aa31f5326832a43308332da5373356304f2d1c2a6927b32b1730609cba2fba2cd9aca8997f2b8f3041adfcaa48ad51af622d172d56aea52614a8a520002a2ba4352d162819297b991ba8d4a0361d97a563b0c09ddaadeeaf21a5bdb074a6d42f832618a63228b330842f0c2d352ebdb2252c3a245f2dbfae882eeaa9071b00a8a7af652c672a9ba862b1ca9e5817d7ac5fa48324922f302d20ac9eb1a5a9f630122dd5af70b067b171ad429a53ad33ad702c632c809f279caa29f52fc925deac15
7 | b086ad1f2e661426ae59b0a2ae99a2c2acf6b084a803b3a4b158aa4aaaaf2db30977a2862a762fab2295ac96ab45afa3ac0baf572a75a480a979a503a4d3a48b1e441b22afb9a945a8152d08177d26fd30bd2e6e31132e8f2fc12ec23472351f31c0357521a2a9f3a7ca22c1a6cbad7bab4e2d029005256aa28bb0a6aeffb11ca5802ebea9cab2bab1a5ae622c0b2a42a241af2db372aea520973287328c2fbdb0ed24da300a2f8130f5aba1b276a6722fc6315fa9372af5ac58216e25d13384a84db272b306b3462c3b25ea295bad79b09ca1bd2178a0dd9434ae2a26952c6aa16b28d828859e5fa06e24162d512cb2b3fbaa8eac87a42b2e6f25c51e18ad07a91c2dcc323920cab001a24d2c4031853064297aaa9ba88530e131392761aaf6b18d33852e8833802c12af1a
8 | adaba4afaff3a6ab2aaaa547b46b2a8b2e1fa648b15328fca6d7aefda8e7acddabc3a461ac8da86e2d2a2ae9adfa9bfda11a32122c792cba2b0ea945308132d5a735aa5c2eb7b44620aa292c2be9a93eb550b3022cdc324d2a1fb0c8a88b2f4f2e7c31eab021b2489014a776ad11b3d3b225b106b27ca43cad93b4bbb4d5afd2af41b2eab01327c6aceaad6cb16215381e3c2a14afaeaf5030002cd730b82f442cfb306833ac2dc12a7c335c30fa27372da5302d2c4c279c2f0093cf2c1a2fb5233f2a3a2b842f2db045ad34ace0a18c2a86b174a0e628fdaced221caf331a1aadfca7c8a9602e79b070b074a504adb11f592af8ac4cad6aa30430a8300631b2a2b3a46234ba2d1315b93532256db165362235281f742d96b8432d59368d35b91ce5aceab40dadc3329833b7
9 | 32912c68ae2ead72b23e2c60316b27e3af17b045afe025292465ad79b116b2ebb3c9b0a3aaf8b30aa8d1b2e9b2b6acadab3c2e12a1f8ace92908a82cab38311f2cfeadbb2ece2b4dae642f7d2b67a9c3351fa8d8aea62166955e2b742f8aacbaac15b05125832562add82c6eaf6cacf82a4425c027d9b020b1a02e603084286aafe0b418a3473201327928a7b4a5b425b1c930022d5331bc29152e942e5c25872fa431d52b7b262aaef424ff348532782325a8daaf2718c0342c316f998028c4aac1a2af258d242624692bab2e0ea933ab3faf06aad02c592bdea2e7aa461f77aab828c72d4a2c1a2c9cac07af2c9dd32d9728e9b09cae55a4a12c23a6402f022e49b37b325434ae2733ae77b00b960534f433e7b1a3b5332cafa27f32d423b3ae4d321b22dfae16a0a8b070
10 | a76426db23812fb11c572abcb4112e292c252c9ea40fab6a304b2be92a4daeb934d4a1652a10242432a932cf2f6829e32bf3283bad34a37b2ceb2a802947a407b03127c7a550a6662d2db01facf72885ab3ea4b03065afb6a9cba43c9e052b13ab70b0832cb0b179aaa8ac79ae06aec9ab322c8ea879ae0131b92632b53fa9222c552ae9b255aef730843054ad632224347029c81539b10c27f72e0922013128b05a29502b3d2b202dddaea2ab272a5e2d622acb2c8a2ecab3d62b0aa4e322b2297728d92d0730e0aa5d2250a475ace6a7eeb094301ba823b1bb2b09b0a92a02ac18a8222eafac1bb28d2e46305c3058a99bac05275c26202f2d2d7fa4742dc5b1aaab5f30deaf10a77fa003ad7930e2aea5b5582904a44129b52fdbb2f1b119aed92c112679ac55b241b07e
11 | 304e2cb533932c573120a3a6b1d629272c301acd1d3ab64eb091309529b8ade8b564aef531bb314bb24ab563b227312230ad1e5e9c7729fa329532ceb0452b88adca2af62537b0c525ccab7c2c8a2c42adb22421ae27311a2c8ab0d3a782b3f0aae0232dadc4abb2ac00b574b1e7301e291d3063af49b32c318032b832052cc924a4a5001529a00e1ecda8c226f42864b04caaf58be6abebaa77a4d8b491b46aa97c295330272cdaa539a6f4a68130472f9f2a72a762a38a2d742ee62d612b15a7c0a681307031d532172f78a865a98d2c7b1cf6af08b1c8b3fab472acc02bd5ac7db2f4b7a6aa5a314c31722b7fadf6ae67283729532fcd299fae51a9efa93fab80214c307d30c82975aea9af453148359d34bd30112d06ae8328c2301bab603029b4d4b329b417b62fb268
12 | 358d3454228a2e002c253003ada22d662bd52e8624532e21297b300a2a04ac75abb7a4082cd0a29329602702a912b087aee72e122e12316d2da3297f2fd725afae3a2d26a8903171a4d2aa882897acf431172e3fad39ac61ab5b2c2a2a492b46ae08a4c02c9caced33213383236eac1334e92f62b4bfac98345b2e29b0eeaecfb09834bdaceab06aa422b049317ea4e7b0fda98f14f9a3e7b250adc7a5140de5b021b041acde25372acbae1ea828a8bc285d2cf6ad091e1e2d0826672d19a85130b732db306e2562255625e6a7bb2878287b28f6ad32ac26adaa2a2b2589af05b11eae44a7cc3037b117290a25b62dfb30a8ac9a2a5f2d002a9dad85a45aa7eda6eca41722a92442adf9ae7929769b04a5f5a611ac2ba50baad725c9ad14a8b22aaa2588ac21afa1a0461c37
13 | 3153336b34a224c2a6f932303330357e21c6af403206328334df131aaaa42e092e8730af3000240e300d2c0d309f2814a1fa2cd12cb430fb2c212377a51f2e4c30062ae325c729ee1f3e297dac262a1f9ca3230b2140b02eadf5a794249eb0d2b046a959b0432a271c33aa1aadcab0efaf5c910bb17aac33b037b1afb149b106ada6b08db111b46cb1ccaec3a154ac0ab158ad63a4592c5dacb0ad8fadd2b07a9defaf16a70a9e82248d24d8a28529e32ff52fc92d93a72d2d692f2a2dc92d68225e2a4a2a2e29c82ab52b8cab142cd9201ab019af31a8f4b0122a5fad01aee7aa72a87d253e1e5fae141c32289e2da1113aa5f5aa8821292ba7aea020a09af0af402c18b389ab78a9702db4abfdb2bdb72cb38eac83a2abb085b1dfb0c4231e26ba2cafaddc2aae284d2f76
14 | aaa4236bb0beb8e7b954301bab39b81abb3ab8ff2a74a88ab724b8a6b55c293c264caabab30cabe5314532c328152d9eafc42a7b2868b096aec1af18308f2475a54eacd6aa5a2ca92c5a284bad9b273baa2ea55a28fbb0b82c5d2f5f2b8f91e3249c2c118c002959b169b446b04520d9a818ae4823032bff288422f1a6962d35310e20412b7f24e6307b32002bc22c26aa189143300b2c9e2d43ac1ab1d4b4812ed52c102b84b032ac48a72a1f772f423028a8fa284d2cbd30042f59a2f5ade92433a5f0a4a9a71930752cd2293fadd7303a29c51c19a71cb0222470a46faba8ac38b03dac71a7842e3a27672f902c2e26392b25a07f2b16b027ab08ab4eb02ab0a2adcf29dcac52b262b33aadeaa339a42520a7ad48306da8502b612c2f2c89303b305f2d482e312c3d30f8
15 | 2d332fb430612bc92db7add3ad71afddaf21af5e21c0b0d2ab2bad61b020b011ac6eaf7db16c9a86b539b31ab1b4b04ab38a341631da34e13355340d28f42115ac9ba73faac428021e98b1f0ad00ae362a5bb054b359b00fb1152d06a84d294db522b3ca12622b1b28e824f4303bb0f4b108a2cd2dd4329dae23ab14ae10add724a2b1d5ad2c28582c1416b8af87ab67ad0a296830102deb28c2a56ba3d8af2626f129adac7027b124712d392f3b20ed2d9a2f0a30412ecb30e12b1a2d9330d62f6c339232802c4427ddafd6a446a2c3afb42c482a26203323982c71a43bb11d9e9ca9059f17a62da8002cc52e622ea8300ba10f284b2e0730e2aa4da60e2ef130812056a162aca6b1afb075ac1eb1492904acefb07db1f8af099858aab5b3fab0dda432b50ab0922a55afd0
16 | 2fcbaeaeb08fb0ab3002abf9ab6ab1942943a7aa2b0b30f0aa4730e925ab2d7d2cbfafd1a96fb44fb523b91bb96eb806bb07af21a872a8d0a04930b5a589a9c0ac3e2e96309c2a6fa4be21f62a032e922efc2b3ea2e027933055a5392ac7af51276032332fd6a156ad03a1cc2ece30ac28f4adbcb1132d2792c520f6ab4baaeba453a9f4a88225902b7ba18e271a2bec302530552d03adb4b063ace4a5f328b2b1bfac75ac40a3089fe7addeb02ba55da2a1aa1928072b6826d52e202ed0ad8db059a083aca3a36b2d4a2d172c592c3231f2a570ab6aae37314c3328ab38aa18a90ca85232ccac35225215d730372f102bd32ffa2a3ba8d12e09308c319530752ac82ac8313e307a30602e3a2ecd30e22ea62ee52d172e33a8962adc97642417ad67281731f62d75a5f9b136
17 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CNN-FPGA
2 |
3 | 基于FPGA的cifar-10二维卷积识别任务
4 |
5 | ## Background - 背景
6 |
7 | > 本项目为基于FPGA的二维卷积cifar-10识别任务。
8 | >
9 | > 参考项目链接:https://github.com/omarelhedaby/CNN-FPGA. 我们主要的工作是针对这个开源项目进行补充和修改,并适配到我们的任务上(如果是MNIST也没问题)。
10 |
11 | ## Introduction - 项目简介
12 |
13 | ### 技术路线
14 |
15 |
16 |

17 |
18 |
19 | 如上图所示,我们的技术路线从工作内容来说,一共分为五部分:需求定义、数据准备、模型构建、verilog编写、工程部署。
20 |
21 | 1. 在**需求定义**阶段,我们的主要任务就是确定整个小组的任务目标:针对输入为彩色图像的多对象分类任务仿真。
22 | 2. 基于我们的任务需求,在**数据准备**阶段,我们小组要做的事情就是确定要使用的数据集,并对数据集进行清洗、量化等一系列预处理工作,使数据集能为我们的模型所用。
23 | 3. 准备好数据集后就是**模型构建**阶段,这一阶段主要是针对分类任务的算法层面实现,因此我们工作的发力点在于训练一个合适的神经网络模型,并通过蒸馏、剪枝、权重量化等操作使模型转变为更适合部署在芯片上的状态。
24 | 4. 随后就是模型的**Verilog仿真**实现,在这一阶段我们的主要任务就是学习Verilog的使用,并在确定合适的开发方式后将神经网络模型仿真实现。
25 | 5. 最后一步就是在得到具体芯片的情况下,尝试将我们的仿真结果**上板烧录**,转化为可以实际应用的工程成果。
26 |
27 | ### 网络结构
28 |
29 | 下图展示了我们模型的结构,针对cifar-10数据集中的输入,进行多轮操作,得到最终的分类结果。
30 |
31 |
32 |

33 |
34 |
35 | 更具体的参数设置如下所示(激活函数使用ReLu):
36 |
37 |
38 |

39 |
40 |
41 | ### 识别结果
42 |
43 | | 模型名称 | 精度 |
44 | | :---------------------: | :--------: |
45 | | teacher模型(resnet) | 0.947 |
46 | | student模型(lenet) | 0.674 |
47 | | 非蒸馏lenet | 0.526 |
48 | | 量化后lenet(仿真结果) | 0.55 |
49 |
50 | 最终的结果如上表所示,由于初学的原因,所以没有尝试使用verilog搭建更复杂的网络。因此,我们选择基于resnet模型进行蒸馏,以此来让lenet-5学到更好的结果。那么由于数据量化会损失一定精度,最终基于FPGA的CNN识别cifar-10的准确率到达55%。
51 |
52 | ## Our Work - 我们的工作
53 |
54 | 首先说明为什么参考[这个项目](https://github.com/omarelhedaby/CNN-FPGA):
55 |
 56 | 1. 源码采用层次化、模块化设计,方便阅读和理解,对小白极其友好;
57 | 2. 提供了很多写好的代码模块;
58 | 3. B站up主推荐。
59 |
60 | 然后说说我们做了哪些工作:
61 |
62 | 1. 原项目目标为手写数字识别,我们进一步拓展,研究其在cifar-10数据集上的效果;
63 | 2. 据此,我们基于pytorch设计了网络,并使用蒸馏得到更好的结果;
64 | 3. 删除第三层卷积;
65 | 4. 增加了一层全连接;
66 | 5. 在卷积层使用了relu替代tanh(自编代码);
67 | 6. 使用maxpool替代avgpool(自编代码);
68 | 7. 修改了源码中的一些错误(如reset命名错误,数据位宽错误等);
69 | 8. 改变了全连接层的输入维度、输出维度;
70 | 9. 编写了卷积层的testbench,并通过了仿真;
 71 | 10. 自编16转32位转换器以及对应testbench代码;
72 | 11. 原项目独立编写了卷积和全连接,我们将二者合到了一起;
73 | 12. 上板进行测试;
74 | 13. 编写了中文注释,方便阅读;
75 | 14. 删除了softmax;
 76 | 15. 提供了pytorch的训练源码、模型以及量化代码。
77 |
78 | ## Module Description - 模块说明
79 |
80 | > *更多有关其中的计算说明详见**技术报告.pdf***
81 |
82 | ### integrationConv
83 |
84 | **说明:**
85 |
86 | 卷积模块,对模型中的卷积部分(包含激活、池化)进行仿真,对应代码中的integrationConvPart.v。
87 |
88 | **可配置参数:**
89 |
90 | | 名称 | 说明 | 默认值 |
91 | | :--------: | :--------------: | :----: |
92 | | DATA_WIDTH | 数据位宽 | 16 |
93 | | ImgInW | 输入图像的宽度 | 32 |
94 | | ImgInH | 输入图像的高度 | 32 |
95 | | Conv1Out | 第一层卷积输出 | 28 |
96 | | MvgP1out | 最大池化输出 | 14 |
97 | | Conv2Out | 第二层卷积输出 | 10 |
98 | | MvgP2out | 最大池化输出 | 5 |
99 | | Kernel | 卷积核的大小 | 5 |
100 | | DepthC1 | 第一层卷积核数量 | 6 |
101 | | DepthC2 | 第二层卷积核数量 | 16 |
102 |
103 | **输入输出:**
104 |
105 | | 名称 | 类型 | 说明 | 长度 |
106 | | :---------: | :----: | :----------------------------------------------------------: | :----------------------------------------------: |
107 | | CNNinput | input | 输入的图像,数据从左上至右下排列,每一个像素值用半精度浮点数表示 | ImgInW × ImgInH × DATA_WIDTH |
108 | | Conv1F | input | 第一层卷积核权值,从第一个卷积核左上开始,到最后一个卷积核右下,每一个值用半精度浮点数表示 | Kernel × Kernel × DepthC1× DATA_WIDTH |
109 | | Conv2F | input | 第二层卷积核权值,从第一个卷积核左上开始,到最后一个卷积核右下,每一个值用半精度浮点数表示 | DepthC2 × Kernel × Kernel × DepthC1 × DATA_WIDTH |
110 | | iConvOutput | output | 输出的特征图 | MvgP2out × MvgP2out × DepthC2 × DATA_WIDTH |
111 |
112 | ### Relu
113 |
114 | **说明:**
115 |
116 | 激活函数单元1,relu激活函数。对应代码中的UsingTheRelu16.v和activationFunction.v(二者数据位宽不同,其实有冗余,懒得改了)
117 |
118 | **可配置参数:**
119 |
120 | | 名称 | 说明 | 默认值 |
121 | | :----------: | :------: | :----: |
122 | | DATA_WIDTH | 数据位宽 | 16 |
123 | | OUTPUT_NODES | 输出节点数 | 32     |
124 |
125 | **输入输出:**(每层不同,这里仅举例)
126 |
127 | | 名称 | 类型 | 说明 | 长度 |
128 | | :-------: | :----: | :------: | :---------------------------: |
129 | | input_fc | input | 输入特征 | Conv1Out × Conv1Out × DepthC1 |
130 | | output_fc | output | 输出特征 | Conv1Out × Conv1Out × DepthC1 |
131 |
132 | ### MaxPoolMulti
133 |
134 | **说明:**
135 |
136 | 最大池化模块,可以对输入进行最大池化运算。
137 |
138 | **可配置参数:**
139 |
140 | | 名称 | 说明 | 默认值 |
141 | | :--------: | :----------: | :----: |
142 | | DATA_WIDTH | 数据位宽 | 16 |
143 | | D | 数据通道 | 32 |
144 | | H | 输入特征高度 | 28 |
145 | |     W      | 输入特征宽度 |   28   |
146 |
147 | **输入输出:**(每层不同,这里仅举例)
148 |
149 | | 名称 | 类型 | 说明 | 长度 |
150 | | :------: | :----: | :------: | :----------------------------: |
151 | | apInput | input | 输入特征 | H × W × D × DATA_WIDTH |
152 | | apOutput | output | 输出特征 | (H/2) × (W/2) × D × DATA_WIDTH |
153 |
154 | ### ANNfull
155 |
156 | **说明:**
157 |
158 | 全连接模块,对模型中的全连接部分(包含激活)进行仿真,对应代码中的ANNfull.v。
159 |
160 | **可配置参数:**
161 |
162 | | 名称 | 说明 | 默认值 |
163 | | :------------: | :--------------: | :----: |
164 | | DATA_WIDTH | 数据位宽 | 32 |
165 | | INPUT_NODES_L1 | 第一层输入节点数 | 400 |
166 | | INPUT_NODES_L2 | 第二层输入节点数 | 120 |
167 | | INPUT_NODES_L3 | 第三层输入节点数 | 84 |
168 | | OUTPUT_NODES | 输出节点数 | 10 |
169 |
170 | **输入输出:**
171 |
172 | | 名称 | 类型 | 说明 | 长度 |
173 | | :--------: | :----: | :-------------------------------------------------: | :-------------------------: |
174 | | input_ANN | input | 全连接层的输入,用单精度浮点数表示 | DATA_WIDTH × INPUT_NODES_L1 |
175 | | output_ANN | output | 预测的标签值,cifar-10为10分类,需要用4位二进制表示 | 4 |
176 |
177 | ### IEEE162IEEE32
178 |
179 | **说明:**
180 |
181 | 精度转换模块,将16位宽浮点数转换为32位浮点数,对应代码中的IEEE162IEEE32.v。
182 |
183 | **可配置参数:**
184 |
185 | | 名称 | 说明 | 默认值 |
186 | | :----------: | :----------: | :----: |
187 | | DATA_WIDTH_1 | 输入数据位宽 | 16 |
188 | | DATA_WIDTH_2 | 输出数据位宽 | 32 |
189 | | NODES | 输出节点数 | 400 |
190 |
191 | **输入输出:**
192 |
193 | | 名称 | 类型 | 说明 | 长度 |
194 | | :-------: | :----: | :----------------------------------------: | :------------------: |
195 | | input_fc | input | 精度转换模块的输入,数据用半精度浮点数表示 | DATA_WIDTH_1 × NODES |
196 | | output_fc | output | 精度转换模块的输出,数据用单精度浮点数表示 | DATA_WIDTH_2 × NODES |
197 |
198 | ### LeNet
199 |
200 | **说明:**
201 |
202 | 整个网络模块,包含两层卷积和三层全连接,对应源码中的Lenet.v.
203 |
204 | **可配置参数:**
205 |
206 | | 名称 | 说明 | 默认值 |
207 | | :----------: | :--------------: | :----: |
208 | | DATA_WIDTH_1 | 卷积层数据位宽 | 16 |
209 | | DATA_WIDTH_2 | 全连接层数据位宽 | 32 |
210 | | ImgInW | 输入图像的宽度 | 32 |
211 | | ImgInH | 输入图像的高度 | 32 |
212 | | Kernel | 卷积核的大小 | 5 |
213 | | MvgP2out | 最大池化输出 | 5 |
214 | | DepthC1 | 第一层卷积核数量 | 6 |
215 | | DepthC2 | 第二层卷积核数量 | 16 |
216 |
217 | **输入输出:**
218 |
219 | | 名称 | 类型 | 说明 | 长度 |
220 | | :---------: | :----: | :----------------------------------------------------------: | :------------------------------------------------: |
221 | | CNNinput | input | 输入的图像,数据从左上至右下排列,每一个像素值用半精度浮点数表示 | ImgInW × ImgInH × DATA_WIDTH_1 |
222 | | Conv1F | input | 第一层卷积核权值,从第一个卷积核左上开始,到最后一个卷积核右下,每一个值用半精度浮点数表示 | Kernel × Kernel × DepthC1× DATA_WIDTH_1 |
223 | | Conv2F | input | 第二层卷积核权值,从第一个卷积核左上开始,到最后一个卷积核右下,每一个值用半精度浮点数表示 | DepthC2 × Kernel × Kernel × DepthC1 × DATA_WIDTH_1 |
224 | | LeNetoutput | output |                         预测的标签值                         |                         3                          |
225 |
226 | ------
227 |
228 | ## Requirements - 必要条件
229 |
230 | - Windows
231 | - python3.7 or up
232 | - pytorch
233 | - vivado
234 |
235 | ## Usage - 使用方法
236 |
237 | 1. 下载本仓库
238 |
239 | ```
240 | mkdir CNN-FPGA
241 | cd ./CNN-FPGA
242 | git clone https://github.com/Robin-WZQ/CNN-FPGA.git
243 | ```
244 |
245 | 2. 训练模型
246 |
247 | > 已提供训练好的模型。
248 |
249 | ```
250 | cd ./cifar_source_torch
251 | python train.py
252 | python distill.py
253 | python save_params.py
254 | ```
255 |
256 | 3. 量化并保存
257 |
258 | > 如果用自己训练好的模型,就将pt文件移动到quantification文件夹下。
259 | >
260 | > 修改ANNfull.v和Lenet_tb.v中的权重路径,我们提供了我们模型训练好的权重文件,详见weight文件夹中。
261 |
262 | ```
263 | # 权重量化
264 | python quantification_para.py
265 | # 输入图像量化
266 | python quantification_img.py
267 | ```
268 |
269 | 4. 运行仿真
270 |
271 | 打开CNN-FPGA-Vivado,在 vivado 里,运行LeNet_tb的simulation,即可得到结果。
272 |
273 | 一开始为9,运行约1~2分钟之后变为预测的数字。
274 |
275 |
276 |

277 |
278 |
279 |
280 | ## Code Tree - 文件代码树
281 |
282 | > 文件及对应说明如下所示
283 |
284 | |cifar_source_torch
285 |
286 | ----|cifar-source # 源代码文件夹
287 |
288 | --------|cifar-10-python # 数据集文件夹
289 |
290 | --------|res # 模型存储文件夹
291 |
292 | --------|distilled_lenet5_best.pt # 蒸馏得到的Lenet5模型参数文件
293 |
294 | --------|train.py # Teacher model训练代码
295 |
296 | --------|distill.py # 模型蒸馏训练代码
297 |
298 | --------|models.py # 模型结构代码
299 |
300 | --------|save_params.py # 参数导出代码
301 |
302 | --------|test.py # 测试代码
303 |
304 | --------|Lenet5_parameters_cifar.txt # 导出模型参数文件
305 |
306 | ----|README.md # 说明文件
307 |
308 | |CNN-FPGA-Vivado # 包含testbench和源码,以及整个工程文件
309 |
310 | ----|CNN-FPGA-Vivado.src # 项目源码
311 |
312 | --------|sim_1/new # testbench源码
313 |
314 | --------|sources_1/new # 工程源码
315 |
316 | --------|CNN-FPGA-Vivado.xpr # 项目工程
317 |
318 | |quantification
319 |
320 | ----|cifar-10-python # 数据集
321 |
322 | ----|distilled_lenet5_best.pt # 训练好的模型
323 |
324 | ----|input_pic2_label8.txt # 数据转换例子
325 |
326 | ----|quantification_img.py # 输入图像量化代码
327 |
328 | ----|quantification_para.py # 模型权重量化代码
329 |
330 | |weight
331 |
332 | ----|classifier.txt # 全连接第3层权重
333 |
334 | ----|fc1.txt # 全连接第1层权重
335 |
336 | ----|fc2.txt # 全连接第2层权重
337 |
338 | ----|layer1.txt # 卷积第1层权重
339 |
340 | ----|layer2.txt # 卷积第2层权重
341 |
342 |
343 | ## LICENSE
344 | MIT
345 |
346 |
--------------------------------------------------------------------------------
/weight/classifier.txt:
--------------------------------------------------------------------------------
1 | be4b4c87
2 | 3ceefe79
3 | 3df8e9db
4 | bd516a79
5 | be7c7673
6 | be152151
7 | be1fd5dc
8 | 3dc65057
9 | 3e7ef06a
10 | 3e0511a1
11 | 3de60a14
12 | 3cb9cbe3
13 | 3da85904
14 | bd1fdd59
15 | bc6086cc
16 | 3de90085
17 | bbbc85b3
18 | 3d7e6d9b
19 | bd428e8a
20 | bec021ff
21 | be696c34
22 | bdd42c08
23 | 3da58f4d
24 | 3d01d170
25 | bdb3b2e8
26 | 3dd2bdab
27 | bcd0b617
28 | 3e499ede
29 | be2f5533
30 | 3e063bbe
31 | bb923bd1
32 | 3f12aa47
33 | bd943697
34 | 3dc6f077
35 | 3e8366f0
36 | bccf3a32
37 | bec2e4a9
38 | bde4fd24
39 | bec128cd
40 | bdeea901
41 | 3e39a185
42 | be1b9092
43 | 3dfbd8bd
44 | be1fefde
45 | be614541
46 | be4667ac
47 | be84c283
48 | befa1750
49 | be15fd73
50 | 3f287d43
51 | 3e3ddcab
52 | 3e3d35de
53 | be445548
54 | 3cfb6877
55 | 3dc37e97
56 | bd29e38d
57 | be942f6f
58 | 3e1b3cd1
59 | bda955d9
60 | 3e0e0536
61 | beb2bdc0
62 | 3e8f994e
63 | 3d94386a
64 | bdc27877
65 | bd1e6e31
66 | be51c476
67 | 3e209b51
68 | 3e184ea3
69 | 3d9694ad
70 | be2a3c10
71 | 3d887726
72 | 3e23fc64
73 | bdf36936
74 | be7cfc6b
75 | 3dff1e27
76 | bd00db19
77 | 3e8af1d0
78 | be101818
79 | be9cad79
80 | 3dcc0c18
81 | bea2cfab
82 | be9b5479
83 | 3e1a1da7
84 | 3d34f0d8
85 | be010c8b
86 | 3e8178e1
87 | bdf3afc6
88 | 3d357e41
89 | beeaaf20
90 | 3e076c98
91 | be463c95
92 | 3e0b3eae
93 | bd1365a3
94 | 3e172c3e
95 | be1e9071
96 | 3ddbbe6b
97 | bc42e3cf
98 | 3e213d16
99 | be15b1d2
100 | 3e0bf2a7
101 | be7987c6
102 | bef632f0
103 | 3dbb129a
104 | 3de9d764
105 | 3d442dfb
106 | 3e5610e2
107 | be5ff38e
108 | be47426f
109 | 3d9da125
110 | 3e6f0e43
111 | 3cb3be23
112 | 3c78f19a
113 | bf0ed667
114 | 3def653e
115 | 3e9fde29
116 | 3c1a8b87
117 | be4a1648
118 | be715a24
119 | 3d11f26c
120 | 3ed57b71
121 | be0700d5
122 | bed43dd6
123 | 3e02fd20
124 | 3dc0c7c3
125 | be9a4225
126 | bdbf4c57
127 | 3e2529be
128 | bd390ca8
129 | 3e5fffc4
130 | 3e4da289
131 | bededc6f
132 | 3eb2c667
133 | 3ed8901c
134 | beb0cc27
135 | 3de257e4
136 | bea91c15
137 | bcbf7298
138 | bd4ad9ab
139 | 3f29a3c3
140 | be17b258
141 | 3ddb49d5
142 | 3d9b97f2
143 | 3e8d7fd4
144 | be6608f8
145 | beebb0f6
146 | bb9d5783
147 | 3e0254fd
148 | bb97b60d
149 | 3db2105b
150 | be2ddc83
151 | 3db5d7ac
152 | bf022625
153 | 3dc6fa45
154 | 3e07d6b1
155 | 3d6f63ee
156 | be2c7eb7
157 | beb83a3f
158 | 3dc3cf10
159 | b8dad2c0
160 | 3d5cc908
161 | 3d00f10c
162 | 3ddce94e
163 | bdab0f37
164 | be27023c
165 | be3a13c3
166 | 3dee60d1
167 | 3da511ee
168 | be38da03
169 | 3d64197f
170 | 3e229db8
171 | 3cc2713f
172 | bf24c0a0
173 | 3c2cc42e
174 | beb2bdf6
175 | 3dcb6672
176 | becbd7f9
177 | beee7517
178 | bf17f1b3
179 | 3ea41113
180 | 3cd11f7b
181 | 3daaf53a
182 | be68df39
183 | bd685ae9
184 | bdb6200f
185 | 3e36f91b
186 | 3e18ae03
187 | 3df5ddc9
188 | 3e4011af
189 | bdeec2ed
190 | beacf0cd
191 | 3d73b97b
192 | be84213f
193 | be37391b
194 | bdad8007
195 | 3e534556
196 | be910a89
197 | 3da5e232
198 | 3df2b739
199 | bee20776
200 | 3e3105db
201 | becd7c03
202 | be04f3d0
203 | be7c8833
204 | 3e346be8
205 | 3dfca8de
206 | 3e6c044c
207 | 3dabf6e6
208 | bea13ba6
209 | 3e84ea7a
210 | 3e25b1b6
211 | 3e631f2c
212 | be05668d
213 | bea00656
214 | 3cb90908
215 | bc26cf0c
216 | bd758caa
217 | 3db27476
218 | 3e371835
219 | be84ace3
220 | 3e3fb6e6
221 | 3e96401e
222 | 3e7ca2f3
223 | beb96dcb
224 | bdd33281
225 | be9aa7cb
226 | 3e564dc9
227 | be8507de
228 | 3e14228a
229 | be0b4ff8
230 | be9a50ba
231 | bd022c3a
232 | be3d4245
233 | 3e49874a
234 | be8639d2
235 | 3d947469
236 | 3e6aa7f3
237 | be63a515
238 | 3ddb556f
239 | 3d55fe41
240 | bee033f0
241 | bd295b2d
242 | bd7a1934
243 | bd292918
244 | 3cb1a640
245 | bd9a7e1b
246 | 3c1f44ac
247 | 3d6fc863
248 | bb8e48fd
249 | 3d7e8b9d
250 | bd32fb2e
251 | 3ec9b0b6
252 | 3d1c0562
253 | 3ef564e0
254 | bd9f0a3a
255 | bf017410
256 | be7b615c
257 | 3cb118e0
258 | be2a541d
259 | be8cbcb7
260 | 3e4bdfba
261 | be868cf8
262 | 3c8b82b3
263 | be6829d6
264 | 3dd14a87
265 | 3ddd8634
266 | 3bc7b5f8
267 | 3d813b0f
268 | 3db10327
269 | 3e241e70
270 | be89dede
271 | 3e0fc28a
272 | 3e0c10ee
273 | 3d9daaa2
274 | bd58100f
275 | be372728
276 | 3e0456e5
277 | 3c7c28e2
278 | be92c5a5
279 | 3e30bfad
280 | 3deebed5
281 | bec19155
282 | be8fb393
283 | bd63b4ef
284 | 3d4c7f5c
285 | 3e6a85f1
286 | 3ddfb626
287 | 3e2f3b7d
288 | 3b452693
289 | 3e3a6b43
290 | be4794bb
291 | 3c974dac
292 | 3e75c7b3
293 | bec124d1
294 | ba3cd6c7
295 | bdc6be8b
296 | be757c79
297 | 3d1487aa
298 | bd7b960c
299 | b9ce92bf
300 | 3e3512af
301 | bdba53e5
302 | 3e1dd6df
303 | bf082324
304 | bde256eb
305 | be50d430
306 | 3e2b8aaf
307 | 3e64f3d9
308 | be0efc9a
309 | 3e86f412
310 | b91f4290
311 | bec413de
312 | 3e842537
313 | 3de4e35d
314 | be2ee674
315 | 3e6f5d89
316 | bafc3a83
317 | 3dccea55
318 | 3d01fee0
319 | 3cb744e0
320 | be4aace8
321 | 3e62696a
322 | beffa57f
323 | be278da9
324 | 3e13a2cb
325 | bdd2d745
326 | bd8df180
327 | 3d6113fc
328 | bda807ae
329 | 3e81e2d7
330 | 3df02bd6
331 | 3daca07e
332 | 3dc02455
333 | bb9bb816
334 | 3cec78d7
335 | bdffcb9d
336 | be297c67
337 | bd456930
338 | be45293a
339 | 3dc2ba39
340 | 3da920b7
341 | bf123b3d
342 | 3ecb4f51
343 | 3e38ffdd
344 | bcf4321a
345 | bd9e04bb
346 | bd76bdbe
347 | be54fe8d
348 | be8c2110
349 | be5d8de4
350 | 3e627536
351 | 3db1ffa3
352 | bdc79259
353 | bd94a564
354 | be531384
355 | 3dc975c9
356 | be19272d
357 | beabe813
358 | 3d8048a6
359 | 3e653772
360 | 3dd9fb6e
361 | be53ae36
362 | 3e18fc95
363 | be9194f5
364 | 3d836c3b
365 | 3e19f681
366 | 3c1057df
367 | be0530a6
368 | 3d2139fc
369 | be9afa4c
370 | 3e252262
371 | be5ab526
372 | bd7c6756
373 | bd852e64
374 | bdf6fb51
375 | be489f5c
376 | 3dba380b
377 | be1d80e8
378 | 3e4e2ca4
379 | bda10ebe
380 | 3e511e4c
381 | 3e2d41d8
382 | bf15df98
383 | 3dcd14bf
384 | 3d811548
385 | 3e114d15
386 | 3dd62ecf
387 | bec7b3ea
388 | 3da8dc7d
389 | bea6d31e
390 | be4c5667
391 | bf075800
392 | bf2216b5
393 | 3d0c6ee5
394 | bf01c228
395 | 3f3de054
396 | beeebbfb
397 | bef02729
398 | 3dd22729
399 | 3e33d67a
400 | 3d4e790c
401 | bd37c3f3
402 | 3c781451
403 | 3d88df1c
404 | bcc66f80
405 | be0b2d2d
406 | 3d56915a
407 | bd7a7871
408 | 3d917d6a
409 | bba9f4f3
410 | bd8e512c
411 | 3c43c976
412 | be896b18
413 | 3d130684
414 | bd58c972
415 | bd8fc565
416 | bed598a5
417 | bd1964e0
418 | 3ea9b437
419 | 3e02397f
420 | 3d0d0a2f
421 | bda64064
422 | 3d124be7
423 | bdf5bc2a
424 | bba43eb5
425 | bdb57cdf
426 | 3d089adf
427 | bcf86830
428 | bdac9296
429 | be078e1c
430 | bded709e
431 | 3db38127
432 | befd1644
433 | 3e89326d
434 | be6f71db
435 | 3e801a82
436 | bdd28715
437 | 3e4b8607
438 | bd8c71ba
439 | 3c07a443
440 | bf0aa91a
441 | beafb748
442 | be303398
443 | bf0c3347
444 | bf03d512
445 | be96f413
446 | 3d992d1d
447 | be7b32b1
448 | 3f6ac1d0
449 | bdce6105
450 | beae3e35
451 | bdb182d3
452 | be0147c9
453 | 3e0ff084
454 | 3e024e99
455 | bb4478ee
456 | 3e05934e
457 | bee1e00d
458 | 3e2404aa
459 | 3e8e944d
460 | be97abc6
461 | 3eb396f0
462 | 3e2a4de7
463 | be46e320
464 | 3e2ec41c
465 | bec7f8b1
466 | b9ad51d8
467 | be641b66
468 | 3db9dd07
469 | be79156a
470 | 3e306831
471 | 3c5e7144
472 | 3cd0523c
473 | 3daa4266
474 | 3e408f95
475 | be9a4b73
476 | beac3908
477 | 3e230a6a
478 | be6ce08d
479 | 3e2e01e4
480 | bc8b58fa
481 | 3e9272c6
482 | be13870e
483 | bf417bc2
484 | bd72f224
485 | 3e37c4b7
486 | 3e95a655
487 | bdd1472f
488 | be75e993
489 | 3e8bdcfb
490 | be179790
491 | be98c6c0
492 | 3e913774
493 | be28c58a
494 | 3da79a04
495 | bd6b28ea
496 | 3e595881
497 | be6adfa5
498 | 3dff4102
499 | 3cf5eab0
500 | bed3c62c
501 | bda1368c
502 | 3e33f94a
503 | 3e13618b
504 | bde4b8dd
505 | bd31ed46
506 | be23c22a
507 | 3d9890b0
508 | 3c55bdd5
509 | 3e354ab9
510 | 3e513dea
511 | 3e683c1f
512 | be3a9377
513 | be1e2ef2
514 | 3dbb5490
515 | 3e3ec1e4
516 | bd8e1b73
517 | bd8051aa
518 | be874d92
519 | 3e526bd9
520 | bec7f2e4
521 | 3f0ac195
522 | 3cd00bf1
523 | be723578
524 | be6d8891
525 | be39fd6d
526 | 3eaed145
527 | 3d8d290b
528 | be3aa2b9
529 | bf0910f5
530 | 3de292c5
531 | bd86f9fa
532 | 3da2d50f
533 | 3c6e107e
534 | 3e2a2f85
535 | 3d45a7fc
536 | be09a22c
537 | 3d557a17
538 | bd207464
539 | beb131d0
540 | 3d6a6cde
541 | be02bc20
542 | be840e79
543 | 3de47749
544 | 3dec05c8
545 | be1ddd41
546 | 3d7c7ba0
547 | bd16a2f6
548 | 3df58cfb
549 | 3d872d1e
550 | be772587
551 | be7e761e
552 | 3d8072c8
553 | be12f833
554 | bd9c8ddc
555 | 3e0d47b2
556 | 3de284cb
557 | be0c68c8
558 | bd991fab
559 | be2e8990
560 | 3e5fad86
561 | 3dfd38f0
562 | 3e5716f6
563 | 3dd6a5aa
564 | bd40cfc5
565 | be81b90a
566 | bd0c04c9
567 | beafd518
568 | be8fc398
569 | 3d030383
570 | bbd5d3a6
571 | 3e5d82bf
572 | 3d75bc4c
573 | 3d8e0492
574 | 3ccbd89f
575 | bec219c0
576 | 3dd65b11
577 | be8bdf5d
578 | 3cdd6662
579 | bdfe181e
580 | 3de9ef79
581 | 3e8bce8a
582 | 3f4620d9
583 | 3ecc825e
584 | be4e4500
585 | bec855b7
586 | be717dc0
587 | 3dff3742
588 | bf00e734
589 | be203a40
590 | 3ef00c92
591 | 3e500c9a
592 | 3e0b6dcb
593 | 3e300e61
594 | be120d4c
595 | 3ca17410
596 | be6df469
597 | 3a0232b6
598 | 3dff5fd2
599 | be1d1768
600 | 3d56c1f8
601 | bda5d7f3
602 | be915235
603 | 3e26758c
604 | 3d6c2061
605 | bc220df9
606 | 3d970f43
607 | bd1121c9
608 | 3d6fcde5
609 | bef7b796
610 | be0f0f76
611 | be373c40
612 | be5923b7
613 | 3e046f2c
614 | 3e84aa2b
615 | bd510266
616 | bdc342ca
617 | 3eb8c21d
618 | 3cc62193
619 | bf2b60c7
620 | be154a82
621 | be784795
622 | bee79fd5
623 | 3e005680
624 | 3deccaaa
625 | 3e6ef988
626 | 3d53a765
627 | bba5c484
628 | 3d8827eb
629 | be20d5ed
630 | bd0fc377
631 | 3da02278
632 | 3e51c5b5
633 | be184a16
634 | be938639
635 | be9f1e8c
636 | 3b01cd9e
637 | 3d7604f4
638 | be028a83
639 | be31f12a
640 | 3e36bc36
641 | beb4bcfd
642 | 3e131049
643 | 3e793c56
644 | 3e61eb48
645 | 3de59df2
646 | 3db20a1e
647 | be6feeaf
648 | be59c316
649 | be298e65
650 | bd251738
651 | 3cf8911e
652 | be7745b2
653 | be317190
654 | 3df12dbb
655 | be8d2474
656 | 3e2f8e5b
657 | bdfc4a22
658 | bae25d72
659 | 3ee8aac5
660 | 3ba0ace6
661 | bdb619f2
662 | bca4e24d
663 | 3cc4f189
664 | bd4c6617
665 | bc36a791
666 | bd96eba2
667 | bdcbe048
668 | 3cb2f8a9
669 | be02fb45
670 | 3d78ed93
671 | 3e2ba221
672 | 3e154be9
673 | 3e26899b
674 | 3c1578d9
675 | 3e07adcb
676 | bea6d642
677 | be3b146c
678 | bcac56fc
679 | 3d27e08a
680 | 3e10be2d
681 | bdb35f49
682 | 3e0aea64
683 | be6e3719
684 | 3dadd721
685 | 3d939025
686 | bde3522e
687 | 3de81125
688 | 3cec8dd2
689 | 3d96360f
690 | 3d9b22ee
691 | 3cf7dcf4
692 | bd9d3632
693 | bd710481
694 | bd1fffa7
695 | 3d418a85
696 | bdb1bddf
697 | 3d491f9a
698 | bdaf92c8
699 | 3d8c79b9
700 | bd9581a6
701 | 3ded73be
702 | bdbc119e
703 | bd928435
704 | 3e160b82
705 | be815055
706 | 3e39de7f
707 | 3edd01fa
708 | be9018ea
709 | bebe0e94
710 | be9d4756
711 | 3dd50256
712 | be2936ca
713 | 3e23747f
714 | 3c9000f3
715 | 3e58981c
716 | 3e206f03
717 | 3d213430
718 | be7008ea
719 | be67bd26
720 | be7c531d
721 | 3e6c7818
722 | 3d9d8956
723 | 3cf9b7b5
724 | be18dd45
725 | 3d4aa977
726 | 3db78579
727 | 3ea64643
728 | bdbea10e
729 | bee9ff3e
730 | be1943f0
731 | befc54ee
732 | 3dd269c5
733 | 3de7b8e5
734 | 3d95a514
735 | be61a55e
736 | 3e91a20d
737 | 3ed6eba3
738 | be214629
739 | be86d795
740 | be98adf7
741 | 3e4abc2d
742 | be9797e7
743 | 3d916d0c
744 | bc82ee4b
745 | bb7d6c48
746 | be1b5874
747 | 3d9c3f24
748 | 3e84f4b0
749 | be0e2f3b
750 | be1a3d39
751 | be23e32a
752 | 3d15090c
753 | 3e26cffe
754 | be0f3a2c
755 | 3d05c102
756 | bc40f081
757 | be3362a4
758 | be468222
759 | 3e1bfedc
760 | 3e014683
761 | 3c653949
762 | bd2a6cf5
763 | 3cf667f3
764 | bd4674df
765 | bdbf0234
766 | bbd7abf7
767 | 3d883bbd
768 | 3cbce24b
769 | 3d9b8e64
770 | be0594b2
771 | 3ed16a98
772 | bec4c752
773 | bdfccee3
774 | be52242a
775 | 3dec26f8
776 | be31bfdc
777 | 3e11fcbd
778 | 3de43c91
779 | bdddb3b7
780 | bd501b7a
781 | 3d658c68
782 | 3d39f1bd
783 | bda6ea6d
784 | 3e479229
785 | be14be18
786 | 3e058743
787 | 3e53f0a1
788 | bebb7016
789 | be14982a
790 | bcbe6125
791 | be2eb4d3
792 | be895188
793 | 3e8fcc5d
794 | be15cb99
795 | 3eb71a23
796 | 3cae6d87
797 | 3e5fab30
798 | be66b481
799 | be6312c5
800 | bedcc7db
801 | be07fbc6
802 | 3e173ce1
803 | 3dfc2c41
804 | 3e0e8938
805 | 3dcd4759
806 | bc2918f1
807 | bd895c7d
808 | bd39970b
809 | be48db34
810 | be748729
811 | 3dc04952
812 | 3dcd7d19
813 | bde91033
814 | 3e22dfd8
815 | be8436e4
816 | 3cda482a
817 | 3dca802e
818 | 3de91d60
819 | 3e07ddd1
820 | be170b5f
821 | 3c82551e
822 | 3e6919b0
823 | be584904
824 | bea5f53e
825 | bda35886
826 | be26dd2c
827 | 3f16a0ad
828 | be15e805
829 | be2cfbf3
830 | be3b0061
831 | 3d34edcf
832 | be02dbf3
833 | be9f1feb
834 | bea491a9
835 | 3e42a5c5
836 | 3e156671
837 | be3fdcae
838 | be266cb6
839 | bdd56ef6
840 | 3e06b589
841 |
--------------------------------------------------------------------------------
/CNN-FPGA-Vivado/CNN-FPGA-Vivado.xpr:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 |
154 |
155 |
156 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
172 |
173 |
174 |
175 |
176 |
177 |
178 |
179 |
180 |
181 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
197 |
198 |
199 |
200 |
201 |
202 |
203 |
204 |
205 |
206 |
207 |
208 |
209 |
210 |
211 |
212 |
213 |
214 |
215 |
216 |
217 |
218 |
219 |
220 |
221 |
222 |
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
241 |
242 |
243 |
244 |
245 |
246 |
247 |
248 |
249 |
250 |
251 |
252 |
253 |
254 |
255 |
256 |
257 |
258 |
259 |
260 |
261 |
262 |
263 |
264 |
265 |
266 |
267 |
268 |
269 |
270 |
271 |
272 |
273 |
274 |
275 |
276 |
277 |
278 |
279 |
280 |
281 |
282 |
283 |
284 |
285 |
286 |
287 |
288 |
289 |
290 |
291 |
292 |
293 |
294 |
295 |
296 |
297 |
298 |
299 |
300 |
301 |
302 |
303 |
304 |
305 |
306 |
307 |
308 |
309 |
310 |
311 |
312 |
313 |
314 |
315 |
316 |
317 |
318 |
319 |
320 |
321 |
322 |
323 |
324 |
325 |
326 |
327 |
328 |
329 |
330 |
331 |
332 |
333 |
334 |
335 |
336 |
337 |
338 |
339 |
340 |
341 |
342 |
343 |
344 |
345 |
346 |
347 |
348 |
349 |
350 |
351 |
352 |
353 |
354 |
355 |
356 |
357 |
358 |
359 |
360 |
361 |
362 |
363 |
364 |
365 |
366 |
367 |
368 |
369 |
370 |
371 |
372 | default_dashboard
373 |
374 |
375 |
376 |
--------------------------------------------------------------------------------