├── .gitattributes
├── .github
└── workflows
│ ├── macos.yaml
│ ├── static.yml
│ └── ubuntu.yml
├── .gitignore
├── README.md
├── README_EN.md
├── autodiff
├── dor.h
├── graph.h
├── mor.h
├── node.h
├── por.h
└── vectmath.h
├── conv_backward_test.cpp
├── conv_test.cpp
├── cuda_mat
├── add.cu
├── conv_cu_grad.cu
├── mat_grad.cu
└── matrix_cudadef.cu
├── edge_layer_unit_test.cpp
├── grad_edge
└── matrix_grad.h
├── index.html
├── install_diff
├── Makefile
├── bin
│ ├── ann
│ ├── gradient
│ ├── gradient_descent
│ ├── simple
│ └── speed
├── examples
│ ├── Makefile
│ └── src
│ │ ├── ann.cpp
│ │ ├── gradient.cpp
│ │ ├── gradient_descent.cpp
│ │ ├── simple.cpp
│ │ └── speed.cpp
└── root
│ ├── Makefile
│ ├── include
│ ├── dor 2.h
│ ├── dor.h
│ ├── graph 2.h
│ ├── graph.h
│ ├── mor 2.h
│ ├── mor.h
│ ├── node 2.h
│ ├── node.h
│ ├── por 2.h
│ ├── por.h
│ ├── vectmath 2.h
│ └── vectmath.h
│ └── src
│ ├── graph.cpp
│ └── node.cpp
├── logistic
├── .logistic_def.h.swp
├── logistic_def.cpp
└── logistic_def.h
├── main.cpp
├── matrix
├── .matrix_pro.h.swp
├── matrix_def.h
└── matrix_pro.h
├── picture
├── .DS_Store
├── 00.svg
├── 01.svg
├── 02.svg
├── Edge计算引擎.pdf
├── Edge计算引擎.xmind
├── WX20191119-105411@2x.png
├── WX20191119-125244@2x.png
├── apply_axis_0.png
├── apply_axis_1.png
├── autograd.jpg
├── cpu.svg
├── image-20200128154352842.png
├── image-20200418210521131.png
├── jabber.svg
├── logo.png
├── logo2.png
├── nerual_test1.png
├── path.png
├── processwire (1).svg
├── 啊.svg
├── 彩虹.svg
├── 方向.svg
├── 星月.svg
├── 火箭.svg
└── 生产流程.pdf
├── root
├── Makefile
├── include
│ ├── dor.h
│ ├── edgelayer.h
│ ├── graph.h
│ ├── mor.h
│ ├── node.h
│ ├── por.h
│ └── vectmath.h
├── obj
│ ├── graph.o
│ └── node.o
└── src
│ ├── graph.cpp
│ └── node.cpp
├── unit_test.cpp
├── update_conv.md
├── vector_add
└── welcome
├── 1.cpp
├── 2.txt
├── 3.txt
├── 4.txt
├── big.txt
├── ma
├── main
└── score_wel.cpp
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.html linguist-language=c++
--------------------------------------------------------------------------------
/.github/workflows/macos.yaml:
--------------------------------------------------------------------------------
1 | # This is a basic workflow to help you get started with Actions
2 |
3 | name: Macos-test
4 |
5 | # Controls when the action will run.
6 | on:
7 | # Triggers the workflow on push or pull request events but only for the main branch
8 | push:
9 | branches: [ master ]
10 | pull_request:
11 | branches: [ main ]
12 |
13 | # Allows you to run this workflow manually from the Actions tab
14 | workflow_dispatch:
15 |
16 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel
17 | jobs:
18 | # This workflow contains a single job called "build"
19 | build:
20 | # The type of runner that the job will run on
21 | runs-on: macos-latest
22 | if: "contains(github.event.head_commit.message, '[build]')"
23 |
24 | # Steps represent a sequence of tasks that will be executed as part of the job
25 | steps:
26 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
27 | - uses: actions/checkout@v2
28 |
29 | # Runs a single command using the runners shell
30 | - name: Run a one-line script
31 | run: echo Hello, Macos!
32 | - name: Run gcc and test the version
33 | run: gcc -v
34 | - name: lsa
35 | run: ls -a
36 | - name: install_diff dir
37 | run: ls -a
38 | working-directory: ./install_diff
39 | - name: make and install_1
40 | run: make
41 | working-directory: ./install_diff
42 | - name: make and install_2
43 | run: make install
44 | working-directory: ./install_diff
45 | - name: success info
 46 |         run: echo successfully installed autodiff!
47 | - name: build
48 | run: g++ main.cpp -o ma -lautodiff
49 | - name: build_conv_test
50 | run: g++ conv_test.cpp -o conv_test -lautodiff
51 | - name: run
52 | run: ./ma
53 | - name: run conv test
54 | run: pwd && ./conv_test
55 | - name: run neural network
56 | run: g++ main.cpp -o main -lautodiff && ./main
57 |
58 |
--------------------------------------------------------------------------------
/.github/workflows/static.yml:
--------------------------------------------------------------------------------
1 | name: Deploy static content to Pages
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | build:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - name: Checkout code
14 | uses: actions/checkout@v3
15 |
16 | - name: Build static content
17 | run: |
18 | # 你的构建命令,例如生成静态文件
19 | echo "Building static content..."
20 |
21 | - name: Upload artifact
22 | uses: actions/upload-pages-artifact@v2 # 使用 v2 版本
23 | with:
24 | name: static-content
25 | path: '.' # 上传构建后的静态文件目录
26 |
27 | - name: Deploy to GitHub Pages
28 | uses: actions/deploy-pages@v1 # 使用 v1 版本进行部署
--------------------------------------------------------------------------------
/.github/workflows/ubuntu.yml:
--------------------------------------------------------------------------------
1 | # This is a basic workflow to help you get started with Actions
2 |
3 | name: Ubuntu-test
4 |
5 | # Controls when the action will run.
6 | on:
7 | # Triggers the workflow on push or pull request events but only for the main branch
8 | push:
9 | branches: [ master ]
10 | pull_request:
11 | branches: [ main ]
12 |
13 | # Allows you to run this workflow manually from the Actions tab
14 | workflow_dispatch:
15 |
16 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel
17 | jobs:
18 | # This workflow contains a single job called "build"
19 | build:
20 | # The type of runner that the job will run on
21 | runs-on: ubuntu-latest
22 | if: "contains(github.event.head_commit.message, '[build]')"
23 |
24 |
25 | # Steps represent a sequence of tasks that will be executed as part of the job
26 | steps:
27 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
28 | - uses: actions/checkout@v2
29 |
30 | # Runs a single command using the runners shell
31 | - name: Run a one-line script
 32 |         run: echo Hello, Ubuntu!
33 | - name: Run gcc and test the version
34 | run: gcc -v
35 | - name: install_diff dir
36 | run: ls -a
37 | working-directory: ./install_diff
38 | - name: make and install_1
39 | run: make
40 | working-directory: ./install_diff
41 | - name: make and install_2
42 | run: make install
43 | working-directory: ./install_diff
44 | - name: success info
 45 |         run: echo successfully installed autodiff!
46 | - name: build
47 | run: g++ main.cpp -o ma -lautodiff
48 | - name: build_conv_test
49 | run: g++ conv_test.cpp -o conv_test -lautodiff
50 | - name: run
51 | run: ./ma
52 | - name: run conv test
53 | run: ./conv_test
54 | - name: run neural network
55 | run: g++ main.cpp -o main -lautodiff && ./main
56 |
57 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.o
2 | ma
3 | *.a
4 | conv_test
5 | .DS_Store
6 | test.cpp
7 | .vscode
8 | opencv-4.6.0/*
9 | CMakeFiles/
10 | CMakeDownloadLog.txt
11 | opencv-4.6.0.zip
12 | main
13 | .gitignore
14 | .gitignore
15 | install_diff/bin/gradient_descent
16 | install_diff/bin/simple
17 | .gitignore
18 | install_diff/bin/ann
19 | install_diff/
20 | matrix_add
21 | .gitignore
22 | index.html
23 | tools/stream_0_first_frame_20241031_165519_879.jpg
24 | tools/stream_0_first_frame_20241031_165555_446.jpg
25 | tools/stream_0_first_frame_20241031_165645_741.jpg
26 | tools/your_library.so
27 | tools/test_capture
28 | tools/test_capture_frame
29 | tools/read_rtsp.py
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |

2 |
3 |
4 | # Edge-Computing-Engine
5 |
6 | An open-source scientific computing engine designed for matrix computation and machine learning tasks, with CUDA acceleration support.
7 |
8 | [](https://support.apple.com/downloads/macos)
9 | [](https://ubuntu.com/download/server)
10 | [](https://github.com/AllenZYJ/Edge-Computing-Engine/actions)
11 | [](https://raw.githubusercontent.com/tesseract-ocr/tesseract/master/LICENSE)
12 |
13 | # Overview
14 |
15 | Edge-Computing-Engine provides a flexible computational graph implementation that supports:
16 |
17 | 1. Forward propagation for neural network inference
18 | 2. Backward propagation for training via automatic differentiation
19 | 3. CUDA-accelerated operations for improved performance on NVIDIA GPUs
20 |
21 | The computational graph is built using nodes representing operations such as matrix multiplication, element-wise operations, and activation functions.
22 |
23 | # 1. GPU Features
24 |
25 | ### 1.1 Operation Types
26 |
27 | The framework supports the following operations:
28 |
29 | - **OP_INPUT**: Input nodes for data and parameters
30 | - **OP_ADD**: Element-wise addition
31 | - **OP_SUB**: Element-wise subtraction
32 | - **OP_MUL**: Element-wise multiplication
33 | - **OP_DIV**: Element-wise division
34 | - **OP_MATMUL**: Matrix multiplication
35 | - **OP_RELU**: ReLU activation function
36 | - **OP_SIGMOID**: Sigmoid activation function
37 | - **OP_TANH**: Tanh activation function (declared but not implemented)
38 | - **OP_SOFTMAX**: Softmax function (declared but not implemented)
39 |
40 | ### 1.2 Core Classes
41 |
42 | 1. **Matrix_CU**: A custom matrix class with CUDA support
43 | 2. **ComputeNode**: Represents a node in the computational graph
44 | 3. **ComputeGraph**: Manages the graph, including node creation, forward and backward propagation
45 |
46 | ### 1.3 CUDA Kernels
47 |
48 | The code includes several CUDA kernel functions:
49 |
50 | - **reluForwardKernel/reluBackwardKernel**: Forward and backward propagation for ReLU
51 | - **sigmoidForwardKernel/sigmoidBackwardKernel**: Forward and backward propagation for Sigmoid
52 | - **matmulForwardKernel/matmulBackwardAKernel/matmulBackwardBKernel**: Forward and backward propagation for matrix multiplication
53 | - **mulBackwardKernel**: Backward propagation for element-wise multiplication
54 | - **divBackwardKernel**: Backward propagation for element-wise division
55 | - **negGradKernel**: Helper kernel for computing negative gradients
56 |
57 |
58 | ### 1.4 CUDA Example
59 |
60 | ```cpp
61 | // Create a computational graph
62 | ComputeGraph graph;
63 |
64 | // Create input and weight nodes
65 | ComputeNode* input = graph.addInput(input_rows, input_cols, batch_size);
66 | ComputeNode* weight = graph.addInput(input_cols, output_cols);
67 |
68 | // Initialize with random values
69 | input->value.randomInit();
70 | weight->value.randomInit();
71 |
72 | // Build forward computation graph
73 | ComputeNode* fc1 = graph.addMatMul(input, weight); // Fully connected layer
74 | ComputeNode* output_node = graph.addReLU(fc1); // Apply ReLU activation
75 |
76 | // Perform forward propagation
77 | graph.forward();
78 |
79 | // Compute gradients via backward propagation
80 | graph.backward(output_node);
81 | ```
82 | ### 1.5 GPU_Usage
83 |
84 | ```shell
85 | root@5353:/backup# nvcc -arch=sm_80 -I/usr/local/cuda/include -L/usr/local/cuda/lib64 cuda_mat/mat_grad.cu -o main && ./main
86 | root@5353:/backup# ./main
87 | ====== Configuration ======
88 | Input matrix: 10000 x 1000
89 | Weight matrix: 1000 x 500
90 | Batch size: 32
91 | Activation: ReLU
92 |
93 | ====== CUDA Results ======
94 | Input first 5x5:
95 | 0.840188 0.394383 0.783099 0.79844 0.911647
96 | 0.45724 0.640368 0.717092 0.460067 0.54114
97 | 0.0860517 0.180886 0.426423 0.0470658 0.306902
98 | 0.0587587 0.0138811 0.622212 0.0391351 0.221551
99 | 0.456151 0.24875 0.0699601 0.742097 0.216935
100 | Weight first 5x5:
101 | 0.817311 0.0247108 0.0146763 0.939293 0.502699
102 | 0.604392 0.921644 0.675689 0.948712 0.58803
103 | 0.824073 0.747934 0.0448163 0.757354 0.858343
104 | 0.308744 0.659798 0.941692 0.255731 0.539655
105 | 0.89383 0.609149 0.799556 0.726306 0.640965
106 | Output first 5x5:
107 | 256.076 253.23 258.393 259.965 255.971
108 | 250.843 246.827 252.131 253.493 244.151
109 | 247.064 244.543 251.723 247.424 250.466
110 | 249.944 250.359 256.148 252.21 249.615
111 | 246.675 238.295 252.572 242.94 243.512
112 | Input gradient first 5x5:
113 | 244.628 251.341 255.388 250.4 249.891
114 | 244.628 251.341 255.388 250.4 249.891
115 | 244.628 251.341 255.388 250.4 249.891
116 | 244.628 251.341 255.388 250.4 249.891
117 | 244.628 251.341 255.388 250.4 249.891
118 | Weight gradient first 5x5:
119 | 159791 159791 159791 159791 159791
120 | 160010 160010 160010 160010 160010
121 | 160266 160266 160266 160266 160266
122 | 159899 159899 159899 159899 159899
123 | 159964 159964 159964 159964 159964
124 |
125 | ====== Performance ======
126 | CUDA computation time: 2201.45 ms
127 | ```
128 |
129 | ### 1.6 Requirements
130 |
131 | - CUDA-capable NVIDIA GPU (for GPU features)
132 | - CUDA Toolkit
133 | - C++11 or above
134 | - nvcc: NVIDIA (R) Cuda compiler driver
135 | Copyright (c) 2005-2024 NVIDIA Corporation
136 | Built on Thu_Mar_28_02:18:24_PDT_2024
137 | Cuda compilation tools, release 12.4, V12.4.131
138 | Build cuda_12.4.r12.4/compiler.34097967_0
139 |
140 |
141 | # 2. CPU Features
142 |
143 | - `read_csv(string &file_path)`: Reads a formatted file (csv) and returns a matrix with automatically calculated dimensions
144 | - Formatted file writing interface (similar to `pandas.to_csv`)
145 | - Broadcasting mechanism for matrices with padding interface
146 | - Fully connected layer forward and backward propagation interfaces with automatic differentiation support
147 | - Matrix differentiation and automatic differentiation interfaces
148 | - Various matrix operations:
149 | - `create(row,cols)`: Creates a matrix with specified dimensions
150 | - `add`, `subtract`, `mul`: Basic matrix operations
151 | - `get_T`: Matrix transposition
152 | - `flatten`: Returns a flattened array
153 | - `matrix_rs`: Matrix structure compression
154 | - `matrix_sum`, `matrix_mean`: Statistical operations
155 | - `iloc`: Matrix slicing
156 | - And many more
157 | - Activation functions (Relu)
158 | - Loss functions (MSE)
159 | - Neural network components (convolutional layers, pooling layers)
160 | ### 2.1 CPU_autodiff_Installation
161 |
162 | ```shell
163 | git clone git@github.com:AllenZYJ/Edge-Computing-Engine.git
164 | cd Edge-Computing-Engine/install_diff && make && make install
165 | ```
166 |
167 | Note: If you're not a root user, you may need to add sudo to the make install command.
168 |
169 | ### 2.2 CPU_Usage
170 |
171 | ```shell
172 | root@5353:/backup# g++ main.cpp -o main -lautodiff
173 | root@5353:/backup# ./main
174 | 0.000000+-0.000378*1.000000
175 | -0.000378+0.000000*1.000000
176 | ....
177 | -0.000378+0.000000*1.000000
178 | 0.000000+-0.000378*1.000000
179 | -0.000378+0.000000*1.000000
180 | -0.000378+0.000000*1.000000
181 | weight_1_grad:
182 | [
183 | -0.000378 -0.000378 -0.000378
184 | -0.000378 -0.000378 -0.000378
185 | -0.000378 -0.000378 -0.000378
186 | ]
187 | neraul end;
188 | ```
189 |
190 | # 3. Contributing
191 |
192 | Contributions are welcome. To contribute:
193 |
194 | 1. Fork the repository
195 | 2. Create a new branch: `git checkout -b my-new-feature`
196 | 3. Make your changes and commit them: `git commit -am 'Add some feature'`
197 | 4. Push your changes: `git push origin my-new-feature`
198 | 5. Submit a pull request
199 |
200 | Please ensure your code adheres to the existing style and passes tests before submitting.
201 |
202 | ## Stargazers over time
203 |
204 | [](https://starchart.cc/AllenZYJ/Edge-Computing-Engine)
205 |
206 | ## Contact
207 |
208 | - Email: zk@likedge.top or edge@ibooker.org.cn
209 | - Website: [Likedge](http://likedge.top/)
210 |
211 | ## License
212 |
213 | Edge-Computing-Engine is released under the Apache 2.0 License. See the [`LICENSE`](https://www.apache.org/licenses/LICENSE-2.0) file for details.
--------------------------------------------------------------------------------
/README_EN.md:
--------------------------------------------------------------------------------
1 |
2 | 
3 |
4 | Edge-Computing-Engine
5 | An open-source scientific computing engine
6 |
7 |
8 | Email: zk@likedge.top
9 |
10 | [](https://support.apple.com/downloads/macos)
11 | [](https://ubuntu.com/download/server)
12 | [](https://github.com/AllenZYJ/Edge-Computing-Engine/actions)
13 | [](https://raw.githubusercontent.com/tesseract-ocr/tesseract/master/LICENSE)
14 |
15 |
16 |
17 |
18 |
19 |
20 | ------
21 | Edge-Computing-Engine is an open-source scientific computing engine designed for matrix computation and machine learning tasks. It provides a wide range of matrix operations and neural network building blocks.
22 |
23 | Email: zk@likedge.top
24 |
25 |
26 |
27 | ## Features
28 |
29 | - `read_csv(string &file_path)`: Reads a formatted file (csv) and returns a matrix with automatically calculated dimensions.
30 | - Formatted file writing interface. Similar to `pandas.to_csv`.
31 | - Broadcasting mechanism for matrices with padding interface.
32 | - Fully connected layer forward and backward propagation interfaces with automatic differentiation support.
33 | - Matrix differentiation and automatic differentiation interfaces.
34 | - `save_txt(Matrix mid1,string path = "./",string delimiter = ",",string header="./")`: Reads the file header and writes formatted files. It supports writing matrix-type data, custom headers, writing file paths, and custom delimiters (default is ", ").
35 | - `create(row,cols)`: Creates a matrix with specified dimensions and initializes all elements to 0.
36 | - `move_ele(int &ele1, int &ele2)`: Changes the value of an element at a specific position.
37 | - `add(Matrix mid1, Matrix mid2, int flag = 1)`: Matrix addition operation with optional bitwise operation acceleration.
38 | - `subtract(Matrix mid1, Matrix mid2)`: Matrix subtraction operation.
39 | - `mul(Matrix mid1, Matrix mid2)`: Matrix multiplication operation.
40 | - `times_mat(int times,Matrix mid1)`: Scalar matrix multiplication.
41 | - `get_T(Matrix mid1)`: Matrix transposition operation.
42 | - `mul(matrix1,matrix2)`: Matrix product (complete mathematical definition).
43 | - `flatten(Matrix mid1)`: Returns a flattened array.
44 | - `matrix_rs(Matrix mid1,int rs_row,int rs_col)`: Matrix structure compression.
45 | - `matrix_sum(Matrix mid1)`: Matrix summation.
46 | - `matrix_mean(Matrix mid1)`: Matrix mean.
47 | - `apply(Matrix mid1,Matrix mid2,int axis = 0)`: Matrix concatenation.
48 | - `iloc(Matrix mid1,int start_x=0,int end_x=0,int start_y=0,int end_y=0)`: Matrix slicing.
49 | - `mul_simple(Matrix mid1,Matrix mid2)`: Element-wise matrix multiplication for machine learning applications.
50 | - `Relu`: Activation function matrix interface.
51 | - `MSE`: Mean squared error matrix interface.
52 | - Random weight matrix creation interface.
53 | - Convolutional neural network definition (including but not limited to convolution kernel, pooling layer definition, and custom loss interface).
54 |
55 | ## Requirements
56 |
57 | - C++11 or above.
58 |
59 | ## Installation
60 |
61 | - Clone the repository: `git clone git@github.com:AllenZYJ/Edge-Computing-Engine.git`
62 | - Build the project: `cd Edge-Computing-Engine/install_diff && make && make install`
63 |
64 | ## Usage
65 |
66 |
67 | ```shell
68 | g++ main.cpp -o main -lautodiff
69 | ```
70 |
71 | ```shell
72 | ./main
73 | ```
74 |
75 | ## Contributing
76 |
77 | Contributions to Edge-Computing-Engine are welcome. To contribute, please follow these steps:
78 |
79 | - Fork the repository.
80 | - Create a new branch for your feature or bug fix: `git checkout -b my-new-feature`
81 | - Make your changes and commit them: `git commit -am 'Add some feature'`
82 | - Push your changes to the branch: `git push origin my-new-feature`
83 | - Submit a pull request.
84 |
85 | Please ensure your code adheres to the existing style and passes the existing tests before submitting a pull request.
86 |
87 | ## License
88 |
89 | email:zk@likedge.top | edge@ibooker.org.cn
90 |
91 | The author's personal website is [Likedge](http://likedge.top/), and the author's email is zk@likedge.top.
92 |
93 | Edge-Computing-Engine is released under the Apache 2.0 License. See the `LICENSE` file for details.
94 |
95 |
96 | 
97 |
98 |
--------------------------------------------------------------------------------
/autodiff/dor.h:
--------------------------------------------------------------------------------
#ifndef DYADIC_OPERATION_RESULT
#define DYADIC_OPERATION_RESULT

// Result of a dyadic (two-operand) autodiff operation: the forward-pass
// value together with the partial derivatives with respect to each operand.
struct DyadicOperationResult {
    double value;       // forward-pass result of the operation
    double left_grad;   // partial derivative w.r.t. the left operand
    double right_grad;  // partial derivative w.r.t. the right operand

    // Member initializer list instead of assignment in the body: the fields
    // are initialized directly rather than default-initialized then assigned.
    // Interface (parameter order and implicit convertibility) is unchanged.
    DyadicOperationResult(double value, double left_grad, double right_grad)
        : value(value), left_grad(left_grad), right_grad(right_grad) {}
};

#endif /* end of include guard: DYADIC_OPERATION_RESULT */
--------------------------------------------------------------------------------
/autodiff/graph.h:
--------------------------------------------------------------------------------
1 | #ifndef GRAPH_H
2 | #define GRAPH_H
3 |
4 | #include