├── .gitattributes ├── .vscode └── settings.json ├── README.md ├── credit-analysis-application_leo ├── Leo.toml ├── README.md ├── inputs │ └── project.in ├── outputs │ └── project.out └── src │ └── main.leo ├── credit-analysis-application_python ├── 01_train_neural_network.py ├── 02_test_neural_network.py ├── 03_leo_circuit_generator.py └── 04_extract_inputs.py ├── neuralnetwork-initial ├── .gitignore ├── Leo.toml ├── README.md ├── inputs │ ├── nn-med-1.in │ └── nn-med-1.state └── src │ └── main.leo └── python-neuralnetwork-generator ├── leo_neural_network_generator.py ├── main.leo └── project.in /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # leo-neural-networks 2 | 3 | Please check out the Medium article that accompanies this repository. 4 | 5 | ## Initial neural network code 6 | 7 | Please modify this code to run inference for a multilayer perceptron neural network in the zk-SNARK circuit language Leo. 8 | 9 | ## Python neural network generator 10 | 11 | Please use this Python 3 code to generate neural network architectures of arbitrary size for multilayer perceptron neural networks in the zk-SNARK circuit language Leo. 12 | 13 | ## credit-analysis-application folders 14 | 15 | Please use these folders in combination with the German credit machine learning dataset. Use the Python folder first to generate the Leo program and its inputs, and then run the generated program in Leo. -------------------------------------------------------------------------------- /credit-analysis-application_leo/Leo.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "project" 3 | version = "0.1.0" 4 | description = "The project package" 5 | license = "MIT" 6 | 7 | [remote] 8 | author = "[AUTHOR]" # Add your Aleo Package Manager username or team name. 
9 | -------------------------------------------------------------------------------- /credit-analysis-application_leo/README.md: -------------------------------------------------------------------------------- 1 | # project 2 | 3 | ## Build Guide 4 | 5 | To compile this Leo program, run: 6 | ```bash 7 | leo build 8 | ``` 9 | 10 | To test this Leo program, run: 11 | ```bash 12 | leo test 13 | ``` 14 | 15 | ## Development 16 | 17 | To output the number of constraints, run: 18 | ```bash 19 | leo build -d 20 | ``` 21 | -------------------------------------------------------------------------------- /credit-analysis-application_leo/inputs/project.in: -------------------------------------------------------------------------------- 1 | [main] 2 | w100: i16 = -9; 3 | w110: i16 = 17; 4 | w120: i16 = -37; 5 | w130: i16 = 19; 6 | w140: i16 = -2; 7 | w150: i16 = -22; 8 | w160: i16 = -7; 9 | w170: i16 = 4; 10 | w180: i16 = 15; 11 | w190: i16 = 16; 12 | w1100: i16 = 8; 13 | w1110: i16 = 13; 14 | w1120: i16 = -5; 15 | w1130: i16 = -13; 16 | w1140: i16 = 21; 17 | w1150: i16 = -23; 18 | w1160: i16 = 9; 19 | w1170: i16 = -16; 20 | w1180: i16 = 5; 21 | w1190: i16 = -12; 22 | w101: i16 = 22; 23 | w111: i16 = -25; 24 | w121: i16 = 13; 25 | w131: i16 = 17; 26 | w141: i16 = 13; 27 | w151: i16 = 13; 28 | w161: i16 = -7; 29 | w171: i16 = -25; 30 | w181: i16 = -22; 31 | w191: i16 = -22; 32 | w1101: i16 = 11; 33 | w1111: i16 = 5; 34 | w1121: i16 = 4; 35 | w1131: i16 = -33; 36 | w1141: i16 = 16; 37 | w1151: i16 = -41; 38 | w1161: i16 = -11; 39 | w1171: i16 = 1; 40 | w1181: i16 = 2; 41 | w1191: i16 = -12; 42 | w102: i16 = 7; 43 | w112: i16 = -23; 44 | w122: i16 = -22; 45 | w132: i16 = -30; 46 | w142: i16 = 13; 47 | w152: i16 = 2; 48 | w162: i16 = 12; 49 | w172: i16 = 4; 50 | w182: i16 = -24; 51 | w192: i16 = 12; 52 | w1102: i16 = 12; 53 | w1112: i16 = -5; 54 | w1122: i16 = -12; 55 | w1132: i16 = 18; 56 | w1142: i16 = 0; 57 | w1152: i16 = -20; 58 | w1162: i16 = 26; 59 | w1172: i16 = -15; 60 | w1182: i16 = 1; 61 | w1192: i16 = -13; 62 | w103: i16 = 16; 63 | w113: i16 = -12; 64 | w123: i16 = 30; 65 | w133: i16 = -11; 66 | w143: i16 = 15; 67 | w153: i16 = -17; 68 | w163: i16 = 14; 69 | w173: i16 = 17; 70 | w183: i16 = -33; 71 | w193: i16 = 19; 72 | w1103: i16 = 9; 73 | w1113: i16 = -2; 74 | w1123: i16 = 2; 75 | w1133: i16 = 27; 76 | w1143: i16 = 9; 77 | w1153: i16 = -3; 78 | w1163: i16 = 51; 79 | w1173: i16 = -38; 80 | w1183: i16 = -21; 81 | w1193: i16 = -4; 82 | w104: i16 = 50; 83 | w114: i16 = -6; 84 | w124: i16 = 4; 85 | w134: i16 = -6; 86 | w144: i16 = 27; 87 | w154: i16 = 8; 88 | w164: i16 = 7; 89 | w174: i16 = 15; 90 | w184: i16 = 1; 91 | w194: i16 = -26; 92 | w1104: i16 = 3; 93 | w1114: i16 = -35; 94 | w1124: i16 = -17; 95 | w1134: i16 = -10; 96 | w1144: i16 = 15; 97 | w1154: i16 = -2; 98 | w1164: i16 = 21; 99 | w1174: i16 = 1; 100 | w1184: i16 = 4; 101 | w1194: i16 = -26; 102 | w105: i16 = 60; 103 | w115: i16 = -17; 104 | w125: i16 = 30; 105 | w135: i16 = -14; 106 | w145: i16 = 8; 107 | w155: i16 = 32; 108 | w165: i16 = 10; 109 | w175: i16 = 21; 110 | w185: i16 = -13; 111 | w195: i16 = -3; 112 | w1105: i16 = 37; 113 | w1115: i16 = 4; 114 | w1125: i16 = 1; 115 | w1135: i16 = 35; 116 | w1145: i16 = 26; 117 | w1155: i16 = -20; 118 | w1165: i16 = 15; 119 | w1175: i16 = 14; 120 | w1185: i16 = -33; 121 | w1195: i16 = 10; 122 | w106: i16 = 10; 123 | w116: i16 = -9; 124 | w126: i16 = 33; 125 | w136: i16 = -6; 126 | w146: i16 = -16; 127 | w156: i16 = 29; 128 | w166: i16 = 21; 129 | w176: i16 = -21; 130 | w186: i16 = 28; 
131 | w196: i16 = 8; 132 | w1106: i16 = 4; 133 | w1116: i16 = 13; 134 | w1126: i16 = -11; 135 | w1136: i16 = -19; 136 | w1146: i16 = 6; 137 | w1156: i16 = -25; 138 | w1166: i16 = -8; 139 | w1176: i16 = 27; 140 | w1186: i16 = 7; 141 | w1196: i16 = 8; 142 | w107: i16 = -26; 143 | w117: i16 = 50; 144 | w127: i16 = 6; 145 | w137: i16 = 24; 146 | w147: i16 = -37; 147 | w157: i16 = -4; 148 | w167: i16 = -1; 149 | w177: i16 = 33; 150 | w187: i16 = 13; 151 | w197: i16 = -16; 152 | w1107: i16 = -1; 153 | w1117: i16 = 18; 154 | w1127: i16 = -9; 155 | w1137: i16 = 4; 156 | w1147: i16 = -11; 157 | w1157: i16 = 0; 158 | w1167: i16 = 3; 159 | w1177: i16 = -9; 160 | w1187: i16 = -14; 161 | w1197: i16 = 32; 162 | w108: i16 = 2; 163 | w118: i16 = -3; 164 | w128: i16 = -15; 165 | w138: i16 = -23; 166 | w148: i16 = -24; 167 | w158: i16 = -19; 168 | w168: i16 = 25; 169 | w178: i16 = -3; 170 | w188: i16 = 13; 171 | w198: i16 = 0; 172 | w1108: i16 = -10; 173 | w1118: i16 = -7; 174 | w1128: i16 = -1; 175 | w1138: i16 = 13; 176 | w1148: i16 = -27; 177 | w1158: i16 = -10; 178 | w1168: i16 = 4; 179 | w1178: i16 = -6; 180 | w1188: i16 = -20; 181 | w1198: i16 = -1; 182 | w109: i16 = 12; 183 | w119: i16 = 17; 184 | w129: i16 = 27; 185 | w139: i16 = -24; 186 | w149: i16 = 21; 187 | w159: i16 = -6; 188 | w169: i16 = 24; 189 | w179: i16 = -27; 190 | w189: i16 = 11; 191 | w199: i16 = 10; 192 | w1109: i16 = -21; 193 | w1119: i16 = 19; 194 | w1129: i16 = -26; 195 | w1139: i16 = -24; 196 | w1149: i16 = -1; 197 | w1159: i16 = 0; 198 | w1169: i16 = 26; 199 | w1179: i16 = 7; 200 | w1189: i16 = -13; 201 | w1199: i16 = -3; 202 | b10: i16 = -6; 203 | b11: i16 = 24; 204 | b12: i16 = 2; 205 | b13: i16 = 60; 206 | b14: i16 = 35; 207 | b15: i16 = 35; 208 | b16: i16 = -8; 209 | b17: i16 = -25; 210 | b18: i16 = 8; 211 | b19: i16 = 0; 212 | w200: i16 = -33; 213 | w210: i16 = 23; 214 | w220: i16 = 5; 215 | w230: i16 = 77; 216 | w240: i16 = 0; 217 | w250: i16 = 71; 218 | w260: i16 = -7; 219 | w270: i16 = -29; 220 | w280: i16 = 24; 221 | w290: i16 = -7; 222 | w201: i16 = -1; 223 | w211: i16 = -54; 224 | w221: i16 = -42; 225 | w231: i16 = -50; 226 | w241: i16 = -73; 227 | w251: i16 = -64; 228 | w261: i16 = -46; 229 | w271: i16 = 62; 230 | w281: i16 = 32; 231 | w291: i16 = -31; 232 | b20: i16 = -16; 233 | b21: i16 = 17; 234 | 235 | input0: i16 = -159; 236 | input1: i16 = 161; 237 | input2: i16 = -64; 238 | input3: i16 = 34; 239 | input4: i16 = -89; 240 | input5: i16 = -256; 241 | input6: i16 = 53; 242 | input7: i16 = 16; 243 | input8: i16 = -42; 244 | input9: i16 = -65; 245 | input10: i16 = 58; 246 | input11: i16 = -91; 247 | input12: i16 = -54; 248 | input13: i16 = 153; 249 | input14: i16 = -24; 250 | input15: i16 = -68; 251 | input16: i16 = -44; 252 | input17: i16 = 41; 253 | input18: i16 = -26; 254 | input19: i16 = -59; 255 | 256 | [registers] 257 | r0: [i16; 1] = [0]; -------------------------------------------------------------------------------- /credit-analysis-application_leo/outputs/project.out: -------------------------------------------------------------------------------- 1 | [registers] 2 | r0: [i16; 1] = "64"; 3 | -------------------------------------------------------------------------------- /credit-analysis-application_leo/src/main.leo: -------------------------------------------------------------------------------- 1 | function main(w100: i16, w101: i16, w102: i16, w103: i16, w104: i16, w105: i16, w106: i16, w107: i16, w108: i16, w109: i16, b10: i16, w110: i16, w111: i16, w112: i16, w113: i16, w114: i16, w115: i16, w116: 
i16, w117: i16, w118: i16, w119: i16, b11: i16, w120: i16, w121: i16, w122: i16, w123: i16, w124: i16, w125: i16, w126: i16, w127: i16, w128: i16, w129: i16, b12: i16, w130: i16, w131: i16, w132: i16, w133: i16, w134: i16, w135: i16, w136: i16, w137: i16, w138: i16, w139: i16, b13: i16, w140: i16, w141: i16, w142: i16, w143: i16, w144: i16, w145: i16, w146: i16, w147: i16, w148: i16, w149: i16, b14: i16, w150: i16, w151: i16, w152: i16, w153: i16, w154: i16, w155: i16, w156: i16, w157: i16, w158: i16, w159: i16, b15: i16, w160: i16, w161: i16, w162: i16, w163: i16, w164: i16, w165: i16, w166: i16, w167: i16, w168: i16, w169: i16, b16: i16, w170: i16, w171: i16, w172: i16, w173: i16, w174: i16, w175: i16, w176: i16, w177: i16, w178: i16, w179: i16, b17: i16, w180: i16, w181: i16, w182: i16, w183: i16, w184: i16, w185: i16, w186: i16, w187: i16, w188: i16, w189: i16, b18: i16, w190: i16, w191: i16, w192: i16, w193: i16, w194: i16, w195: i16, w196: i16, w197: i16, w198: i16, w199: i16, b19: i16, w1100: i16, w1101: i16, w1102: i16, w1103: i16, w1104: i16, w1105: i16, w1106: i16, w1107: i16, w1108: i16, w1109: i16, w1110: i16, w1111: i16, w1112: i16, w1113: i16, w1114: i16, w1115: i16, w1116: i16, w1117: i16, w1118: i16, w1119: i16, w1120: i16, w1121: i16, w1122: i16, w1123: i16, w1124: i16, w1125: i16, w1126: i16, w1127: i16, w1128: i16, w1129: i16, w1130: i16, w1131: i16, w1132: i16, w1133: i16, w1134: i16, w1135: i16, w1136: i16, w1137: i16, w1138: i16, w1139: i16, w1140: i16, w1141: i16, w1142: i16, w1143: i16, w1144: i16, w1145: i16, w1146: i16, w1147: i16, w1148: i16, w1149: i16, w1150: i16, w1151: i16, w1152: i16, w1153: i16, w1154: i16, w1155: i16, w1156: i16, w1157: i16, w1158: i16, w1159: i16, w1160: i16, w1161: i16, w1162: i16, w1163: i16, w1164: i16, w1165: i16, w1166: i16, w1167: i16, w1168: i16, w1169: i16, w1170: i16, w1171: i16, w1172: i16, w1173: i16, w1174: i16, w1175: i16, w1176: i16, w1177: i16, w1178: i16, w1179: i16, w1180: i16, w1181: i16, w1182: i16, w1183: i16, w1184: i16, w1185: i16, w1186: i16, w1187: i16, w1188: i16, w1189: i16, w1190: i16, w1191: i16, w1192: i16, w1193: i16, w1194: i16, w1195: i16, w1196: i16, w1197: i16, w1198: i16, w1199: i16, w200: i16, w201: i16, b20: i16, w210: i16, w211: i16, b21: i16, w220: i16, w221: i16, w230: i16, w231: i16, w240: i16, w241: i16, w250: i16, w251: i16, w260: i16, w261: i16, w270: i16, w271: i16, w280: i16, w281: i16, w290: i16, w291: i16, input0: i16, input1: i16, input2: i16, input3: i16, input4: i16, input5: i16, input6: i16, input7: i16, input8: i16, input9: i16, input10: i16, input11: i16, input12: i16, input13: i16, input14: i16, input15: i16, input16: i16, input17: i16, input18: i16, input19: i16) -> [i16; 1] { 2 | let neuron00: i16 = input0; 3 | let neuron01: i16 = input1; 4 | let neuron02: i16 = input2; 5 | let neuron03: i16 = input3; 6 | let neuron04: i16 = input4; 7 | let neuron05: i16 = input5; 8 | let neuron06: i16 = input6; 9 | let neuron07: i16 = input7; 10 | let neuron08: i16 = input8; 11 | let neuron09: i16 = input9; 12 | let neuron010: i16 = input10; 13 | let neuron011: i16 = input11; 14 | let neuron012: i16 = input12; 15 | let neuron013: i16 = input13; 16 | let neuron014: i16 = input14; 17 | let neuron015: i16 = input15; 18 | let neuron016: i16 = input16; 19 | let neuron017: i16 = input17; 20 | let neuron018: i16 = input18; 21 | let neuron019: i16 = input19; 22 | 23 | let neuron10: i16 = rectified_linear_activation(neuron00 * w100 / 128 + neuron01 * w110 / 128 + neuron02 * w120 / 128 + neuron03 * w130 / 
128 + neuron04 * w140 / 128 + neuron05 * w150 / 128 + neuron06 * w160 / 128 + neuron07 * w170 / 128 + neuron08 * w180 / 128 + neuron09 * w190 / 128 + neuron010 * w1100 / 128 + neuron011 * w1110 / 128 + neuron012 * w1120 / 128 + neuron013 * w1130 / 128 + neuron014 * w1140 / 128 + neuron015 * w1150 / 128 + neuron016 * w1160 / 128 + neuron017 * w1170 / 128 + neuron018 * w1180 / 128 + neuron019 * w1190 / 128 + b10); 24 | let neuron11: i16 = rectified_linear_activation(neuron00 * w101 / 128 + neuron01 * w111 / 128 + neuron02 * w121 / 128 + neuron03 * w131 / 128 + neuron04 * w141 / 128 + neuron05 * w151 / 128 + neuron06 * w161 / 128 + neuron07 * w171 / 128 + neuron08 * w181 / 128 + neuron09 * w191 / 128 + neuron010 * w1101 / 128 + neuron011 * w1111 / 128 + neuron012 * w1121 / 128 + neuron013 * w1131 / 128 + neuron014 * w1141 / 128 + neuron015 * w1151 / 128 + neuron016 * w1161 / 128 + neuron017 * w1171 / 128 + neuron018 * w1181 / 128 + neuron019 * w1191 / 128 + b11); 25 | let neuron12: i16 = rectified_linear_activation(neuron00 * w102 / 128 + neuron01 * w112 / 128 + neuron02 * w122 / 128 + neuron03 * w132 / 128 + neuron04 * w142 / 128 + neuron05 * w152 / 128 + neuron06 * w162 / 128 + neuron07 * w172 / 128 + neuron08 * w182 / 128 + neuron09 * w192 / 128 + neuron010 * w1102 / 128 + neuron011 * w1112 / 128 + neuron012 * w1122 / 128 + neuron013 * w1132 / 128 + neuron014 * w1142 / 128 + neuron015 * w1152 / 128 + neuron016 * w1162 / 128 + neuron017 * w1172 / 128 + neuron018 * w1182 / 128 + neuron019 * w1192 / 128 + b12); 26 | let neuron13: i16 = rectified_linear_activation(neuron00 * w103 / 128 + neuron01 * w113 / 128 + neuron02 * w123 / 128 + neuron03 * w133 / 128 + neuron04 * w143 / 128 + neuron05 * w153 / 128 + neuron06 * w163 / 128 + neuron07 * w173 / 128 + neuron08 * w183 / 128 + neuron09 * w193 / 128 + neuron010 * w1103 / 128 + neuron011 * w1113 / 128 + neuron012 * w1123 / 128 + neuron013 * w1133 / 128 + neuron014 * w1143 / 128 + neuron015 * w1153 / 128 + neuron016 * w1163 / 128 + neuron017 * w1173 / 128 + neuron018 * w1183 / 128 + neuron019 * w1193 / 128 + b13); 27 | let neuron14: i16 = rectified_linear_activation(neuron00 * w104 / 128 + neuron01 * w114 / 128 + neuron02 * w124 / 128 + neuron03 * w134 / 128 + neuron04 * w144 / 128 + neuron05 * w154 / 128 + neuron06 * w164 / 128 + neuron07 * w174 / 128 + neuron08 * w184 / 128 + neuron09 * w194 / 128 + neuron010 * w1104 / 128 + neuron011 * w1114 / 128 + neuron012 * w1124 / 128 + neuron013 * w1134 / 128 + neuron014 * w1144 / 128 + neuron015 * w1154 / 128 + neuron016 * w1164 / 128 + neuron017 * w1174 / 128 + neuron018 * w1184 / 128 + neuron019 * w1194 / 128 + b14); 28 | let neuron15: i16 = rectified_linear_activation(neuron00 * w105 / 128 + neuron01 * w115 / 128 + neuron02 * w125 / 128 + neuron03 * w135 / 128 + neuron04 * w145 / 128 + neuron05 * w155 / 128 + neuron06 * w165 / 128 + neuron07 * w175 / 128 + neuron08 * w185 / 128 + neuron09 * w195 / 128 + neuron010 * w1105 / 128 + neuron011 * w1115 / 128 + neuron012 * w1125 / 128 + neuron013 * w1135 / 128 + neuron014 * w1145 / 128 + neuron015 * w1155 / 128 + neuron016 * w1165 / 128 + neuron017 * w1175 / 128 + neuron018 * w1185 / 128 + neuron019 * w1195 / 128 + b15); 29 | let neuron16: i16 = rectified_linear_activation(neuron00 * w106 / 128 + neuron01 * w116 / 128 + neuron02 * w126 / 128 + neuron03 * w136 / 128 + neuron04 * w146 / 128 + neuron05 * w156 / 128 + neuron06 * w166 / 128 + neuron07 * w176 / 128 + neuron08 * w186 / 128 + neuron09 * w196 / 128 + neuron010 * w1106 / 128 + neuron011 * w1116 / 128 
+ neuron012 * w1126 / 128 + neuron013 * w1136 / 128 + neuron014 * w1146 / 128 + neuron015 * w1156 / 128 + neuron016 * w1166 / 128 + neuron017 * w1176 / 128 + neuron018 * w1186 / 128 + neuron019 * w1196 / 128 + b16); 30 | let neuron17: i16 = rectified_linear_activation(neuron00 * w107 / 128 + neuron01 * w117 / 128 + neuron02 * w127 / 128 + neuron03 * w137 / 128 + neuron04 * w147 / 128 + neuron05 * w157 / 128 + neuron06 * w167 / 128 + neuron07 * w177 / 128 + neuron08 * w187 / 128 + neuron09 * w197 / 128 + neuron010 * w1107 / 128 + neuron011 * w1117 / 128 + neuron012 * w1127 / 128 + neuron013 * w1137 / 128 + neuron014 * w1147 / 128 + neuron015 * w1157 / 128 + neuron016 * w1167 / 128 + neuron017 * w1177 / 128 + neuron018 * w1187 / 128 + neuron019 * w1197 / 128 + b17); 31 | let neuron18: i16 = rectified_linear_activation(neuron00 * w108 / 128 + neuron01 * w118 / 128 + neuron02 * w128 / 128 + neuron03 * w138 / 128 + neuron04 * w148 / 128 + neuron05 * w158 / 128 + neuron06 * w168 / 128 + neuron07 * w178 / 128 + neuron08 * w188 / 128 + neuron09 * w198 / 128 + neuron010 * w1108 / 128 + neuron011 * w1118 / 128 + neuron012 * w1128 / 128 + neuron013 * w1138 / 128 + neuron014 * w1148 / 128 + neuron015 * w1158 / 128 + neuron016 * w1168 / 128 + neuron017 * w1178 / 128 + neuron018 * w1188 / 128 + neuron019 * w1198 / 128 + b18); 32 | let neuron19: i16 = rectified_linear_activation(neuron00 * w109 / 128 + neuron01 * w119 / 128 + neuron02 * w129 / 128 + neuron03 * w139 / 128 + neuron04 * w149 / 128 + neuron05 * w159 / 128 + neuron06 * w169 / 128 + neuron07 * w179 / 128 + neuron08 * w189 / 128 + neuron09 * w199 / 128 + neuron010 * w1109 / 128 + neuron011 * w1119 / 128 + neuron012 * w1129 / 128 + neuron013 * w1139 / 128 + neuron014 * w1149 / 128 + neuron015 * w1159 / 128 + neuron016 * w1169 / 128 + neuron017 * w1179 / 128 + neuron018 * w1189 / 128 + neuron019 * w1199 / 128 + b19); 33 | let neuron20: i16 = (neuron10 * w200 / 128 + neuron11 * w210 / 128 + neuron12 * w220 / 128 + neuron13 * w230 / 128 + neuron14 * w240 / 128 + neuron15 * w250 / 128 + neuron16 * w260 / 128 + neuron17 * w270 / 128 + neuron18 * w280 / 128 + neuron19 * w290 / 128 + b20); 34 | let neuron21: i16 = (neuron10 * w201 / 128 + neuron11 * w211 / 128 + neuron12 * w221 / 128 + neuron13 * w231 / 128 + neuron14 * w241 / 128 + neuron15 * w251 / 128 + neuron16 * w261 / 128 + neuron17 * w271 / 128 + neuron18 * w281 / 128 + neuron19 * w291 / 128 + b21); 35 | return [neuron21];} 36 | 37 | function rectified_linear_activation(x: i16) -> i16 { 38 | let result: i16 = 0; 39 | if x > 0 { 40 | result = x; 41 | } 42 | return result; 43 | } -------------------------------------------------------------------------------- /credit-analysis-application_python/01_train_neural_network.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.utils.data import DataLoader, TensorDataset 4 | import pandas as pd 5 | from sklearn.model_selection import train_test_split 6 | import pickle 7 | 8 | # load the german.data-numeric data set 9 | data = pd.read_csv('german.data-numeric', delim_whitespace=True, header=None, on_bad_lines='skip') 10 | 11 | # define the neural network 12 | class MLP(nn.Module): 13 | def __init__(self, input_size, hidden_size, output_size): 14 | super(MLP, self).__init__() 15 | self.fc1 = nn.Linear(input_size, hidden_size) 16 | self.fc2 = nn.Linear(hidden_size, output_size) 17 | 18 | def forward(self, x): 19 | x = torch.relu(self.fc1(x)) 20 | x = self.fc2(x) 21 | 
return x 22 | 23 | X = data.iloc[:, 0:20]#df.iloc[:, :-1]#df.iloc[:, 0:6]#df.iloc[:, :-1] 24 | y = data.iloc[:, -1] - 1 25 | 26 | # split training and testing data 27 | x_train, _, y_train, _ = train_test_split(X, y, test_size=0.2, random_state=0) 28 | 29 | # normalize the data 30 | x_train_mean = x_train.mean() 31 | x_train_std = x_train.std() 32 | 33 | x_train = (x_train - x_train_mean) / x_train_std 34 | 35 | # convert pandas dataframes to tensors 36 | x_train = torch.tensor(x_train.values, dtype=torch.float32) 37 | y_train = torch.tensor(y_train.values, dtype=torch.long) 38 | 39 | # combine the data into a dataset and dataloader 40 | dataset = TensorDataset(x_train, y_train) 41 | 42 | train_loader = DataLoader(dataset, batch_size=32, shuffle=True) 43 | 44 | model = MLP(20, 10, 2) 45 | 46 | # define the loss function and optimizer 47 | criterion = nn.CrossEntropyLoss() 48 | optimizer = torch.optim.SGD(model.parameters(), lr=0.01) 49 | 50 | # train the model 51 | for epoch in range(100): 52 | for inputs, labels in train_loader: 53 | optimizer.zero_grad() 54 | outputs = model(inputs) 55 | loss = criterion(outputs, labels) 56 | loss.backward() 57 | optimizer.step() 58 | 59 | # save model 60 | torch.save(model.state_dict(), 'model.pt') 61 | 62 | # save the mean and standard deviation using pickle 63 | with open('mean_std.pkl', 'wb') as f: 64 | pickle.dump((x_train_mean, x_train_std), f) -------------------------------------------------------------------------------- /credit-analysis-application_python/02_test_neural_network.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.utils.data import DataLoader, TensorDataset 4 | import pandas as pd 5 | from sklearn.model_selection import train_test_split 6 | import pickle 7 | 8 | # load the data set 9 | data = pd.read_csv('german.data-numeric', delim_whitespace=True, header=None, on_bad_lines='skip') 10 | 11 | # define the neural network 12 | class MLP(nn.Module): 13 | def __init__(self, input_size, hidden_size, output_size): 14 | super(MLP, self).__init__() 15 | self.fc1 = nn.Linear(input_size, hidden_size) 16 | self.fc2 = nn.Linear(hidden_size, output_size) 17 | 18 | def forward(self, x): 19 | x = torch.relu(self.fc1(x)) 20 | x = self.fc2(x) 21 | return x 22 | 23 | X = data.iloc[:, 0:20]#df.iloc[:, :-1]#df.iloc[:, 0:6]#df.iloc[:, :-1] 24 | y = data.iloc[:, -1] - 1 25 | 26 | # split training and testing data 27 | _, x_test, _, y_test = train_test_split(X, y, test_size=0.2, random_state=0) 28 | 29 | # open the pickle file to load the train mean and std 30 | with open('mean_std.pkl', 'rb') as f: 31 | [x_train_mean, x_train_std] = pickle.load(f) 32 | 33 | x_test = (x_test - x_train_mean) / x_train_std 34 | 35 | # load the model 36 | model = MLP(20, 10, 2) 37 | model.load_state_dict(torch.load('model.pt')) 38 | 39 | # test the model 40 | 41 | x_test = torch.tensor(x_test.values, dtype=torch.float) 42 | y_test = torch.tensor(y_test.values, dtype=torch.float) 43 | test_data = TensorDataset(x_test, y_test) 44 | test_loader = DataLoader(test_data, batch_size=32, shuffle=False) 45 | 46 | from sklearn.metrics import roc_auc_score 47 | with torch.no_grad(): 48 | running_predicted_tensor = torch.tensor([]) 49 | for inputs, labels in test_loader: 50 | outputs = model(inputs) 51 | _, predicted = torch.max(outputs.data, 1) 52 | running_predicted_tensor = torch.cat((running_predicted_tensor, predicted), 0) 53 | auc = roc_auc_score(y_test, running_predicted_tensor) 54 | 
print('AUC: {}'.format(auc)) -------------------------------------------------------------------------------- /credit-analysis-application_python/03_leo_circuit_generator.py: -------------------------------------------------------------------------------- 1 | neurons_per_layer = [20, 10, 2] # specifies NN architecture 2 | scaling_factor = 7 # specifies scaling factor for fixed point numbers (2**7 = 128, matching 04_extract_inputs.py and the generated circuit) 3 | integer_type = "i16" # specifies used integer type 4 | 5 | if(len(neurons_per_layer) < 2 or min(neurons_per_layer) < 1): 6 | print("error, invalid input") 7 | 8 | str_list_main = [] 9 | str_list_inputs = [] 10 | 11 | str_main="function main(" 12 | 13 | str_inputs = "" 14 | 15 | str_list_inputs.append("[main]\n") 16 | 17 | for i in range(neurons_per_layer[0]): 18 | #str_main += "w0" + str(i)+": " + integer_type + ", b0" + str(i) + ": " + integer_type + ", " 19 | #str_inputs += "w0" + str(i) + ": " + integer_type + " = 0;\n" 20 | #str_inputs += "b0" + str(i) + ": " + integer_type + " = 0;\n" 21 | pass 22 | 23 | str_list_inputs.append(str_inputs) 24 | str_inputs = "" 25 | 26 | for i in range(1, len(neurons_per_layer)): # current layer 27 | for j in range(neurons_per_layer[i-1]): # neuron of previous layer 28 | for k in range(neurons_per_layer[i]): # neuron of current layer 29 | str_main += "w" + str(i) + str(j) + str(k) + ": " + integer_type + ", " 30 | str_inputs += "w" + str(i) + str(j) + str(k) + ": " + integer_type + " = 0;\n" 31 | str_main += "b" + str(i) + str(j) + ": " + integer_type + ", " 32 | str_inputs += "b" + str(i) + str(j) + ": " + integer_type + " = 0;\n" 33 | 34 | for i in range(neurons_per_layer[0]): 35 | str_main += "input"+str(i)+": " + integer_type + ", " 36 | str_inputs += "input"+str(i)+": " + integer_type + " = 0;\n" 37 | 38 | str_main = str_main[:-2] 39 | str_list_inputs.append(str_inputs) 40 | 41 | str_inputs = "[registers]\n" 42 | 43 | str_main += ") -> [" + integer_type + "; " + str(neurons_per_layer[-1]) + "] {\n" 44 | 45 | str_list_main.append(str_main) 46 | 47 | line = "" 48 | 49 | for i in range(neurons_per_layer[0]): # input layer (inputs from 04_extract_inputs.py are already fixed-point, so pass them through) 50 | line += "let neuron0"+str(i) + ": " + integer_type + " = input" + str(i) + ";\n" 51 | 52 | for layer in range(1, len(neurons_per_layer)): # other layers 53 | for i in range(neurons_per_layer[layer]): 54 | line_start = "let neuron" + str(layer) + str(i) + ": " + integer_type + " = rectified_linear_activation(" 55 | for j in range(neurons_per_layer[layer-1]): 56 | line_start += "neuron" + str(layer-1) + str(j) + " * w" + str(layer) + str(j) + str(i) + " / " + str(2**scaling_factor) + " + " 57 | 58 | line_start += "b" + str(layer) + str(i) + ");\n" 59 | line += line_start 60 | 61 | str_list_main.append(line) 62 | 63 | line = "return [" 64 | str_inputs += "r0: [" + integer_type + "; " + str(neurons_per_layer[-1]) + "] = [" 65 | for i in range(neurons_per_layer[-1]): 66 | line += "neuron" + str(len(neurons_per_layer)-1) + str(i) + ", " 67 | str_inputs += "0, " 68 | str_inputs = str_inputs[:-2] + "];\n" 69 | 70 | line = line[:-2] 71 | line += "];}\n\n" 72 | str_list_main.append(line) 73 | str_list_inputs.append(str_inputs) 74 | 75 | str_list_main.append("function rectified_linear_activation(x: " + integer_type + ") -> " + integer_type + " {\n") 76 | str_list_main.append("let result: " + integer_type + " = 0;\n") 77 | str_list_main.append("if x > 0 {\n") 78 | str_list_main.append("result = x;\n") 79 | str_list_main.append("}\n") 80 | str_list_main.append("return result;\n") 81 | str_list_main.append("}") 82 | 83 | with open("main.leo", "w+") as file: 84 | 
file.writelines(str_list_main) 85 | 86 | with open("project.in", "w+") as file: 87 | file.writelines(str_list_inputs) -------------------------------------------------------------------------------- /credit-analysis-application_python/04_extract_inputs.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.utils.data import DataLoader, TensorDataset 4 | import pandas as pd 5 | from sklearn.model_selection import train_test_split 6 | import pickle 7 | import math 8 | 9 | # load the data set from the disk - you can download it here: https://archive.ics.uci.edu/ml/datasets/statlog+(german+credit+data) 10 | data = pd.read_csv('german.data-numeric', delim_whitespace=True, header=None, on_bad_lines='skip') 11 | 12 | # define the neural network 13 | class MLP(nn.Module): 14 | def __init__(self, input_size, hidden_size, output_size): 15 | super(MLP, self).__init__() 16 | self.fc1 = nn.Linear(input_size, hidden_size) 17 | self.fc2 = nn.Linear(hidden_size, output_size) 18 | 19 | def forward(self, x): 20 | x = torch.relu(self.fc1(x)) 21 | x = self.fc2(x) 22 | return x 23 | 24 | X = data.iloc[:, 0:20]#df.iloc[:, :-1]#df.iloc[:, 0:6]#df.iloc[:, :-1] 25 | y = data.iloc[:, -1] - 1 26 | 27 | # split training and testing data 28 | _, x_test, _, y_test = train_test_split(X, y, test_size=0.2, random_state=0) 29 | 30 | # open the pickle file to load the train mean and std 31 | with open('mean_std.pkl', 'rb') as f: 32 | [x_train_mean, x_train_std] = pickle.load(f) 33 | 34 | x_test = (x_test - x_train_mean) / x_train_std 35 | 36 | # load the model 37 | model = MLP(20, 10, 2) 38 | model.load_state_dict(torch.load('model.pt')) 39 | 40 | for key in model.state_dict().keys(): 41 | print(key) 42 | print(model.state_dict()[key]) 43 | 44 | str_list_inputs = [] 45 | 46 | str_inputs = "" 47 | 48 | str_list_inputs.append("[main]\n") 49 | 50 | for i, key in enumerate(model.state_dict().keys()): 51 | if(i==0): 52 | first_layer = model.state_dict()[key] 53 | layer_name = "" 54 | if('weight' in key): 55 | layer_name = "w" 56 | if('bias' in key): 57 | layer_name = "b" 58 | 59 | value = model.state_dict()[key] 60 | for j, val in enumerate(value): 61 | # get dimension of the value 62 | if(len(val.shape) == 1): 63 | for k, val2 in enumerate(val): 64 | val_fixed_point = int(val2 * 2**7) 65 | # write the weight as a signed 16-bit fixed-point value (scale 2**7), matching the i16 parameters of the generated Leo program 66 | variable_line = layer_name + str(math.floor(i/2)+1) + str(k) + str(j) + ": " + "i16 = " + str(val_fixed_point) + ";\n" 67 | str_list_inputs.append(variable_line) 68 | else: 69 | val_fixed_point = int(val * 2**7) 70 | # biases are written at the same fixed-point scale and integer type as the weights 71 | variable_line = layer_name + str(math.floor(i/2)+1) + str(j) + ": " + "i16 = " + str(val_fixed_point) + ";\n" 72 | str_list_inputs.append(variable_line) 73 | 74 | str_list_inputs.append("\n") 75 | 76 | # load the data set 77 | data = pd.read_csv('german.data-numeric', delim_whitespace=True, header=None, on_bad_lines='skip') 78 | 79 | X = data.iloc[:, 0:20]#df.iloc[:, :-1]#df.iloc[:, 0:6]#df.iloc[:, :-1] 80 | y = data.iloc[:, -1] - 1 81 | 82 | # split training and testing data 83 | _, x_test, _, y_test = train_test_split(X, y, test_size=0.2, random_state=0) 84 | 85 | # open the pickle file to load the train mean and std 86 | with open('mean_std.pkl', 'rb') as f: 87 | [x_train_mean, x_train_std] = pickle.load(f) 88 | 89 | x_test = 
(x_test - x_train_mean) / x_train_std 90 | 91 | x_test = torch.tensor(x_test.values, dtype=torch.float) 92 | y_test = torch.tensor(y_test.values, dtype=torch.float) 93 | test_data = TensorDataset(x_test, y_test) 94 | test_loader = DataLoader(test_data, batch_size=32, shuffle=False) 95 | 96 | for i in range(len((first_layer[0]))): 97 | value = test_data[0][0][i] 98 | val_fixed_point = int(value * 2**7) 99 | str_list_inputs.append("input" + str(i) + ": i16 = " + str(val_fixed_point) + ";\n") 100 | 101 | str_list_inputs.append("\n") 102 | str_list_inputs.append("[registers]") 103 | str_list_inputs.append("\n") 104 | str_list_inputs.append("r0: [i16; 2] = [0, 0];") 105 | 106 | with open("project.in", "w+") as file: 107 | file.writelines(str_list_inputs) -------------------------------------------------------------------------------- /neuralnetwork-initial/.gitignore: -------------------------------------------------------------------------------- 1 | outputs/ 2 | -------------------------------------------------------------------------------- /neuralnetwork-initial/Leo.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "neuralnetwork-initial" 3 | version = "0.1.0" 4 | description = "The neuralnetwork-initial package" 5 | license = "MIT" 6 | 7 | [remote] 8 | author = "[AUTHOR]" # Add your Aleo Package Manager username or team name. 9 | -------------------------------------------------------------------------------- /neuralnetwork-initial/README.md: -------------------------------------------------------------------------------- 1 | # neuralnetwork-initial 2 | 3 | ## Build Guide 4 | 5 | To compile this Leo program, run: 6 | ```bash 7 | leo build 8 | ``` 9 | 10 | To test this Leo program, run: 11 | ```bash 12 | leo test 13 | ``` 14 | 15 | ## Development 16 | 17 | To output the number of constraints, run: 18 | ```bash 19 | leo build -d 20 | ``` 21 | -------------------------------------------------------------------------------- /neuralnetwork-initial/inputs/nn-med-1.in: -------------------------------------------------------------------------------- 1 | // The program input for neuralnetwork-initial/src/main.leo 2 | [main] 3 | x0: u32 = 50; 4 | x1: u32 = 100; 5 | w00: u32 = 100; 6 | w01: u32 = 100; 7 | 8 | w100: u32 = 150; 9 | w101: u32 = 50; 10 | w110: u32 = 0; 11 | w111: u32 = 50; 12 | 13 | w20: u32 = 100; 14 | w21: u32 = 100; 15 | 16 | b00: u32 = 0; 17 | b01: u32 = 0; 18 | b10: u32 = 50; 19 | b11: u32 = 150; 20 | 21 | b2: u32 = 25; 22 | [registers] 23 | r0: u32 = 0; -------------------------------------------------------------------------------- /neuralnetwork-initial/inputs/nn-med-1.state: -------------------------------------------------------------------------------- 1 | // The program state for neuralnetwork-initial/src/main.leo 2 | [[public]] 3 | 4 | [state] 5 | leaf_index: u32 = 0; 6 | root: [u8; 32] = [0; 32]; 7 | 8 | [[private]] 9 | 10 | [record] 11 | serial_number: [u8; 64] = [0; 64]; 12 | commitment: [u8; 32] = [0; 32]; 13 | owner: address = aleo1daxej63vwrmn2zhl4dymygagh89k5d2vaw6rjauueme7le6k2q8sjn0ng9; 14 | is_dummy: bool = false; 15 | value: u64 = 0; 16 | payload: [u8; 32] = [0; 32]; 17 | birth_program_id: [u8; 48] = [0; 48]; 18 | death_program_id: [u8; 48] = [0; 48]; 19 | serial_number_nonce: [u8; 32] = [0; 32]; 20 | commitment_randomness: [u8; 32] = [0; 32]; 21 | 22 | [state_leaf] 23 | path: [u8; 128] = [0; 128]; 24 | memo: [u8; 32] = [0; 32]; 25 | network_id: u8 = 0; 26 | leaf_randomness: [u8; 32] = [0; 32]; 27 | 
-------------------------------------------------------------------------------- /neuralnetwork-initial/src/main.leo: -------------------------------------------------------------------------------- 1 | // The 'neuralnetwork-initial' main function. 2 | function main(x0: u32, x1: u32, w00: u32, w01: u32, w100: u32, w101: u32, w110: u32, w111: u32, w20: u32, w21: u32, b00: u32, b01: u32, b10: u32, b11: u32, b2: u32) -> u32 { 3 | 4 | let multiplication_correction: u32 = 100; // 10 to the power of 2, the number of decimal points 5 | 6 | let neuron00: u32 = w00 * x0 / multiplication_correction + b00; 7 | let neuron01: u32 = w01 * x1 / multiplication_correction + b01; 8 | 9 | let neuron10: u32 = rectified_linear_activation(w100 * neuron00 / multiplication_correction + w110 * neuron01 / multiplication_correction + b10); 10 | let neuron11: u32 = rectified_linear_activation(w101 * neuron00 / multiplication_correction + w111 * neuron01 / multiplication_correction + b11); 11 | 12 | let neuron20: u32 = rectified_linear_activation(w20 * neuron10 / multiplication_correction + w21 * neuron11 / multiplication_correction + b2); 13 | return neuron20;} 14 | 15 | function rectified_linear_activation(x: u32) -> u32 { 16 | 17 | let result: u32 = 0; 18 | 19 | if x > 0 { 20 | result = x; 21 | } 22 | 23 | return result; 24 | } -------------------------------------------------------------------------------- /python-neuralnetwork-generator/leo_neural_network_generator.py: -------------------------------------------------------------------------------- 1 | neurons_per_layer = [2, 2, 1] # specifies NN architecture 2 | scaling_factor = 1 # specifies scaling factor for fixed point numbers 3 | integer_type = "u32" # specifies used integer type 4 | 5 | if(len(neurons_per_layer) < 2 or min(neurons_per_layer) < 1): 6 | print("error, invalid input") 7 | 8 | str_list_main = [] 9 | str_list_inputs = [] 10 | 11 | str_main="function main(" 12 | 13 | str_inputs = "" 14 | 15 | str_list_inputs.append("[main]\n") 16 | 17 | for i in range(neurons_per_layer[0]): 18 | str_main += "w0" + str(i)+": " + integer_type + ", b0" + str(i) + ": " + integer_type + ", " 19 | str_inputs += "w0" + str(i) + ": " + integer_type + " = 0;\n" 20 | str_inputs += "b0" + str(i) + ": " + integer_type + " = 0;\n" 21 | 22 | str_list_inputs.append(str_inputs) 23 | str_inputs = "" 24 | 25 | for i in range(1, len(neurons_per_layer)): # current layer 26 | for j in range(neurons_per_layer[i-1]): # neuron of previous layer 27 | for k in range(neurons_per_layer[i]): # neuron of current layer 28 | str_main += "w" + str(i) + str(j) + str(k) + ": " + integer_type + ", " 29 | str_inputs += "w" + str(i) + str(j) + str(k) + ": " + integer_type + " = 0;\n" 30 | str_main += "b" + str(i) + str(j) + ": " + integer_type + ", " 31 | str_inputs += "b" + str(i) + str(j) + ": " + integer_type + " = 0;\n" 32 | 33 | for i in range(neurons_per_layer[0]): 34 | str_main += "input"+str(i)+": " + integer_type + ", " 35 | str_inputs += "input"+str(i)+": " + integer_type + " = 0;\n" 36 | 37 | str_main = str_main[:-2] 38 | str_list_inputs.append(str_inputs) 39 | 40 | str_inputs = "[registers]\n" 41 | 42 | str_main += ") -> [" + integer_type + "; " + str(neurons_per_layer[-1]) + "] {\n" 43 | 44 | str_list_main.append(str_main) 45 | 46 | line = "" 47 | 48 | for i in range(neurons_per_layer[0]): # input layer 49 | line += "let neuron0"+str(i) + ": " + integer_type + " = w0" + str(i) + " * input" + str(i) + " / " + str(2**scaling_factor) + " + b0" + str(i) + ";\n" 50 | 51 | for layer in 
range(1, len(neurons_per_layer)): # other layers 52 | for i in range(neurons_per_layer[layer]): 53 | line_start = "let neuron" + str(layer) + str(i) + ": " + integer_type + " = rectified_linear_activation(" 54 | for j in range(neurons_per_layer[layer-1]): 55 | line_start += "neuron" + str(layer-1) + str(j) + " * w" + str(layer) + str(j) + str(i) + " / " + str(2**scaling_factor) + " + " 56 | 57 | line_start += "b" + str(layer) + str(i) + ");\n" 58 | line += line_start 59 | 60 | str_list_main.append(line) 61 | 62 | line = "return [" 63 | str_inputs += "r0: [" + integer_type + "; " + str(neurons_per_layer[-1]) + "] = [" 64 | for i in range(neurons_per_layer[-1]): 65 | line += "neuron" + str(len(neurons_per_layer)-1) + str(i) + ", " 66 | str_inputs += "0, " 67 | str_inputs = str_inputs[:-2] + "];\n" 68 | 69 | line = line[:-2] 70 | line += "];}\n\n" 71 | str_list_main.append(line) 72 | str_list_inputs.append(str_inputs) 73 | 74 | str_list_main.append("function rectified_linear_activation(x: u32) -> u32 {\n") 75 | str_list_main.append("let result: u32 = 0;\n") 76 | str_list_main.append("if x > 0 {\n") 77 | str_list_main.append("result = x;\n") 78 | str_list_main.append("}\n") 79 | str_list_main.append("return result;\n") 80 | str_list_main.append("}") 81 | 82 | with open("main.leo", "w+") as file: 83 | file.writelines(str_list_main) 84 | 85 | with open("project.in", "w+") as file: 86 | file.writelines(str_list_inputs) -------------------------------------------------------------------------------- /python-neuralnetwork-generator/main.leo: -------------------------------------------------------------------------------- 1 | function main(w00: u32, b00: u32, w01: u32, b01: u32, w100: u32, w101: u32, b10: u32, w110: u32, w111: u32, b11: u32, w200: u32, b20: u32, w210: u32, b21: u32, input0: u32, input1: u32) -> [u32; 1] { 2 | let neuron00: u32 = w00 * input0 / 2 + b00; 3 | let neuron01: u32 = w01 * input1 / 2 + b01; 4 | let neuron10: u32 = rectified_linear_activation(neuron00 * w100 / 2 + neuron01 * w110 / 2 + b10); 5 | let neuron11: u32 = rectified_linear_activation(neuron00 * w101 / 2 + neuron01 * w111 / 2 + b11); 6 | let neuron20: u32 = rectified_linear_activation(neuron10 * w200 / 2 + neuron11 * w210 / 2 + b20); 7 | return [neuron20];} 8 | 9 | function rectified_linear_activation(x: u32) -> u32 { 10 | let result: u32 = 0; 11 | if x > 0 { 12 | result = x; 13 | } 14 | return result; 15 | } -------------------------------------------------------------------------------- /python-neuralnetwork-generator/project.in: -------------------------------------------------------------------------------- 1 | [main] 2 | w00: u32 = 0; 3 | b00: u32 = 0; 4 | w01: u32 = 0; 5 | b01: u32 = 0; 6 | w100: u32 = 0; 7 | w101: u32 = 0; 8 | b10: u32 = 0; 9 | w110: u32 = 0; 10 | w111: u32 = 0; 11 | b11: u32 = 0; 12 | w200: u32 = 0; 13 | b20: u32 = 0; 14 | w210: u32 = 0; 15 | b21: u32 = 0; 16 | input0: u32 = 0; 17 | input1: u32 = 0; 18 | [registers] 19 | r0: [u32; 1] = [0]; 20 | --------------------------------------------------------------------------------
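The Python scripts and the generated Leo programs above share one fixed-point convention: floats are encoded as `int(value * 2**7)`, each product of two encoded values is divided by 128 to return to the same scale, and the bias (also at scale 128) is added before the ReLU. The sketch below restates that convention in plain Python; the helper names (`to_fixed`, `fixed_mul`, `relu_fixed`) and the demo numbers are illustrative and do not appear in the repository.

```python
# A minimal sketch of the fixed-point scheme used by 04_extract_inputs.py and the generated Leo circuits.
SCALE = 2**7  # 128 -- the factor behind int(value * 2**7) in Python and the "/ 128" divisions in Leo

def to_fixed(x: float) -> int:
    # encode a float at scale 128, like int(value * 2**7) in 04_extract_inputs.py
    return int(x * SCALE)

def fixed_mul(a: int, b: int) -> int:
    # a*b sits at scale 128*128; one truncating division brings it back to scale 128,
    # mirroring the "neuron * weight / 128" pattern in the generated main.leo
    return int(a * b / SCALE)

def relu_fixed(x: int) -> int:
    # same behaviour as rectified_linear_activation in main.leo
    return x if x > 0 else 0

# one hidden neuron evaluated in floats and in fixed point
inputs, weights, bias = [0.5, 1.25], [0.07, 0.13], 0.05
float_result = max(sum(x * w for x, w in zip(inputs, weights)) + bias, 0.0)
fixed_result = relu_fixed(sum(fixed_mul(to_fixed(x), to_fixed(w)) for x, w in zip(inputs, weights)) + to_fixed(bias))
print(float_result, fixed_result / SCALE)  # 0.2475 vs. 0.234375 -- equal up to quantization error
```

`fixed_mul` uses `int(a * b / SCALE)` so that intermediate results are truncated toward zero; Python's `//` operator would floor instead, which changes results by one unit for negative products.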