├── .gitignore ├── CNN.cpp ├── CNNpy.py ├── Cnn.py ├── LICENSE ├── README.md ├── Test_FullCon.py ├── bitstring.h ├── cnn_comparison.py ├── fc1_weight_upper_half.schem ├── fc2_weight_upper_half.schem ├── fc3_weight.schem ├── fnn_comparison.py ├── im.py ├── nbtfillweight.py ├── netpy.py ├── network.cpp ├── neuron.h ├── test_white_board.schem ├── weight_analysis_cnn.py ├── weigtht_analysis_fnn.py └── write_number.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | .history/ 131 | data/ 132 | FullConnect_Mnist/ 133 | CnnConnect_Mnist/ 134 | weight_data/ 135 | *.npy 136 | *.txt 137 | *.zip 138 | weight_data_cnn/ -------------------------------------------------------------------------------- /CNN.cpp: -------------------------------------------------------------------------------- 1 | #include"bitstring.h" 2 | #include"neuron.h" 3 | #include <iostream> 4 | #include <fstream> 5 | #include <cstdio> 6 | using namespace std; 7 | 8 | const int bitstring_size = 256; 9 | double convolution_result[49]; 10 | double input[15][15]; 11 | double kernel[3][3]; 12 | 13 | class identifier_CNN 14 | { 15 | public: 16 | identifier_CNN() 17 | :convolution_result{}, 18 | input_layer(convolution_result), 19 | hidden_layer1(input_layer, "CNN weights/fc1_weight.txt", 2, 10, 1, 128), 20 | hidden_layer2(hidden_layer1, "CNN weights/fc2_weight.txt", 2, 10, 1, 64), 21 | output_layer(hidden_layer2, "CNN weights/fc3_weight.txt", 2, 10, 1, 64) 22 | { 23 | input_array_2d(kernel, "CNN weights/conv1_weight.txt", 3, 1.0/8, 0.5); 24 | for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) cout << kernel[i][j]*16 << " "; cout << endl; 25 | // for (int i = 0; i < 3; i++) for (int j = 0; j < 3; j++) if(kernel[i][j] < 0) kernel[i][j] = 0; 26 | } 27 | stochastic_computing_neuron_layer input_layer; 28 | stochastic_computing_neuron_layer hidden_layer1; 29 | stochastic_computing_neuron_layer hidden_layer2; 30 | stochastic_computing_neuron_layer output_layer; 31 | double input_image[16][16]; 32 | double kernel[3][3]; 33 | double convolution_result[49]; 34 | int identify(const char* filename) 35 | { 36 | input_array_2d(input_image, filename, 16); 37 | to_zero_or_one_2d(input_image, 16, 0.05); 38 | conv_and_relu(); 39 | input_layer.update(convolution_result); 40 | hidden_layer1.update(input_layer); 41 | hidden_layer2.update(hidden_layer1); 42 | output_layer.update(hidden_layer2); 43 | //output_layer.output_value(); 44 | return output_layer.max_index(); 45 | } 46 | void conv_and_relu() 47 | { 48 | for (int i = 0; i < 7; i++) { 49 | for (int j = 0; j < 7; j++) { 50 | int cij = i * 7 + j, i2 = i<<1, j2 = j<<1; 51 | convolution_result[cij] = 0; 52 | for (int k = 0; k < 3; k++) { 53 | for (int l = 0; l < 3; l++) { 54 | convolution_result[cij] += kernel[k][l] * input_image[i2 + k][j2 + l]; 55 | } 56 | } 57 | if (convolution_result[cij] < 0) convolution_result[cij] = 0; 58 | if (convolution_result[cij] > 1) convolution_result[cij] = 1; 59 | //cout << convolution_result[cij] * 16 << " "; 60 | } 61 | //cout << endl; 62 | } 63 | } 64 | }; 65 | 66 | int identify(const char* filename) 67 | { 68 | //to_zero_or_one(input_layer_array, 256); 69 | stochastic_computing_neuron_layer input_layer(convolution_result); 70 | stochastic_computing_neuron_layer hidden_layer1(input_layer, "fc1_weight.txt"); 71 | stochastic_computing_neuron_layer hidden_layer2(hidden_layer1, "fc2_weight.txt"); 72 | stochastic_computing_neuron_layer output_layer(hidden_layer2,
"fc3_weight.txt"); 73 | return output_layer.max_index(); 74 | } 75 | 76 | int answer[60000]; 77 | int main() 78 | { 79 | ifstream fin("value_list.txt"); 80 | for (int i = 0; i < 10000; i++) { 81 | double x; 82 | fin >> x; 83 | answer[i] = static_cast(x); 84 | } 85 | 86 | identifier_CNN id; 87 | char filename[100]; 88 | char fmt[100] = "data_figures/fig%d.txt"; 89 | int correct_count = 0; 90 | for (int i = 0; i < 2000; i++) { 91 | sprintf(filename, fmt, i); 92 | int number = id.identify(filename); 93 | //int number = id.identify("test_written_number.txt"); 94 | correct_count += (number == answer[i]); 95 | cout << number << " " << answer[i] << " " << ((number == answer[i]) ? 'T' : 'F') << "\n"; 96 | } 97 | cout << correct_count << endl; 98 | // cout << id.identify("data_figures/fig0.txt") << endl; 99 | // id.output_layer.output_value(); 100 | return 0; 101 | } -------------------------------------------------------------------------------- /CNNpy.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | import numpy as np 3 | from numpy import dot, tanh 4 | 5 | def conv_2d_single_kernel(input_data, kernel, stride): 6 | """单个卷积核进行卷积,得到单个输出。 7 | 由于是学习卷积实现原理这里简单处理,padding 是自动补全, 8 | 相当于tf 里面的 "SAME"。 9 | Args: 10 | input_data: 卷积层输入,是一个 shape 为 [h, w] 11 | 的 np.array。 12 | kernel: 卷积核大小,形式如 [k_h, k_w] 13 | stride: stride, list [s_h, s_w]。 14 | Return: 15 | out: 卷积结果 16 | """ 17 | h, w = input_data.shape 18 | kernel_h, kernel_w = kernel.shape 19 | 20 | stride_h, stride_w = stride 21 | 22 | # padding_h = (kernel_h-1) // 2 23 | # padding_w = (kernel_w-1) // 2 24 | # padding_data = np.zeros((h+padding_h*2, w+padding_w*2)) 25 | # padding_data[padding_h:-padding_h, padding_w:-padding_w] = input_data 26 | 27 | out = np.zeros((h//stride_h, w//stride_w)) 28 | for idx_h, i in enumerate(range(0, h-kernel_h+1, stride_h)): 29 | for idx_w, j in enumerate(range(0, w-kernel_w+1, stride_w)): 30 | window = input_data[i:i+kernel_h, j:j+kernel_w] 31 | out[idx_h, idx_w] = np.sum(window*kernel) 32 | return out 33 | 34 | def cut_off_bipolar(arr): 35 | arr[arr < -1] = -1 36 | arr[arr > 1] = 1 37 | 38 | if __name__ == "__main__": 39 | kernel = np.array([[4,7,2],[5,5,1],[-2,-1,0]])/16 40 | #print(np.round(kernel*16)) 41 | #fig_data = np.genfromtxt("data_figures/fig4.txt") 42 | fig_data = np.genfromtxt("test_written_number.txt") 43 | #fig_data = (fig_data > 0.5) 44 | fig_data = np.ceil(fig_data) 45 | # plt.imshow(fig_data, cmap='Greys') 46 | # plt.show() 47 | 48 | conv_result = conv_2d_single_kernel(fig_data[0:15, 0:15], kernel, (2, 2)) 49 | #print(conv_result[1:7,1:7]) 50 | conv_result[conv_result < 0] = 0 51 | conv_result[conv_result > 1] = 1 52 | relu_output = np.reshape(conv_result, (49, 1)) 53 | 54 | relu_output = relu_output 55 | 56 | hidden_layer1_linear_trans = np.genfromtxt("CNN weights/fc1_weight.txt") 57 | #cut_off_bipolar(hidden_layer1_linear_trans) 58 | hidden_layer2_linear_trans = np.genfromtxt("CNN weights/fc2_weight.txt") 59 | #cut_off_bipolar(hidden_layer2_linear_trans) 60 | output_layer_linear_trans = np.genfromtxt("CNN weights/fc3_weight.txt") 61 | #cut_off_bipolar(output_layer_linear_trans) 62 | 63 | hidden_layer1_result = dot(hidden_layer1_linear_trans, relu_output) 64 | hidden_layer1_output = tanh(hidden_layer1_result) 65 | #hidden_layer1_output = np.array([11,11,2,3,0,7,11,11,9,11,15,14,8,6,6, 1,4,7,4,2,10,4,10,12,6,7,5,7,1,12])/8-1 66 | 67 | hidden_layer2_result = dot(hidden_layer2_linear_trans, hidden_layer1_output) 68 | 
hidden_layer2_output = tanh(hidden_layer2_result) 69 | 70 | #print(np.round((relu_output.transpose())*16)) 71 | print(np.round((hidden_layer2_output.transpose() + 1)*8)) 72 | hidden_layer2_output = np.array([3,10,6,8,9,7,14,9,3,6,2,7,12,12,2, 13,12,6,8,1,9,6,3,4,6,3,11,7,9,9])/8-1 73 | #hidden_layer2_output = np.round((hidden_layer2_output + 1)*8)/8-1 74 | #print(np.sum(output_layer_linear_trans[3]*hidden_layer2_output.transpose())) 75 | 76 | #np.savetxt("CNN weights/fc3_input.txt", hidden_layer2_output.transpose()) 77 | 78 | output_layer_result = dot(output_layer_linear_trans, hidden_layer2_output) 79 | output_layer_tanh = tanh(output_layer_result) 80 | print(output_layer_tanh.transpose()) -------------------------------------------------------------------------------- /Cnn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | from torchvision import datasets, transforms 5 | from torch.autograd import Variable 6 | from torch.utils import data 7 | from torch.utils.tensorboard import SummaryWriter 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | import os 11 | import shutil 12 | import sys 13 | 14 | 15 | # train function 16 | def train(module, train_data, optimizer_function, epoch_num): 17 | for batch_idx, (t_data, target) in enumerate(train_data): 18 | # data -> binary 19 | # t_data = t_data.view(t_data.size(0), -1) 20 | t_data_binary = np.ceil(t_data.numpy()) 21 | t_data = torch.from_numpy(t_data_binary) 22 | t_data, target = Variable(t_data).to(device), Variable(target).to(device) 23 | optimizer_function.zero_grad() 24 | output = module(t_data) 25 | loss = Loss_function(output, target) 26 | loss.backward() 27 | optimizer_function.step() 28 | if batch_idx % 300 == 0: 29 | print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( 30 | epoch_num, batch_idx * len(t_data), len(train_data.dataset), 31 | 100. * batch_idx / len(train_data), loss.item())) 32 | 33 | weight_control = True 34 | if weight_control: 35 | fc3_weight = net.state_dict()['fc3.weight'].numpy() 36 | max_fc3_weight = np.max(np.abs(fc3_weight)) 37 | fc2_weight = net.state_dict()['fc2.weight'].numpy() 38 | max_fc2_weight = np.max(np.abs(fc2_weight)) 39 | fc1_weight = net.state_dict()['fc1.weight'].numpy() 40 | max_fc1_weight = np.max(np.abs(fc1_weight)) 41 | if max_fc1_weight >= 1: 42 | net.state_dict()['fc1.weight'].copy_(torch.from_numpy(fc1_weight/max_fc1_weight)) 43 | if max_fc2_weight >= 1: 44 | net.state_dict()['fc2.weight'].copy_(torch.from_numpy(fc2_weight/max_fc2_weight)) 45 | if max_fc3_weight >= 1: 46 | net.state_dict()['fc3.weight'].copy_(torch.from_numpy(fc3_weight/max_fc3_weight)) 47 | 48 | # test function 49 | # def test(model, test_data, epoch_num): 50 | def test(model, test_data, epoch_num, writer): 51 | correct = 0 52 | with torch.no_grad(): 53 | for t_data, target in test_data: 54 | # t_data = t_data.view(t_data.size(0), -1) 55 | t_data_binary = np.ceil(t_data.numpy()) 56 | t_data = torch.from_numpy(t_data_binary) 57 | t_data, target = Variable(t_data), Variable(target) 58 | output = model(t_data) 59 | pred = output.max(1, keepdim=True)[1] 60 | correct += pred.eq(target.view_as(pred)).sum().item() 61 | 62 | print("\nTest set: Epoch:{} Accuracy: {}/{} ({:.2f}%) \n".format(epoch_num, correct, len(test_data.dataset), 63 | 100. * correct / len(test_data.dataset))) 64 | # record data in tensorboard log 65 | writer.add_scalar('Accuracy', 100. 
* correct / len(test_data.dataset), epoch_num) 66 | 67 | # Network structure 68 | class Net(nn.Module): 69 | 70 | def __init__(self): 71 | super(Net, self).__init__() 72 | self.conv1 = nn.Conv2d(1, 1, kernel_size=3, stride=2, bias=False) 73 | for p in self.parameters(): 74 | p.requires_grad=False 75 | # self.pool = nn.MaxPool2d(2, 2) 76 | self.fc1 = nn.Linear(7*7, 30, bias=False) 77 | self.fc2 = nn.Linear(30, 30, bias=False) 78 | self.fc3 = nn.Linear(30, 10, bias=False) 79 | 80 | # connect inputs and outputs, size: 15x15 -> 7x7 -> 30 -> 30 -> 10 81 | def forward(self, x): 82 | x = torch.relu(self.conv1(x)) 83 | # x = self.pool(x) 84 | x = x.view(-1, 7*7) 85 | x = torch.tanh(self.fc1(x)) 86 | x = torch.tanh(self.fc2(x)) 87 | x = torch.tanh(self.fc3(x)) 88 | return x 89 | 90 | 91 | # hyper parameters 92 | # size_inputs = 16*16 93 | # size_hidden1 = 32 94 | # size_hidden2 = 32 95 | # size_outputs = 10 96 | learning_rate = 0.01 #0.005 97 | BATCH_SIZE = 5 98 | EPOCHS = 100 99 | 100 | 101 | if __name__ == '__main__': 102 | # data preprocessing 103 | train_transformer = transforms.Compose([ 104 | transforms.Resize(16), # down sampling 105 | transforms.ToTensor() 106 | ]) 107 | 108 | # data loading 109 | train_loader = data.DataLoader( 110 | datasets.MNIST('data', train=True, download=True, transform=train_transformer), 111 | batch_size=BATCH_SIZE, shuffle=True) 112 | 113 | test_loader = data.DataLoader( 114 | datasets.MNIST('data', train=False, download=True, transform=train_transformer), 115 | batch_size=BATCH_SIZE, shuffle=True) 116 | 117 | # compare images (28x28 vs 16x16) 118 | raw_train_data = data.DataLoader( 119 | datasets.MNIST('data', train=True, download=True, transform=transforms.ToTensor()), 120 | batch_size=BATCH_SIZE, shuffle=False) 121 | transform_train_data = data.DataLoader( 122 | datasets.MNIST('data', train=True, download=True, transform=train_transformer), 123 | batch_size=BATCH_SIZE, shuffle=False) 124 | 125 | # check for data 126 | show_fig = False 127 | if show_fig: 128 | for batch_idx, (t_data, target) in enumerate(raw_train_data): 129 | t_data = t_data.view(28, 28) 130 | t_data_binary = np.ceil(t_data.numpy()) 131 | t_data_binary = torch.from_numpy(t_data_binary) 132 | # print(t_data) 133 | if batch_idx < 3: 134 | plt.figure(f'raw data {batch_idx}') 135 | plt.imshow(t_data) 136 | 137 | target_number_list = [] 138 | for batch_idx, (t_data, target) in enumerate(transform_train_data): 139 | t_data = t_data.view(16, 16) 140 | target_number_list.append(target) 141 | # save data as a .txt file for zehan's test 142 | if not os.path.exists('data_figures'): 143 | os.mkdir('data_figures') 144 | t_data_1 = t_data.numpy() 145 | np.savetxt(f'data_figures/fig{batch_idx}.txt', t_data_1) 146 | # print(t_data_1) 147 | t_data_binary = np.ceil(t_data.numpy()) 148 | t_data_binary = torch.from_numpy(t_data_binary) 149 | if batch_idx < 3: 150 | plt.figure(f'transformed data {batch_idx}') 151 | plt.imshow(t_data) 152 | plt.figure(f'data_binary {batch_idx}') 153 | plt.imshow(t_data_binary) 154 | # print(t_data_binary) 155 | plt.show() 156 | np.savetxt('value_list.txt', np.array(target_number_list)) 157 | sys.exit(-1) 158 | 159 | # cuda acceleration 160 | device = "cuda" if torch.cuda.is_available() else "cpu" 161 | device = 'cpu' # for MNIST recognition the GPU is slower than the CPU 162 | print(f"Using {device} device") 163 | 164 | # create a network instance; the shape of the network can be changed in the CNN network definition 165 | net = Net().to(device) 166 | print(net) 167 | print(net.state_dict().keys())
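 # Note: the max/min lists below, together with the weight_control block in train(), keep every weight inside [-1, 1]; each weight is later encoded as a bipolar stochastic bitstream (see bitstring.h and nbtfillweight.py), and a bipolar stream can only represent values in that range.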
168 | 169 | # record the weight data in .npy form 170 | conv1_max_list, conv1_min_list = [], [] 171 | fc1_max_list, fc1_min_list = [], [] 172 | fc2_max_list, fc2_min_list = [], [] 173 | fc3_max_list, fc3_min_list = [], [] 174 | 175 | # initial weight distribution 176 | nn.init.normal_(net.state_dict()['conv1.weight'], mean=0, std=0.1) 177 | nn.init.normal_(net.state_dict()['fc1.weight'], mean=0, std=0.25) 178 | nn.init.normal_(net.state_dict()['fc2.weight'], mean=0, std=0.2) 179 | nn.init.normal_(net.state_dict()['fc3.weight'], mean=0, std=0.05) 180 | 181 | # additional test from zehan 182 | constant_weights = np.genfromtxt('constant_weight_conv1_zehan.txt') 183 | constant_weights = torch.from_numpy(np.multiply(constant_weights, 1/16)) 184 | print(constant_weights) 185 | net.state_dict()['conv1.weight'].copy_(constant_weights) 186 | 187 | conv1_weight = net.state_dict()['conv1.weight'].numpy() 188 | fc1_weight = net.state_dict()['fc1.weight'].numpy() 189 | fc2_weight = net.state_dict()['fc2.weight'].numpy() 190 | fc3_weight = net.state_dict()['fc3.weight'].numpy() 191 | 192 | print(f'conv1 max: {np.max(conv1_weight)} min: {np.min(conv1_weight)}') 193 | print(f'fc1 max: {np.max(fc1_weight)} min: {np.min(fc1_weight)}') 194 | print(f'fc2 max: {np.max(fc2_weight)} min: {np.min(fc2_weight)}') 195 | print(f'fc3 max: {np.max(fc3_weight)} min: {np.min(fc3_weight)}') 196 | 197 | conv1_max_list.append(np.max(conv1_weight)) 198 | conv1_min_list.append(np.min(conv1_weight)) 199 | fc1_max_list.append(np.max(fc1_weight)) 200 | fc1_min_list.append(np.min(fc1_weight)) 201 | fc2_max_list.append(np.max(fc2_weight)) 202 | fc2_min_list.append(np.min(fc2_weight)) 203 | fc3_max_list.append(np.max(fc3_weight)) 204 | fc3_min_list.append(np.min(fc3_weight)) 205 | 206 | # loss function and optimizer 207 | Loss_function = nn.CrossEntropyLoss() 208 | optimizer = optim.SGD(net.parameters(), lr=learning_rate) 209 | 210 | # tensorboard 211 | tensorlog_path = 'CnnConnect_Mnist' 212 | if os.path.exists(tensorlog_path): 213 | shutil.rmtree(tensorlog_path) 214 | writer = SummaryWriter(tensorlog_path) 215 | 216 | for epoch in range(1, EPOCHS + 1): 217 | train(module=net, train_data=train_loader, optimizer_function=optimizer, epoch_num=epoch) 218 | # test(model=net, test_data=test_loader, epoch_num=epoch) 219 | test(model=net, test_data=test_loader, epoch_num=epoch, writer=writer) 220 | print(net.state_dict().keys()) 221 | conv1_weight = net.state_dict()['conv1.weight'].numpy().reshape(1, -1) 222 | fc1_weight = net.state_dict()['fc1.weight'].numpy() 223 | fc2_weight = net.state_dict()['fc2.weight'].numpy() 224 | fc3_weight = net.state_dict()['fc3.weight'].numpy() # refresh fc3 as well, so the fc3 files saved below are not stale 225 | data_save_path = 'weight_data_cnn' 226 | if not os.path.exists(data_save_path): 227 | os.makedirs(data_save_path) 228 | # save data 229 | torch.save(net, f'{data_save_path}/epoch_{epoch}') 230 | np.save(f'{data_save_path}\\hidden1', conv1_weight) 231 | np.save(f'{data_save_path}\\hidden2', fc1_weight) 232 | np.save(f'{data_save_path}\\fc2_weight', fc2_weight) 233 | print(conv1_weight) 234 | np.savetxt('conv1_weight_0617_2022.txt', conv1_weight) 235 | np.savetxt('fc1_weight_0617_2022.txt', fc1_weight) 236 | np.savetxt('fc2_weight_0617_2022.txt', fc2_weight) 237 | np.savetxt('fc3_weight_0617_2022.txt', fc3_weight) 238 | # print(f'hidden1 max: {np.max(conv1_weight)} min: {np.min(conv1_weight)}') 239 | # print(f'hidden2 max: {np.max(fc1_weight)} min: {np.min(fc1_weight)}') 240 | # print(f'fc2_weight max: {np.max(fc2_weight)} min: {np.min(fc2_weight)}') 241 | # save data 242 |
conv1_max_list.append(np.max(conv1_weight)) 243 | conv1_min_list.append(np.min(conv1_weight)) 244 | fc1_max_list.append(np.max(fc1_weight)) 245 | fc1_min_list.append(np.min(fc1_weight)) 246 | fc2_max_list.append(np.max(fc2_weight)) 247 | fc2_min_list.append(np.min(fc2_weight)) 248 | fc3_max_list.append(np.max(fc3_weight)) 249 | fc3_min_list.append(np.min(fc3_weight)) 250 | 251 | print(f'conv1 max: {np.max(conv1_weight)} min: {np.min(conv1_weight)}') 252 | print(f'fc1 max: {np.max(fc1_weight)} min: {np.min(fc1_weight)}') 253 | print(f'fc2 max: {np.max(fc2_weight)} min: {np.min(fc2_weight)}') 254 | print(f'fc3 max: {np.max(fc3_weight)} min: {np.min(fc3_weight)}') 255 | 256 | # save the ranges of the weight data 257 | np.savetxt('conv1_min_list.txt', conv1_min_list) 258 | np.savetxt('fc1_min_list.txt', fc1_min_list) 259 | np.savetxt('fc2_min_list.txt', fc2_min_list) 260 | np.savetxt('conv1_max_list.txt', conv1_max_list) 261 | np.savetxt('fc1_max_list.txt', fc1_max_list) 262 | np.savetxt('fc2_max_list.txt', fc2_max_list) 263 | np.savetxt('fc3_min_list.txt', fc3_min_list) 264 | np.savetxt('fc3_max_list.txt', fc3_max_list) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Lemoon 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # StochasticNet 2 | 3 | A neural network built on stochastic computing for MNIST recognition. This project is part of the redstone convolutional neural network in Minecraft. We built the world's first redstone convolutional neural network, whose task is the recognition of 15×15 hand-written digits. With LeNet-5 as its architecture, the network achieves an accuracy of up to 80%. We used an unconventional computational method, stochastic computing, to realize the network, making it much simpler in design and layout than traditional full-precision computing. Theoretically, recognition takes 5 minutes per figure. However, limited by the computational capacity of Minecraft, the real running time exceeds 20 minutes. Nevertheless, it is a breakthrough in redstone digital circuits, and it may inspire real-world physical neural networks.
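
For readers unfamiliar with stochastic computing, the sketch below (plain NumPy, not part of the Minecraft build) shows the two ideas the network relies on: a value in [-1, 1] is encoded as the bias of a random bitstream (bipolar coding), and multiplication then reduces to a bitwise XNOR.

```python
import numpy as np

rng = np.random.default_rng(0)
N = 4096  # bitstream length; the estimate sharpens as N grows

def encode(x):
    # bipolar coding: P(bit = 1) = (x + 1) / 2 for x in [-1, 1]
    return rng.random(N) < (x + 1) / 2

def decode(bits):
    # recover x from the fraction of ones
    return 2 * bits.mean() - 1

a, b = 0.5, -0.25
product = ~(encode(a) ^ encode(b))  # bipolar multiplication is XNOR
print(decode(product))              # close to a * b = -0.125, up to sampling noise
```

The C++ model in CNN.cpp / bitstring.h uses the same encoding, plus saturating-counter state machines (Stanh / NStanh) to approximate the tanh activations.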
4 | 5 | We are sorry that we cannot provide properly organized, clear code due to limited time and ability. 6 | 7 | [Video link on bilibili.](https://www.bilibili.com/video/BV1yv4y1u7ZX/?spm_id_from=333.788.recommend_more_video.6&vd_source=923ade385facaf796f5897884fb921eb) 8 | 9 | Stochastic computing applied to hand-written digit recognition. We built the world's first redstone convolutional neural network; its task is to recognize 15×15 hand-written digits. The network uses the LeNet-5 architecture and reaches an accuracy of up to 80%. We implemented the neural network with an unconventional computing method, stochastic computing, which makes the design and layout much simpler than traditional full-precision computing; a single recognition theoretically takes only 5 minutes. Limited by Minecraft's computing power, the actual recognition time exceeds 20 minutes. Even so, this is still a major breakthrough in redstone digital circuits, and it may also inspire hardware neural networks in the real world. 10 | 11 | Because of limited time and energy, we are sorry that we cannot provide clear, easy-to-browse project code. 12 | 13 | [Video link](https://www.bilibili.com/video/BV1yv4y1u7ZX/?spm_id_from=333.788.recommend_more_video.6&vd_source=923ade385facaf796f5897884fb921eb) 14 | -------------------------------------------------------------------------------- /Test_FullCon.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.optim as optim 4 | from torchvision import datasets, transforms 5 | from torch.autograd import Variable 6 | from torch.utils import data 7 | from torch.utils.tensorboard import SummaryWriter 8 | import matplotlib.pyplot as plt 9 | import numpy as np 10 | import os 11 | import shutil 12 | import cv2 13 | import sys 14 | 15 | 16 | # train function 17 | def train(module, train_data, optimizer_function, epoch_num): 18 | for batch_idx, (t_data, target) in enumerate(train_data): 19 | # data -> binary 20 | t_data = t_data.view(t_data.size(0), -1) 21 | t_data_binary = np.ceil(t_data.numpy()) 22 | t_data = torch.from_numpy(t_data_binary) 23 | t_data, target = Variable(t_data).to(device), Variable(target).to(device) 24 | optimizer_function.zero_grad() 25 | output = module(t_data) 26 | loss = Loss_function(output, target) 27 | loss.backward() 28 | optimizer_function.step() 29 | if batch_idx % 300 == 0: 30 | print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( 31 | epoch_num, batch_idx * len(t_data), len(train_data.dataset), 32 | 100. * batch_idx / len(train_data), loss.item())) 33 | 34 | 35 | # test function 36 | def test(model, test_data, epoch_num, writer): 37 | correct = 0 38 | with torch.no_grad(): 39 | for t_data, target in test_data: 40 | t_data = t_data.view(t_data.size(0), -1) 41 | t_data_binary = np.ceil(t_data.numpy()) 42 | t_data = torch.from_numpy(t_data_binary) 43 | t_data, target = Variable(t_data), Variable(target) 44 | output = model(t_data) 45 | pred = output.max(1, keepdim=True)[1] 46 | correct += pred.eq(target.view_as(pred)).sum().item() 47 | 48 | print("\nTest set: Epoch:{} Accuracy: {}/{} ({:.2f}%) \n".format(epoch_num, correct, len(test_data.dataset), 49 | 100. * correct / len(test_data.dataset))) 50 | # record data in tensorboard log 51 | writer.add_scalar('Accuracy', 100.
* correct / len(test_data.dataset), epoch_num) 52 | 53 | # Network structure 54 | class Net(nn.Module): 55 | 56 | def __init__(self, n_feature, n_hidden1, n_hidden2, n_output): 57 | super(Net, self).__init__() 58 | self.hidden1 = nn.Linear(n_feature, n_hidden1, bias=False) 59 | self.hidden2 = nn.Linear(n_hidden1, n_hidden2, bias=False) 60 | self.out = nn.Linear(n_hidden2, n_output, bias=False) 61 | 62 | # connect inputs and outputs 63 | def forward(self, x): 64 | x = torch.tanh(self.hidden1(x)) 65 | x = torch.tanh(self.hidden2(x)) 66 | x = torch.tanh(self.out(x)) 67 | return x 68 | 69 | 70 | # hyper parameters 71 | size_inputs = 16*16 72 | size_hidden1 = 32 73 | size_hidden2 = 32 74 | size_outputs = 10 75 | learning_rate = 0.01 76 | BATCH_SIZE = 1 77 | EPOCHS = 100 78 | 79 | 80 | if __name__ == '__main__': 81 | # data preprocessing 82 | train_transformer = transforms.Compose([ 83 | transforms.Resize(16), # down sampling 84 | transforms.ToTensor() 85 | ]) 86 | 87 | # data loading 88 | train_loader = data.DataLoader( 89 | datasets.MNIST('data', train=True, download=True, transform=train_transformer), 90 | batch_size=BATCH_SIZE, shuffle=True) 91 | 92 | test_loader = data.DataLoader( 93 | datasets.MNIST('data', train=False, download=True, transform=train_transformer), 94 | batch_size=BATCH_SIZE, shuffle=True) 95 | 96 | # compare images (28x28 vs 16x16) 97 | raw_train_data = data.DataLoader( 98 | datasets.MNIST('data', train=True, download=True, transform=transforms.ToTensor()), 99 | batch_size=BATCH_SIZE, shuffle=False) 100 | transform_train_data = data.DataLoader( 101 | datasets.MNIST('data', train=True, download=True, transform=train_transformer), 102 | batch_size=BATCH_SIZE, shuffle=False) 103 | 104 | # check for data 105 | show_fig = False 106 | if show_fig: 107 | for batch_idx, (t_data, target) in enumerate(raw_train_data): 108 | t_data = t_data.view(28, 28) 109 | t_data_binary = np.ceil(t_data.numpy()) 110 | t_data_binary = torch.from_numpy(t_data_binary) 111 | # print(t_data) 112 | if batch_idx < 3: 113 | plt.figure(f'raw data {batch_idx}') 114 | plt.imshow(t_data) 115 | 116 | target_number_list = [] 117 | for batch_idx, (t_data, target) in enumerate(transform_train_data): 118 | print(t_data) 119 | t_data = t_data.view(16, 16) 120 | target_number_list.append(target) 121 | # save data as a .txt file for zehan's test 122 | if not os.path.exists('data_figures'): 123 | os.mkdir('data_figures') 124 | t_data_1 = t_data.numpy() 125 | np.savetxt(f'data_figures/fig{batch_idx}.txt', t_data_1) 126 | # print(t_data_1) 127 | t_data_binary = np.ceil(t_data.numpy()) 128 | t_data_binary = torch.from_numpy(t_data_binary) 129 | if batch_idx < 3: 130 | plt.figure(f'transformed data {batch_idx}') 131 | plt.imshow(t_data) 132 | plt.figure(f'data_binary {batch_idx}') 133 | plt.imshow(t_data_binary) 134 | print(t_data_binary) 135 | plt.show() 136 | np.savetxt('value_list.txt', np.array(target_number_list)) 137 | sys.exit(-1) 138 | 139 | # cuda acceleration 140 | device = "cuda" if torch.cuda.is_available() else "cpu" 141 | device = 'cpu' # for MNIST recognition the GPU is slower than the CPU 142 | print(f"Using {device} device") 143 | 144 | # create a network sample 145 | net = Net(n_feature=size_inputs, n_hidden1=size_hidden1, n_hidden2=size_hidden2, n_output=size_outputs).to(device) 146 | print(net.state_dict().keys()) 147 | 148 | # record the weight data in .npy form 149 | hid1_max_list, hid1_min_list = [], [] 150 | hid2_max_list, hid2_min_list = [], [] 151 | out_max_list, out_min_list = [], [] 152 |
nn.init.normal_(net.state_dict()['hidden1.weight'], mean=0, std=0.1) 153 | nn.init.normal_(net.state_dict()['hidden2.weight'], mean=0, std=0.1) 154 | nn.init.normal_(net.state_dict()['out.weight'], mean=0, std=0.1) 155 | hidden1_weight = net.state_dict()['hidden1.weight'].numpy() 156 | hidden2_weight = net.state_dict()['hidden2.weight'].numpy() 157 | out_weight = net.state_dict()['out.weight'].numpy() 158 | print(f'hidden1 max: {np.max(hidden1_weight)} min: {np.min(hidden1_weight)}') 159 | print(f'hidden2 max: {np.max(hidden2_weight)} min: {np.min(hidden2_weight)}') 160 | print(f'out_weight max: {np.max(out_weight)} min: {np.min(out_weight)}') 161 | hid1_max_list.append(np.max(hidden1_weight)) 162 | hid1_min_list.append(np.min(hidden1_weight)) 163 | hid2_max_list.append(np.max(hidden2_weight)) 164 | hid2_min_list.append(np.min(hidden2_weight)) 165 | out_max_list.append(np.max(out_weight)) 166 | out_min_list.append(np.min(out_weight)) 167 | 168 | # loss function and optimizer 169 | Loss_function = nn.CrossEntropyLoss() 170 | optimizer = optim.SGD(net.parameters(), lr=learning_rate) 171 | 172 | # tensorboard 173 | tensorlog_path = 'FullConnect_Mnist' 174 | if os.path.exists(tensorlog_path): 175 | shutil.rmtree(tensorlog_path) 176 | writer = SummaryWriter(tensorlog_path) 177 | for epoch in range(1, EPOCHS + 1): 178 | train(module=net, train_data=train_loader, optimizer_function=optimizer, epoch_num=epoch) 179 | test(model=net, test_data=test_loader, epoch_num=epoch, writer=writer) 180 | print(net.state_dict().keys()) 181 | hidden1_weight = net.state_dict()['hidden1.weight'].numpy() 182 | hidden2_weight = net.state_dict()['hidden2.weight'].numpy() 183 | out_weight = net.state_dict()['out.weight'].numpy() 184 | 185 | data_save_path = 'weight_data' 186 | if not os.path.exists(data_save_path): 187 | os.makedirs(data_save_path) 188 | # save data 189 | torch.save(net, f'{data_save_path}/epoch_{epoch}') 190 | # np.save(f'{data_save_path}\\hidden1', hidden1_weight) 191 | # np.save(f'{data_save_path}\\hidden2', hidden2_weight) 192 | # np.save(f'{data_save_path}\\out_weight', out_weight) 193 | # print(f'hidden1 max: {np.max(hidden1_weight)} min: {np.min(hidden1_weight)}') 194 | # print(f'hidden2 max: {np.max(hidden2_weight)} min: {np.min(hidden2_weight)}') 195 | # print(f'out_weight max: {np.max(out_weight)} min: {np.min(out_weight)}') 196 | # save data 197 | hid1_max_list.append(np.max(hidden1_weight)) 198 | hid1_min_list.append(np.min(hidden1_weight)) 199 | hid2_max_list.append(np.max(hidden2_weight)) 200 | hid2_min_list.append(np.min(hidden2_weight)) 201 | out_max_list.append(np.max(out_weight)) 202 | out_min_list.append(np.min(out_weight)) 203 | np.save('hid1_min_list', hid1_min_list) 204 | np.save('hid2_min_list', hid2_min_list) 205 | np.save('out_min_list', out_min_list) 206 | np.save('hid1_max_list', hid1_max_list) 207 | np.save('hid2_max_list', hid2_max_list) 208 | np.save('out_max_list', out_max_list) -------------------------------------------------------------------------------- /bitstring.h: -------------------------------------------------------------------------------- 1 | #ifndef BITSTRING_H 2 | #define BITSTRING_H 3 | #include <bitset> 4 | #include <random> 5 | #include <chrono> 6 | #include <array> 7 | #include <utility> 8 | using namespace std; 9 | 10 | enum bitstring_type {unipolar, bipolar}; 11 | 12 | default_random_engine random_generator(chrono::high_resolution_clock::now().time_since_epoch().count()); 13 | 14 | template<size_t bitstring_size, bitstring_type T> 15 | class bitstring 16 | { 17 | public: 18 | bitset<bitstring_size> X; 19 | bitstring() {} 20 | bitstring(double x); 21 | bitstring(bitset<bitstring_size>&& S): X(S) {} 22 | ~bitstring(); 23 | double value() const; 24 | bitstring operator|(bitstring y) { return bitstring(X | y.X);} 25 | void operator|=(bitstring y) { X |= y.X;} 26 | bitstring operator&(bitstring y) { return bitstring(X & y.X);} 27 | void operator&=(bitstring y) { X &= y.X;} 28 | bitstring operator^(bitstring y) { return bitstring(X ^ y.X);} 29 | void operator^=(bitstring y) { X ^= y.X;} 30 | bitstring operator*(const bitstring& y) 31 | { 32 | // unipolar multiplication is a bitwise AND; bipolar multiplication is a bitwise XNOR 33 | if (T == unipolar) return bitstring(X & y.X); 34 | return bitstring(~(X ^ y.X)); 35 | } 36 | 37 | 38 | bitstring Stanh(int n); 39 | bool at(size_t position) { return X.test(position); } 40 | }; 41 | 42 | template<size_t bitstring_size, bitstring_type T> 43 | bitstring<bitstring_size, T>::bitstring(double x) 44 | { 45 | if (T == unipolar) { 46 | bernoulli_distribution R(x); 47 | for (size_t i = 0; i < bitstring_size; i++) X.set(i, R(random_generator)); 48 | } 49 | else { 50 | bernoulli_distribution R((x + 1) / 2.0); 51 | for (size_t i = 0; i < bitstring_size; i++) X.set(i, R(random_generator)); 52 | } 53 | } 54 | 55 | template<size_t bitstring_size, bitstring_type T> 56 | bitstring<bitstring_size, T>::~bitstring() 57 | { 58 | } 59 | 60 | template<size_t bitstring_size, bitstring_type T> 61 | double bitstring<bitstring_size, T>::value() const 62 | { 63 | double unipolar_value = static_cast<double>(X.count()) / bitstring_size; 64 | if (T == unipolar) return unipolar_value; 65 | return 2 * unipolar_value - 1; 66 | } 67 | 68 | template<size_t bitstring_size, bitstring_type T> 69 | bitstring<bitstring_size, T> bitstring<bitstring_size, T>::Stanh(int n) 70 | { 71 | bitset<bitstring_size> Y; 72 | int count = n / 2 - 1; 73 | for (int i = 0; i < bitstring_size; i++) { 74 | count += 2 * X.test(i) - 1; 75 | if (count > n - 1) count = n - 1; 76 | if (count < 0) count = 0; 77 | if (count > n / 2 - 1) Y.set(i, 1); 78 | } 79 | return bitstring(move(Y)); 80 | } 81 | 82 | template<size_t bitstring_size, bitstring_type T, size_t M> 83 | class integral_bitstring 84 | { 85 | public: 86 | array<bitstring<bitstring_size, T>, M> X; 87 | integral_bitstring() {}; 88 | integral_bitstring(double x) 89 | { 90 | // X.fill(bitstring<bitstring_size, T>(x / M)); 91 | for (int i = 0; i < M; i++) X[i] = bitstring<bitstring_size, T>(x / M); 92 | } 93 | ~integral_bitstring() {}; 94 | double value() const 95 | { 96 | double v = 0; 97 | for (auto& x: X) v += x.value(); 98 | return v; 99 | } 100 | size_t size() const { return M; } 101 | template<size_t M2> 102 | integral_bitstring<bitstring_size, T, M + M2> operator+(integral_bitstring<bitstring_size, T, M2> y) 103 | { 104 | integral_bitstring<bitstring_size, T, M + M2> Z; 105 | for (int i = 0; i < M; i++) Z.X[i] = X[i]; 106 | for (int i = M; i < M + M2; i++) Z.X[i] = y.X[i - M]; 107 | return move(Z); 108 | } 109 | integral_bitstring operator*(const bitstring<bitstring_size, T>& y) 110 | { 111 | integral_bitstring Z; 112 | for (int i = 0; i < M; i++) Z.X[i] = X[i] * y; 113 | return move(Z); 114 | } 115 | template<size_t M2> 116 | integral_bitstring<bitstring_size, T, M * M2> operator*(integral_bitstring<bitstring_size, T, M2> y) 117 | { 118 | integral_bitstring<bitstring_size, T, M * M2> Z; 119 | for (int i = 0; i < M; i++) 120 | for (int j = 0; j < M2; j++) Z.X[i * M2 + j] = X[i] * y.X[j]; 121 | return move(Z); 122 | } 123 | int at(size_t position) const 124 | { 125 | int s = 0; 126 | for (auto &x: X) s += x.X.test(position); 127 | return s; 128 | } 129 | bitstring<bitstring_size, T> NStanh(int n) const; 130 | bitstring<bitstring_size, T> NStanh_bound(int n, int bound) const; 131 | }; 132 | 133 | template<size_t bitstring_size, bitstring_type T, size_t M> 134 | bitstring<bitstring_size, T> integral_bitstring<bitstring_size, T, M>::NStanh(int n) const 135 | { 136 | bitset<bitstring_size> Y; 137 | int count = n * M / 2 - 1; 138 | for (int i = 0; i < bitstring_size; i++) { 139 | count += 2 * at(i) - static_cast<int>(M); 140 | if (count > static_cast<int>(n * M - 1)) count = n * M - 1; 141 | if (count < 0) count = 0; 142 | if (count > static_cast<int>(n * M / 2 - 1)) Y.set(i, 1); 143 | } 144 | return bitstring<bitstring_size, T>(move(Y)); 145 | } 146 | 147 | template<size_t bitstring_size, bitstring_type T, size_t M> 148 | bitstring<bitstring_size, T> integral_bitstring<bitstring_size, T, M>::NStanh_bound(int n, int bound) const 149 | { 150 | bitset<bitstring_size> Y; 151 | int count = bound / 2 - 1; 152 | for (int i = 0; i < bitstring_size; i++) { 153 | count += 2 * at(i) - static_cast<int>(M); 154 | if (count > static_cast<int>(bound - 1)) count = bound - 1; 155 | if (count < 0) count = 0; 156 | if (count > static_cast<int>(bound / 2 - 1)) Y.set(i, 1); 157 | } 158 | return bitstring<bitstring_size, T>(move(Y)); 159 | } 160 | 161 | template<size_t bitstring_size, bitstring_type T, size_t M, size_t N> 162 | integral_bitstring<bitstring_size, T, M * N> sum_of_array(integral_bitstring<bitstring_size, T, M> arr[]) 163 | { 164 | integral_bitstring<bitstring_size, T, M * N> result; 165 | for (int i = 0; i < N; i++) { 166 | for (int j = 0; j < M; j++) { 167 | result.X[i * M + j] = arr[i].X[j]; 168 | } 169 | } 170 | return move(result); 171 | } 172 | 173 | template<size_t bitstring_size, bitstring_type T, size_t M, size_t N> 174 | integral_bitstring<bitstring_size, T, M * N> sum_of_array_with_weigth(integral_bitstring<bitstring_size, T, M> arr[N], const bitstring<bitstring_size, T> w[N]) 175 | { 176 | integral_bitstring<bitstring_size, T, M * N> result; 177 | for (int i = 0; i < N; i++) { 178 | for (int j = 0; j < M; j++) { 179 | result.X[i * M + j] = arr[i].X[j] * w[i]; 180 | } 181 | } 182 | return move(result); 183 | } 184 | 185 | template<size_t bitstring_size, bitstring_type T, size_t N> 186 | integral_bitstring<bitstring_size, T, N> test_func(const bitstring<bitstring_size, T> w[N]) 187 | { 188 | integral_bitstring<bitstring_size, T, N> result; 189 | return move(result); 190 | } 191 | #endif -------------------------------------------------------------------------------- /cnn_comparison.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | import numpy as np 3 | from numpy import dot, tanh 4 | import torch 5 | import torch.nn as nn 6 | from torch.autograd import Variable 7 | 8 | hidden_layer1_linear_trans = np.genfromtxt("fc1_weight_0617_2022.txt") 9 | hidden_layer2_linear_trans = np.genfromtxt("fc2_weight_0617_2022.txt") 10 | output_layer_linear_trans = np.genfromtxt("fc3_weight_0617_2022.txt") 11 | answer = np.genfromtxt("value_list.txt") 12 | 13 | def test(model, figure_data, target): 14 | correct = 0 15 | with torch.no_grad(): 16 | t_data = torch.from_numpy(figure_data) 17 | t_data = t_data.reshape([1, 1, 16, 16]) 18 | # print(t_data) 19 | # print(t_data.shape) 20 | target = torch.as_tensor(target) 21 | # t_data = t_data.view(t_data.size(1), -1) 22 | # t_data_binary = np.ceil(t_data.numpy()) 23 | # t_data = torch.from_numpy(t_data_binary) 24 | t_data, target = Variable(t_data), Variable(target) 25 | output = model(t_data) 26 | pred = output.max(1, keepdim=True)[1] 27 | correct += pred.eq(target.view_as(pred)).sum().item() 28 | if correct > 0: 29 | print('right') 30 | return True 31 | else: 32 | print('wrong') 33 | return False 34 | 35 | def conv_2d_single_kernel(input_data, kernel, stride): 36 | """Convolve the input with a single kernel, producing a single output map. 37 | Since this is written to learn how convolution works, it is kept simple; 38 | padding is auto-completed, like "SAME" in tf. 39 | Args: 40 | input_data: input of the convolution layer, an np.array 41 | of shape [h, w]. 42 | kernel: the convolution kernel, of shape [k_h, k_w] 43 | stride: stride, list [s_h, s_w]. 44 | Return: 45 | out: convolution result 46 | """ 47 | h, w = input_data.shape 48 | kernel_h, kernel_w = kernel.shape 49 | 50 | stride_h, stride_w = stride 51 | 52 | out = np.zeros((h//stride_h, w//stride_w)) 53 | for idx_h, i in enumerate(range(0, h-kernel_h+1, stride_h)): 54 | for idx_w, j in enumerate(range(0, w-kernel_w+1, stride_w)): 55 | window = input_data[i:i+kernel_h, j:j+kernel_w] 56 | out[idx_h, idx_w] = np.sum(window*kernel) 57 | return out 58 | 59 | class Net(nn.Module): 60 | 61 | def __init__(self): 62 | super(Net, self).__init__() 63 | self.conv1 = nn.Conv2d(1, 1, kernel_size=3, stride=2, bias=False) 64 | # for p in self.parameters(): 65 | # p.requires_grad=False 66 | # self.pool = nn.MaxPool2d(2, 2) 67 | self.fc1 = nn.Linear(7*7, 30, bias=False) 68 | self.fc2 =
nn.Linear(30, 30, bias=False) 69 | self.fc3 = nn.Linear(30, 10, bias=False) 70 | 71 | # connect inputs and outputs, size: 15x15 -> 7x7 -> 30 -> 30 -> 10 72 | def forward(self, x): 73 | x = x.to(torch.float32) 74 | x = torch.relu(self.conv1(x)) 75 | # x = self.pool(x) 76 | x = x.view(-1, 7*7) 77 | x = torch.tanh(self.fc1(x)) 78 | x = torch.tanh(self.fc2(x)) 79 | x = torch.tanh(self.fc3(x)) 80 | return x 81 | 82 | 83 | device = "cuda" if torch.cuda.is_available() else "cpu" 84 | device = 'cpu' # for MNIST recognition the GPU is slower than the CPU 85 | print(f"Using {device} device") 86 | 87 | # create a network sample 88 | net = Net().to(device) 89 | # net = torch.load('weight_data_cnn/epoch_20') 90 | constant_weights = np.genfromtxt('constant_weight_conv1_zehan.txt') 91 | constant_weights = torch.from_numpy(np.multiply(constant_weights, 1/16)) 92 | print(constant_weights) 93 | net.state_dict()['conv1.weight'].copy_(constant_weights) 94 | net.state_dict()['fc1.weight'].copy_(torch.from_numpy(hidden_layer1_linear_trans)) 95 | net.state_dict()['fc2.weight'].copy_(torch.from_numpy(hidden_layer2_linear_trans)) 96 | net.state_dict()['fc3.weight'].copy_(torch.from_numpy(output_layer_linear_trans)) 97 | # hidden_layer1_linear_trans = net.state_dict()['hidden1.weight'].numpy() 98 | # hidden_layer2_linear_trans = net.state_dict()['hidden2.weight'].numpy() 99 | # output_layer_linear_trans = net.state_dict()['out.weight'].numpy() 100 | correct_count = 0 101 | count_pytorch = 0 102 | kernel = np.genfromtxt("constant_weight_conv1_zehan.txt") / 16 103 | 104 | for i in range(1000): 105 | 106 | fig_data = np.genfromtxt("data_figures/fig{}.txt".format(i)) 107 | fig_data = np.ceil(fig_data) 108 | fig_data_copy = fig_data 109 | # fig_data = (fig_data > 0.5) # binarize 110 | # fig_data = np.ceil(fig_data) # binarize 111 | #fig_data = np.round(fig_data) # binarize 112 | #plt.imshow(fig_data, cmap='Greys') 113 | #plt.show() 114 | 115 | #input_layer = np.reshape(fig_data, (256, 1)) 116 | conv_result = conv_2d_single_kernel(fig_data[0:15, 0:15], kernel, (2, 2)) 117 | #print(conv_result[1:7,1:7]) 118 | 119 | conv_result[conv_result < 0] = 0 120 | # conv_result[conv_result > 1] = 1 121 | 122 | relu_output = np.reshape(conv_result, (49, 1)) 123 | # input_layer = torch.Tensor(input_layer) 124 | 125 | hidden_layer1_result = dot(hidden_layer1_linear_trans, relu_output) 126 | hidden_layer1_output = tanh(hidden_layer1_result) 127 | 128 | hidden_layer2_result = dot(hidden_layer2_linear_trans, hidden_layer1_output) 129 | hidden_layer2_output = tanh(hidden_layer2_result) 130 | 131 | output_layer_result = dot(output_layer_linear_trans, hidden_layer2_output) 132 | output_layer_tanh = tanh(output_layer_result) 133 | 134 | number = np.argmax(output_layer_tanh) 135 | print(number, int(answer[i]), end = ' ') 136 | if number == int(answer[i]): 137 | correct_count += 1 138 | print('T') 139 | else: 140 | print('F') 141 | #print(output_layer_tanh) 142 | result = test(model=net, figure_data=fig_data_copy, target=answer[i]) 143 | if result: 144 | count_pytorch += 1 145 | 146 | print(f"correct from zehan's code: {correct_count}") 147 | print(f'correct from pytorch: {count_pytorch}') 148 | -------------------------------------------------------------------------------- /fc1_weight_upper_half.schem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamoon/StochasticNet/1474ba0fb0c245fe8a7a0f101142ad3d56ef507e/fc1_weight_upper_half.schem
-------------------------------------------------------------------------------- /fc2_weight_upper_half.schem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamoon/StochasticNet/1474ba0fb0c245fe8a7a0f101142ad3d56ef507e/fc2_weight_upper_half.schem -------------------------------------------------------------------------------- /fc3_weight.schem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/leamoon/StochasticNet/1474ba0fb0c245fe8a7a0f101142ad3d56ef507e/fc3_weight.schem -------------------------------------------------------------------------------- /fnn_comparison.py: -------------------------------------------------------------------------------- 1 | import re 2 | from matplotlib import pyplot as plt 3 | import numpy as np 4 | from numpy import dot, tanh 5 | import torch 6 | import torch.nn as nn 7 | from torch.autograd import Variable 8 | 9 | hidden_layer1_linear_trans = np.genfromtxt("hidden1_weight.txt") 10 | hidden_layer2_linear_trans = np.genfromtxt("hidden2_weight.txt") 11 | output_layer_linear_trans = np.genfromtxt("out_weight.txt") 12 | answer = np.genfromtxt("value_list.txt") 13 | 14 | def test(model, figure_data, target): 15 | """ 16 | A function used to test the accuracy of the neural network on one figure. 17 | 18 | Args: 19 | model (class): the trained neural network 20 | figure_data (np.ndarray): one input figure 21 | target (int): the ground-truth digit for that figure 22 | 23 | Returns: True if the network predicts the target, else False 24 | """ 25 | correct = 0 26 | with torch.no_grad(): 27 | t_data = torch.Tensor(figure_data) 28 | target = torch.as_tensor(target) 29 | t_data = t_data.view(t_data.size(1), -1) 30 | t_data_binary = np.ceil(t_data.numpy()) 31 | t_data = torch.from_numpy(t_data_binary) 32 | t_data, target = Variable(t_data), Variable(target) 33 | output = model(t_data) 34 | pred = output.max(1, keepdim=True)[1] 35 | correct += pred.eq(target.view_as(pred)).sum().item() 36 | if correct > 0: 37 | print('right') 38 | return True 39 | else: 40 | print('wrong') 41 | return False 42 | 43 | class Net(nn.Module): 44 | 45 | def __init__(self, n_feature, n_hidden1, n_hidden2, n_output): 46 | super(Net, self).__init__() 47 | self.hidden1 = nn.Linear(n_feature, n_hidden1, bias=False) 48 | self.hidden2 = nn.Linear(n_hidden1, n_hidden2, bias=False) 49 | self.out = nn.Linear(n_hidden2, n_output, bias=False) 50 | 51 | # connect inputs and outputs 52 | def forward(self, x): 53 | # print(f'input: {x}') 54 | x = torch.tanh(self.hidden1(x)) 55 | # print(f'hidden1 output: {x}') 56 | x = torch.tanh(self.hidden2(x)) 57 | x = torch.tanh(self.out(x)) 58 | return x 59 | 60 | 61 | size_inputs = 16*16 62 | size_hidden1 = 32 63 | size_hidden2 = 32 64 | size_outputs = 10 65 | 66 | device = "cuda" if torch.cuda.is_available() else "cpu" 67 | device = 'cpu' # for MNIST recognition the GPU is slower than the CPU 68 | print(f"Using {device} device") 69 | 70 | # create a network sample 71 | net = Net(n_feature=size_inputs, n_hidden1=size_hidden1, n_hidden2=size_hidden2, n_output=size_outputs).to(device) 72 | net = torch.load('weight_data/epoch_20') 73 | # hidden_layer1_linear_trans = net.state_dict()['hidden1.weight'].numpy() 74 | # hidden_layer2_linear_trans = net.state_dict()['hidden2.weight'].numpy() 75 | # output_layer_linear_trans = net.state_dict()['out.weight'].numpy() 76 | correct_count = 0 77 | count_pytorch = 0 78 | 79 | for i in
range(1000): 80 | 81 | fig_data = np.genfromtxt("data_figures/fig{}.txt".format(i)) 82 | #fig_data = (fig_data > 0.5) # binarize 83 | #plt.imshow(fig_data, cmap='Greys') 84 | #plt.show() 85 | 86 | input_layer = np.ceil(np.reshape(fig_data, (256, 1))) 87 | # input_layer = torch.Tensor(input_layer) 88 | 89 | hidden_layer1_result = dot(hidden_layer1_linear_trans, input_layer) 90 | hidden_layer1_output = tanh(hidden_layer1_result) 91 | # print(f'output from non-pytorch: {hidden_layer1_output}') 92 | 93 | hidden_layer2_result = dot(hidden_layer2_linear_trans, hidden_layer1_output) 94 | hidden_layer2_output = tanh(hidden_layer2_result) 95 | 96 | output_layer_result = dot(output_layer_linear_trans, hidden_layer2_output) 97 | output_layer_tanh = tanh(output_layer_result) 98 | # print(f'input from non-pytorch: {input_layer.T}') 99 | # print(f'output from non-pytorch: {hidden_layer1_output.T}') 100 | 101 | number = np.argmax(output_layer_tanh) 102 | print(number, int(answer[i]), end = ' ') 103 | if number == int(answer[i]): 104 | correct_count += 1 105 | print('T') 106 | else: 107 | print('F') 108 | #print(output_layer_tanh) 109 | result = test(model=net, figure_data=input_layer, target=answer[i]) 110 | if result: 111 | count_pytorch += 1 112 | 113 | print(correct_count) 114 | print(f'correct from pytorch: {count_pytorch}') 115 | -------------------------------------------------------------------------------- /im.py: -------------------------------------------------------------------------------- 1 | from matplotlib import pyplot as plt 2 | import numpy as np 3 | 4 | fig_data = np.genfromtxt("data_figures/fig{}.txt".format(0)) 5 | #fig_data = np.genfromtxt("test_written_number.txt") 6 | #print(fig_data) 7 | fig_data = (fig_data > 0.5) # binarize 8 | plt.imshow(fig_data, cmap='Greys') 9 | plt.show() 10 | 11 | # kernel = np.genfromtxt("CNN weights/conv1_weight.txt") 12 | # K = kernel.copy() 13 | # K[kernel < 0] = 0 14 | # S = np.sum(K) 15 | # print(S) 16 | # print(kernel / S) 17 | 18 | # weight = np.genfromtxt("CNN weights/fc1_weight.txt") 19 | # plt.imshow(weight, cmap='bwr') 20 | # plt.show() 21 | 22 | # print(np.max(weight), np.min(weight)) -------------------------------------------------------------------------------- /nbtfillweight.py: -------------------------------------------------------------------------------- 1 | from json.encoder import INFINITY 2 | from nbt import nbt 3 | from numpy import Inf, empty 4 | import numpy as np 5 | 6 | stackable_object_label_list = ["minecraft:red_concrete", "minecraft:green_concrete", "minecraft:blue_concrete",\ 7 | "minecraft:cyan_concrete", "minecraft:black_concrete", "minecraft:white_concrete", "minecraft:gray_concrete", 8 | "minecraft:light_blue_concrete", "minecraft:magenta_concrete", "minecraft:purple_concrete"] 9 | # use different kinds of concrete to avoid stacking 10 | 11 | def set_value_source(container_source, numerator, denominator): 12 | numerator = int(numerator) 13 | denominator = int(denominator) 14 | container_source['Items'] = nbt.TAG_List(type=nbt.TAG_Compound) 15 | if numerator == 1 and denominator == 1: # if a/b=1, just set 1 minecart 16 | item = nbt.TAG_Compound(name = "") 17 | item.tags.append(nbt.TAG_Byte(value = 0, name = "Slot")) 18 | item.tags.append(nbt.TAG_String(value = "minecraft:minecart", name = "id")) 19 | item.tags.append(nbt.TAG_Byte(value = 1, name = "Count")) 20 | container_source['Items'].tags.append(item) 21 | return 22 | if numerator == 0: # if a/b=0, just set 1 concrete 23 | item = nbt.TAG_Compound(name = "") 24 |
item.tags.append(nbt.TAG_Byte(value = 0, name = "Slot")) 25 | item.tags.append(nbt.TAG_String(value = stackable_object_label_list[1], name = "id")) 26 | item.tags.append(nbt.TAG_Byte(value = 1, name = "Count")) 27 | container_source['Items'].tags.append(item) 28 | return 29 | for i in range(min(denominator - numerator, denominator - 1)): # fill non-stackable objects 30 | item = nbt.TAG_Compound(name = "") 31 | item.tags.append(nbt.TAG_Byte(value = i, name = "Slot")) 32 | item.tags.append(nbt.TAG_String(value = stackable_object_label_list[i], name = "id")) 33 | item.tags.append(nbt.TAG_Byte(value = 1, name = "Count")) 34 | container_source['Items'].tags.append(item) 35 | for i in range(denominator - numerator, denominator - 1): # fill minecarts 36 | item = nbt.TAG_Compound(name = "") 37 | item.tags.append(nbt.TAG_Byte(value = i, name = "Slot")) 38 | item.tags.append(nbt.TAG_String(value = "minecraft:minecart", name = "id")) 39 | item.tags.append(nbt.TAG_Byte(value = 1, name = "Count")) 40 | container_source['Items'].tags.append(item) 41 | 42 | def set_value_detected(container_detected, numerator = 0, denominator = 1): 43 | container_detected['Items'] = nbt.TAG_List(type=nbt.TAG_Compound) 44 | if numerator != 0: # otherwise (i.e. a/b=0) set one concrete 45 | item = nbt.TAG_Compound(name = "") 46 | item.tags.append(nbt.TAG_Byte(value = 0, name = "Slot")) 47 | item.tags.append(nbt.TAG_String(value = "minecraft:minecart", name = "id")) 48 | item.tags.append(nbt.TAG_Byte(value = 1, name = "Count")) 49 | container_detected['Items'].tags.append(item) 50 | else: 51 | item = nbt.TAG_Compound(name = "") 52 | item.tags.append(nbt.TAG_Byte(value = 0, name = "Slot")) 53 | item.tags.append(nbt.TAG_String(value = stackable_object_label_list[0], name = "id")) 54 | item.tags.append(nbt.TAG_Byte(value = 1, name = "Count")) 55 | container_detected['Items'].tags.append(item) 56 | 57 | def equal_position(a, b): # if position is equal 58 | return a[0] == b[0] and a[1] == b[1] and a[2] == b[2] 59 | 60 | def set_weight(nbtfile, weight_num, weight_den, crystall_period, source_pos, detected_pos): 61 | for i in nbtfile['BlockEntities']: 62 | if str(i["Id"]) == "minecraft:dropper": 63 | # pos_in_lattice is the relative position with respect to the lattice original point 64 | # crystall_period is the size (or shape) of the lattice in x,y,z 65 | # index denotes which lattice to fill, index [i,j,k] fills weight[i,j,k] 66 | pos_in_lattice = \ 67 | (i["Pos"][0] % crystall_period[0], i["Pos"][1] % crystall_period[1], i["Pos"][2] % crystall_period[2]) 68 | index = \ 69 | (i["Pos"][0] // crystall_period[0], i["Pos"][1] // crystall_period[1], i["Pos"][2] // crystall_period[2]) 70 | if equal_position(pos_in_lattice, detected_pos): 71 | set_value_detected(i, weight_num[index[0], index[1], index[2]], weight_den[index[0], index[1], index[2]]) 72 | if equal_position(pos_in_lattice, source_pos): 73 | set_value_source(i, weight_num[index[0], index[1], index[2]], weight_den[index[0], index[1], index[2]]) 74 | 75 | def nearest_fraction(x, N): # output the nearest a/b to x, b <= N, also output error = |a/b-x| 76 | err = INFINITY 77 | a, b = 0, 0 78 | for j in range(1, N + 1): 79 | i = round(x * j) 80 | current_err = abs(i / j - x) 81 | if current_err < err: 82 | a, b = i, j 83 | err = current_err 84 | return a, b, err 85 | 86 | def bipolar_to_frequency(x): # x in [-1,1] to [0,1], cut off x<-1 and x>1 87 | if x < -1: 88 | x = -1 89 | if x > 1: 90 | x = 1 91 | return (x + 1) / 2 92 | 93 | def check_empty_dropper(filename, pos = (0,0,0)): 
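 """Print a /tp command for every dropper in the .schem file whose inventory is empty, so weights missed during filling can be located in-world; pos is the world-coordinate offset of the schematic's origin."""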
94 | def add_position(a, b): 95 | return (a[0]+b[0], a[1]+b[1], a[2]+b[2]) 96 | nbtfile = nbt.NBTFile(filename,'rb') 97 | for i in nbtfile['BlockEntities']: 98 | if str(i["Id"]) == "minecraft:dropper": 99 | if len(i["Items"]) == 0: 100 | new_pos = add_position(i["Pos"], pos) 101 | print('/tp', new_pos[0], '~', new_pos[2]) 102 | 103 | def check_anomalous_dropper(filename, pos = (0,0,0), layer=''): 104 | crystall_period = (0, 0, 0) 105 | detected = (0, 0, 0) 106 | if layer == 'fc1': 107 | crystall_period = (8, 2, 5) 108 | detected = (6, 0, 3) 109 | elif layer == 'fc2': 110 | crystall_period = (5, 2, 8) 111 | detected = (1, 1, 5) 112 | elif layer == 'fc3': 113 | crystall_period = (8, 2, 5) 114 | detected = (6, 0, 2) 115 | else: 116 | print("input a correct layer name ('fc1', 'fc2' or 'fc3')") 117 | return 118 | 119 | def add_position(a, b): 120 | return (a[0]+b[0], a[1]+b[1], a[2]+b[2]) 121 | nbtfile = nbt.NBTFile(filename,'rb') 122 | get_at_least_one_detected = False 123 | for i in nbtfile['BlockEntities']: 124 | if str(i["Id"]) == "minecraft:dropper": 125 | pos_in_lattice = \ 126 | (i["Pos"][0] % crystall_period[0], i["Pos"][1] % crystall_period[1], i["Pos"][2] % crystall_period[2]) 127 | if not get_at_least_one_detected and equal_position(pos_in_lattice, detected): 128 | get_at_least_one_detected = True 129 | if equal_position(pos_in_lattice, detected) and len(i["Items"]) > 1: 130 | new_pos = add_position(i["Pos"], pos) 131 | print('/tp', new_pos[0], '~', new_pos[2]) 132 | if not get_at_least_one_detected: 133 | print("the position might be wrong") 134 | 135 | def fill_fc1_weight(): 136 | weight = np.genfromtxt("CNN weights/fc1_weight.txt")[:,1:] 137 | weight = weight * 0.5 # factor 0.5 138 | 139 | shape = (15, 3, 16) # Half of the matrix 30*48 to shape (15, 3, 16) in x,y,z, y being the height 140 | weight = np.reshape(weight[:15], shape) # upper half 141 | # weight = np.reshape(weight[15:], shape) # lower half 142 | weight = np.flip(weight, axis=2) # since our setting has reversed direction w.r.t. z-axis 143 | weight = np.flip(weight, axis=1) # since our setting has reversed direction w.r.t.
y-axis 144 | device_shape = shape # integral range [-1,1] 145 | weight_num = np.ones(device_shape) # weight-numerator 146 | weight_den = np.zeros(device_shape) # weight-denominator 147 | for i in range(shape[0]): 148 | for j in range(shape[1]): 149 | for k in range(shape[2]): 150 | a, b, err = nearest_fraction(bipolar_to_frequency(weight[i,j,k]), 10) 151 | # print(a, b) 152 | weight_num[i,j,k] = a 153 | weight_den[i,j,k] = b 154 | weight[i,j,k] = a / b 155 | print(weight[14,::-1,::-1]) 156 | 157 | nbtfile = nbt.NBTFile("fc1_weight_upper_half.schem",'rb') 158 | crystall_period = (8, 2, 5) 159 | detected = (6, 0, 3) # these two can be determined by printing the positions of some droppers, using the code below 160 | source = (7, 0, 3) 161 | set_weight(nbtfile, weight_num, weight_den, crystall_period, source, detected) 162 | nbtfile.write_file("fc1_weight_upper_half_fill.schem") 163 | # nbtfile.write_file("fc1_weight_lower_half_fill.schem") 164 | 165 | def fill_fc2_weight(): 166 | weight = np.genfromtxt("CNN weights/fc2_weight.txt") 167 | weight = weight * 0.5 # factor 0.5 168 | 169 | shape = (15, 2, 15) # Half of the matrix 30*30 to shape (15, 2, 15) 170 | new_shape = (15, 2, 15) # after swapping x, z 171 | weight = np.reshape(weight[:15], shape) # upper half 172 | # weight = np.reshape(weight[15:], shape) # lower half 173 | # print(weight[0]) 174 | weight = np.swapaxes(weight, 0, 2) # swap x, z 175 | weight = np.flip(weight, axis=2) # since our setting has reversed direction w.r.t. z-axis 176 | weight = np.flip(weight, axis=1) # since our setting has reversed direction w.r.t. y-axis 177 | device_shape = new_shape # integral range [-1,1] 178 | weight_num = np.ones(device_shape) # weight-numerator 179 | weight_den = np.zeros(device_shape) # weight-denominator 180 | for i in range(new_shape[0]): 181 | for j in range(new_shape[1]): 182 | for k in range(new_shape[2]): 183 | a, b, err = nearest_fraction(bipolar_to_frequency(weight[i,j,k]), 10) 184 | # print(a, b) 185 | weight_num[i,j,k] = a 186 | weight_den[i,j,k] = b 187 | weight[i,j,k] = a / b 188 | print(weight[:,::-1,-1].transpose()) 189 | 190 | nbtfile = nbt.NBTFile("fc2_weight_upper_half.schem",'rb') 191 | crystall_period = (5, 2, 8) 192 | detected = (1, 1, 5) # these two can be determined by printing the positions of some droppers, using the code below 193 | source = (1, 1, 6) 194 | 195 | set_weight(nbtfile, weight_num, weight_den, crystall_period, source, detected) 196 | nbtfile.write_file("fc2_weight_upper_half_fill.schem") 197 | # nbtfile.write_file("fc2_weight_lower_half_fill.schem") 198 | 199 | def fill_fc3_weight(): 200 | weight = np.genfromtxt("CNN weights/fc3_weight.txt") 201 | weight = weight * 0.5 # factor 0.5 202 | 203 | shape = (10, 2, 15) # 10*30 to shape (10, 2, 15) 204 | new_shape = shape 205 | weight = np.reshape(weight, shape) 206 | # print(weight[0]) 207 | weight = np.flip(weight, axis=2) # since our setting has reversed direction w.r.t. z-axis 208 | weight = np.flip(weight, axis=1) # since our setting has reversed direction w.r.t. y-axis 209 | weight = np.flip(weight, axis=0) # since our setting has reversed direction w.r.t.
199 | def fill_fc3_weight():
200 |     weight = np.genfromtxt("CNN weights/fc3_weight.txt")
201 |     weight = weight * 0.5 # factor 0.5
202 | 
203 |     shape = (10, 2, 15) # Matrix 10*30 to shape (10, 2, 15)
204 |     new_shape = shape
205 |     weight = np.reshape(weight, shape)
206 |     # print(weight[0])
207 |     weight = np.flip(weight, axis=2) # since our setting has reversed direction w.r.t. z-axis
208 |     weight = np.flip(weight, axis=1) # since our setting has reversed direction w.r.t. y-axis
209 |     weight = np.flip(weight, axis=0) # since our setting has reversed direction w.r.t. x-axis
210 |     device_shape = new_shape # integral range [-1,1]
211 |     weight_num = np.ones(device_shape) # weight numerator
212 |     weight_den = np.zeros(device_shape) # weight denominator
213 |     for i in range(new_shape[0]):
214 |         for j in range(new_shape[1]):
215 |             for k in range(new_shape[2]):
216 |                 a, b, err = nearest_fraction(bipolar_to_frequency(weight[i,j,k]), 10)
217 |                 # print(a, b)
218 |                 weight_num[i,j,k] = a
219 |                 weight_den[i,j,k] = b
220 |                 weight[i,j,k] = a / b
221 |     print(weight[-1,::-1,::-1])
222 | 
223 |     nbtfile = nbt.NBTFile("fc3_weight.schem",'rb')
224 |     crystall_period = (8, 2, 5)
225 |     detected = (6, 0, 2) # these two can be determined by printing the positions of some droppers, using the commented-out code below
226 |     source = (7, 0, 2)
227 |     # for i in range(0, 1000):
228 |     #     if str(nbtfile['BlockEntities'][i]['Id']) == "minecraft:dropper":
229 |     #         print(nbtfile['BlockEntities'][i]['Pos'])
230 |     set_weight(nbtfile, weight_num, weight_den, crystall_period, source, detected)
231 |     nbtfile.write_file("fc3_weight_fill.schem")
232 | 
233 | def test_fill_fc1_input(): # fill input of 1st layer
234 |     input = np.genfromtxt("CNN weights/fc1_input.txt")[1:]
235 |     test_weight = np.genfromtxt("CNN weights/fc1_weight.txt")[13,1:] * 0.5
236 | 
237 |     input[input > 1] = 1
238 |     input[input < -1] = -1
239 |     test_weight[test_weight > 1] = 1
240 |     test_weight[test_weight < -1] = -1
241 |     #print(np.dot(input, test_weight), np.tanh(np.dot(input, test_weight)))
242 | 
243 |     shape = (1, 3, 16)
244 |     input = np.resize(input, shape)
245 |     test_weight = np.resize(test_weight, shape)
246 |     test_weight = np.flip(test_weight, axis=2)
247 |     test_weight = np.flip(test_weight, axis=1)
248 |     input = np.flip(input, axis=2)
249 |     input = np.flip(input, axis=1)
250 |     input_num = np.ones(shape)
251 |     input_den = np.zeros(shape)
252 |     test_weight_num = np.zeros(shape)
253 |     test_weight_den = np.zeros(shape)
254 |     print(np.round(input*16))
255 |     for j in range(shape[1]):
256 |         for k in range(shape[2]):
257 |             a, b, err = nearest_fraction(bipolar_to_frequency(input[0,j,k]), 10)
258 |             # print(a, b, bipolar_to_frequency(input[0,j,k]))
259 |             input_num[0,j,k] = a
260 |             input_den[0,j,k] = b
261 |             input[0,j,k] = a / b # * 2 - 1
262 |             a, b, err = nearest_fraction(bipolar_to_frequency(test_weight[0,j,k]), 10)
263 |             test_weight_num[0,j,k] = a
264 |             test_weight_den[0,j,k] = b
265 |             test_weight[0,j,k] = a / b # * 2 - 1
266 |     #print(np.sum(input * test_weight))
267 |     print(test_weight)
268 | 
269 |     nbtfile = nbt.NBTFile("test_fc1_input_empty.schem",'rb')
270 |     crystall_period = (8, 2, 5)
271 |     detected = (0, 0, 0)
272 |     source = (1, 0, 0)
273 |     set_weight(nbtfile, input_num, input_den, crystall_period, source, detected)
274 |     nbtfile.write_file("test_fc1_input_fill.schem")
275 | 
276 |     nbtfile2 = nbt.NBTFile("test_fc1_weight_empty.schem",'rb')
277 |     crystall_period = (8, 2, 5)
278 |     detected = (6, 0, 3)
279 |     source = (7, 0, 3)
280 |     set_weight(nbtfile2, test_weight_num, test_weight_den, crystall_period, source, detected)
281 |     nbtfile2.write_file("test_fc1_weight_fill.schem")
282 | 
283 | def test_fill_fc2_input(): # fill input of 2nd layer
284 |     input = np.genfromtxt("CNN weights/fc2_input.txt")
285 |     test_weight = np.genfromtxt("CNN weights/fc2_weight.txt")[12] * 0.5
286 | 
287 |     input[input > 1] = 1
288 |     input[input < -1] = -1
289 |     test_weight[test_weight > 1] = 1
290 |     test_weight[test_weight < -1] = -1
291 |     #print(np.dot(input, test_weight), np.tanh(np.dot(input, test_weight)))
292 | 
293 |     shape = (1, 2, 15)
294 |     new_shape = (15, 2, 1) # after
swapping x, z 295 | input = np.resize(input, shape) 296 | test_weight = np.resize(test_weight, shape) 297 | test_weight = np.swapaxes(test_weight, 0, 2) # swap x, z 298 | test_weight = np.flip(test_weight, axis=2) 299 | test_weight = np.flip(test_weight, axis=1) 300 | input = np.swapaxes(input, 0, 2) # swap x, z 301 | input = np.flip(input, axis=2) 302 | input = np.flip(input, axis=1) 303 | input_num = np.ones(new_shape) 304 | input_den = np.zeros(new_shape) 305 | test_weight_num = np.zeros(new_shape) 306 | test_weight_den = np.zeros(new_shape) 307 | #print(np.round(input*16)) 308 | for i in range(new_shape[0]): 309 | for j in range(new_shape[1]): 310 | a, b, err = nearest_fraction(bipolar_to_frequency(input[i,j,0]), 10) 311 | # print(a, b, bipolar_to_frequency(input[0,j,k])) 312 | input_num[i,j,0] = a 313 | input_den[i,j,0] = b 314 | input[i,j,0] = a / b 315 | a, b, err = nearest_fraction(bipolar_to_frequency(test_weight[i,j,0]), 10) 316 | test_weight_num[i,j,0] = a 317 | test_weight_den[i,j,0] = b 318 | test_weight[i,j,0] = a / b 319 | print(np.tanh(np.sum((input*2-1) * (test_weight*2-1)))) 320 | print(input[:,::-1,-1].transpose()) 321 | print(test_weight[:,::-1,-1].transpose()) 322 | 323 | nbtfile = nbt.NBTFile("test_fc2_input_empty.schem",'rb') 324 | crystall_period = (5, 2, 8) 325 | detected = (0, 0, 0) 326 | source = (0, 0, 1) 327 | # for i in range(0, 200): 328 | # if str(nbtfile['BlockEntities'][i]['Id']) == "minecraft:dropper": 329 | # print(nbtfile['BlockEntities'][i]['Pos']) 330 | set_weight(nbtfile, input_num, input_den, crystall_period, source, detected) 331 | nbtfile.write_file("test_fc2_input_fill.schem") 332 | ''' 333 | nbtfile2 = nbt.NBTFile("test_fc2_weight_empty.schem",'rb') 334 | crystall_period = (8, 2, 5) 335 | detected = (6, 0, 3) 336 | source = (7, 0, 3) 337 | set_weight(nbtfile2, test_weight_num, test_weight_den, crystall_period, source, detected) 338 | nbtfile2.write_file("test_fc2_weight_fill.schem") 339 | ''' 340 | 341 | def test_fill_fc3_input(): # fill input of 3rd layer 342 | input = np.genfromtxt("CNN weights/fc3_input.txt") 343 | test_weight = np.genfromtxt("CNN weights/fc3_weight.txt")[3] * 0.5 344 | 345 | input[input > 1] = 1 346 | input[input < -1] = -1 347 | test_weight[test_weight > 1] = 1 348 | test_weight[test_weight < -1] = -1 349 | #print(np.dot(input, test_weight), np.tanh(np.dot(input, test_weight))) 350 | 351 | shape = (1, 2, 15) 352 | input = np.resize(input, shape) 353 | test_weight = np.resize(test_weight, shape) 354 | test_weight = np.flip(test_weight, axis=2) 355 | test_weight = np.flip(test_weight, axis=1) 356 | input = np.flip(input, axis=2) 357 | input = np.flip(input, axis=1) 358 | input_num = np.ones(shape) 359 | input_den = np.zeros(shape) 360 | test_weight_num = np.zeros(shape) 361 | test_weight_den = np.zeros(shape) 362 | print(np.sum(input*test_weight)) 363 | for j in range(shape[1]): 364 | for k in range(shape[2]): 365 | a, b, err = nearest_fraction(bipolar_to_frequency(input[0,j,k]), 10) 366 | # print(a, b, bipolar_to_frequency(input[0,j,k])) 367 | input_num[0,j,k] = a 368 | input_den[0,j,k] = b 369 | input[0,j,k] = a / b# * 2 - 1 370 | a, b, err = nearest_fraction(bipolar_to_frequency(test_weight[0,j,k]), 10) 371 | test_weight_num[0,j,k] = a 372 | test_weight_den[0,j,k] = b 373 | test_weight[0,j,k] = a / b# * 2 - 1 374 | print(np.tanh(np.sum((input*2-1) * (test_weight*2-1)))) 375 | print(input[:,::-1,::-1]) 376 | print(test_weight[:,::-1,::-1]) 377 | 378 | nbtfile = nbt.NBTFile("test_fc3_input_empty.schem",'rb') 379 | 
crystall_period = (8, 2, 5)
380 |     detected = (0, 0, 0)
381 |     source = (1, 0, 0)
382 |     set_weight(nbtfile, input_num, input_den, crystall_period, source, detected)
383 |     nbtfile.write_file("test_fc3_input_fill.schem")
384 |     '''
385 |     nbtfile2 = nbt.NBTFile("test_fc1_weight_empty.schem",'rb')
386 |     crystall_period = (8, 2, 5)
387 |     detected = (6, 0, 3)
388 |     source = (7, 0, 3)
389 |     set_weight(nbtfile2, test_weight_num, test_weight_den, crystall_period, source, detected)
390 |     nbtfile2.write_file("test_fc1_weight_fill.schem")
391 |     '''
392 | 
393 | # when writing these functions, pay attention to the actual orientation of the device
394 | def test_fill_fc3_weight_double(): # fill 3rd layer weight, testing, [-2,2] range
395 |     weight = np.genfromtxt("CNN weights/fc3_weight.txt")
396 |     shape = (10, 2, 15) # Matrix 10*30 to shape (10, 2, 15) in x,y,z, y being the height
397 |     weight = np.reshape(weight, shape)
398 |     device_shape = (10, 4, 15) # 4 = 2*2, 2 is the integral range [-2,2], see the 1st stage report
399 |     weight_num = np.ones(device_shape) # weight numerator
400 |     weight_den = np.zeros(device_shape) # weight denominator
401 |     for i in range(shape[0]):
402 |         for j in range(shape[1]):
403 |             for k in range(shape[2]):
404 |                 # divided by 2 since we are using the [-2,2] range
405 |                 a, b, err = nearest_fraction(bipolar_to_frequency(weight[i,j,k] / 2), 10)
406 |                 weight_num[i,j * 2,k] = a
407 |                 weight_num[i,j * 2 + 1,k] = a
408 |                 weight_den[i,j * 2,k] = b
409 |                 weight_den[i,j * 2 + 1,k] = b
410 | 
411 |     nbtfile = nbt.NBTFile("test_weight_empty.schem",'rb')
412 |     crystall_period = (8, 2, 5)
413 |     detected = (6, 1, 3) # these two can be determined by printing the positions of some droppers, using the commented-out code below
414 |     source = (7, 1, 3)
415 |     # for i in range(1200, 1300):
416 |     #     if str(nbtfile['BlockEntities'][i]['Id']) == "minecraft:dropper":
417 |     #         print(nbtfile['BlockEntities'][i]['Pos'])
418 |     #set_weight(nbtfile, weight_num, weight_den, crystall_period, source, detected)
419 |     #nbtfile.write_file("test_weight_fill.schem")
420 | 
421 | def test_fill_fc3_input_double(): # fill 3rd layer input, testing, [-2,2] range
422 |     input = np.genfromtxt("CNN weights/fc3_input.txt")
423 |     shape = (1, 2, 15)
424 |     input = np.resize(input, shape)
425 |     input_num = np.ones(shape)
426 |     input_den = np.zeros(shape)
427 |     for j in range(shape[1]):
428 |         for k in range(shape[2]):
429 |             a, b, err = nearest_fraction(bipolar_to_frequency(input[0,j,k]), 10)
430 |             # print(a, b, bipolar_to_frequency(input[0,j,k]))
431 |             input_num[0,j,k] = a
432 |             input_den[0,j,k] = b
433 |     nbtfile = nbt.NBTFile("test_input_empty.schem",'rb')
434 |     crystall_period = (8, 4, 5)
435 |     detected = (0, 0, 0)
436 |     source = (1, 0, 0)
437 |     # for i in range(0, 20):
438 |     #     if str(nbtfile['BlockEntities'][i]['Id']) == "minecraft:dropper":
439 |     #         print(nbtfile['BlockEntities'][i]['Pos'])
440 |     set_weight(nbtfile, input_num, input_den, crystall_period, source, detected)
441 |     nbtfile.write_file("test_input_fill.schem")
442 | 
443 | if __name__ == "__main__":
444 |     #test_fill_fc2_input()
445 |     #fill_fc2_weight()
446 | 
447 |     # check_empty_dropper("fc1_weight_upper_half_check.schem")
448 |     # check_empty_dropper("test_fc1_input_check.schem")
449 | 
450 |     check_empty_dropper("test_dropper_empty.schem", pos=(-244,0,-415))
451 |     check_anomalous_dropper("test_dropper_empty.schem", pos=(-244,0,-415), layer='fc3')
452 | 
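test_fill_fc3_weight_double above halves each weight, encodes the half, and writes the same fraction into two vertically stacked cells (rows j*2 and j*2+1), so the device can sum two [-1,1] streams back into one [-2,2] weight. A worked example of that arithmetic (the value 1.3 is illustrative, and bipolar_to_frequency / nearest_fraction are the helpers sketched earlier):

w = 1.3                              # outside the bipolar range [-1, 1]
p = bipolar_to_frequency(w / 2)      # 0.825
a, b, err = nearest_fraction(p, 10)  # -> 5/6, about 0.833
decoded_half = 2 * (a / b) - 1       # about 0.667
print(2 * decoded_half)              # about 1.33, close to w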
-------------------------------------------------------------------------------- /netpy.py: --------------------------------------------------------------------------------
1 | 
2 | from matplotlib import pyplot as plt
3 | import numpy as np
4 | from numpy import dot, tanh
5 | 
6 | kernel = np.genfromtxt("CNN weights/conv_kernel_16.txt") / 16
7 | #kernel = np.genfromtxt("CNN weights/conv1_weight.txt")
8 | hidden_layer1_linear_trans = np.genfromtxt("CNN weights/fc1_weight.txt")
9 | hidden_layer2_linear_trans = np.genfromtxt("CNN weights/fc2_weight.txt")
10 | output_layer_linear_trans = np.genfromtxt("CNN weights/fc3_weight.txt")
11 | # hidden_layer1_linear_trans = torch.Tensor(np.load("weight_data/hidden1.npy"))
12 | # hidden_layer2_linear_trans = torch.Tensor(np.load("weight_data/hidden2.npy"))
13 | # output_layer_linear_trans = torch.Tensor(np.load("weight_data/out_weight.npy"))
14 | 
15 | #hidden_layer1_linear_trans = np.round(hidden_layer1_linear_trans * 33) / 33
16 | answer = np.genfromtxt("DNN weights/value_list.txt")
17 | 
18 | correct_count = 0
19 | wrong_count = 0
20 | 
21 | def conv_2d_single_kernel(input_data, kernel, stride):
22 |     """Convolve with a single kernel to get a single output map.
23 |     Since this is a didactic implementation of convolution it is kept simple;
24 |     padding is handled automatically, similar to "SAME" in tf.
25 |     Args:
26 |         input_data: input of the convolution layer, an np.array
27 |             with shape [h, w].
28 |         kernel: the convolution kernel, shape [k_h, k_w].
29 |         stride: stride, list [s_h, s_w].
30 |     Return:
31 |         out: the convolution result.
32 |     """
33 |     h, w = input_data.shape
34 |     kernel_h, kernel_w = kernel.shape
35 | 
36 |     stride_h, stride_w = stride
37 | 
38 |     out = np.zeros((h//stride_h, w//stride_w))
39 |     for idx_h, i in enumerate(range(0, h-kernel_h+1, stride_h)):
40 |         for idx_w, j in enumerate(range(0, w-kernel_w+1, stride_w)):
41 |             window = input_data[i:i+kernel_h, j:j+kernel_w]
42 |             out[idx_h, idx_w] = np.sum(window*kernel)
43 |     return out
44 | 
45 | for i in range(10000):
46 |     #if int(answer[i]) != 9:
47 |     #    continue
48 | 
49 |     fig_data = np.genfromtxt("data_figures/fig{}.txt".format(i))
50 |     #fig_data = (fig_data > 0.5) # binarize
51 |     fig_data = np.ceil(fig_data) # binarize
52 |     #fig_data = np.round(fig_data) # binarize
53 |     #plt.imshow(fig_data, cmap='Greys')
54 |     #plt.show()
55 | 
56 |     #input_layer = np.reshape(fig_data, (256, 1))
57 |     conv_result = conv_2d_single_kernel(fig_data[0:15, 0:15], kernel, (2, 2))
58 |     #print(conv_result[1:7,1:7])
59 |     conv_result[conv_result < 0] = 0
60 |     conv_result[conv_result > 1] = 1
61 |     relu_output = np.reshape(conv_result, (49, 1))
62 |     # input_layer = torch.Tensor(input_layer)
63 | 
64 |     hidden_layer1_result = dot(hidden_layer1_linear_trans, relu_output)
65 |     hidden_layer1_output = tanh(hidden_layer1_result)
66 | 
67 |     hidden_layer2_result = dot(hidden_layer2_linear_trans, hidden_layer1_output)
68 |     hidden_layer2_output = tanh(hidden_layer2_result)
69 | 
70 |     output_layer_result = dot(output_layer_linear_trans, hidden_layer2_output)
71 |     output_layer_tanh = tanh(output_layer_result)
72 | 
73 |     number = np.argmax(output_layer_tanh)
74 |     print(number, int(answer[i]), end = ' ')
75 |     if number == int(answer[i]):
76 |         correct_count += 1
77 |         print('T')
78 |     else:
79 |         wrong_count += 1
80 |         print('F')
81 |     #print(output_layer_tanh)
82 | 
83 | print(correct_count, wrong_count)
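A quick sanity check on the shapes in netpy.py: a 3x3 kernel swept over the 15x15 crop with stride (2, 2) visits positions 0, 2, ..., 12 along each axis, so conv_2d_single_kernel returns a 7x7 map, and its 49 entries are exactly the inputs of fc1 (the all-zeros image and uniform kernel below are only for the shape check):

import numpy as np
fig = np.zeros((15, 15))
out = conv_2d_single_kernel(fig, np.ones((3, 3)) / 16, (2, 2))
print(out.shape)  # (7, 7) -> flattened into the 49 inputs of fc1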
-------------------------------------------------------------------------------- /network.cpp: --------------------------------------------------------------------------------
1 | #include"bitstring.h"
2 | #include"neuron.h"
3 | #include<iostream>
4 | #include<fstream>
5 | #include<cstdio>
6 | using namespace std;
7 | 
8 | const int bitstring_size = 1024;
9 | double input_layer_array[256];
10 | 
11 | class identifier
12 | {
13 | public:
14 |     identifier()
15 |         :input_layer_array{},
16 |         input_layer(input_layer_array),
17 |         hidden_layer1(input_layer, "hidden1_weight.txt"),
18 |         hidden_layer2(hidden_layer1, "hidden2_weight.txt"),
19 |         output_layer(hidden_layer2, "out_weight.txt")
20 |     {}
21 |     stochastic_computing_neuron_layer input_layer;
22 |     stochastic_computing_neuron_layer hidden_layer1;
23 |     stochastic_computing_neuron_layer hidden_layer2;
24 |     stochastic_computing_neuron_layer output_layer;
25 |     double input_layer_array[256];
26 |     int identify(const char* filename)
27 |     {
28 |         input_array(input_layer_array, filename, 256);
29 |         to_zero_or_one(input_layer_array, 256);
30 |         input_layer.update(input_layer_array);
31 |         hidden_layer1.update(input_layer);
32 |         hidden_layer2.update(hidden_layer1);
33 |         output_layer.update(hidden_layer2);
34 |         return output_layer.max_index();
35 |     }
36 | };
37 | 
38 | int identify(const char* filename)
39 | {
40 |     input_array(input_layer_array, filename, 256);
41 |     //to_zero_or_one(input_layer_array, 256);
42 |     stochastic_computing_neuron_layer input_layer(input_layer_array);
43 |     stochastic_computing_neuron_layer hidden_layer1(input_layer, "hidden1_weight.txt");
44 |     stochastic_computing_neuron_layer hidden_layer2(hidden_layer1, "hidden2_weight.txt");
45 |     stochastic_computing_neuron_layer output_layer(hidden_layer2, "out_weight.txt");
46 |     return output_layer.max_index();
47 | }
48 | 
49 | int answer[60000];
50 | int main()
51 | {
52 |     ifstream fin("value_list.txt");
53 |     for (int i = 0; i < 100; i++) {
54 |         double x;
55 |         fin >> x;
56 |         answer[i] = static_cast<int>(x);
57 |     }
58 | 
59 |     identifier id;
60 |     char filename[100];
61 |     char fmt[100] = "data_figures/fig%d.txt";
62 |     int correct_count = 0;
63 |     for (int i = 0; i < 100; i++) {
64 |         sprintf(filename, fmt, i);
65 |         int number = id.identify(filename);
66 |         correct_count += (number == answer[i]);
67 |         cout << number << " " << answer[i] << " " << ((number == answer[i]) ? 'T' : 'F') << "\n";
68 |     }
69 |     cout << correct_count << endl;
70 |     return 0;
71 | }
-------------------------------------------------------------------------------- /neuron.h: --------------------------------------------------------------------------------
1 | #pragma once
2 | #include"bitstring.h"
3 | #include<iostream>
4 | #include<fstream>
5 | #include<cmath>
6 | #include<algorithm>
7 | 
8 | using namespace std;
9 | //ratio class
10 | pair<pair<double, int>, double> to_the_nearest_ratio(double x, int R, int n)
11 | {
12 |     if (x == 0) return {{0, 1}, 0.0};
13 |     if (x == 1) return {{1, 1}, 0.0};
14 |     pair<double, int> result{0, 1};
15 |     double error = INFINITY;
16 |     for (int i = 1; i <= R; i++) {
17 |         if (n <= 1) {
18 |             double a = round(i * x); // a / i ≈ x
19 |             if (a > R) a = R;
20 |             if (a < 0) a = 0;
21 |             double new_error = abs(a / i - x);
22 |             if (new_error < error) {
23 |                 result = {a, i};
24 |                 error = new_error;
25 |             }
26 |         }
27 |         else {
28 |             auto test = to_the_nearest_ratio(x, R, n - 1);
29 |             if (test.second < error) {
30 |                 result = test.first;
31 |                 error = test.second;
32 |             }
33 |         }
34 |     }
35 |     return {result, error};
36 | }
37 | 
38 | template<int Size, int prev_size>
39 | auto input_weight(const char* filename)
40 | {
41 |     integral_bitstring (*Result)[prev_size] = new integral_bitstring[Size][prev_size];
42 |     ifstream fin(filename);
43 |     for (int i = 0; i < Size; i++) {
44 |         for (int j = 0; j < prev_size; j++) {
45 |             double x;
46 |             fin >> x;
47 |             Result[i][j] = integral_bitstring(x);
48 |         }
49 |     }
50 |     fin.close();
51 |     return Result;
52 | }
53 | 
54 | template<int Size, int prev_size>
55 | auto input_weight_precision(const char* filename, double precision, double factor = 1.0)
56 | {
57 |     integral_bitstring (*Result)[prev_size] = new integral_bitstring[Size][prev_size];
58 |     ifstream fin(filename);
59 |     for (int i = 0; i < Size; i++) {
60 |         for (int j = 0; j < prev_size; j++) {
61 |             double x;
62 |             fin >> x;
63 |             x = round(x / precision) * precision * factor;
64 |             Result[i][j] = integral_bitstring(x);
65 |         }
66 |     }
67 |     fin.close();
68 |     return Result;
69 | }
70 | 
71 | pair<int, int> nearest_fractor(double x, int N)
72 | {
73 |     double err = INFINITY;
74 |     int a = 0, b = 0;
75 |     for (int j = 1; j <= N; j++) {
76 |         int i = round(x * j);
77 |         double current_err = abs(i * 1.0 / j - x);
78 |         if (current_err < err) {
79 |             a = i;
80 |             b = j;
81 |             err = current_err;
82 |         }
83 |     }
84 |     return make_pair(a, b);
85 | }
86 | 
87 | template<int Size, int prev_size, int M>
88 | auto input_weight_fraction(const char* filename, int denominator_N, double factor = 1.0)
89 | {
90 |     integral_bitstring (*Result)[prev_size] = new integral_bitstring[Size][prev_size];
91 |     ifstream fin(filename);
92 |     for (int i = 0; i < Size; i++) {
93 |         for (int j = 0; j < prev_size; j++) {
94 |             double x;
95 |             fin >> x;
96 |             auto nearest_fractor_frequency = nearest_fractor((x * factor / M + 1) / 2, denominator_N);
97 |             x = nearest_fractor_frequency.first * 1.0 / nearest_fractor_frequency.second * 2 - 1;
98 |             x = x * M;
99 |             Result[i][j] = integral_bitstring(x);
100 |         }
101 |     }
102 |     fin.close();
103 |     return Result;
104 | }
105 | 
106 | void input_array(double input[], const char* filename, int N)
107 | {
108 |     ifstream fin(filename);
109 |     for (int i = 0; i < N; i++) fin >> input[i];
110 |     fin.close();
111 | }
112 | 
113 | void input_array(double input[], const char* filename, int N, double precision, double factor)
114 | {
115 |     ifstream fin(filename);
116 |     for (int i = 0; i < N; i++) {
117 |         double x;
118 |         fin >> x;
119 |         x = round(x / precision) * precision * factor;
120 |         input[i] = x;
121 |     }
122 |     fin.close();
123 | }
124 | 
125 | template<int M>
126 | void input_array_2d(double input[][M], const char* filename, int N, double precision = 0.01, double factor = 1.0)
127 | {
128 |     ifstream fin(filename);
129 |     for (int i = 0; i < N; i++) {
130 |         for (int j = 0; j < M; j++) {
131 |             double x;
132 |             fin >> x;
133 |             x = round(x / precision) * precision * factor;
134 |             // if (x > 1) x = 1;
135 |             input[i][j] = x;
136 |         }
137 |     }
138 |     fin.close();
139 | }
140 | 
141 | void to_zero_or_one(double input[], int N)
142 | {
143 |     for (int i = 0; i < N; i++) {
144 |         if (input[i] > 0.5) input[i] = 1;
145 |         else input[i] = 0;
146 |     }
147 | }
148 | 
149 | template<int M>
150 | void to_zero_or_one_2d(double input[][M], int N, double cond=0.5)
151 | {
152 |     for (int i = 0; i < N; i++) {
153 |         for (int j = 0; j < M; j++) {
154 |             if (input[i][j] > cond) input[i][j] = 1.0;
155 |             else input[i][j] = 0.0;
156 |         }
157 |     }
158 | }
159 | 
160 | template<int Size, int prev_size>
161 | class stochastic_computing_neuron_layer
162 | {
163 | public:
164 |     template<int prev_prev_size>
165 |     stochastic_computing_neuron_layer(const stochastic_computing_neuron_layer<prev_size, prev_prev_size>& previous_layer, const char* filename)
166 |         :LinearTrans(input_weight(filename))
167 |     {
168 |         for (int i = 0; i < Size; i++) {
169 |             integral_bitstring output = sum_of_array_with_weigth(LinearTrans[i], previous_layer.Output);
170 |             Output[i] = bitstring(output.NStanh(2));
171 |         }
172 |         previous_size = prev_size;
173 |     }
174 |     template<int prev_prev_size>
175 |     stochastic_computing_neuron_layer(const stochastic_computing_neuron_layer<prev_size, prev_prev_size>& previous_layer, const char* filename, int tanh_N, double precision, double factor)
176 |         :LinearTrans(input_weight_precision(filename, precision, factor))
177 |     {
178 |         for (int i = 0; i < Size; i++) {
179 |             integral_bitstring output = sum_of_array_with_weigth(LinearTrans[i], previous_layer.Output);
180 |             Output[i] = bitstring(output.NStanh(tanh_N));
181 |         }
182 |         previous_size = prev_size;
183 |     }
184 |     template<int prev_prev_size>
185 |     stochastic_computing_neuron_layer(const stochastic_computing_neuron_layer<prev_size, prev_prev_size>& previous_layer, const char* filename, int tanh_N, int denominator_N, double factor, int bound)
186 |         :LinearTrans(input_weight_fraction(filename, denominator_N, factor))
187 |     {
188 |         for (int i = 0; i < Size; i++) {
189 |             integral_bitstring output = sum_of_array_with_weigth(LinearTrans[i], previous_layer.Output);
190 |             Output[i] = bitstring(output.NStanh_bound(tanh_N, bound));
191 |         }
192 |         previous_size = prev_size;
193 |     }
194 |     stochastic_computing_neuron_layer(double const_output[]) : LinearTrans(nullptr)
195 |     {
196 |         for (int i = 0; i < Size; i++) Output[i] = bitstring(const_output[i]);
197 |     }
198 |     ~stochastic_computing_neuron_layer() { delete[] LinearTrans; }
199 |     void output_value() const
200 |     {
201 |         for (int i = 0; i < Size; i++) cout << Output[i].value() << " ";
202 |         cout << "\n";
203 |     }
204 |     void output_weight() const
205 |     {
206 |         cout << "Wei\n";
207 |         for (int i = 0; i < Size; i++) {
208 |             for (int j = 0; j < previous_size; j++) cout << LinearTrans[i][j].value() << " ";
209 |             cout << endl;
210 |         }
211 |     }
212 |     int max_index() const
213 |     {
214 |         return max_element(begin(Output), end(Output),
215 |             [](const bitstring& a, const bitstring& b)
216 |             {
217 |                 return a.value() < b.value();
218 |             }
219 |         ) - begin(Output);
220 |     }
221 |     template<int prev_prev_size>
222 |     void update(const stochastic_computing_neuron_layer<prev_size, prev_prev_size>& previous_layer)
223 |     {
224 |         for (int i = 0; i < Size; i++) {
225 |             integral_bitstring output = sum_of_array_with_weigth(LinearTrans[i], previous_layer.Output);
226 |             Output[i] = bitstring(output.NStanh(2));
227 |         }
228 |     }
229 |     void update(double const_output[])
230 |     {
231 |         for (int i = 0; i < Size; i++) Output[i] = bitstring(const_output[i]);
232 |     }
233 |     int previous_size;
234 |     integral_bitstring (*LinearTrans)[prev_size]; // LinearTrans[self_size][previous_size]
235 |     bitstring Output[Size];
236 | };
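The layer above works in the bipolar stochastic-computing encoding: a value v in [-1, 1] becomes a bitstream with P(bit = 1) = (v + 1) / 2, the product of two values is the XNOR of their streams, and the accumulated integral stream is squashed by NStanh, presumably the FSM-based stochastic tanh implemented in bitstring.h (which is not shown in this listing). A minimal numpy sketch of the encoding and multiplication only, independent of that header:

import numpy as np

rng = np.random.default_rng(0)

def to_stream(v, n=4096):
    # bipolar encoding: P(bit = 1) = (v + 1) / 2
    return (rng.random(n) < (v + 1) / 2).astype(int)

def value(stream):
    # decode a bipolar stream back to [-1, 1]
    return 2 * stream.mean() - 1

x, w = -0.25, 0.5
prod = 1 - (to_stream(x) ^ to_stream(w))  # XNOR of bipolar streams multiplies their values
print(value(prod), x * w)                 # both are close to -0.125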
-------------------------------------------------------------------------------- /test_white_board.schem: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/leamoon/StochasticNet/1474ba0fb0c245fe8a7a0f101142ad3d56ef507e/test_white_board.schem
-------------------------------------------------------------------------------- /weight_analysis_cnn.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import matplotlib.pyplot as plt
4 | from Cnn import *
5 | import torch
6 | from torchvision import datasets, transforms
7 | from torch.utils import data
8 | import shutil
9 | from torch.utils.tensorboard import SummaryWriter
10 | 
11 | def test(model, test_data, epoch_num, writer, decimal_number):
12 |     """
13 |     Test the accuracy of the neural network.
14 | 
15 |     Args:
16 |         model (class): a neural network
17 |         test_data (iterator): the test data
18 |         epoch_num (int): the training epoch being evaluated
19 |         writer (tensorboard): writes the log file for data visualization
20 |         decimal_number (int): number of decimal places kept in the weights
21 |     """
22 |     correct = 0
23 |     with torch.no_grad():
24 |         for t_data, target in test_data:
25 |             t_data_binary = np.ceil(t_data.numpy()) # binarize
26 |             t_data = torch.from_numpy(t_data_binary)
27 |             t_data, target = Variable(t_data), Variable(target)
28 |             output = model(t_data)
29 |             pred = output.max(1, keepdim=True)[1]
30 |             correct += pred.eq(target.view_as(pred)).sum().item()
31 | 
32 |     print("\nTest: Epoch:{} Accuracy: {}/{} ({:.2f}%) \n".format(epoch_num, correct, len(test_data.dataset),
33 |         100. * correct / len(test_data.dataset)))
34 |     # record data in the tensorboard log
35 |     # writer.add_scalar(f'Accuracy_{decimal_number}', 100. * correct / len(test_data.dataset), epoch_num)
36 |     writer.add_scalar(f'Accuracy_{epoch_num}', 100. * correct / len(test_data.dataset), decimal_number)
37 | 
38 | if __name__ == '__main__':
39 |     # load the weight data and save it as .txt files.
40 |     net = Net()
41 |     net = torch.load('weight_data_cnn/epoch_11')
42 |     new_weights = net.state_dict()['conv1.weight'].numpy()
43 |     print(new_weights)
44 |     np.savetxt('conv1_weight.txt', np.array(new_weights[0][0]))
45 |     new_weights = net.state_dict()['fc1.weight'].numpy()
46 |     np.savetxt('fc1_weight.txt', new_weights)
47 |     new_weights = net.state_dict()['fc2.weight'].numpy()
48 |     np.savetxt('fc2_weight.txt', new_weights)
49 |     new_weights = net.state_dict()['fc3.weight'].numpy()
50 |     np.savetxt('fc3_weight.txt', new_weights)
51 | 
52 |     # check the weight data
53 |     file_list = ['fc1_max_list.txt', 'fc1_min_list.txt', 'fc2_max_list.txt', 'fc2_min_list.txt',
54 |         'fc3_min_list.txt', 'fc3_max_list.txt', 'conv1_min_list.txt', 'conv1_max_list.txt']
55 |     plt.figure()
56 |     for file_path in file_list:
57 |         if os.path.exists(file_path):
58 |             datas = np.loadtxt(file_path)
59 |             plt.plot(datas, label=f'{file_path}')
60 |     plt.xlabel('Epoch', size=20)
61 |     plt.ylabel('Value', size=20)
62 |     plt.title('Weight', size=16)
63 | 
64 |     # indicator lines at +/-3 for the figure
65 |     line_3 = [3]*len(datas)
66 |     line_minus_3 = [-3]*len(datas)
67 |     plt.plot(line_3, c='black', linestyle='--')
68 |     plt.plot(line_minus_3, c='black', linestyle='--')
69 |     plt.legend()
70 |     plt.show()
71 | 
72 |     train_transformer = transforms.Compose([
73 |         transforms.Resize(15), # down-sampling
74 |         transforms.ToTensor()
75 |     ])
76 | 
77 |     # data loading
78 |     train_loader = data.DataLoader(
79 |         datasets.MNIST('data', train=True, download=True, transform=train_transformer),
80 |         batch_size=BATCH_SIZE, shuffle=True)
81 | 
82 |     test_loader = data.DataLoader(
83 |         datasets.MNIST('data', train=False, download=True, transform=train_transformer),
84 |         batch_size=BATCH_SIZE, shuffle=True)
85 | 
86 |     # create the neural network
87 |     net = Net()
88 | 
89 |     # data visualization
90 |     tensorlog_path = 'FullConnect_Mnist'
91 |     if os.path.exists(tensorlog_path):
92 |         shutil.rmtree(tensorlog_path)
93 |     writer = SummaryWriter(tensorlog_path)
94 |     epoch_number_list = np.linspace(20, 40, 21, dtype=int)
95 |     decimal_number_list = np.linspace(0, 16, 17, dtype=int)
96 | 
97 |     # for decimal_value in decimal_number_list:
98 |     for epoch_value in epoch_number_list:
99 |         for decimal_value in decimal_number_list:
100 |             net = torch.load(f'weight_data/epoch_{epoch_value}')
101 |             new_weights = net.state_dict()['out.weight'].numpy()
102 |             new_weights = torch.from_numpy(np.round(new_weights, decimal_value))
103 |             net.state_dict()['out.weight'].copy_(new_weights)
104 |             new_weights = net.state_dict()['hidden1.weight'].numpy()
105 |             new_weights = torch.from_numpy(np.round(new_weights, decimal_value))
106 |             net.state_dict()['hidden1.weight'].copy_(new_weights)
107 |             new_weights = net.state_dict()['hidden2.weight'].numpy()
108 |             new_weights = torch.from_numpy(np.round(new_weights, decimal_value))
109 |             net.state_dict()['hidden2.weight'].copy_(new_weights)
110 |             test(model=net, test_data=test_loader, epoch_num=epoch_value, writer=writer, decimal_number=decimal_value)
111 | 
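weight_analysis_cnn.py above and weigtht_analysis_fnn.py below share the same rounding sweep: every weight matrix is rounded to a given number of decimals and the model is re-tested, to see how much precision the weights actually need. The repeated copy-paste could be folded into one helper (a sketch, assuming the same state-dict keys the loops use):

import numpy as np
import torch

def round_weights(net, keys, decimals):
    # round the selected weight tensors in place, exactly as the sweeps do
    with torch.no_grad():
        state = net.state_dict()
        for key in keys:
            rounded = np.round(state[key].numpy(), decimals)
            state[key].copy_(torch.from_numpy(rounded))

# usage mirroring the sweep:
# round_weights(net, ['out.weight', 'hidden1.weight', 'hidden2.weight'], decimal_value)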
12 | """ 13 | a function used to test the accuracy of neural network. 14 | 15 | Args: 16 | model (class): a neural network 17 | test_data (iterator): test datas 18 | epoch_num (int): repeat number of datasets used to train 19 | writer (tensorboard): write log file for data visition 20 | decimal_number (int): the significant number of weights 21 | """ 22 | correct = 0 23 | with torch.no_grad(): 24 | for t_data, target in test_data: 25 | t_data = t_data.view(t_data.size(0), -1) 26 | t_data_binary = np.ceil(t_data.numpy()) 27 | t_data = torch.from_numpy(t_data_binary) 28 | t_data, target = Variable(t_data), Variable(target) 29 | output = model(t_data) 30 | pred = output.max(1, keepdim=True)[1] 31 | correct += pred.eq(target.view_as(pred)).sum().item() 32 | 33 | print("\nTest: Epoch:{} Accuracy: {}/{} ({:.2f}%) \n".format(epoch_num, correct, len(test_data.dataset), 34 | 100. * correct / len(test_data.dataset))) 35 | # record data in tensorboard log 36 | # writer.add_scalar(f'Accuracy_{decimal_number}', 100. * correct / len(test_data.dataset), epoch_num) 37 | writer.add_scalar(f'Accuracy_{epoch_num}', 100. * correct / len(test_data.dataset), decimal_number) 38 | 39 | if __name__ == '__main__': 40 | # load the weight data and save them as .txt file. 41 | net = Net(n_feature=size_inputs, n_hidden1=size_hidden1, n_hidden2=size_hidden2, n_output=size_outputs) 42 | net = torch.load(f'weight_data\epoch_20') 43 | new_weights = net.state_dict()['out.weight'].numpy() 44 | np.savetxt('out_weight.txt', new_weights) 45 | new_weights = net.state_dict()['hidden1.weight'].numpy() 46 | np.savetxt('hidden1_weight.txt', new_weights) 47 | new_weights = net.state_dict()['hidden2.weight'].numpy() 48 | np.savetxt('hidden2_weight.txt', new_weights) 49 | 50 | # check for weight datas 51 | file_list = ['out_max_list.npy', 'out_min_list.npy', 'hid1_max_list.npy', 'hid2_max_list.npy', 52 | 'hid1_min_list.npy', 'hid2_min_list.npy'] 53 | plt.figure() 54 | for file_path in file_list: 55 | if os.path.exists(file_path): 56 | datas = np.load(file_path) 57 | plt.plot(datas, label=f'{file_path}') 58 | plt.xlabel('Epoch', size=20) 59 | plt.ylabel('Value', size=20) 60 | plt.title('Weight', size=16) 61 | 62 | # indication line for Figs 63 | line_2 = [3]*len(datas) 64 | line_minus_2 = [-3]*len(datas) 65 | plt.plot(line_2, c='black', linestyle='--') 66 | plt.plot(line_minus_2, c='black', linestyle='--') 67 | plt.legend() 68 | plt.show() 69 | 70 | train_transformer = transforms.Compose([ 71 | transforms.Resize(16), # down sampling 72 | transforms.ToTensor() 73 | ]) 74 | 75 | # data loading 76 | train_loader = data.DataLoader( 77 | datasets.MNIST('data', train=True, download=True, transform=train_transformer), 78 | batch_size=BATCH_SIZE, shuffle=True) 79 | 80 | test_loader = data.DataLoader( 81 | datasets.MNIST('data', train=False, download=True, transform=train_transformer), 82 | batch_size=BATCH_SIZE, shuffle=True) 83 | 84 | # create the Neural Network 85 | net = Net(n_feature=size_inputs, n_hidden1=size_hidden1, n_hidden2=size_hidden2, n_output=size_outputs) 86 | 87 | # data visition 88 | tensorlog_path = 'FullConnect_Mnist' 89 | if os.path.exists(tensorlog_path): 90 | shutil.rmtree(tensorlog_path) 91 | writer = SummaryWriter(tensorlog_path) 92 | epoch_number_list = np.linspace(20, 40, 21, dtype=int) 93 | decimal_number_list = np.linspace(0, 16, 17, dtype=int) 94 | 95 | # for decimal_value in decimal_number_list: 96 | for epoch_value in epoch_number_list: 97 | for decimal_value in decimal_number_list: 98 | net = 
98 |             net = torch.load(f'weight_data/epoch_{epoch_value}')
99 |             new_weights = net.state_dict()['out.weight'].numpy()
100 |             new_weights = torch.from_numpy(np.round(new_weights, decimal_value))
101 |             net.state_dict()['out.weight'].copy_(new_weights)
102 |             new_weights = net.state_dict()['hidden1.weight'].numpy()
103 |             new_weights = torch.from_numpy(np.round(new_weights, decimal_value))
104 |             net.state_dict()['hidden1.weight'].copy_(new_weights)
105 |             new_weights = net.state_dict()['hidden2.weight'].numpy()
106 |             new_weights = torch.from_numpy(np.round(new_weights, decimal_value))
107 |             net.state_dict()['hidden2.weight'].copy_(new_weights)
108 |             test(model=net, test_data=test_loader, epoch_num=epoch_value, writer=writer, decimal_number=decimal_value)
109 | 
-------------------------------------------------------------------------------- /write_number.py: --------------------------------------------------------------------------------
1 | 
2 | from nbt import nbt
3 | 
4 | import numpy as np
5 | import os
6 | from matplotlib import pyplot as plt
7 | 
8 | def get_number(i):
9 |     return np.genfromtxt("codes/data_figures/fig{}.txt".format(i)) > 0.5 # binarize
10 | 
11 | def plot(i):
12 |     plt.imshow(get_number(i), cmap='Greys')
13 |     plt.show()
14 | 
15 | def write_to_file(i):
16 |     number_file = get_number(i)
17 |     plot(i)
18 |     nbtfile = nbt.NBTFile("nbt/test_white_board.schem",'rb')
19 |     nbtfile["Palette"].tags.append(nbt.TAG_Int(value=2, name="minecraft:redstone_block"))
20 |     nbtfile["PaletteMax"] = nbt.TAG_Int(value=3, name="PaletteMax")
21 |     for i in range(15):
22 |         for j in range(15):
23 |             if number_file[i][j] > 0.5:
24 |                 k = (14 - i) * 15 * 4 + (14 - j) * 2 + 30 + 1
25 |                 print(k)
26 |                 nbtfile["BlockData"][k] = 2
27 |     # for i in len(nbtfile['BlockData']):
28 |     #     if str(nbtfile['BlockData'][i]) == str(nbtfile["Palette"]["minecraft:black_concrete"]):
29 |     #         pass
30 |     print(nbtfile["BlockData"])
31 |     # print(nbtfile.pretty_tree())
32 |     nbtfile.write_file("nbt/test_number_written.schem")
33 | 
34 | if __name__ == "__main__":
35 |     os.chdir(os.path.pardir)
36 |     write_to_file(15)
--------------------------------------------------------------------------------
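write_to_file maps pixel (i, j) of the 15x15 digit to the flat BlockData index k = (14 - i) * 15 * 4 + (14 - j) * 2 + 30 + 1, i.e. two blocks per pixel along one axis plus a fixed offset into the board. A sketch that inverts the mapping to verify a written schematic; it assumes the palette index 2 that write_to_file registered for minecraft:redstone_block is still in effect:

def read_back(path="nbt/test_number_written.schem"):
    # rebuild the 15x15 pixel grid from the same indices write_to_file used
    nbtfile = nbt.NBTFile(path, 'rb')
    img = np.zeros((15, 15))
    for i in range(15):
        for j in range(15):
            k = (14 - i) * 15 * 4 + (14 - j) * 2 + 30 + 1
            img[i][j] = (nbtfile["BlockData"][k] == 2)
    plt.imshow(img, cmap='Greys')
    plt.show()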