├── README.md
├── conv.py
├── conv_net_3d
│   ├── conv.py
│   ├── input_data.py
│   └── read.py
├── createfiles.py
└── input_data.py

/README.md:
--------------------------------------------------------------------------------
1 | # QuantumMachineLearning
2 |
--------------------------------------------------------------------------------
/conv.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import input_data
3 | import sys
4 |
5 | L=200
6 | lx=4 #=int(raw_input('lx'))
7 | V4d=lx*lx*lx*L # 4d volume
8 |
9 | training=10000 #=int(raw_input('training'))
10 | bsize=400 #=int(raw_input('bsize'))
11 |
12 | # what the data looks like
13 | Ntemp=104 #int(raw_input('Ntemp')) #20 # number of different temperatures used in the simulation
14 | samples_per_T=80 #int(raw_input('samples_per_T')) #250 # number of samples per temperature value
15 | samples_per_T_test=20 # int(raw_input('samples_per_T')) #250 # number of samples per temperature value
16 |
17 |
18 | numberlabels=2
19 | mnist = input_data.read_data_sets(numberlabels,lx,'txt', one_hot=True)
20 |
21 | print "reading sets ok"
22 |
23 | #sys.exit("pare aqui")
24 |
25 | # defining the weights and their initialization
26 | def weight_variable(shape):
27 |   initial = tf.truncated_normal(shape, stddev=0.1)
28 |   return tf.Variable(initial)
29 |
30 | def bias_variable(shape):
31 |   initial = tf.constant(0.1, shape=shape)
32 |   return tf.Variable(initial)
33 |
34 | # defining the convolutional and max pool layers
35 | def conv2d(x, W):
36 |   return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
37 |
38 | def max_pool_2x2(x):
39 |   return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
40 |                         strides=[1, 2, 2, 1], padding='SAME')
41 |
42 |
43 | # defining the model
44 |
45 | x = tf.placeholder("float", shape=[None, V4d])
46 | y_ = tf.placeholder("float", shape=[None, numberlabels])
47 |
48 | #first fully connected layer
49 | nlayer1=400
50 | W_1 = weight_variable([V4d, nlayer1])
51 | b_1 = bias_variable([nlayer1])
52 |
53 |
54 | h_1 = tf.nn.relu(tf.matmul(x, W_1) + b_1)
55 |
56 | # Dropout: To reduce overfitting, we will apply dropout before the readout layer. We create a placeholder for the probability that a neuron's output is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing. TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling.
57 |
58 | keep_prob = tf.placeholder("float")
59 | h_1_drop = tf.nn.dropout(h_1, keep_prob)
60 |
61 | nlayer2=400
62 | W_2 = weight_variable([nlayer1,nlayer2])
63 | b_2 = bias_variable([nlayer2])
64 |
65 |
66 | h_2 = tf.nn.relu(tf.matmul(h_1_drop, W_2) + b_2)
67 |
68 | # Dropout: To reduce overfitting, we will apply dropout before the readout layer. We create a placeholder for the probability that a neuron's output is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing. TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling.
69 |
70 | #keep_prob = tf.placeholder("float")
71 | h_2_drop = tf.nn.dropout(h_2, keep_prob)
72 |
73 |
74 | nlayer3=100
75 | W_3 = weight_variable([nlayer2,nlayer3])
76 | b_3 = bias_variable([nlayer3])
77 |
78 | h_3 = tf.nn.relu(tf.matmul(h_2_drop, W_3) + b_3)
79 |
80 | # Dropout: To reduce overfitting, we will apply dropout before the readout layer.
We create a placeholder for the probability that a neuron's output is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing. TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling. 81 | 82 | #keep_prob = tf.placeholder("float") 83 | h_3_drop = tf.nn.dropout(h_3, keep_prob) 84 | 85 | 86 | # readout layer. Finally, we add a softmax layer, just like for the one layer softmax regression above. 87 | 88 | # weights and bias 89 | W_fc4 = weight_variable([nlayer3, numberlabels]) 90 | b_fc4 = bias_variable([numberlabels]) 91 | 92 | # apply a softmax layer 93 | y_conv=tf.nn.softmax(tf.matmul(h_3_drop, W_fc4) + b_fc4) 94 | 95 | 96 | #Train and Evaluate the Model 97 | # cost function to minimize 98 | lamb=0.00001 99 | #lamb=0.001 100 | #cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv)) 101 | cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_conv,1e-10,1.0))) +lamb*(tf.nn.l2_loss(W_1)+tf.nn.l2_loss(W_2) )+lamb*(tf.nn.l2_loss(W_fc4)+tf.nn.l2_loss(W_3) ) 102 | 103 | train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) 104 | correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) 105 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 106 | 107 | sess = tf.Session() 108 | sess.run(tf.initialize_all_variables()) 109 | 110 | for i in range(training): 111 | batch = mnist.train.next_batch(bsize) 112 | if i%100 == 0: 113 | train_accuracy = sess.run(accuracy,feed_dict={ 114 | x:batch[0], y_: batch[1], keep_prob: 1.0}) 115 | print "step %d, training accuracy %g"%(i, train_accuracy) 116 | print "test accuracy %g"%sess.run(accuracy, feed_dict={ 117 | x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}) 118 | # train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) 119 | sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) 120 | print "test accuracy %g"%sess.run(accuracy, feed_dict={ 121 | x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}) 122 | 123 | 124 | #producing data to get the plots we like 125 | 126 | f = open('nnout.dat', 'w') 127 | 128 | #output of neural net 129 | ii=0 130 | for i in range(Ntemp): 131 | av=0.0 132 | for j in range(samples_per_T_test): 133 | batch=(mnist.test.images[ii,:].reshape((1,V4d)),mnist.test.labels[ii,:].reshape((1,numberlabels))) 134 | res=sess.run(y_conv,feed_dict={x: batch[0], y_: batch[1],keep_prob: 1.0}) 135 | av=av+res 136 | #print ii, res 137 | ii=ii+1 138 | av=av/samples_per_T_test 139 | f.write(str(i)+' '+str(av[0,0])+' '+str(av[0,1])+"\n") 140 | f.close() 141 | 142 | 143 | f = open('acc.dat', 'w') 144 | 145 | # accuracy vs temperature 146 | for ii in range(Ntemp): 147 | batch=(mnist.test.images[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape(samples_per_T_test,V4d), mnist.test.labels[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape((samples_per_T_test,numberlabels)) ) 148 | train_accuracy = sess.run(accuracy,feed_dict={ 149 | x:batch[0], y_: batch[1], keep_prob: 1.0}) 150 | f.write(str(ii)+' '+str(train_accuracy)+"\n") 151 | f.close() 152 | 153 | 154 | -------------------------------------------------------------------------------- /conv_net_3d/conv.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import input_data 3 | import sys 4 | 5 | 6 | L=200 7 | lx=4 #=int(raw_input('lx')) 8 | V4d=lx*lx*lx*L # 
4d volume 9 | 10 | training=5000 #=int(raw_input('training')) 11 | bsize=400 #=int(raw_input('bsize')) 12 | 13 | # how does the data look like 14 | Ntemp=41 #int(raw_input('Ntemp')) #20 # number of different temperatures used in the simulation 15 | samples_per_T=500 #int(raw_input('samples_per_T')) #250 # number of samples per temperature value 16 | samples_per_T_test=500 # int(raw_input('samples_per_T')) #250 # number of samples per temperature value 17 | 18 | 19 | numberlabels=2 20 | mnist = input_data.read_data_sets(numberlabels,lx,L,'txt', one_hot=True) 21 | 22 | 23 | 24 | print "reading sets ok" 25 | 26 | #sys.exit("pare aqui") 27 | 28 | # defining weighs and initlizatinon 29 | def weight_variable(shape): 30 | initial = tf.truncated_normal(shape, stddev=0.1) 31 | return tf.Variable(initial) 32 | 33 | def bias_variable(shape): 34 | initial = tf.constant(0.1, shape=shape) 35 | return tf.Variable(initial) 36 | 37 | # defining the convolutional and max pool layers 38 | def conv3d(x, W): 39 | return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='VALID') 40 | 41 | # defining the model 42 | 43 | x = tf.placeholder("float", shape=[None, (lx)*(lx)*(lx)*L]) # placeholder for the spin configurations 44 | #x = tf.placeholder("float", shape=[None, lx*lx*2]) #with padding and no PBC conv net 45 | y_ = tf.placeholder("float", shape=[None, numberlabels]) 46 | 47 | 48 | #first layer 49 | # convolutional layer # 2x2x2 patch size, 2 channel (2 color), 64 feature maps computed 50 | nmaps1=64 51 | spatial_filter_size=2 52 | W_conv1 = weight_variable([spatial_filter_size, spatial_filter_size, spatial_filter_size,L,nmaps1]) 53 | # bias for each of the feature maps 54 | b_conv1 = bias_variable([nmaps1]) 55 | 56 | # applying a reshape of the data to get the two dimensional structure back 57 | #x_image = tf.reshape(x, [-1,lx,lx,2]) # #with padding and no PBC conv net 58 | x_image = tf.reshape(x, [-1,lx,lx,lx,L]) # with PBC 59 | 60 | #We then convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool. 61 | 62 | h_conv1 = tf.nn.relu(conv3d(x_image, W_conv1) + b_conv1) 63 | 64 | h_pool1=h_conv1 65 | 66 | #In order to build a deep network, we stack several layers of this type. The second layer will have 8 features for each 5x5 patch. 67 | 68 | # weights and bias of the fully connected (fc) layer. Ihn this case everything looks one dimensiona because it is fully connected 69 | nmaps2=64 70 | 71 | #W_fc1 = weight_variable([(lx/2) * (lx/2) * nmaps1,nmaps2 ]) # with maxpool 72 | W_fc1 = weight_variable([(lx-1) * (lx-1)*(lx-1)*nmaps1,nmaps2 ]) # no maxpool images remain the same size after conv 73 | 74 | b_fc1 = bias_variable([nmaps2]) 75 | 76 | # first we reshape the outcome h_pool2 to a vector 77 | #h_pool1_flat = tf.reshape(h_pool1, [-1, (lx/2)*(lx/2)*nmaps1]) # with maxpool 78 | 79 | h_pool1_flat = tf.reshape(h_pool1, [-1, (lx-1)*(lx-1)*(lx-1)*nmaps1]) # no maxpool 80 | # then apply the ReLU with the fully connected weights and biases. 81 | h_fc1 = tf.nn.relu(tf.matmul(h_pool1_flat, W_fc1) + b_fc1) 82 | 83 | # Dropout: To reduce overfitting, we will apply dropout before the readout layer. We create a placeholder for the probability that a neuron's output is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing. TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling. 
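# Shape bookkeeping for the layers above (illustrative only; these two names are not used
# elsewhere): with a 2x2x2 filter, stride 1 and 'VALID' padding, each spatial dimension
# shrinks from lx to lx - spatial_filter_size + 1 = lx - 1, which is why W_fc1 and the
# reshape of h_pool1 use (lx-1)*(lx-1)*(lx-1)*nmaps1 entries per sample.
conv_out_side = lx - spatial_filter_size + 1                        # = 3 for lx = 4
flat_conv_size = conv_out_side*conv_out_side*conv_out_side*nmaps1   # length of h_pool1_flat per sample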
84 | 85 | keep_prob = tf.placeholder("float") 86 | h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) 87 | 88 | # readout layer. Finally, we add a softmax layer, just like for the one layer softmax regression above. 89 | 90 | # weights and bias 91 | W_fc2 = weight_variable([nmaps2, numberlabels]) 92 | b_fc2 = bias_variable([numberlabels]) 93 | 94 | # apply a softmax layer 95 | y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) 96 | 97 | 98 | #Train and Evaluate the Model 99 | # cost function to minimize 100 | 101 | #cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv)) 102 | cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_conv,1e-10,1.0))) 103 | 104 | train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) 105 | correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) 106 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 107 | 108 | sess = tf.Session() 109 | sess.run(tf.initialize_all_variables()) 110 | 111 | for i in range(training): 112 | batch = mnist.train.next_batch(bsize) 113 | if i%100 == 0: 114 | train_accuracy = sess.run(accuracy,feed_dict={ 115 | x:batch[0], y_: batch[1], keep_prob: 1.0}) 116 | print "step %d, training accuracy %g"%(i, train_accuracy) 117 | print "test accuracy %g"%sess.run(accuracy, feed_dict={ 118 | x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}) 119 | #print "test Trick accuracy %g"%sess.run(accuracy, feed_dict={ 120 | #x: mnist.test_Trick.images, y_: mnist.test_Trick.labels, keep_prob: 1.0}) 121 | # train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) 122 | sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) 123 | print "test accuracy %g"%sess.run(accuracy, feed_dict={ 124 | x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}) 125 | 126 | 127 | 128 | saver = tf.train.Saver([W_conv1, b_conv1, W_fc1,b_fc1,W_fc2,b_fc2]) 129 | save_path = saver.save(sess, "./model.ckpt") 130 | print "Model saved in file: ", save_path 131 | 132 | #producing data to get the plots we like 133 | 134 | f = open('nnout.dat', 'w') 135 | 136 | #output of neural net 137 | ii=0 138 | for i in range(Ntemp): 139 | av=0.0 140 | for j in range(samples_per_T_test): 141 | batch=(mnist.test.images[ii,:].reshape(1,lx*lx*lx*L),mnist.test.labels[ii,:].reshape((1,numberlabels))) 142 | res=sess.run(y_conv,feed_dict={x: batch[0], y_: batch[1],keep_prob: 1.0}) 143 | av=av+res 144 | #print ii, res 145 | ii=ii+1 146 | av=av/samples_per_T_test 147 | f.write(str(i)+' '+str(av[0,0])+' '+str(av[0,1])+"\n") 148 | f.close() 149 | 150 | 151 | f = open('acc.dat', 'w') 152 | 153 | # accuracy vs temperature 154 | for ii in range(Ntemp): 155 | batch=(mnist.test.images[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape(samples_per_T_test,L*lx*lx*lx), mnist.test.labels[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape((samples_per_T_test,numberlabels)) ) 156 | train_accuracy = sess.run(accuracy,feed_dict={ 157 | x:batch[0], y_: batch[1], keep_prob: 1.0}) 158 | f.write(str(ii)+' '+str(train_accuracy)+"\n") 159 | f.close() 160 | 161 | 162 | #producing data to get the plots we like 163 | 164 | #f = open('nnoutTrick.dat', 'w') 165 | 166 | #output of neural net 167 | #ii=0 168 | #for i in range(Ntemp): 169 | # av=0.0 170 | # for j in range(samples_per_T_test): 171 | # batch=(mnist.test_Trick.images[ii,:].reshape((1,2*lx*lx)),mnist.test_Trick.labels[ii,:].reshape((1,numberlabels))) 172 | # res=sess.run(y_conv,feed_dict={x: batch[0], y_: batch[1],keep_prob: 1.0}) 173 | 
# av=av+res 174 | # #print ii, res 175 | # ii=ii+1 176 | # av=av/samples_per_T_test 177 | # f.write(str(i)+' '+str(av[0,0])+' '+str(av[0,1])+"\n") 178 | #f.close() 179 | 180 | 181 | #f = open('accTrick.dat', 'w') 182 | 183 | # accuracy vs temperature 184 | #for ii in range(Ntemp): 185 | # batch=(mnist.test_Trick.images[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape(samples_per_T_test,2*lx*lx), mnist.test_Trick.labels[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape((samples_per_T_test,numberlabels)) ) 186 | # train_accuracy = sess.run(accuracy,feed_dict={ 187 | # x:batch[0], y_: batch[1], keep_prob: 1.0}) 188 | # f.write(str(ii)+' '+str(train_accuracy)+"\n") 189 | #f.close() 190 | 191 | -------------------------------------------------------------------------------- /conv_net_3d/input_data.py: -------------------------------------------------------------------------------- 1 | """Functions for downloading and reading MNIST data.""" 2 | import gzip 3 | import os 4 | import urllib 5 | import numpy 6 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' 7 | 8 | 9 | def maybe_download(filename, work_directory): 10 | filepath = os.path.join(work_directory, filename) 11 | return filepath 12 | 13 | 14 | def _read32(bytestream): 15 | dt = numpy.dtype(numpy.uint32).newbyteorder('>') 16 | return numpy.frombuffer(bytestream.read(4), dtype=dt) 17 | 18 | 19 | def extract_images(filename,lx,Lt): 20 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" 21 | print 'Extracting', filename,'aaaaaa' 22 | 23 | #with gzip.open(filename) as bytestream: 24 | # magic = _read32(bytestream) 25 | # if magic != 2051: 26 | # raise ValueError( 27 | # 'Invalid magic number %d in MNIST image file: %s' % 28 | # (magic, filename)) 29 | # num_images = _read32(bytestream) 30 | # rows = _read32(bytestream) 31 | # cols = _read32(bytestream) 32 | # buf = bytestream.read(rows * cols * num_images) 33 | # data = numpy.frombuffer(buf, dtype=numpy.uint8) 34 | # data = data.reshape(num_images, rows, cols, 1) 35 | data=numpy.loadtxt(filename) 36 | dim=data.shape[0] 37 | data=data.reshape(dim,Lt,lx,lx,lx) # the two comes from the 2 site unite cell of the toric code. 
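  # The reshape above recovers one (Lt, lx, lx, lx) block per sample; the transpose below
  # moves the Lt imaginary-time slices to the last axis, giving an array of shape
  # (num_samples, lx, lx, lx, Lt) so that conv3d can treat the Lt slices as input channels.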
38 | data=numpy.transpose(data,(0,2,3,4,1)) 39 | print data.shape 40 | return data 41 | 42 | 43 | def dense_to_one_hot(labels_dense, num_classes=10): 44 | """Convert class labels from scalars to one-hot vectors.""" 45 | num_labels = labels_dense.shape[0] 46 | index_offset = numpy.arange(num_labels) * num_classes 47 | labels_one_hot = numpy.zeros((num_labels, num_classes)) 48 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 49 | return labels_one_hot 50 | 51 | 52 | def extract_labels(nlabels,filename, one_hot=False): 53 | """Extract the labels into a 1D uint8 numpy array [index].""" 54 | print 'Extracting', filename,'bbbccicicicicib' 55 | 56 | labels=numpy.loadtxt(filename,dtype='uint8') 57 | 58 | if one_hot: 59 | print "LABELS ONE HOT" 60 | print labels.shape 61 | XXX=dense_to_one_hot(labels,nlabels) 62 | print XXX.shape 63 | return dense_to_one_hot(labels,nlabels) 64 | print "LABELS" 65 | print labels.shape 66 | return labels 67 | 68 | 69 | class DataSet(object): 70 | def __init__(self, images, labels, fake_data=False): 71 | if fake_data: 72 | self._num_examples = 10000 73 | else: 74 | assert images.shape[0] == labels.shape[0], ( 75 | "images.shape: %s labels.shape: %s" % (images.shape, 76 | labels.shape)) 77 | self._num_examples = images.shape[0] 78 | # Convert shape from [num examples, rows, columns, depth] 79 | # to [num examples, rows*columns] (assuming depth == 1) 80 | #assert images.shape[3] == 1 # the 2 comes from the toric code unit cell 81 | images = images.reshape(images.shape[0], 82 | images.shape[1]*images.shape[2]*images.shape[3]*images.shape[4] ) # 83 | # Convert from [0, 255] -> [0.0, 1.0]. 84 | images = images.astype(numpy.float32) 85 | # images = numpy.multiply(images, 1.0 / 255.0) # commented since it is ising variables 86 | images = numpy.multiply(images, 1.0 ) # multiply by one, instead 87 | self._images = images 88 | self._labels = labels 89 | self._epochs_completed = 0 90 | self._index_in_epoch = 0 91 | 92 | @property 93 | def images(self): 94 | return self._images 95 | 96 | @property 97 | def labels(self): 98 | return self._labels 99 | 100 | @property 101 | def num_examples(self): 102 | return self._num_examples 103 | 104 | @property 105 | def epochs_completed(self): 106 | return self._epochs_completed 107 | 108 | def next_batch(self, batch_size, fake_data=False): 109 | """Return the next `batch_size` examples from this data set.""" 110 | if fake_data: 111 | fake_image = [1.0 for _ in xrange(784)] 112 | fake_label = 0 113 | return [fake_image for _ in xrange(batch_size)], [ 114 | fake_label for _ in xrange(batch_size)] 115 | start = self._index_in_epoch 116 | self._index_in_epoch += batch_size 117 | if self._index_in_epoch > self._num_examples: 118 | # Finished epoch 119 | self._epochs_completed += 1 120 | # Shuffle the data 121 | perm = numpy.arange(self._num_examples) 122 | numpy.random.shuffle(perm) 123 | self._images = self._images[perm] 124 | self._labels = self._labels[perm] 125 | # Start next epoch 126 | start = 0 127 | self._index_in_epoch = batch_size 128 | assert batch_size <= self._num_examples 129 | end = self._index_in_epoch 130 | return self._images[start:end], self._labels[start:end] 131 | 132 | 133 | def read_data_sets(nlabels,lx,Lt, train_dir, fake_data=False, one_hot=False ): 134 | class DataSets(object): 135 | pass 136 | data_sets = DataSets() 137 | if fake_data: 138 | data_sets.train = DataSet([], [], fake_data=True) 139 | data_sets.validation = DataSet([], [], fake_data=True) 140 | data_sets.test = DataSet([], [], fake_data=True) 
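    # fake_data is a leftover from the TensorFlow MNIST tutorial loader this file is based on:
    # it returns dummy DataSet objects without reading any of the txt files.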
141 | return data_sets 142 | TRAIN_IMAGES = 'Xtrain.txt' 143 | TRAIN_LABELS = 'ytrain.txt' 144 | TEST_IMAGES = 'Xtest.txt' 145 | TEST_LABELS = 'ytest.txt' 146 | #TEST_IMAGES_Trick = 'XtestTrick.txt' 147 | #TEST_LABELS_Trick = 'ytestTrick.txt' 148 | VALIDATION_SIZE = 0 149 | local_file = maybe_download(TRAIN_IMAGES, train_dir) 150 | train_images = extract_images(local_file,lx,Lt) 151 | local_file = maybe_download(TRAIN_LABELS, train_dir) 152 | train_labels = extract_labels(nlabels,local_file, one_hot=one_hot) 153 | local_file = maybe_download(TEST_IMAGES, train_dir) 154 | test_images = extract_images(local_file,lx,Lt) 155 | local_file = maybe_download(TEST_LABELS, train_dir) 156 | test_labels = extract_labels(nlabels,local_file, one_hot=one_hot) 157 | 158 | #local_file = maybe_download(TEST_IMAGES_Trick, train_dir) 159 | #test_images_Trick = extract_images(local_file,lx) 160 | #local_file = maybe_download(TEST_LABELS_Trick, train_dir) 161 | #test_labels_Trick = extract_labels(nlabels,local_file, one_hot=one_hot) 162 | 163 | validation_images = train_images[:VALIDATION_SIZE] 164 | validation_labels = train_labels[:VALIDATION_SIZE] 165 | train_images = train_images[VALIDATION_SIZE:] 166 | print "bababa", train_images.shape 167 | train_labels = train_labels[VALIDATION_SIZE:] 168 | data_sets.train = DataSet(train_images, train_labels) 169 | data_sets.validation = DataSet(validation_images, validation_labels) 170 | data_sets.test = DataSet(test_images, test_labels) 171 | #data_sets.test_Trick = DataSet(test_images_Trick, test_labels_Trick) 172 | return data_sets 173 | -------------------------------------------------------------------------------- /conv_net_3d/read.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import input_data 3 | import sys 4 | 5 | 6 | L=200 7 | lx=4 #=int(raw_input('lx')) 8 | V4d=lx*lx*lx*L # 4d volume 9 | 10 | training=10000 #=int(raw_input('training')) 11 | bsize=400 #=int(raw_input('bsize')) 12 | 13 | # how does the data look like 14 | Ntemp=41 #int(raw_input('Ntemp')) #20 # number of different temperatures used in the simulation 15 | samples_per_T=100 #int(raw_input('samples_per_T')) #250 # number of samples per temperature value 16 | samples_per_T_test=100 # int(raw_input('samples_per_T')) #250 # number of samples per temperature value 17 | 18 | 19 | numberlabels=2 20 | mnist = input_data.read_data_sets(numberlabels,lx,L,'txt', one_hot=True) 21 | 22 | 23 | 24 | print "reading sets ok" 25 | 26 | #sys.exit("pare aqui") 27 | 28 | # defining weighs and initlizatinon 29 | def weight_variable(shape): 30 | initial = tf.truncated_normal(shape, stddev=0.1) 31 | return tf.Variable(initial) 32 | 33 | def bias_variable(shape): 34 | initial = tf.constant(0.1, shape=shape) 35 | return tf.Variable(initial) 36 | 37 | # defining the convolutional and max pool layers 38 | def conv3d(x, W): 39 | return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='VALID') 40 | 41 | # defining the model 42 | 43 | x = tf.placeholder("float", shape=[None, (lx)*(lx)*(lx)*L]) # placeholder for the spin configurations 44 | #x = tf.placeholder("float", shape=[None, lx*lx*2]) #with padding and no PBC conv net 45 | y_ = tf.placeholder("float", shape=[None, numberlabels]) 46 | 47 | 48 | #first layer 49 | # convolutional layer # 2x2x2 patch size, 2 channel (2 color), 64 feature maps computed 50 | nmaps1=64 51 | spatial_filter_size=2 52 | W_conv1 = weight_variable([spatial_filter_size, spatial_filter_size, spatial_filter_size,L,nmaps1]) 53 | 
# bias for each of the feature maps 54 | b_conv1 = bias_variable([nmaps1]) 55 | 56 | # applying a reshape of the data to get the two dimensional structure back 57 | #x_image = tf.reshape(x, [-1,lx,lx,2]) # #with padding and no PBC conv net 58 | x_image = tf.reshape(x, [-1,lx,lx,lx,L]) # with PBC 59 | 60 | #We then convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool. 61 | 62 | h_conv1 = tf.nn.relu(conv3d(x_image, W_conv1) + b_conv1) 63 | 64 | h_pool1=h_conv1 65 | 66 | #In order to build a deep network, we stack several layers of this type. The second layer will have 8 features for each 5x5 patch. 67 | 68 | # weights and bias of the fully connected (fc) layer. Ihn this case everything looks one dimensiona because it is fully connected 69 | nmaps2=64 70 | 71 | #W_fc1 = weight_variable([(lx/2) * (lx/2) * nmaps1,nmaps2 ]) # with maxpool 72 | W_fc1 = weight_variable([(lx-1) * (lx-1)*(lx-1)*nmaps1,nmaps2 ]) # no maxpool images remain the same size after conv 73 | 74 | b_fc1 = bias_variable([nmaps2]) 75 | 76 | # first we reshape the outcome h_pool2 to a vector 77 | #h_pool1_flat = tf.reshape(h_pool1, [-1, (lx/2)*(lx/2)*nmaps1]) # with maxpool 78 | 79 | h_pool1_flat = tf.reshape(h_pool1, [-1, (lx-1)*(lx-1)*(lx-1)*nmaps1]) # no maxpool 80 | # then apply the ReLU with the fully connected weights and biases. 81 | h_fc1 = tf.nn.relu(tf.matmul(h_pool1_flat, W_fc1) + b_fc1) 82 | 83 | # Dropout: To reduce overfitting, we will apply dropout before the readout layer. We create a placeholder for the probability that a neuron's output is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing. TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling. 84 | 85 | keep_prob = tf.placeholder("float") 86 | h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) 87 | 88 | # readout layer. Finally, we add a softmax layer, just like for the one layer softmax regression above. 
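# Note: the readout below maps the nmaps2=64 fully connected features to numberlabels=2
# outputs through a softmax, and the cross-entropy defined further down clips the softmax
# output to [1e-10, 1] before taking the log, so that log(0) can never produce a NaN.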
89 | 90 | # weights and bias 91 | W_fc2 = weight_variable([nmaps2, numberlabels]) 92 | b_fc2 = bias_variable([numberlabels]) 93 | 94 | # apply a softmax layer 95 | y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2) 96 | 97 | 98 | #Train and Evaluate the Model 99 | # cost function to minimize 100 | 101 | #cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv)) 102 | cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_conv,1e-10,1.0))) 103 | 104 | train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) 105 | correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) 106 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 107 | 108 | sess = tf.Session() 109 | sess.run(tf.initialize_all_variables()) 110 | 111 | #for i in range(training): 112 | # batch = mnist.train.next_batch(bsize) 113 | # if i%100 == 0: 114 | # train_accuracy = sess.run(accuracy,feed_dict={ 115 | # x:batch[0], y_: batch[1], keep_prob: 1.0}) 116 | # print "step %d, training accuracy %g"%(i, train_accuracy) 117 | # print "test accuracy %g"%sess.run(accuracy, feed_dict={ 118 | # x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}) 119 | # #print "test Trick accuracy %g"%sess.run(accuracy, feed_dict={ 120 | # #x: mnist.test_Trick.images, y_: mnist.test_Trick.labels, keep_prob: 1.0}) 121 | ## train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) 122 | # sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) 123 | #print "test accuracy %g"%sess.run(accuracy, feed_dict={ 124 | # x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}) 125 | 126 | 127 | 128 | #saver = tf.train.Saver([W_conv1, b_conv1, W_fc1,b_fc1,W_fc2,b_fc2]) 129 | #save_path = saver.save(sess, "./model.ckpt") 130 | #print "Model saved in file: ", save_path 131 | 132 | # Add ops to save and restore all the variables. 
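# saver.restore only loads variable values: it relies on the graph above being built exactly
# as in conv_net_3d/conv.py (same shapes and the same variable-creation order, since the
# variables were saved under their default names), and on model.ckpt coming from that run.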
133 | saver = tf.train.Saver([W_conv1, b_conv1, W_fc1,b_fc1,W_fc2,b_fc2]) 134 | saver.restore(sess, "./model.ckpt") 135 | print("Model restored.") 136 | 137 | #producing data to get the plots we like 138 | 139 | f = open('nnout.dat', 'w') 140 | 141 | #output of neural net 142 | ii=0 143 | for i in range(Ntemp): 144 | av=0.0 145 | for j in range(samples_per_T_test): 146 | batch=(mnist.test.images[ii,:].reshape(1,lx*lx*lx*L),mnist.test.labels[ii,:].reshape((1,numberlabels))) 147 | res=sess.run(y_conv,feed_dict={x: batch[0], y_: batch[1],keep_prob: 1.0}) 148 | av=av+res 149 | #print ii, res 150 | ii=ii+1 151 | av=av/samples_per_T_test 152 | f.write(str(i)+' '+str(av[0,0])+' '+str(av[0,1])+"\n") 153 | f.close() 154 | 155 | 156 | f = open('acc.dat', 'w') 157 | 158 | # accuracy vs temperature 159 | for ii in range(Ntemp): 160 | batch=(mnist.test.images[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape(samples_per_T_test,L*lx*lx*lx), mnist.test.labels[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape((samples_per_T_test,numberlabels)) ) 161 | train_accuracy = sess.run(accuracy,feed_dict={ 162 | x:batch[0], y_: batch[1], keep_prob: 1.0}) 163 | f.write(str(ii)+' '+str(train_accuracy)+"\n") 164 | f.close() 165 | 166 | #producing data to get the plots we like 167 | 168 | #f = open('nnoutTrick.dat', 'w') 169 | 170 | #output of neural net 171 | #ii=0 172 | #for i in range(Ntemp): 173 | # av=0.0 174 | # for j in range(samples_per_T_test): 175 | # batch=(mnist.test_Trick.images[ii,:].reshape((1,2*lx*lx)),mnist.test_Trick.labels[ii,:].reshape((1,numberlabels))) 176 | # res=sess.run(y_conv,feed_dict={x: batch[0], y_: batch[1],keep_prob: 1.0}) 177 | # av=av+res 178 | # #print ii, res 179 | # ii=ii+1 180 | # av=av/samples_per_T_test 181 | # f.write(str(i)+' '+str(av[0,0])+' '+str(av[0,1])+"\n") 182 | #f.close() 183 | 184 | 185 | #f = open('accTrick.dat', 'w') 186 | 187 | # accuracy vs temperature 188 | #for ii in range(Ntemp): 189 | # batch=(mnist.test_Trick.images[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape(samples_per_T_test,2*lx*lx), mnist.test_Trick.labels[ii*samples_per_T_test:ii*samples_per_T_test+samples_per_T_test,:].reshape((samples_per_T_test,numberlabels)) ) 190 | # train_accuracy = sess.run(accuracy,feed_dict={ 191 | # x:batch[0], y_: batch[1], keep_prob: 1.0}) 192 | # f.write(str(ii)+' '+str(train_accuracy)+"\n") 193 | #f.close() 194 | 195 | -------------------------------------------------------------------------------- /createfiles.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | muv=["0.00500", "0.00545", "0.00590", "0.00635", "0.00680", "0.00725", "0.00770", "0.00815", "0.00860", "0.00905", "0.00950", "0.00995", "0.01040", "0.01085", "0.01130", "0.01175", "0.01220", "0.01265", "0.01310", "0.01355", "0.01400", "0.01445", "0.01490", "0.01535", "0.01580", "0.01625", "0.01670", "0.01715", "0.01760", "0.01805", "0.01850", "0.01895", "0.01940", "0.01985", "0.02030", "0.02075", "0.02120", "0.02165", "0.02210", "0.02255", "0.02300", "0.02345", "0.02390", "0.02435", "0.02480", "0.02525", "0.02570", "0.02615", "0.02660", "0.02705", "0.02750", "0.02795", "0.02840", "0.02885", "0.02930", "0.02975", "0.03020", "0.03065", "0.03110", "0.03155", "0.03200", "0.03245", "0.03290", "0.03335", "0.03380", "0.03425", "0.03470", "0.03515", "0.03560", "0.03605", "0.03650", "0.03695", "0.03740", "0.03785", "0.03830", "0.03875", "0.03920", "0.03965", "0.04010", "0.04055", 
"0.04100", "0.04145", "0.04190", "0.04235", "0.04280", "0.04325", "0.04370", "0.04415", "0.04460", "0.04505", "0.04550", "0.04595", "0.04640", "0.04685", "0.04730", "0.04775", "0.04820", "0.04865", "0.04910", "0.04955", "0.05000", "0.05045", "0.05090", "0.05135"] 5 | 6 | muv=["0.00500", "0.00545", "0.00590", "0.00635", "0.00680", "0.00725", "0.00770", "0.00815", "0.00860", "0.00905", "0.00950", "0.00995", "0.01040", "0.01085", "0.01130", "0.01175", "0.01220", "0.01265", "0.01310", "0.01355", "0.01400", "0.01445", "0.01490", "0.01535", "0.01580", "0.01625", "0.01670", "0.01715", "0.01760", "0.01805", "0.01850", "0.01895", "0.01940", "0.01985", "0.02030", "0.02075", "0.02120", "0.02165", "0.02210", "0.02255", "0.02300", "0.02345" ] 7 | 8 | 9 | L=200 10 | lx=4 11 | V=lx*lx*lx 12 | 13 | V4d=V*L 14 | 15 | testpercentage=20 16 | 17 | #xx=np.genfromtxt('N4x4x4_L200_U9_Mu0_dtau0.00500.HSF.stream',dtype=np.int,delimiter=1, usecols =tuple(range(V4d))) 18 | 19 | Tc=0.35 20 | 21 | ytrain=np.asarray([],dtype=np.int8) 22 | ytest=np.asarray([],dtype=np.int8) 23 | 24 | 25 | k=0 26 | o=0 27 | do=0 28 | for i in muv: 29 | 30 | fname='N4x4x4_L200_U9_Mu0_dtau'+i+'.HSF.stream' 31 | T=1.0/(L*float(i)) 32 | xx=np.genfromtxt(fname,dtype=np.uint8,delimiter=1, usecols =tuple(range(V4d))) 33 | print fname, xx.shape, T 34 | 35 | ntest=int(xx.shape[0]*testpercentage/100) 36 | ntrain=xx.shape[0]-ntest 37 | 38 | train=xx[0:ntrain,:] 39 | test=xx[ntrain:,:] 40 | if k==0: 41 | Xtrain=np.copy(train) 42 | Xtest=np.copy(test) 43 | k+=1 44 | else: 45 | Xtrain=np.append(Xtrain,train, axis=0) 46 | Xtest=np.append(Xtest,test,axis=0) 47 | 48 | 49 | if T') 16 | return numpy.frombuffer(bytestream.read(4), dtype=dt) 17 | 18 | 19 | def extract_images(filename,lx): 20 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" 21 | print 'Extracting', filename,'aaaaaa' 22 | 23 | #with gzip.open(filename) as bytestream: 24 | # magic = _read32(bytestream) 25 | # if magic != 2051: 26 | # raise ValueError( 27 | # 'Invalid magic number %d in MNIST image file: %s' % 28 | # (magic, filename)) 29 | # num_images = _read32(bytestream) 30 | # rows = _read32(bytestream) 31 | # cols = _read32(bytestream) 32 | # buf = bytestream.read(rows * cols * num_images) 33 | # data = numpy.frombuffer(buf, dtype=numpy.uint8) 34 | # data = data.reshape(num_images, rows, cols, 1) 35 | data=numpy.loadtxt(filename) 36 | dim=data.shape[0] 37 | #data=data.reshape(dim,lx,lx,2) # the two comes from the 2 site unite cell of the toric code. 
38 | print data.shape 39 | return data 40 | 41 | 42 | def dense_to_one_hot(labels_dense, num_classes=10): 43 | """Convert class labels from scalars to one-hot vectors.""" 44 | num_labels = labels_dense.shape[0] 45 | index_offset = numpy.arange(num_labels) * num_classes 46 | labels_one_hot = numpy.zeros((num_labels, num_classes)) 47 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 48 | return labels_one_hot 49 | 50 | 51 | def extract_labels(nlabels,filename, one_hot=False): 52 | """Extract the labels into a 1D uint8 numpy array [index].""" 53 | print 'Extracting', filename,'bbbccicicicicib' 54 | 55 | labels=numpy.loadtxt(filename,dtype='uint8') 56 | 57 | if one_hot: 58 | print "LABELS ONE HOT" 59 | print labels.shape 60 | XXX=dense_to_one_hot(labels,nlabels) 61 | print XXX.shape 62 | return dense_to_one_hot(labels,nlabels) 63 | print "LABELS" 64 | print labels.shape 65 | return labels 66 | 67 | 68 | class DataSet(object): 69 | def __init__(self, images, labels, fake_data=False): 70 | if fake_data: 71 | self._num_examples = 10000 72 | else: 73 | assert images.shape[0] == labels.shape[0], ( 74 | "images.shape: %s labels.shape: %s" % (images.shape, 75 | labels.shape)) 76 | self._num_examples = images.shape[0] 77 | # Convert shape from [num examples, rows, columns, depth] 78 | # to [num examples, rows*columns] (assuming depth == 1) 79 | #assert images.shape[3] == 1 # the 2 comes from the toric code unit cell 80 | images = images.reshape(images.shape[0], 81 | images.shape[1] ) #the 2 comes from the toric code unit cell 82 | # Convert from [0, 255] -> [0.0, 1.0]. 83 | images = images.astype(numpy.float32) 84 | # images = numpy.multiply(images, 1.0 / 255.0) # commented since it is ising variables 85 | images = numpy.multiply(images, 1.0 ) # multiply by one, instead 86 | self._images = images 87 | self._labels = labels 88 | self._epochs_completed = 0 89 | self._index_in_epoch = 0 90 | 91 | @property 92 | def images(self): 93 | return self._images 94 | 95 | @property 96 | def labels(self): 97 | return self._labels 98 | 99 | @property 100 | def num_examples(self): 101 | return self._num_examples 102 | 103 | @property 104 | def epochs_completed(self): 105 | return self._epochs_completed 106 | 107 | def next_batch(self, batch_size, fake_data=False): 108 | """Return the next `batch_size` examples from this data set.""" 109 | if fake_data: 110 | fake_image = [1.0 for _ in xrange(784)] 111 | fake_label = 0 112 | return [fake_image for _ in xrange(batch_size)], [ 113 | fake_label for _ in xrange(batch_size)] 114 | start = self._index_in_epoch 115 | self._index_in_epoch += batch_size 116 | if self._index_in_epoch > self._num_examples: 117 | # Finished epoch 118 | self._epochs_completed += 1 119 | # Shuffle the data 120 | perm = numpy.arange(self._num_examples) 121 | numpy.random.shuffle(perm) 122 | self._images = self._images[perm] 123 | self._labels = self._labels[perm] 124 | # Start next epoch 125 | start = 0 126 | self._index_in_epoch = batch_size 127 | assert batch_size <= self._num_examples 128 | end = self._index_in_epoch 129 | return self._images[start:end], self._labels[start:end] 130 | 131 | 132 | def read_data_sets(nlabels,lx, train_dir, fake_data=False, one_hot=False ): 133 | class DataSets(object): 134 | pass 135 | data_sets = DataSets() 136 | if fake_data: 137 | data_sets.train = DataSet([], [], fake_data=True) 138 | data_sets.validation = DataSet([], [], fake_data=True) 139 | data_sets.test = DataSet([], [], fake_data=True) 140 | return data_sets 141 | TRAIN_IMAGES = 'Xtrain.txt' 
142 | TRAIN_LABELS = 'ytrain.txt' 143 | TEST_IMAGES = 'Xtest.txt' 144 | TEST_LABELS = 'ytest.txt' 145 | #TEST_IMAGES_Trick = 'XtestTrick.txt' 146 | #TEST_LABELS_Trick = 'ytestTrick.txt' 147 | VALIDATION_SIZE = 0 148 | local_file = maybe_download(TRAIN_IMAGES, train_dir) 149 | train_images = extract_images(local_file,lx) 150 | local_file = maybe_download(TRAIN_LABELS, train_dir) 151 | train_labels = extract_labels(nlabels,local_file, one_hot=one_hot) 152 | local_file = maybe_download(TEST_IMAGES, train_dir) 153 | test_images = extract_images(local_file,lx) 154 | local_file = maybe_download(TEST_LABELS, train_dir) 155 | test_labels = extract_labels(nlabels,local_file, one_hot=one_hot) 156 | 157 | #local_file = maybe_download(TEST_IMAGES_Trick, train_dir) 158 | #test_images_Trick = extract_images(local_file,lx) 159 | #local_file = maybe_download(TEST_LABELS_Trick, train_dir) 160 | #test_labels_Trick = extract_labels(nlabels,local_file, one_hot=one_hot) 161 | 162 | validation_images = train_images[:VALIDATION_SIZE] 163 | validation_labels = train_labels[:VALIDATION_SIZE] 164 | train_images = train_images[VALIDATION_SIZE:] 165 | train_labels = train_labels[VALIDATION_SIZE:] 166 | data_sets.train = DataSet(train_images, train_labels) 167 | data_sets.validation = DataSet(validation_images, validation_labels) 168 | data_sets.test = DataSet(test_images, test_labels) 169 | #data_sets.test_Trick = DataSet(test_images_Trick, test_labels_Trick) 170 | return data_sets 171 | --------------------------------------------------------------------------------
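All three network scripts (conv.py, conv_net_3d/conv.py and conv_net_3d/read.py) write the same two plain-text result files: nnout.dat stores, for each temperature index, the test-set average of the two softmax outputs, and acc.dat stores the test accuracy at each temperature index. A minimal plotting sketch for these files, assuming numpy and matplotlib are available:

import numpy
import matplotlib.pyplot as plt

# nnout.dat: temperature index, averaged softmax output for label 0, averaged output for label 1
# acc.dat:   temperature index, test accuracy at that temperature
nnout = numpy.loadtxt('nnout.dat')
acc = numpy.loadtxt('acc.dat')

fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(nnout[:, 0], nnout[:, 1], 'o-', label='average output, label 0')
ax1.plot(nnout[:, 0], nnout[:, 2], 's-', label='average output, label 1')
ax1.set_ylabel('network output')
ax1.legend()
ax2.plot(acc[:, 0], acc[:, 1], 'o-')
ax2.set_xlabel('temperature index')
ax2.set_ylabel('test accuracy')
fig.tight_layout()
fig.savefig('nnout.png')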