├── DeepMimo.py
└── README.md

/DeepMimo.py:
--------------------------------------------------------------------------------
import math
import random

import numpy as np
import tensorflow as tf

# System parameters
K = 2                      # number of transmitted BPSK symbols (users)
N = 2 * K                  # number of receive antennas
L = 3 * K                  # number of unrolled network layers
Sigma2 = 1.0               # noise variance
SNR_min_dB = 0.0           # training covers a range of operating SNRs
SNR_max_dB = 20.0
NrSamples = 100000         # training samples
NrSamples_ToTest = 10000   # test samples
batch_size = 1000
number_of_batches = NrSamples // batch_size
number_of_batches_test = NrSamples_ToTest // batch_size

def generate_sample():
    # Draw a random operating SNR and channel realization, transmit a random
    # BPSK vector, and return (x, H^T y, H^T H) for the received y = Hx + noise.
    SNR_dB = SNR_min_dB + (SNR_max_dB - SNR_min_dB) * np.random.rand()
    SNR = 10.0 ** (SNR_dB / 10.0)
    H = np.sqrt(0.5 * SNR / K) * np.random.randn(N, K)
    x = 2 * np.round(np.random.rand(K, 1)) - 1
    noise = np.sqrt(Sigma2 / 2.0) * np.random.randn(N, 1)
    y = np.matmul(H, x) + noise
    Ht = np.transpose(H)
    return x, np.matmul(Ht, y), np.matmul(Ht, H)

# Training data
DataSet_x = []    # transmitted vectors x
DataSet_F1 = []   # matched-filter outputs H^T y
DataSet_F2 = []   # Gram matrices H^T H
for runIdx in range(NrSamples):
    x, f1, f2 = generate_sample()
    DataSet_x.append(x)
    DataSet_F1.append(f1)
    DataSet_F2.append(f2)
print("training data generated")

# Test data
TestSet_x = []
TestSet_F1 = []
TestSet_F2 = []
for runIdx in range(NrSamples_ToTest):
    x, f1, f2 = generate_sample()
    TestSet_x.append(x)
    TestSet_F1.append(f1)
    TestSet_F2.append(f2)
print("test data generated")

# Placeholders for one batch of (x, H^T y, H^T H)
X = tf.placeholder(tf.float64, shape=(batch_size, K, 1))
F1 = tf.placeholder(tf.float64, shape=(batch_size, K, 1))
F2 = tf.placeholder(tf.float64, shape=(batch_size, K, K))

# Per-layer trainable weights: layer k maps a 5K-dimensional input to an
# 8K-dimensional hidden state z_k, from which the K-dimensional estimate x_k
# and the 2K-dimensional auxiliary state v_k are produced.
with tf.variable_scope("embedding"):
    W1 = tf.get_variable("embedding1", initializer=tf.random_normal([L, 8 * K, 5 * K], stddev=0.1, dtype=tf.float64))
    b1 = tf.get_variable("embedding2", initializer=tf.random_normal([L, 8 * K, 1], stddev=0.1, dtype=tf.float64))
    W2 = tf.get_variable("embedding3", initializer=tf.random_normal([L, K, 8 * K], stddev=0.1, dtype=tf.float64))
    b2 = tf.get_variable("embedding4", initializer=tf.random_normal([L, K, 1], stddev=0.1, dtype=tf.float64))
    W3 = tf.get_variable("embedding5", initializer=tf.random_normal([L, 2 * K, 8 * K], stddev=0.1, dtype=tf.float64))
    b3 = tf.get_variable("embedding6", initializer=tf.random_normal([L, 2 * K, 1], stddev=0.1, dtype=tf.float64))
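# ---------------------------------------------------------------------------
# The helpers below implement one unrolled DetNet layer (cf. arXiv:1706.01151):
#
#   z_k = relu( W1_k [H^T y ; x_{k-1} ; H^T H x_{k-1} ; v_{k-1}] + b1_k )
#   x_k = psi_t( W2_k z_k + b2_k )      # piecewise-linear soft sign, t = 0.1
#   v_k = W3_k z_k + b3_k
#
# starting from x_0 = 0 and v_0 = 0; the last estimate x_L is the output.
# ---------------------------------------------------------------------------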
def zk(Hty, xk, HtH, vk, k):
    # Hidden state of layer k from the concatenated input [H^T y; x; H^T H x; v].
    inp3 = tf.matmul(HtH, xk)
    concat = tf.concat([Hty, xk, inp3, vk], 0)
    return tf.nn.relu(tf.matmul(W1[k - 1], concat) + b1[k - 1])

def psi(x, tt):
    # Soft sign: -1 for x <= -t, +1 for x >= t, linear in between, i.e.
    # relu(x + t)/|t| - relu(x - t)/|t| - 1.
    t = tt * tf.ones_like(x)
    ab = tf.abs(t)
    out1 = tf.div(tf.nn.relu(x + t), ab)
    out2 = tf.div(tf.nn.relu(x - t), ab)
    one = tf.ones_like(out1, dtype=tf.float64)
    return tf.subtract(out1, tf.add(one, out2))

def xk(z_hat, k):
    # Symbol estimate of layer k, pushed towards {-1, +1} by the soft sign.
    return psi(tf.matmul(W2[k - 1], z_hat) + b2[k - 1], tt=0.1)

def vk(z_hat, k):
    # Auxiliary state carried to the next layer.
    return tf.matmul(W3[k - 1], z_hat) + b3[k - 1]

def x_tilde(HtH, Hty):
    # Zero-forcing (decorrelator) estimate (H^T H)^{-1} H^T y.
    return tf.matmul(tf.matrix_inverse(HtH), Hty)

def model(x_f, f1_f, f2_f):
    # Unroll the L-layer network for every sample in the batch and accumulate
    # the layer-weighted loss. (Building the graph sample by sample is simple
    # but slow; batched matmuls would be faster.)
    loss_2 = 0
    pred = []
    for dsIdx in range(batch_size):
        x = x_f[dsIdx]
        f1 = f1_f[dsIdx]  # H^T y
        f2 = f2_f[dsIdx]  # H^T H
        x_hat = tf.zeros((K, 1), dtype=tf.float64)
        v_hat = tf.zeros((2 * K, 1), dtype=tf.float64)
        x_tilde_num = x_tilde(f2, f1)
        temp_loss = 0
        for k in range(1, L + 1):
            z_hat = zk(f1, x_hat, f2, v_hat, k)
            x_hat = xk(z_hat, k)
            v_hat = vk(z_hat, k)
            num = math.log(k) * tf.square(x - x_hat)
            denom = tf.square(x - x_tilde_num)
            temp_loss = temp_loss + tf.reduce_sum(tf.div(num, denom))
        # Keep only the last layer's estimate as the prediction, so pred[i]
        # is the detected vector for sample i of the batch.
        pred.append(x_hat)
        loss_2 = loss_2 + temp_loss
    # Combined loss of all layers, summed over the batch
    return loss_2, pred
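# ---------------------------------------------------------------------------
# Training objective: as in the paper, each layer's squared error is weighted
# by log(k) and normalized by the error of the zero-forcing estimate
# x_tilde = (H^T H)^{-1} H^T y (here elementwise, then summed):
#
#   loss ~ sum_{k=1}^{L} log(k) * |x - x_hat_k|^2 / |x - x_tilde|^2
#
# Normalizing by the zero-forcing error keeps the loss comparable across
# operating SNRs; note that log(1) = 0, so the first layer is not penalized.
# ---------------------------------------------------------------------------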
print("building model")
loss_, out = model(X, F1, F2)
loss = loss_ / batch_size
tf.summary.scalar("loss", loss)  # the loss is a scalar, so log it as one
loss = tf.clip_by_value(loss, clip_value_min=0, clip_value_max=1000)
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
saver = tf.train.Saver()

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())

t = 0
with sess.as_default():
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter("train_loss_f", sess.graph)
    for epoch in range(100, 4000):
        loss_ep = 0
        # Shuffle all three lists with the same permutation so that the
        # (x, H^T y, H^T H) triples stay aligned.
        perm = list(range(NrSamples))
        random.shuffle(perm)
        DataSet_x = [DataSet_x[i] for i in perm]
        DataSet_F1 = [DataSet_F1[i] for i in perm]
        DataSet_F2 = [DataSet_F2[i] for i in perm]
        print("epoch " + str(epoch))
        for i in range(number_of_batches):
            batch = slice(i * batch_size, (i + 1) * batch_size)
            feed = {X: DataSet_x[batch], F1: DataSet_F1[batch], F2: DataSet_F2[batch]}
            train_step.run(feed_dict=feed)
            loss1, summary = sess.run([loss, merged], feed_dict=feed)
            loss_ep = loss_ep + loss1
            with open("loss_f.txt", "a") as f:
                f.write("loss " + str(i) + " " + str(loss1) + "\n")
            print("loss " + str(loss1))
            train_writer.add_summary(summary, t)
            t = t + 1
        with open("loss_f.txt", "a") as f:
            f.write("loss_ep " + str(loss_ep / number_of_batches) + "\n")

        # Evaluate on the held-out test set after every epoch
        loss_ep_t = 0
        for j in range(number_of_batches_test):
            batch = slice(j * batch_size, (j + 1) * batch_size)
            loss_t, out_t = sess.run([loss, out], feed_dict={X: TestSet_x[batch], F1: TestSet_F1[batch], F2: TestSet_F2[batch]})
            loss_ep_t = loss_ep_t + loss_t
            with open(str(epoch) + "-test_labels_f.txt", "a") as f:
                for z in range(batch_size):
                    f.write("test_input " + str(TestSet_x[j * batch_size + z][0][0]) + " " + str(TestSet_x[j * batch_size + z][1][0]) + "\n")
                    f.write("test_label " + str(out_t[z][0][0]) + " " + str(out_t[z][1][0]) + "\n")
        with open("test_loss_f.txt", "a") as f1:
            f1.write("test_loss " + str(loss_ep_t / number_of_batches_test) + "\n")
        print("loss_ep")
        print(loss_ep / number_of_batches)
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# Deep-MIMO-Detection
This repository contains an implementation of "Deep MIMO Detection" (https://arxiv.org/pdf/1706.01151.pdf), a deep neural network for Multiple-Input Multiple-Output (MIMO) detection.
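
For orientation, here is a minimal NumPy sketch of the data model the network is trained on (illustrative only, not part of `DeepMimo.py`; the dimensions and the 10 dB SNR are example choices mirroring the script): `K` BPSK symbols pass through a random real `N x K` channel, and the detector operates on the sufficient statistics `H^T y` and `H^T H`. The zero-forcing estimate shown is the `x_tilde` baseline that normalizes the training loss.

```python
import numpy as np

K, N = 2, 4                                         # transmit / receive dimensions
snr = 10.0 ** (10.0 / 10.0)                         # example operating SNR of 10 dB
H = np.sqrt(0.5 * snr / K) * np.random.randn(N, K)  # random channel realization
x = 2.0 * np.round(np.random.rand(K, 1)) - 1.0      # BPSK symbols in {-1, +1}
y = H @ x + np.sqrt(0.5) * np.random.randn(N, 1)    # received word, sigma^2 = 1

# Zero-forcing baseline (H^T H)^{-1} H^T y, computed via a linear solve
x_zf = np.linalg.solve(H.T @ H, H.T @ y)
print("sent:", x.ravel(), "ZF decision:", np.sign(x_zf).ravel())
```
--------------------------------------------------------------------------------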