├── OFDMmain.py
└── README.md

/OFDMmain.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import time
from pylab import *
# import matplotlib.pyplot as plt
import shutil
import os

# tf.set_random_seed(777)  # reproducibility

learning_rate = 0.0001
batch_size = 400
Nsubc = 64              # number of OFDM subcarriers
modulation_level = 4    # QPSK: 4 constellation points, 2 bits per symbol

# input placeholders: one-hot symbols in, one-hot symbols out (autoencoder)
X = tf.placeholder(tf.float32, [None, Nsubc*modulation_level])
Y = tf.placeholder(tf.float32, [None, Nsubc*modulation_level])
corruption = tf.placeholder(tf.complex64, [None, Nsubc])
# peak_power_symbol = tf.placeholder(tf.float32, [batch_size])

# --- Encoder: four fully connected layers with batch norm and ReLU ---
W1_1 = tf.get_variable("W1_1", shape=[Nsubc*modulation_level, 2048], initializer=tf.contrib.layers.xavier_initializer())
b1_1 = tf.Variable(tf.random_normal([2048]))
L1_1 = tf.layers.batch_normalization(tf.matmul(X, W1_1) + b1_1)
L1_1 = tf.nn.relu(L1_1)

W2_1 = tf.get_variable("W2_1", shape=[2048, 2048], initializer=tf.contrib.layers.xavier_initializer())
b2_1 = tf.Variable(tf.random_normal([2048]))
L2_1 = tf.layers.batch_normalization(tf.matmul(L1_1, W2_1) + b2_1)
L2_1 = tf.nn.relu(L2_1)

W3_1 = tf.get_variable("W3_1", shape=[2048, 2048], initializer=tf.contrib.layers.xavier_initializer())
b3_1 = tf.Variable(tf.random_normal([2048]))
L3_1 = tf.layers.batch_normalization(tf.matmul(L2_1, W3_1) + b3_1)
L3_1 = tf.nn.relu(L3_1)

W4_1 = tf.get_variable("W4_1", shape=[2048, 2048], initializer=tf.contrib.layers.xavier_initializer())
b4_1 = tf.Variable(tf.random_normal([2048]))
L4_1 = tf.layers.batch_normalization(tf.matmul(L3_1, W4_1) + b4_1)
L4_1 = tf.nn.relu(L4_1)

# output layer: 2*Nsubc reals = one (real, imag) pair per subcarrier
W5_1 = tf.get_variable("W5_1", shape=[2048, 2*Nsubc], initializer=tf.contrib.layers.xavier_initializer())
b5_1 = tf.Variable(tf.random_normal([2*Nsubc]))

# encoded_symbol_normalizing = tf.sqrt(tf.reduce_mean(tf.square(tf.matmul(L4_1, W5_1) + b5_1)))
# encoded_symbol_normalizing = tf.expand_dims(encoded_symbol_normalizing, 1)  # ",1" normalizes every transmit signal, i.e. shape (batch_size, 1)
# encoded_symbol_original = (1/np.sqrt(2))*tf.div(tf.matmul(L4_1, W5_1) + b5_1, encoded_symbol_normalizing)
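# --- Note on the output assembly below (added comment) ---
# The 2*Nsubc encoder outputs are read as interleaved (real, imag) pairs, one
# pair per subcarrier: even indices -> real parts, odd indices -> imaginary
# parts. A reshape-based equivalent of the even/odd gather (a sketch, not part
# of the original code):
#   X_ri = tf.reshape(raw_symbol, [-1, Nsubc, 2])            # (batch, Nsubc, 2)
#   encoded_symbol_complex = tf.complex(X_ri[:, :, 0], X_ri[:, :, 1])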
raw_symbol = tf.matmul(L4_1, W5_1) + b5_1
even_number = np.arange(0, 2*Nsubc, 2)
odd_number = np.arange(1, 2*Nsubc, 2)
X_real = tf.transpose(tf.gather(tf.transpose(raw_symbol), even_number))
X_imag = tf.transpose(tf.gather(tf.transpose(raw_symbol), odd_number))
encoded_symbol_complex = tf.complex(X_real, X_imag)
encoded_symbol_iffted = tf.ifft(encoded_symbol_complex)

# normalize the time-domain signal to unit average power
# (the power of a complex sample is |x|^2, hence tf.abs before squaring)
encoded_symbol_normalizing = tf.sqrt(tf.reduce_mean(tf.square(tf.abs(encoded_symbol_iffted))))
encoded_symbol_original_r = tf.div(tf.real(encoded_symbol_iffted), encoded_symbol_normalizing)
encoded_symbol_original_i = tf.div(tf.imag(encoded_symbol_iffted), encoded_symbol_normalizing)
encoded_symbol_original = tf.complex(encoded_symbol_original_r, encoded_symbol_original_i)
peak_power_batch = tf.reduce_max(tf.square(tf.abs(encoded_symbol_original)))
avr_power_batch = tf.reduce_mean(tf.square(tf.abs(encoded_symbol_original)))
# per-symbol peak magnitude; with unit average power this determines the PAPR
peak_power_symbol = tf.reduce_max(tf.abs(encoded_symbol_original), axis=1)
encoded_symbol = encoded_symbol_original + corruption

# --- Receiver: FFT back to the frequency domain, then a four-layer decoder ---
received_symbol_complex = tf.fft(encoded_symbol)
received_symbol_r = tf.real(received_symbol_complex)
received_symbol_i = tf.imag(received_symbol_complex)
received_symbol = tf.concat([received_symbol_r, received_symbol_i], axis=1)

W1 = tf.get_variable("W1", shape=[2*Nsubc, 2048], initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([2048]))
L1 = tf.layers.batch_normalization(tf.matmul(received_symbol, W1) + b1)
L1 = tf.nn.relu(L1)

W2 = tf.get_variable("W2", shape=[2048, 2048], initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([2048]))
L2 = tf.layers.batch_normalization(tf.matmul(L1, W2) + b2)
L2 = tf.nn.relu(L2)

W3 = tf.get_variable("W3", shape=[2048, 2048], initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([2048]))
L3 = tf.layers.batch_normalization(tf.matmul(L2, W3) + b3)
L3 = tf.nn.relu(L3)

W4 = tf.get_variable("W4", shape=[2048, 2048], initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([2048]))
L4 = tf.layers.batch_normalization(tf.matmul(L3, W4) + b4)
L4 = tf.nn.relu(L4)

W5 = tf.get_variable("W5", shape=[2048, Nsubc*modulation_level], initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([Nsubc*modulation_level]))

hypothesis = tf.matmul(L4, W5) + b5

# define cost/loss & optimizer
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
# cost = tf.losses.softmax_cross_entropy(Y, hypothesis)
cost = tf.reduce_mean(tf.square(hypothesis - Y))  # + 0.1*tf.reduce_mean(peak_power_symbol)  # 0.001-->0.01 is the best value
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(cost)
# optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)
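# --- Reading of the channel model (added comment) ---
# The `corruption` fed in below is AWGN divided elementwise by i.i.d. Rayleigh
# variates and added to the time-domain signal. Per sample, this matches what
# a zero-forcing equalizer would output for y = h*x + n, namely x + n/h, so
# the network appears to train on an equalized fading channel rather than on
# the raw fade.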

# initialize

figure()
# for SNR_range in np.arange(10, 11, 2):
for SNR_range in np.arange(15, 16, 5):
    # train my model
    shutil.rmtree('./saved_networks/', ignore_errors=True)  # the directory may not exist on the first run
    os.makedirs('./saved_networks/')
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=2)
    checkpoint = tf.train.get_checkpoint_state("saved_networks")
    if checkpoint and checkpoint.model_checkpoint_path:
        saver.restore(sess, checkpoint.model_checkpoint_path)
        print("Successfully loaded:", checkpoint.model_checkpoint_path)
    else:
        print("Could not find old network weights")
    # lin_space = np.arange(0, 13, 1)
    lin_space = np.arange(0, 31, 5)
    print(lin_space)
    cha_mag = 1.0
    ber_rate = []
    ser_rate = []
    for iterN in range(len(lin_space)):
        EbN0dB = lin_space[iterN]
        # mixed-SNR training: the actual training Eb/N0 is redrawn uniformly
        # from [13, 17] dB, overriding the loop value above; the loop value
        # only decides when training is triggered
        EbN0dB = np.random.randint(13, 18)
        # Eb = 1/log2(M) since the transmit signal is normalized to unit average power
        N0 = 1/np.log2(modulation_level)/1.0*np.power(10.0, -EbN0dB/10.0)
        if lin_space[iterN] == SNR_range:
            cost_plot = []
            training_epochs = 500000  # 100+SNR_range*30000
            for epoch in range(training_epochs):
                avg_cost = 0
                batch_ys = np.random.randint(modulation_level, size=(batch_size, Nsubc))
                batch_y = np.zeros((batch_size, Nsubc*modulation_level))
                for n in range(batch_size):
                    for m in range(Nsubc):
                        batch_y[n, m * modulation_level + batch_ys[n, m]] = 1
                        # batch_y[n, m * modulation_level + ((batch_ys[n, m]+1) % 4)] = 0.1  # for non-gray coding use 0.05
                        # batch_y[n, m * modulation_level + ((batch_ys[n, m]-1) % 4)] = 0.1
                noise_batch_r = (np.sqrt(N0 / 2.0)) * np.random.normal(0.0, size=(batch_size, Nsubc))
                noise_batch_i = (np.sqrt(N0 / 2.0)) * np.random.normal(0.0, size=(batch_size, Nsubc))
                rly = np.random.rayleigh(cha_mag / 2.0, (batch_size, Nsubc))
                # rly = np.ones((batch_size, Nsubc))
                corruption_r = np.divide(noise_batch_r, rly)
                corruption_i = np.divide(noise_batch_i, rly)
                corruption_batch = corruption_r + 1j*corruption_i
                # corruption_batch = noise_batch_r + 1j * noise_batch_i
                feed_dict = {X: batch_y, Y: batch_y, corruption: corruption_batch}
                c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
                avg_cost += c
                if epoch % 1000 == 0:
                    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
                    cost_plot.append(avg_cost)
                    print('peak_power in transmitted symbol', sess.run(tf.reduce_max(peak_power_symbol), feed_dict=feed_dict), 'avr power', sess.run(avr_power_batch, feed_dict=feed_dict))
                    saver.save(sess, 'saved_networks/' + 'network' + '-OFDM', global_step=epoch)
            # np.savetxt("./result/cost_at_learning_rate{0}".format(learning_rate), cost_plot)
            # learning_rate = learning_rate*10
            print('Learning Finished!')
    # test for SER begin
    for iterN in range(len(lin_space)):
        EbN0dB = lin_space[iterN]
        N0 = 1/np.log2(modulation_level) / np.power(10.0, EbN0dB / 10.0)
        test_batch_size = 50000
        test_ys = np.random.randint(modulation_level, size=(test_batch_size, Nsubc))
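        # One-hot encode: symbol s on subcarrier m sets slot m*modulation_level + s.
        # A vectorized equivalent of the nested loop below (a sketch, not part
        # of the original code):
        #   test_y[np.arange(test_batch_size)[:, None],
        #          np.arange(Nsubc) * modulation_level + test_ys] = 1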
        test_y = np.zeros((test_batch_size, Nsubc*modulation_level))
        for n in range(test_batch_size):
            for m in range(Nsubc):
                test_y[n, m * modulation_level + test_ys[n, m]] = 1
                # test_y[n, 8+np.remainder(n, 4)] = 1
        noise_batch_test_r = (np.sqrt(N0 / 2.0)) * np.random.normal(0.0, size=(test_batch_size, Nsubc))
        noise_batch_test_i = (np.sqrt(N0 / 2.0)) * np.random.normal(0.0, size=(test_batch_size, Nsubc))
        rly = np.random.rayleigh(cha_mag / 2, (test_batch_size, Nsubc))
        # rly = np.ones((test_batch_size, Nsubc))
        corruption_r = np.divide(noise_batch_test_r, rly)
        corruption_i = np.divide(noise_batch_test_i, rly)
        corruption_test_batch = corruption_r + 1j*corruption_i
        bit_error = []
        # Gray-coded QPSK bit labels for constellation points 0..3
        graycoding = tf.constant([[False, False], [False, True], [True, True], [True, False]])
        feed_dict = {X: test_y, Y: test_y, corruption: corruption_test_batch}

        # BER: compare the Gray-coded bits of the argmax decision against the
        # label, one slice of modulation_level logits per subcarrier
        # (note: these ops are re-created on every loop pass, which grows the
        # graph; acceptable for a one-off script)
        for i in range(Nsubc):
            bit_error.append(tf.reduce_mean(tf.cast(tf.logical_xor(
                tf.gather(graycoding, tf.argmax(hypothesis[:, i * modulation_level:(i + 1) * modulation_level], 1)),
                tf.gather(graycoding, tf.argmax(Y[:, i * modulation_level:(i + 1) * modulation_level], 1))), tf.float32)))
        tic = time.time()
        BER = sess.run(tf.reduce_mean(bit_error), feed_dict=feed_dict)
        toc = time.time()
        print("elapsed time", toc - tic)
        ber_rate.append(BER)

        # SER: a symbol is correct when the argmax over its modulation_level slot matches the label
        correct_prediction = []
        for i in range(Nsubc):
            correct_prediction.append(tf.equal(tf.argmax(hypothesis[:, i*modulation_level:(i+1)*modulation_level], 1),
                                               tf.argmax(Y[:, i*modulation_level:(i+1)*modulation_level], 1)))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        SER = 1 - sess.run(accuracy, feed_dict={X: test_y, Y: test_y, corruption: corruption_test_batch})
        ser_rate.append(SER)

    # np.savetxt("./result/OFDM_SER_trained_at_{0}dB SNR_L4_64".format(SNR_range), ser_rate)
    # np.savetxt("./result/OFDM_BER_trained_at_{0}dB SNR_L4_64_rly_mixed".format(SNR_range), ber_rate)
    # test for SER end

    # test for CCDF begin
    CCDF = []
    N0 = 1 / np.log2(modulation_level) / np.power(10.0, EbN0dB / 10.0)
    test_batch_size = 50000
    test_ys = np.random.randint(modulation_level, size=(test_batch_size, Nsubc))
    test_y = np.zeros((test_batch_size, Nsubc * modulation_level))
    for n in range(test_batch_size):
        for m in range(Nsubc):
            test_y[n, m * modulation_level + test_ys[n, m]] = 1
    noise_batch_test_r = (np.sqrt(N0 / 2.0)) * np.random.normal(0.0, size=(test_batch_size, Nsubc))
    noise_batch_test_i = (np.sqrt(N0 / 2.0)) * np.random.normal(0.0, size=(test_batch_size, Nsubc))
    # rly = np.random.rayleigh(cha_mag / 2, (test_batch_size, Nsubc))
    rly = np.ones((test_batch_size, Nsubc))  # unit gains: PAPR is measured on the transmitted signal, before corruption
    corruption_r = np.divide(noise_batch_test_r, rly)
    corruption_i = np.divide(noise_batch_test_i, rly)
    corruption_test_batch = corruption_r + 1j * corruption_i
    PAPR_sample = sess.run(peak_power_symbol, feed_dict={X: test_y, Y: test_y, corruption: corruption_test_batch})
    for z in np.arange(0, 6, 0.2):
        CCDF.append(np.divide(np.sum(z < 10*np.log10(PAPR_sample)), float(test_batch_size)))
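    # CCDF[k] estimates Pr[10*log10(per-symbol peak magnitude) > z_k] over the
    # test batch. The script opens a pylab figure but never draws into it; a
    # minimal plotting sketch (an addition, using the pylab import above):
    #   semilogy(np.arange(0, 6, 0.2), CCDF)
    #   xlabel('z [dB]'); ylabel('CCDF')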
#np.savetxt("./result/CCDF_trained_at_{0}dB SNR_L4_64_rly".format(SNR_range), CCDF) 253 | #np.savetxt("./result/PAPR_at_{0}dB SNR_L4_64_mixed".format(SNR_range), PAPR_sample) 254 | # test for CCDF end 255 | np.savetxt("./result/cost_plot", cost_plot) 256 | 257 | print (lin_space) 258 | print (ber_rate) 259 | show() 260 | 261 | 262 | 263 | 264 | # Get one and predict 265 | # r = random.randint(0, mnist.test.num_examples - 1) 266 | # print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1))) 267 | # print("Prediction: ", sess.run( 268 | # tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1], keep_prob: 1})) 269 | 270 | # plt.imshow(mnist.test.images[r:r + 1]. 271 | # reshape(28, 28), cmap='Greys', interpolation='nearest') 272 | # plt.show() 273 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OFDM_PAPR_reduction 2 | 3 | This is the simulation code for the paper from the authors of 4 | 5 | "A novel PAPR reduction scheme for OFDM system based on deep learning" 6 | IEEE COMMUNICATIONS LETTERS, VOL. 22, NO. 3, MARCH 2018 7 | Authors: Minhoe Kim, Woongsup Lee, Dong-Ho Cho 8 | https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8240644 9 | 10 | Please cite the paper if you have referenced from this code. 11 | 12 | ======= About the code ======= 13 | 14 | The purpose of sharing the code is not to provide the user-friendly simulator but to share the code implementations for research. 15 | Please understand that the code is not originally meant to share with others, so it is not documented well. 16 | There are many figures in the paper, so the hyper-parameters need to be tuned according to simulation environments. 17 | 18 | In this code, we have used 19 | 20 | Tensorflow v1.15 (if you have tensorflow v2.x, you need to disable tensorflow v2 and replace all the deprecated tfv1 functions to tfv2 functions 21 | Python 3.6 and above 22 | 23 | --------------------------------------------------------------------------------