├── README.md
├── SISO_autoencoder22.py
├── SISO_autoencoder22_copy.py
├── SISO_autoencoder22_pic.py
├── TWOUER_autoencoder.py
└── autoencoder22.h5

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# autoencoder-for-the-Physical-Layer

Using Keras to reproduce the simulation results from the paper "An Introduction to Deep Learning for the Physical Layer".

--------------------------------------------------------------------------------
/SISO_autoencoder22.py:
--------------------------------------------------------------------------------
from keras.layers import Input, Dense, GaussianNoise, Lambda
from keras.models import Model
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt

# Initial parameters: k bits per message, n channel uses, M = 2**k messages
k = 2
n = 2
M = 2 ** k
R = k / n

# One-hot train/test data; np.random.shuffle is used because random.shuffle
# corrupts 2-D NumPy arrays (its tuple swap duplicates rows through views).
eye_matrix = np.eye(M)
x_train = np.tile(eye_matrix, (600, 1))
x_test = np.tile(eye_matrix, (100, 1))
x_try = np.tile(eye_matrix, (1000, 1))
np.random.shuffle(x_train)
np.random.shuffle(x_test)
np.random.shuffle(x_try)
print(x_train.shape)
print(x_test.shape)

# Bit error rate metric (the cast is needed: reduce_mean is undefined on booleans)
def BER(y_true, y_pred):
    return K.mean(K.cast(K.not_equal(y_true, K.round(y_pred)), 'float32'), axis=-1)

# SNR: convert Eb/No in dB to the per-dimension noise standard deviation
Eb_No_dB = 7
noise_var = 1 / (2 * R * (10 ** (Eb_No_dB / 10)))
noise_std = np.sqrt(noise_var)

# Autoencoder
input_sys = Input(shape=(M,))

# Deep autoencoder: transmitter (encoder), AWGN channel, receiver (decoder)
encoded = Dense(M, activation='relu')(input_sys)
encoded = Dense(n)(encoded)
#encoded = ActivityRegularization(l2=0.02)(encoded)
encoded = Lambda(lambda x: np.sqrt(n) * K.l2_normalize(x, axis=1))(encoded)  # energy constraint
#encoded = Lambda(lambda x: x / K.sqrt(K.mean(x**2)))(encoded)  # average power constraint
encoded_noise = GaussianNoise(noise_std)(encoded)  # noise layer (active during training only)
decoded = Dense(M, activation='relu')(encoded_noise)
decoded = Dense(M, activation='softmax')(decoded)

autoencoder = Model(inputs=input_sys, outputs=decoded)
encoder = Model(inputs=input_sys, outputs=encoded)

autoencoder.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['binary_accuracy', BER])

hist = autoencoder.fit(x_train, x_train, epochs=200, validation_data=(x_test, x_test))  # default batch_size

# Error count
#encoded_sys = encoder.predict(x_try)
#decoded_sys = autoencoder.predict(x_try)
#decoded_sys_round = np.round(decoded_sys)
#error_rate = np.mean(np.not_equal(x_try, decoded_sys_round).max(axis=1))

# Constellation diagram
encoded_planisphere = encoder.predict(eye_matrix)
plt.title('Constellation')
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.plot(encoded_planisphere[:, 0], encoded_planisphere[:, 1], 'r.')
plt.grid(True)

# Loss curve
plt.figure()
plt.plot(hist.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
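# --- Added sketch (not in the original script) ---
# An argmax-based block error check is more robust than rounding the softmax
# probabilities: a block whose largest output stays below 0.5 would otherwise
# be miscounted. Uses only names defined above.
decoded_try = autoencoder.predict(x_try)
bler = np.mean(np.argmax(decoded_try, axis=1) != np.argmax(x_try, axis=1))
print('block error rate at %g dB: %g' % (Eb_No_dB, bler))

# Render the figures when the file is run as a plain script.
plt.show()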
--------------------------------------------------------------------------------
/SISO_autoencoder22_copy.py:
--------------------------------------------------------------------------------
from keras.layers import Input, Dense, Lambda, Add
from keras.models import Model
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt

from numpy.random import seed
from tensorflow import set_random_seed
seed(5)
set_random_seed(3)  # the seed must be an integer

# Initial parameters
k = 2
n = 2
M = 2 ** k
R = k / n

eye_matrix = np.eye(M)
x_train = np.tile(eye_matrix, (2000, 1))
x_test = np.tile(eye_matrix, (100, 1))
x_try = np.tile(eye_matrix, (10000, 1))
np.random.shuffle(x_train)
np.random.shuffle(x_test)
np.random.shuffle(x_try)
print(x_train.shape)
print(x_test.shape)

# Bit error rate metric (the cast is needed: reduce_mean is undefined on booleans)
def BER(y_true, y_pred):
    return K.mean(K.cast(K.not_equal(y_true, K.round(y_pred)), 'float32'), axis=-1)

# SNR: convert Eb/No in dB to the per-dimension noise standard deviation
Eb_No_dB = 7
noise_var = 1 / (2 * R * (10 ** (Eb_No_dB / 10)))
noise_std = np.sqrt(noise_var)

noise_train = noise_std * np.random.randn(x_train.shape[0], n)
noise_test = noise_std * np.random.randn(x_test.shape[0], n)
noise_try = noise_std * np.random.randn(x_try.shape[0], n)

# Autoencoder
input_sys = Input(shape=(M,))
input_noise = Input(shape=(n,))

# Deep autoencoder
encoded = Dense(M, activation='relu')(input_sys)
encoded = Dense(n)(encoded)
#encoded = ActivityRegularization(l2=0.02)(encoded)
encoded = Lambda(lambda x: np.sqrt(n) * K.l2_normalize(x, axis=1))(encoded)  # energy constraint
#encoded = Lambda(lambda x: x / K.sqrt(K.mean(x**2)))(encoded)  # average power constraint
# Feed the noise through a second input instead of a GaussianNoise layer:
# GaussianNoise is only active during training, whereas an Add input keeps
# the channel noisy at test time and lets the SNR be changed freely.
encoded_noise = Add()([encoded, input_noise])
#encoded_noise = GaussianNoise(noise_std)(encoded)  # noise layer (training only)
decoded = Dense(M, activation='relu')(encoded_noise)
decoded = Dense(M, activation='softmax')(decoded)

autoencoder = Model(inputs=[input_sys, input_noise], outputs=decoded)
encoder = Model(inputs=input_sys, outputs=encoded)

autoencoder.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['binary_accuracy', BER])

hist = autoencoder.fit([x_train, noise_train], x_train, epochs=100, batch_size=32,
                       validation_data=([x_test, noise_test], x_test))

# Error count
#encoded_sys = encoder.predict(x_try)
#decoded_sys = autoencoder.predict([x_try, noise_try])
#decoded_sys_round = np.round(decoded_sys)
#error_rate = np.mean(np.not_equal(x_try, decoded_sys_round).max(axis=1))

# Constellation diagram
encoded_planisphere = encoder.predict(eye_matrix)
plt.figure()
plt.title('Constellation')
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.plot(encoded_planisphere[:, 0], encoded_planisphere[:, 1], 'r.')
plt.grid(True)

# Loss curve
plt.figure()
plt.plot(hist.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')

# Save
#autoencoder.save('autoencoder22.h5')
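# --- Added sketch (not in the original script) ---
# Because the noise arrives through the second model input, the trained
# autoencoder can be evaluated at an SNR other than the 7 dB used for
# training, without retraining; SISO_autoencoder22_pic.py sweeps Eb/No
# this way. eval_EbNo_dB here is an arbitrary illustrative value.
eval_EbNo_dB = 4
eval_std = np.sqrt(1 / (2 * R * (10 ** (eval_EbNo_dB / 10))))
eval_pred = autoencoder.predict([x_try, eval_std * np.random.randn(x_try.shape[0], n)])
print(np.mean(np.argmax(eval_pred, axis=1) != np.argmax(x_try, axis=1)))

plt.show()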
--------------------------------------------------------------------------------
/SISO_autoencoder22_pic.py:
--------------------------------------------------------------------------------
#####################################################
# Run after SISO_autoencoder22_copy.py: this script reuses the
# `autoencoder` model from that session to sweep the block error
# rate over Eb/No.
#####################################################
from keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt

# Initial parameters
k = 2
n = 2
M = 2 ** k
R = k / n

eye_matrix = np.eye(M)
x_try = np.tile(eye_matrix, (250000, 1))
np.random.shuffle(x_try)
print(x_try.shape)

ER = []

# Load model (loading fails unless `n` and the custom BER metric are in
# scope, hence the model from the live session is used instead)
#autoencoder = load_model('autoencoder22.h5')

for Eb_No_dB in np.arange(-2.0, 10.0, 0.5):
    noise_std = np.sqrt(1 / (2 * R * (10 ** (Eb_No_dB / 10))))
    noise_try = noise_std * np.random.randn(x_try.shape[0], n)
    #encoded_sys = encoder.predict(x_try)
    decoded_sys_round = np.round(autoencoder.predict([x_try, noise_try]))
    error_rate = np.mean(np.not_equal(x_try, decoded_sys_round).max(axis=1))
    ER.append(error_rate)
    print(Eb_No_dB)
    print(error_rate)

# Block error rate curve
plt.yscale('log')
plt.plot(np.arange(-2.0, 10.0, 0.5), ER, 'r.-')
plt.grid(True)
plt.ylim(10 ** -5, 1)
plt.xlim(-2, 10)
plt.title('Block error rate')
plt.xlabel('Eb/No (dB)')
plt.ylabel('Block error rate')
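# --- Added sketch (not in the original script) ---
# Reference curve for comparison: an uncoded Gray-mapped QPSK system of the
# same rate has bit error probability Pb = Q(sqrt(2*Eb/No)) = 0.5*erfc(sqrt(Eb/No))
# and block (2-bit message) error rate 1 - (1 - Pb)**2, which the (2,2)
# autoencoder should roughly match.
from scipy.special import erfc
EbNo_lin = 10 ** (np.arange(-2.0, 10.0, 0.5) / 10)
Pb = 0.5 * erfc(np.sqrt(EbNo_lin))
plt.plot(np.arange(-2.0, 10.0, 0.5), 1 - (1 - Pb) ** 2, 'b--', label='QPSK (theory)')
plt.legend(loc='lower left')
plt.show()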
--------------------------------------------------------------------------------
/TWOUER_autoencoder.py:
--------------------------------------------------------------------------------
import numpy as np
from keras.layers import Input, Dense, Lambda, Embedding, Flatten
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras import backend as K
from keras.callbacks import Callback
import matplotlib.pyplot as plt
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(3)

NUM_EPOCHS = 45
BATCH_SIZE = 32
M = 4
k = int(np.log2(M))
n_channel = 2
emb_k = 4
R = k / n_channel
train_data_size = 10000
bertest_data_size = 50000
EbNodB_train = 7
EbNo_train = 10 ** (EbNodB_train / 10.0)
noise_std = np.sqrt(1 / (2 * R * EbNo_train))
alpha = K.variable(0.5)
beta = K.variable(0.5)

# Channel function for the two-user interference channel: each receiver
# sees its own signal plus the other user's signal plus AWGN.
def mixed_AWGN(x):
    signal, interference = x
    noise = K.random_normal(K.shape(signal), mean=0.0, stddev=noise_std)
    return signal + interference + noise

# Callback implementing dynamic loss weights: after each epoch the weights
# are set proportional to each user's share of the total loss, so the
# currently harder-to-decode user is weighted more heavily.
class Mycallback(Callback):
    def __init__(self, alpha, beta):
        self.alpha = alpha
        self.beta = beta
        self.epoch_num = 0

    def on_epoch_end(self, epoch, logs={}):
        self.epoch_num += 1
        loss1 = logs.get('u1_receiver_loss')
        loss2 = logs.get('u2_receiver_loss')
        print("epoch %d" % self.epoch_num)
        print("total_loss %f" % logs.get('loss'))
        print("u1_loss %f" % loss1)
        print("u2_loss %f" % loss2)
        a = loss1 / (loss1 + loss2)
        b = 1 - a
        K.set_value(self.alpha, a)
        K.set_value(self.beta, b)
        print("alpha %f" % K.get_value(self.alpha))
        print("beta %f" % K.get_value(self.beta))

# Generating train and test data
# user 1
seed(1)
train_label_s1 = np.random.randint(M, size=train_data_size)
train_label_out_s1 = train_label_s1.reshape((-1, 1))
test_label_s1 = np.random.randint(M, size=bertest_data_size)
test_label_out_s1 = test_label_s1.reshape((-1, 1))
# user 2
seed(2)
train_label_s2 = np.random.randint(M, size=train_data_size)
train_label_out_s2 = train_label_s2.reshape((-1, 1))
test_label_s2 = np.random.randint(M, size=bertest_data_size)
test_label_out_s2 = test_label_s2.reshape((-1, 1))

# Embedding model for two users using real-valued signals
# user1's transmitter
u1_input_signal = Input(shape=(1,))
u1_encoded = Embedding(input_dim=M, output_dim=emb_k, input_length=1)(u1_input_signal)
u1_encoded1 = Flatten()(u1_encoded)
u1_encoded2 = Dense(M, activation='relu')(u1_encoded1)
u1_encoded3 = Dense(n_channel, activation='linear')(u1_encoded2)
u1_encoded4 = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(u1_encoded3)
#u1_encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(u1_encoded3)
# user2's transmitter
u2_input_signal = Input(shape=(1,))
u2_encoded = Embedding(input_dim=M, output_dim=emb_k, input_length=1)(u2_input_signal)
u2_encoded1 = Flatten()(u2_encoded)
u2_encoded2 = Dense(M, activation='relu')(u2_encoded1)
u2_encoded3 = Dense(n_channel, activation='linear')(u2_encoded2)
u2_encoded4 = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(u2_encoded3)
#u2_encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(u2_encoded3)

# Mixed AWGN channel: each user's output interferes with the other's
u1_channel_out = Lambda(mixed_AWGN)([u1_encoded4, u2_encoded4])
u2_channel_out = Lambda(mixed_AWGN)([u2_encoded4, u1_encoded4])

# user1's receiver
u1_decoded = Dense(M, activation='relu', name='u1_pre_receiver')(u1_channel_out)
u1_decoded1 = Dense(M, activation='softmax', name='u1_receiver')(u1_decoded)

# user2's receiver
u2_decoded = Dense(M, activation='relu', name='u2_pre_receiver')(u2_channel_out)
u2_decoded1 = Dense(M, activation='softmax', name='u2_receiver')(u2_decoded)

twouser_autoencoder = Model(inputs=[u1_input_signal, u2_input_signal],
                            outputs=[u1_decoded1, u2_decoded1])
adam = Adam(lr=0.01)
twouser_autoencoder.compile(optimizer=adam,
                            loss='sparse_categorical_crossentropy',
                            loss_weights=[alpha, beta])  # loss = alpha*loss1 + beta*loss2
twouser_autoencoder.summary()
twouser_autoencoder.fit([train_label_s1, train_label_s2],
                        [train_label_out_s1, train_label_out_s2],
                        epochs=NUM_EPOCHS,
                        batch_size=BATCH_SIZE,
                        callbacks=[Mycallback(alpha, beta)])

#from keras.utils.vis_utils import plot_model
#plot_model(twouser_autoencoder, to_file='model.png')

# Generating the encoder and decoder for user1
u1_encoder = Model(u1_input_signal, u1_encoded4)
u1_encoded_input = Input(shape=(n_channel,))
u1_deco = twouser_autoencoder.get_layer("u1_pre_receiver")(u1_encoded_input)
u1_deco = twouser_autoencoder.get_layer("u1_receiver")(u1_deco)
u1_decoder = Model(u1_encoded_input, u1_deco)

# Generating the encoder and decoder for user2
u2_encoder = Model(u2_input_signal, u2_encoded4)
u2_encoded_input = Input(shape=(n_channel,))
u2_deco = twouser_autoencoder.get_layer("u2_pre_receiver")(u2_encoded_input)
u2_deco = twouser_autoencoder.get_layer("u2_receiver")(u2_deco)
u2_decoder = Model(u2_encoded_input, u2_deco)

# Plotting the constellation diagram
# user1
u1_scatter_plot = []
for i in range(M):
    u1_scatter_plot.append(u1_encoder.predict(np.expand_dims(i, axis=0)))
u1_scatter_plot = np.array(u1_scatter_plot).reshape(M, 2)
plt.scatter(u1_scatter_plot[:, 0], u1_scatter_plot[:, 1], color='red', label='user1(2,2)')
# user2
u2_scatter_plot = []
for i in range(M):
    u2_scatter_plot.append(u2_encoder.predict(np.expand_dims(i, axis=0)))
u2_scatter_plot = np.array(u2_scatter_plot).reshape(M, 2)
plt.scatter(u2_scatter_plot[:, 0], u2_scatter_plot[:, 1], color='blue', label='user2(2,2)')

plt.legend(loc='upper left', ncol=1)
plt.axis((-2.5, 2.5, -2.5, 2.5))
plt.grid()
fig = plt.gcf()
fig.set_size_inches(16, 12)
#fig.savefig('graph/TwoUsercons(2,2)0326_1.png', dpi=100)
plt.show()
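# --- Added sketch (not in the original script) ---
# A quick look at how the two transmitters learn to share the channel:
# printing the pairwise inner products of the learned constellations shows
# how close to orthogonal the two users' signal sets ended up.
print(np.dot(u1_scatter_plot, u2_scatter_plot.T))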
# Calculating the block error rate over a range of Eb/No values
EbNodB_range = list(np.linspace(0, 14, 28))
ber = [None] * len(EbNodB_range)
u1_ber = [None] * len(EbNodB_range)
u2_ber = [None] * len(EbNodB_range)
for i in range(0, len(EbNodB_range)):
    EbNo = 10 ** (EbNodB_range[i] / 10.0)
    noise_std = np.sqrt(1 / (2 * R * EbNo))
    nn = bertest_data_size
    noise1 = noise_std * np.random.randn(nn, n_channel)
    noise2 = noise_std * np.random.randn(nn, n_channel)
    u1_encoded_signal = u1_encoder.predict(test_label_s1)
    u2_encoded_signal = u2_encoder.predict(test_label_s2)
    u1_final_signal = u1_encoded_signal + u2_encoded_signal + noise1
    u2_final_signal = u2_encoded_signal + u1_encoded_signal + noise2
    u1_pred_final_signal = u1_decoder.predict(u1_final_signal)
    u2_pred_final_signal = u2_decoder.predict(u2_final_signal)
    u1_pred_output = np.argmax(u1_pred_final_signal, axis=1)
    u2_pred_output = np.argmax(u2_pred_final_signal, axis=1)
    u1_no_errors = (u1_pred_output != test_label_s1).astype(int).sum()
    u2_no_errors = (u2_pred_output != test_label_s2).astype(int).sum()
    u1_ber[i] = u1_no_errors / nn
    u2_ber[i] = u2_no_errors / nn
    ber[i] = (u1_ber[i] + u2_ber[i]) / 2
    print('U1_SNR:', EbNodB_range[i], 'U1_BER:', u1_ber[i])
    print('U2_SNR:', EbNodB_range[i], 'U2_BER:', u2_ber[i])
    print('SNR:', EbNodB_range[i], 'BER:', ber[i])

plt.plot(EbNodB_range, u1_ber, label='TwoUserSNR(2,2)U1,emb_k=4')
plt.plot(EbNodB_range, u2_ber, label='TwoUserSNR(2,2)U2,emb_k=4')
plt.plot(EbNodB_range, ber, label='TwoUserSNR(2,2),emb_k=4')

plt.yscale('log')
plt.xlabel('Eb/No (dB)')
plt.ylabel('Block Error Rate')
plt.grid()
plt.legend(loc='upper right', ncol=1)

fig = plt.gcf()
fig.set_size_inches(16, 12)
#fig.savefig('graph/TwoUserSNR(2,2)0326_1.png', dpi=100)
plt.show()

--------------------------------------------------------------------------------
/autoencoder22.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/helloMRDJ/autoencoder-for-the-Physical-Layer/6222b5c89af840b0da53b889c605c2de3115914b/autoencoder22.h5
--------------------------------------------------------------------------------