├── AWGN_ComplexChannel.py
├── AWGN_ComplexChannelCNN.py
├── AWGN_Complex_use.py
├── AWGN_complex_SNR.py
├── AutoEncoder.py
├── AutoEncoder_BasicModel.py
├── AutoEncoder_NewModel.py
├── AutoEncoder_embedding.py
├── AutoEncoder_embedding_high.py
├── AutoEncoder_embedding_high_contrast.py
├── AutoEncoder_embedding_trainSNR.py
├── AveragePower.py
├── BERtoBLER.py
├── Keras_test.py
├── LearningtoUse.py
├── MulSNR.py
├── README.md
├── RayleighChannel.py
├── Rayleigh_SISO_keras.py
├── Rayleigh_self.py
├── Rayleigh_self_CNN.py
├── Rayleigh_self_Dense.py
├── ReproducingResults.py
├── Test.py
├── Train_SNR.py
├── Train_SNR2.py
├── TwoUser.py
├── TwoUserBasicModel.py
├── TwoUserC.py
├── TwoUserSNRtest.py
├── _windows
│   ├── git.xml
│   └── laf.xml
├── codestyles
│   └── Default.xml
├── colors.scheme.xml
├── databaseDrivers.xml
├── databaseSettings.xml
├── debugger.xml
├── editor.codeinsight.xml
├── editor.xml
├── filetypes.xml
├── github.xml
├── ide.general.xml
├── project.default.xml
├── rayleigh.py
├── rayleigh_siso.py
├── readme.txt
├── reportGBDT.py
├── reportmlp.py
├── result.csv
├── resultGBDT.csv
├── strange.py
├── ui.lnf.xml
├── undergradthesis - 副本 (2).docx
└── vcs.xml
/AWGN_ComplexChannel.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 |
5 | import numpy as np
6 | import keras
7 | import tensorflow as tf
8 | from keras.layers import Input, LSTM, Dense,GaussianNoise, Lambda,Add, Reshape,Dropout, embeddings,Flatten
9 | from keras.models import Model
10 | from keras import regularizers
11 | from keras.layers.normalization import BatchNormalization
12 | from keras.optimizers import Adam, SGD, RMSprop
13 | from keras import backend as K
14 | from keras.utils.np_utils import to_categorical
15 |
16 | # for reproducing result
17 | from numpy.random import seed
18 | from sklearn.manifold import TSNE
19 | import matplotlib.pyplot as plt
20 | import random
21 | from numpy import sqrt
22 | from numpy import genfromtxt
23 | from math import pow
24 | from AutoEncoder_BasicModel import AutoEncoder_R
25 |
26 | #set the random state to generate the same/different train data
27 | from numpy.random import seed
28 | seed(1)
29 | from tensorflow import set_random_seed
30 | set_random_seed(2)
31 |
32 |
33 | class AutoEncoder_C(object):
34 |     """
35 |     An API for a neural-network-based end-to-end communication system
36 |     (autoencoder) over a complex-valued channel.
37 |     Initialize():
38 |         builds and trains the model
39 |     Draw_Constellation():
40 |         plots the constellation of the transmitted signal
41 |     """
42 | def __init__(self, ComplexChannel = True,CodingMeth = 'Embedding',M = 4,n_channel = 2, k = 2, emb_k=4, EbNodB_train = 7 , train_data_size = 10000):
43 |         """
44 |         :param ComplexChannel: if True, treat the n_channel uses as complex (2*n_channel real dimensions)
45 |         :param CodingMeth: 'Embedding' or 'Onehot'
46 |         :param M: total number of symbols (messages)
47 |         :param n_channel: number of channel uses per message
48 |         :param k: information bits per message, k = int(log2(M))
49 |         :param emb_k: output dimension of the embedding layer when CodingMeth is 'Embedding'
50 |         :param EbNodB_train: Eb/No (dB) of the channel noise during training
51 |         :param train_data_size: size of the training set
52 |         """
53 | seed(1)
54 | from tensorflow import set_random_seed
55 | set_random_seed(3)
56 | assert ComplexChannel in (True, False)
57 | assert CodingMeth in ('Embedding','Onehot')
58 | assert M > 1
59 | assert n_channel > 1
60 | assert emb_k > 1
61 | assert k >1
62 | self.M = M
63 | self.CodingMeth = CodingMeth
64 | self.ComplexChannel = ComplexChannel
65 | self.n_channel = n_channel
66 |         if ComplexChannel:
67 |             self.n_channel_r = self.n_channel * 2
68 |             self.n_channel_c = self.n_channel
69 |         else:
70 |             self.n_channel_r = self.n_channel
71 |             self.n_channel_c = self.n_channel
72 | self.emb_k = emb_k
73 | self.k = k
74 | self.R = self.k / float(self.n_channel)
75 | self.train_data_size = train_data_size
76 | self.EbNodB_train = EbNodB_train
77 | self.EbNo_train = 10 ** (self.EbNodB_train / 10.0)
78 | self.noise_std = np.sqrt(1 / (2 * self.R * self.EbNo_train))
79 |
80 |
81 |     def Rayleigh_Channel(self, x, n_sample):
82 |         """
83 |         Single-tap Rayleigh fading plus AWGN, y = h*x + n, in real arithmetic:
84 |         (H_R + jH_I)(x_r + jx_i) = (H_R*x_r - H_I*x_i) + j(H_R*x_i + H_I*x_r)
85 |         :param x: tensor of shape (batch, n_sample, 2); x[:,:,0] real, x[:,:,1] imag
86 |         :param n_sample: number of complex channel uses
87 |         :return: faded, noisy tensor of the same shape
88 |         """
89 |         H_R = np.random.normal(0, 1, n_sample)
90 |         H_I = np.random.normal(0, 1, n_sample)
91 |         real = H_R * x[:, :, 0] - H_I * x[:, :, 1]
92 |         imag = H_R * x[:, :, 1] + H_I * x[:, :, 0]
93 | noise_r = K.random_normal(K.shape(real),
94 | mean=0,
95 | stddev=self.noise_std)
96 | noise_i = K.random_normal(K.shape(imag),
97 | mean=0,
98 | stddev=self.noise_std)
99 | real = Add()([real, noise_r])
100 | imag = Add()([imag, noise_i])
101 | x = K.stack([real, imag], axis=2)
102 | return x
103 |
104 |     def Rayleigh_Channel_test(self, x, n_sample, noise_std, test_datasize):
105 |         """
106 |         Test-time Rayleigh fading with fresh channel taps for every block.
107 |         :param x: encoded signal, shape (test_datasize, n_sample, 2)
108 |         :param noise_std: per-dimension AWGN standard deviation
109 |         :return: received signal, evaluated to a numpy array
110 |         """
111 | H_R = np.random.normal(0, 1, n_sample*test_datasize)
112 | H_I = np.random.normal(0, 1, n_sample*test_datasize)
113 |         H_R = np.reshape(H_R, (-1, n_sample))
114 |         H_I = np.reshape(H_I, (-1, n_sample))
115 | np.random.shuffle(H_R)
116 | np.random.shuffle(H_I)
117 | #x[:,:,0] is the real part of the signal
118 | #x[:,:,1] is the imag part of the signal
119 | real = H_R*x[:,:,0] - H_I*x[:,:,1]
120 | imag = H_R*x[:,:,1] + H_I*x[:,:,0]
121 | noise_r = K.random_normal(K.shape(real),
122 | mean=0,
123 | stddev=noise_std)
124 | noise_i = K.random_normal(K.shape(imag),
125 | mean=0,
126 | stddev=noise_std)
127 | real = real+ noise_r
128 | imag = imag+ noise_i
129 | #print('realshape',real.shape)
130 | #print('imagshape',imag.shape)
131 | x = K.stack([real, imag],axis=2)
132 |         x = tf.Session().run(x)  # evaluate the symbolic graph to a numpy array (TF1-style; opens a fresh session per call)
133 | #print(x.shape)
134 | return x
135 |
136 | def Initialize(self):
137 | """
138 |
139 | :return:
140 | """
141 |
142 | if self.CodingMeth == 'Embedding':
143 |             print("This model uses the Embedding layer")
144 | #Generating train_data
145 | train_data = np.random.randint(self.M, size=self.train_data_size)
146 | train_data_pre = train_data.reshape((-1,1))
147 | # Embedding Layer
148 | input_signal = Input(shape=(1,))
149 | encoded = embeddings.Embedding(input_dim=self.M, output_dim=self.emb_k, input_length=1)(input_signal)
150 | encoded1 = Flatten()(encoded)
151 | encoded2 = Dense(self.M, activation='relu')(encoded1)
152 | encoded3 = Dense(self.n_channel_r, activation='linear')(encoded2)
153 | encoded4 = Lambda(lambda x: np.sqrt(self.n_channel_c) * K.l2_normalize(x, axis=1))(encoded3)
154 | #encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(encoded3)
155 | encoded5 = Reshape((-1,2))(encoded4)
156 | #channel_out = Lambda(lambda x: self.Rayleigh_Channel(x, self.n_channel_c))(encoded5)
157 | channel_out = GaussianNoise(np.sqrt(1 / (2 * self.R * self.EbNo_train)))(encoded5)
158 | decoded = Flatten()(channel_out)
159 | decoded1 = Dense(self.M, activation='relu')(decoded)
160 | decoded2 = Dense(self.M, activation='softmax')(decoded1)
161 |
162 | self.auto_encoder = Model(input_signal, decoded2)
163 | adam = Adam(lr=0.005)
164 | #rms = RMSprop(lr=0.002)
165 | self.auto_encoder.compile(optimizer=adam,
166 | loss='sparse_categorical_crossentropy',
167 | )
168 | print(self.auto_encoder.summary())
169 | self.auto_encoder.fit(train_data, train_data_pre,
170 | epochs=45,
171 | batch_size=32,
172 | verbose=2)
173 | self.encoder = Model(input_signal, encoded5)
174 | print('encoder',self.encoder.summary())
175 | encoded_input = Input(shape=(self.n_channel_c,2,))
176 |
177 | deco = self.auto_encoder.layers[-3](encoded_input)
178 | deco1 = self.auto_encoder.layers[-2](deco)
179 | deco2 = self.auto_encoder.layers[-1](deco1)
180 | self.decoder = Model(encoded_input, deco2)
181 | print('decodersummary',self.decoder.summary())
182 |
183 |         """
184 |         The one-hot branch below is unchanged from the plain AWGN (real) model.
185 |         """
186 | if self.CodingMeth == 'Onehot':
187 | print("This is the model using Onehot")
188 |
189 | # Generating train_data
190 | train_data = np.random.randint(self.M, size=self.train_data_size)
191 | data = []
192 | for i in train_data:
193 | temp = np.zeros(self.M)
194 | temp[i] = 1
195 | data.append(temp)
196 | train_data = np.array(data)
197 |
198 | input_signal = Input(shape=(self.M,))
199 | encoded = Dense(self.M, activation='relu')(input_signal)
200 | encoded1 = Dense(self.n_channel, activation='linear')(encoded)
201 | encoded2 = Lambda(lambda x: np.sqrt(self.n_channel) * K.l2_normalize(x, axis=1))(encoded1)
202 |             """
203 |             K.l2_normalize imposes the L2 power constraint on the codeword
204 |             """
205 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
206 | encoded3 = GaussianNoise(np.sqrt(1 / (2 * self.R * EbNo_train)))(encoded2)
207 |
208 | decoded = Dense(self.M, activation='relu')(encoded3)
209 | decoded1 = Dense(self.M, activation='softmax')(decoded)
210 | self.auto_encoder = Model(input_signal, decoded1)
211 | adam = Adam(lr=0.01)
212 | self.auto_encoder.compile(optimizer=adam, loss='categorical_crossentropy')
213 |
214 | print(self.auto_encoder.summary())
215 |
216 | # for tensor board visualization
217 | # tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
218 |             # training the autoencoder
219 |
220 | self.auto_encoder.fit(train_data, train_data,
221 | epochs=45,
222 | batch_size=32,
223 | verbose = 0)
224 |
225 | # saving keras model
226 | from keras.models import load_model
227 |
228 |             # to save the model, uncomment the line below
229 | # autoencoder.save('autoencoder_v_best.model')
230 |
231 | # making encoder from full autoencoder
232 | self.encoder = Model(input_signal, encoded2)
233 |
234 | # making decoder from full autoencoder
235 | encoded_input = Input(shape=(self.n_channel,))
236 |
237 | deco = self.auto_encoder.layers[-2](encoded_input)
238 | deco = self.auto_encoder.layers[-1](deco)
239 | self.decoder = Model(encoded_input, deco)
240 |
241 | def Draw_Constellation(self, test_data_size = 1500):
242 |         """
243 |         :param test_data_size: unused in the low-dimensional case; in the
244 |             high-dimensional (t-SNE) case it should not be too large
245 |         :return:
246 |         """
247 | import matplotlib.pyplot as plt
248 | test_label = np.random.randint(self.M, size=test_data_size)
249 | test_data = []
250 | for i in test_label:
251 | temp = np.zeros(self.M)
252 | temp[i] = 1
253 | test_data.append(temp)
254 | test_data = np.array(test_data)
255 |
256 | if self.n_channel == 2:
257 | scatter_plot = []
258 | if self.CodingMeth == 'Embedding':
259 | print("Embedding,Two Dimension")
260 | for i in range(0, self.M):
261 | scatter_plot.append(self.encoder.predict(np.expand_dims(i, axis=0)))
262 | scatter_plot = np.array(scatter_plot)
263 | if self.CodingMeth == 'Onehot':
264 | print("Onehot,Two Dimension")
265 | for i in range(0, self.M):
266 | temp = np.zeros(self.M)
267 | temp[i] = 1
268 | scatter_plot.append(self.encoder.predict(np.expand_dims(temp, axis=0)))
269 | scatter_plot = np.array(scatter_plot)
270 |             scatter_plot = scatter_plot.reshape(self.M, 2, 1)  # assumes 2 real dims per symbol; does not fit the ComplexChannel encoder output
271 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k) )
272 | plt.legend()
273 | plt.axis((-2.5, 2.5, -2.5, 2.5))
274 | plt.grid()
275 | plt.show()
276 | if self.n_channel > 2 :
277 | if self.CodingMeth == 'Embedding':
278 | x_emb = self.encoder.predict(test_label)
279 | print("Embedding,High Dimension")
280 | if self.CodingMeth == 'Onehot':
281 | x_emb = self.encoder.predict(test_data)
282 | print("Onehot,High Dimension")
283 |
284 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
285 | noise_std = np.sqrt(1 / (2 * self.R * EbNo_train))
286 | noise = noise_std * np.random.randn(test_data_size, self.n_channel)
287 | x_emb = x_emb + noise
288 | X_embedded = TSNE(learning_rate=700, n_components=2, n_iter=35000, random_state=0,
289 | perplexity=60).fit_transform(x_emb)
290 | print(X_embedded.shape)
291 | X_embedded = X_embedded / 7
292 | import matplotlib.pyplot as plt
293 | plt.scatter(X_embedded[:, 0], X_embedded[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k))
294 | # plt.axis((-2.5,2.5,-2.5,2.5))
295 | plt.legend()
296 | plt.grid()
297 | plt.show()
298 |
299 | def Cal_BLER(self, bertest_data_size = 50000, EbNodB_low = -4, EbNodB_high = 8.5, EbNodB_num = 26):
300 | test_label = np.random.randint(self.M, size=bertest_data_size)
301 | test_data = []
302 | for i in test_label:
303 | temp = np.zeros(self.M)
304 | temp[i] = 1
305 | test_data.append(temp)
306 | test_data = np.array(test_data)
307 |
308 | EbNodB_range = list(np.linspace(EbNodB_low, EbNodB_high, EbNodB_num))
309 | ber = [None] * len(EbNodB_range)
310 | self.ber = ber
311 | for n in range(0, len(EbNodB_range)):
312 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
313 | noise_std = np.sqrt(1 / (2 * self.R * EbNo))
314 | noise_mean = 0
315 | no_errors = 0
316 | nn = bertest_data_size
317 | noise = noise_std * np.random.randn(nn, self.n_channel_c,2)
318 | if self.CodingMeth == 'Embedding':
319 | encoded_signal = self.encoder.predict(test_label)
320 | if self.CodingMeth == 'Onehot':
321 | encoded_signal = self.encoder.predict(test_data)
322 | #final_signal = self.Rayleigh_Channel_test(x=encoded_signal,n_sample=self.n_channel_c,
323 | # noise_std=noise_std,
324 | # test_datasize=bertest_data_size)
325 | final_signal = encoded_signal + noise
326 | pred_final_signal = self.decoder.predict(final_signal)
327 | pred_output = np.argmax(pred_final_signal, axis=1)
328 |             print('pred_output shape', pred_output.shape)
329 |             print('pred_final_signal shape', pred_final_signal.shape)
330 | no_errors = (pred_output != test_label)
331 | no_errors = no_errors.astype(int).sum()
332 | ber[n] = no_errors / nn
333 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
334 | self.ber = ber
335 |
336 | """
337 | The following code shows how to apply the class AutoEncoder_C
338 | """
339 | """
340 | model_test3 = AutoEncoder_C(ComplexChannel=True, CodingMeth='Embedding', M=16, n_channel=7, k=4, emb_k=16, EbNodB_train=7, train_data_size=10000)
341 | model_test3.Initialize()
342 | print("Initialization Finished")
343 | #model_test3.Draw_Constellation()
344 | model_test3.Cal_BLER(bertest_data_size= 70000)
345 | EbNodB_range = list(np.linspace(-4, 8.5, 26))
346 | plt.plot(EbNodB_range, model_test3.ber,'bo')
347 | plt.yscale('log')
348 | plt.xlabel('SNR_RANGE')
349 | plt.ylabel('Block Error Rate')
350 | plt.grid()
351 | plt.show()
352 | """
353 |
354 |
355 |
--------------------------------------------------------------------------------
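The real/imaginary arithmetic in Rayleigh_Channel above is a single-tap complex fading channel, y = h*x + n with h = H_R + j*H_I. A minimal standalone numpy sketch of the same operation (sizes, names, and the noise level are illustrative, not taken from the repo):

import numpy as np

n_sym, n_channel_c = 1000, 2                    # blocks, complex channel uses per block
x = np.random.randn(n_sym, n_channel_c, 2)      # [..., 0] real part, [..., 1] imag part
H_R = np.random.normal(0, 1, n_channel_c)       # fading taps, one per channel use
H_I = np.random.normal(0, 1, n_channel_c)
noise_std = 0.4                                 # per-dimension AWGN std (example value)

# complex multiply written out in real arithmetic:
# (H_R + jH_I)(x_r + jx_i) = (H_R*x_r - H_I*x_i) + j(H_R*x_i + H_I*x_r)
real = H_R * x[:, :, 0] - H_I * x[:, :, 1]
imag = H_R * x[:, :, 1] + H_I * x[:, :, 0]
real = real + noise_std * np.random.randn(*real.shape)
imag = imag + noise_std * np.random.randn(*imag.shape)
y = np.stack([real, imag], axis=2)              # same (batch, n, 2) layout as the Keras code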
/AWGN_Complex_use.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpy import genfromtxt
3 | import matplotlib.pyplot as plt
4 | from AWGN_ComplexChannel import AutoEncoder_C
5 | from AutoEncoder_BasicModel import AutoEncoder_R
6 |
7 | EbNodB_range = list(np.linspace(-4,8.5,26))
8 | k=4
9 | bers = genfromtxt('data/hamming74bpsk.csv', delimiter=',')  # per-bit error rates
10 | # a block is correct only if all k bits are: BLER = 1 - (1 - BER)^k
11 | success = 1 - bers
12 | blers = 1 - success ** k
13 |
14 | plt.plot(EbNodB_range, blers, 'r.-', label='hamming74bpsk(7,4)')
15 |
16 | EbNodB_train = 0
17 | model_test = AutoEncoder_C(ComplexChannel=True,CodingMeth='Embedding',
18 | M = 16, n_channel=7, k = 4, emb_k=16,
19 | EbNodB_train = EbNodB_train,train_data_size=10000)
20 | model_test.Initialize()
21 | print("Initialization of the complex model Finished")
22 | #model_test.Draw_Constellation()
23 | model_test.Cal_BLER(EbNodB_low=-4,EbNodB_high=8.5,EbNodB_num=26,bertest_data_size= 50000)
24 | EbNodB_range = list(np.linspace(-4,8.5,26))
25 | plt.plot(EbNodB_range, model_test.ber,'b.-',label='AE_AWGN_RESHAPE(7,4)')
26 |
27 | model_real = AutoEncoder_R(CodingMeth='Embedding',M=16, n_channel=7, k=4,emb_k=16, EbNodB_train=EbNodB_train,train_data_size=10000)
28 | model_real.Initialize()
29 | print("Initialization of the real model Finished")
30 | model_real.Cal_BLER(bertest_data_size=50000,EbNodB_low=-4,EbNodB_high=8.5,EbNodB_num=26)
31 | plt.plot(EbNodB_range, model_real.ber,'y.-',label='AE_AWGN(7,4)')
32 |
33 | plt.legend(loc='upper right')
34 | plt.yscale('log')
35 | plt.xlabel('SNR_RANGE')
36 | plt.ylabel('Block Error Rate')
37 | plt.title('awgnChannel(7,4),EnergyConstraint,EbNodB_train:%f' % EbNodB_train)
38 | plt.grid()
39 |
40 | fig = plt.gcf()
41 | fig.set_size_inches(16,12)
42 | fig.savefig('graph/0506/AE_AWGN_RESHAPE(7,4)5.png',dpi=100)
43 | plt.show()
--------------------------------------------------------------------------------
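Both comparison curves above turn a per-bit error rate into a block error rate with BLER = 1 - (1 - BER)^k: a k-bit block is received correctly only if every bit is. A small standalone helper capturing that conversion (the function name is hypothetical):

import numpy as np

def ber_to_bler(ber, k):
    # BLER = 1 - (1 - BER)^k, assuming independent bit errors
    return 1.0 - (1.0 - np.asarray(ber, dtype=float)) ** k

print(ber_to_bler(0.01, 4))   # ~0.0394: a 1% BER becomes a ~3.9% BLER over 4 bits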
/AWGN_complex_SNR.py:
--------------------------------------------------------------------------------
1 | from AWGN_ComplexChannel import AutoEncoder_C
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | from numpy import genfromtxt
5 |
6 | EbNodB_range = list(np.linspace(-4,8.5,26))
7 | k=2
8 | bers = genfromtxt('data/uncodedbpsk.csv', delimiter=',')  # per-bit error rates
9 | # BLER = 1 - (1 - BER)^k
10 | success = 1 - bers
11 | blers = 1 - success ** k
12 |
13 | plt.plot(EbNodB_range, blers, 'r.-', label='uncodedbpsk(2,2)')
14 |
15 | EbNodB_low= -4
16 | EbNodB_high= 8.5
17 | EbNodB_num= 26
18 | M=4
19 | n_channel=2
20 | k=2
21 | emb_k=4
22 | #EbNodB_train=7
23 | train_data_size=10000
24 | bertest_data_size=50000
25 | number = 7
26 |
27 | #Train_EbNodB_range = list(np.linspace(start=5, stop=8, num=number))
28 | Train_EbNodB_range = list(np.linspace(start=-10, stop=20, num=number))
29 | EbNodB_range = list(np.linspace(start=EbNodB_low, stop=EbNodB_high, num=EbNodB_num))
30 | for (i,train_ebnodb) in enumerate(Train_EbNodB_range):
31 | print('train_ebnodb',train_ebnodb)
32 | model_complex = AutoEncoder_C(ComplexChannel=True,CodingMeth='Embedding',M=M,n_channel=n_channel,k=k,
33 | emb_k=emb_k,EbNodB_train=train_ebnodb,train_data_size=train_data_size)
34 | model_complex.Initialize()
35 | model_complex.Cal_BLER(bertest_data_size=bertest_data_size,EbNodB_low=EbNodB_low,
36 | EbNodB_high=EbNodB_high,EbNodB_num=EbNodB_num)
37 | plt.plot(EbNodB_range, model_complex.ber,linestyle='-.', label = 'Train_SNR:%f' % (train_ebnodb))
38 |
39 | plt.legend(loc = 'lower left')
40 | plt.yscale('log')
41 | plt.xlabel('SNR_RANGE')
42 | plt.ylabel('Block Error Rate')
43 | plt.title('awgnChannel(2,2),EnergyConstraint,SNRComparison')
44 | plt.grid()
45 |
46 | fig = plt.gcf()
47 | fig.set_size_inches(16,12)
48 | fig.savefig('graph/0506/AE_AWGN_RESHAPE(2,2)5.png',dpi=100)
49 | plt.show()
50 |
--------------------------------------------------------------------------------
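All of these scripts derive the channel noise level from Eb/No as noise_std = sqrt(1 / (2*R*EbNo)). A worked check of that formula for the (7,4) setting (values shown only for illustration):

import numpy as np

R = 4 / 7                                   # code rate k/n
EbNodB = 7.0
EbNo = 10 ** (EbNodB / 10.0)                # dB -> linear, ~5.0119 (the 5.01187 constant used below)
noise_std = np.sqrt(1 / (2 * R * EbNo))     # per-dimension AWGN std, ~0.418
print(EbNo, noise_std)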
/AutoEncoder.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 | import keras
8 | from keras.layers import Input, Dense, GaussianNoise, Lambda, Dropout
9 | from keras.models import Model
10 | from keras import regularizers
11 | from keras.layers.normalization import BatchNormalization
12 | from keras.optimizers import Adam, SGD
13 | from keras import backend as K
14 |
15 | # for reproducing results
16 | from numpy.random import seed
17 |
18 | seed(1)
19 | from tensorflow import set_random_seed
20 |
21 | set_random_seed(3)
22 |
23 | # defining parameters
24 | # define (n,k) here for (n,k) autoencoder
25 | # n = n_channel
26 | # k = log2(M) ==> so for (7,4) autoencoder n_channel = 7 and M = 2^4 = 16
27 | M = 16
28 | k = np.log2(M)
29 | k = int(k)
30 | n_channel = 7
31 | R = k / n_channel
32 | print('M:', M, 'k:', k, 'n:', n_channel)
33 |
34 | # generating data of size N
35 | N = 8000
36 | label = np.random.randint(M, size=N)
37 |
38 | # creating one hot encoded vectors
39 | data = []
40 | for i in label:
41 | temp = np.zeros(M)
42 | temp[i] = 1
43 | data.append(temp)
44 |
45 | # checking data shape
46 | data = np.array(data)
47 | print(data.shape)
48 |
49 | # checking generated data with its label
50 | temp_check = [17, 23, 45, 67, 89, 96, 72, 250, 350]
51 | for i in temp_check:
52 | print(label[i], data[i])
53 |
54 | # defining the autoencoder and its layers
55 | input_signal = Input(shape=(M,))
56 | encoded = Dense(M, activation='relu')(input_signal)
57 | encoded1 = Dense(n_channel, activation='linear')(encoded)
58 | encoded2 = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(encoded1)
59 | """
60 | K.l2_normalize imposes the L2 power constraint on the codeword
61 | """
62 | EbNo_train = 5.01187  # 7 dB of Eb/No converted to linear (10**(7/10))
63 | encoded3 = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))(encoded2)
64 |
65 | decoded = Dense(M, activation='relu')(encoded3)
66 | decoded1 = Dense(M, activation='softmax')(decoded)
67 | autoencoder = Model(input_signal, decoded1)
68 | adam = Adam(lr=0.01)
69 | autoencoder.compile(optimizer=adam, loss='categorical_crossentropy')
70 |
71 | # printing a summary of the layers and their trainable parameters
72 | print(autoencoder.summary())
73 |
74 | # for tensor board visualization
75 | # tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
76 | # training the autoencoder
77 |
78 | autoencoder.fit(data, data,
79 | epochs=45,
80 | batch_size=32)
81 |
82 | # saving keras model
83 | from keras.models import load_model
84 |
85 | # to save the model, uncomment the line below
86 | # autoencoder.save('autoencoder_v_best.model')
87 |
88 | # making encoder from full autoencoder
89 | encoder = Model(input_signal, encoded2)
90 |
91 | # making decoder from full autoencoder
92 | encoded_input = Input(shape=(n_channel,))
93 |
94 | deco = autoencoder.layers[-2](encoded_input)
95 | deco = autoencoder.layers[-1](deco)
96 | decoder = Model(encoded_input, deco)
97 |
98 | # generating data for checking the BLER
99 | # if you're not using t-SNE for visualization, set N to 70,000 for a better estimate
100 | # for t-SNE use a smaller N, e.g. N = 1500
101 | N = 50000
102 | test_label = np.random.randint(M, size=N)
103 | test_data = []
104 |
105 | for i in test_label:
106 | temp = np.zeros(M)
107 | temp[i] = 1
108 | test_data.append(temp)
109 |
110 | test_data = np.array(test_data)
111 |
112 | # checking generated data
113 | temp_test = 6
114 | print(test_data[temp_test][test_label[temp_test]], test_label[temp_test])
115 |
116 | # for plotting the learned constellation diagram
117 | """
118 | scatter_plot = []
119 | for i in range(0, M):
120 | temp = np.zeros(M)
121 | temp[i] = 1
122 | scatter_plot.append(encoder.predict(np.expand_dims(temp, axis=0)))
123 | scatter_plot = np.array(scatter_plot)
124 | print(scatter_plot.shape)
125 | """
126 | # use this block to plot the constellation in higher dimensions, e.g. 7-D for the (7,4) autoencoder
127 | '''
128 | x_emb = encoder.predict(test_data)
129 | noise_std = np.sqrt(1/(2*R*EbNo_train))
130 | noise = noise_std * np.random.randn(N,n_channel)
131 | x_emb = x_emb + noise
132 | from sklearn.manifold import TSNE
133 | X_embedded = TSNE(learning_rate=700, n_components=2,n_iter=35000, random_state=0, perplexity=60).fit_transform(x_emb)
134 | print (X_embedded.shape)
135 | X_embedded = X_embedded / 7
136 | import matplotlib.pyplot as plt
137 | plt.scatter(X_embedded[:,0],X_embedded[:,1])
138 | #plt.axis((-2.5,2.5,-2.5,2.5))
139 | plt.grid()
140 | plt.show()
141 | '''
142 | """
143 | # ploting constellation diagram
144 | import matplotlib.pyplot as plt
145 |
146 | scatter_plot = scatter_plot.reshape(M, 2, 1)
147 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1])
148 | #plt.axis((-2.5, 2.5, -2.5, 2.5))
149 | plt.grid()
150 | plt.show()
151 | """
152 |
153 | # calculating the block error rate (the 'ber' variable here is really a BLER)
154 | # this version is vectorized, so it can handle large N
155 | # an earlier version had an inner for loop that made it slow
156 | EbNodB_range = list(np.linspace(-4, 8.5, 26))
157 | ber = [None] * len(EbNodB_range)
158 | for n in range(0, len(EbNodB_range)):
159 | EbNo = 10.0 ** (EbNodB_range[n] / 10.0)
160 | noise_std = np.sqrt(1 / (2 * R * EbNo))
161 | noise_mean = 0
162 | no_errors = 0
163 | nn = N
164 | noise = noise_std * np.random.randn(nn, n_channel)
165 | encoded_signal = encoder.predict(test_data)
166 | final_signal = encoded_signal + noise
167 | pred_final_signal = decoder.predict(final_signal)
168 | pred_output = np.argmax(pred_final_signal, axis=1)
169 | no_errors = (pred_output != test_label)
170 | no_errors = no_errors.astype(int).sum()
171 | ber[n] = no_errors / nn
172 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
173 | # use the line below to print a MATLAB-style row vector for plotting the BLER curve in MATLAB
174 | # print(ber[n], " ",end='')
175 |
176 | # plotting the BLER curve
177 | import matplotlib.pyplot as plt
178 | from scipy import interpolate
179 |
180 | plt.plot(EbNodB_range, ber, 'bo', label='Autoencoder(7,4)')
181 | plt.yscale('log')
182 | plt.xlabel('SNR Range')
183 | plt.ylabel('Block Error Rate')
184 | plt.grid()
185 | plt.legend(loc='upper right', ncol=1)
186 |
187 | # for saving figure remove below comment
188 | # plt.savefig('AutoEncoder_2_2_constrained_BER_matplotlib')
189 | plt.show()
--------------------------------------------------------------------------------
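The Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1)) layer above scales every codeword to norm sqrt(n_channel), i.e. unit average power per channel use. A standalone numpy sketch of the same constraint (sizes are illustrative):

import numpy as np

n_channel = 7
codewords = np.random.randn(5, n_channel)               # five raw encoder outputs
norms = np.linalg.norm(codewords, axis=1, keepdims=True)
constrained = np.sqrt(n_channel) * codewords / norms    # what the Lambda layer computes
print(np.linalg.norm(constrained, axis=1))              # every row has norm sqrt(7) ~ 2.646
print((constrained ** 2).mean(axis=1))                  # mean power per dimension: 1.0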
/AutoEncoder_BasicModel.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 |
5 | import numpy as np
6 | import keras
7 | import tensorflow as tf
8 | from keras.layers import Input, LSTM, Dense,GaussianNoise, Lambda, Dropout, embeddings,Flatten
9 | from keras.models import Model
10 | from keras import regularizers
11 | from keras.layers.normalization import BatchNormalization
12 | from keras.optimizers import Adam, SGD
13 | from keras import backend as K
14 | from keras.utils.np_utils import to_categorical
15 |
16 | # for reproducing results
17 | from numpy.random import seed
18 | from sklearn.manifold import TSNE
19 | import matplotlib.pyplot as plt
20 |
21 |
22 | class AutoEncoder_R(object):  # name matches the 'from AutoEncoder_BasicModel import AutoEncoder_R' used by the other scripts
23 |     """
24 |     An API for a neural-network-based end-to-end communication system
25 |     (autoencoder) over an AWGN channel.
26 |     Initialize():
27 |         builds and trains the model
28 |     Draw_Constellation():
29 |         plots the constellation of the transmitted signal
30 |     """
31 | def __init__(self, CodingMeth = 'Embedding',M = 4,n_channel = 2, k = 2, emb_k=4, EbNodB_train = 7 , train_data_size = 10000):
32 |         """
33 |
34 |         :param CodingMeth: 'Embedding' or 'Onehot'
35 |         :param M: total number of symbols (messages)
36 |         :param n_channel: number of channel uses per message
37 |         :param k: information bits per message, k = int(log2(M))
38 |         :param emb_k: output dimension of the embedding layer when CodingMeth is 'Embedding'
39 |         :param EbNodB_train: Eb/No (dB) of the AWGN channel during training
40 |         :param train_data_size: size of the training set
41 |         """
42 | seed(1)
43 | from tensorflow import set_random_seed
44 | set_random_seed(3)
45 |
46 | assert CodingMeth in ('Embedding','Onehot')
47 | assert M > 1
48 | assert n_channel > 1
49 | assert emb_k > 1
50 | assert k >1
51 | self.M = M
52 | self.CodingMeth = CodingMeth
53 | self.n_channel = n_channel
54 | self.emb_k = emb_k
55 | self.k = k
56 | self.train_data_size = train_data_size
57 | self.EbNodB_train = EbNodB_train
58 | self.R = self.k / float(self.n_channel)
59 |
60 | def Initialize(self):
61 | """
62 |
63 | :return:
64 | """
65 |
66 | if self.CodingMeth == 'Embedding':
67 |             print("This model uses the Embedding layer")
68 | #Generating train_data
69 | train_data = np.random.randint(self.M, size=self.train_data_size)
70 | train_data_pre = train_data.reshape((-1,1))
71 | # Embedding Layer
72 | input_signal = Input(shape=(1,))
73 | encoded = embeddings.Embedding(input_dim=self.M, output_dim=self.emb_k, input_length=1)(input_signal)
74 | encoded1 = Flatten()(encoded)
75 | encoded2 = Dense(self.M, activation='relu')(encoded1)
76 | encoded3 = Dense(self.n_channel, activation='linear')(encoded2)
77 | encoded4 = Lambda(lambda x: np.sqrt(self.n_channel) * K.l2_normalize(x, axis=1))(encoded3)
78 |
79 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
80 | channel_out = GaussianNoise(np.sqrt(1 / (2 * self.R * EbNo_train)))(encoded4)
81 |
82 | decoded = Dense(self.M, activation='relu')(channel_out)
83 | decoded1 = Dense(self.M, activation='softmax')(decoded)
84 |
85 | self.auto_encoder = Model(input_signal, decoded1)
86 | adam = Adam(lr=0.01)
87 | self.auto_encoder.compile(optimizer=adam,
88 | loss='sparse_categorical_crossentropy',
89 | )
90 | print(self.auto_encoder.summary())
91 | self.auto_encoder.fit(train_data, train_data_pre,
92 | epochs=45,
93 | batch_size=32,
94 | verbose=0)
95 | self.encoder = Model(input_signal, encoded4)
96 | encoded_input = Input(shape=(self.n_channel,))
97 |
98 | deco = self.auto_encoder.layers[-2](encoded_input)
99 | deco = self.auto_encoder.layers[-1](deco)
100 | self.decoder = Model(encoded_input, deco)
101 |
102 | if self.CodingMeth == 'Onehot':
103 | print("This is the model using Onehot")
104 |
105 | # Generating train_data
106 | train_data = np.random.randint(self.M, size=self.train_data_size)
107 | data = []
108 | for i in train_data:
109 | temp = np.zeros(self.M)
110 | temp[i] = 1
111 | data.append(temp)
112 | train_data = np.array(data)
113 |
114 | input_signal = Input(shape=(self.M,))
115 | encoded = Dense(self.M, activation='relu')(input_signal)
116 | encoded1 = Dense(self.n_channel, activation='linear')(encoded)
117 | encoded2 = Lambda(lambda x: np.sqrt(self.n_channel) * K.l2_normalize(x, axis=1))(encoded1)
118 |             """
119 |             K.l2_normalize imposes the L2 power constraint on the codeword
120 |             """
121 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
122 | encoded3 = GaussianNoise(np.sqrt(1 / (2 * self.R * EbNo_train)))(encoded2)
123 |
124 | decoded = Dense(self.M, activation='relu')(encoded3)
125 | decoded1 = Dense(self.M, activation='softmax')(decoded)
126 | self.auto_encoder = Model(input_signal, decoded1)
127 | adam = Adam(lr=0.01)
128 | self.auto_encoder.compile(optimizer=adam, loss='categorical_crossentropy')
129 |
130 | print(self.auto_encoder.summary())
131 |
132 | # for tensor board visualization
133 | # tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
134 |             # training the autoencoder
135 |
136 | self.auto_encoder.fit(train_data, train_data,
137 | epochs=45,
138 | batch_size=32,
139 | verbose = 0)
140 |
141 | # saving keras model
142 | from keras.models import load_model
143 |
144 |             # to save the model, uncomment the line below
145 | # autoencoder.save('autoencoder_v_best.model')
146 |
147 | # making encoder from full autoencoder
148 | self.encoder = Model(input_signal, encoded2)
149 |
150 | # making decoder from full autoencoder
151 | encoded_input = Input(shape=(self.n_channel,))
152 |
153 | deco = self.auto_encoder.layers[-2](encoded_input)
154 | deco = self.auto_encoder.layers[-1](deco)
155 | self.decoder = Model(encoded_input, deco)
156 |
157 | def Draw_Constellation(self, test_data_size = 1500):
158 |         """
159 |         :param test_data_size: unused in the low-dimensional case; in the
160 |             high-dimensional (t-SNE) case it should not be too large
161 |         :return:
162 |         """
163 | import matplotlib.pyplot as plt
164 | test_label = np.random.randint(self.M, size=test_data_size)
165 | test_data = []
166 | for i in test_label:
167 | temp = np.zeros(self.M)
168 | temp[i] = 1
169 | test_data.append(temp)
170 | test_data = np.array(test_data)
171 |
172 | if self.n_channel == 2:
173 | scatter_plot = []
174 | if self.CodingMeth == 'Embedding':
175 | print("Embedding,Two Dimension")
176 | for i in range(0, self.M):
177 | scatter_plot.append(self.encoder.predict(np.expand_dims(i, axis=0)))
178 | scatter_plot = np.array(scatter_plot)
179 | if self.CodingMeth == 'Onehot':
180 | print("Onehot,Two Dimension")
181 | for i in range(0, self.M):
182 | temp = np.zeros(self.M)
183 | temp[i] = 1
184 | scatter_plot.append(self.encoder.predict(np.expand_dims(temp, axis=0)))
185 | scatter_plot = np.array(scatter_plot)
186 | scatter_plot = scatter_plot.reshape(self.M, 2, 1)
187 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k) )
188 | plt.legend()
189 | plt.axis((-2.5, 2.5, -2.5, 2.5))
190 | plt.grid()
191 | plt.show()
192 | if self.n_channel > 2 :
193 | if self.CodingMeth == 'Embedding':
194 | x_emb = self.encoder.predict(test_label)
195 | print("Embedding,High Dimension")
196 | if self.CodingMeth == 'Onehot':
197 | x_emb = self.encoder.predict(test_data)
198 | print("Onehot,High Dimension")
199 |
200 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
201 | noise_std = np.sqrt(1 / (2 * self.R * EbNo_train))
202 | noise = noise_std * np.random.randn(test_data_size, self.n_channel)
203 | x_emb = x_emb + noise
204 | X_embedded = TSNE(learning_rate=700, n_components=2, n_iter=35000, random_state=0,
205 | perplexity=60).fit_transform(x_emb)
206 | print(X_embedded.shape)
207 | X_embedded = X_embedded / 7
208 | import matplotlib.pyplot as plt
209 | plt.scatter(X_embedded[:, 0], X_embedded[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k))
210 | # plt.axis((-2.5,2.5,-2.5,2.5))
211 | plt.legend()
212 | plt.grid()
213 | plt.show()
214 |
215 | def Cal_BLER(self, bertest_data_size = 50000, EbNodB_low = -4, EbNodB_high = 8.5, EbNodB_num = 26):
216 | test_label = np.random.randint(self.M, size=bertest_data_size)
217 | test_data = []
218 | for i in test_label:
219 | temp = np.zeros(self.M)
220 | temp[i] = 1
221 | test_data.append(temp)
222 | test_data = np.array(test_data)
223 |
224 | EbNodB_range = list(np.linspace(EbNodB_low, EbNodB_high, EbNodB_num))
225 | ber = [None] * len(EbNodB_range)
226 | self.ber = ber
227 | for n in range(0, len(EbNodB_range)):
228 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
229 | noise_std = np.sqrt(1 / (2 * self.R * EbNo))
230 | noise_mean = 0
231 | no_errors = 0
232 | nn = bertest_data_size
233 | noise = noise_std * np.random.randn(nn, self.n_channel)
234 | if self.CodingMeth == 'Embedding':
235 | encoded_signal = self.encoder.predict(test_label)
236 | if self.CodingMeth == 'Onehot':
237 | encoded_signal = self.encoder.predict(test_data)
238 | final_signal = encoded_signal + noise
239 | pred_final_signal = self.decoder.predict(final_signal)
240 | pred_output = np.argmax(pred_final_signal, axis=1)
241 | no_errors = (pred_output != test_label)
242 | no_errors = no_errors.astype(int).sum()
243 | ber[n] = no_errors / nn
244 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
245 | self.ber = ber
246 |
247 | """
248 | The following code shows how to apply the class AutoEncoder_R
249 | """
250 | """
251 | model_test3 = AutoEncoder_R(CodingMeth='Embedding', M=16, n_channel=7, k=4, emb_k=16, EbNodB_train=7, train_data_size=10000)
252 | model_test3.Initialize()
253 | print("Initialization Finished")
254 | #model_test3.Draw_Constellation()
255 | model_test3.Cal_BLER(bertest_data_size=70000)
256 | EbNodB_range = list(np.linspace(-4, 8.5, 26))
257 | plt.plot(EbNodB_range, model_test3.ber, 'bo')
258 | plt.yscale('log')
259 | plt.xlabel('SNR_RANGE')
260 | plt.ylabel('Block Error Rate')
261 | plt.grid()
262 | plt.show()
263 | """
277 |
--------------------------------------------------------------------------------
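The one-hot loops repeated throughout these files (temp = np.zeros(M); temp[i] = 1 inside a Python for loop) can be replaced by a single indexing operation; a hedged, equivalent alternative for reference:

import numpy as np

M = 16
labels = np.random.randint(M, size=8)
one_hot = np.eye(M)[labels]     # row i of eye(M) is the one-hot vector for symbol i
assert one_hot.shape == (8, M)
assert (one_hot.argmax(axis=1) == labels).all()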
/AutoEncoder_NewModel.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 |
5 | import numpy as np
6 | import keras
7 | import tensorflow as tf
8 | from keras.layers import Input, LSTM, Dense,GaussianNoise, Lambda, Dropout, embeddings,Flatten
9 | from keras.models import Model
10 | from keras import regularizers
11 | from keras.layers.normalization import BatchNormalization
12 | from keras.optimizers import Adam, SGD
13 | from keras import backend as K
14 | from keras.utils.np_utils import to_categorical
15 |
16 | # for reproducing result
17 | from numpy.random import seed
18 | from sklearn.manifold import TSNE
19 | import matplotlib.pyplot as plt
20 |
21 |
22 | class NewAutoEncoder(object):
23 |     """
24 |     An API for a neural-network-based end-to-end communication system
25 |     (autoencoder); intended for experiments where the training SNR varies.
26 |     Initialize():
27 |         builds and trains the model
28 |     Draw_Constellation():
29 |         plots the constellation of the transmitted signal
30 |     """
31 | def __init__(self, CodingMeth = 'Embedding',M = 4,n_channel = 2, k = 2, emb_k=4, EbNodB_train = 7 , train_data_size = 10000):
32 |         """
33 |
34 |         :param CodingMeth: 'Embedding' or 'Onehot'
35 |         :param M: total number of symbols (messages)
36 |         :param n_channel: number of channel uses per message
37 |         :param k: information bits per message, k = int(log2(M))
38 |         :param emb_k: output dimension of the embedding layer when CodingMeth is 'Embedding'
39 |         :param EbNodB_train: Eb/No (dB) of the AWGN channel during training
40 |         :param train_data_size: size of the training set
41 |         """
42 | seed(1)
43 | from tensorflow import set_random_seed
44 | set_random_seed(3)
45 |
46 | assert CodingMeth in ('Embedding','Onehot')
47 | assert M > 1
48 | assert n_channel > 1
49 | assert emb_k > 1
50 | assert k >1
51 | self.M = M
52 | self.CodingMeth = CodingMeth
53 | self.n_channel = n_channel
54 | self.emb_k = emb_k
55 | self.k = k
56 | self.train_data_size = train_data_size
57 | self.EbNodB_train = EbNodB_train
58 | self.R = self.k / float(self.n_channel)
59 |
60 | def Initialize(self):
61 | """
62 |
63 | :return:
64 | """
65 |
66 | if self.CodingMeth == 'Embedding':
67 |             print("This model uses the Embedding layer")
68 | #Generating train_data
69 | train_data = np.random.randint(self.M, size=self.train_data_size)
70 | train_data_pre = train_data.reshape((-1,1))
71 | # Embedding Layer
72 | input_signal = Input(shape=(1,))
73 | encoded = embeddings.Embedding(input_dim=self.M, output_dim=self.emb_k, input_length=1)(input_signal)
74 | encoded1 = Flatten()(encoded)
75 | encoded2 = Dense(self.M, activation='relu')(encoded1)
76 | encoded3 = Dense(self.n_channel, activation='linear')(encoded2)
77 | encoded4 = Lambda(lambda x: np.sqrt(self.n_channel) * K.l2_normalize(x, axis=1))(encoded3)
78 |
79 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
80 | channel_out = GaussianNoise(np.sqrt(1 / (2 * self.R * EbNo_train)))(encoded4)
81 |
82 | decoded = Dense(self.M, activation='relu')(channel_out)
83 | decoded1 = Dense(self.M, activation='softmax')(decoded)
84 |
85 | self.auto_encoder = Model(input_signal, decoded1)
86 | adam = Adam(lr=0.01)
87 | self.auto_encoder.compile(optimizer=adam,
88 | loss='sparse_categorical_crossentropy',
89 | )
90 | print(self.auto_encoder.summary())
91 | self.auto_encoder.fit(train_data, train_data_pre,
92 | epochs=45,
93 | batch_size=32,
94 | verbose=0)
95 | self.encoder = Model(input_signal, encoded4)
96 | encoded_input = Input(shape=(self.n_channel,))
97 |
98 | deco = self.auto_encoder.layers[-2](encoded_input)
99 | deco = self.auto_encoder.layers[-1](deco)
100 | self.decoder = Model(encoded_input, deco)
101 |
102 | if self.CodingMeth == 'Onehot':
103 | print("This is the model using Onehot")
104 |
105 | # Generating train_data
106 | train_data = np.random.randint(self.M, size=self.train_data_size)
107 | data = []
108 | for i in train_data:
109 | temp = np.zeros(self.M)
110 | temp[i] = 1
111 | data.append(temp)
112 | train_data = np.array(data)
113 |
114 | input_signal = Input(shape=(self.M,))
115 | encoded = Dense(self.M, activation='relu')(input_signal)
116 | encoded1 = Dense(self.n_channel, activation='linear')(encoded)
117 | encoded2 = Lambda(lambda x: np.sqrt(self.n_channel) * K.l2_normalize(x, axis=1))(encoded1)
118 |             """
119 |             K.l2_normalize imposes the L2 power constraint on the codeword
120 |             """
121 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
122 | encoded3 = GaussianNoise(np.sqrt(1 / (2 * self.R * EbNo_train)))(encoded2)
123 |
124 | decoded = Dense(self.M, activation='relu')(encoded3)
125 | decoded1 = Dense(self.M, activation='softmax')(decoded)
126 |             self.auto_encoder = Model(input_signal, decoded1)
127 |             adam = Adam(lr=0.01)
128 |             self.auto_encoder.compile(optimizer=adam, loss='categorical_crossentropy')
129 |
130 |             print(self.auto_encoder.summary())
131 |
132 | # for tensor board visualization
133 | # tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
134 |             # training the autoencoder
135 |
136 |             self.auto_encoder.fit(train_data, train_data,
137 | epochs=45,
138 | batch_size=32,
139 | verbose = 0)
140 |
141 | # saving keras model
142 | from keras.models import load_model
143 |
144 |             # to save the model, uncomment the line below
145 | # autoencoder.save('autoencoder_v_best.model')
146 |
147 | # making encoder from full autoencoder
148 | self.encoder = Model(input_signal, encoded2)
149 |
150 | # making decoder from full autoencoder
151 | encoded_input = Input(shape=(self.n_channel,))
152 |
153 |             deco = self.auto_encoder.layers[-2](encoded_input)
154 |             deco = self.auto_encoder.layers[-1](deco)
155 | self.decoder = Model(encoded_input, deco)
156 |
157 | def Draw_Constellation(self, test_data_size = 1500):
158 |         """
159 |         :param test_data_size: unused in the low-dimensional case; in the
160 |             high-dimensional (t-SNE) case it should not be too large
161 |         :return:
162 |         """
163 | import matplotlib.pyplot as plt
164 | test_label = np.random.randint(self.M, size=test_data_size)
165 | test_data = []
166 | for i in test_label:
167 | temp = np.zeros(self.M)
168 | temp[i] = 1
169 | test_data.append(temp)
170 | test_data = np.array(test_data)
171 |
172 | if self.n_channel == 2:
173 | scatter_plot = []
174 | if self.CodingMeth == 'Embedding':
175 | print("Embedding,Two Dimension")
176 | for i in range(0, self.M):
177 | scatter_plot.append(self.encoder.predict(np.expand_dims(i, axis=0)))
178 | scatter_plot = np.array(scatter_plot)
179 | if self.CodingMeth == 'Onehot':
180 | print("Onehot,Two Dimension")
181 | for i in range(0, self.M):
182 | temp = np.zeros(self.M)
183 | temp[i] = 1
184 | scatter_plot.append(self.encoder.predict(np.expand_dims(temp, axis=0)))
185 | scatter_plot = np.array(scatter_plot)
186 | scatter_plot = scatter_plot.reshape(self.M, 2, 1)
187 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k) )
188 | plt.legend()
189 | plt.axis((-2.5, 2.5, -2.5, 2.5))
190 | plt.grid()
191 | plt.show()
192 | if self.n_channel > 2 :
193 | if self.CodingMeth == 'Embedding':
194 | x_emb = self.encoder.predict(test_label)
195 | print("Embedding,High Dimension")
196 | if self.CodingMeth == 'Onehot':
197 | x_emb = self.encoder.predict(test_data)
198 | print("Onehot,High Dimension")
199 |
200 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
201 | noise_std = np.sqrt(1 / (2 * self.R * EbNo_train))
202 | noise = noise_std * np.random.randn(test_data_size, self.n_channel)
203 | x_emb = x_emb + noise
204 | X_embedded = TSNE(learning_rate=700, n_components=2, n_iter=35000, random_state=0,
205 | perplexity=60).fit_transform(x_emb)
206 | print(X_embedded.shape)
207 | X_embedded = X_embedded / 7
208 | import matplotlib.pyplot as plt
209 | plt.scatter(X_embedded[:, 0], X_embedded[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k))
210 | # plt.axis((-2.5,2.5,-2.5,2.5))
211 | plt.legend()
212 | plt.grid()
213 | plt.show()
214 |
215 | def Cal_BLER(self, bertest_data_size = 50000, EbNodB_low = -4, EbNodB_high = 8.5, EbNodB_num = 26):
216 | test_label = np.random.randint(self.M, size=bertest_data_size)
217 | test_data = []
218 | for i in test_label:
219 | temp = np.zeros(self.M)
220 | temp[i] = 1
221 | test_data.append(temp)
222 | test_data = np.array(test_data)
223 |
224 | EbNodB_range = list(np.linspace(EbNodB_low, EbNodB_high, EbNodB_num))
225 | ber = [None] * len(EbNodB_range)
226 | self.ber = ber
227 | for n in range(0, len(EbNodB_range)):
228 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
229 | noise_std = np.sqrt(1 / (2 * self.R * EbNo))
230 | noise_mean = 0
231 | no_errors = 0
232 | nn = bertest_data_size
233 | noise = noise_std * np.random.randn(nn, self.n_channel)
234 | if self.CodingMeth == 'Embedding':
235 | encoded_signal = self.encoder.predict(test_label)
236 | if self.CodingMeth == 'Onehot':
237 | encoded_signal = self.encoder.predict(test_data)
238 | final_signal = encoded_signal + noise
239 | pred_final_signal = self.decoder.predict(final_signal)
240 | pred_output = np.argmax(pred_final_signal, axis=1)
241 | no_errors = (pred_output != test_label)
242 | no_errors = no_errors.astype(int).sum()
243 | ber[n] = no_errors / nn
244 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
245 | self.ber = ber
246 |
247 | """
248 | The following code shows how to apply the class NewAutoEncoder
249 | """
250 | """
251 | model_test3 = NewAutoEncoder(CodingMeth='Embedding', M=16, n_channel=7, k=4, emb_k=16, EbNodB_train=7, train_data_size=10000)
252 | model_test3.Initialize()
253 | print("Initialization Finished")
254 | #model_test3.Draw_Constellation()
255 | model_test3.Cal_BLER(bertest_data_size=70000)
256 | EbNodB_range = list(np.linspace(-4, 8.5, 26))
257 | plt.plot(EbNodB_range, model_test3.ber, 'bo')
258 | plt.yscale('log')
259 | plt.xlabel('SNR_RANGE')
260 | plt.ylabel('Block Error Rate')
261 | plt.grid()
262 | plt.show()
263 | """
277 |
--------------------------------------------------------------------------------
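Each class above rebuilds its decoder by replaying the trained model's last layers on a fresh Input (the auto_encoder.layers[-2]/layers[-1] pattern). A minimal standalone sketch of that pattern with toy layer sizes (not the repo's actual dimensions):

import numpy as np
from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(4,))
hidden = Dense(8, activation='relu')(inp)
out = Dense(4, activation='softmax')(hidden)
full = Model(inp, out)

# encoder: reuse the existing graph up to the hidden layer
encoder = Model(inp, hidden)

# decoder: push a new Input through the (already trained) final layer
dec_in = Input(shape=(8,))
decoder = Model(dec_in, full.layers[-1](dec_in))

x = np.random.randn(3, 4)
assert np.allclose(full.predict(x), decoder.predict(encoder.predict(x)))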
/AutoEncoder_embedding.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import keras
3 | from keras.layers import Input, LSTM,Dense,GaussianNoise, Lambda, Dropout, embeddings,Flatten
4 | from keras.models import Model
5 | from keras import regularizers
6 | from keras.layers.normalization import BatchNormalization
7 | from keras.optimizers import Adam, SGD
8 | from keras import backend as K
9 | from keras.utils.np_utils import to_categorical
10 |
11 | #set the random state to generate the same/different train data
12 | from numpy.random import seed
13 | seed(1)
14 | from tensorflow import set_random_seed
15 | set_random_seed(3)
16 |
17 | M = 4
18 | k_r = np.log2(M)   # information bits per message
19 | k_r = int(k_r)
20 | k = 2              # embedding output dimension (emb_k)
21 | n_channel = 2
22 | R = k_r / n_channel  # code rate
23 | print('M:', M, 'k:', k, 'n:', n_channel)
24 |
25 | #generating train data
26 | N = 10000
27 | label = np.random.randint(M, size = N)
28 | label_out = label.reshape((-1,1))
29 | #defining an auto encoder
30 |
31 | # creating one hot encoded vectors
32 | data = []
33 | for i in label:
34 | temp = np.zeros(M)
35 | temp[i] = 1
36 | data.append(temp)
37 |
38 | # checking data shape
39 | data = np.array(data)
40 | print(data.shape)
41 |
42 | # checking generated data with its label
43 | temp_check = [17, 23, 45, 67, 89, 96, 72, 250, 350]
44 | for i in temp_check:
45 | print(label[i], data[i])
46 |
47 | # defining the autoencoder and its layers (one-hot)
48 | input_signal = Input(shape=(M,))
49 | encoded_n = Dense(M, activation='relu')(input_signal)
50 | encoded1_n = Dense(n_channel, activation='linear')(encoded_n)
51 | encoded2_n = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(encoded1_n)
52 | """
53 | K.l2_normalize imposes the L2 power constraint on the codeword
54 | """
55 | EbNo_train = 5.01187  # 7 dB of Eb/No converted to linear (10**(7/10))
56 | encoded3_n = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))(encoded2_n)
57 |
58 | decoded_n = Dense(M, activation='relu')(encoded3_n)
59 | decoded1_n = Dense(M, activation='softmax')(decoded_n)
60 | autoencoder_n = Model(input_signal, decoded1_n)
61 | adam = Adam(lr=0.01)
62 | autoencoder_n.compile(optimizer=adam, loss='categorical_crossentropy')
63 |
64 | # printing summary of layers and it's trainable parameters
65 | print(autoencoder_n.summary())
66 |
67 | # for tensor board visualization
68 | # tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
69 | # training the autoencoder
70 |
71 | autoencoder_n.fit(data, data,
72 | epochs=45,
73 | batch_size=32)
74 |
75 |
76 | encoder_n = Model(input_signal, encoded2_n)
77 | encoded_input_n = Input(shape=(n_channel,))
78 |
79 | deco_n = autoencoder_n.layers[-2](encoded_input_n)
80 | deco_n = autoencoder_n.layers[-1](deco_n)
81 | decoder_n = Model(encoded_input_n, deco_n)
82 |
83 | # Embedding Layer
84 | input_signal = Input( shape = (1, ) )
85 | encoded = embeddings.Embedding(input_dim=M, output_dim = k,input_length= 1 )(input_signal)
86 | encoded1 = Flatten()(encoded)
87 | encoded2 = Dense(M,activation= 'relu')(encoded1)
88 | #encoded2 = LSTM(n_channel, dropout=0.2, recurrent_dropout=0.2)(encoded)
89 | encoded3 = Dense(n_channel, activation= 'linear')(encoded2)
90 | #encoded4 = Lambda(lambda x: np.sqrt(n_channel)* K.l2_normalize(x, axis=1))(encoded3)
91 | encoded4 = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(encoded3)
92 |
93 | EbNodB_train = 7
94 | EbNo_train = 10 ** (EbNodB_train / 10.0)
95 | # EbNo_train = 5.01187
96 | channel_out = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))(encoded4)
97 |
98 | decoded = Dense(M, activation='relu')(channel_out)
99 | decoded1 = Dense(M, activation='softmax')(decoded)
100 | #decoded1 = Dense(M, activation= 'sigmoid')(decoded)
101 | # softmax yields a probability distribution over the M messages, as required by the cross-entropy loss
102 |
103 | auto_encoder_embedding = Model(input_signal, decoded1)
104 | adam= Adam(lr= 0.01)
105 | auto_encoder_embedding.compile(optimizer= adam,
106 | loss= 'sparse_categorical_crossentropy',
107 | )
108 | print(auto_encoder_embedding.summary())
109 | auto_encoder_embedding.fit(label, label_out,
110 | epochs=45,
111 | batch_size=32)
112 | encoder = Model(input_signal, encoded4)
113 | encoded_input = Input(shape=(n_channel,))
114 |
115 | deco = auto_encoder_embedding.layers[-2](encoded_input)
116 | deco = auto_encoder_embedding.layers[-1](deco)
117 | decoder = Model(encoded_input, deco)
118 |
119 |
120 | #generating test data
121 |
122 | N = 50000
123 | test_label = np.random.randint(M, size=N)
124 | test_label_out = test_label.reshape((-1,1))
125 | test_data = []
126 | for i in test_label:
127 | temp = np.zeros(M)
128 | temp[i] = 1
129 | test_data.append(temp)
130 |
131 | test_data = np.array(test_data)
132 |
133 | #plotting constellation diagram for embedding
134 | scatter_plot = []
135 | for i in range (0,M):
136 | scatter_plot.append(encoder.predict(np.expand_dims(i, axis=0)))
137 | scatter_plot = np.array(scatter_plot)
138 | print(scatter_plot.shape)
139 |
140 | import matplotlib.pyplot as plt
141 | scatter_plot = scatter_plot.reshape(M, 2, 1)
142 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1])
143 | plt.legend(['embedding_constellation(2,2),emb_k=2'],loc='upper left')
144 | plt.axis((-2.5, 2.5, -2.5, 2.5))
145 | plt.grid()
146 | plt.show()
147 |
148 | #plotting constellation diagram for one-hot
149 | scatter_plot = []
150 | for i in range(0, M):
151 | temp = np.zeros(M)
152 | temp[i] = 1
153 | scatter_plot.append(encoder_n.predict(np.expand_dims(temp, axis=0)))
154 | scatter_plot = np.array(scatter_plot)
155 | print(scatter_plot.shape)
156 | scatter_plot = scatter_plot.reshape(M, 2, 1)
157 |
158 |
159 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1], )
160 | plt.legend(['onehot_constellation(2,2)'],loc='upper left')
161 | plt.axis((-2.5, 2.5, -2.5, 2.5))
162 | plt.grid()
163 | plt.show()
164 |
166 | # use this block to plot the constellation in higher dimensions, e.g. 7-D for the (7,4) autoencoder
166 | '''
167 | x_emb = encoder.predict(test_data)
168 | noise_std = np.sqrt(1/(2*R*EbNo_train))
169 | noise = noise_std * np.random.randn(N,n_channel)
170 | x_emb = x_emb + noise
171 | from sklearn.manifold import TSNE
172 | X_embedded = TSNE(learning_rate=700, n_components=2,n_iter=35000, random_state=0, perplexity=60).fit_transform(x_emb)
173 | print (X_embedded.shape)
174 | X_embedded = X_embedded / 7
175 | import matplotlib.pyplot as plt
176 | plt.scatter(X_embedded[:,0],X_embedded[:,1])
177 | #plt.axis((-2.5,2.5,-2.5,2.5))
178 | plt.grid()
179 | plt.show()
180 | '''
181 |
182 | # calculating BLER for the embedding model
183 | EbNodB_range = list(np.linspace(-4, 8.5 ,26))
184 | ber = [None] * len(EbNodB_range)
185 | for n in range(0, len(EbNodB_range)):
186 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
187 | noise_std = np.sqrt(1 / (2 * R * EbNo))
188 | noise_mean = 0
189 | no_errors = 0
190 | nn = N
191 | noise = noise_std * np.random.randn(nn, n_channel)
192 | encoded_signal = encoder.predict(test_label)
193 | final_signal = encoded_signal + noise
194 | pred_final_signal = decoder.predict(final_signal)
195 | pred_output = np.argmax(pred_final_signal, axis=1)
196 | no_errors = (pred_output != test_label)
197 | no_errors = no_errors.astype(int).sum()
198 | ber[n] = no_errors/nn
199 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
200 |
201 | # calculating BLER for the one-hot model
202 | ber_n = [None] * len(EbNodB_range)
203 | for n in range(0, len(EbNodB_range)):
204 | EbNo = 10.0 ** (EbNodB_range[n] / 10.0)
205 | noise_std = np.sqrt(1 / (2 * R * EbNo))
206 | noise_mean = 0
207 | no_errors = 0
208 | nn = N
209 | noise = noise_std * np.random.randn(nn, n_channel)
210 | encoded_signal = encoder_n.predict(test_data)
211 | final_signal = encoded_signal + noise
212 | pred_final_signal = decoder_n.predict(final_signal)
213 | pred_output = np.argmax(pred_final_signal, axis=1)
214 | no_errors = (pred_output != test_label)
215 | no_errors = no_errors.astype(int).sum()
216 | ber_n[n] = no_errors / nn
217 | print('SNR:', EbNodB_range[n], 'BER_N:', ber_n[n])
218 |
219 | plt.plot(EbNodB_range, ber )
220 | plt.plot(EbNodB_range, ber_n )
221 | plt.yscale('log')
222 | plt.xlabel('SNR_RANGE')
223 | plt.ylabel('Block Error Rate')
224 | plt.grid()
225 | plt.legend(['Autoencoder_embedding(2,2),emb_k=2', 'Autoencoder_onehot(2,2)'], loc='upper right', ncol=1)
226 |
227 |
228 | plt.show()
229 |
--------------------------------------------------------------------------------
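The embedding path compared above (label -> Embedding -> Flatten) is mathematically a lookup of one row of a trainable M x emb_k matrix, which is the same as multiplying a one-hot vector by that matrix. A small numpy illustration (W stands in for the Embedding layer's learned weights):

import numpy as np

M, emb_k = 4, 2
W = np.random.randn(M, emb_k)    # stand-in for the Embedding weight matrix
label = 3
one_hot = np.eye(M)[label]
assert np.allclose(W[label], one_hot @ W)   # embedding lookup == one-hot matmul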
/AutoEncoder_embedding_high.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import keras
3 | from keras.layers import Input, LSTM,Dense,GaussianNoise, Lambda, Dropout, embeddings,Flatten
4 | from keras.models import Model
5 | from keras import regularizers
6 | from keras.layers.normalization import BatchNormalization
7 | from keras.optimizers import Adam, SGD
8 | from keras import backend as K
9 | from keras.utils.np_utils import to_categorical
10 |
11 | #set the random state to generate the same/different train data
12 | from numpy.random import seed
13 | seed(1)
14 | from tensorflow import set_random_seed
15 | set_random_seed(3)
16 |
17 | M = 256
18 | k_r = np.log2(M)   # information bits per message
19 | k_r = int(k_r)
20 | k = 256            # embedding output dimension (emb_k)
21 | n_channel = 8
22 | R = k_r / n_channel  # code rate
23 | print('M:', M, 'k:', k, 'n:', n_channel)
24 |
25 | #generating train data
26 | N = 10000
27 | label = np.random.randint(M, size = N)
28 | label_out = label.reshape((-1,1))
29 | #defining an auto encoder
30 |
31 | input_signal = Input( shape = (1, ) )
32 | encoded = embeddings.Embedding(input_dim=M, output_dim = k,input_length= 1 )(input_signal)
33 | encoded1 = Flatten()(encoded)
34 | encoded2 = Dense(M,activation= 'relu')(encoded1)
35 | #encoded2 = LSTM(n_channel, dropout=0.2, recurrent_dropout=0.2)(encoded)
36 | encoded3 = Dense(n_channel, activation= 'linear')(encoded2)
37 | #encoded4 = Lambda(lambda x: np.sqrt(n_channel)* K.l2_normalize(x, axis=1))(encoded3)
38 | encoded4 = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(encoded3)
39 |
40 | EbNodB_train = 7
41 | EbNo_train = 10 ** (EbNodB_train / 10.0)
42 | # EbNo_train = 5.01187
43 | channel_out = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))(encoded4)
44 |
45 | decoded = Dense(M, activation='relu')(channel_out)
46 | decoded1 = Dense(M, activation='softmax')(decoded)
47 | #decoded1 = Dense(M, activation= 'sigmoid')(decoded)
48 | # softmax yields a probability distribution over the M messages, as required by the cross-entropy loss
49 |
50 | auto_encoder_embedding = Model(input_signal, decoded1)
51 | adam= Adam(lr= 0.01)
52 | auto_encoder_embedding.compile(optimizer= adam,
53 | loss= 'sparse_categorical_crossentropy',
54 | )
55 | print(auto_encoder_embedding.summary())
56 | auto_encoder_embedding.fit(label, label_out,
57 | epochs=45,
58 | batch_size=32)
59 | encoder = Model(input_signal, encoded4)
60 | encoded_input = Input(shape=(n_channel,))
61 |
62 | deco = auto_encoder_embedding.layers[-2](encoded_input)
63 | deco = auto_encoder_embedding.layers[-1](deco)
64 | decoder = Model(encoded_input, deco)
65 |
66 |
67 | #generating test data
68 |
69 | N = 50000
70 | test_label = np.random.randint(M, size=N)
71 | test_label_out = test_label.reshape((-1,1))
72 | #plotting constellation diagram
73 | """
74 | scatter_plot = []
75 | for i in range (0,M):
76 | scatter_plot.append(encoder.predict(np.expand_dims(i, axis=0)))
77 | scatter_plot = np.array(scatter_plot)
78 | print(scatter_plot.shape)
79 |
80 | import matplotlib.pyplot as plt
81 | scatter_plot = scatter_plot.reshape(M, 2, 1)
82 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1])
83 | plt.axis((-2.5, 2.5, -2.5, 2.5))
84 | plt.grid()
85 | plt.show()
86 | """
87 | # use this t-SNE block for plotting constellations in higher dimensions, e.g. 7-D for a (7,4) autoencoder
88 | """
89 | x_emb = encoder.predict(test_label)
90 | noise_std = np.sqrt(1/(2*R*EbNo_train))
91 | noise = noise_std * np.random.randn(N,n_channel)
92 | x_emb = x_emb + noise
93 | from sklearn.manifold import TSNE
94 | X_embedded = TSNE(learning_rate=700, n_components=2,n_iter=35000, random_state=0, perplexity=60).fit_transform(x_emb)
95 | print (X_embedded.shape)
96 | X_embedded = X_embedded / 7
97 | import matplotlib.pyplot as plt
98 | plt.scatter(X_embedded[:,0],X_embedded[:,1])
99 | plt.axis((-2.5,2.5,-2.5,2.5))
100 | plt.grid()
101 | plt.show()
102 | """
103 |
104 | import matplotlib.pyplot as plt
105 | # calculating block error rate
106 | EbNodB_range = list(np.linspace(-4, 8.5 ,26))
107 | ber = [None] * len(EbNodB_range)
108 | for n in range(0, len(EbNodB_range)):
109 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
110 | noise_std = np.sqrt(1 / (2 * R * EbNo))
111 | noise_mean = 0
112 | no_errors = 0
113 | nn = N
114 | noise = noise_std * np.random.randn(nn, n_channel)
115 | encoded_signal = encoder.predict(test_label)
116 | final_signal = encoded_signal + noise
117 | pred_final_signal = decoder.predict(final_signal)
118 | pred_output = np.argmax(pred_final_signal, axis=1)
119 | no_errors = (pred_output != test_label)
120 | no_errors = no_errors.astype(int).sum()
121 | ber[n] = no_errors/nn
122 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
123 |
124 | plt.plot(EbNodB_range, ber, 'bo', label='Autoencoder_embedding(8,8),emb_k=256')
125 | plt.yscale('log')
126 | plt.xlabel('SNR_RANGE')
127 | plt.ylabel('Block Error Rate')
128 | plt.grid()
129 | plt.legend(loc='upper right',ncol= 1)
130 |
131 | plt.show()
132 |
--------------------------------------------------------------------------------
/AutoEncoder_embedding_high_contrast.py:
--------------------------------------------------------------------------------
1 | # This script compares the block error rate of one-hot encoding and an embedding layer when (n,k) is larger than (2,2)
2 | import numpy as np
3 | import keras
4 | from keras.layers import Input, LSTM,Dense,GaussianNoise, Lambda, Dropout, embeddings,Flatten
5 | from keras.models import Model
6 | from keras import regularizers
7 | from keras.layers.normalization import BatchNormalization
8 | from keras.optimizers import Adam, SGD
9 | from keras import backend as K
10 | from keras.utils.np_utils import to_categorical
11 |
12 | #set the random state to generate the same/different train data
13 | from numpy.random import seed
14 | seed(1)
15 | from tensorflow import set_random_seed
16 | set_random_seed(3)
17 |
18 | M = 16
19 | k_r = np.log2(M)
20 | k_r = int(k_r)
21 | k = 16
22 | n_channel = 7
23 | R = k_r / n_channel
24 | print('M:', M, 'k:', k, 'n:', n_channel)
25 |
26 | #generating train data
27 | N = 10000
28 | label = np.random.randint(M, size = N)
29 | label_out = label.reshape((-1,1))
30 | #defining an auto encoder
31 |
32 | # creating one hot encoded vectors
33 | data = []
34 | for i in label:
35 | temp = np.zeros(M)
36 | temp[i] = 1
37 | data.append(temp)
38 |
39 | # checking data shape
40 | data = np.array(data)
41 | print(data.shape)
42 |
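# --- annotation (not in the original script): the loop above builds the
# one-hot matrix row by row; an equivalent vectorized form is an
# identity-matrix lookup:
data_alt = np.eye(M)[label]
assert np.array_equal(data, data_alt)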
43 | # checking generated data with its label
44 | temp_check = [17, 23, 45, 67, 89, 96, 72, 250, 350]
45 | for i in temp_check:
46 | print(label[i], data[i])
47 |
48 | # defining autoencoder and it's layer (onehot)
49 | input_signal = Input(shape=(M,))
50 | encoded_n = Dense(M, activation='relu')(input_signal)
51 | encoded1_n = Dense(n_channel, activation='linear')(encoded_n)
52 | encoded2_n = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(encoded1_n)
53 | """
54 | K.l2_normalize enforces a second-order (power) constraint
55 | """
56 | EbNo_train = 5.01187  # converted from 7 dB of Eb/No
57 | encoded3_n = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))(encoded2_n)
58 |
59 | decoded_n = Dense(M, activation='relu')(encoded3_n)
60 | decoded1_n = Dense(M, activation='softmax')(decoded_n)
61 | autoencoder_n = Model(input_signal, decoded1_n)
62 | adam = Adam(lr=0.01)
63 | autoencoder_n.compile(optimizer=adam, loss='categorical_crossentropy')
64 |
65 | # printing a summary of the layers and their trainable parameters
66 | print(autoencoder_n.summary())
67 |
68 | # for tensor board visualization
69 | # tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
70 | # training the autoencoder
71 |
72 | autoencoder_n.fit(data, data,
73 | epochs=45,
74 | batch_size=32)
75 |
76 |
77 | encoder_n = Model(input_signal, encoded2_n)
78 | encoded_input_n = Input(shape=(n_channel,))
79 |
80 | deco_n = autoencoder_n.layers[-2](encoded_input_n)
81 | deco_n = autoencoder_n.layers[-1](deco_n)
82 | decoder_n = Model(encoded_input_n, deco_n)
83 |
84 | # Embedding Layer
85 | input_signal = Input( shape = (1, ) )
86 | encoded = embeddings.Embedding(input_dim=M, output_dim = k,input_length= 1 )(input_signal)
87 | encoded1 = Flatten()(encoded)
88 | encoded2 = Dense(M,activation= 'relu')(encoded1)
89 | #encoded2 = LSTM(n_channel, dropout=0.2, recurrent_dropout=0.2)(encoded)
90 | encoded3 = Dense(n_channel, activation= 'linear')(encoded2)
91 | #encoded4 = Lambda(lambda x: np.sqrt(n_channel)* K.l2_normalize(x, axis=1))(encoded3)
92 | encoded4 = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(encoded3)
93 |
94 | EbNodB_train = 7
95 | EbNo_train = 10 ** (EbNodB_train / 10.0)
96 | # EbNo_train = 5.01187
97 | channel_out = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))(encoded4)
98 |
99 | decoded = Dense(M, activation='relu')(channel_out)
100 | decoded1 = Dense(M, activation='softmax')(decoded)
101 | #decoded1 = Dense(M, activation= 'sigmoid')(decoded)
102 | # softmax: the decoder must output a probability distribution over the M messages for the sparse categorical cross-entropy loss
103 |
104 | auto_encoder_embedding = Model(input_signal, decoded1)
105 | adam= Adam(lr= 0.01)
106 | auto_encoder_embedding.compile(optimizer= adam,
107 | loss= 'sparse_categorical_crossentropy',
108 | )
109 | print(auto_encoder_embedding.summary())
110 | auto_encoder_embedding.fit(label, label_out,
111 | epochs=45,
112 | batch_size=32)
113 | encoder = Model(input_signal, encoded4)
114 | encoded_input = Input(shape=(n_channel,))
115 |
116 | deco = auto_encoder_embedding.layers[-2](encoded_input)
117 | deco = auto_encoder_embedding.layers[-1](deco)
118 | decoder = Model(encoded_input, deco)
119 |
120 |
121 | #generating test data
122 |
123 | N = 1500
124 | test_label = np.random.randint(M, size=N)
125 | test_label_out = test_label.reshape((-1,1))
126 | test_data = []
127 | for i in test_label:
128 | temp = np.zeros(M)
129 | temp[i] = 1
130 | test_data.append(temp)
131 |
132 | test_data = np.array(test_data)
133 | import matplotlib.pyplot as plt
134 | """
135 | #plotting constellation diagram for embedding
136 | scatter_plot = []
137 | for i in range (0,M):
138 | scatter_plot.append(encoder.predict(np.expand_dims(i, axis=0)))
139 | scatter_plot = np.array(scatter_plot)
140 | print(scatter_plot.shape)
141 |
142 |
143 | scatter_plot = scatter_plot.reshape(M, 2, 1)
144 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1])
145 | plt.legend(['embedding_constellation'],loc='upper left')
146 | plt.axis((-2.5, 2.5, -2.5, 2.5))
147 | plt.grid()
148 | plt.show()
149 |
150 | #plotting constellation diagram for one-hot
151 | scatter_plot = []
152 | for i in range(0, M):
153 | temp = np.zeros(M)
154 | temp[i] = 1
155 | scatter_plot.append(encoder_n.predict(np.expand_dims(temp, axis=0)))
156 | scatter_plot = np.array(scatter_plot)
157 | print(scatter_plot.shape)
158 | scatter_plot = scatter_plot.reshape(M, 2, 1)
159 |
160 |
161 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1], )
162 | plt.legend(['onehot_constellation'],loc='upper left')
163 | plt.axis((-2.5, 2.5, -2.5, 2.5))
164 | plt.grid()
165 | plt.show()
166 | """
167 | # use this t-SNE block for plotting constellations in higher dimensions, e.g. 7-D for a (7,4) autoencoder
168 | # generating data for checking BLER
169 | # if you're not using t-SNE for visualization then set N to 70,000 for a better result
170 | # for t-SNE use a smaller N, e.g. N = 1500
171 |
172 | x_emb = encoder.predict(test_label)
173 | noise_std = np.sqrt(1/(2*R*EbNo_train))
174 | noise = noise_std * np.random.randn(N,n_channel)
175 | x_emb = x_emb + noise
176 | from sklearn.manifold import TSNE
177 | X_embedded = TSNE(learning_rate=700, n_components=2,n_iter=35000, random_state=0, perplexity=60).fit_transform(x_emb)
178 | print (X_embedded.shape)
179 | X_embedded = X_embedded / 7
180 | import matplotlib.pyplot as plt
181 | plt.scatter(X_embedded[:,0],X_embedded[:,1])
182 | #plt.axis((-2.5,2.5,-2.5,2.5))
183 | plt.legend(['Autoencoder_embedding(7,4) constellation, emb_k=16'], loc='lower left')
184 | plt.grid()
185 | plt.show()
186 |
187 | x_emb = encoder_n.predict(test_data)  # the one-hot encoder expects one-hot vectors, not integer labels
188 | noise_std = np.sqrt(1/(2*R*EbNo_train))
189 | noise = noise_std * np.random.randn(N,n_channel)
190 | x_emb = x_emb + noise
191 | from sklearn.manifold import TSNE
192 | X_embedded = TSNE(learning_rate=700, n_components=2,n_iter=35000, random_state=0, perplexity=60).fit_transform(x_emb)
193 | print (X_embedded.shape)
194 | X_embedded = X_embedded / 7
195 | import matplotlib.pyplot as plt
196 | plt.scatter(X_embedded[:,0],X_embedded[:,1])
197 | #plt.axis((-2.5,2.5,-2.5,2.5))
198 | plt.legend(['Autoencoder_onehot(7,4) constellation'], loc='lower left')
199 | plt.grid()
200 | plt.show()
201 |
202 | # calculating block error rate for the embedding model
203 | EbNodB_range = list(np.linspace(-4, 8.5 ,26))
204 | ber = [None] * len(EbNodB_range)
205 | for n in range(0, len(EbNodB_range)):
206 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
207 | noise_std = np.sqrt(1 / (2 * R * EbNo))
208 | noise_mean = 0
209 | no_errors = 0
210 | nn = N
211 | noise = noise_std * np.random.randn(nn, n_channel)
212 | encoded_signal = encoder.predict(test_label)
213 | final_signal = encoded_signal + noise
214 | pred_final_signal = decoder.predict(final_signal)
215 | pred_output = np.argmax(pred_final_signal, axis=1)
216 | no_errors = (pred_output != test_label)
217 | no_errors = no_errors.astype(int).sum()
218 | ber[n] = no_errors/nn
219 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
220 |
221 | # calculating block error rate for the one-hot model
222 | ber_n = [None] * len(EbNodB_range)
223 | for n in range(0, len(EbNodB_range)):
224 | EbNo = 10.0 ** (EbNodB_range[n] / 10.0)
225 | noise_std = np.sqrt(1 / (2 * R * EbNo))
226 | noise_mean = 0
227 | no_errors = 0
228 | nn = N
229 | noise = noise_std * np.random.randn(nn, n_channel)
230 | encoded_signal = encoder_n.predict(test_data)
231 | final_signal = encoded_signal + noise
232 | pred_final_signal = decoder_n.predict(final_signal)
233 | pred_output = np.argmax(pred_final_signal, axis=1)
234 | no_errors = (pred_output != test_label)
235 | no_errors = no_errors.astype(int).sum()
236 | ber_n[n] = no_errors / nn
237 | print('SNR:', EbNodB_range[n], 'BER_N:', ber_n[n])
238 |
239 | plt.plot(EbNodB_range, ber )
240 | plt.plot(EbNodB_range, ber_n )
241 | plt.yscale('log')
242 | plt.xlabel('SNR_RANGE')
243 | plt.ylabel('Block Error Rate')
244 | plt.grid()
245 | plt.legend(['Autoencoder_embedding(7,4),emb_k=16',
246 |             'Autoencoder_onehot(7,4)'], loc='upper right', ncol=1)
247 |
248 | plt.show()
249 |
--------------------------------------------------------------------------------
/AutoEncoder_embedding_trainSNR.py:
--------------------------------------------------------------------------------
1 | # This script tests the influence of the SNR used in the training phase
2 | import numpy as np
3 | import keras
4 | from keras.layers import Input, LSTM,Dense,GaussianNoise, Lambda, Dropout, embeddings,Flatten
5 | from keras.models import Model
6 | from keras import regularizers
7 | from keras.layers.normalization import BatchNormalization
8 | from keras.optimizers import Adam, SGD
9 | from keras import backend as K
10 | from keras.utils.np_utils import to_categorical
11 |
12 | #set the random state to generate the same/different train data
13 | from numpy.random import seed
14 | seed(1)
15 | from tensorflow import set_random_seed
16 | set_random_seed(3)
17 |
18 | M = 4
19 | k = np.log2(M)
20 | k = int(k)
21 | emb_k = 2
22 | n_channel = 2
23 | R = k / n_channel
24 | print('M:', M, 'emb_k',emb_k, 'k:', k, 'n:', n_channel)
25 |
26 | #generating train data
27 | N = 10000
28 | label = np.random.randint(M, size = N)
29 | label_out = label.reshape((-1,1))
30 |
31 | # Embedding Layer
32 | input_signal = Input( shape = (1, ) )
33 | encoded = embeddings.Embedding(input_dim=M, output_dim = k,input_length= 1 )(input_signal)
34 | encoded1 = Flatten()(encoded)
35 | encoded2 = Dense(M,activation= 'relu')(encoded1)
36 | encoded3 = Dense(n_channel, activation= 'linear')(encoded2)
37 | encoded4 = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(encoded3)
38 |
39 | EbNodB_train = 7
40 | EbNo_train = 10 ** (EbNodB_train / 10.0)
41 | # EbNo_train = 5.01187
42 | channel_out = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))(encoded4)
43 |
44 | decoded = Dense(M, activation='relu')(channel_out)
45 | decoded1 = Dense(M, activation='softmax')(decoded)
46 | #decoded1 = Dense(M, activation= 'sigmoid')(decoded)
47 | # softmax: the decoder must output a probability distribution over the M messages for the sparse categorical cross-entropy loss
48 |
49 | auto_encoder_embedding = Model(input_signal, decoded1)
50 | adam= Adam(lr= 0.01)
51 | auto_encoder_embedding.compile(optimizer= adam,
52 | loss= 'sparse_categorical_crossentropy',
53 | )
54 | print(auto_encoder_embedding.summary())
55 | auto_encoder_embedding.fit(label, label_out,
56 | epochs=45,
57 | batch_size=32)
58 | encoder = Model(input_signal, encoded4)
59 | encoded_input = Input(shape=(n_channel,))
60 |
61 | deco = auto_encoder_embedding.layers[-2](encoded_input)
62 | deco = auto_encoder_embedding.layers[-1](deco)
63 | decoder = Model(encoded_input, deco)
64 |
65 |
66 | #generating test data
67 |
68 | N = 50000
69 | test_label = np.random.randint(M, size=N)
70 | test_label_out = test_label.reshape((-1,1))
71 | test_data = []
72 | for i in test_label:
73 | temp = np.zeros(M)
74 | temp[i] = 1
75 | test_data.append(temp)
76 |
77 | test_data = np.array(test_data)
78 |
79 | #plotting constellation diagram for embedding
80 | scatter_plot = []
81 | for i in range (0,M):
82 | scatter_plot.append(encoder.predict(np.expand_dims(i, axis=0)))
83 | scatter_plot = np.array(scatter_plot)
84 | print(scatter_plot.shape)
85 |
86 | import matplotlib.pyplot as plt
87 | scatter_plot = scatter_plot.reshape(M, 2, 1)
88 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1])
89 | plt.legend(['embedding_constellation(2,2),emb_k=2'],loc='upper left')
90 | plt.axis((-2.5, 2.5, -2.5, 2.5))
91 | plt.grid()
92 | plt.show()
93 |
94 |
95 | # use this t-SNE block for plotting constellations in higher dimensions, e.g. 7-D for a (7,4) autoencoder
96 | '''
97 | x_emb = encoder.predict(test_data)
98 | noise_std = np.sqrt(1/(2*R*EbNo_train))
99 | noise = noise_std * np.random.randn(N,n_channel)
100 | x_emb = x_emb + noise
101 | from sklearn.manifold import TSNE
102 | X_embedded = TSNE(learning_rate=700, n_components=2,n_iter=35000, random_state=0, perplexity=60).fit_transform(x_emb)
103 | print (X_embedded.shape)
104 | X_embedded = X_embedded / 7
105 | import matplotlib.pyplot as plt
106 | plt.scatter(X_embedded[:,0],X_embedded[:,1])
107 | #plt.axis((-2.5,2.5,-2.5,2.5))
108 | plt.grid()
109 | plt.show()
110 | '''
111 |
112 | # calculating block error rate for the embedding model
113 | EbNodB_range = list(np.linspace(-4, 8.5 ,26))
114 | ber = [None] * len(EbNodB_range)
115 | for n in range(0, len(EbNodB_range)):
116 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
117 | noise_std = np.sqrt(1 / (2 * R * EbNo))
118 | noise_mean = 0
119 | no_errors = 0
120 | nn = N
121 | noise = noise_std * np.random.randn(nn, n_channel)
122 | encoded_signal = encoder.predict(test_label)
123 | final_signal = encoded_signal + noise
124 | pred_final_signal = decoder.predict(final_signal)
125 | pred_output = np.argmax(pred_final_signal, axis=1)
126 | no_errors = (pred_output != test_label)
127 | no_errors = no_errors.astype(int).sum()
128 | ber[n] = no_errors/nn
129 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
130 |
131 | plt.plot(EbNodB_range, ber )
132 | plt.yscale('log')
133 | plt.xlabel('SNR_RANGE')
134 | plt.ylabel('Block Error Rate')
135 | plt.grid()
136 | plt.legend(['Autoencoder_embedding(2,2),emb_k=2'],
137 |            loc='upper right', ncol=1)
138 |
139 | plt.show()
140 |
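# --- annotation (not in the original script): as written, this script trains
# at the single value EbNodB_train = 7. A hypothetical sketch of the sweep the
# file name suggests (assuming the model-building and BLER code above were
# wrapped into a helper; `build_and_eval` does not exist in this repo):
#
#   for EbNodB in [-5, 0, 5, 7, 10]:
#       bler_curve = build_and_eval(EbNodB_train=EbNodB)
#       plt.plot(EbNodB_range, bler_curve, label='trained at %d dB' % EbNodB)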
--------------------------------------------------------------------------------
/AveragePower.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import numpy as np
4 | # import tensorflow as tf
5 | import keras
6 | from keras.layers import Input, Dense, GaussianNoise, Lambda, Dropout
7 | from keras.models import Model
8 | from keras import regularizers
9 | from keras.layers.normalization import BatchNormalization
10 | from keras.optimizers import Adam, SGD
11 | from keras import backend as K
12 |
13 | # for reproducible results
14 | from numpy.random import seed
15 |
16 | seed(1)
17 | from tensorflow import set_random_seed
18 |
19 | set_random_seed(3)
20 |
21 | # defining parameters
22 | # define (n,k) here for (n,k) autoencoder
23 | # n = n_channel
24 | # k = log2(M) ==> so for (7,4) autoencoder n_channel = 7 and M = 2^4 = 16
25 | M = 16
26 | k = np.log2(M)
27 | k = int(k)
28 | n_channel = 2
29 | R = k / n_channel
30 | print('M:', M, 'k:', k, 'n:', n_channel)
31 |
32 | # generating data of size N
33 | N = 8000
34 | label = np.random.randint(M, size=N)
35 |
36 | # creating one hot encoded vectors
37 | data = []
38 | for i in label:
39 | temp = np.zeros(M)
40 | temp[i] = 1
41 | data.append(temp)
42 |
43 | # checking data shape
44 | data = np.array(data)
45 | print(data.shape)
46 |
47 | # checking generated data with it's label
48 | # temp_check = [17,23,45,67,89,96,72,250,350]
49 | # for i in temp_check:
50 | # print(label[i],data[i])
51 |
52 | # defining autoencoder and it's layer
53 | input_signal = Input(shape=(M,))
54 | encoded = Dense(M, activation='relu')(input_signal)
55 | encoded1 = Dense(n_channel, activation='linear')(encoded)
56 |
57 | # amplitude constraint
58 | # encoded2 = Lambda(lambda x: np.sqrt(n_channel)*K.l2_normalize(x,axis=1))(encoded1)
59 |
60 | # average power constraint, the first two method have the same effect.
61 | # encoded2 = Lambda(lambda x: K.batch_normalization(x,K.mean(x,axis=0,keepdims=True),
62 | # K.var(x,axis=0,keepdims=True),0,1))(encoded1)
63 | # encoded2 = BatchNormalization(momentum=0.9,center=False,scale=False)(encoded1)
64 |
65 | encoded2 = BatchNormalization(momentum=0, center=False, scale=False)(encoded1)
66 | # encoded2 = BatchNormalization()(encoded1)
67 |
68 |
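# --- annotation (not in the original script): unlike the per-sample amplitude
# constraint sqrt(n)*l2_normalize (commented out above), BatchNormalization
# with center=False, scale=False standardizes each dimension across the batch,
# so unit power holds on average over symbols rather than for every symbol.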
69 | EbNo_train = 5.01187  # converted from 7 dB of Eb/No
70 | # GaussianNoise acts as a regularizer; the layer is only active during training.
71 | encoded3 = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))(encoded2)
72 |
73 | decoded = Dense(M, activation='relu')(encoded3)
74 | decoded1 = Dense(M, activation='softmax')(decoded)
75 | autoencoder = Model(input_signal, decoded1)
76 | adam = Adam(lr=0.01)
77 | autoencoder.compile(optimizer=adam,
78 | loss='categorical_crossentropy',
79 | metrics=['accuracy'])
80 |
81 | # printing a summary of the layers and their trainable parameters
82 | print(autoencoder.summary())
83 |
84 | # for tensor board visualization
85 | # tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
86 | # training the autoencoder
87 |
88 | autoencoder.fit(data, data,
89 | epochs=45,
90 | batch_size=512,
91 | verbose=1)
92 |
93 | # saving keras model
94 | from keras.models import load_model
95 |
96 | # making encoder from full autoencoder
97 | encoder = Model(input_signal, encoded2)
98 |
99 | # making decoder from full autoencoder
100 | encoded_input = Input(shape=(n_channel,))
101 |
102 | deco = autoencoder.layers[-2](encoded_input)
103 | deco = autoencoder.layers[-1](deco)
104 | decoder = Model(encoded_input, deco)
105 |
106 | # generating data for checking BER
107 | # if you're not using t-SNE for visualization then set N to 70,000 for a better result
108 | # for t-SNE use a smaller N, e.g. N = 1500
109 | N = 50000
110 | test_label = np.random.randint(M, size=N)
111 | test_data = []
112 |
113 | for i in test_label:
114 | temp = np.zeros(M)
115 | temp[i] = 1
116 | test_data.append(temp)
117 |
118 | test_data = np.array(test_data)
119 |
120 | # checking generated data
121 | temp_test = 6
122 | print(test_data[temp_test][test_label[temp_test]], test_label[temp_test])
123 |
124 | # for plotting the learned constellation diagram
125 |
126 | # scatter_plot = []
127 | # for i in range(0,M):
128 | # temp = np.zeros(M)
129 | # temp[i] = 1
130 | # scatter_plot.append(encoder.predict(np.expand_dims(temp,axis=0)))
131 | # scatter_plot = np.array(scatter_plot)
132 | # print (scatter_plot.shape)
133 |
134 | # use this t-SNE block for plotting constellations in higher dimensions, e.g. 7-D for a (7,4) autoencoder
135 | # for t-sne use less N like N = 1500
136 | '''
137 | x_emb = encoder.predict(test_data)
138 | noise_std = np.sqrt(1/(2*R*EbNo_train))
139 | noise = noise_std * np.random.randn(N,n_channel)
140 | x_emb = x_emb + noise
141 | print(x_emb)
142 | from sklearn.manifold import TSNE
143 | #X_embedded = TSNE(learning_rate=700, n_components=2,n_iter=35000, random_state=0, perplexity=60).fit_transform(x_emb)
144 | model = TSNE(n_components=2, random_state=0)
145 | np.set_printoptions(suppress=True)
146 | X_embedded = model.fit_transform(x_emb[0:1000,:])
147 | print (X_embedded.shape)
148 | X_embedded = X_embedded / 7
149 | import matplotlib.pyplot as plt
150 | plt.scatter(X_embedded[:,0],X_embedded[:,1])
151 | #plt.axis((-2.5,2.5,-2.5,2.5))
152 | plt.grid()
153 | plt.show()
154 | '''
155 |
156 | nn = np.linspace(0, M - 1, M)
157 | nn = keras.utils.to_categorical(nn, num_classes=M)
158 | scatter_plot = encoder.predict(nn)
159 |
160 | # plotting constellation diagram
161 | import matplotlib.pyplot as plt
162 |
163 | scatter_plot = scatter_plot.reshape(M, 2, 1)
164 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1])
165 | # plt.axis((-2.5,2.5,-2.5,2.5))
166 | plt.grid()
167 | plt.show()
168 |
169 |
170 | def frange(x, y, jump):
171 | while x < y:
172 | yield x
173 | x += jump
174 |
175 |
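# --- annotation (not in the original script): frange is a float-step range
# generator with an exclusive end point; list(frange(-4, 8.5, 0.5)) yields
# -4.0, -3.5, ..., 8.0, matching np.arange(-4, 8.5, 0.5).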
176 | # EbNodB_range = list(np.linspace(-4,8.5,26))
177 | EbNodB_range = list(frange(-4, 8.5, 0.5))
178 | ber = [None] * len(EbNodB_range)
179 | for n in range(0, len(EbNodB_range)):
180 | EbNo = 10.0 ** (EbNodB_range[n] / 10.0)
181 | noise_std = np.sqrt(1 / (2 * R * EbNo))
182 | noise_mean = 0
183 | no_errors = 0
184 | nn = N
185 | noise = noise_std * np.random.randn(nn, n_channel)
186 | encoded_signal = encoder.predict(test_data)
187 | final_signal = encoded_signal + noise
188 | pred_final_signal = decoder.predict(final_signal)
189 | pred_output = np.argmax(pred_final_signal, axis=1)
190 | no_errors = (pred_output != test_label)
191 | no_errors = no_errors.astype(int).sum()
192 | ber[n] = no_errors / nn
193 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
194 |
195 | # plotting the BLER curve
196 | import matplotlib.pyplot as plt
197 | from scipy import interpolate
198 |
199 | plt.plot(EbNodB_range, ber, 'bo', label='Autoencoder(%d,%d)' % (n_channel, k))
200 | plt.yscale('log')
201 | plt.xlabel('SNR Range')
202 | plt.ylabel('Block Error Rate')
203 | plt.grid()
204 | plt.legend(loc='upper right', ncol=1)
205 | plt.show()
--------------------------------------------------------------------------------
/BERtoBLER.py:
--------------------------------------------------------------------------------
1 | from numpy import genfromtxt
2 | from math import pow
3 | import numpy as np
4 | from matplotlib import pyplot as plt
5 | k=2
6 | uncodedbpsk_bers = genfromtxt('data/uncodedbpsk.csv', delimiter=',')  # relative path, as in MulSNR.py
7 | uncodedbpsk_bers = 1 - uncodedbpsk_bers  # per-bit success probability
8 | uncodedbpsk_blers = uncodedbpsk_bers.copy()
9 | for i,uncodedbpsk_ber in enumerate(uncodedbpsk_bers):
10 | uncodedbpsk_blers[i] = 1 - pow(uncodedbpsk_ber,k)
11 |
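# --- annotation (not in the original script): with independent bit errors, a
# k-bit block is received correctly only if all k bits are correct, hence
# BLER = 1 - (1 - BER)**k. Worked example: BER = 1e-2, k = 2 gives
# BLER = 1 - 0.99**2 = 0.0199.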
12 | EbNodB_range = list(np.linspace(-4, 8.5, 13))
13 | plt.plot(EbNodB_range, uncodedbpsk_blers)
14 | plt.yscale('log')
15 | plt.xlabel('SNR_RANGE')
16 | plt.ylabel('Block Error Rate')
17 | plt.grid()
18 | plt.show()
19 |
--------------------------------------------------------------------------------
/Keras_test.py:
--------------------------------------------------------------------------------
1 | from keras.models import Sequential
2 | from keras.layers import Dense, Activation
3 | import keras
4 |
5 | # For a single-input model with 2 classes (binary classification):
6 |
7 | model = Sequential()
8 | model.add(Dense(32, activation='relu', input_dim=100))
9 | model.add(Dense(1, activation='sigmoid'))
10 | model.compile(optimizer='rmsprop',
11 | loss='binary_crossentropy',
12 | metrics=['accuracy'])
13 |
14 | # Generate dummy data
15 | import numpy as np
16 | data = np.random.random((1000, 100))
17 | labels = np.random.randint(2, size=(1000, 1))
18 |
19 | # Train the model, iterating on the data in batches of 32 samples
20 | model.fit(data, labels, epochs=10, batch_size=32)
21 |
22 | # For a single-input model with 10 classes (categorical classification):
23 |
24 | model = Sequential()
25 | model.add(Dense(32, activation='relu', input_dim=100))
26 | model.add(Dense(10, activation='softmax'))
27 | model.compile(optimizer='rmsprop',
28 | loss='categorical_crossentropy',
29 | metrics=['accuracy'])
30 |
31 | # Generate dummy data
32 | import numpy as np
33 | data = np.random.random((1000, 100))
34 | labels = np.random.randint(10, size=(1000, 1))
35 |
36 | # Convert labels to categorical one-hot encoding
37 | one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)
38 |
39 | # Train the model, iterating on the data in batches of 32 samples
40 | model.fit(data, one_hot_labels, epochs=10, batch_size=32)
41 |
42 | from keras.models import Sequential
43 | from keras.layers import Dense, Dropout, Activation
44 | from keras.optimizers import SGD
45 |
46 | # Generate dummy data
47 | import numpy as np
48 | x_train = np.random.random((1000, 20))
49 | y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
50 | x_test = np.random.random((100, 20))
51 | y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
52 |
53 | model = Sequential()
54 | # Dense(64) is a fully-connected layer with 64 hidden units.
55 | # in the first layer, you must specify the expected input data shape:
56 | # here, 20-dimensional vectors.
57 | model.add(Dense(64, activation='relu', input_dim=20))
58 | model.add(Dropout(0.5))
59 | model.add(Dense(64, activation='relu'))
60 | model.add(Dropout(0.5))
61 | model.add(Dense(10, activation='softmax'))
62 |
63 | sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
64 | model.compile(loss='categorical_crossentropy',
65 | optimizer=sgd,
66 | metrics=['accuracy'])
67 |
68 | model.fit(x_train, y_train,
69 | epochs=20,
70 | batch_size=128)
71 | score = model.evaluate(x_test, y_test, batch_size=128)
72 |
73 | import numpy as np
74 | import keras
75 | from keras.models import Sequential
76 | from keras.layers import Dense, Dropout, Flatten
77 | from keras.layers import Conv2D, MaxPooling2D
78 | from keras.optimizers import SGD
79 |
80 | # Generate dummy data
81 | x_train = np.random.random((100, 100, 100, 3))
82 | y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
83 | x_test = np.random.random((20, 100, 100, 3))
84 | y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)
85 |
86 | model = Sequential()
87 | # input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
88 | # this applies 32 convolution filters of size 3x3 each.
89 | model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
90 | model.add(Conv2D(32, (3, 3), activation='relu'))
91 | model.add(MaxPooling2D(pool_size=(2, 2)))
92 | model.add(Dropout(0.25))
93 |
94 | model.add(Conv2D(64, (3, 3), activation='relu'))
95 | model.add(Conv2D(64, (3, 3), activation='relu'))
96 | model.add(MaxPooling2D(pool_size=(2, 2)))
97 | model.add(Dropout(0.25))
98 |
99 | model.add(Flatten())
100 | model.add(Dense(256, activation='relu'))
101 | model.add(Dropout(0.5))
102 | model.add(Dense(10, activation='softmax'))
103 |
104 | sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
105 | model.compile(loss='categorical_crossentropy', optimizer=sgd)
106 |
107 | model.fit(x_train, y_train, batch_size=32, epochs=10)
108 | score = model.evaluate(x_test, y_test, batch_size=32)
109 |
110 |
--------------------------------------------------------------------------------
/LearningtoUse.py:
--------------------------------------------------------------------------------
1 | from keras.models import Sequential
2 | from keras.layers import Dense, Activation
3 |
4 | #model = Sequential([Dense(32,units=784), Activation('relu'), Dense(10),Activation('softmax'), ])
5 |
6 | model = Sequential()
7 | model.add(Dense(32,input_shape=(784,)))
8 | model.add(Activation('relu'))
9 |
10 |
--------------------------------------------------------------------------------
/MulSNR.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import keras
3 | from keras.layers import Input, LSTM,Dense,GaussianNoise, Lambda, Dropout, embeddings,Flatten
4 | from keras.models import Model
5 | from keras import regularizers
6 | from keras.layers.normalization import BatchNormalization
7 | from keras.optimizers import Adam, SGD
8 | from keras import backend as K
9 | from keras.utils.np_utils import to_categorical
10 | from math import pow
11 | from numpy import genfromtxt
12 |
13 | # import the uncoded BPSK BLER reference curve
14 | k=2
15 | uncodedbpsk_bers = genfromtxt('data/uncodedbpsk.csv',delimiter=',')
16 | uncodedbpsk_bers = 1 - uncodedbpsk_bers  # per-bit success probability
17 | uncodedbpsk_blers = uncodedbpsk_bers.copy()
18 | for i,uncodedbpsk_ber in enumerate(uncodedbpsk_bers):
19 | uncodedbpsk_blers[i] = 1 - pow(uncodedbpsk_ber,k)
20 |
21 |
22 | #set the random state to generate the same/different train data
23 | from numpy.random import seed
24 | seed(1)
25 | from tensorflow import set_random_seed
26 | set_random_seed(3)
27 |
28 | M = 4
29 | k = np.log2(M)
30 | k = int(k)
31 | emb_k = 4
32 | n_channel = 2
33 | R = k / n_channel
34 | print('M:', M, 'k:', k,'emb_k:',emb_k,'n:', n_channel)
35 | #EbNodB_trains = [-10,-5,0,5,7.5,10,15]
36 | EbNodB_trains = [15,10,7.5,5,0,-5,-10]
37 |
38 | #generating train data
39 | N = 10000
40 | label = np.random.randint(M, size = N)
41 | label_out = label.reshape((-1,1))
42 |
43 | #defining an auto encoder
44 | # Embedding Layer
45 | input_signal = Input( shape = (1, ) )
46 | encoded = embeddings.Embedding(input_dim=M, output_dim = emb_k,input_length= 1 )(input_signal)
47 | encoded1 = Flatten()(encoded)
48 | encoded2 = Dense(M,activation= 'relu')(encoded1)
49 | #encoded2 = LSTM(n_channel, dropout=0.2, recurrent_dropout=0.2)(encoded)
50 | encoded3 = Dense(n_channel, activation= 'linear')(encoded2)
51 | #encoded4 = Lambda(lambda x: np.sqrt(n_channel)* K.l2_normalize(x, axis=1))(encoded3)
52 | encoded4 = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(encoded3)
53 |
54 | EbNodB_train = K.variable([7])
55 | EbNo_train = 10 ** (EbNodB_train / 10.0)
56 | # EbNo_train = 5.01187
57 | channel_out = GaussianNoise(K.sqrt(1 / (2 * R * EbNo_train)))(encoded4)
58 |
59 | decoded = Dense(M, activation='relu')(channel_out)
60 | decoded1 = Dense(M, activation='softmax')(decoded)
61 | #decoded1 = Dense(M, activation= 'sigmoid')(decoded)
62 | # softmax: the decoder must output a probability distribution over the M messages for the sparse categorical cross-entropy loss
63 |
64 | auto_encoder_embedding = Model(input_signal, decoded1)
65 | adam= Adam(lr= 0.01)
66 | auto_encoder_embedding.compile(optimizer= adam,
67 | loss= 'sparse_categorical_crossentropy',
68 | )
69 | print(auto_encoder_embedding.summary())
70 |
71 | for val in EbNodB_trains:
72 | K.set_value(EbNodB_train,[val])
73 | print('EbNodB_train',K.get_value(EbNodB_train))
74 | print('EbNo_train', K.get_value(EbNo_train))
75 | auto_encoder_embedding.fit(label, label_out,
76 | epochs=10,
77 | batch_size=32,
78 | verbose= 2)
79 |
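# --- annotation (not in the original script): EbNodB_train is a Keras
# variable wired symbolically into the GaussianNoise stddev, so K.set_value
# in the loop above changes the channel noise level between fit() calls
# without rebuilding the model -- a simple SNR curriculum from 15 dB down
# to -10 dB.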
80 | #K.set_value(EbNodB_train,[-10])
81 | #auto_encoder_embedding.fit(label, label_out,epochs=23, batch_size=32)
82 |
83 | encoder = Model(input_signal, encoded4)
84 | encoded_input = Input(shape=(n_channel,))
85 |
86 | deco = auto_encoder_embedding.layers[-2](encoded_input)
87 | deco = auto_encoder_embedding.layers[-1](deco)
88 | decoder = Model(encoded_input, deco)
89 |
90 |
91 | #generating test data
92 |
93 | N = 50000
94 | test_label = np.random.randint(M, size=N)
95 | test_label_out = test_label.reshape((-1,1))
96 |
97 | #plotting constellation diagram for embedding
98 | scatter_plot = []
99 | for i in range (0,M):
100 | scatter_plot.append(encoder.predict(np.expand_dims(i, axis=0)))
101 | scatter_plot = np.array(scatter_plot)
102 | print(scatter_plot.shape)
103 |
104 | import matplotlib.pyplot as plt
105 | scatter_plot = scatter_plot.reshape(M, 2, 1)
106 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1])
107 | plt.legend(['embedding_constellation(2,2),emb_k=4'], loc='upper left')
108 | plt.axis((-2.5, 2.5, -2.5, 2.5))
109 | plt.grid()
110 | fig = plt.gcf()
111 | fig.set_size_inches(16,12)
112 | fig.savefig('graph/MulSNRcons(2,2)0318_0.png',dpi=100)
113 | plt.show()
114 |
115 |
116 | # use this function for ploting constellation for higher dimenson like 7-D for (7,4) autoencoder
117 | '''
118 | x_emb = encoder.predict(test_data)
119 | noise_std = np.sqrt(1/(2*R*EbNo_train))
120 | noise = noise_std * np.random.randn(N,n_channel)
121 | x_emb = x_emb + noise
122 | from sklearn.manifold import TSNE
123 | X_embedded = TSNE(learning_rate=700, n_components=2,n_iter=35000, random_state=0, perplexity=60).fit_transform(x_emb)
124 | print (X_embedded.shape)
125 | X_embedded = X_embedded / 7
126 | import matplotlib.pyplot as plt
127 | plt.scatter(X_embedded[:,0],X_embedded[:,1])
128 | #plt.axis((-2.5,2.5,-2.5,2.5))
129 | plt.grid()
130 | plt.show()
131 | '''
132 |
133 | # calculating block error rate for the embedding model
134 | EbNodB_range = list(np.linspace(-4, 8.5 ,26))
135 | ber = [None] * len(EbNodB_range)
136 | for n in range(0, len(EbNodB_range)):
137 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
138 | noise_std = np.sqrt(1 / (2 * R * EbNo))
139 | noise_mean = 0
140 | no_errors = 0
141 | nn = N
142 | noise = noise_std * np.random.randn(nn, n_channel)
143 | encoded_signal = encoder.predict(test_label)
144 | final_signal = encoded_signal + noise
145 | pred_final_signal = decoder.predict(final_signal)
146 | pred_output = np.argmax(pred_final_signal, axis=1)
147 | no_errors = (pred_output != test_label)
148 | no_errors = no_errors.astype(int).sum()
149 | ber[n] = no_errors/nn
150 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
151 |
152 | plt.plot(EbNodB_range, ber )
153 | plt.plot(EbNodB_range, uncodedbpsk_blers,'bo' )
154 | plt.yscale('log')
155 | plt.xlabel('SNR_RANGE')
156 | plt.ylabel('Block Error Rate')
157 | plt.grid()
158 | plt.legend(['Autoencoder_embedding(2,2),emb_k=4',
159 |             'uncodedbpsk'], loc='upper right', ncol=1)
160 |
161 | fig = plt.gcf()
162 | fig.set_size_inches(16,12)
163 | fig.savefig('graph/MulSNR(2,2)0318_0.png',dpi=100)
164 | plt.show()
165 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ML-in-physical-layer
--------------------------------------------------------------------------------
/RayleighChannel.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | class rayleigh_multipath(object):
3 | """a multipath channel with Rayleigh Fading and AWGN"""
4 | def __init__(self, sigma_awgn, sigma_rayleigh, pdp):
5 | self.sigma_awgn = sigma_awgn
6 | self.sigma_rayleigh = sigma_rayleigh
7 | self.pdp = np.array(pdp)
8 | self.l = self.pdp.size - 1
9 | self.update_cir()
10 |
11 | def update_cir(self):
12 | """Generare a new CIR from the PDP with Rayleigh Fading"""
13 | self.cir = np.sqrt(np.array(self.pdp))
14 | randray = np.random.rayleigh(self.sigma_rayleigh,self.cir.size)
15 | self.cir = self.cir * randray
16 |
17 | def awgn(self, symbols):
18 | """add Gaussian White Noise"""
19 | #real_noise = np.random.rand(symbols.size)
20 | #imag_noise = np.random.rand(symbols.size)
21 | noise = np.random.rand(symbols.size)
22 | return symbols + self.sigma_awgn * noise
23 |
24 | def apply_cir(self,symbols):
25 | """convolve the symbols with cir"""
26 | if self.l != 0:
27 | self.old_symbols = symbols[-self.l :]
28 | #apply the cir
29 | symbols = np.convolve(symbols, self.cir)
30 | return symbols
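# --- annotation (not in the original source): a minimal usage sketch of the
# class above; the power-delay profile values are hypothetical.
if __name__ == '__main__':
    chan = rayleigh_multipath(sigma_awgn=0.1, sigma_rayleigh=1.0,
                              pdp=[0.6, 0.3, 0.1])
    rx = chan.awgn(chan.apply_cir(np.ones(100)))
    print(rx.shape)  # (102,) after convolution with the 3-tap CIR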
--------------------------------------------------------------------------------
/Rayleigh_SISO_keras.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import keras
3 | from keras.layers import Input, LSTM, Dense, GaussianNoise, Lambda, Dropout,embeddings, Flatten, Add, Conv1D,Reshape,concatenate
4 | from keras.models import Model
5 | from keras import regularizers
6 | from keras.layers.normalization import BatchNormalization
7 | from keras.optimizers import Adam, SGD
8 | from keras import backend as K
9 | from keras.callbacks import Callback
10 | import pydot
11 | #import graphviz
12 | import matplotlib.pyplot as plt
13 | import tensorflow as tf
14 |
15 | fd = 926
16 | Ts = 1e-6
17 | Ns = 50000
18 |
19 |
20 | def Jakes_Flat(fd, Ts, Ns, t0=0, E0=1, phi_N=0):
21 | '''
22 | Inputs:
23 | fd : Doppler frequency
24 | Ts : sampling period
25 | Ns : number of samples
26 | t0 : initial time
27 | E0 : channel power
28 |     phi_N : initial phase of the maximum Doppler frequency sinusoid
29 | Outputs:
30 | h : complex fading vector
31 | t_state : current time
32 | '''
33 | N0 = 8
34 | N = 4 * N0 + 2
35 | wd = 2 * np.pi * fd
36 | t = t0 + np.asarray([i for i in range(0, Ns)]) * Ts
37 | H = np.ones((2, Ns))
38 | coff = E0 / np.sqrt(2 * N0 + 1)
39 | phi_n = np.asarray([np.pi * i / (N0 + 1) for i in range(1, N0 + 1)])
40 | phi_N = 0
41 | w_n = np.asarray([wd * np.cos(2 * np.pi * i / N) for i in range(1, N0 + 1)])
42 | h_i = np.ones((N0 + 1, Ns))
43 | for i in range(N0):
44 | h_i[i, :] = 2 * np.cos(phi_n[i]) * np.cos(w_n[i] * t)
45 | h_i[N0, :] = np.sqrt(2) * np.cos(phi_N) * np.cos(wd * t)
46 | h_q = np.ones((N0 + 1, Ns))
47 | for i in range(N0):
48 | h_q[i, :] = 2 * np.sin(phi_n[i]) * np.cos(w_n[i] * t)
49 | h_q[N0, :] = np.sqrt(2) * np.sin(phi_N) * np.cos(wd * t)
50 | h_I = coff * np.sum(h_i, 0)
51 | h_Q = coff * np.sum(h_q, 0)
52 | H[0, :] = h_I
53 | H[1, :] = h_Q
54 | return H
55 |
56 | #h = Jakes_Flat(fd, Ts, Ns)
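# --- annotation (not in the original source): Jakes_Flat returns a 2 x Ns
# array: row 0 is the in-phase and row 1 the quadrature component of a flat
# Rayleigh fading process, so e.g.
#   H = Jakes_Flat(926, 1e-6, 1000)
#   envelope_dB = 10 * np.log10(H[0] ** 2 + H[1] ** 2)
# gives the fading envelope in dB over time.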
57 |
58 | #parameters setting
59 | NUM_EPOCHS = 100
60 | BATCH_SIZE = 32
61 | k = 2
62 | M = 2**k
63 | n_channel_c = 2
64 | n_channel_r = n_channel_c * 2
65 | emb_k = M
66 | R = k / n_channel_c
67 | train_data_size=10000
68 | bertest_data_size=50000
69 | EbNodB_train = 7
70 | EbNo_train = 10 ** (EbNodB_train / 10.0)
71 | noise_std= np.sqrt( 1/ (2 * R * EbNo_train))
72 | alpha = K.variable(0.5)
73 | beta = K.variable(0.5)
74 |
75 | class Rayleigh_SISO(object):
76 | """
77 |
78 | """
79 | def __init__(self,ComplexChannel=True,M = 4,n_channel = 2, k = 2,
80 | emb_k=4, EbNodB_train = 7 , train_data_size = 10000,
81 | fd = 926,Ts = 1e-6,Ns = 50000):
82 | assert ComplexChannel in (True, False)
83 | assert M > 1
84 | assert n_channel > 1
85 | assert emb_k > 1
86 | assert k > 1
87 | self.M = M
88 | self.ComplexChannel = ComplexChannel
89 | self.n_channel = n_channel
90 | self.k = k
91 | self.emb_k = emb_k
92 | self.train_data_size = train_data_size
93 | self.EbNodB_train = EbNodB_train
94 | self.R = k/n_channel_c
95 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
96 | self.noise_std = np.sqrt(1 / (2 * R * EbNo_train))
97 | if ComplexChannel== True:
98 | self.n_channel_r = self.n_channel * 2
99 | self.n_channel_c = self.n_channel
100 | if ComplexChannel == False:
101 | self.n_channel_r = self.n_channel
102 | self.n_channel_c = self.n_channel
103 | self.H = K.variable(Jakes_Flat(fd, Ts, Ns))
104 |
105 | def Rayleigh_Channel(self, x, H):
106 | """
107 |
108 | :param x:
109 | :param H:
110 | :return:
111 | """
112 | print('x[:,:,1]',K.shape(x[:,1]))
113 | print('x',K.shape(x))
114 | print('H',K.shape(self.H))
115 | print('H[0,:]', K.shape(self.H[0,:]))
116 | real = H[0,:]*x[:,:,0] - H[1,:]*x[:,:,1]
117 |         imag = H[0,:]*x[:,:,1] + H[1,:]*x[:,:,0]  # imag part of h*x: h_I*x_Q + h_Q*x_I
118 | noise_r = K.random_normal(K.shape(real),
119 | mean=0,
120 | stddev=self.noise_std)
121 | noise_i = K.random_normal(K.shape(imag),
122 | mean=0,
123 | stddev=self.noise_std)
124 | real = Add()([real, noise_r])
125 | imag = Add()([imag, noise_i])
126 | #x = concatenate([real, imag])
127 | x = K.stack([real,imag], axis=2)
128 | print(x.shape)
129 | return x
130 | def Rayleigh_Channel_test(self, x, H):
131 | """
132 |
133 | :param x:
134 | :param H:
135 | :return:
136 | """
137 | print('x_shape',x.shape)
138 | print('x[:,:,1]',K.shape(x[:,1]))
139 | print('x',K.shape(x))
140 | print('H',K.shape(self.H))
141 | print('H[0,:]', K.shape(self.H[0,:]))
142 | real = H[0,:]*x[:,:,0] - H[1,:]*x[:,:,1]
143 |         imag = H[0,:]*x[:,:,1] + H[1,:]*x[:,:,0]  # imag part of h*x: h_I*x_Q + h_Q*x_I
144 | noise_r = K.random_normal(K.shape(real),
145 | mean=0,
146 | stddev=self.noise_std)
147 | noise_i = K.random_normal(K.shape(imag),
148 | mean=0,
149 | stddev=self.noise_std)
150 | real = real+ noise_r
151 | imag = imag+ noise_i
152 | print('realshape',real.shape)
153 | print('imagshape',imag.shape)
154 | x = K.stack([real, imag],axis=2)
155 | x = tf.Session().run(x)
156 | print(x.shape)
157 | return x
158 |
159 | def R2C(self, x):
160 | return x.view(x.size()[0], -1, 2)
161 |
162 | def C2R(self, x):
163 | return x.view(x.size()[0], -1)
164 |
165 | def Initialize(self):
166 | train_label = np.random.randint(self.M, size= ( self.train_data_size, self.M))
167 | train_label_out = train_label.reshape((-1, self.M,1))
168 | input_signal = Input(shape=(self.M,))
169 | encoded = embeddings.Embedding(input_dim=self.M, output_dim=self.emb_k,input_length=self.M)(input_signal)
170 | encoded1 = Conv1D(filters=self.M, kernel_size=1, activation='relu')(encoded)
171 | encoded2 = Conv1D(filters=self.M, kernel_size=1, activation='linear')(encoded1)
172 | encoded3 = LSTM(units=self.n_channel_r, input_shape=(self.M, self.M),return_sequences= True)(encoded2)
173 | encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(encoded3)
174 | #encoded5 = Reshape((self.M*2, self.n_channel_c))(encoded4)
175 |         # transmit the I and Q branches serially
176 | encoded5 = Reshape((-1, 2))(encoded4)
177 | channel_out = Lambda(lambda x:self.Rayleigh_Channel(x,self.H))(encoded5)
178 | decoded = Reshape((self.M,self.n_channel_r),name='pre_reshape')(channel_out)
179 | decoded1 = Conv1D(filters=self.M, kernel_size=1, activation='relu',name='pre_receiver')(decoded)
180 | decoded2 = Conv1D(filters=self.M, kernel_size=1, activation='softmax',name='receiver')(decoded1)
181 |
182 |
183 | self.rayleigh_channel_encoder = Model(inputs=input_signal,
184 | outputs= decoded2
185 | )
186 | adam = Adam(lr=0.01)
187 | self.rayleigh_channel_encoder.compile(optimizer=adam,
188 | loss ='sparse_categorical_crossentropy')
189 | print(self.rayleigh_channel_encoder.summary())
190 | self.encoder = Model(input_signal, encoded5)
191 | #encoded_input = Input(shape=(int(self.n_channel_r*self.M),))
192 |         channel_shape = (self.n_channel_r * self.M) // 2  # integer division: Keras Input needs an int shape
193 | encoded_input = Input(shape = (channel_shape,2,))
194 | deco = self.rayleigh_channel_encoder.get_layer('pre_reshape')(encoded_input)
195 | deco1 = self.rayleigh_channel_encoder.get_layer('pre_receiver')(deco)
196 | deco2 = self.rayleigh_channel_encoder.get_layer('receiver')(deco1)
197 | self.decoder = Model(encoded_input, deco2)
198 | self.rayleigh_channel_encoder.fit(train_label, train_label_out,
199 | epochs=1,
200 | batch_size=16,
201 | verbose=2)
202 |
203 | def Cal_Ber(self, bertest_datasize = 50000,EbNodB_low=-4, EbNodB_high=8, EbNodB_num=26):
204 | """
205 |
206 | :param bertest_datasize:
207 | :return:
208 | """
209 | test_label = np.random.randint(self.M, size=(bertest_datasize, self.M))
210 | EbNodB_range = list(np.linspace(EbNodB_low, EbNodB_high, EbNodB_num))
211 | ber = [None] * len(EbNodB_range)
212 | self.ber = ber
213 | for n in range(0, len(EbNodB_range)):
214 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
215 | noise_std = np.sqrt(1 / (2 * self.R * EbNo))
216 |             nn = self.M * bertest_datasize
217 | print(test_label.shape)
218 | encoded_signal = self.encoder.predict(test_label)
219 | print(encoded_signal.shape)
220 | H = Jakes_Flat(fd=926, Ts=1e-6,Ns=4*2)
221 | final_signal = self.Rayleigh_Channel_test(encoded_signal, H)
222 | print(final_signal.shape)
223 | pred_final_signal = self.decoder.predict(final_signal)
224 | #??
225 | pred_output = np.argmax(pred_final_signal, axis=2)
226 | no_errors = (pred_output != test_label)
227 | no_errors = no_errors.astype(int).sum()
228 | print(no_errors)
229 | ber[n] = no_errors / nn
230 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
231 |         return self.ber
232 |
233 | test = Rayleigh_SISO(ComplexChannel= True, M = 4, n_channel=2,k=2,emb_k=4,
234 | EbNodB_train=7, train_data_size=10000,fd=926, Ts=1e-6,
235 | Ns=4*2)
236 | test.Initialize()
237 | test.Cal_Ber(bertest_datasize= 50000,EbNodB_low=-4, EbNodB_high=8,EbNodB_num=26)
238 |
--------------------------------------------------------------------------------
/Rayleigh_self.py:
--------------------------------------------------------------------------------
1 | #-*- coding: utf-8 -*-
2 |
3 | # -*- coding: utf-8 -*-
4 |
5 | import numpy as np
6 | import keras
7 | import tensorflow as tf
8 | from keras.layers import Input, LSTM, Dense,GaussianNoise, Lambda, Dropout, embeddings,Flatten
9 | from keras.models import Model
10 | from keras import regularizers
11 | from keras.layers.normalization import BatchNormalization
12 | from keras.optimizers import Adam, SGD, RMSprop
13 | from keras import backend as K
14 | from keras.utils.np_utils import to_categorical
15 |
16 | # for reproducible results
17 | from numpy.random import seed
18 | from sklearn.manifold import TSNE
19 | import matplotlib.pyplot as plt
20 | import random
21 | from numpy import sqrt
22 | from numpy import genfromtxt
23 | from math import pow
24 |
25 | #set the random state to generate the same/different train data
26 | from numpy.random import seed
27 | seed(1)
28 | from tensorflow import set_random_seed
29 | set_random_seed(2)
30 |
31 |
32 | class AutoEncoder(object):
33 | """
34 | This is an API for the use of NN of an end to end communication system,
35 | AutoEncoder.Initialize():
36 | Model Building and Training
37 | Draw_Constellation()
38 | Constellation Graph of the transmitted signal
39 |
40 | """
41 | def __init__(self, ComplexChannel = True,CodingMeth = 'Embedding',M = 4,n_channel = 2, k = 2, emb_k=4, EbNodB_train = 7 , train_data_size = 10000):
42 | """
43 |
44 | :param CodingMeth: 'Embedding' or ' Onehot'
45 | :param M: The total number of symbol
46 | :param n_channel: bits of channel
47 | :param k: int(log(M))
48 | :param emb_k: output dimension of the first embedding layer if using the CodingMeth 'Embedding'
49 | :param EbNodB_train: SNR(dB) of the AWGN channel in train process
50 | :param train_data_size: size of the train data
51 | """
52 | seed(1)
53 | from tensorflow import set_random_seed
54 | set_random_seed(3)
55 | assert ComplexChannel in (True, False)
56 | assert CodingMeth in ('Embedding','Onehot')
57 | assert M > 1
58 | assert n_channel > 1
59 | assert emb_k > 1
60 | assert k >1
61 | self.M = M
62 | self.CodingMeth = CodingMeth
63 | self.ComplexChannel = ComplexChannel
64 | self.n_channel = n_channel
65 | if ComplexChannel== True:
66 | self.n_channel_r = self.n_channel * 2
67 | self.n_channel_c = self.n_channel
68 | if ComplexChannel == False:
69 | self.n_channel_r = self.n_channel
70 | self.n_channel_c = self.n_channel
71 | self.emb_k = emb_k
72 | self.k = k
73 | self.R = self.k / float(self.n_channel)
74 | self.train_data_size = train_data_size
75 | self.EbNodB_train = EbNodB_train
76 | self.EbNo_train = 10 ** (self.EbNodB_train / 10.0)
77 | self.noise_std = np.sqrt(1 / (2 * self.R * self.EbNo_train))
78 |
79 |
80 | def Rayleigh_chan(self, x, n_channel):
81 | """
82 |         real-valued channel: apply per-dimension Rayleigh fading and AWGN
83 | :param x:
84 | :return:
85 | """
86 | ch_coeff_vec = [None] * n_channel
87 | for n in range(0, n_channel):
88 | ch_coeff_vec[n] = sqrt(random.gauss(0, 1) ** 2 + random.gauss(0, 1) ** 2) / sqrt(2)
89 | noise = K.random_normal(K.shape(x),
90 | mean=0,
91 | stddev=self.noise_std/sqrt(2))
92 | print (K.shape(x))
93 | x = ch_coeff_vec * x + noise
94 | return x
95 |
96 | def Rayleigh_chantest(self,ber_test_datasize,n_channel):
97 | ch_coeff = []
98 | for i in range(0,n_channel):
99 | ch_coeff_vec = [None] * ber_test_datasize
100 | for n in range(0,ber_test_datasize):
101 | ch_coeff_vec[n] = sqrt(random.gauss(0, 1) ** 2 + random.gauss(0, 1) ** 2) / sqrt(2)
102 | ch_coeff.append(ch_coeff_vec)
103 | ch_coeff = np.asarray(ch_coeff)
104 | ch_coeff = np.transpose(ch_coeff)
105 | return ch_coeff
106 |
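    # --- annotation (not in the original source): with X, Y ~ N(0, 1), the
    # coefficient sqrt(X**2 + Y**2)/sqrt(2) generated above is Rayleigh
    # distributed with E[h**2] = 1, i.e. the fading has unit average power.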
107 | def Initialize(self):
108 | """
109 |
110 | :return:
111 | """
112 |
113 | if self.CodingMeth == 'Embedding':
114 | print("This model used Embedding layer")
115 | #Generating train_data
116 | train_data = np.random.randint(self.M, size=self.train_data_size)
117 | train_data_pre = train_data.reshape((-1,1))
118 | # Embedding Layer
119 | input_signal = Input(shape=(1,))
120 | encoded = embeddings.Embedding(input_dim=self.M, output_dim=self.emb_k, input_length=1)(input_signal)
121 | encoded1 = Flatten()(encoded)
122 | encoded2 = Dense(self.M, activation='relu')(encoded1)
123 | encoded3 = Dense(self.n_channel_r, activation='linear')(encoded2)
124 | encoded4 = Lambda(lambda x: np.sqrt(self.n_channel_c) * K.l2_normalize(x, axis=1))(encoded3)
125 | #encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(encoded3)
126 |
127 | #EbNo_train = 10 ** (self.EbNodB_train / 10.0)
128 | #channel_out = GaussianNoise(np.sqrt(1 / (2 * self.R * EbNo_train)))(encoded4)
129 | channel_out = Lambda(lambda x: self.Rayleigh_chan(x, self.n_channel_r))(encoded4)
130 |
131 | decoded = Dense(self.M, activation='relu')(channel_out)
132 | decoded1 = Dense(self.M, activation='softmax')(decoded)
133 |
134 | self.auto_encoder = Model(input_signal, decoded1)
135 | adam = Adam(lr=0.001)
136 | #rms = RMSprop(lr=0.002)
137 | self.auto_encoder.compile(optimizer=adam,
138 | loss='sparse_categorical_crossentropy',
139 | )
140 | print(self.auto_encoder.summary())
141 | self.auto_encoder.fit(train_data, train_data_pre,
142 | epochs=45,
143 | batch_size=32,
144 | verbose=2)
145 | self.encoder = Model(input_signal, encoded4)
146 | encoded_input = Input(shape=(self.n_channel_r,))
147 |
148 | deco = self.auto_encoder.layers[-2](encoded_input)
149 | deco = self.auto_encoder.layers[-1](deco)
150 | self.decoder = Model(encoded_input, deco)
151 |
152 | """
153 |     The one-hot branch below is unchanged from the AWGN case
154 | """
155 | if self.CodingMeth == 'Onehot':
156 | print("This is the model using Onehot")
157 |
158 | # Generating train_data
159 | train_data = np.random.randint(self.M, size=self.train_data_size)
160 | data = []
161 | for i in train_data:
162 | temp = np.zeros(self.M)
163 | temp[i] = 1
164 | data.append(temp)
165 | train_data = np.array(data)
166 |
167 | input_signal = Input(shape=(self.M,))
168 | encoded = Dense(self.M, activation='relu')(input_signal)
169 | encoded1 = Dense(self.n_channel, activation='linear')(encoded)
170 | encoded2 = Lambda(lambda x: np.sqrt(self.n_channel) * K.l2_normalize(x, axis=1))(encoded1)
171 | """
172 |         K.l2_normalize enforces a second-order (power) constraint
173 | """
174 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
175 | encoded3 = GaussianNoise(np.sqrt(1 / (2 * self.R * EbNo_train)))(encoded2)
176 |
177 | decoded = Dense(self.M, activation='relu')(encoded3)
178 | decoded1 = Dense(self.M, activation='softmax')(decoded)
179 | self.auto_encoder = Model(input_signal, decoded1)
180 | adam = Adam(lr=0.01)
181 | self.auto_encoder.compile(optimizer=adam, loss='categorical_crossentropy')
182 |
183 | print(self.auto_encoder.summary())
184 |
185 | # for tensor board visualization
186 | # tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
187 |         # training the autoencoder
188 |
189 | self.auto_encoder.fit(train_data, train_data,
190 | epochs=45,
191 | batch_size=32,
192 | verbose = 0)
193 |
194 | # saving keras model
195 | from keras.models import load_model
196 |
197 | # if you want to save model then remove below comment
198 | # autoencoder.save('autoencoder_v_best.model')
199 |
200 | # making encoder from full autoencoder
201 | self.encoder = Model(input_signal, encoded2)
202 |
203 | # making decoder from full autoencoder
204 | encoded_input = Input(shape=(self.n_channel,))
205 |
206 | deco = self.auto_encoder.layers[-2](encoded_input)
207 | deco = self.auto_encoder.layers[-1](deco)
208 | self.decoder = Model(encoded_input, deco)
209 |
210 | def Draw_Constellation(self, test_data_size = 1500):
211 | """
212 |
213 |         :param test_data_size: unused in the low-dimensional case; in the high-dimensional case keep it small enough for t-SNE
214 | :return:
215 | """
216 | import matplotlib.pyplot as plt
217 | test_label = np.random.randint(self.M, size=test_data_size)
218 | test_data = []
219 | for i in test_label:
220 | temp = np.zeros(self.M)
221 | temp[i] = 1
222 | test_data.append(temp)
223 | test_data = np.array(test_data)
224 |
225 | if self.n_channel == 2:
226 | scatter_plot = []
227 | if self.CodingMeth == 'Embedding':
228 | print("Embedding,Two Dimension")
229 | for i in range(0, self.M):
230 | scatter_plot.append(self.encoder.predict(np.expand_dims(i, axis=0)))
231 | scatter_plot = np.array(scatter_plot)
232 | if self.CodingMeth == 'Onehot':
233 | print("Onehot,Two Dimension")
234 | for i in range(0, self.M):
235 | temp = np.zeros(self.M)
236 | temp[i] = 1
237 | scatter_plot.append(self.encoder.predict(np.expand_dims(temp, axis=0)))
238 | scatter_plot = np.array(scatter_plot)
239 | scatter_plot = scatter_plot.reshape(self.M, 2, 1)
240 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k) )
241 | plt.legend()
242 | plt.axis((-2.5, 2.5, -2.5, 2.5))
243 | plt.grid()
244 | plt.show()
245 | if self.n_channel > 2 :
246 | if self.CodingMeth == 'Embedding':
247 | x_emb = self.encoder.predict(test_label)
248 | print("Embedding,High Dimension")
249 | if self.CodingMeth == 'Onehot':
250 | x_emb = self.encoder.predict(test_data)
251 | print("Onehot,High Dimension")
252 |
253 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
254 | noise_std = np.sqrt(1 / (2 * self.R * EbNo_train))
255 | noise = noise_std * np.random.randn(test_data_size, self.n_channel)
256 | x_emb = x_emb + noise
257 | X_embedded = TSNE(learning_rate=700, n_components=2, n_iter=35000, random_state=0,
258 | perplexity=60).fit_transform(x_emb)
259 | print(X_embedded.shape)
260 | X_embedded = X_embedded / 7
261 | import matplotlib.pyplot as plt
262 | plt.scatter(X_embedded[:, 0], X_embedded[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k))
263 | # plt.axis((-2.5,2.5,-2.5,2.5))
264 | plt.legend()
265 | plt.grid()
266 | plt.show()
267 |
268 | def Cal_BLER(self, bertest_data_size = 50000, EbNodB_low = -4, EbNodB_high = 8.5, EbNodB_num = 26):
269 | test_label = np.random.randint(self.M, size=bertest_data_size)
270 | test_data = []
271 | for i in test_label:
272 | temp = np.zeros(self.M)
273 | temp[i] = 1
274 | test_data.append(temp)
275 | test_data = np.array(test_data)
276 |
277 | EbNodB_range = list(np.linspace(EbNodB_low, EbNodB_high, EbNodB_num))
278 | ber = [None] * len(EbNodB_range)
279 | self.ber = ber
280 | for n in range(0, len(EbNodB_range)):
281 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
282 | noise_std = np.sqrt(1 / (2 * self.R * EbNo))
283 | noise_mean = 0
284 | no_errors = 0
285 | nn = bertest_data_size
286 | noise = noise_std * np.random.randn(nn, self.n_channel_r)/sqrt(2)
287 | if self.CodingMeth == 'Embedding':
288 | encoded_signal = self.encoder.predict(test_label)
289 | if self.CodingMeth == 'Onehot':
290 | encoded_signal = self.encoder.predict(test_data)
291 | rayleigh_coeff = self.Rayleigh_chantest(nn, self.n_channel_r)
292 | final_signal = rayleigh_coeff * encoded_signal + noise
293 | pred_final_signal = self.decoder.predict(final_signal)
294 | pred_output = np.argmax(pred_final_signal, axis=1)
295 | print('pre_outputshape', pred_output.shape)
296 | print('pred_finalsignalshape', pred_final_signal.shape)
297 | no_errors = (pred_output != test_label)
298 | no_errors = no_errors.astype(int).sum()
299 | ber[n] = no_errors / nn
300 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
301 | self.ber = ber
302 |
303 | """
304 | The following code shows how to apply the AutoEncoder class
305 | """
306 | """
307 | model_test3 = AutoEncoder(CodingMeth='Embedding',M = 16, n_channel=7, k = 4, emb_k=16,EbNodB_train = 7,train_data_size=10000)
308 | model_test3.Initialize()
309 | print("Initialization Finished")
310 | #model_test3.Draw_Constellation()
311 | model_test3.Cal_BLER(bertest_data_size= 70000)
312 | EbNodB_range = list(np.linspace(-4, 8.5, 26))
313 | plt.plot(EbNodB_range, model_test3.ber,'bo')
314 | plt.yscale('log')
315 | plt.xlabel('SNR_RANGE')
316 | plt.ylabel('Block Error Rate')
317 | plt.grid()
318 | plt.show()
319 | """
320 |
321 | EbNodB_range = list(np.linspace(0, 20, 21))
322 | k=2
323 | bers = genfromtxt('data/uncodedbpskrayleigh.csv', delimiter=',')
324 | bers = 1 - bers                  # per-bit success probabilities
325 | blers = bers.copy()              # copy so the loop below does not alias bers
326 | for i, ber in enumerate(bers):
327 |     blers[i] = 1 - pow(ber, k)   # BLER = 1 - (1 - BER)^k for k bits per block
328 | plt.plot(EbNodB_range, blers,label= 'uncodedrayleigh(2,2)')
329 |
330 | EbNodB_train = 7
331 | model_test = AutoEncoder(ComplexChannel=True,CodingMeth='Embedding',
332 | M = 4, n_channel=2, k = 2, emb_k=4,
333 | EbNodB_train = EbNodB_train,train_data_size=10000)
334 | model_test.Initialize()
335 | print("Initialization Finished")
336 | #model_test3.Draw_Constellation()
337 | model_test.Cal_BLER(EbNodB_low=0,EbNodB_high=20,EbNodB_num=21,bertest_data_size= 50000)
338 | EbNodB_range = list(np.linspace(0,20,21))
339 | plt.plot(EbNodB_range, model_test.ber,'bo',label='AErayleigh(2,2)')
340 |
341 | plt.yscale('log')
342 | plt.xlabel('SNR_RANGE')
343 | plt.ylabel('Block Error Rate')
344 | plt.title('realRayleigh_Channel(2,2),PowerConstraint,EbNodB_train:%f'%EbNodB_train)
345 | plt.grid()
346 |
347 | fig = plt.gcf()
348 | fig.set_size_inches(16,12)
349 | fig.savefig('graph/0501/rayleigh_real_dense_BLER_self0.png',dpi=100)
350 | plt.show()
351 |
--------------------------------------------------------------------------------
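
A note on the channel model: the Rayleigh scripts in this repository all simulate the same per-symbol channel, a complex fading coefficient applied to each I/Q pair plus AWGN whose variance follows from Eb/No and the rate R = k / n_channel. A minimal NumPy sketch of that model (an illustration, not repository code; the 1/sqrt(2) factor normalizes E[|h|^2] = 1, whereas the Rayleigh_Channel methods above draw h_R, h_I ~ N(0,1) unscaled):

import numpy as np

def rayleigh_awgn(x_complex, EbNodB, R):
    # apply h * x + n with h ~ CN(0, 1) and per-real-dimension noise std
    EbNo = 10 ** (EbNodB / 10.0)
    noise_std = np.sqrt(1 / (2 * R * EbNo))
    h = (np.random.randn(*x_complex.shape)
         + 1j * np.random.randn(*x_complex.shape)) / np.sqrt(2)
    n = noise_std * (np.random.randn(*x_complex.shape)
                     + 1j * np.random.randn(*x_complex.shape))
    return h * x_complex + n

# e.g. two QPSK-like symbols at 10 dB with R = 1:
y = rayleigh_awgn(np.array([1 + 1j, -1 + 1j]) / np.sqrt(2), EbNodB=10, R=1.0)
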
/Rayleigh_self_CNN.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import keras
3 | from keras.layers import Input, LSTM, Dense, GaussianNoise, Lambda, Dropout,embeddings, Flatten, Add, Conv1D,Reshape,concatenate
4 | from keras.models import Model
5 | from keras import regularizers
6 | from keras.layers.normalization import BatchNormalization
7 | from keras.optimizers import Adam, SGD
8 | from keras import backend as K
9 | from keras.callbacks import Callback
10 | import pydot
11 | #import graphviz
12 | import matplotlib.pyplot as plt
13 | import tensorflow as tf
14 | import random
15 | from numpy import sqrt
16 | from numpy import genfromtxt
17 | from math import pow
18 | #set the random state to generate the same/different train data
19 | from numpy.random import seed
20 | seed(1)
21 | from tensorflow import set_random_seed
22 | set_random_seed(3)
23 |
24 |
25 | #parameters setting
26 | NUM_EPOCHS = 100
27 | BATCH_SIZE = 32
28 | k = 2
29 | M = 2**k
30 | n_channel_c = 2
31 | n_channel_r = n_channel_c * 2
32 | emb_k = M
33 | R = k / n_channel_c
34 | train_data_size=10000
35 | bertest_data_size=50000
36 | EbNodB_train = 7
37 | EbNo_train = 10 ** (EbNodB_train / 10.0)
38 | noise_std= np.sqrt( 1/ (2 * R * EbNo_train))
39 | alpha = K.variable(0.5)
40 | beta = K.variable(0.5)
41 | N_sample = M * n_channel_c
42 |
43 |
44 |
45 | class Rayleigh_SISO(object):
46 |     """
47 |     End-to-end autoencoder over a Rayleigh SISO channel, built from Conv1D and LSTM layers on blocks of M symbols.
48 |     """
49 | def __init__(self,ComplexChannel=True,M = 4,n_channel = 2, k = 2,
50 | emb_k=4, EbNodB_train = 7 , train_data_size = 10000,N_sample = 8):
51 | """
52 |
53 | :param ComplexChannel: True/False, whether to use complex representation in the channel
54 | :param M: number of symbols
55 |         :param n_channel: number of channel uses
56 |         :param k: int(log2(M)), bits per symbol
57 |         :param emb_k: output dimension of the embedding layer
58 |         :param EbNodB_train: SNR (dB) of the channel in the train process
59 |         :param train_data_size: size of the train data
60 | :param N_sample: N_sample = n_channel * M
61 | """
62 | assert ComplexChannel in (True, False)
63 | assert M > 1
64 | assert n_channel > 1
65 | assert emb_k > 1
66 | assert k > 1
67 | self.M = M
68 | self.ComplexChannel = ComplexChannel
69 | self.n_channel = n_channel
70 | self.k = k
71 | self.emb_k = emb_k
72 | self.train_data_size = train_data_size
73 | self.EbNodB_train = EbNodB_train
74 |         self.R = k / n_channel            # use the constructor argument, not the module-level global
75 |         EbNo_train = 10 ** (self.EbNodB_train / 10.0)
76 |         self.noise_std = np.sqrt(1 / (2 * self.R * EbNo_train))
77 | if ComplexChannel== True:
78 | self.n_channel_r = self.n_channel * 2
79 | self.n_channel_c = self.n_channel
80 | if ComplexChannel == False:
81 | self.n_channel_r = self.n_channel
82 | self.n_channel_c = self.n_channel
83 |
84 | self.N_sample = N_sample
85 |
86 | def Rayleigh_Channel(self, x, n_sample):
87 | """
88 |
89 |         :param x: tensor of I/Q pairs, shape (batch, n_sample, 2)
90 |         :param n_sample: number of complex samples per block
91 |         :return: faded signal plus AWGN, same shape as x
92 | """
93 | H_R = np.random.normal(0,1, n_sample)
94 | H_I = np.random.normal(0,1, n_sample)
95 | real = H_R * x[:,:,0] - H_I* x[:,:,1]
96 | imag = H_R * x[:,:,1]+ H_I* x[:,:,0]
97 | print('realshape',K.shape(real))
98 | noise_r = K.random_normal(K.shape(real),
99 | mean=0,
100 | stddev=self.noise_std)
101 | noise_i = K.random_normal(K.shape(imag),
102 | mean=0,
103 | stddev=self.noise_std)
104 | real = Add()([real, noise_r])
105 | imag = Add()([imag, noise_i])
106 | x = K.stack([real, imag], axis=2)
107 | return x
108 |
109 | def Rayleigh_Channel_test(self, x, n_sample, noise_std,test_datasize):
110 | """
111 |
112 |         :param x: encoded signal, shape (batch, n_symbols, 2)
113 |         :param n_sample, noise_std, test_datasize: sizing of the fading coefficients and the AWGN
114 |         :return: faded, noisy signal as a NumPy array
115 | """
116 | #print('x_shape',x.shape)
117 | #print('x[:,:,1]',K.shape(x[:,1]))
118 | #print('x',K.shape(x))
119 | #print('H',K.shape(self.H))
120 | #print('H[0,:]', K.shape(self.H[0,:]))
121 | H_R = np.random.normal(0, 1, n_sample*test_datasize)
122 | H_I = np.random.normal(0, 1, n_sample*test_datasize)
123 | H_R = np.reshape(H_R,(test_datasize,2,-1))
124 | H_I = np.reshape(H_I,(test_datasize,2,-1))
125 | np.random.shuffle(H_R)
126 | np.random.shuffle(H_I)
127 |         real = H_R[:,0]*x[:,:,0] - H_I[:,1]*x[:,:,1]
128 |         imag = H_R[:,0]*x[:,:,1] + H_I[:,1]*x[:,:,0]  # fixed: the cross term must use the real part x[:,:,0]
129 | noise_r = K.random_normal(K.shape(real),
130 | mean=0,
131 | stddev=noise_std)
132 | noise_i = K.random_normal(K.shape(imag),
133 | mean=0,
134 | stddev=noise_std)
135 | real = real+ noise_r
136 | imag = imag+ noise_i
137 | #print('realshape',real.shape)
138 | #print('imagshape',imag.shape)
139 | x = K.stack([real, imag],axis=2)
140 | x = tf.Session().run(x)
141 | #print(x.shape)
142 | return x
143 |
144 | def Initialize(self):
145 | train_label = np.random.randint(self.M, size= ( self.train_data_size, self.M))
146 | train_label_out = train_label.reshape((-1, self.M,1))
147 | input_signal = Input(shape=(self.M,))
148 | encoded = embeddings.Embedding(input_dim=self.M, output_dim=self.emb_k,input_length=self.M)(input_signal)
149 | encoded1 = Conv1D(filters=self.M, kernel_size=1, activation='relu')(encoded)
150 | encoded2 = Conv1D(filters=self.M, kernel_size=1, activation='linear')(encoded1)
151 | encoded3 = LSTM(units=self.n_channel_r, input_shape=(self.M, self.M),return_sequences= True)(encoded2)
152 | encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(encoded3)
153 | #encoded5 = Reshape((self.M*2, self.n_channel_c))(encoded4)
154 |         # the I and Q branches are sent serially as (real, imag) pairs
155 | encoded5 = Reshape((-1, 2))(encoded4)
156 | channel_out = Lambda(lambda x:self.Rayleigh_Channel(x,self.N_sample))(encoded5)
157 | decoded = Reshape((self.M,self.n_channel_r),name='pre_reshape')(channel_out)
158 | decoded1 = Conv1D(filters=self.M, kernel_size=1, activation='relu',name='pre_receiver')(decoded)
159 | decoded2 = Conv1D(filters=self.M, kernel_size=1, activation='softmax',name='receiver')(decoded1)
160 |
161 |
162 | self.rayleigh_channel_encoder = Model(inputs=input_signal,
163 | outputs= decoded2
164 | )
165 | adam = Adam(lr=0.005)
166 | self.rayleigh_channel_encoder.compile(optimizer=adam,
167 | loss ='sparse_categorical_crossentropy')
168 | print(self.rayleigh_channel_encoder.summary())
169 | self.encoder = Model(input_signal, encoded5)
170 | #encoded_input = Input(shape=(int(self.n_channel_r*self.M),))
171 |         channel_shape = (self.n_channel_r * self.M) // 2  # integer division: Keras Input shapes must be ints
172 | encoded_input = Input(shape = (channel_shape,2,))
173 | deco = self.rayleigh_channel_encoder.get_layer('pre_reshape')(encoded_input)
174 | deco1 = self.rayleigh_channel_encoder.get_layer('pre_receiver')(deco)
175 | deco2 = self.rayleigh_channel_encoder.get_layer('receiver')(deco1)
176 | self.decoder = Model(encoded_input, deco2)
177 | self.rayleigh_channel_encoder.fit(train_label, train_label_out,
178 | epochs=5,
179 | batch_size=32,
180 | verbose=2)
181 |
182 | def Cal_Ber(self, bertest_datasize = 50000,EbNodB_low=-4, EbNodB_high=8, EbNodB_num=26):
183 | """
184 |
185 |         :param bertest_datasize: number of test blocks per SNR point
186 | :return:
187 | """
188 | test_label = np.random.randint(self.M, size=(bertest_datasize, self.M))
189 | EbNodB_range = list(np.linspace(EbNodB_low, EbNodB_high, EbNodB_num))
190 | ber = [None] * len(EbNodB_range)
191 | self.ber = ber
192 | for n in range(0, len(EbNodB_range)):
193 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
194 | noise_std = np.sqrt(1 / (2 * self.R * EbNo))
195 |             nn = self.M * bertest_datasize  # total symbols tested (M per block)
196 | #print(test_label.shape)
197 | encoded_signal = self.encoder.predict(test_label)
198 | #print(encoded_signal.shape)
199 | final_signal = self.Rayleigh_Channel_test(x=encoded_signal,n_sample=self.N_sample*2,
200 | noise_std=noise_std,
201 | test_datasize=bertest_datasize)
202 | #print(final_signal.shape)
203 | pred_final_signal = self.decoder.predict(final_signal)
204 |             # argmax over axis 2 picks the predicted symbol at each of the M positions
205 | pred_output = np.argmax(pred_final_signal, axis=2)
206 | no_errors = (pred_output != test_label)
207 | no_errors = no_errors.astype(int).sum()
208 | print(no_errors)
209 | ber[n] = no_errors / nn
210 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
211 |         self.ber = ber
212 |
213 | EbNodB_range = list(np.linspace(0, 20, 21))
214 | k=2
215 | bers = genfromtxt('data/uncodedbpskrayleigh.csv', delimiter=',')
216 | bers = 1 - bers                  # per-bit success probabilities
217 | blers = bers.copy()              # copy so the loop below does not alias bers
218 | for i, ber in enumerate(bers):
219 |     blers[i] = 1 - pow(ber, k)   # BLER = 1 - (1 - BER)^k for k bits per block
220 | plt.plot(EbNodB_range, blers,label= 'uncodedrayleigh(2,2)')
221 |
222 | K.clear_session()
223 | test = Rayleigh_SISO(ComplexChannel= True, M = M, n_channel=n_channel_c,k=k,emb_k=emb_k,
224 | EbNodB_train=EbNodB_train, train_data_size=train_data_size, N_sample=N_sample)
225 | test.Initialize()
226 | test.Cal_Ber(bertest_datasize= 50000,EbNodB_low=0, EbNodB_high=20,EbNodB_num=21)
227 | plt.plot(EbNodB_range, test.ber,'bo')
228 | plt.yscale('log')
229 | plt.xlabel('SNR_RANGE')
230 | plt.ylabel('Block Error Rate')
231 | plt.title('Rayleigh_Channel(2,2),PlanB,EnergyConstraint,EbNodB_train:%f'%EbNodB_train)
232 | plt.grid()
233 |
234 | fig = plt.gcf()
235 | fig.set_size_inches(16,12)
236 | fig.savefig('graph/0501/B_rayleighBLER0.png',dpi=100)
237 | plt.show()
238 |
--------------------------------------------------------------------------------
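
Rayleigh_self_CNN.py builds its transmitter and receiver from Conv1D layers with kernel_size=1 over the M-symbol block. A size-1 kernel is simply a Dense layer applied independently at every sequence position with shared weights; this NumPy check (an illustration of that reading, not repository code) makes the equivalence explicit:

import numpy as np

T, F, filters = 4, 3, 5          # sequence length, input features, Conv1D filters
x = np.random.randn(T, F)        # one input sequence
W = np.random.randn(F, filters)  # a size-1 kernel is just an (F x filters) matrix
b = np.random.randn(filters)

conv1d_k1 = x @ W + b            # all positions transformed at once
dense_per_step = np.stack([x[t] @ W + b for t in range(T)])
assert np.allclose(conv1d_k1, dense_per_step)
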
/Rayleigh_self_Dense.py:
--------------------------------------------------------------------------------
1 | #-*- coding: utf-8 -*-
2 |
3 | # -*- coding: utf-8 -*-
4 |
5 | import numpy as np
6 | import keras
7 | import tensorflow as tf
8 | from keras.layers import Input, LSTM, Dense,GaussianNoise, Lambda,Add, Reshape,Dropout, embeddings,Flatten
9 | from keras.models import Model
10 | from keras import regularizers
11 | from keras.layers.normalization import BatchNormalization
12 | from keras.optimizers import Adam, SGD, RMSprop
13 | from keras import backend as K
14 | from keras.utils.np_utils import to_categorical
15 |
16 | # for reproducing results
17 | from numpy.random import seed
18 | from sklearn.manifold import TSNE
19 | import matplotlib.pyplot as plt
20 | import random
21 | from numpy import sqrt
22 | from numpy import genfromtxt
23 | from math import pow
24 |
25 | #set the random state to generate the same/different train data
26 | from numpy.random import seed
27 | seed(1)
28 | from tensorflow import set_random_seed
29 | set_random_seed(2)
30 |
31 |
32 | class AutoEncoder(object):
33 | """
34 |     An API for an end-to-end communication system built on neural networks.
35 | AutoEncoder.Initialize():
36 | Model Building and Training
37 | Draw_Constellation()
38 | Constellation Graph of the transmitted signal
39 |
40 | """
41 | def __init__(self, ComplexChannel = True,CodingMeth = 'Embedding',M = 4,n_channel = 2, k = 2, emb_k=4, EbNodB_train = 7 , train_data_size = 10000):
42 | """
43 |
44 |         :param CodingMeth: 'Embedding' or 'Onehot'
45 |         :param M: the total number of symbols
46 |         :param n_channel: number of channel uses
47 |         :param k: int(log2(M)), bits per symbol
48 | :param emb_k: output dimension of the first embedding layer if using the CodingMeth 'Embedding'
49 | :param EbNodB_train: SNR(dB) of the AWGN channel in train process
50 | :param train_data_size: size of the train data
51 | """
52 | seed(1)
53 | from tensorflow import set_random_seed
54 | set_random_seed(3)
55 | assert ComplexChannel in (True, False)
56 | assert CodingMeth in ('Embedding','Onehot')
57 | assert M > 1
58 | assert n_channel > 1
59 | assert emb_k > 1
60 | assert k >1
61 | self.M = M
62 | self.CodingMeth = CodingMeth
63 | self.ComplexChannel = ComplexChannel
64 | self.n_channel = n_channel
65 | if ComplexChannel== True:
66 | self.n_channel_r = self.n_channel * 2
67 | self.n_channel_c = self.n_channel
68 | if ComplexChannel == False:
69 | self.n_channel_r = self.n_channel
70 | self.n_channel_c = self.n_channel
71 | self.emb_k = emb_k
72 | self.k = k
73 | self.R = self.k / float(self.n_channel)
74 | self.train_data_size = train_data_size
75 | self.EbNodB_train = EbNodB_train
76 | self.EbNo_train = 10 ** (self.EbNodB_train / 10.0)
77 | self.noise_std = np.sqrt(1 / (2 * self.R * self.EbNo_train))
78 |
79 |
80 | def Rayleigh_Channel(self, x, n_sample):
81 | """
82 |
83 |         :param x: tensor of I/Q pairs, shape (batch, n_sample, 2)
84 |         :param n_sample: number of complex samples per block
85 |         :return: faded signal plus AWGN, same shape as x
86 | """
87 | H_R = np.random.normal(0,1, n_sample)
88 | H_I = np.random.normal(0,1, n_sample)
89 | real = H_R * x[:,:,0] - H_I* x[:,:,1]
90 | imag = H_R * x[:,:,1]+ H_I* x[:,:,0]
91 | print('realshape',K.shape(real))
92 | noise_r = K.random_normal(K.shape(real),
93 | mean=0,
94 | stddev=self.noise_std)
95 | noise_i = K.random_normal(K.shape(imag),
96 | mean=0,
97 | stddev=self.noise_std)
98 | real = Add()([real, noise_r])
99 | imag = Add()([imag, noise_i])
100 | x = K.stack([real, imag], axis=2)
101 | return x
102 |
103 | def Rayleigh_Channel_test(self, x, n_sample, noise_std,test_datasize):
104 | """
105 |
106 |         :param x: encoded signal, shape (batch, n_symbols, 2)
107 |         :param n_sample, noise_std, test_datasize: sizing of the fading coefficients and the AWGN
108 |         :return: faded, noisy signal as a NumPy array
109 | """
110 | #print('x_shape',x.shape)
111 | #print('x[:,:,1]',K.shape(x[:,1]))
112 | #print('x',K.shape(x))
113 | #print('H',K.shape(self.H))
114 | #print('H[0,:]', K.shape(self.H[0,:]))
115 | H_R = np.random.normal(0, 1, n_sample*test_datasize)
116 | H_I = np.random.normal(0, 1, n_sample*test_datasize)
117 | H_R = np.reshape(H_R,(test_datasize,2,-1))
118 | H_I = np.reshape(H_I,(test_datasize,2,-1))
119 | np.random.shuffle(H_R)
120 | np.random.shuffle(H_I)
121 |         real = H_R[:,0]*x[:,:,0] - H_I[:,1]*x[:,:,1]
122 |         imag = H_R[:,0]*x[:,:,1] + H_I[:,1]*x[:,:,0]  # fixed: the cross term must use the real part x[:,:,0]
123 | noise_r = K.random_normal(K.shape(real),
124 | mean=0,
125 | stddev=noise_std)
126 | noise_i = K.random_normal(K.shape(imag),
127 | mean=0,
128 | stddev=noise_std)
129 | real = real+ noise_r
130 | imag = imag+ noise_i
131 | #print('realshape',real.shape)
132 | #print('imagshape',imag.shape)
133 | x = K.stack([real, imag],axis=2)
134 | x = tf.Session().run(x)
135 | #print(x.shape)
136 | return x
137 |
138 | def Initialize(self):
139 | """
140 |
141 | :return:
142 | """
143 |
144 | if self.CodingMeth == 'Embedding':
145 | print("This model used Embedding layer")
146 | #Generating train_data
147 | train_data = np.random.randint(self.M, size=self.train_data_size)
148 | train_data_pre = train_data.reshape((-1,1))
149 | # Embedding Layer
150 | input_signal = Input(shape=(1,))
151 | encoded = embeddings.Embedding(input_dim=self.M, output_dim=self.emb_k, input_length=1)(input_signal)
152 | encoded1 = Flatten()(encoded)
153 | encoded2 = Dense(self.M, activation='relu')(encoded1)
154 | encoded3 = Dense(self.n_channel_r, activation='linear')(encoded2)
155 | #encoded4 = Lambda(lambda x: np.sqrt(self.n_channel_c) * K.l2_normalize(x, axis=1))(encoded3)
156 | encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(encoded3)
157 | encoded5 = Reshape((-1,2))(encoded4)
158 | #EbNo_train = 10 ** (self.EbNodB_train / 10.0)
159 | #channel_out = GaussianNoise(np.sqrt(1 / (2 * self.R * EbNo_train)))(encoded4)
160 | channel_out = Lambda(lambda x: self.Rayleigh_Channel(x, self.n_channel_c))(encoded5)
161 |
162 | decoded = Flatten()(channel_out)
163 | decoded1 = Dense(self.M, activation='relu')(decoded)
164 | decoded2 = Dense(self.M, activation='softmax')(decoded1)
165 |
166 | self.auto_encoder = Model(input_signal, decoded2)
167 | adam = Adam(lr=0.002)
168 | #rms = RMSprop(lr=0.002)
169 | self.auto_encoder.compile(optimizer=adam,
170 | loss='sparse_categorical_crossentropy',
171 | )
172 | print(self.auto_encoder.summary())
173 | self.auto_encoder.fit(train_data, train_data_pre,
174 | epochs=45,
175 | batch_size=32,
176 | verbose=2)
177 | self.encoder = Model(input_signal, encoded5)
178 | print(self.encoder.summary())
179 | encoded_input = Input(shape=(self.n_channel_c,2,))
180 |
181 | deco = self.auto_encoder.layers[-3](encoded_input)
182 | deco = self.auto_encoder.layers[-2](deco)
183 | deco = self.auto_encoder.layers[-1](deco)
184 | self.decoder = Model(encoded_input, deco)
185 | print(self.decoder.summary())
186 |
187 | """
188 |         The one-hot branch below is unchanged from the AWGN model
189 | """
190 | if self.CodingMeth == 'Onehot':
191 | print("This is the model using Onehot")
192 |
193 | # Generating train_data
194 | train_data = np.random.randint(self.M, size=self.train_data_size)
195 | data = []
196 | for i in train_data:
197 | temp = np.zeros(self.M)
198 | temp[i] = 1
199 | data.append(temp)
200 | train_data = np.array(data)
201 |
202 | input_signal = Input(shape=(self.M,))
203 | encoded = Dense(self.M, activation='relu')(input_signal)
204 | encoded1 = Dense(self.n_channel, activation='linear')(encoded)
205 | encoded2 = Lambda(lambda x: np.sqrt(self.n_channel) * K.l2_normalize(x, axis=1))(encoded1)
206 | """
207 |             K.l2_normalize imposes an L2-norm (power) constraint
208 | """
209 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
210 | encoded3 = GaussianNoise(np.sqrt(1 / (2 * self.R * EbNo_train)))(encoded2)
211 |
212 | decoded = Dense(self.M, activation='relu')(encoded3)
213 | decoded1 = Dense(self.M, activation='softmax')(decoded)
214 | self.auto_encoder = Model(input_signal, decoded1)
215 | adam = Adam(lr=0.01)
216 | self.auto_encoder.compile(optimizer=adam, loss='categorical_crossentropy')
217 |
218 | print(self.auto_encoder.summary())
219 |
220 | # for tensor board visualization
221 | # tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
222 | # training the autoencoder
223 |
224 | self.auto_encoder.fit(train_data, train_data,
225 | epochs=45,
226 | batch_size=32,
227 | verbose = 0)
228 |
229 | # saving keras model
230 | from keras.models import load_model
231 |
232 | # to save the model, uncomment the line below
233 | # autoencoder.save('autoencoder_v_best.model')
234 |
235 | # making encoder from full autoencoder
236 | self.encoder = Model(input_signal, encoded2)
237 |
238 | # making decoder from full autoencoder
239 | encoded_input = Input(shape=(self.n_channel,))
240 |
241 | deco = self.auto_encoder.layers[-2](encoded_input)
242 | deco = self.auto_encoder.layers[-1](deco)
243 | self.decoder = Model(encoded_input, deco)
244 |
245 | def Draw_Constellation(self, test_data_size = 1500):
246 | """
247 |
248 |         :param test_data_size: the low-dimension case ignores this parameter; the high-dimension case requires test_data_size not to be too big
249 | :return:
250 | """
251 | import matplotlib.pyplot as plt
252 | test_label = np.random.randint(self.M, size=test_data_size)
253 | test_data = []
254 | for i in test_label:
255 | temp = np.zeros(self.M)
256 | temp[i] = 1
257 | test_data.append(temp)
258 | test_data = np.array(test_data)
259 |
260 | if self.n_channel == 2:
261 | scatter_plot = []
262 | if self.CodingMeth == 'Embedding':
263 | print("Embedding,Two Dimension")
264 | for i in range(0, self.M):
265 | scatter_plot.append(self.encoder.predict(np.expand_dims(i, axis=0)))
266 | scatter_plot = np.array(scatter_plot)
267 | if self.CodingMeth == 'Onehot':
268 | print("Onehot,Two Dimension")
269 | for i in range(0, self.M):
270 | temp = np.zeros(self.M)
271 | temp[i] = 1
272 | scatter_plot.append(self.encoder.predict(np.expand_dims(temp, axis=0)))
273 | scatter_plot = np.array(scatter_plot)
274 | scatter_plot = scatter_plot.reshape(self.M, 2, 1)
275 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k) )
276 | plt.legend()
277 | plt.axis((-2.5, 2.5, -2.5, 2.5))
278 | plt.grid()
279 | plt.show()
280 | if self.n_channel > 2 :
281 | if self.CodingMeth == 'Embedding':
282 | x_emb = self.encoder.predict(test_label)
283 | print("Embedding,High Dimension")
284 | if self.CodingMeth == 'Onehot':
285 | x_emb = self.encoder.predict(test_data)
286 | print("Onehot,High Dimension")
287 |
288 | EbNo_train = 10 ** (self.EbNodB_train / 10.0)
289 | noise_std = np.sqrt(1 / (2 * self.R * EbNo_train))
290 | noise = noise_std * np.random.randn(test_data_size, self.n_channel)
291 | x_emb = x_emb + noise
292 | X_embedded = TSNE(learning_rate=700, n_components=2, n_iter=35000, random_state=0,
293 | perplexity=60).fit_transform(x_emb)
294 | print(X_embedded.shape)
295 | X_embedded = X_embedded / 7
296 | import matplotlib.pyplot as plt
297 | plt.scatter(X_embedded[:, 0], X_embedded[:, 1],label= '%s,(%d, %d), %d'%(self.CodingMeth,self.n_channel, self.k, self.emb_k))
298 | # plt.axis((-2.5,2.5,-2.5,2.5))
299 | plt.legend()
300 | plt.grid()
301 | plt.show()
302 |
303 | def Cal_BLER(self, bertest_data_size = 50000, EbNodB_low = -4, EbNodB_high = 8.5, EbNodB_num = 26):
304 | test_label = np.random.randint(self.M, size=bertest_data_size)
305 | test_data = []
306 | for i in test_label:
307 | temp = np.zeros(self.M)
308 | temp[i] = 1
309 | test_data.append(temp)
310 | test_data = np.array(test_data)
311 |
312 | EbNodB_range = list(np.linspace(EbNodB_low, EbNodB_high, EbNodB_num))
313 | ber = [None] * len(EbNodB_range)
314 | self.ber = ber
315 | for n in range(0, len(EbNodB_range)):
316 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
317 | noise_std = np.sqrt(1 / (2 * self.R * EbNo))
318 | noise_mean = 0
319 | no_errors = 0
320 | nn = bertest_data_size
321 | #noise = noise_std * np.random.randn(nn, self.n_channel_c)
322 | if self.CodingMeth == 'Embedding':
323 | encoded_signal = self.encoder.predict(test_label)
324 | if self.CodingMeth == 'Onehot':
325 | encoded_signal = self.encoder.predict(test_data)
326 | final_signal = self.Rayleigh_Channel_test(x=encoded_signal,n_sample=self.n_channel_r,
327 | noise_std=noise_std,
328 | test_datasize=bertest_data_size)
329 | pred_final_signal = self.decoder.predict(final_signal)
330 | pred_output = np.argmax(pred_final_signal, axis=1)
331 | no_errors = (pred_output != test_label)
332 | no_errors = no_errors.astype(int).sum()
333 | ber[n] = no_errors / nn
334 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
335 | self.ber = ber
336 |
337 | """
338 | The following code shows how to apply the AutoEncoder class
339 | """
340 | """
341 | model_test3 = AutoEncoder(CodingMeth='Embedding',M = 16, n_channel=7, k = 4, emb_k=16,EbNodB_train = 7,train_data_size=10000)
342 | model_test3.Initialize()
343 | print("Initialization Finished")
344 | #model_test3.Draw_Constellation()
345 | model_test3.Cal_BLER(bertest_data_size= 70000)
346 | EbNodB_range = list(np.linspace(-4, 8.5, 26))
347 | plt.plot(EbNodB_range, model_test3.ber,'bo')
348 | plt.yscale('log')
349 | plt.xlabel('SNR_RANGE')
350 | plt.ylabel('Block Error Rate')
351 | plt.grid()
352 | plt.show()
353 | """
354 |
355 | EbNodB_range = list(np.linspace(0, 20, 21))
356 | k=2
357 | bers = genfromtxt('data/uncodedbpskrayleigh.csv', delimiter=',')
358 | bers = 1 - bers                  # per-bit success probabilities
359 | blers = bers.copy()              # copy so the loop below does not alias bers
360 | for i, ber in enumerate(bers):
361 |     blers[i] = 1 - pow(ber, k)   # BLER = 1 - (1 - BER)^k for k bits per block
362 | plt.plot(EbNodB_range, blers,label= 'uncodedrayleigh(2,2)')
363 |
364 | EbNodB_train = 7
365 | model_test = AutoEncoder(ComplexChannel=True,CodingMeth='Embedding',
366 | M = 4, n_channel=2, k = 2, emb_k=4,
367 | EbNodB_train = EbNodB_train,train_data_size=10000)
368 | model_test.Initialize()
369 | print("Initialization Finished")
370 | #model_test3.Draw_Constellation()
371 | model_test.Cal_BLER(EbNodB_low=0,EbNodB_high=20,EbNodB_num=21,bertest_data_size= 50000)
372 | EbNodB_range = list(np.linspace(0,20,21))
373 | plt.plot(EbNodB_range, model_test.ber,'bo',label='AErayleigh(2,2)')
374 |
375 | plt.yscale('log')
376 | plt.xlabel('SNR_RANGE')
377 | plt.ylabel('Block Error Rate')
378 | plt.title('Rayleigh_Channel(2,2),PowerConstraint,EbNodB_train:%f'%EbNodB_train)
379 | plt.grid()
380 |
381 | fig = plt.gcf()
382 | fig.set_size_inches(16,12)
383 | fig.savefig('graph/0501/rayleighBLER2.png',dpi=100)
384 | plt.show()
385 |
--------------------------------------------------------------------------------
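
Rayleigh_Channel_test in the two files above mixes NumPy arrays with Keras backend ops and then evaluates the graph with a fresh tf.Session() at every SNR point. Since its inputs are plain NumPy arrays at test time, the whole transformation can stay in NumPy. A sketch, assuming x has shape (batch, n_symbols, 2) with I in channel 0 and Q in channel 1, and drawing one i.i.d. coefficient per symbol instead of the reshape-and-shuffle indexing used above:

import numpy as np

def rayleigh_channel_test_np(x, noise_std):
    # x: (batch, n_symbols, 2) real/imag pairs; returns the faded, noisy signal
    batch, n_sym, _ = x.shape
    H_R = np.random.normal(0, 1, (batch, n_sym))
    H_I = np.random.normal(0, 1, (batch, n_sym))
    real = H_R * x[:, :, 0] - H_I * x[:, :, 1]   # Re{h * x}
    imag = H_R * x[:, :, 1] + H_I * x[:, :, 0]   # Im{h * x}
    real += noise_std * np.random.randn(batch, n_sym)
    imag += noise_std * np.random.randn(batch, n_sym)
    return np.stack([real, imag], axis=2)
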
/ReproducingResults.py:
--------------------------------------------------------------------------------
1 | """
2 | This reproduces the results of immortal3's repository: https://github.com/immortal3/AutoEncoder-Based-Communication-System
3 | Note that this author uses batch normalization, which makes the autoencoder power-constrained rather than energy-constrained
4 | """
5 | # importing libs
6 | import numpy as np
7 | import tensorflow as tf
8 | from keras.layers import Input, Dense, GaussianNoise
9 | from keras.models import Model
10 | from keras import regularizers
11 | from keras.layers.normalization import BatchNormalization
12 | from keras.optimizers import SGD
13 | import random as rn
14 |
15 | # defining parameters
16 | M = 16
17 | k = np.log2(M)
18 | k = int(k)
19 | print ('M:',M,'k:',k)
20 | #generating data of size N
21 | N = 10000
22 | label = np.random.randint(M,size=N)
23 | # creating one hot encoded vectors
24 | data = []
25 | for i in label:
26 | temp = np.zeros(M)
27 | temp[i] = 1
28 | data.append(temp)
29 |
30 | data = np.array(data)
31 | print (data.shape)
32 | temp_check = [17,23,45,67,89,96,72,250,350]
33 | for i in temp_check:
34 | print(label[i],data[i])
35 |
36 | R = 4/7
37 | n_channel = 7
38 | print (int(k/R))
39 | input_signal = Input(shape=(M,))
40 | encoded = Dense(M, activation='relu')(input_signal)
41 | encoded1 = Dense(n_channel, activation='linear')(encoded)
42 | encoded2 = BatchNormalization()(encoded1)
43 |
44 | EbNo_train = 5.01187  # 7 dB of Eb/No converted to linear scale
45 | encoded3 = GaussianNoise(np.sqrt(1/(2*R*EbNo_train)))(encoded2)
46 |
47 | decoded = Dense(M, activation='relu')(encoded3)
48 | decoded1 = Dense(M, activation='softmax')(decoded)
49 |
50 | autoencoder = Model(input_signal, decoded1)
51 | #sgd = SGD(lr=0.001)
52 | autoencoder.compile(optimizer='adam', loss='categorical_crossentropy')
53 |
54 | print (autoencoder.summary())
55 |
56 | N_val = 1500
57 | val_label = np.random.randint(M,size=N_val)
58 | val_data = []
59 | for i in val_label:
60 | temp = np.zeros(M)
61 | temp[i] = 1
62 | val_data.append(temp)
63 | val_data = np.array(val_data)
64 |
65 |
66 | autoencoder.fit(data, data,
67 | epochs=17,
68 | batch_size=300,
69 | validation_data=(val_data, val_data))
70 |
71 | encoder = Model(input_signal, encoded2)
72 |
73 | encoded_input = Input(shape=(n_channel,))
74 |
75 | deco = autoencoder.layers[-2](encoded_input)
76 | deco = autoencoder.layers[-1](deco)
77 | # create the decoder model
78 | decoder = Model(encoded_input, deco)
79 | N = 45000
80 | test_label = np.random.randint(M, size=N)
81 | test_data = []
82 |
83 | for i in test_label:
84 | temp = np.zeros(M)
85 | temp[i] = 1
86 | test_data.append(temp)
87 |
88 | test_data = np.array(test_data)
89 |
90 | def frange(x, y, jump):
91 | while x < y:
92 | yield x
93 | x += jump
94 |
95 | EbNodB_range = list(frange(-4,8.5,0.5))
96 | ber = [None]*len(EbNodB_range)
97 | for n in range(0,len(EbNodB_range)):
98 | EbNo=10.0**(EbNodB_range[n]/10.0)
99 | noise_std = np.sqrt(1/(2*R*EbNo))
100 | noise_mean = 0
101 | no_errors = 0
102 | nn = N
103 | noise = noise_std * np.random.randn(nn,n_channel)
104 | encoded_signal = encoder.predict(test_data)
105 | final_signal = encoded_signal + noise
106 | pred_final_signal = decoder.predict(final_signal)
107 | pred_output = np.argmax(pred_final_signal,axis=1)
108 | no_errors = (pred_output != test_label)
109 | no_errors = no_errors.astype(int).sum()
110 | ber[n] = no_errors / nn
111 | print ('SNR:',EbNodB_range[n],'BER:',ber[n])
112 |
113 |
114 | import matplotlib.pyplot as plt
115 | plt.plot(EbNodB_range, ber, 'bo',label='Autoencoder(7,4)')
116 | #plt.plot(list(EbNodB_range), ber_theory, 'ro-',label='BPSK BER')
117 | plt.yscale('log')
118 | plt.xlabel('SNR Range')
119 | plt.ylabel('Block Error Rate')
120 | plt.grid()
121 | plt.legend(loc='upper right',ncol = 1)
122 | plt.savefig('AutoEncoder_7_4_BER_matplotlib')
123 | plt.show()
--------------------------------------------------------------------------------
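
The hard-coded EbNo_train = 5.01187 is just 7 dB mapped to linear scale, and the Gaussian noise standard deviation then follows from sigma = sqrt(1 / (2 * R * EbNo)). A quick check of both numbers:

import numpy as np

EbNodB = 7.0
R = 4 / 7                       # k / n_channel for the (7,4) autoencoder
EbNo = 10 ** (EbNodB / 10.0)    # ~5.0119, matching the hard-coded value
noise_std = np.sqrt(1 / (2 * R * EbNo))
print(EbNo, noise_std)          # ~5.0119, ~0.4178
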
/Test.py:
--------------------------------------------------------------------------------
1 | from AutoEncoder_BasicModel import AutoEncoder
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | M = 16
6 | n_channel=7
7 | k = 4
8 | emb_k=16
9 | EbNodB_train = 7
10 | train_data_size=10000
11 | model_test3 = AutoEncoder(CodingMeth='Onehot',M = M, n_channel=n_channel, k = k, emb_k=emb_k, EbNodB_train = EbNodB_train,train_data_size=train_data_size)
12 | model_test3.Initialize()
13 | print("Initialization Finished")
14 | #model_test3.Draw_Constellation()
15 | model_test3.Cal_BLER(bertest_data_size= 50000)
16 | EbNodB_range = list(np.linspace(-4, 8.5, 26))
17 | plt.figure(figsize=(16,12),dpi=100)
18 | plt.plot(EbNodB_range, model_test3.ber, 'bo', label='Autoencoder(7,4),Onehot')
19 | plt.yscale('log')
20 | plt.xlabel('SNR_RANGE')
21 | plt.ylabel('Block Error Rate')
22 | plt.grid()
23 | plt.legend()
24 | #plt.savefig('AutoEncoder,test,Embedding,(%d,%d)emb_k:%d.png'%(n_channel,k, emb_k))
25 | plt.savefig('graph/test2.png')
26 | plt.show()
--------------------------------------------------------------------------------
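
Test.py exercises only the 'Onehot' coding method. Since AutoEncoder exposes the same interface for both methods, the two can be compared on one figure; a sketch using only the API already shown in these scripts (and assuming Cal_BLER's defaults match the linspace(-4, 8.5, 26) test range, as Test.py does):

from AutoEncoder_BasicModel import AutoEncoder
import numpy as np
import matplotlib.pyplot as plt

EbNodB_range = list(np.linspace(-4, 8.5, 26))
for meth in ('Onehot', 'Embedding'):
    model = AutoEncoder(CodingMeth=meth, M=16, n_channel=7, k=4, emb_k=16,
                        EbNodB_train=7, train_data_size=10000)
    model.Initialize()
    model.Cal_BLER(bertest_data_size=50000)
    plt.plot(EbNodB_range, model.ber, label=meth)
plt.yscale('log')
plt.xlabel('SNR_RANGE')
plt.ylabel('Block Error Rate')
plt.legend()
plt.grid()
plt.show()
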
/Train_SNR.py:
--------------------------------------------------------------------------------
1 | from AutoEncoder_BasicModel import AutoEncoder
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 |
5 |
6 | EbNodB_low= -4
7 | EbNodB_high= 8.5
8 | EbNodB_num= 26
9 | M=16
10 | n_channel=7
11 | k=4
12 | emb_k=16
13 | #EbNodB_train=7
14 | train_data_size=10000
15 | bertest_data_size=70000
16 |
17 | #Train_EbNodB_range = list(np.linspace(start=-4, stop=8, num=13))
18 | Train_EbNodB_range = list(np.linspace(start=5, stop=8, num=4))
19 | EbNodB_range = list(np.linspace(start=EbNodB_low, stop=EbNodB_high, num=EbNodB_num))
20 | for train_EnNodB in Train_EbNodB_range:
21 | model_test3 = AutoEncoder(CodingMeth='Embedding', M=M, n_channel=n_channel, k=k, emb_k=emb_k, EbNodB_train=train_EnNodB,
22 | train_data_size=train_data_size)
23 | model_test3.Initialize()
24 | model_test3.Cal_BLER(bertest_data_size=bertest_data_size,EbNodB_low=EbNodB_low ,EbNodB_high=EbNodB_high ,
25 | EbNodB_num=EbNodB_num )
26 | print(model_test3.EbNodB_train)
27 | plt.plot(EbNodB_range, model_test3.ber,label = 'Train_SNR:%f' % (train_EnNodB)
28 | )
29 | #label = 'Train_SNR:%f' % (train_EnNodB)
30 | plt.yscale('log')
31 |
32 | plt.legend(fontsize='xx-small')
33 | plt.title('AutoEncoder,Embedding,(%d,%d)emb_k:%d'%(n_channel,k, emb_k))
34 | plt.xlabel('SNR_RANGE')
35 | plt.ylabel('Block Error Rate')
36 | plt.grid()
37 | plt.savefig('AutoEncoder,SNR_train,Embedding,(%d,%d)emb_k:%d.png'%(n_channel,k, emb_k))
38 | plt.show()
39 |
40 |
--------------------------------------------------------------------------------
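
Train_SNR.py leaves the comparison of training SNRs to the eye. One way to reduce each curve to a single number is the mean BLER over the test range; a sketch along those lines, reusing the parameters defined in Train_SNR.py (the mean-BLER criterion is an assumption, not something the repository defines):

import numpy as np
from AutoEncoder_BasicModel import AutoEncoder

mean_bler = {}   # train SNR (dB) -> mean BLER over the test range
for train_EbNodB in Train_EbNodB_range:
    model = AutoEncoder(CodingMeth='Embedding', M=M, n_channel=n_channel, k=k,
                        emb_k=emb_k, EbNodB_train=train_EbNodB,
                        train_data_size=train_data_size)
    model.Initialize()
    model.Cal_BLER(bertest_data_size=bertest_data_size, EbNodB_low=EbNodB_low,
                   EbNodB_high=EbNodB_high, EbNodB_num=EbNodB_num)
    mean_bler[train_EbNodB] = float(np.mean(model.ber))

best = min(mean_bler, key=mean_bler.get)
print('train SNR with the lowest mean BLER: %.1f dB' % best)
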
/Train_SNR2.py:
--------------------------------------------------------------------------------
1 | from AutoEncoder_BasicModel import AutoEncoder
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 |
5 |
6 | EbNodB_low= -4
7 | EbNodB_high= 8.5
8 | EbNodB_num= 26
9 | M=16
10 | n_channel=7
11 | k=4
12 | emb_k=16
13 | #EbNodB_train=7
14 | train_data_size=10000
15 | bertest_data_size=50000
16 |
17 | Train_EbNodB_range = list(np.linspace(start=-4, stop=8, num=13))
18 | #Train_EbNodB_range = list(np.linspace(start=5, stop=8, num=4))
19 | EbNodB_range = list(np.linspace(start=EbNodB_low, stop=EbNodB_high, num=EbNodB_num))
20 | for train_EnNodB in Train_EbNodB_range:
21 | model_test3 = AutoEncoder(CodingMeth='Embedding', M=M, n_channel=n_channel, k=k, emb_k=emb_k, EbNodB_train=train_EnNodB,
22 | train_data_size=train_data_size)
23 | model_test3.Initialize()
24 | model_test3.Cal_BLER(bertest_data_size=bertest_data_size,EbNodB_low=EbNodB_low ,EbNodB_high=EbNodB_high ,
25 | EbNodB_num=EbNodB_num )
26 | print(model_test3.EbNodB_train)
27 | plt.plot(EbNodB_range, model_test3.ber,label = 'Train_SNR:%f' % (train_EnNodB)
28 | )
29 | #label = 'Train_SNR:%f' % (train_EnNodB)
30 | plt.yscale('log')
31 |
32 | plt.legend(fontsize='xx-small')
33 | plt.title('AutoEncoder,Embedding,(%d,%d)emb_k:%d'%(n_channel,k, emb_k))
34 | plt.xlabel('SNR_RANGE')
35 | plt.ylabel('Block Error Rate')
36 | plt.grid()
37 | plt.savefig('AutoEncoder,SNR_train,Embedding,(%d,%d)emb_k:%d.png'%(n_channel,k, emb_k))
38 | plt.show()
39 |
40 |
41 |
--------------------------------------------------------------------------------
/TwoUser.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Sat Jan 6 15:13:08 2018
5 | Two-user autoencoder from the paper
6 | @author: musicbeer
7 | """
8 |
9 | import torch
10 | from torch import nn
11 | import numpy as np
12 |
13 | NUM_EPOCHS = 100
14 | BATCH_SIZE = 32
15 | USE_CUDA = False
16 | parm1 = 4
17 | parm2 = 4
18 | M = 2 ** parm2 # one-hot coding feature dim
19 | k = np.log2(M)
20 | k = int(k)
21 | n_channel = parm1 # compressed feature dim
22 | R = k / n_channel
23 | CHANNEL_SIZE = M
24 | train_num = 8000
25 | test_num = 50000
26 |
27 |
28 | class RTN(nn.Module):
29 | def __init__(self, in_channels, compressed_dim):
30 | super(RTN, self).__init__()
31 |
32 | self.in_channels = in_channels
33 |
34 | self.encoder1 = nn.Sequential(
35 | nn.Linear(in_channels, in_channels),
36 | nn.Tanh(),
37 | nn.Linear(in_channels, compressed_dim),
38 | )
39 |
40 | self.decoder1 = nn.Sequential(
41 | nn.Linear(compressed_dim, in_channels),
42 | nn.Tanh(),
43 | nn.Linear(in_channels, in_channels)
44 | )
45 | self.encoder2 = nn.Sequential(
46 | nn.Linear(in_channels, in_channels),
47 | nn.Tanh(),
48 | nn.Linear(in_channels, compressed_dim),
49 | )
50 |
51 | self.decoder2 = nn.Sequential(
52 | nn.Linear(compressed_dim, in_channels),
53 | nn.Tanh(),
54 | nn.Linear(in_channels, in_channels)
55 | )
56 |
57 | def encode_signal1(self, x):
58 | x1 = self.encoder1(x)
59 | # x1 = (self.in_channels ** 2) * (x1 / x1.norm(dim=-1)[:, None])
60 | return x1
61 |
62 | def encode_signal2(self, x):
63 | x1 = self.encoder2(x)
64 | # x2 = (self.in_channels ** 2) * (x1 / x1.norm(dim=-1)[:, None])
65 | return x1
66 |
67 | def decode_signal1(self, x):
68 | return self.decoder1(x)
69 |
70 | def decode_signal2(self, x):
71 | return self.decoder2(x)
72 |
73 | def mixedAWGN(self, x1, x2, ebno):
74 | x1 = (self.in_channels ** 0.5) * (x1 / x1.norm(dim=-1)[:, None])
75 | # bit / channel_use
76 | communication_rate = R
77 | # Simulated Gaussian noise.
78 | noise1 = Variable(torch.randn(*x1.size()) / ((2 * communication_rate * ebno) ** 0.5))
79 |
80 | x2 = (self.in_channels ** 0.5) * (x2 / x2.norm(dim=-1)[:, None])
81 | # Simulated Gaussian noise.
82 | noise2 = Variable(torch.randn(*x2.size()) / ((2 * communication_rate * ebno) ** 0.5))
83 | print("############################", ebno)
84 |
85 | signal1 = x1 + noise1 + x2
86 | signal2 = x1 + x2 + noise2
87 | return signal1, signal2
88 |
89 | def forward(self, x1, x2):
90 | x1 = self.encoder1(x1)
91 | x2 = self.encoder2(x2)
92 | # Normalization.
93 | x1 = (self.in_channels ** 0.5) * (x1 / x1.norm(dim=-1)[:, None])
94 | x2 = (self.in_channels ** 0.5) * (x2 / x2.norm(dim=-1)[:, None])
95 |
96 | # 7dBW to SNR.
97 | training_signal_noise_ratio = 5.01187
98 |
99 | # bit / channel_use
100 | communication_rate = R
101 |
102 | # Simulated Gaussian noise.
103 | noise1 = Variable(torch.randn(*x1.size()) / ((2 * communication_rate * training_signal_noise_ratio) ** 0.5))
104 | noise2 = Variable(torch.randn(*x2.size()) / ((2 * communication_rate * training_signal_noise_ratio) ** 0.5))
105 | signal1 = x1 + noise1 + x2
106 | signal2 = x1 + x2 + noise2
107 |
108 | decode1 = self.decoder1(signal1)
109 | decode2 = self.decoder2(signal2)
110 |
111 | return decode1, decode2
112 |
113 |
114 | def frange(x, y, jump):
115 | while x < y:
116 | yield x
117 | x += jump
118 |
119 |
120 | if __name__ == "__main__":
121 | from torch.autograd import Variable
122 | from torch.optim import Adam, RMSprop
123 | import torch.utils.data as Data
124 |
125 | model = RTN(CHANNEL_SIZE, compressed_dim=n_channel)
126 | if USE_CUDA: model = model.cuda()
127 | train_labels1 = (torch.rand(train_num) * CHANNEL_SIZE).long()
128 | train_data1 = torch.sparse.torch.eye(CHANNEL_SIZE).index_select(dim=0, index=train_labels1)
129 | train_labels2 = (torch.rand(train_num) * CHANNEL_SIZE).long()
130 | train_data2 = torch.sparse.torch.eye(CHANNEL_SIZE).index_select(dim=0, index=train_labels2)
131 | train_labels = torch.cat((torch.unsqueeze(train_labels1, 1), torch.unsqueeze(train_labels2, 1)), 1)
132 | train_data = torch.cat((train_data1, train_data2), 1)
133 |
134 | test_labels1 = (torch.rand(test_num) * CHANNEL_SIZE).long()
135 | test_data1 = torch.sparse.torch.eye(CHANNEL_SIZE).index_select(dim=0, index=test_labels1)
136 | test_labels2 = (torch.rand(test_num) * CHANNEL_SIZE).long()
137 | test_data2 = torch.sparse.torch.eye(CHANNEL_SIZE).index_select(dim=0, index=test_labels2)
138 | test_labels = torch.cat((torch.unsqueeze(test_labels1, 1), torch.unsqueeze(test_labels2, 1)), 1)
139 | test_data = torch.cat((test_data1, test_data2), 1)
140 | dataset = Data.TensorDataset(data_tensor=train_data, target_tensor=train_labels)
141 | datasettest = Data.TensorDataset(data_tensor=test_data, target_tensor=test_labels)
142 | train_loader = Data.DataLoader(dataset=dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
143 | test_loader = Data.DataLoader(dataset=datasettest, batch_size=test_num, shuffle=True, num_workers=2)
144 |
145 | optimizer = Adam(model.parameters(), lr=0.001)
146 | loss_fn = nn.CrossEntropyLoss()
147 | a = 0.5
148 | b = 0.5
149 | for epoch in range(NUM_EPOCHS):
150 | for step, (x, y) in enumerate(train_loader):
151 | b_x1 = Variable(x[:, 0:CHANNEL_SIZE])
152 | b_y1 = Variable(x[:, 0:CHANNEL_SIZE])
153 | b_label1 = Variable(y[:, 0])
154 | b_x2 = Variable(x[:, CHANNEL_SIZE:CHANNEL_SIZE * 2])
155 | b_y2 = Variable(x[:, CHANNEL_SIZE:CHANNEL_SIZE * 2])
156 | b_label2 = Variable(y[:, 1])
157 | decoded1, decoded2 = model(b_x1, b_x2)
158 | loss1 = loss_fn(decoded1, b_label1)
159 | loss2 = loss_fn(decoded2, b_label2)
160 | loss = loss1 * a + loss2 * b
161 |
162 | optimizer.zero_grad() # clear gradients for this training step
163 | loss.backward() # backpropagation, compute gradients
164 | optimizer.step()
165 | a = loss1 / (loss1 + loss2)
166 | a = a.data[0]
167 | b = loss2 / (loss2 + loss1) # apply gradients
168 | b = b.data[0]
169 | if step % 100 == 0:
170 | print('Epoch: ', epoch, '| train loss: %.4f, L1:%.4f,L2: %.4f,a: %.4f, (1-a):%.4f' % (
171 | loss.data[0], loss1.data[0], loss2.data[0], a, b))
172 |
173 | import numpy as np
174 |
175 | EbNodB_range = list(frange(0, 15.5, 0.5))
176 | ber1 = [None] * len(EbNodB_range)
177 | ber2 = [None] * len(EbNodB_range)
178 | for n in range(0, len(EbNodB_range)):
179 | EbNo = 10.0 ** (EbNodB_range[n] / 10.0)
180 | for step, (x, y) in enumerate(test_loader):
181 | b_x1 = Variable(x[:, 0:CHANNEL_SIZE])
182 | b_y1 = Variable(x[:, 0:CHANNEL_SIZE])
183 | b_label1 = Variable(y[:, 0])
184 | b_x2 = Variable(x[:, CHANNEL_SIZE:CHANNEL_SIZE * 2])
185 | b_y2 = Variable(x[:, CHANNEL_SIZE:CHANNEL_SIZE * 2])
186 | b_label2 = Variable(y[:, 1])
187 | encoder1 = model.encode_signal1(b_x1)
188 | encoder2 = model.encode_signal2(b_x2)
189 | encoder1, encoder2 = model.mixedAWGN(encoder1, encoder2, EbNo)
190 | decoder1 = model.decode_signal1(encoder1)
191 | decoder2 = model.decode_signal2(encoder2)
192 | pred1 = decoder1.data.numpy()
193 | pred2 = decoder2.data.numpy()
194 | label1 = b_label1.data.numpy()
195 | label2 = b_label2.data.numpy()
196 | pred_output1 = np.argmax(pred1, axis=1)
197 | pred_output2 = np.argmax(pred2, axis=1)
198 | no_errors1 = (pred_output1 != label1)
199 | no_errors2 = (pred_output2 != label2)
200 | no_errors1 = no_errors1.astype(int).sum()
201 | no_errors2 = no_errors2.astype(int).sum()
202 | ber1[n] = no_errors1 / test_num
203 | ber2[n] = no_errors2 / test_num
204 | print('SNR:', EbNodB_range[n], 'BER1:', ber1[n], 'BER2:', ber2[n])
205 |
206 | #
207 | ## plotting ber curve
208 | import matplotlib.pyplot as plt
209 |
210 | plt.plot(EbNodB_range, ber1, 'bo', label='Autoencoder1(4,4)')
211 | plt.yscale('log')
212 | plt.xlabel('SNR Range')
213 | plt.ylabel('Block Error Rate')
214 | plt.grid()
215 | plt.legend(loc='upper right', ncol=1)
216 |
217 | plt.plot(EbNodB_range, ber2, 'bo', label='Autoencoder2(4,4)', color='r')
218 | plt.yscale('log')
219 | plt.xlabel('SNR Range')
220 | plt.ylabel('Block Error Rate')
221 | plt.grid()
222 | plt.legend(loc='upper right', ncol=1)
223 |
224 | #
225 | #
226 | # import matplotlib.pyplot as plt
227 | # test_labels = torch.linspace(0, CHANNEL_SIZE-1, steps=CHANNEL_SIZE).long()
228 | # test_data = torch.sparse.torch.eye(CHANNEL_SIZE).index_select(dim=0, index=test_labels)
229 | # #test_data=torch.cat((test_data, test_data), 1)
230 | # test_data=Variable(test_data)
231 | # x=model.encode_signal1(test_data)
232 | # x = (n_channel**0.5) * (x / x.norm(dim=-1)[:, None])
233 | # plot_data=x.data.numpy()
234 | # plt.scatter(plot_data[:,0],plot_data[:,1],color='r')
235 | # plt.axis((-2.5,2.5,-2.5,2.5))
236 | # #plt.grid()
237 | #
238 | # scatter_plot = []
239 | #
240 | # scatter_plot = np.array(scatter_plot)
241 | # print (scatter_plot.shape)
242 | #
243 | # test_labels = torch.linspace(0, CHANNEL_SIZE-1, steps=CHANNEL_SIZE).long()
244 | # test_data = torch.sparse.torch.eye(CHANNEL_SIZE).index_select(dim=0, index=test_labels)
245 | # #test_data=torch.cat((test_data, test_data), 1)
246 | # test_data=Variable(test_data)
247 | # x=model.encode_signal2(test_data)
248 | # x = (n_channel**0.5) * (x / x.norm(dim=-1)[:, None])
249 | # plot_data=x.data.numpy()
250 | # plt.scatter(plot_data[:,0],plot_data[:,1])
251 | # plt.axis((-2.5,2.5,-2.5,2.5))
252 | # plt.grid()
253 | # # plt.show()
254 | # scatter_plot = []
255 | ##
256 | ## scatter_plot = np.array(scatter_plot)
257 | # plt.show()
--------------------------------------------------------------------------------
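
Both receivers in TwoUser.py observe the superposition of the two transmitted signals plus their own Gaussian noise, which is what mixedAWGN implements. Stripped of the power normalization and the PyTorch plumbing, the channel itself reduces to the following (a NumPy restatement for clarity, not repository code):

import numpy as np

def mixed_awgn(x1, x2, EbNo, R):
    # y_i = x1 + x2 + n_i, with n_i ~ N(0, 1 / (2 * R * EbNo)) per dimension
    noise_std = np.sqrt(1 / (2 * R * EbNo))
    y1 = x1 + x2 + noise_std * np.random.randn(*x1.shape)
    y2 = x1 + x2 + noise_std * np.random.randn(*x2.shape)
    return y1, y2
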
/TwoUserBasicModel.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri, 30 Mar
4 | Two-user autoencoder with complex-valued signals, Keras functional API
5 | """
6 | import numpy as np
7 | import keras
8 | from keras.layers import Input, LSTM, Dense, GaussianNoise, Lambda, Dropout,embeddings, Flatten, Add
9 | from keras.models import Model
10 | from keras import regularizers
11 | from keras.layers.normalization import BatchNormalization
12 | from keras.optimizers import Adam, SGD
13 | from keras import backend as K
14 | from keras.callbacks import Callback
15 | import pydot
16 | import graphviz
17 | import matplotlib.pyplot as plt
18 | from numpy.random import seed
19 |
20 | #define the dynamic loss weights
21 | class Mycallback(Callback):
22 | def __init__(self,a, b):
23 | self.a = a
24 | self.b = b
25 | self.epoch_num = 0
26 | def on_epoch_end(self, epoch, logs={}):
27 | self.epoch_num = self.epoch_num + 1
28 | loss1 = logs.get('u1_receiver_loss')
29 | loss2 = logs.get('u2_receiver_loss')
30 | print("epoch %d" %self.epoch_num)
31 | print("total_loss%f" %logs.get('loss'))
32 | print("u1_loss %f"%(loss1))
33 | print("u2_loss %f" % (loss2))
34 | u1_ls = loss1 / (loss1 + loss2)
35 | u2_ls = 1 - u1_ls
36 | K.set_value(self.a, u1_ls)
37 | K.set_value(self.b, u2_ls)
38 | #print("alpha %f" %K.get_value(alpha))
39 | #print("beta %f" % K.get_value(beta))
40 | print("selfalpha %f" % K.get_value(self.a))
41 | print("selfbeta %f" % K.get_value(self.b))
42 |
43 | class TwoUserEncoder(object):
44 | """
45 |     An API for a two-user autoencoder over a mixed (interfering) AWGN channel
46 | """
47 | def __init__(self, ComplexChannel = True, M=4, n_channel=2,k=2,emb_k=4,u1_EbNodB_train=7,u2_EbNodB_train = 7,train_datasize=10000,alpha=0.5,beta=0.5):
48 | seed(1)
49 | from tensorflow import set_random_seed
50 | set_random_seed(3)
51 |
52 | assert ComplexChannel in (True, False)
53 | assert M > 1
54 | assert n_channel >1
55 | assert emb_k >1
56 | assert k > 1
57 | self.M = M
58 | self.ComplexChannel = ComplexChannel
59 | self.n_channel = n_channel
60 | self.k = k
61 | self.emb_k =emb_k
62 | self.train_datasize = train_datasize
63 | self.u1_EbNodB_train =u1_EbNodB_train
64 | self.u2_EbNodB_train = u2_EbNodB_train
65 | self.u1_EbNo_train = 10 ** (self.u1_EbNodB_train / 10.0)
66 | self.u2_EbNo_train = 10 ** (self.u2_EbNodB_train / 10.0)
67 | self.R = self.k / float(self.n_channel)
68 | if ComplexChannel== True:
69 | self.n_channel_r = self.n_channel * 2
70 | self.n_channel_c = self.n_channel
71 | if ComplexChannel == False:
72 | self.n_channel_r = self.n_channel
73 | self.n_channel_c = self.n_channel
74 | self.u1_noise_std = np.sqrt(1 / (2 * self.R * self.u1_EbNo_train))
75 | self.u2_noise_std = np.sqrt(1 / (2 * self.R * self.u2_EbNo_train))
76 | self.alpha = K.variable(alpha)
77 | self.beta = K.variable(beta)
78 |
79 | # define the function for mixed AWGN channel
80 | def mixed_AWGN(self,x,User='u1'):
81 | assert User in ('u1','u2')
82 | signal = x[0]
83 | interference = x[1]
84 | if User == 'u1':
85 | noise = K.random_normal(K.shape(signal),
86 | mean=0,
87 | stddev=self.u1_noise_std)
88 | if User == 'u2':
89 | noise = K.random_normal(K.shape(signal),
90 | mean=0,
91 | stddev=self.u2_noise_std)
92 | signal = Add()([signal, interference])
93 | signal = Add()([signal, noise])
94 | return signal
95 | def Initialize(self):
96 | """
97 |
98 | :return:
99 | """
100 | # generating train and test data
101 | # user 1
102 | # seed(1)
103 | train_label_s1 = np.random.randint(self.M, size=self.train_datasize)
104 | train_label_out_s1 = train_label_s1.reshape((-1, 1))
105 | # user 2
106 | # seed(2)
107 | train_label_s2 = np.random.randint(self.M, size=self.train_datasize)
108 | train_label_out_s2 = train_label_s2.reshape((-1, 1))
109 |
110 | # Embedding Model for Two User using real signal
111 | # user1's transmitter
112 | u1_input_signal = Input(shape=(1,))
113 | u1_encoded = embeddings.Embedding(input_dim=self.M, output_dim=self.emb_k, input_length=1)(u1_input_signal)
114 | u1_encoded1 = Flatten()(u1_encoded)
115 | u1_encoded2 = Dense(self.M, activation='relu')(u1_encoded1)
116 | u1_encoded3 = Dense(self.n_channel_r, activation='linear')(u1_encoded2)
117 | u1_encoded4 = Lambda(lambda x: np.sqrt(self.n_channel_c) * K.l2_normalize(x, axis=1))(u1_encoded3)
118 | # u1_encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(u1_encoded3)
119 | # user2's transmitter
120 | u2_input_signal = Input(shape=(1,))
121 | u2_encoded = embeddings.Embedding(input_dim=self.M, output_dim=self.emb_k, input_length=1)(u2_input_signal)
122 | u2_encoded1 = Flatten()(u2_encoded)
123 | u2_encoded2 = Dense(self.M, activation='relu')(u2_encoded1)
124 | u2_encoded3 = Dense(self.n_channel_r, activation='linear')(u2_encoded2)
125 | u2_encoded4 = Lambda(lambda x: np.sqrt(self.n_channel_c) * K.l2_normalize(x, axis=1))(u2_encoded3)
126 | # u2_encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(u2_encoded3)
127 |
128 | # mixed AWGN channel
129 | u1_channel_out = Lambda(lambda x: self.mixed_AWGN(x, User='u1'))([u1_encoded4, u2_encoded4])
130 | u2_channel_out = Lambda(lambda x: self.mixed_AWGN(x,User='u2'))([u2_encoded4, u1_encoded4])
131 |
132 | # user1's receiver
133 | u1_decoded = Dense(self.M, activation='relu', name='u1_pre_receiver')(u1_channel_out)
134 | u1_decoded1 = Dense(self.M, activation='softmax', name='u1_receiver')(u1_decoded)
135 |
136 | # user2's receiver
137 | u2_decoded = Dense(self.M, activation='relu', name='u2_pre_receiver')(u2_channel_out)
138 | u2_decoded1 = Dense(self.M, activation='softmax', name='u2_receiver')(u2_decoded)
139 |
140 | self.twouser_autoencoder = Model(inputs=[u1_input_signal, u2_input_signal],
141 | outputs=[u1_decoded1, u2_decoded1])
142 | adam = Adam(lr=0.01)
143 | self.twouser_autoencoder.compile(optimizer=adam,
144 | loss='sparse_categorical_crossentropy',
145 | loss_weights=[self.alpha, self.beta])
146 | print(self.twouser_autoencoder.summary())
147 | self.twouser_autoencoder.fit([train_label_s1, train_label_s2],
148 | [train_label_out_s1, train_label_out_s2],
149 | epochs=45,
150 | batch_size=32,
151 | callbacks=[Mycallback(self.alpha, self.beta)], verbose=0)
152 | # generating the encoder and decoder for user1
153 | self.u1_encoder = Model(u1_input_signal, u1_encoded4)
154 | u1_encoded_input = Input(shape=(self.n_channel_r,))
155 | u1_deco = self.twouser_autoencoder.get_layer("u1_pre_receiver")(u1_encoded_input)
156 | u1_deco = self.twouser_autoencoder.get_layer("u1_receiver")(u1_deco)
157 |         self.u1_decoder = Model(u1_encoded_input, u1_deco)
158 |
159 |         # generating the encoder and decoder for user2 (stored on self so CalBLER can use them)
160 |         self.u2_encoder = Model(u2_input_signal, u2_encoded4)
161 |         u2_encoded_input = Input(shape=(self.n_channel_r,))
162 |         u2_deco = self.twouser_autoencoder.get_layer("u2_pre_receiver")(u2_encoded_input)
163 |         u2_deco = self.twouser_autoencoder.get_layer("u2_receiver")(u2_deco)
164 |         self.u2_decoder = Model(u2_encoded_input, u2_deco)
165 |
166 | def CalBLER(self, bertest_data_size,EbNodB_low = 0, EbNodB_high = 14, EbNodB_num = 28):
167 | """
168 |
169 |         :param bertest_data_size: number of test blocks per SNR point
170 |         :param EbNodB_low: lowest test SNR (dB)
171 |         :param EbNodB_high: highest test SNR (dB)
172 |         :param EbNodB_num: number of SNR points
173 |         :return:
174 | """
175 |         # calculating BLER for the embedding model
176 | test_label_s1 = np.random.randint(self.M, size=bertest_data_size)
177 | test_label_out_s1 = test_label_s1.reshape((-1, 1))
178 | test_label_s2 = np.random.randint(self.M, size=bertest_data_size)
179 | test_label_out_s2 = test_label_s2.reshape((-1, 1))
180 |
181 | EbNodB_range = list(np.linspace(EbNodB_low, EbNodB_high, EbNodB_num))
182 | self.ber = [None] * len(EbNodB_range)
183 | self.u1_ber = [None] * len(EbNodB_range)
184 | self.u2_ber = [None] * len(EbNodB_range)
185 | for n in range(0, len(EbNodB_range)):
186 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
187 | noise_std = np.sqrt(1 / (2 * self.R * EbNo))
188 | noise_mean = 0
189 | no_errors = 0
190 | nn = bertest_data_size
191 | noise1 = noise_std * np.random.randn(nn, self.n_channel_r)
192 | noise2 = noise_std * np.random.randn(nn, self.n_channel_r)
193 | u1_encoded_signal = self.u1_encoder.predict(test_label_s1)
194 | u2_encoded_signal = self.u2_encoder.predict(test_label_s2)
195 | u1_final_signal = u1_encoded_signal + u2_encoded_signal + noise1
196 | u2_final_signal = u2_encoded_signal + u1_encoded_signal + noise2
197 | u1_pred_final_signal = self.u1_decoder.predict(u1_final_signal)
198 | u2_pred_final_signal = self.u2_decoder.predict(u2_final_signal)
199 | u1_pred_output = np.argmax(u1_pred_final_signal, axis=1)
200 | u2_pred_output = np.argmax(u2_pred_final_signal, axis=1)
201 | u1_no_errors = (u1_pred_output != test_label_s1)
202 | u1_no_errors = u1_no_errors.astype(int).sum()
203 | u2_no_errors = (u2_pred_output != test_label_s2)
204 | u2_no_errors = u2_no_errors.astype(int).sum()
205 | self.u1_ber[n] = u1_no_errors / nn
206 | self.u2_ber[n] = u2_no_errors / nn
207 | self.ber[n] = (self.u1_ber[n] + self.u2_ber[n]) / 2
208 | print('U1_SNR:', EbNodB_range[n], 'U1_BER:', self.u1_ber[n])
209 |             print('U2_SNR:', EbNodB_range[n], 'U2_BER:', self.u2_ber[n])
210 | print('SNR:', EbNodB_range[n], 'BER:', self.ber[n])
211 |
--------------------------------------------------------------------------------
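
Mycallback in TwoUserBasicModel.py rebalances the two loss weights after every epoch so the user with the larger loss receives the larger weight: alpha = L1 / (L1 + L2), beta = 1 - alpha. A toy numeric check of the update rule:

loss1, loss2 = 0.9, 0.3
alpha = loss1 / (loss1 + loss2)   # 0.75: user 1 is lagging, so its loss is weighted more
beta = 1 - alpha                  # 0.25
print(alpha, beta)
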
/TwoUserC.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri, 23 Mar
4 | Two-user autoencoder with complex-valued signals
5 | @author Fassy
6 | """
7 | import numpy as np
8 | import keras
9 | from keras.layers import Input, LSTM, Dense, GaussianNoise, Lambda, Dropout,embeddings, Flatten, Add
10 | from keras.models import Model
11 | from keras import regularizers
12 | from keras.layers.normalization import BatchNormalization
13 | from keras.optimizers import Adam, SGD
14 | from keras import backend as K
15 | from keras.callbacks import Callback
16 | import pydot
17 | import graphviz
18 | import matplotlib.pyplot as plt
19 | from numpy.random import seed
20 | seed(1)
21 | from tensorflow import set_random_seed
22 | set_random_seed(3)
23 |
24 | NUM_EPOCHS = 100
25 | BATCH_SIZE = 32
26 | M = 4
27 | k = np.log2(M)
28 | k = int(k)
29 | n_channel = 2
30 | emb_k = 4
31 | R = k / n_channel
32 | train_data_size=10000
33 | bertest_data_size=50000
34 | EbNodB_train = 7
35 | EbNo_train = 10 ** (EbNodB_train / 10.0)
36 | noise_std= np.sqrt( 1/ (2 * R * EbNo_train))
37 | alpha = K.variable(0.5)
38 | beta = K.variable(0.5)
39 |
40 | #define the function for mixed AWGN channel
41 | def mixed_AWGN(x):
42 | signal = x[0]
43 | interference = x[1]
44 | noise = K.random_normal(K.shape(signal),
45 | mean=0,
46 | stddev=noise_std)
47 | signal = Add()([signal, interference])
48 | signal = Add()([signal, noise])
49 | return signal
50 |
51 | #define the dynamic loss weights
52 | class Mycallback(Callback):
53 | def __init__(self,alpha, beta):
54 | self.alpha = alpha
55 | self.beta = beta
56 | self.epoch_num = 0
57 | def on_epoch_end(self, epoch, logs={}):
58 | self.epoch_num = self.epoch_num + 1
59 | loss1 = logs.get('u1_receiver_loss')
60 | loss2 = logs.get('u2_receiver_loss')
61 | print("epoch %d" %self.epoch_num)
62 | print("total_loss%f" %logs.get('loss'))
63 | print("u1_loss %f"%(loss1))
64 | print("u2_loss %f" % (loss2))
65 | a = loss1 / (loss1 + loss2)
66 | b = 1 - a
67 | K.set_value(self.alpha, a)
68 | K.set_value(self.beta, b)
69 | print("alpha %f" %K.get_value(alpha))
70 | print("beta %f" % K.get_value(beta))
71 | print("selfalpha %f" % K.get_value(self.alpha))
72 | print("selfbeta %f" % K.get_value(self.beta))
73 |
74 | #generating train and test data
75 | #user 1
76 | seed(1)
77 | train_label_s1 = np.random.randint(M,size= train_data_size)
78 | train_label_out_s1 = train_label_s1.reshape((-1,1))
79 | test_label_s1 = np.random.randint(M, size= bertest_data_size)
80 | test_label_out_s1 = test_label_s1.reshape((-1,1))
81 | #user 2
82 | seed(2)
83 | train_label_s2 = np.random.randint(M,size= train_data_size)
84 | train_label_out_s2 = train_label_s2.reshape((-1,1))
85 | test_label_s2 = np.random.randint(M, size= bertest_data_size)
86 | test_label_out_s2 = test_label_s2.reshape((-1,1))
87 |
88 | # Embedding Model for Two User using real signal
89 | #user1's transmitter
90 | u1_input_signal = Input(shape=(1,))
91 | u1_encoded = embeddings.Embedding(input_dim=M, output_dim=emb_k, input_length=1)(u1_input_signal)
92 | u1_encoded1 = Flatten()(u1_encoded)
93 | u1_encoded2 = Dense(M, activation= 'relu')(u1_encoded1)
94 | u1_encoded3 = Dense(n_channel, activation= 'linear')(u1_encoded2)
95 | #u1_encoded4 = Lambda(lambda x: np.sqrt(n_channel)*K.l2_normalize(x,axis=1))(u1_encoded3)
96 | u1_encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(u1_encoded3)
97 | #user2's transmitter
98 | u2_input_signal = Input(shape=(1,))
99 | u2_encoded = embeddings.Embedding(input_dim=M, output_dim=emb_k, input_length=1)(u2_input_signal)
100 | u2_encoded1 = Flatten()(u2_encoded)
101 | u2_encoded2 = Dense(M, activation= 'relu')(u2_encoded1)
102 | u2_encoded3 = Dense(n_channel, activation= 'linear')(u2_encoded2)
103 | #u2_encoded4 = Lambda(lambda x: np.sqrt(n_channel)*K.l2_normalize(x,axis=1))(u2_encoded3)
104 | u2_encoded4 = BatchNormalization(momentum=0, center=False, scale=False)(u2_encoded3)
105 |
106 | #mixed AWGN channel
107 | u1_channel_out = Lambda(mixed_AWGN)([u1_encoded4, u2_encoded4])
108 | u2_channel_out = Lambda(mixed_AWGN)([u2_encoded4, u1_encoded4])
109 |
110 | #user1's receiver
111 | u1_decoded = Dense(M, activation='relu',name= 'u1_pre_receiver')(u1_channel_out)
112 | u1_decoded1 = Dense(M, activation= 'softmax', name= 'u1_receiver')(u1_decoded)
113 |
114 | #user2's receiver
115 | u2_decoded = Dense(M, activation='relu',name='u2_pre_receiver')(u2_channel_out)
116 | u2_decoded1 = Dense(M, activation= 'softmax',name='u2_receiver')(u2_decoded)
117 |
118 | twouser_autoencoder = Model(inputs=[u1_input_signal, u2_input_signal],
119 | outputs=[u1_decoded1, u2_decoded1])
120 | adam =Adam(lr = 0.01)
121 | twouser_autoencoder.compile( optimizer=adam,
122 | loss='sparse_categorical_crossentropy',
123 | loss_weights=[alpha, beta])
124 | print(twouser_autoencoder.summary())
125 | twouser_autoencoder.fit( [train_label_s1,train_label_s2],
126 | [train_label_out_s1, train_label_out_s2],
127 | epochs=45,
128 | batch_size=32,
129 | callbacks= [Mycallback(alpha,beta)])
130 |
131 | #from keras.utils.vis_utils import plot_model
132 | #plot_model(twouser_autoencoder, to_file= 'model.png')
133 |
134 | #generating the encoder and decoder for user1
135 | u1_encoder = Model(u1_input_signal, u1_encoded4)
136 | u1_encoded_input = Input(shape= (n_channel,))
137 | u1_deco = twouser_autoencoder.get_layer("u1_pre_receiver")(u1_encoded_input)
138 | u1_deco = twouser_autoencoder.get_layer("u1_receiver")(u1_deco)
139 | u1_decoder = Model(u1_encoded_input, u1_deco)
140 |
141 | #generating the encoder and decoder for user2
142 | u2_encoder = Model(u2_input_signal, u2_encoded4)
143 | u2_encoded_input = Input(shape= (n_channel,))
144 | u2_deco = twouser_autoencoder.get_layer("u2_pre_receiver")(u2_encoded_input)
145 | u2_deco = twouser_autoencoder.get_layer("u2_receiver")(u2_deco)
146 | u2_decoder = Model(u2_encoded_input, u2_deco)
147 |
148 | #plotting the constellation diagram
149 | #user1
150 | u1_scatter_plot = []
151 | for i in range(M):
152 | u1_scatter_plot.append(u1_encoder.predict(np.expand_dims(i,axis=0)))
153 | u1_scatter_plot = np.array(u1_scatter_plot)
154 | u1_scatter_plot = u1_scatter_plot.reshape(M, 2, 1)
155 | plt.scatter(u1_scatter_plot[:, 0], u1_scatter_plot[:, 1], color='red', label='user1(2,2),emb_k=4')
156 | # a single legend covering both users is added after the second scatter below
157 |
158 | u2_scatter_plot = []
159 | for i in range(M):
160 | u2_scatter_plot.append(u2_encoder.predict(np.expand_dims(i,axis=0)))
161 | u2_scatter_plot = np.array(u2_scatter_plot)
162 | u2_scatter_plot = u2_scatter_plot.reshape(M, 2, 1)
163 | plt.scatter(u2_scatter_plot[:, 0], u2_scatter_plot[:, 1], color='blue', label='user2(2,2),emb_k=4')
164 | plt.legend(loc='upper left')
165 | plt.axis((-2.5, 2.5, -2.5, 2.5))
166 | plt.grid()
167 | fig = plt.gcf()
168 | fig.set_size_inches(16,12)
169 | fig.savefig('graph/TwoUsercons(2,2)0326_1.png',dpi=100)
170 | plt.show()
171 |
172 | #calculating BER for embedding
173 | EbNodB_range = list(np.linspace(0, 14 ,28))
174 | ber = [None] * len(EbNodB_range)
175 | u1_ber = [None] * len(EbNodB_range)
176 | u2_ber = [None] * len(EbNodB_range)
177 | for n in range(0, len(EbNodB_range)):
178 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
179 | noise_std = np.sqrt(1 / (2 * R * EbNo))
180 | noise_mean = 0
181 | no_errors = 0
182 | nn = bertest_data_size
183 | noise1 = noise_std * np.random.randn(nn, n_channel)
184 | noise2 = noise_std * np.random.randn(nn, n_channel)
185 | u1_encoded_signal = u1_encoder.predict(test_label_s1)
186 | u2_encoded_signal = u2_encoder.predict(test_label_s2)
187 | u1_final_signal = u1_encoded_signal + u2_encoded_signal + noise1
188 | u2_final_signal = u2_encoded_signal + u1_encoded_signal + noise2
189 | u1_pred_final_signal = u1_decoder.predict(u1_final_signal)
190 | u2_pred_final_signal = u2_decoder.predict(u2_final_signal)
191 | u1_pred_output = np.argmax(u1_pred_final_signal, axis=1)
192 | u2_pred_output = np.argmax(u2_pred_final_signal, axis=1)
193 | u1_no_errors = (u1_pred_output != test_label_s1)
194 | u1_no_errors = u1_no_errors.astype(int).sum()
195 | u2_no_errors = (u2_pred_output != test_label_s2)
196 | u2_no_errors = u2_no_errors.astype(int).sum()
197 | u1_ber[n] = u1_no_errors / nn
198 | u2_ber[n] = u2_no_errors / nn
199 | ber[n] = (u1_ber[n] + u2_ber[n]) / 2
200 | print('U1_SNR:', EbNodB_range[n], 'U1_BER:', u1_ber[n])
201 | print('U2_SNR:', EbNodB_range[n], 'U2_BER:', u2_ber[n])
202 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
203 |
204 | plt.plot(EbNodB_range, u1_ber, label='TwoUserSNR(2,2)U1,emb_k=4')
205 | plt.plot(EbNodB_range, u2_ber, label='TwoUserSNR(2,2)U2,emb_k=4')
206 | plt.plot(EbNodB_range, ber, label='TwoUserSNR(2,2),emb_k=4')
207 |
208 | plt.yscale('log')
209 | plt.xlabel('SNR_RANGE')
210 | plt.ylabel('Block Error Rate')
211 | plt.grid()
212 | plt.legend(loc='upper right',ncol= 1)
213 |
214 | fig = plt.gcf()
215 | fig.set_size_inches(16,12)
216 | fig.savefig('graph/TwoUserSNR(2,2)0326_1.png',dpi=100)
217 | plt.show()
218 |
219 |
220 |
221 |
222 |
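A hedged check of the power constraint: the BatchNormalization layers above (momentum=0, center=False, scale=False) standardize each encoded dimension, so the transmitted symbols should have roughly unit average power per dimension. A minimal sketch reusing u1_encoder and M from this script:

    import numpy as np
    symbols = np.random.randint(M, size=1000)
    x = u1_encoder.predict(symbols)
    print(np.mean(x ** 2, axis=0))  # expected to be close to 1.0 per dimension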
--------------------------------------------------------------------------------
/TwoUserSNRtest.py:
--------------------------------------------------------------------------------
1 | from TwoUserBasicModel import TwoUserEncoder
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 |
5 | M= 4
6 | n_channel = 2
7 | k=2
8 | emb_k=4
9 | u1_EbNodB_train=7
10 | u2_EbNodB_train=7
11 | train_datasize=10000
12 | alpha=0.5
13 | beta=0.5
14 | bertest_data_size=50000
15 | EbNodB_low=0
16 | EbNodB_high=14
17 | EbNodB_num=28
18 | EbNodB_range = list(np.linspace(EbNodB_low, EbNodB_high, EbNodB_num))
19 | testmodel = TwoUserEncoder(ComplexChannel=True,M=M,n_channel=n_channel,
20 | k=k,emb_k=emb_k,
21 | u1_EbNodB_train=u1_EbNodB_train,
22 | u2_EbNodB_train=u2_EbNodB_train,
23 | train_datasize=train_datasize,
24 | alpha=alpha,beta=beta)
25 | testmodel.Initialize()
26 | testmodel.CalBLER(bertest_data_size=bertest_data_size,
27 | EbNodB_low=EbNodB_low,
28 | EbNodB_high=EbNodB_high,
29 | EbNodB_num=EbNodB_num)
30 | plt.plot(EbNodB_range, testmodel.ber, label='TwoUserSNR(2,2),emb_k=4')
31 |
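As written, the script adds the averaged curve to the figure but never renders it. A minimal completion, following the plotting conventions of the other scripts in this repo:

    plt.yscale('log')
    plt.xlabel('SNR Range')
    plt.ylabel('Block Error Rate')
    plt.grid()
    plt.legend(loc='upper right', ncol=1)
    plt.show()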
--------------------------------------------------------------------------------
/_windows/git.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/_windows/laf.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/codestyles/Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/colors.scheme.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/databaseDrivers.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/databaseSettings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/debugger.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/editor.codeinsight.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/editor.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/filetypes.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/github.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/ide.general.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/project.default.xml:
--------------------------------------------------------------------------------
9 | ApexVCS
--------------------------------------------------------------------------------
/rayleigh.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 |
6 | def Jakes_Flat(fd, Ts, Ns, t0=0, E0=1, phi_N=0):
7 | '''
8 | Inputs:
9 | fd : Doppler frequency
10 | Ts : sampling period
11 | Ns : number of samples
12 | t0 : initial time
13 | E0 : channel power
14 | phi_N : initial phase of the maximum Doppler frequency sinusoid
15 | Outputs:
16 | h_I : in-phase component of the complex fading vector
17 | h_Q : quadrature component of the complex fading vector
18 | '''
19 | N0 = 8
20 | N = 4 * N0 + 2
21 | wd = 2 * np.pi * fd
22 | t = t0 + np.asarray([i for i in range(0, Ns)]) * Ts
23 | # tf = t0 + Ns * Ts
24 | coff = E0 / np.sqrt(2 * N0 + 1)
25 |
26 | phi_n = np.asarray([np.pi * i / (N0 + 1) for i in range(1, N0 + 1)])
27 | phi_N = 0
28 | w_n = np.asarray([wd * np.cos(2 * np.pi * i / N) for i in range(1, N0 + 1)])
29 |
30 | h_i = np.ones((N0 + 1, Ns))
31 | for i in range(N0):
32 | h_i[i, :] = 2 * np.cos(phi_n[i]) * np.cos(w_n[i] * t)
33 | h_i[N0, :] = np.sqrt(2) * np.cos(phi_N) * np.cos(wd * t)
34 |
35 | h_q = np.ones((N0 + 1, Ns))
36 | for i in range(N0):
37 | h_q[i, :] = 2 * np.sin(phi_n[i]) * np.cos(w_n[i] * t)
38 | h_q[N0, :] = np.sqrt(2) * np.sin(phi_N) * np.cos(wd * t)
39 |
40 | h_I = coff * np.sum(h_i, 0)
41 | h_Q = coff * np.sum(h_q, 0)
42 |
43 | return h_I, h_Q
44 |
45 |
46 | # test
47 | fd = 926
48 | Ts = 1e-6
49 | Ns = 50000
50 | t0 = 0
51 | E0 = 1
52 | phi_N = 0
53 |
54 | h_I, h_Q = Jakes_Flat(fd, Ts, Ns)
55 |
56 | plt.figure("amplitude")
57 | plt.title("amplitude")
58 | plt.plot(np.sqrt(h_Q * h_Q + h_I * h_I))
59 | plt.yscale('log')
60 | plt.show()
61 | plt.figure("hist")
62 | plt.title("hist")
63 | n, bins, patches = plt.hist(np.sqrt(h_Q * h_Q + h_I * h_I), bins=50, density=False)
64 | plt.show()
65 |
66 |
67 |
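Since h_I and h_Q are built as (approximately) independent zero-mean Gaussian processes with variance E0/2 each, the envelope should be Rayleigh-distributed. A hedged sketch that overlays the theoretical pdf on the simulated histogram, reusing h_I and h_Q from the test above (sigma2 = 0.5 assumes E0 = 1):

    import numpy as np
    import matplotlib.pyplot as plt
    r = np.sqrt(h_I ** 2 + h_Q ** 2)
    plt.hist(r, bins=50, density=True, alpha=0.5, label='simulated envelope')
    sigma2 = 0.5  # per-component variance for E0 = 1
    x = np.linspace(0, r.max(), 200)
    plt.plot(x, (x / sigma2) * np.exp(-x ** 2 / (2 * sigma2)), label='Rayleigh pdf')
    plt.legend()
    plt.show()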
--------------------------------------------------------------------------------
/rayleigh_siso.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import torch
4 | from torch import nn
5 | from torch.autograd import Variable
6 | from torch.optim import Adam
7 | import torch.utils.data as Data
8 | import torch.nn.functional as F
9 | import numpy as np
10 | import matplotlib.pyplot as plt
11 | import copy
12 |
13 | NUM_EPOCHS = 45
14 | BATCH_SIZE = 128
15 | n = 2
16 | k = 4
17 | M = 2 ** k
18 | R = k / n
19 | train_num = 5120
20 | test_num = 10000
21 |
22 | fd = 926
23 | Ts = 1e-6
24 | Ns = 50000
25 |
26 |
27 | def Jakes_Flat(fd, Ts, Ns, t0=0, E0=1, phi_N=0):
28 | '''
29 | Inputs:
30 | fd : Doppler frequency
31 | Ts : sampling period
32 | Ns : number of samples
33 | t0 : initial time
34 | E0 : channel power
35 | phi_N : initial phase of the maximum Doppler frequency sinusoid
36 | Outputs:
37 | H : 2 x Ns array; row 0 holds the in-phase and row 1 the quadrature
38 | component of the complex fading process
39 | '''
40 | N0 = 8
41 | N = 4 * N0 + 2
42 | wd = 2 * np.pi * fd
43 | t = t0 + np.asarray([i for i in range(0, Ns)]) * Ts
44 | H = np.ones((2, Ns))
45 | coff = E0 / np.sqrt(2 * N0 + 1)
46 | phi_n = np.asarray([np.pi * i / (N0 + 1) for i in range(1, N0 + 1)])
47 | phi_N = 0
48 | w_n = np.asarray([wd * np.cos(2 * np.pi * i / N) for i in range(1, N0 + 1)])
49 | h_i = np.ones((N0 + 1, Ns))
50 | for i in range(N0):
51 | h_i[i, :] = 2 * np.cos(phi_n[i]) * np.cos(w_n[i] * t)
52 | h_i[N0, :] = np.sqrt(2) * np.cos(phi_N) * np.cos(wd * t)
53 | h_q = np.ones((N0 + 1, Ns))
54 | for i in range(N0):
55 | h_q[i, :] = 2 * np.sin(phi_n[i]) * np.cos(w_n[i] * t)
56 | h_q[N0, :] = np.sqrt(2) * np.sin(phi_N) * np.cos(wd * t)
57 | h_I = coff * np.sum(h_i, 0)
58 | h_Q = coff * np.sum(h_q, 0)
59 | H[0, :] = h_I
60 | H[1, :] = h_Q
61 | return H
62 |
63 |
64 | def frange(x, y, jump):
65 | while x < y:
66 | yield x
67 | x += jump
68 |
69 |
70 | h = Jakes_Flat(fd, Ts, Ns)
71 |
72 |
73 | class RTN(nn.Module):
74 | def __init__(self, in_channels, compressed_dim):
75 | super(RTN, self).__init__()
76 | self.in_channels = in_channels
77 | self.compressed_dim = compressed_dim
78 | self.encoder = nn.Sequential(
79 | nn.Linear(in_channels, in_channels),
80 | nn.ReLU(),
81 | nn.Linear(in_channels, compressed_dim)
82 | )
83 | self.decoder = nn.Sequential(
84 | nn.Linear(compressed_dim, in_channels),
85 | nn.ReLU(),
86 | nn.Linear(in_channels, in_channels)
87 | )
88 |
89 | def R2C(self, x):
90 | return x.view(x.size()[0], -1, 2)
91 |
92 | def C2R(self, x):
93 | return x.view(x.size()[0], -1)
94 |
95 | def channel(self, x, H):
96 | H = Variable(torch.from_numpy(H).float())
97 | real = H[0] * x[:, :, 0] - H[1] * x[:, :, 1]
98 | real = torch.unsqueeze(real, 2)
99 | imag = H[0] * x[:, :, 1] + H[1] * x[:, :, 0]
100 | imag = torch.unsqueeze(imag, 2)
101 | return torch.cat([real, imag], 2)
102 |
103 | def encode_signal(self, x):
104 | return self.encoder(x)
105 |
106 | def decode_signal(self, x):
107 | return self.decoder(x)
108 |
109 | def normalization(self, x):
110 | x = (self.compressed_dim ** 0.5) * (x / x.norm(dim=-1)[:, None])
111 | return x
112 |
113 | def AWGN(self, x, ebno):
114 | communication_rate = R
115 | noise = Variable(torch.randn(*x.size()) / ((2 * communication_rate * ebno) ** 0.5))
116 | x += noise
117 | return x
118 |
119 | def forward(self, x):
120 | x = self.encoder(x)
121 | x = self.normalization(x)
122 | x = self.R2C(x)
123 | H = h[:, np.random.randint(0, Ns)]
124 | x = self.channel(x, H)
125 | training_signal_noise_ratio = 7 # dB
126 | training_signal_noise_ratio = 10.0 ** (training_signal_noise_ratio / 10.0)
127 | communication_rate = R
128 | noise = Variable(torch.randn(*x.size()) / ((2 * communication_rate * training_signal_noise_ratio) ** 0.5))
129 | x += noise
130 | x = self.C2R(x)
131 | x = self.decoder(x)
132 | return x
133 |
134 |
135 | if __name__ == "__main__":
136 | model = RTN(M, n)
137 | train_labels = (torch.rand(train_num) * M).long()
138 | train_data = torch.sparse.torch.eye(M).index_select(dim=0,
139 | index=train_labels)
140 | test_labels = (torch.rand(test_num) * M).long()
141 | test_data = torch.sparse.torch.eye(M).index_select(dim=0,
142 | index=test_labels)
143 | dataset = Data.TensorDataset(data_tensor=train_data,
144 | target_tensor=train_labels)
145 | datasettest = Data.TensorDataset(data_tensor=test_data,
146 | target_tensor=test_labels)
147 | train_loader = Data.DataLoader(dataset=dataset,
148 | batch_size=BATCH_SIZE,
149 | shuffle=True,
150 | num_workers=2)
151 | test_loader = Data.DataLoader(dataset=datasettest,
152 | batch_size=test_num,
153 | shuffle=True,
154 | num_workers=2)
155 | optimizer = Adam(model.parameters(), lr=0.001)
156 | loss_fn = nn.CrossEntropyLoss()
157 | for epoch in range(NUM_EPOCHS):
158 | for step, (x, y) in enumerate(train_loader):
159 | b_x = Variable(x)
160 | b_y = Variable(x)
161 | b_label = Variable(y)
162 | decoded = model(b_x)
163 | loss = loss_fn(decoded, b_label)
164 | optimizer.zero_grad()
165 | loss.backward()
166 | optimizer.step()
167 | if step % 100 == 0:
168 | print('Epoch: ', epoch, '| train loss: %.4f' % loss.data[0])
169 |
170 | if 1:
171 | EbNodB_range = list(frange(-4, 8.5, 0.5))
172 | ber = [None] * len(EbNodB_range)
173 | for i in range(0, len(EbNodB_range)):
174 | EbNo = 10.0 ** (EbNodB_range[i] / 10.0)
175 | for step, (x, y) in enumerate(test_loader):
176 | b_x = Variable(x)
177 | b_y = Variable(x)
178 | b_label = Variable(y)
179 | encoder = model.encode_signal(b_x)
180 | encoder = model.normalization(encoder)
181 | encoder = model.R2C(encoder)
182 | H = h[:, np.random.randint(0, Ns)]
183 | encoder = model.channel(encoder, H)
184 | encoder = model.AWGN(encoder, EbNo)
185 | encoder = model.C2R(encoder)
186 | decoder = model.decode_signal(encoder)
187 | pred = decoder.data.numpy()
188 | label = b_label.data.numpy()
189 | pred_output = np.argmax(pred, axis=1)
190 | no_errors = (pred_output != label)
191 | no_errors = no_errors.astype(int).sum()
192 | ber[i] = no_errors / test_num
193 | print('SNR:', EbNodB_range[i], 'BLER:', ber[i])
194 | plt.plot(EbNodB_range, ber, 'bo', label='Autoencoder(%d,%d)' % (n, k))
195 | plt.yscale('log')
196 | plt.xlabel('SNR Range')
197 | plt.ylabel('Block Error Rate')
198 | plt.grid()
199 | plt.legend(loc='upper right', ncol=1)
200 | plt.show()
201 |
202 | # test_labels = torch.linspace(0, M-1, steps=M).long()
203 | # test_data = torch.sparse.torch.eye(M).index_select(dim=0, index=test_labels)
204 | # test_data = Variable(test_data)
205 | # x = model.encode_signal(test_data)
206 | # x = model.normalization(x)
207 | # plot_data = x.data.numpy()
208 |
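RTN.channel implements a single-tap complex multiply on (re, im) pairs: real = H[0]*x_re - H[1]*x_im and imag = H[0]*x_im + H[1]*x_re. A quick NumPy cross-check of that arithmetic (the tap and symbol values here are arbitrary):

    import numpy as np
    H = np.array([0.3, -0.8])                  # fading tap as [real, imag]
    x = np.array([[1.0, 2.0], [0.5, -1.5]])    # two symbols as (re, im) pairs
    y_re = H[0] * x[:, 0] - H[1] * x[:, 1]
    y_im = H[0] * x[:, 1] + H[1] * x[:, 0]
    yc = (H[0] + 1j * H[1]) * (x[:, 0] + 1j * x[:, 1])
    assert np.allclose(y_re, yc.real) and np.allclose(y_im, yc.imag)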
--------------------------------------------------------------------------------
/readme.txt:
--------------------------------------------------------------------------------
1 | Use Keras to classify the provided data.
2 | Task description:
3 | https://challenger.ai/competition/trendsense
4 | Requirement 1:
5 | Classify with keras and push the classification accuracy as high as possible.
6 | Mind the dataset split (train-val-test); you can design fairly simple network models to observe over-/under-fitting and do model selection.
7 | Mind the data preprocessing, network architecture, activation functions and loss functions.
8 |
9 | Requirement 2:
10 | For your final model, generate a submission file on the test data (result_lvxiaoxin.csv); it can be scored once at the end.
11 |
12 | Suggestions:
13 | Learn pandas and sklearn; simple comparisons between different models (XGBoost, GBDT, MLP, etc.) are worthwhile.
14 | Related books: Statistical Learning Methods (Li Hang), PRML, Deep Learning.
15 | Avoid matlab as much as possible!!!
16 |
17 | Files:
18 | https://pan.baidu.com/s/1pKFErrl
19 |
20 | keras-cn:
21 | https://keras-cn.readthedocs.io/en/latest/
22 |
23 |
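A minimal sketch of the train/validation split the readme asks for, assuming the column layout used by reportGBDT.py below (label/id/era/weight plus feature columns):

    import pandas as pd
    from sklearn.model_selection import train_test_split
    data = pd.read_csv('stock_train_data_20171111.csv')
    y = data['label']
    X = data.drop(['label', 'id', 'era', 'weight'], axis=1)
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)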
--------------------------------------------------------------------------------
/reportGBDT.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from sklearn.ensemble import GradientBoostingClassifier
3 | from sklearn.ensemble import BaggingClassifier
4 | traindata = pd.read_csv('stock_train_data_20171111.csv')
5 | testdata = pd.read_csv('stock_test_data_20171111.csv')
6 | X_train=traindata[:]   # copy the frame, then drop the non-feature columns
7 | del X_train['label']
8 | del X_train['id']
9 | del X_train['era']
10 | del X_train['weight']
11 | y_train=traindata['label']
12 | X_test=testdata[:]
13 | del X_test['id']
14 | weight=traindata['weight'].values
15 | bagging_model = BaggingClassifier(GradientBoostingClassifier(random_state=10),
16 | max_samples=0.5, max_features=0.5)
17 | bagging_model.fit(X_train, y_train, sample_weight=weight)
18 | fitted_test = bagging_model.predict_proba(X_test)[:, 1]
19 | save = pd.DataFrame({'id':testdata['id'],'proba':fitted_test})
20 | save.to_csv('resultGBDT.csv',index=False,sep=',')
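Before writing the submission it may be worth estimating the ensemble's accuracy locally; a hedged sketch using 3-fold cross-validation on the same features (the cv value is an arbitrary choice):

    from sklearn.model_selection import cross_val_score
    scores = cross_val_score(bagging_model, X_train, y_train, cv=3)
    print(scores.mean(), scores.std())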
--------------------------------------------------------------------------------
/reportmlp.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import pandas as pd
3 | import numpy as np
4 | from keras.models import Sequential
5 | from keras.layers import Dense, Dropout, Activation
6 | from keras.utils import np_utils
7 | from keras.layers.normalization import BatchNormalization
8 | from sklearn import preprocessing
9 | traindata = pd.read_csv('stock_train_data_20171111.csv')
10 | testdata = pd.read_csv('stock_test_data_20171111.csv')
11 | X_train=traindata[:]
12 |
13 | del X_train['label']
14 | del X_train['id']
15 | del X_train['era']
16 | del X_train['weight']
17 | del X_train['group1']
18 | del X_train['group2']
19 | del X_train['code_id']
20 | weight=traindata['weight'].values
21 | y_train=traindata['label']
22 | X_test=testdata[:]
23 | del X_test['id']
24 | del X_test['group1']
25 | del X_test['group2']
26 | del X_test['code_id']
27 | X_train=X_train.values
28 | X_test=X_test.values
29 | Y_train=y_train.values
30 | scaler = preprocessing.StandardScaler().fit(X_train)
31 | X_train=scaler.transform(X_train)
32 | X_test=scaler.transform(X_test)
33 | print (X_train.shape)
34 | print (y_train.shape)
35 | print (X_test.shape)
36 | print('preproc.....')
37 | weight=weight/np.mean(weight)
38 | batch_size = 64 # mini-batch size
39 | nb_epoch = 10 # number of training epochs
40 | nb_classes=2
41 | Y_train = np_utils.to_categorical(y_train, nb_classes)
42 | print('Y_train shape:', Y_train.shape)
43 | print('build model')
44 | model = Sequential()
45 | model.add(Dense(256, input_shape=(98,))) # 98 input features, 256 hidden units
46 | model.add(BatchNormalization())
47 | model.add(Activation('relu')) # activation function
48 | model.add(Dropout(0.5))
49 | model.add(Dense(512))
50 | model.add(Activation('relu')) # activation function
51 | model.add(Dropout(0.5))
52 | model.add(Dense(nb_classes))
53 | model.add(Activation('softmax'))
54 | model.compile(loss='binary_crossentropy',
55 | optimizer='adam',
56 | metrics=['accuracy'],
57 | )
58 | print('train')
59 | history = model.fit(X_train, Y_train,
60 | nb_epoch=nb_epoch, batch_size=batch_size,shuffle=True,
61 | verbose=1, validation_split=0.2,sample_weight=weight)
62 | print('predicted')
63 | fitted_test = model.predict_proba(X_test)[:, 1]
64 | save = pd.DataFrame({'id':testdata['id'],'proba':fitted_test})
65 | save.to_csv('result.csv',index=False,sep=',')
66 |
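One Keras pitfall in this script: the model ends in a 2-unit softmax with one-hot targets but is compiled with binary_crossentropy, which treats the two output units as independent sigmoid-style targets and can make the reported accuracy misleading. The conventional pairing would look like this (a suggestion, not the author's choice):

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])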
--------------------------------------------------------------------------------
/strange.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import keras
3 | from keras.layers import Input, LSTM,Dense,GaussianNoise, Lambda, Dropout, embeddings,Flatten
4 | from keras.models import Model
5 | from keras import regularizers
6 | from keras.layers.normalization import BatchNormalization
7 | from keras.optimizers import Adam, SGD
8 | from keras import backend as K
9 | from keras.utils.np_utils import to_categorical
10 |
11 | #set the random state to generate the same/different train data
12 | from numpy.random import seed
13 | seed(1)
14 | from tensorflow import set_random_seed
15 | set_random_seed(3)
16 |
17 | M = 4
18 | # k = np.log2(M)
19 | # k = int(k)
20 | k = 4
21 | n_channel = 2
22 | R = k / n_channel
23 | print('M:', M, 'k:', k, 'n:', n_channel)
24 |
25 | #generating train data
26 | N = 10000
27 | label = np.random.randint(M, size = N)
28 | label_out = label.reshape((-1,1))
29 | #defining an auto encoder
30 |
31 | input_signal = Input( shape = (1, ) )
32 | encoded = embeddings.Embedding(input_dim=M, output_dim = k,input_length= 1 )(input_signal)
33 | encoded1 = Flatten()(encoded)
34 | encoded2 = Dense(M,activation= 'relu')(encoded1)
35 | #encoded2 = LSTM(n_channel, dropout=0.2, recurrent_dropout=0.2)(encoded)
36 | encoded3 = Dense(n_channel, activation= 'linear')(encoded2)
37 | #encoded4 = Lambda(lambda x: np.sqrt(n_channel)* K.l2_normalize(x, axis=1))(encoded3)
38 | encoded4 = Lambda(lambda x: np.sqrt(n_channel) * K.l2_normalize(x, axis=1))(encoded3)
39 |
40 | EbNodB_train = 7
41 | EbNo_train = 10 ** (EbNodB_train / 10.0)
42 | # EbNo_train = 5.01187
43 | channel_out = GaussianNoise(np.sqrt(1 / (2 * R * EbNo_train)))(encoded4)
44 |
45 | decoded = Dense(M, activation='relu')(channel_out)
46 | decoded1 = Dense(M, activation='softmax')(decoded)
47 | #decoded1 = Dense(M, activation= 'sigmoid')(decoded)
48 | # softmax outputs a probability distribution over the M messages, which is what sparse_categorical_crossentropy expects
49 |
50 | auto_encoder_embedding = Model(input_signal, decoded1)
51 | adam= Adam(lr= 0.01)
52 | auto_encoder_embedding.compile(optimizer= adam,
53 | loss= 'sparse_categorical_crossentropy',
54 | )
55 | print(auto_encoder_embedding.summary())
56 | auto_encoder_embedding.fit(label, label_out,
57 | epochs=45,
58 | batch_size=32)
59 | encoder = Model(input_signal, encoded4)
60 | encoded_input = Input(shape=(n_channel,))
61 |
62 | deco = auto_encoder_embedding.layers[-2](encoded_input)
63 | deco = auto_encoder_embedding.layers[-1](deco)
64 | decoder = Model(encoded_input, deco)
65 |
66 |
67 | #generating test data
68 |
69 | N = 50000
70 | test_label = np.random.randint(M, size=N)
71 | test_label_out = test_label.reshape((-1,1))
72 | #plotting constellation diagram
73 | scatter_plot = []
74 | for i in range (0,M):
75 | scatter_plot.append(encoder.predict(np.expand_dims(i, axis=0)))
76 | scatter_plot = np.array(scatter_plot)
77 | print(scatter_plot.shape)
78 |
79 | import matplotlib.pyplot as plt
80 | scatter_plot = scatter_plot.reshape(M, 2, 1)
81 | plt.scatter(scatter_plot[:, 0], scatter_plot[:, 1])
82 | #plt.axis((-2.5, 2.5, -2.5, 2.5))
83 | plt.grid()
84 | plt.show()
85 |
86 | # use this function for plotting the constellation in higher dimensions, e.g. 7-D for a (7,4) autoencoder
87 | '''
88 | x_emb = encoder.predict(test_data)
89 | noise_std = np.sqrt(1/(2*R*EbNo_train))
90 | noise = noise_std * np.random.randn(N,n_channel)
91 | x_emb = x_emb + noise
92 | from sklearn.manifold import TSNE
93 | X_embedded = TSNE(learning_rate=700, n_components=2,n_iter=35000, random_state=0, perplexity=60).fit_transform(x_emb)
94 | print (X_embedded.shape)
95 | X_embedded = X_embedded / 7
96 | import matplotlib.pyplot as plt
97 | plt.scatter(X_embedded[:,0],X_embedded[:,1])
98 | #plt.axis((-2.5,2.5,-2.5,2.5))
99 | plt.grid()
100 | plt.show()
101 | '''
102 |
103 | #calculating BER
104 | EbNodB_range = list(np.linspace(-4, 8.5 ,26))
105 | ber = [None] * len(EbNodB_range)
106 | for n in range(0, len(EbNodB_range)):
107 | EbNo = 10 ** (EbNodB_range[n] / 10.0)
108 | noise_std = np.sqrt(1 / (2 * R * EbNo))
109 | noise_mean = 0
110 | no_errors = 0
111 | nn = N
112 | noise = noise_std * np.random.randn(nn, n_channel)
113 | encoded_signal = encoder.predict(test_label)
114 | final_signal = encoded_signal + noise
115 | pred_final_signal = decoder.predict(final_signal)
116 | pred_output = np.argmax(pred_final_signal, axis=1)
117 | no_errors = (pred_output != test_label)
118 | no_errors = no_errors.astype(int).sum()
119 | ber[n] = no_errors/nn
120 | print('SNR:', EbNodB_range[n], 'BER:', ber[n])
121 |
122 | plt.plot(EbNodB_range, ber, 'bo', label='Autoencoder_embedding(k=4)')
123 | plt.yscale('log')
124 | plt.xlabel('SNR_RANGE')
125 | plt.ylabel('Block Error Rate')
126 | plt.grid()
127 | plt.legend(loc='upper right',ncol= 1)
128 |
129 | plt.show()
130 |
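The Lambda energy constraint sqrt(n_channel) * l2_normalize places every encoded symbol on a circle of radius sqrt(n_channel), i.e. sqrt(2) here, which is what the constellation plot above should show. A quick check, reusing scatter_plot and M from this script:

    import numpy as np
    radii = np.linalg.norm(scatter_plot.reshape(M, 2), axis=1)
    print(radii)  # all approximately sqrt(2) ~ 1.414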
--------------------------------------------------------------------------------
/ui.lnf.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/undergradthesis - 副本 (2).docx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FassyGit/ML-in-physical-layer/88aafa520f35ced930fb76ee05853c3090d8edea/undergradthesis - 副本 (2).docx
--------------------------------------------------------------------------------
/vcs.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------