├── LSTM.py
├── README.md
├── main.py
└── mltools.py

--------------------------------------------------------------------------------
/LSTM.py:
--------------------------------------------------------------------------------
import os

from keras.models import Model
from keras.layers import Input, Dense, Dropout, CuDNNLSTM


def LSTM(weights=None,
         input_shape=[128, 2],
         classes=6,
         **kwargs):
    """Two stacked CuDNNLSTM layers, dropout, and a softmax classifier."""
    if weights is not None and not os.path.exists(weights):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), '
                         'or the path to the weights file to be loaded.')

    dr = 0.5  # dropout rate
    input = Input(input_shape, name='input1')
    x = CuDNNLSTM(units=128, return_sequences=True)(input)
    x = CuDNNLSTM(units=128)(x)
    x = Dropout(dr)(x)
    x = Dense(classes, activation='softmax', name='softmax')(x)

    model = Model(inputs=input, outputs=x)

    # Load weights.
    if weights is not None:
        model.load_weights(weights)

    return model
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# AMR-Dataset-for-MIMO-system-with-precoding

Related dataset for the paper "Deep Learning Based Automatic Modulation Recognition: Models, Datasets, and Challenges", published in Digital Signal Processing.

The article is available here: [Deep Learning Based Automatic Modulation Recognition: Models, Datasets, and Challenges](https://www.sciencedirect.com/science/article/pii/S1051200422002676?via%3Dihub)

If you have any questions, please contact us by e-mail: zhangxx8023@gmail.com

# Content
Precoding for MIMO systems has gradually become a research hotspot in recent years with the development of millimeter-wave communication technology, and research on MIMO systems incorporating precoding is gaining increased attention. In this paper, we test DL-AMR on a MIMO system with precoding; the detailed MIMO system model can be found in our paper.

## Training data generation

**Fig. 1** Training data generation for AMR in a MIMO system with precoding.
![MIMO AMR framework](https://user-images.githubusercontent.com/56213845/180169488-a82d0606-bc50-4a1d-a48c-7564c1e2d37a.png)

## Accuracy
**Fig. 2** Recognition accuracy comparison of four DL models on the MIMO system with precoding. (a) 𝑁𝑡 = 4, 𝑁𝑟 = 2, (b) 𝑁𝑡 = 16, 𝑁𝑟 = 4, (c) 𝑁𝑡 = 64, 𝑁𝑟 = 16.
![MIMOcombine](https://user-images.githubusercontent.com/56213845/180170315-eefb26a9-bf1b-4b52-a692-f1c66bc9216c.png)

# Dataset Description
The dataset is generated by MATLAB, and Keras with TensorFlow as the backend is used to train the modulation recognition model. The data are modulated with six schemes: '2PSK', 'QPSK', '8PSK', '16QAM', '64QAM', and '128QAM'. Each sample contains 128 transmitted symbols, and we prepare 500 samples per SNR per scheme, divided into training, validation, and test sets at a ratio of 6:2:2.
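
The snippet below is a minimal loading sketch based on the released main.py: the directory layout (e.g. `./Nt4Nr2`) and the MATLAB variable names (`data_save`, `label_save`, `snr_save`) are taken from that script, so adjust the path to wherever you unpack the download.

```python
import numpy as np
import scipy.io as scio

mods = ['2psk', 'qpsk', '8psk', '16qam', '64qam', '128qam']
root = './Nt4Nr2'  # one antenna configuration (Nt = 4, Nr = 2)

# Each scheme ships as three .mat files: I/Q samples, one-hot labels, SNR tags.
data = np.concatenate([scio.loadmat('%s/data%s.mat' % (root, m))['data_save'] for m in mods])
labels = np.concatenate([scio.loadmat('%s/label%s.mat' % (root, m))['label_save'] for m in mods])
snrs = np.concatenate([scio.loadmat('%s/snr%s.mat' % (root, m))['snr_save'] for m in mods])

print(data.shape[0])  # 93000 samples in total: 6 schemes x 31 SNRs x 500
```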
The dataset is available here: [MIMO dataset](https://pan.baidu.com/s/1YvOmfXL6RXR76bx9fwVMhw?pwd=5cx4)

You can refer to the code we have released to process the dataset, or you can follow the data generation framework to generate your own dataset.

# Citation
If our work is helpful to your research, please cite:

    @article{ZHANG2022103650,
      title={Deep Learning Based Automatic Modulation Recognition: Models, Datasets, and Challenges},
      author={Fuxin Zhang and Chunbo Luo and Jialang Xu and Yang Luo and FuChun Zheng},
      journal={Digital Signal Processing},
      year={2022},
      doi={https://doi.org/10.1016/j.dsp.2022.103650}
    }
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import csv
import keras
import numpy as np
import scipy.io as scio
import LSTM as mcl
import mltools

# Load the I/Q data, one-hot labels, and SNR tags for each modulation scheme,
# then concatenate them in a fixed order.
filename = r'./Nt4Nr2'
mods = ['2psk', 'qpsk', '8psk', '16qam', '64qam', '128qam']
dataset = np.concatenate([scio.loadmat('%s/data%s.mat' % (filename, m))['data_save'] for m in mods])
label = np.concatenate([scio.loadmat('%s/label%s.mat' % (filename, m))['label_save'] for m in mods])
snrs = np.concatenate([scio.loadmat('%s/snr%s.mat' % (filename, m))['snr_save'] for m in mods])
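
# Added sanity check (not in the original script): the released data holds
# 6 schemes x 31 SNRs x 500 samples = 93,000 examples, ordered scheme-by-scheme
# and SNR-by-SNR, which is exactly what the index-splitting loop below assumes.
assert dataset.shape[0] == label.shape[0] == snrs.shape[0]
print('total examples:', dataset.shape[0])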
# Split each block of 500 samples (one scheme at one SNR) into
# 300 train / 100 validation / 100 test examples (the 6:2:2 ratio).
L = 500        # samples per SNR per scheme
snr_num = 31   # SNRs from -10 dB to 20 dB
num_mod = 6
a = 0
train_idx = []
val_idx = []
n_examples = dataset.shape[0]
for j in range(num_mod):
    for i in range(snr_num):
        train_idx += list(np.random.choice(range(a * L, (a + 1) * L), size=300, replace=False))
        val_idx += list(np.random.choice(list(set(range(a * L, (a + 1) * L)) - set(train_idx)), size=100, replace=False))
        a += 1
test_idx = list(set(range(0, n_examples)) - set(train_idx) - set(val_idx))

np.random.shuffle(train_idx)
np.random.shuffle(val_idx)
np.random.shuffle(test_idx)

# Note: the original script expanded X_train/X_val/X_test with
# np.expand_dims(..., axis=3), which a 2-D CNN input would need; the LSTM
# model used here expects (batch, 128, 2), so that step is omitted.
X_train = dataset[train_idx]
X_val = dataset[val_idx]
X_test = dataset[test_idx]
Y_train = label[train_idx]
Y_val = label[val_idx]
Y_test = label[test_idx]

batch_size = 128
nb_epoch = 1000
model = mcl.LSTM()
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
model.summary()

os.makedirs('figure', exist_ok=True)  # output directory for the saved plots
filepath = 'weights.h5'
history = model.fit(X_train,
                    Y_train,
                    batch_size=batch_size,
                    epochs=nb_epoch,
                    verbose=2,
                    validation_data=(X_val, Y_val),
                    callbacks=[
                        keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto'),
                        keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, verbose=1, patience=5, min_lr=0.0000001),
                        keras.callbacks.EarlyStopping(monitor='val_loss', patience=50, verbose=1, mode='auto')
                    ])
mltools.show_history(history)
score = model.evaluate(X_test, Y_test, verbose=1, batch_size=batch_size)
print(score)

classes = ['2PSK', 'QPSK', '8PSK', '16QAM', '64QAM', '128QAM']

def predict(model):
    # Reload the best checkpoint, then report the overall and per-SNR accuracy.
    model.load_weights(filepath)
    test_Y_hat = model.predict(X_test, batch_size=batch_size)
    confnorm, _, _ = mltools.calculate_confusion_matrix(Y_test, test_Y_hat, 6)
    mltools.plot_confusion_matrix(confnorm, labels=classes, save_filename='figure/mclstm_total_confusion.png')
    acc = {}
    acc_mod_snr = np.zeros((6, 31))
    test_SNRs = snrs[test_idx].reshape(len(test_idx))
    i = 0
    for snr in range(-10, 21):
        # Extract the test examples at this SNR.
        test_X_i = X_test[np.where(test_SNRs == snr)]
        test_Y_i = Y_test[np.where(test_SNRs == snr)]
        test_Y_i_hat = model.predict(test_X_i)
        confnorm_i, cor, ncor = mltools.calculate_confusion_matrix(test_Y_i, test_Y_i_hat, 6)
        acc[snr] = 1.0 * cor / (cor + ncor)
        with open('acc111.csv', 'a', newline='') as f0:
            write0 = csv.writer(f0)
            write0.writerow([acc[snr]])
        mltools.plot_confusion_matrix(confnorm_i, labels=classes, title="Confusion Matrix",
                                      save_filename="figure/Confusion(SNR=%d)(ACC=%2f).png" % (snr, 100.0 * acc[snr]))
        # Per-modulation accuracy at this SNR (diagonal of the row-normalized
        # confusion matrix).
        acc_mod_snr[:, i] = np.round(np.diag(confnorm_i) / np.sum(confnorm_i, axis=1), 3)
        i = i + 1

predict(model)
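
# Optional addition (not in the original script): plot the per-SNR accuracy
# just written to acc111.csv, analogous to the curves in Fig. 2 of the README.
# This assumes a single run, i.e. the file holds one value per SNR from
# -10 dB to 20 dB (the 'a' append mode accumulates rows across runs).
import matplotlib.pyplot as plt

acc_curve = np.loadtxt('acc111.csv')
plt.plot(range(-10, 21), acc_curve, marker='o')
plt.xlabel('SNR (dB)')
plt.ylabel('Recognition accuracy')
plt.grid(True)
plt.savefig('figure/acc_vs_snr.png', dpi=600, bbox_inches='tight')
plt.close()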
--------------------------------------------------------------------------------
/mltools.py:
--------------------------------------------------------------------------------
"Adapted from the code (https://github.com/leena201818/radiom) contributed by leena201818"
import matplotlib
# matplotlib.use('Tkagg')
import matplotlib.pyplot as plt
import numpy as np

# Show loss and accuracy curves, and dump the raw values to text files.
def show_history(history):
    plt.figure()
    plt.title('Training loss performance')
    plt.plot(history.epoch, history.history['loss'], label='train_loss')
    plt.plot(history.epoch, history.history['val_loss'], label='val_loss')
    plt.legend()
    plt.savefig('figure/total_loss.png')
    plt.close()

    plt.figure()
    plt.title('Training accuracy performance')
    plt.plot(history.epoch, history.history['acc'], label='train_acc')
    plt.plot(history.epoch, history.history['val_acc'], label='val_acc')
    plt.legend()
    plt.savefig('figure/total_acc.png')
    plt.close()

    np.savetxt('train_acc.txt', np.array(history.history['acc']))
    np.savetxt('train_loss.txt', np.array(history.history['loss']))
    np.savetxt('val_acc.txt', np.array(history.history['val_acc']))
    np.savetxt('val_loss.txt', np.array(history.history['val_loss']))

def plot_lstm2layer_output(a, modulation_type=None, save_filename=None):
    plt.figure(figsize=(4, 3), dpi=600)
    plt.plot(range(128), a[0], label=modulation_type)
    plt.legend()
    plt.xticks([])  # hide x-axis tick labels
    plt.yticks([])
    plt.tight_layout()
    plt.savefig(save_filename, dpi=600, bbox_inches='tight')
    plt.close()

def plot_conv4layer_output(a, modulation_type=None):
    # Plot the outputs of 100 channels of the fourth convolutional layer,
    # saving the accumulated figure after each channel is added.
    plt.figure(figsize=(4, 3), dpi=600)
    for i in range(100):
        plt.plot(range(124), a[0, 0, :, i])
        plt.xticks([])  # hide x-axis tick labels
        plt.yticks(size=20)
        plt.tight_layout()
        plt.savefig('./figure_conv4_output/output%d.png' % i, dpi=600, bbox_inches='tight')
    plt.close()

def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.get_cmap("Blues"), labels=[], save_filename=None):
    # Draw a row-normalized confusion matrix with per-cell percentages.
    plt.figure(figsize=(4, 3), dpi=600)
    plt.imshow(cm * 100, interpolation='nearest', cmap=cmap)
    plt.colorbar()
    tick_marks = np.arange(len(labels))
    plt.xticks(tick_marks, labels, rotation=90, size=12)
    plt.yticks(tick_marks, labels, size=12)
    for i in range(len(tick_marks)):
        for j in range(len(tick_marks)):
            value = int(np.around(cm[i, j] * 100))
            if i != j:
                plt.text(j, i, value, ha="center", va="center", fontsize=10)
            else:
                # Shrink the font on diagonal cells that read 100 so they fit.
                plt.text(j, i, value, ha="center", va="center",
                         fontsize=7 if value == 100 else 10, color='darkorange')

    plt.tight_layout()
    if save_filename is not None:
        plt.savefig(save_filename, dpi=600, bbox_inches='tight')
    plt.close()

def calculate_confusion_matrix(Y, Y_hat, classes):
    # Build raw and row-normalized confusion matrices from one-hot labels and
    # softmax outputs, and count the correct/incorrect predictions.
    n_classes = classes
    conf = np.zeros([n_classes, n_classes])
    confnorm = np.zeros([n_classes, n_classes])

    for k in range(0, Y.shape[0]):
        i = list(Y[k, :]).index(1)       # true class
        j = int(np.argmax(Y_hat[k, :]))  # predicted class
        conf[i, j] = conf[i, j] + 1

    for i in range(0, n_classes):
        confnorm[i, :] = conf[i, :] / np.sum(conf[i, :])

    right = np.sum(np.diag(conf))
    wrong = np.sum(conf) - right
    return confnorm, right, wrong
--------------------------------------------------------------------------------
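
Standalone usage sketch (not part of the repository): the mltools helpers only
need one-hot labels and softmax-style outputs, so they can be sanity-checked
without the dataset. The dummy arrays below are purely illustrative.

import numpy as np
import mltools

# Two classes, four samples: three predicted correctly, one confused.
Y = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
Y_hat = np.array([[0.9, 0.1], [0.8, 0.2], [0.3, 0.7], [0.6, 0.4]])

confnorm, right, wrong = mltools.calculate_confusion_matrix(Y, Y_hat, 2)
print(right, wrong)  # 3.0 1.0
mltools.plot_confusion_matrix(confnorm, labels=['A', 'B'], save_filename='demo_confusion.png')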