├── MPSKMQAM_idetification.py
├── README.md
├── compressImage.py
├── dataCreate.m
├── input_data.py
├── model.py
└── testing.py

/MPSKMQAM_idetification.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 09:36:26 2019

@author: user
"""

# Imports
import os
import numpy as np
import tensorflow as tf
import input_data
import model

# Hyperparameters
#N_CLASSES = 4   # husky, jiwawa, poodle, qiutian -- leftover from the image-classification demo this script was adapted from
N_CLASSES = 10
IMG_W = 64   # images are resized to IMG_W x IMG_H; larger images make training slower
IMG_H = 64
BATCH_SIZE = 20
#CAPACITY = 200
CAPACITY = 1800
MAX_STEP = 300          # usually well above 10k steps in practice
learning_rate = 0.0001  # usually 0.0001 or smaller

# Paths and input batches
train_dir = 'D:/学习资料/论文/神经网络——信号调制方式识别/F_仿真/train/'   # directory of training samples
logs_train_dir = 'D:/学习资料/论文/神经网络——信号调制方式识别/F_仿真/train'   # directory for logs and checkpoints
#logs_test_dir = 'E:/Re_train/image_data/test'   # directory for test logs

#train, train_label = input_data.get_files(train_dir)
train, train_label, val, val_label = input_data.get_files(train_dir, 0.3)
# training images and labels
train_batch, train_label_batch = input_data.get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
# validation images and labels
val_batch, val_label_batch = input_data.get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

# Training ops
train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = model.losses(train_logits, train_label_batch)
train_op = model.trainning(train_loss, learning_rate)
train_acc = model.evaluation(train_logits, train_label_batch)

# Validation ops (defined here but not run in the training loop below)
test_logits = model.inference(val_batch, BATCH_SIZE, N_CLASSES)
test_loss = model.losses(test_logits, val_label_batch)
test_acc = model.evaluation(test_logits, val_label_batch)

# Merge all summaries for TensorBoard
summary_op = tf.summary.merge_all()

# Create a session
sess = tf.Session()
# Writer for the log files
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
#val_writer = tf.summary.FileWriter(logs_test_dir, sess.graph)
# Saver to store the trained model
saver = tf.train.Saver()
# Initialize all variables
sess.run(tf.global_variables_initializer())
# Start the input queue runners
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

# Train batch by batch
try:
    # Run MAX_STEP training steps, one batch per step
    for step in np.arange(MAX_STEP):
        if coord.should_stop():
            break
        # Run the training ops; train_logits is evaluated implicitly because train_loss and train_op depend on it
        _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

        # Every 10 steps, print the current loss and accuracy and write a summary
        if step % 10 == 0:
            print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc*100.0))
            summary_str = sess.run(summary_op)
            train_writer.add_summary(summary_str, step)
        # Save the trained model at the final step
        if (step + 1) == MAX_STEP:
            checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)

except tf.errors.OutOfRangeError:
    print('Done training -- epoch limit reached')

finally:
    coord.request_stop()
    coord.join(threads)
    sess.close()

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Identification-of-communication-modulation-based-on-convolutional-neural-network
- **Identification of communication modulation schemes based on a convolutional neural network**

1. Generate a constellation-diagram dataset on the MATLAB simulation platform (dataCreate.m)
2. Preprocess the constellation images (compressImage.py)
3. Build the CNN model (model.py)
4. Load the data and train the network (input_data.py, MPSKMQAM_idetification.py)
5. Test the trained network on the data (testing.py)
--------------------------------------------------------------------------------
/compressImage.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Sun May 5 11:29:09 2019

@author: user
"""

import PIL.Image as Image
import os

# Batch image resizing
def compressImage(srcPath, dstPath):
    for filename in os.listdir(srcPath):
        # Create the destination directory if it does not exist, preserving the directory structure
        if not os.path.exists(dstPath):
            os.makedirs(dstPath)

        # Build the full source and destination paths
        srcFile = os.path.join(srcPath, filename)
        dstFile = os.path.join(dstPath, filename)

        # Regular files are resized
        if os.path.isfile(srcFile):
            try:
                # Open the image, shrink it and save it; a filter such as srcFile.endswith(".jpg")
                # or os.path.splitext() could be used to restrict this to specific file types
                sImg = Image.open(srcFile)
                sImg = sImg.convert('RGB')   # convert to RGB (3 channels)
                w, h = sImg.size
                dImg = sImg.resize((64, 64), Image.ANTIALIAS)   # target size as a tuple; ANTIALIAS is named LANCZOS in newer Pillow releases
                dImg.save(dstFile)   # could also overwrite srcFile or change the extension; save() accepts format/encoding options such as JPEG
                print(dstFile + " done")
            except Exception:
                print(dstFile + " failed!")

        # Directories are handled recursively
        if os.path.isdir(srcFile):
            compressImage(srcFile, dstFile)

if __name__ == '__main__':
    # compressImage("D:/学习资料/论文/神经网络——信号调制方式识别/仿真/仿真练习/data/test/4PSK","D:/学习资料/论文/神经网络——信号调制方式识别/仿真/仿真练习/64X64data/test/4PSK")
    srcDir = input("Source folder to compress: ")
    dstDir = input("Destination folder for the resized images: ")
    compressImage(srcDir, dstDir)

--------------------------------------------------------------------------------
/dataCreate.m:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alanSunyulin/Identification-of-communication-modulation-based-on-convolutional-neural-network/ec7e1faf9a0998d78f56d558bd95cbce482a4d4b/dataCreate.m
--------------------------------------------------------------------------------
/input_data.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 14:09:43 2019

@author: user
"""

import os
import math
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

#============================================================================
#-----------------Generate lists of image paths and labels------------------
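# Expected directory layout (inferred from the subfolders listed in get_files() below):
#   train/
#       PSK4/   QAM4/   PSK8/   QAM8/   PSK16/
#       QAM16/  QAM32/  QAM64/  QAM128/ QAM256/
# Each class subfolder holds the 64x64 JPEG constellation images for that
# modulation type; the folder name determines the integer label 0..9.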

train_dir = 'D:\学习资料\论文\神经网络——信号调制方式识别\F_仿真\train'   # unused here; get_files() takes the directory as an argument

PSK4 = []
label_PSK4 = []
QAM4 = []
label_QAM4 = []
PSK8 = []
label_PSK8 = []
QAM8 = []
label_QAM8 = []
PSK16 = []
label_PSK16 = []
QAM16 = []
label_QAM16 = []
QAM32 = []
label_QAM32 = []
QAM64 = []
label_QAM64 = []
QAM128 = []
label_QAM128 = []
QAM256 = []
label_QAM256 = []

# Step 1: collect all image paths under each class subfolder of file_dir,
# and record the matching integer label for each image.
def get_files(file_dir, ratio):
    for file in os.listdir(file_dir+'/PSK4'):
        PSK4.append(file_dir +'/PSK4'+'/'+ file)
        label_PSK4.append(0)
    for file in os.listdir(file_dir+'/QAM4'):
        QAM4.append(file_dir +'/QAM4'+'/'+file)
        label_QAM4.append(1)
    for file in os.listdir(file_dir+'/PSK8'):
        PSK8.append(file_dir +'/PSK8'+'/'+ file)
        label_PSK8.append(2)
    for file in os.listdir(file_dir+'/QAM8'):
        QAM8.append(file_dir +'/QAM8'+'/'+file)
        label_QAM8.append(3)
    for file in os.listdir(file_dir+'/PSK16'):
        PSK16.append(file_dir +'/PSK16'+'/'+file)
        label_PSK16.append(4)
    for file in os.listdir(file_dir+'/QAM16'):
        QAM16.append(file_dir +'/QAM16'+'/'+file)
        label_QAM16.append(5)
    for file in os.listdir(file_dir+'/QAM32'):
        QAM32.append(file_dir +'/QAM32'+'/'+file)
        label_QAM32.append(6)
    for file in os.listdir(file_dir+'/QAM64'):
        QAM64.append(file_dir +'/QAM64'+'/'+file)
        label_QAM64.append(7)
    for file in os.listdir(file_dir+'/QAM128'):
        QAM128.append(file_dir +'/QAM128'+'/'+file)
        label_QAM128.append(8)
    for file in os.listdir(file_dir+'/QAM256'):
        QAM256.append(file_dir +'/QAM256'+'/'+file)
        label_QAM256.append(9)


    # Step 2: stack the per-class path and label lists into one image list and one label list
    image_list = np.hstack((PSK4,QAM4,PSK8,QAM8,PSK16,QAM16,QAM32,QAM64,QAM128,QAM256))
    label_list = np.hstack((label_PSK4,label_QAM4,label_PSK8,label_QAM8,label_PSK16,
                            label_QAM16,label_QAM32,label_QAM64,label_QAM128,label_QAM256))
    #label_list = np.hstack((label_husky, label_jiwawa, label_poodle, label_qiutian))

    # Shuffle paths and labels together
    temp = np.array([image_list, label_list])
    temp = temp.transpose()
    np.random.shuffle(temp)

    # Take the shuffled paths and labels back out as lists
    #image_list = list(temp[:, 0])
    #label_list = list(temp[:, 1])
    #label_list = [int(i) for i in label_list]
    #return image_list, label_list

    # Convert all images and labels to Python lists
    all_image_list = list(temp[:, 0])
    all_label_list = list(temp[:, 1])

    # Split the lists into two parts: one for training (tra), one for validation (val)
    # ratio is the fraction held out for validation
    n_sample = len(all_label_list)
    n_val = int(math.ceil(n_sample*ratio))   # number of validation samples
    n_train = n_sample - n_val               # number of training samples

    tra_images = all_image_list[0:n_train]
    tra_labels = all_label_list[0:n_train]
    tra_labels = [int(float(i)) for i in tra_labels]
    val_images = all_image_list[n_train:]
    val_labels = all_label_list[n_train:]
    val_labels = [int(float(i)) for i in val_labels]

    return tra_images, tra_labels, val_images, val_labels


#---------------------------------------------------------------------------
#--------------------Generate batches----------------------------------------

# Step 1: pass the lists produced above into get_batch(); cast the types and build an input
# queue. Because the image paths and labels are separate lists, tf.train.slice_input_producer()
# is used, and tf.read_file() then reads the image files from the queue.
#   image_W, image_H: fixed image width and height
#   batch_size: how many images go into each batch
#   capacity: maximum number of elements held in the queue
def get_batch(image, label, image_W, image_H, batch_size, capacity):
    # cast the types
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)

    # make an input queue
    input_queue = tf.train.slice_input_producer([image, label])

    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])   # read an image from the queue

    # Step 2: decode the image. Different formats must not be mixed: use only JPEG, or only PNG, etc.
    image = tf.image.decode_jpeg(image_contents, channels=3)

    # Step 3: preprocessing -- crop or pad to the fixed size and standardize each image
    # (zero mean, unit variance) so the trained model is more robust.
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    image = tf.image.per_image_standardization(image)

    # Step 4: assemble batches
    # image_batch: 4D tensor [batch_size, width, height, 3], dtype=tf.float32
    # label_batch: 1D tensor [batch_size], dtype=tf.int32
    image_batch, label_batch = tf.train.batch([image, label],
                                              batch_size=batch_size,
                                              num_threads=32,
                                              capacity=capacity)
    # reshape the labels to [batch_size]
    label_batch = tf.reshape(label_batch, [batch_size])
    image_batch = tf.cast(image_batch, tf.float32)
    return image_batch, label_batch

--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 14:11:47 2019

@author: user
"""

#========================================================================
# Input data:      (batch_size, IMG_W, IMG_H, col_channel) = (20, 64, 64, 3)
#
# Conv layer 1:    (conv_kernel, num_channel, num_out_neure) = (3, 3, 3, 64)
#
# Pooling layer 1: (ksize, strides, padding) = ([1,3,3,1], [1,2,2,1], 'SAME')
#
# Conv layer 2:    (conv_kernel, num_channel, num_out_neure) = (3, 3, 64, 16)
#
# Pooling layer 2: (ksize, strides, padding) = ([1,3,3,1], [1,1,1,1], 'SAME')
#
# FC layer 1:      (out_pool2_reshape, num_out_neure) = (dim, 128)
#
# FC layer 2:      (fc1_out, num_out_neure) = (128, 128)
#
# Softmax layer:   (fc2_out, num_classes) = (128, n_classes), with n_classes = 10 here
#
# Activation:      tf.nn.relu
#
# Loss:            tf.nn.sparse_softmax_cross_entropy_with_logits
#---------------------

#=========================================================================
import tensorflow as tf
#=========================================================================
# Network definition
# Input:  images, an image batch, 4D tensor, tf.float32, [batch_size, width, height, channels]
# Output: logits, float, [batch_size, n_classes]
def inference(images, batch_size, n_classes):
    # A simple CNN: two conv+pooling blocks, two fully connected layers, and a softmax layer for classification.
    # Conv layer 1
    # 64 3x3 kernels over 3 input channels; padding='SAME' keeps the output the same size as the input; ReLU activation
    with tf.variable_scope('conv1') as scope:

        weights = tf.Variable(tf.truncated_normal(shape=[3,3,3,64], stddev=1.0, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[64]),
                             name='biases', dtype=tf.float32)

        conv = tf.nn.conv2d(images, weights, strides=[1,1,1,1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # Pooling layer 1
    # 3x3 max pooling with stride 2, followed by local response normalization (lrn), which helps training.
    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')

    # Conv layer 2
    # 16 3x3 kernels over 64 input channels; padding='SAME' keeps the output the same size as the input; ReLU activation
    with tf.variable_scope('conv2') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[3,3,64,16], stddev=0.1, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[16]),
                             name='biases', dtype=tf.float32)

        conv = tf.nn.conv2d(norm1, weights, strides=[1,1,1,1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    # Pooling layer 2
    # 3x3 max pooling with stride 1; in this block lrn() is applied before the pooling
    # pool2 and norm2
    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1,3,3,1], strides=[1,1,1,1], padding='SAME', name='pooling2')

    # Fully connected layer 3
    # 128 neurons; the pooling output is flattened to one row per sample; ReLU activation
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.Variable(tf.truncated_normal(shape=[dim,128], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[128]),
                             name='biases', dtype=tf.float32)

        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # Fully connected layer 4
    # 128 neurons, ReLU activation
    with tf.variable_scope('local4') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[128,128], stddev=0.005, dtype=tf.float32),
                              name='weights', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[128]),
                             name='biases', dtype=tf.float32)

        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

    # Dropout layer (disabled)
    # with tf.variable_scope('dropout') as scope:
    #     drop_out = tf.nn.dropout(local4, 0.8)


    # Softmax regression layer
    # A linear layer on top of the FC output that produces one score per class (n_classes = 10 modulation types here).
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.Variable(tf.truncated_normal(shape=[128, n_classes], stddev=0.005, dtype=tf.float32),
                              name='softmax_linear', dtype=tf.float32)

        biases = tf.Variable(tf.constant(value=0.1, dtype=tf.float32, shape=[n_classes]),
                             name='biases', dtype=tf.float32)

        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')

    return softmax_linear

#-----------------------------------------------------------------------------
# Loss computation
# Input:  logits, the network output. labels, the true labels (integer class indices, 0..n_classes-1 here)
# Output: loss, the loss value
def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name+'/loss', loss)
    return loss

#--------------------------------------------------------------------------
# Loss optimization
# Input:  loss. learning_rate, the learning rate.
# Output: train_op, the training op that is passed to sess.run() to train the model.
def trainning(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op

#-----------------------------------------------------------------------
# Evaluation / accuracy computation
# Input:  logits, the network output. labels, the true labels (integer class indices).
# Output: accuracy, the average accuracy for the current step, i.e. the fraction of images in the batch classified correctly.
def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)

        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name+'/accuracy', accuracy)
    return accuracy

#========================================================================

--------------------------------------------------------------------------------
/testing.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 11:24:05 2019

@author: user
"""


#=============================================================================
from PIL import Image
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import model
from input_data import get_files

#=======================================================================
# Pick one image
def get_one_image(train):
    # Input:  train, a list of image paths
    # Output: image, one image drawn at random from that list
    n = len(train)
    ind = np.random.randint(0, n)
    img_dir = train[ind]   # randomly choose an image to test

    img = Image.open(img_dir)
    plt.imshow(img)
    imag = img.resize([64, 64])   # the images were already resized during preprocessing, so this is only a safeguard
    image = np.array(imag)
    return image

#--------------------------------------------------------------------
# Evaluate a single image
def evaluate_one_image(image_array):
    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 10

        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 64, 64, 3])

        logit = model.inference(image, BATCH_SIZE, N_CLASSES)

        logit = tf.nn.softmax(logit)

        # note: logit is built from image_array above, so this placeholder (and the feed_dict below) has no effect on the result
        x = tf.placeholder(tf.float32, shape=[64, 64, 3])

        # you need to change the directories to yours.
        # logs_train_dir = 'E:/Re_train/image_data/inputdata/'
        logs_train_dir = 'D:/学习资料/论文/神经网络——信号调制方式识别/F_仿真/train'

        saver = tf.train.Saver()

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            if max_index == 0:
                print('This is a 4psk with probability %.6f' % prediction[:, 0])
            elif max_index == 1:
                print('This is a 4qam with probability %.6f' % prediction[:, 1])
            elif max_index == 2:
                print('This is a 8psk with probability %.6f' % prediction[:, 2])
            elif max_index == 3:
                print('This is a 8qam with probability %.6f' % prediction[:, 3])
            elif max_index == 4:
                print('This is a 16psk with probability %.6f' % prediction[:, 4])
            elif max_index == 5:
                print('This is a 16qam with probability %.6f' % prediction[:, 5])
            elif max_index == 6:
                print('This is a 32qam with probability %.6f' % prediction[:, 6])
            elif max_index == 7:
                print('This is a 64qam with probability %.6f' % prediction[:, 7])
            elif max_index == 8:
                print('This is a 128qam with probability %.6f' % prediction[:, 8])
            elif max_index == 9:
                print('This is a 256qam with probability %.6f' % prediction[:, 9])
#------------------------------------------------------------------------

if __name__ == '__main__':

    train_dir = 'D:/学习资料/论文/神经网络——信号调制方式识别/F_仿真/train/'
    train, train_label, val, val_label = get_files(train_dir, 0.3)
    img = get_one_image(val)   # pass train or val here to test an image from the training or the validation set
    evaluate_one_image(img)
#===========================================================================

--------------------------------------------------------------------------------
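The repository has no single entry point that ties the README steps together, so below is a minimal glue sketch, not a file from the repo. It assumes dataCreate.m has already written the raw constellation images into a folder with one subfolder per modulation class, and that MPSKMQAM_idetification.py has been run separately to leave a checkpoint in the logs_train_dir hard-coded in testing.py; the file name end_to_end_sketch.py and the paths RAW_DIR and RESIZED_DIR are hypothetical placeholders.

# end_to_end_sketch.py -- illustrative only, not part of the original repository.
# Assumes dataCreate.m has produced raw constellation images under RAW_DIR
# (subfolders PSK4, QAM4, PSK8, QAM8, PSK16, QAM16, QAM32, QAM64, QAM128, QAM256)
# and that a checkpoint already exists in the logs_train_dir used by testing.py.

import compressImage                                      # step 2: image preprocessing
from input_data import get_files                          # step 4: path/label lists
from testing import get_one_image, evaluate_one_image     # step 5: inference

RAW_DIR = 'data/raw/train'        # hypothetical path: output of dataCreate.m
RESIZED_DIR = 'data/64x64/train'  # hypothetical path: 64x64 RGB images for the CNN

# Step 2 of the README: resize every constellation image to 64x64 RGB,
# preserving the one-subfolder-per-class layout.
compressImage.compressImage(RAW_DIR, RESIZED_DIR)

# Steps 3-4: build and train the CNN. The training script runs at import time
# with hard-coded paths, so it is invoked as a script after editing train_dir
# and logs_train_dir to point at RESIZED_DIR:
#   python MPSKMQAM_idetification.py

# Step 5: classify one randomly chosen validation image with the saved checkpoint.
train, train_label, val, val_label = get_files(RESIZED_DIR, 0.3)
img = get_one_image(val)
evaluate_one_image(img)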