├── data_set_x
│   ├── image_1.jpg
│   ├── image_2.jpg
│   ├── image_3.jpg
│   ├── image_6.jpg
│   ├── image_7.jpg
│   └── image_8.jpg
├── data_set_y
│   ├── image_1.jpg
│   ├── image_2.jpg
│   ├── image_3.jpg
│   ├── image_6.jpg
│   ├── image_7.jpg
│   └── image_8.jpg
├── __pycache__
│   ├── net.cpython-36.pyc
│   ├── test_image_reader.cpython-36.pyc
│   └── train_image_reader.cpython-36.pyc
├── test_image_reader.py
├── draft_2
├── draft
├── draft_3.py
├── evaluate.py
├── ssim_ms.py
├── ssim
├── net.py
└── test

/data_set_x/image_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_x/image_1.jpg
--------------------------------------------------------------------------------
/data_set_x/image_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_x/image_2.jpg
--------------------------------------------------------------------------------
/data_set_x/image_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_x/image_3.jpg
--------------------------------------------------------------------------------
/data_set_x/image_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_x/image_6.jpg
--------------------------------------------------------------------------------
/data_set_x/image_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_x/image_7.jpg
--------------------------------------------------------------------------------
/data_set_x/image_8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_x/image_8.jpg
--------------------------------------------------------------------------------
/data_set_y/image_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_y/image_1.jpg
--------------------------------------------------------------------------------
/data_set_y/image_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_y/image_2.jpg
--------------------------------------------------------------------------------
/data_set_y/image_3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_y/image_3.jpg
--------------------------------------------------------------------------------
/data_set_y/image_6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_y/image_6.jpg
--------------------------------------------------------------------------------
/data_set_y/image_7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_y/image_7.jpg
-------------------------------------------------------------------------------- /data_set_y/image_8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/data_set_y/image_8.jpg -------------------------------------------------------------------------------- /__pycache__/net.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/__pycache__/net.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/test_image_reader.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/__pycache__/test_image_reader.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/train_image_reader.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/djjandXMU/CycleGAN-and-modification/HEAD/__pycache__/train_image_reader.cpython-36.pyc -------------------------------------------------------------------------------- /test_image_reader.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Jul 22 09:24:49 2018 4 | 5 | @author: djj 6 | """ 7 | 8 | import os 9 | 10 | import numpy as np 11 | import tensorflow as tf 12 | import cv2 13 | 14 | def TestImageReader(file_list, step, size): #训练数据读取接口 15 | file_length = len(file_list) #获取图片列表总长度 16 | line_idx = step % file_length #获取一张待读取图片的下标 17 | test_line_content = file_list[line_idx] #获取一张测试图片路径与名称 18 | test_image_name, _ = os.path.splitext(os.path.basename(test_line_content)) #获取该张测试图片名 19 | test_image = cv2.imread(test_line_content, 1) #读取一张测试图片 20 | test_image_resize_t = cv2.resize(test_image, (size, size)) #改变读取的测试图片的大小 21 | test_image_resize = test_image_resize_t/127.5-1 #归一化测试图片 22 | return test_image_name, test_image_resize #返回读取并处理的一张测试图片与它的名称 23 | -------------------------------------------------------------------------------- /draft_2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Fri Aug 24 21:36:45 2018 5 | 6 | @author: usrp1 7 | """ 8 | 9 | from datetime import datetime 10 | import tensorflow as tf 11 | import numpy as np 12 | 13 | def ssim_tf(image_1,image_2,c_1 = 0.02,c_2 = 0.03): 14 | image_1 =tf.reshape(image_1,[1,256,256,1]) 15 | image_2 =tf.reshape(image_2,[1,256,256,1]) 16 | gauss_filter = np.array([1,4,7,4,1,4,16,26,16,4,7,26,41,26,7,4,16,26,16,4,1,4,7,4,1])/273.0 17 | gauss_filter.reshape((5,5,1,1))##我先用的5*5的滤波器################################### 18 | gauss_filter = tf.constant(value =gauss_filter ,shape=[5, 5, 1, 1],dtype=tf.float32,name='gauss_filter') 19 | #gauss_filter=np.array([[1,4,7,4,1],[4,16,26,16,4],[7,26,41,26,7],[4,16,26,16,4],[1,4,7,4,1]])/273.0 20 | #gauss_filter = tf.constant(gauss_filter,name='gauss_filter',dtype=tf.float32) 21 | image_1_u = tf.nn.conv2d(image_1,gauss_filter, [1, 1, 1, 1], padding = 'SAME') 22 | image_1_u2 = tf.multiply(image_1_u,image_1_u) 23 | 24 | image_2_u = tf.nn.conv2d(image_2,gauss_filter, [1, 1, 1, 1], padding = 'SAME') 25 | image_2_u2 = tf.multiply(image_2_u,image_2_u) 26 | 27 | image_u1_u2 = 
tf.multiply(image_2_u,image_1_u) 28 | 29 | var_image_1 = tf.nn.conv2d(tf.multiply(image_1,image_1),gauss_filter, [1, 1, 1, 1], padding = 'SAME') -image_1_u2 30 | var_image_2 = tf.nn.conv2d(tf.multiply(image_2,image_2),gauss_filter, [1, 1, 1, 1], padding = 'SAME') -image_2_u2 31 | var_image_12 = tf.nn.conv2d(tf.multiply(image_1,image_2),gauss_filter, [1, 1, 1, 1], padding = 'SAME') -image_u1_u2 32 | #ssim_map = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2)); 33 | # c_1 = 0.02 34 | # c_2 = 0.03 35 | ssim_map = tf.multiply(tf.divide((2* image_u1_u2+c_1),(image_1_u2 +image_2_u2 + c_1 )),tf.divide((2*var_image_12+c_2),(var_image_1+var_image_2+c_2))) 36 | ssim_ch=1-tf.reduce_mean(ssim_map) 37 | return ssim_ch 38 | 39 | x_img = tf.placeholder(tf.float32,shape=[1, 256, 256,3],name='x_img') #输入的x域图像 40 | y_img = tf.placeholder(tf.float32,shape=[1, 256, 256,3],name='y_img') #输入的y域图像 41 | djj = ssim_tf(x_img[0,:,:,0],y_img[0,:,:,0]) -------------------------------------------------------------------------------- /draft: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Fri Aug 24 21:22:00 2018 5 | 6 | @author: usrp1 7 | """ 8 | 9 | from datetime import datetime 10 | import tensorflow as tf 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | from numba import jit 14 | 15 | @jit 16 | def color_similar(img1,img2): 17 | img1_r=tf.floor(tf.multiply(tf.add(img1[0,:,:,0],1),255)) 18 | img1_g=tf.floor(tf.multiply(tf.add(img1[0,:,:,1],1),255)) 19 | img1_b=tf.floor(tf.multiply(tf.add(img1[0,:,:,2],1),255)) 20 | 21 | img2_r=tf.floor(tf.multiply(tf.add(img2[0,:,:,0],1),255)) 22 | img2_g=tf.floor(tf.multiply(tf.add(img2[0,:,:,1],1),255)) 23 | img2_b=tf.floor(tf.multiply(tf.add(img2[0,:,:,2],1),255)) 24 | 25 | hist_1_r = tf.histogram_fixed_width(values=img1_r,nbins=256,value_range=[0.0,255.0],name='hist_1_r',dtype=tf.float32) 26 | hist_1_g = tf.histogram_fixed_width(values=img1_g,nbins=256,value_range=[0.0,255.0],name='hist_1_g',dtype=tf.float32) 27 | hist_1_b = tf.histogram_fixed_width(values=img1_b,nbins=256,value_range=[0.0,255.0],name='hist_1_b',dtype=tf.float32) 28 | 29 | hist_2_r = tf.histogram_fixed_width(values=img2_r,nbins=256,value_range=[0.0,255.0],name='hist_2_r',dtype=tf.float32) 30 | hist_2_g = tf.histogram_fixed_width(values=img2_g,nbins=256,value_range=[0.0,255.0],name='hist_2_g',dtype=tf.float32) 31 | hist_2_b = tf.histogram_fixed_width(values=img2_b,nbins=256,value_range=[0.0,255.0],name='hist_2_b',dtype=tf.float32) 32 | 33 | # print(hist_1) 34 | hist_1_r_norm = tf.multiply(hist_1_r,1.0/65536,name='hist_1_r_norm') 35 | hist_1_g_norm = tf.multiply(hist_1_g,1.0/65536,name='hist_1_g_norm') 36 | hist_1_b_norm = tf.multiply(hist_1_b,1.0/65536,name='hist_1_b_norm') 37 | 38 | hist_2_r_norm = tf.multiply(hist_2_r,1.0/65536,name='hist_2_r_norm') 39 | hist_2_g_norm = tf.multiply(hist_2_g,1.0/65536,name='hist_2_g_norm') 40 | hist_2_b_norm = tf.multiply(hist_2_b,1.0/65536,name='hist_2_b_norm') 41 | 42 | 43 | 44 | index_r = tf.reduce_sum(tf.minimum(hist_1_r_norm,hist_2_r_norm)) 45 | index_g = tf.reduce_sum(tf.minimum(hist_1_g_norm,hist_2_g_norm)) 46 | index_b = tf.reduce_sum(tf.minimum(hist_1_b_norm,hist_2_b_norm)) 47 | index = (index_r+index_g+index_b)/3.0 48 | 49 | #x=0:0.01:10; 50 | # index_weighted =10./(1+tf.exp(-(index-5))); 51 | index_weighted=tf.div(10.0,tf.add(tf.exp(tf.add(tf.multiply(index,-10.0),5.0)),1.0)) 52 | #plot(x,y) 53 | # 
k=1 54 | # for i in range(256): 55 | # for j in range(256): 56 | # tf.add(hist_1[0,img1[i,j]],1) 57 | # tf.add(hist_2[0,img2[i,j]],1) 58 | # print(k) 59 | # k=k+1 60 | # 61 | return index_weighted 62 | 63 | 64 | x_img = tf.placeholder(tf.float32,shape=[1, 256, 256,3],name='x_img') #输入的x域图像 65 | y_img = tf.placeholder(tf.float32,shape=[1, 256, 256,3],name='y_img') #输入的y域图像 66 | djj = color_similar(x_img,y_img) -------------------------------------------------------------------------------- /draft_3.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Fri Aug 24 21:22:00 2018 5 | 6 | @author: usrp1 7 | """ 8 | import matplotlib.pyplot as plt # plt 用于显示图片 9 | import matplotlib.image as mpimg # mpimg 用于读取图片 10 | import random 11 | import numpy as np 12 | import argparse 13 | import tensorflow as tf 14 | import cv2 15 | 16 | def hue_similar(img1,img2): 17 | img1_hsv = tf.image.rgb_to_hsv(img1); 18 | img1_h=tf.floor(tf.multiply(tf.add(img1_hsv[:,:,0],0),255)) 19 | img1_s=tf.floor(tf.multiply(tf.add(img1_hsv[:,:,1],0),255)) 20 | img1_v=tf.floor(tf.multiply(tf.add(img1_hsv[:,:,2],0),255)) 21 | 22 | hist_1_h = tf.histogram_fixed_width(values=img1_h,nbins=256,value_range=[0.0,255.0],name='hist_1_h',dtype=tf.float32) 23 | hist_1_h_norm = tf.multiply(hist_1_h,1.0/65536,name='hist_1_h_norm') 24 | 25 | hist_1_s = tf.histogram_fixed_width(values=img1_s,nbins=256,value_range=[0.0,255.0],name='hist_1_s',dtype=tf.float32) 26 | hist_1_s_norm = tf.multiply(hist_1_s,1.0/65536,name='hist_1_s_norm') 27 | 28 | hist_1_v = tf.histogram_fixed_width(values=img1_v,nbins=256,value_range=[0.0,255.0],name='hist_1_v',dtype=tf.float32) 29 | hist_1_v_norm = tf.multiply(hist_1_v,1.0/65536,name='hist_1_v_norm') 30 | 31 | img2_hsv = tf.image.rgb_to_hsv(img2); 32 | img2_h=tf.floor(tf.multiply(tf.add(img2_hsv[:,:,0],0),255)) 33 | img2_s=tf.floor(tf.multiply(tf.add(img2_hsv[:,:,1],0),255)) 34 | img2_v=tf.floor(tf.multiply(tf.add(img2_hsv[:,:,2],0),255)) 35 | 36 | hist_2_h = tf.histogram_fixed_width(values=img2_h,nbins=256,value_range=[0.0,255.0],name='hist_2_h',dtype=tf.float32) 37 | hist_2_h_norm = tf.multiply(hist_2_h,1.0/65536,name='hist_2_h_norm') 38 | 39 | hist_2_s = tf.histogram_fixed_width(values=img2_s,nbins=256,value_range=[0.0,255.0],name='hist_2_s',dtype=tf.float32) 40 | hist_2_s_norm = tf.multiply(hist_2_s,1.0/65536,name='hist_2_s_norm') 41 | 42 | hist_2_v = tf.histogram_fixed_width(values=img2_v,nbins=256,value_range=[0.0,255.0],name='hist_2_v',dtype=tf.float32) 43 | hist_2_v_norm = tf.multiply(hist_2_v,1.0/65536,name='hist_2_v_norm') 44 | 45 | index_h =1- tf.reduce_sum(tf.minimum(hist_1_h_norm,hist_2_h_norm)) 46 | index_s =1- tf.reduce_sum(tf.minimum(hist_1_s_norm,hist_2_s_norm)) 47 | index_v =1- tf.reduce_sum(tf.minimum(hist_1_v_norm,hist_2_v_norm)) 48 | 49 | return (0.5*index_h+0.25*index_s+0.25*index_v) 50 | 51 | 52 | x=tf.placeholder(tf.float32,shape=[1],name='x') 53 | r = tf.cond(tf.less(x[0], tf.constant(0.5)), lambda:tf.multiply(x[0],2.0), lambda:tf.multiply(x[0],3.0)) 54 | config = tf.ConfigProto() 55 | config.gpu_options.allow_growth = True #设定显存不超量使用 56 | sess = tf.Session(config=config) #新建会话层 57 | init = tf.global_variables_initializer() #参数初始化器 58 | 59 | sess.run(init) #初始化所有可训练参数 60 | #b = random.sample(range(15),13) 61 | #djj = hue_similar(np.uint8(y_image_RGB),np.uint8(x_image_RGB)) 62 | #y_image = cv2.imread('//home//usrp1//djj_cycle_GAN//img//apple2orange//testA//2.jpg') 
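A note on the histogram distance built in draft and draft_3.py: both scripts compute a per-channel histogram intersection (the sum of the bin-wise minima of two normalized 256-bin histograms), which equals 1 for identical distributions; draft then pushes the intersection through a sigmoid-style weighting, while draft_3.py uses 1 minus the intersection per H/S/V channel and combines the channels with weights 0.5/0.25/0.25. Depending on the TensorFlow version, tf.histogram_fixed_width may only accept integer output dtypes, so the dtype=tf.float32 calls in these drafts can fail; the final training script ("test", further below) requests tf.int32 and casts to float instead. The following is a minimal self-contained sketch (TensorFlow 1.x) of that per-channel distance using the int32-plus-cast pattern; the function name channel_hist_distance and the random test data are illustrative and not part of the repository.

import numpy as np
import tensorflow as tf

def channel_hist_distance(ch1, ch2, nbins=256, npixels=256 * 256):
    # ch1, ch2: 2-D float tensors holding one channel each, values in [0, 255].
    h1 = tf.cast(tf.histogram_fixed_width(ch1, [0.0, 255.0], nbins=nbins), tf.float32) / npixels
    h2 = tf.cast(tf.histogram_fixed_width(ch2, [0.0, 255.0], nbins=nbins), tf.float32) / npixels
    # Histogram intersection is 1 when the two normalized histograms coincide,
    # so 1 - intersection behaves like a distance in [0, 1].
    return 1.0 - tf.reduce_sum(tf.minimum(h1, h2))

ch_a = tf.placeholder(tf.float32, shape=[256, 256], name='ch_a')
ch_b = tf.placeholder(tf.float32, shape=[256, 256], name='ch_b')
dist = channel_hist_distance(ch_a, ch_b)

with tf.Session() as sess:
    a = np.random.uniform(0.0, 255.0, size=(256, 256)).astype(np.float32)
    print(sess.run(dist, feed_dict={ch_a: a, ch_b: a}))        # ~0.0 for identical histograms
    print(sess.run(dist, feed_dict={ch_a: a, ch_b: a * 0.5}))  # ~0.5: half the mass stops overlapping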
-------------------------------------------------------------------------------- /evaluate.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Jul 22 09:24:33 2018 4 | 5 | @author: djj 6 | """ 7 | 8 | from __future__ import print_function 9 | 10 | import argparse 11 | from datetime import datetime 12 | from random import shuffle 13 | import os 14 | import sys 15 | import time 16 | import math 17 | import tensorflow as tf 18 | import numpy as np 19 | import glob 20 | import cv2 21 | 22 | from test_image_reader import * 23 | from net import * 24 | 25 | parser = argparse.ArgumentParser(description='') 26 | 27 | parser.add_argument("--x_test_data_path", default='/underwater/', help="path of x test datas.") #x域的测试图片路径 28 | parser.add_argument("--y_test_data_path", default='/underwater/', help="path of y test datas.") #y域的测试图片路径 29 | parser.add_argument("--image_size", type=int, default=256, help="load image size") #网络输入的尺度 30 | parser.add_argument("--snapshots", default='./snapshots/',help="Path of Snapshots") #读取训练好的模型参数的路径 31 | parser.add_argument("--out_dir_x", default='./test_output_x/',help="Output Folder") #保存x域的输入图片与生成的y域图片的路径 32 | parser.add_argument("--out_dir_y", default='./test_output_y/',help="Output Folder") #保存y域的输入图片与生成的x域图片的路径 33 | 34 | args = parser.parse_args() 35 | 36 | def make_test_data_list(x_data_path, y_data_path): #make_test_data_list函数得到测试中的x域和y域的图像路径名称列表 37 | x_input_images = glob.glob(os.path.join(x_data_path, "*")) #读取全部的x域图像路径名称列表 38 | y_input_images = glob.glob(os.path.join(y_data_path, "*")) #读取全部的y域图像路径名称列表 39 | return x_input_images, y_input_images 40 | 41 | def cv_inv_proc(img): #cv_inv_proc函数将读取图片时归一化的图片还原成原图 42 | img_rgb = (img + 1.) 
* 127.5 43 | return img_rgb.astype(np.float32) #bgr 44 | 45 | def get_write_picture(x_image, y_image, fake_y, fake_x): #get_write_picture函数得到网络测试结果 46 | x_image = cv_inv_proc(x_image) #还原x域的图像 47 | y_image = cv_inv_proc(y_image) #还原y域的图像 48 | fake_y = cv_inv_proc(fake_y[0]) #还原生成的y域的图像 49 | fake_x = cv_inv_proc(fake_x[0]) #还原生成的x域的图像 50 | x_output = np.concatenate((x_image, fake_y), axis=1) #得到x域的输入图像以及对应的生成的y域图像 51 | y_output = np.concatenate((y_image, fake_x), axis=1) #得到y域的输入图像以及对应的生成的x域图像 52 | return x_output, y_output 53 | 54 | def main(): 55 | if not os.path.exists(args.out_dir_x): #如果保存x域测试结果的文件夹不存在则创建 56 | os.makedirs(args.out_dir_x) 57 | if not os.path.exists(args.out_dir_y): #如果保存y域测试结果的文件夹不存在则创建 58 | os.makedirs(args.out_dir_y) 59 | 60 | x_datalists, y_datalists = make_test_data_list(args.x_test_data_path, args.y_test_data_path) #得到待测试的x域和y域图像路径名称列表 61 | test_x_image = tf.placeholder(tf.float32,shape=[1, 256, 256, 3], name = 'test_x_image') #输入的x域图像 62 | test_y_image = tf.placeholder(tf.float32,shape=[1, 256, 256, 3], name = 'test_y_image') #输入的y域图像 63 | 64 | fake_y = generator(image=test_x_image, reuse=False, name='generator_x2y') #得到生成的y域图像 65 | fake_x = generator(image=test_y_image, reuse=False, name='generator_y2x') #得到生成的x域图像 66 | 67 | restore_var = [v for v in tf.global_variables() if 'generator' in v.name] #需要载入的已训练的模型参数 68 | 69 | config = tf.ConfigProto() 70 | config.gpu_options.allow_growth = True #设定显存不超量使用 71 | sess = tf.Session(config=config) #建立会话层 72 | 73 | saver = tf.train.Saver(var_list=restore_var,max_to_keep=1) #导入模型参数时使用 74 | checkpoint = tf.train.latest_checkpoint(args.snapshots) #读取模型参数 75 | saver.restore(sess, checkpoint) #导入模型参数 76 | 77 | total_step = len(x_datalists) if len(x_datalists) > len(y_datalists) else len(y_datalists) #测试的总步数 78 | for step in range(total_step): 79 | test_ximage_name, test_ximage = TestImageReader(x_datalists, step, args.image_size) #得到x域的输入及名称 80 | test_yimage_name, test_yimage = TestImageReader(y_datalists, step, args.image_size) #得到y域的输入及名称 81 | batch_x_image = np.expand_dims(np.array(test_ximage).astype(np.float32), axis = 0) #填充维度 82 | batch_y_image = np.expand_dims(np.array(test_yimage).astype(np.float32), axis = 0) #填充维度 83 | feed_dict = { test_x_image : batch_x_image, test_y_image : batch_y_image} #建立feed_dict 84 | fake_y_value, fake_x_value = sess.run([fake_y, fake_x], feed_dict=feed_dict) #得到生成的y域图像与x域图像 85 | x_write_image, y_write_image = get_write_picture(test_ximage, test_yimage, fake_y_value, fake_x_value) #得到最终的图片结果 86 | x_write_image_name = args.out_dir_x + "/"+ test_ximage_name + ".png" #待保存的x域图像与其对应的y域生成结果名字 87 | y_write_image_name = args.out_dir_y + "/"+ test_yimage_name + ".png" #待保存的y域图像与其对应的x域生成结果名字 88 | cv2.imwrite(x_write_image_name, x_write_image) #保存图像 89 | cv2.imwrite(y_write_image_name, y_write_image) #保存图像 90 | print('step {:d}'.format(step)) 91 | 92 | if __name__ == '__main__': 93 | main() 94 | -------------------------------------------------------------------------------- /ssim_ms.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Thu Sep 20 21:22:55 2018 4 | 5 | @author: djj 6 | """ 7 | import cv2 8 | import tensorflow as tf 9 | import numpy as np 10 | 11 | def _tf_fspecial_gauss(size, sigma): 12 | """Function to mimic the 'fspecial' gaussian MATLAB function 13 | """ 14 | x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1] 15 | 16 | x_data = np.expand_dims(x_data, axis=-1) 17 | x_data = 
np.expand_dims(x_data, axis=-1) 18 | 19 | y_data = np.expand_dims(y_data, axis=-1) 20 | y_data = np.expand_dims(y_data, axis=-1) 21 | 22 | x = tf.constant(x_data, dtype=tf.float32) 23 | y = tf.constant(y_data, dtype=tf.float32) 24 | 25 | g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2))) 26 | return g / tf.reduce_sum(g) 27 | 28 | 29 | def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=1.5): 30 | window = _tf_fspecial_gauss(size, sigma) # window shape [size, size] 31 | K1 = 0.01 32 | K2 = 0.03 33 | L = 1 # depth of image (255 in case the image has a differnt scale) 34 | C1 = (K1*L)**2 35 | C2 = (K2*L)**2 36 | mu1 = tf.nn.conv2d(img1, window, strides=[1,1,1,1], padding='VALID') 37 | mu2 = tf.nn.conv2d(img2, window, strides=[1,1,1,1],padding='VALID') 38 | mu1_sq = mu1*mu1 39 | mu2_sq = mu2*mu2 40 | mu1_mu2 = mu1*mu2 41 | sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1,1,1,1],padding='VALID') - mu1_sq 42 | sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1,1,1,1],padding='VALID') - mu2_sq 43 | sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1,1,1,1],padding='VALID') - mu1_mu2 44 | if cs_map: 45 | value = (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)* 46 | (sigma1_sq + sigma2_sq + C2)), 47 | (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)) 48 | else: 49 | # value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)* 50 | # (sigma1_sq + sigma2_sq + C2)) 51 | value=tf.div(tf.multiply(tf.add(tf.multiply(2,mu1_mu2),C1),tf.add(tf.multiply(2,sigma12),C2)),tf.multiply(tf.add(tf.add(mu1_sq,mu2_sq),C1),tf.add(tf.add(sigma1_sq,sigma2_sq),C2))) 52 | 53 | if mean_metric: 54 | value = tf.reduce_mean(value) 55 | return value 56 | 57 | 58 | def tf_ms_ssim(img1, img2, mean_metric=True, level=5): 59 | weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32) 60 | mssim = [] 61 | mcs = [] 62 | for l in range(level): 63 | ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False) 64 | mssim.append(tf.reduce_mean(ssim_map)) 65 | mcs.append(tf.reduce_mean(cs_map)) 66 | filtered_im1 = tf.nn.avg_pool(img1, [1,2,2,1], [1,2,2,1], padding='SAME') 67 | filtered_im2 = tf.nn.avg_pool(img2, [1,2,2,1], [1,2,2,1], padding='SAME') 68 | img1 = filtered_im1 69 | img2 = filtered_im2 70 | 71 | # list to tensor of dim D+1 72 | mssim = tf.stack(mssim, axis=0) 73 | mcs = tf.stack(mcs, axis=0) 74 | 75 | value = (tf.reduce_prod(mcs[0:level-1]**weight[0:level-1])* 76 | (mssim[level-1]**weight[level-1])) 77 | 78 | if mean_metric: 79 | value = tf.reduce_mean(value) 80 | return value 81 | 82 | x_img = tf.placeholder(tf.float32,shape=[1, 256, 256,1],name='x_img') #输入的x域图像,n*col*row*channel 83 | y_img = tf.placeholder(tf.float32,shape=[1, 256, 256,1],name='y_img') #输入的y域图像,n*col*row*channel 84 | 85 | tf_ssim_loss_ms = tf_ms_ssim(x_img, y_img, mean_metric=True, level=5) 86 | tf_ssim_loss = tf_ssim(x_img, y_img, cs_map=False, mean_metric=True, size=11, sigma=1.5) 87 | 88 | config = tf.ConfigProto() 89 | config.gpu_options.allow_growth = True #设定显存不超量使用 90 | sess = tf.Session(config=config) #新建会话层 91 | init = tf.global_variables_initializer() #参数初始化器 92 | sess.run(init) #初始化所有可训练参数 93 | 94 | 95 | 96 | 97 | img1_gray = np.zeros((1,256,256,1)) 98 | img2_gray = np.zeros((1,256,256,1)) 99 | 100 | img1 = np.float32(cv2.imread('//home//usrp1//djj_cycle_GAN//img//tongue_type_1//type_1_standard_msr//image_1.jpg')) 101 | img1 =cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) 102 | img1 = (cv2.resize(img1, (256, 256),interpolation=cv2.INTER_AREA)) #改变读取的x域图片的大小 103 | 
img1_gray[0,:,:,0] = img1 104 | #img1 =cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY) 105 | #img1 = (cv2.resize(img1, (256, 256),interpolation=cv2.INTER_AREA))/255 #改变读取的x域图片的大小 106 | #img1_gray[0,:,:,0] = img1 107 | #img1 = np.expand_dims(np.array(img1).astype(np.float32), axis = 0) #填充维度 108 | 109 | img2 = np.float32(cv2.imread('//home//usrp1//djj_cycle_GAN//img//tongue_type_1//type_1_standard_msr//image_1.jpg')) 110 | img2 =cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY) 111 | img2 = (cv2.resize(img2, (256, 256),interpolation=cv2.INTER_AREA)) #改变读取的x域图片的大小 112 | img2_gray[0,:,:,0] = img2 113 | 114 | #img2 = np.expand_dims(np.array(img2).astype(np.float32), axis = 0) #填充维度 115 | feed_dict={x_img:img1_gray,y_img:img2_gray} 116 | ## 117 | ssim_value_ms,ssim_loss = sess.run([tf_ssim_loss_ms,tf_ssim_loss],feed_dict = feed_dict) 118 | -------------------------------------------------------------------------------- /ssim: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Jul 30 12:47:57 2018 5 | 6 | @author: usrp1 7 | """ 8 | 9 | from random import shuffle 10 | import numpy as np 11 | import argparse 12 | import tensorflow as tf 13 | import random 14 | import cv2 15 | from numba import jit 16 | #import ssim_exp 17 | #from skimage.measure import _structural_similarity as ssim 18 | #import function.ssim_multiscale as ssim 19 | #import skimage.measure._structural_similarity 20 | 21 | #image_1为原图,image_2为生成图片 22 | #@jit 23 | #def ssim(image_1,image_2,c_1=0.02,c_2=0.03,patch=3.,expand=1):##expand和patch是联动的 24 | # 25 | # img_x_1 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_x_1' ) 26 | # img_x_2 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_x_2' ) 27 | # img_x_3 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_x_3' ) 28 | # img_y_1 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_y_1' ) 29 | # img_y_2 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_y_2' ) 30 | # img_y_3 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_y_3' ) 31 | # 32 | # img_x_1 = tf.assign(img_x_1[expand:-expand,expand:-expand], image_1[0,:,:,0], validate_shape=True, use_locking=None, name=None) 33 | # img_x_2 = tf.assign(img_x_2[expand:-expand,expand:-expand], image_1[0,:,:,1], validate_shape=True, use_locking=None, name=None) 34 | # img_x_3 = tf.assign(img_x_3[expand:-expand,expand:-expand], image_1[0,:,:,2], validate_shape=True, use_locking=None, name=None) 35 | # 36 | # img_y_1 = tf.assign(img_y_1[expand:-expand,expand:-expand], image_2[0,:,:,0], validate_shape=True, use_locking=None, name=None) 37 | # img_y_2 = tf.assign(img_y_2[expand:-expand,expand:-expand], image_2[0,:,:,1], validate_shape=True, use_locking=None, name=None) 38 | # img_y_3 = tf.assign(img_y_3[expand:-expand,expand:-expand], image_2[0,:,:,2], validate_shape=True, use_locking=None, name=None) 39 | # 40 | # ssim_loss_1 = tf.Variable( 0,name = 'ssim_loss_1' ) 41 | # for i in range(256): 42 | # for j in range(256): 43 | # u_x_1 = tf.reduce_mean(img_x_1[i:i+patch,j:j+patch]) 44 | # u_y_1 = tf.reduce_mean(img_y_1[i:i+patch,j:j+patch]) 45 | # v_x_1 = tf.reduce_mean((img_x_1[i:i+patch,j:j+patch]-u_x_1)**2) 46 | # v_y_1 = tf.reduce_mean((img_y_1[i:i+patch,j:j+patch]-u_y_1)**2) 47 | # c_v_1 = tf.reduce_mean( (img_y_1[i:i+patch,j:j+patch]-u_y_1)*(img_x_1[i:i+patch,j:j+patch]-u_x_1) ) 48 | # return img_x_1 49 | #image_1 = cv2.imread('E:\\transfer_learning\\image_sets\\type_1\\image_1.jpg') 50 | 
#image_2 = cv2.imread('E:\\transfer_learning\\image_sets\\type_1\\image_2.jpg') 51 | #index = ssim.compare_ssim(image_1,image_2,win_size = 13,multichannel = True,gaussian_weights=True) 52 | #djj_1 = np.ones((1,2000,1504,3)) 53 | #djj_2 = np.ones((1,2000,1504,3)) 54 | #djj_1[0,:] = image_1[:] 55 | #djj_2[0,:] = image_2[:] 56 | #index = ssim.compare_ssim(djj_1,djj_2,win_size = 13,multichannel = True,gaussian_weights=True) 57 | #x = np.array([[1,1],[2,2]]) 58 | #y = np.var(image_1) 59 | #z = np.mean(image_1) 60 | #w = np.cov(image_1[:,:,0],image_2[:,:,0]) 61 | 62 | ######################################################################################### 63 | patch=3 64 | expand=1 65 | image_1 = tf.placeholder(dtype=tf.float32,shape=[1,256, 256,1],name='x_img') #输入的x域图像 66 | image_2 = tf.placeholder(dtype=tf.float32,shape=[1,256, 256,1],name='y_img') #输入的y域图像 67 | #img_x_1 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_x_1' ) 68 | #img_x_2 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_x_2' ) 69 | #img_x_3 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_x_3' ) 70 | #img_y_1 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_y_1' ) 71 | #img_y_2 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_y_2' ) 72 | #img_y_3 =tf.Variable( tf.zeros((256+patch-1,256+patch-1)),name = 'img_y_3' ) 73 | # 74 | #img_x_1 = tf.assign(img_x_1[expand:-expand,expand:-expand], image_1[0,:,:,0], validate_shape=True, use_locking=None, name=None) 75 | #img_x_2 = tf.assign(img_x_2[expand:-expand,expand:-expand], image_1[0,:,:,1], validate_shape=True, use_locking=None, name=None) 76 | #img_x_3 = tf.assign(img_x_3[expand:-expand,expand:-expand], image_1[0,:,:,2], validate_shape=True, use_locking=None, name=None) 77 | # 78 | #img_y_1 = tf.assign(img_y_1[expand:-expand,expand:-expand], image_2[0,:,:,0], validate_shape=True, use_locking=None, name=None) 79 | #img_y_2 = tf.assign(img_y_2[expand:-expand,expand:-expand], image_2[0,:,:,1], validate_shape=True, use_locking=None, name=None) 80 | #img_y_3 = tf.assign(img_y_3[expand:-expand,expand:-expand], image_2[0,:,:,2], validate_shape=True, use_locking=None, name=None) 81 | #with tf.device("/gpu:0"): 82 | #ssim_loss_1 = tf.Variable( 0,name = 'ssim_loss_1' ) 83 | k=1 84 | gauss_filter = np.array([1,4,7,4,1,4,16,26,16,4,7,26,41,26,7,4,16,26,16,4,1,4,7,4,1])/273.0 85 | gauss_filter.reshape((5,5,1,1))##我先用的5*5的滤波器################################### 86 | gauss_filter = tf.constant(value =gauss_filter ,shape=[5, 5, 1, 1],dtype=tf.float32,name='gauss_filter') 87 | #gauss_filter=np.array([[1,4,7,4,1],[4,16,26,16,4],[7,26,41,26,7],[4,16,26,16,4],[1,4,7,4,1]])/273.0 88 | #gauss_filter = tf.constant(gauss_filter,name='gauss_filter',dtype=tf.float32) 89 | image_1_u = tf.nn.conv2d(image_1,gauss_filter, [1, 1, 1, 1], padding = 'SAME') 90 | image_1_u2 = tf.multiply(image_1_u,image_1_u) 91 | 92 | image_2_u = tf.nn.conv2d(image_2,gauss_filter, [1, 1, 1, 1], padding = 'SAME') 93 | image_2_u2 = tf.multiply(image_2_u,image_2_u) 94 | 95 | image_u1_u2 = tf.multiply(image_2_u,image_1_u) 96 | 97 | var_image_1 = tf.nn.conv2d(tf.multiply(image_1,image_1),gauss_filter, [1, 1, 1, 1], padding = 'SAME') -image_1_u2 98 | var_image_2 = tf.nn.conv2d(tf.multiply(image_2,image_2),gauss_filter, [1, 1, 1, 1], padding = 'SAME') -image_2_u2 99 | var_image_12 = tf.nn.conv2d(tf.multiply(image_1,image_2),gauss_filter, [1, 1, 1, 1], padding = 'SAME') -image_u1_u2 100 | #ssim_map = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + 
sigma2_sq + C2)); 101 | c_1 = 0.02 102 | c_2 = 0.03 103 | ssim_map = tf.multiply(tf.divide((2* image_u1_u2+c_1),(image_1_u2 +image_2_u2 + c_1 )),tf.divide((2*var_image_12+c_2),(var_image_1+var_image_2+c_2))) 104 | ssim_ch1=1-tf.reduce_mean(ssim_map) 105 | #for i in range(256): 106 | # for j in range(256): 107 | # u_x_1 = tf.reduce_mean(img_x_1[i:i+patch,j:j+patch]) 108 | # u_y_1 = tf.reduce_mean(img_y_1[i:i+patch,j:j+patch]) 109 | # v_x_1 = tf.reduce_mean((img_x_1[i:i+patch,j:j+patch]-u_x_1)**2) 110 | # v_y_1 = tf.reduce_mean((img_y_1[i:i+patch,j:j+patch]-u_y_1)**2) 111 | # c_v_1 = tf.reduce_mean( (img_y_1[i:i+patch,j:j+patch]-u_y_1)*(img_x_1[i:i+patch,j:j+patch]-u_x_1) ) 112 | # print(k) 113 | # k+=1 114 | 115 | 116 | ########################################################################################## 117 | #image_1 = tf.placeholder(dtype=tf.float32,shape=[256, 256,3],name='x_img') #输入的x域图像 118 | #image_2 = tf.placeholder(dtype=tf.float32,shape=[256, 256,3],name='y_img') #输入的y域图像 119 | ################image sizes are n*row*col*channel 120 | config = tf.ConfigProto() 121 | config.gpu_options.allow_growth = True #设定显存不超量使用 122 | sess = tf.Session(config=config) #新建会话层 123 | init = tf.global_variables_initializer() #参数初始化器 124 | sess.run(init) #初始化所有可训练参数 125 | 126 | patch=3 127 | a = np.ones((1,256,256,1)) 128 | b = np.zeros((1,256,256,1)) 129 | ha = sess.run(ssim_ch1,feed_dict={image_1:a,image_2:b }) -------------------------------------------------------------------------------- /net.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sun Jul 22 09:24:14 2018 4 | 5 | @author: djj 6 | """ 7 | 8 | import numpy as np 9 | import tensorflow as tf 10 | import math 11 | import random 12 | #构造可训练参数 13 | def make_var(name, shape, trainable = True): 14 | return tf.get_variable(name, shape, trainable = trainable) 15 | 16 | #定义卷积层 17 | def conv2d(input_, output_dim, kernel_size, stride, padding = "SAME", name = "conv2d", biased = True): 18 | input_dim = input_.get_shape()[-1] 19 | # with tf.device('/gpu:1'): 20 | with tf.variable_scope(name): 21 | kernel = make_var(name = 'weights', shape=[kernel_size, kernel_size, input_dim, output_dim]) 22 | output = tf.nn.conv2d(input_, kernel, [1, stride, stride, 1], padding = padding) 23 | if biased: 24 | biases = make_var(name = 'biases', shape = [output_dim]) 25 | output = tf.nn.bias_add(output, biases) 26 | return output 27 | 28 | #定义空洞卷积层 29 | def atrous_conv2d(input_, output_dim, kernel_size, dilation, padding = "SAME", name = "atrous_conv2d", biased = False): 30 | input_dim = input_.get_shape()[-1] 31 | with tf.variable_scope(name): 32 | kernel = make_var(name = 'weights', shape = [kernel_size, kernel_size, input_dim, output_dim]) 33 | output = tf.nn.atrous_conv2d(input_, kernel, dilation, padding = padding) 34 | if biased: 35 | biases = make_var(name = 'biases', shape = [output_dim]) 36 | output = tf.nn.bias_add(output, biases) 37 | return output 38 | 39 | #定义反卷积层 40 | def deconv2d(input_, output_dim, kernel_size, stride, padding = "SAME", name = "deconv2d"): 41 | input_dim = input_.get_shape()[-1] 42 | input_height = int(input_.get_shape()[1]) 43 | input_width = int(input_.get_shape()[2]) 44 | with tf.variable_scope(name): 45 | kernel = make_var(name = 'weights', shape = [kernel_size, kernel_size, output_dim, input_dim]) 46 | output = tf.nn.conv2d_transpose(input_, kernel, [1, input_height * 2, input_width * 2, output_dim], [1, 2, 2, 1], padding = "SAME") 47 | 
return output 48 | 49 | #定义batchnorm(批次归一化)层 50 | def batch_norm(input_, name="batch_norm"): 51 | with tf.variable_scope(name): 52 | input_dim = input_.get_shape()[-1] 53 | scale = tf.get_variable("scale", [input_dim], initializer=tf.random_normal_initializer(1.0, 0.02, dtype=tf.float32)) 54 | offset = tf.get_variable("offset", [input_dim], initializer=tf.constant_initializer(0.0)) 55 | mean, variance = tf.nn.moments(input_, axes=[1,2], keep_dims=True) 56 | epsilon = 1e-5 57 | inv = tf.rsqrt(variance + epsilon) 58 | normalized = (input_-mean)*inv 59 | output = scale*normalized + offset 60 | return output 61 | 62 | #定义最大池化层 63 | def max_pooling(input_, kernel_size, stride, name, padding = "SAME"): 64 | return tf.nn.max_pool(input_, ksize=[1, kernel_size, kernel_size, 1], strides=[1, stride, stride, 1], padding=padding, name=name) 65 | 66 | #定义平均池化层 67 | def avg_pooling(input_, kernel_size, stride, name, padding = "SAME"): 68 | return tf.nn.avg_pool(input_, ksize=[1, kernel_size, kernel_size, 1], strides=[1, stride, stride, 1], padding=padding, name=name) 69 | 70 | #定义lrelu激活层 71 | def lrelu(x, leak=0.2, name = "lrelu"): 72 | return tf.maximum(x, leak*x) 73 | 74 | #定义relu激活层 75 | def relu(input_, name = "relu"): 76 | return tf.nn.relu(input_, name = name) 77 | 78 | #定义残差块 79 | def residule_block_33(input_, output_dim, kernel_size = 3, stride = 1, dilation = 2, atrous = False, name = "res"): 80 | if atrous: 81 | conv2dc0 = atrous_conv2d(input_ = input_, output_dim = output_dim, kernel_size = kernel_size, dilation = dilation, name = (name + '_c0')) 82 | conv2dc0_norm = batch_norm(input_ = conv2dc0, name = (name + '_bn0')) 83 | conv2dc0_relu = relu(input_ = conv2dc0_norm) 84 | conv2dc1 = atrous_conv2d(input_ = conv2dc0_relu, output_dim = output_dim, kernel_size = kernel_size, dilation = dilation, name = (name + '_c1')) 85 | conv2dc1_norm = batch_norm(input_ = conv2dc1, name = (name + '_bn1')) 86 | else: 87 | conv2dc0 = conv2d(input_ = input_, output_dim = output_dim, kernel_size = kernel_size, stride = stride, name = (name + '_c0')) 88 | conv2dc0_norm = batch_norm(input_ = conv2dc0, name = (name + '_bn0')) 89 | conv2dc0_relu = relu(input_ = conv2dc0_norm) 90 | conv2dc1 = conv2d(input_ = conv2dc0_relu, output_dim = output_dim, kernel_size = kernel_size, stride = stride, name = (name + '_c1')) 91 | conv2dc1_norm = batch_norm(input_ = conv2dc1, name = (name + '_bn1')) 92 | add_raw = input_ + conv2dc1_norm 93 | output = relu(input_ = add_raw) 94 | return output 95 | 96 | #定义生成器 97 | def generator(image, gf_dim=64, reuse=False, name="generator"): 98 | #生成器输入尺度: 1*256*256*3 99 | input_dim = image.get_shape()[-1] 100 | # with tf.device('/gpu:1'): 101 | with tf.variable_scope(name): 102 | if reuse: 103 | tf.get_variable_scope().reuse_variables() 104 | else: 105 | assert tf.get_variable_scope().reuse is False 106 | #第1个卷积模块,输出尺度: 1*256*256*64 107 | c0 = relu(batch_norm(conv2d(input_ = image, output_dim = gf_dim, kernel_size = 7, stride = 1, name = 'g_e0_c'), name = 'g_e0_bn')) 108 | #第2个卷积模块,输出尺度: 1*128*128*128 109 | c1 = relu(batch_norm(conv2d(input_ = c0, output_dim = gf_dim * 2, kernel_size = 3, stride = 2, name = 'g_e1_c'), name = 'g_e1_bn')) 110 | #第3个卷积模块,输出尺度: 1*64*64*256 111 | c2 = relu(batch_norm(conv2d(input_ = c1, output_dim = gf_dim * 4, kernel_size = 3, stride = 2, name = 'g_e2_c'), name = 'g_e2_bn')) 112 | 113 | #9个残差块: 114 | r1 = residule_block_33(input_ = c2, output_dim = gf_dim*4, atrous = False, name='g_r1') 115 | r2 = residule_block_33(input_ = r1, output_dim = gf_dim*4, atrous = 
False, name='g_r2') 116 | r3 = residule_block_33(input_ = r2, output_dim = gf_dim*4, atrous = False, name='g_r3') 117 | r4 = residule_block_33(input_ = r3, output_dim = gf_dim*4, atrous = False, name='g_r4') 118 | r5 = residule_block_33(input_ = r4, output_dim = gf_dim*4, atrous = False, name='g_r5') 119 | r6 = residule_block_33(input_ = r5, output_dim = gf_dim*4, atrous = False, name='g_r6') 120 | r7 = residule_block_33(input_ = r6, output_dim = gf_dim*4, atrous = False, name='g_r7') 121 | r8 = residule_block_33(input_ = r7, output_dim = gf_dim*4, atrous = False, name='g_r8') 122 | r9 = residule_block_33(input_ = r8, output_dim = gf_dim*4, atrous = False, name='g_r9') 123 | #第9个残差块的输出尺度: 5*64*64*256 124 | 125 | #第1个反卷积模块,输出尺度: 1*128*128*128 126 | d1 = relu(batch_norm(deconv2d(input_ = r9, output_dim = gf_dim * 2, kernel_size = 3, stride = 2, name = 'g_d1_dc'),name = 'g_d1_bn')) 127 | #第2个反卷积模块,输出尺度: 1*256*256*64 128 | d2 = relu(batch_norm(deconv2d(input_ = d1, output_dim = gf_dim, kernel_size = 3, stride = 2, name = 'g_d2_dc'),name = 'g_d2_bn')) 129 | # d3 = relu(batch_norm(deconv2d(input_ = d2, output_dim = gf_dim, kernel_size = 3, stride = 2, name = 'g_d3_dc'),name = 'g_d3_bn')) 130 | # d4 = relu(batch_norm(deconv2d(input_ = d3, output_dim = gf_dim, kernel_size = 3, stride = 2, name = 'g_d4_dc'),name = 'g_d4_bn')) 131 | #最后一个卷积模块,输出尺度: 1*256*256*3 132 | d3 = conv2d(input_=d2, output_dim = input_dim, kernel_size = 7, stride = 1, name = 'g_d3_c') 133 | d4 = conv2d(input_=d3, output_dim = input_dim, kernel_size = 7, stride = 1, name = 'g_d4_c') 134 | d5 = conv2d(input_=d4, output_dim = input_dim, kernel_size = 7, stride = 1, name = 'g_d5_c') 135 | # d3 = conv2d(input_=d2, output_dim = input_dim, kernel_size = 7, stride = 1, name = 'g_d4_c') 136 | # d3 = conv2d(input_=d2, output_dim = input_dim, kernel_size = 7, stride = 1, name = 'g_d5_c') 137 | #经过tanh函数激活得到生成的输出 138 | output = tf.nn.tanh(d5) 139 | return output 140 | 141 | #定义判别器,buffer is used to store previous images which were generated in the certain domain. 
142 | def discriminator(image, df_dim=64, reuse=False, name="discriminator",buffer_use=False,buffer = None): 143 | with tf.variable_scope(name): 144 | if reuse: 145 | tf.get_variable_scope().reuse_variables() 146 | else: 147 | assert tf.get_variable_scope().reuse is False 148 | 149 | if buffer_use==False: 150 | # with tf.device('/gpu:0'): 151 | image_crop_1 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_1') 152 | image_crop_2 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_2') 153 | image_crop_3 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_3') 154 | image_crop_4 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_4') 155 | image_crop_5 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_5') 156 | image_crop_6 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_6') 157 | image_crop_7 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_7') 158 | image_crop_8 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_8') 159 | image_crop_9 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_9') 160 | image_crop_10 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_10') 161 | image_crop_11 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_11') 162 | image_crop_12 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_12') 163 | image_crop_13 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_13') 164 | image_crop_14 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_14') 165 | image_crop_15 = tf.random_crop(image, [1, 30, 30, 3],name='image_crop_15') 166 | image_ensemble = tf.concat([image_crop_1,image_crop_2,image_crop_3,image_crop_4,image_crop_5,image_crop_6,image_crop_7,image_crop_8,image_crop_9,image_crop_10,image_crop_11,image_crop_12,image_crop_13,image_crop_14,image_crop_15],axis=0) 167 | #第1个卷积模块,输出尺度: 1*128*128*64 168 | h0 = lrelu(conv2d(input_ = image_ensemble, output_dim = df_dim, kernel_size = 4, stride = 2, name='d_h0_conv')) 169 | #第2个卷积模块,输出尺度: 1*64*64*128 170 | h1 = lrelu(batch_norm(conv2d(input_ = h0, output_dim = df_dim*2, kernel_size = 4, stride = 2, name='d_h1_conv'), 'd_bn1')) 171 | #第3个卷积模块,输出尺度: 1*32*32*256 172 | h2 = lrelu(batch_norm(conv2d(input_ = h1, output_dim = df_dim*4, kernel_size = 4, stride = 2, name='d_h2_conv'), 'd_bn2')) 173 | #第4个卷积模块,输出尺度: 1*32*32*512 174 | h3 = lrelu(batch_norm(conv2d(input_ = h2, output_dim = df_dim*8, kernel_size = 4, stride = 2, name='d_h3_conv'), 'd_bn3')) 175 | #最后一个卷积模块,输出尺度: 1*32*32*1 176 | output = conv2d(input_ = h3, output_dim = 1, kernel_size = 4, stride = 1, name='d_h4_conv') 177 | else: 178 | rand_seed = random.sample(range(15),13) 179 | for i in range(13): 180 | tf.assign(buffer[rand_seed[i],:,:,:],tf.random_crop(image, [1, 70, 70, 3])) 181 | 182 | # image_crop_1 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_1') 183 | # image_crop_2 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_2') 184 | # image_crop_3 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_3') 185 | # image_crop_4 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_4') 186 | # image_crop_5 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_5') 187 | # image_crop_6 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_6') 188 | # image_crop_7 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_7') 189 | # image_crop_8 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_8') 190 | # image_crop_9 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_9') 191 | # image_crop_10 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_10') 192 | # 
image_crop_11 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_11') 193 | # image_crop_12 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_12') 194 | # image_crop_13 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_13') 195 | # image_crop_14 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_14') 196 | # image_crop_15 = tf.random_crop(image, [1, 70, 70, 3],name='image_crop_15') 197 | # image_ensemble = tf.concat([image_crop_1,image_crop_2,image_crop_3,image_crop_4,image_crop_5,image_crop_6,image_crop_7,image_crop_8,image_crop_9,image_crop_10,image_crop_11,image_crop_12,image_crop_13,image_crop_14,image_crop_15],axis=0) 198 | ## random_seed =random.sample(range(0,20),20) 199 | # for i in range(1): 200 | # tf.assign(buffer[random_seed[i],:,:,:],tf.random_crop(image, [1, 70, 70, 3],seed=None)) 201 | # 第1个卷积模块,输出尺度: 1*128*128*64 202 | h0 = lrelu(conv2d(input_ = buffer, output_dim = df_dim, kernel_size = 4, stride = 2, name='d_h0_conv')) 203 | #第2个卷积模块,输出尺度: 1*64*64*128 204 | h1 = lrelu(batch_norm(conv2d(input_ = h0, output_dim = df_dim*2, kernel_size = 4, stride = 2, name='d_h1_conv'), 'd_bn1')) 205 | #第3个卷积模块,输出尺度: 1*32*32*256 206 | h2 = lrelu(batch_norm(conv2d(input_ = h1, output_dim = df_dim*4, kernel_size = 4, stride = 2, name='d_h2_conv'), 'd_bn2')) 207 | #第4个卷积模块,输出尺度: 1*32*32*512 208 | h3 = lrelu(batch_norm(conv2d(input_ = h2, output_dim = df_dim*8, kernel_size = 4, stride = 2, name='d_h3_conv'), 'd_bn3')) 209 | #最后一个卷积模块,输出尺度: 1*32*32*1 210 | output = conv2d(input_ = h3, output_dim = 1, kernel_size = 4, stride = 1, name='d_h4_conv') 211 | return output 212 | -------------------------------------------------------------------------------- /test: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Sun Dec 2 14:18:45 2018 5 | 6 | @author: usrp1 7 | """ 8 | 9 | from __future__ import print_function 10 | 11 | #from numba import jit 12 | import argparse 13 | from datetime import datetime 14 | from random import shuffle 15 | import random 16 | import os 17 | import sys 18 | import time 19 | import math 20 | import tensorflow as tf 21 | import numpy as np 22 | import glob 23 | import cv2 24 | from skimage.measure import _structural_similarity as ssim 25 | #import random 26 | #import TrainImageReader 27 | 28 | from train_image_reader import * 29 | from net import * 30 | 31 | parser = argparse.ArgumentParser(description='') 32 | 33 | parser.add_argument("--snapshot_dir", default='./snapshots', help="path of snapshots") #保存模型的路径 34 | parser.add_argument("--out_dir", default='./train_out', help="path of train outputs") #训练时保存可视化输出的路径 35 | parser.add_argument("--image_size", type=int, default=256, help="load image size") #网络输入的尺度 36 | parser.add_argument("--random_seed", type=int, default=1234, help="random seed") #随机数种子 37 | parser.add_argument('--base_lr', type=float, default=0.0002, help='initial learning rate for adam') #基础学习率 38 | parser.add_argument('--epoch', dest='epoch', type=int, default=4000, help='# of epoch') #训练的epoch数量 39 | parser.add_argument('--epoch_step', dest='epoch_step', type=int, default=4000, help='# of epoch to decay lr') #训练中保持学习率不变的epoch数量 40 | parser.add_argument("--lamda", type=float, default=10.0, help="L1 lamda") #训练中L1_Loss前的乘数 41 | parser.add_argument('--beta1', dest='beta1', type=float, default=0.5, help='momentum term of adam') #adam优化器的beta1参数 42 | parser.add_argument("--summary_pred_every", type=int, default=1000, help="times to 
summary.") #训练中每过多少step保存训练日志(记录一下loss值) 43 | parser.add_argument("--write_pred_every", type=int, default=100, help="times to write.") #训练中每过多少step保存可视化结果 44 | parser.add_argument("--save_pred_every", type=int, default=10000, help="times to save.") #训练中每过多少step保存模型(可训练参数) 45 | parser.add_argument("--x_train_data_path", default='//home//usrp1//djj_cycle_GAN//img//type_9//随机色温图片数据增强//', help="path of x training datas.") #x域的训练图片路径 46 | parser.add_argument("--y_train_data_path", default='//home//usrp1//djj_cycle_GAN//img//type_9//色温6500K数据增强//', help="path of y training datas.") #y域的训练图片路径 47 | args = parser.parse_args() 48 | 49 | #def save(saver, sess, logdir, step): #保存模型的save函数 50 | # model_name = 'model' #保存的模型名前缀 51 | # checkpoint_path = os.path.join(logdir, model_name) #模型的保存路径与名称 52 | # if not os.path.exists(logdir): #如果路径不存在即创建 53 | # os.makedirs(logdir) 54 | # saver.save(sess, checkpoint_path, global_step=step) #保存模型 55 | # print('The checkpoint has been created.') 56 | 57 | def cv_inv_proc(img): #cv_inv_proc函数将读取图片时归一化的图片还原成原图 58 | img_rgb = (img + 1.) * 127.5 59 | return img_rgb.astype(np.float32) #返回bgr格式的图像,方便cv2写图像 60 | 61 | def get_write_picture(x_image, y_image, fake_y, fake_x_, fake_x, fake_y_): #get_write_picture函数得到训练过程中的可视化结果 62 | x_image = cv_inv_proc(x_image) #还原x域的图像 63 | y_image = cv_inv_proc(y_image) #还原y域的图像 64 | x_image = x_image[0] 65 | y_image = y_image[0] 66 | fake_y = cv_inv_proc(fake_y[0]) #还原生成的y域的图像 67 | fake_x_ = cv_inv_proc(fake_x_[0]) #还原重建的x域的图像 68 | fake_x = cv_inv_proc(fake_x[0]) #还原生成的x域的图像 69 | fake_y_ = cv_inv_proc(fake_y_[0]) #还原重建的y域的图像 70 | row1 = np.concatenate((x_image, fake_y, fake_x_), axis=1) #得到训练中可视化结果的第一行 71 | row2 = np.concatenate((y_image, fake_x, fake_y_), axis=1) #得到训练中可视化结果的第二行 72 | output = np.concatenate((row1, row2), axis=0) #得到训练中可视化结果 73 | return output 74 | 75 | def make_train_data_list(x_data_path, y_data_path): #make_train_data_list函数得到训练中的x域和y域的图像路径名称列表 76 | x_input_images_raw = glob.glob(os.path.join(x_data_path, "*")) #读取全部的x域图像路径名称列表 77 | y_input_images_raw = glob.glob(os.path.join(y_data_path, "*")) #读取全部的y域图像路径名称列表 78 | x_input_images, y_input_images = add_train_list(x_input_images_raw, y_input_images_raw) #将x域图像数量与y域图像数量对齐 79 | return x_input_images, y_input_images 80 | 81 | def add_train_list(x_input_images_raw, y_input_images_raw): #add_train_list函数将x域和y域的图像数量变成一致 82 | if len(x_input_images_raw) == len(y_input_images_raw): #如果x域和y域图像数量本来就一致,直接返回 83 | return shuffle(x_input_images_raw), shuffle(y_input_images_raw) 84 | elif len(x_input_images_raw) > len(y_input_images_raw): #如果x域的训练图像数量大于y域的训练图像数量,则随机选择y域的图像补充y域 85 | mul_num = int(len(x_input_images_raw)/len(y_input_images_raw)) #计算两域图像数量相差的倍数 86 | y_append_num = len(x_input_images_raw) - len(y_input_images_raw)*mul_num #计算需要随机出的y域图像数量 87 | append_list = [random.randint(0,len(y_input_images_raw)-1) for i in range(y_append_num)] #得到需要补充的y域图像下标 88 | y_append_images = [] #初始化需要被补充的y域图像路径名称列表 89 | for a in append_list: 90 | y_append_images.append(y_input_images_raw[a]) 91 | y_input_images = y_input_images_raw * mul_num + y_append_images #得到数量与x域一致的y域图像 92 | shuffle(x_input_images_raw) #随机打乱x域图像顺序 93 | shuffle(y_input_images) #随机打乱y域图像顺序 94 | return x_input_images_raw, y_input_images #返回数量一致的x域和y域图像路径名称列表 95 | else: #与elif中的逻辑一致,只是x与y互换,不再赘述 96 | mul_num = int(len(y_input_images_raw)/len(x_input_images_raw)) 97 | x_append_num = len(y_input_images_raw) - len(x_input_images_raw)*mul_num 98 | append_list = [random.randint(0,len(x_input_images_raw)-1) for i in 
range(x_append_num)] 99 | x_append_images = [] 100 | for a in append_list: 101 | x_append_images.append(x_input_images_raw[a]) 102 | x_input_images = x_input_images_raw * mul_num + x_append_images 103 | shuffle(y_input_images_raw) 104 | shuffle(x_input_images) 105 | return x_input_images, y_input_images_raw 106 | #@jit 107 | def l1_loss(src, dst): #定义l1_loss 108 | return tf.reduce_mean(tf.abs(src - dst)) 109 | #@jit 110 | def gan_loss(src, dst): #定义gan_loss,在这里用了二范数 111 | return tf.reduce_mean((src-dst)**2) 112 | 113 | def var(name): 114 | return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) 115 | #@jit 116 | def ssim_tf(image_1,image_2,c_1 = 0.01,c_2 = 0.03): 117 | image_1=(image_1+1)*127.5 118 | image_2=(image_2+1)*127.5 119 | image_1 =tf.reshape(image_1,[1,256,256,1]) 120 | image_2 =tf.reshape(image_2,[1,256,256,1]) 121 | # gauss_filter=tf.constant(np.random.normal(loc=0,scale=1,size=(5,5,1,1)),name = 'gauss_filter',dtype=tf.float32) 122 | gauss_filter = np.array([1,4,7,4,1,4,16,26,16,4,7,26,41,26,7,4,16,26,16,4,1,4,7,4,1])/273.0 123 | gauss_filter.reshape((5,5,1,1))##我先用的5*5的滤波器################################### 124 | gauss_filter = tf.constant(value =gauss_filter ,shape=[5, 5, 1, 1],dtype=tf.float32,name='gauss_filter') 125 | #gauss_filter=np.array([[1,4,7,4,1],[4,16,26,16,4],[7,26,41,26,7],[4,16,26,16,4],[1,4,7,4,1]])/273.0 126 | #gauss_filter = tf.constant(gauss_filter,name='gauss_filter',dtype=tf.float32) 127 | image_1_u = tf.nn.conv2d(image_1,gauss_filter, [1, 1, 1, 1], padding = 'SAME') 128 | image_1_u2 = tf.multiply(image_1_u,image_1_u) 129 | 130 | image_2_u = tf.nn.conv2d(image_2,gauss_filter, [1, 1, 1, 1], padding = 'SAME') 131 | image_2_u2 = tf.multiply(image_2_u,image_2_u) 132 | 133 | image_u1_u2 = tf.multiply(image_2_u,image_1_u) 134 | 135 | var_image_1 = tf.nn.conv2d(tf.multiply(image_1,image_1),gauss_filter, [1, 1, 1, 1], padding = 'SAME') -image_1_u2 136 | var_image_2 = tf.nn.conv2d(tf.multiply(image_2,image_2),gauss_filter, [1, 1, 1, 1], padding = 'SAME') -image_2_u2 137 | var_image_12 = tf.nn.conv2d(tf.multiply(image_1,image_2),gauss_filter, [1, 1, 1, 1], padding = 'SAME') -image_u1_u2 138 | #ssim_map = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2)); 139 | c_1 = (0.01*255)*(0.01*255)### 140 | c_2 = (0.03*255)*(0.03*255)### 141 | ssim_map = tf.multiply(tf.divide((2* image_u1_u2+c_1),(image_1_u2 +image_2_u2 + c_1 )),tf.divide((2*var_image_12+c_2),(var_image_1+var_image_2+c_2))) 142 | ssim_ch=1-tf.reduce_mean(ssim_map) 143 | return ssim_ch 144 | 145 | 146 | 147 | def color_similar(img1,img2): 148 | img1 = tf.multiply(tf.add(img1,1.0),127.5) 149 | img2 = tf.multiply(tf.add(img2,1.0),127.5) 150 | 151 | img1_r=tf.floor(tf.multiply(tf.add(img1[0,:,:,0],0),255)) 152 | img1_g=tf.floor(tf.multiply(tf.add(img1[0,:,:,1],0),255)) 153 | img1_b=tf.floor(tf.multiply(tf.add(img1[0,:,:,2],0),255)) 154 | 155 | img2_r=tf.floor(tf.multiply(tf.add(img2[0,:,:,0],0),255)) 156 | img2_g=tf.floor(tf.multiply(tf.add(img2[0,:,:,1],0),255)) 157 | img2_b=tf.floor(tf.multiply(tf.add(img2[0,:,:,2],0),255)) 158 | 159 | hist_1_r = tf.histogram_fixed_width(values=img1_r,nbins=256,value_range=[0.0,255.0],name='hist_1_r',dtype=tf.float32) 160 | hist_1_g = tf.histogram_fixed_width(values=img1_g,nbins=256,value_range=[0.0,255.0],name='hist_1_g',dtype=tf.float32) 161 | hist_1_b = tf.histogram_fixed_width(values=img1_b,nbins=256,value_range=[0.0,255.0],name='hist_1_b',dtype=tf.float32) 162 | 163 | hist_2_r = 
tf.histogram_fixed_width(values=img2_r,nbins=256,value_range=[0.0,255.0],name='hist_2_r',dtype=tf.float32) 164 | hist_2_g = tf.histogram_fixed_width(values=img2_g,nbins=256,value_range=[0.0,255.0],name='hist_2_g',dtype=tf.float32) 165 | hist_2_b = tf.histogram_fixed_width(values=img2_b,nbins=256,value_range=[0.0,255.0],name='hist_2_b',dtype=tf.float32) 166 | 167 | # print(hist_1) 168 | hist_1_r_norm = tf.multiply(hist_1_r,1.0/65536,name='hist_1_r_norm') 169 | hist_1_g_norm = tf.multiply(hist_1_g,1.0/65536,name='hist_1_g_norm') 170 | hist_1_b_norm = tf.multiply(hist_1_b,1.0/65536,name='hist_1_b_norm') 171 | 172 | hist_2_r_norm = tf.multiply(hist_2_r,1.0/65536,name='hist_2_r_norm') 173 | hist_2_g_norm = tf.multiply(hist_2_g,1.0/65536,name='hist_2_g_norm') 174 | hist_2_b_norm = tf.multiply(hist_2_b,1.0/65536,name='hist_2_b_norm') 175 | 176 | 177 | 178 | index_r = tf.reduce_sum(tf.minimum(hist_1_r_norm,hist_2_r_norm)) 179 | index_g = tf.reduce_sum(tf.minimum(hist_1_g_norm,hist_2_g_norm)) 180 | index_b = tf.reduce_sum(tf.minimum(hist_1_b_norm,hist_2_b_norm)) 181 | index = (index_r+index_g+index_b)/3.0 182 | # k=1 183 | # for i in range(256): 184 | # for j in range(256): 185 | # tf.add(hist_1[0,img1[i,j]],1) 186 | # tf.add(hist_2[0,img2[i,j]],1) 187 | # print(k) 188 | # k=k+1 189 | # 190 | return 1-index 191 | 192 | 193 | ################################################################################################ 194 | ################################################################################################ 195 | #@jit 196 | def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=5, sigma=1.5): 197 | gauss_filter = np.array([1,4,7,4,1,4,16,26,16,4,7,26,41,26,7,4,16,26,16,4,1,4,7,4,1])/273.0 198 | gauss_filter.reshape((5,5,1,1))##我先用的5*5的滤波器################################### 199 | window = tf.constant(value =gauss_filter ,shape=[5, 5, 1, 1],dtype=tf.float32,name='window') 200 | # window = _tf_fspecial_gauss(size, sigma) # window shape [size, size] 201 | K1 = 0.01 202 | K2 = 0.03 203 | L = 255 # depth of image (255 in case the image has a differnt scale) 204 | C1 = (K1*L)**2 205 | C2 = (K2*L)**2 206 | mu1 = tf.nn.conv2d(img1, window, strides=[1,1,1,1], padding='SAME') 207 | mu2 = tf.nn.conv2d(img2, window, strides=[1,1,1,1],padding='SAME') 208 | mu1_sq = mu1*mu1 209 | mu2_sq = mu2*mu2 210 | mu1_mu2 = mu1*mu2 211 | sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1,1,1,1],padding='SAME') - mu1_sq 212 | sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1,1,1,1],padding='SAME') - mu2_sq 213 | sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1,1,1,1],padding='SAME') - mu1_mu2 214 | if cs_map: 215 | value = (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)* 216 | (sigma1_sq + sigma2_sq + C2)), 217 | (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)) ###### SSIM_map,C(X,Y) 218 | else: 219 | value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)* 220 | (sigma1_sq + sigma2_sq + C2)) 221 | 222 | if mean_metric: 223 | value = tf.reduce_mean(value) 224 | return value 225 | 226 | #@jit 227 | def tf_ms_ssim(img1, img2, mean_metric=True, level=5): 228 | img1 =tf.reshape(img1,[1,256,256,1]) 229 | img2 =tf.reshape(img2,[1,256,256,1]) 230 | img1=(img1+1)*127.5 231 | img2=(img2+1)*127.5 232 | weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32) 233 | mssim = [] 234 | mcs = [] 235 | for l in range(level): 236 | ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False) 237 | mssim.append(tf.reduce_mean(ssim_map)) 238 | 

################################################################################################
################################################################################################
#@jit
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=5, sigma=1.5):
    # 5x5 Gaussian window (same integer approximation as in ssim_tf); size and sigma are kept
    # in the signature for compatibility, but the fixed 5x5 kernel is what is actually used
    gauss_filter = np.array([1,4,7,4,1,4,16,26,16,4,7,26,41,26,7,4,16,26,16,4,1,4,7,4,1]) / 273.0
    gauss_filter = gauss_filter.reshape((5, 5, 1, 1))
    window = tf.constant(value=gauss_filter, shape=[5, 5, 1, 1], dtype=tf.float32, name='window')
    # window = _tf_fspecial_gauss(size, sigma)  # window shape [size, size]
    K1 = 0.01
    K2 = 0.03
    L = 255  # dynamic range of the image (255 here; adjust if the images use a different scale)
    C1 = (K1*L)**2
    C2 = (K2*L)**2
    mu1 = tf.nn.conv2d(img1, window, strides=[1, 1, 1, 1], padding='SAME')
    mu2 = tf.nn.conv2d(img2, window, strides=[1, 1, 1, 1], padding='SAME')
    mu1_sq = mu1*mu1
    mu2_sq = mu2*mu2
    mu1_mu2 = mu1*mu2
    sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1, 1, 1, 1], padding='SAME') - mu1_sq
    sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1, 1, 1, 1], padding='SAME') - mu2_sq
    sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1, 1, 1, 1], padding='SAME') - mu1_mu2
    if cs_map:
        # return the pair (SSIM map, contrast-structure map C(x, y))
        value = (((2*mu1_mu2 + C1)*(2*sigma12 + C2)) / ((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2)),
                 (2.0*sigma12 + C2) / (sigma1_sq + sigma2_sq + C2))
    else:
        value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2)) / ((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))

    if mean_metric:
        value = tf.reduce_mean(value)
    return value

#@jit
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
    img1 = tf.reshape(img1, [1, 256, 256, 1])
    img2 = tf.reshape(img2, [1, 256, 256, 1])
    img1 = (img1 + 1) * 127.5  # back to the 0-255 scale expected by tf_ssim (L = 255)
    img2 = (img2 + 1) * 127.5
    weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
    mssim = []
    mcs = []
    for l in range(level):
        ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
        mssim.append(tf.reduce_mean(ssim_map))
        mcs.append(tf.reduce_mean(cs_map))
        filtered_im1 = tf.nn.avg_pool(img1, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')  # downsample by 2 for the next scale
        filtered_im2 = tf.nn.avg_pool(img2, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        img1 = filtered_im1
        img2 = filtered_im2

    # list to tensor of dim D+1
    mssim = tf.stack(mssim, axis=0)
    mcs = tf.stack(mcs, axis=0)

    # note: the standard MS-SSIM keeps only mssim[level-1]; this variant weights the SSIM of
    # every scale and then averages
    value = (tf.reduce_prod(mcs[0:level-1]**weight[0:level-1]) *
             (mssim[0:level]**weight[0:level]))

    if mean_metric:
        value = 1 - tf.reduce_mean(value)  # return as a loss
    return value
################################################################################################
################################################################################################
#@jit
def hue_similar(img1, img2):  # dissimilarity of HSV histograms
    # tf.image.rgb_to_hsv expects inputs in [0, 1], so map from [-1, 1] to [0, 1] here
    img1 = tf.multiply(tf.add(img1, 1.0), 0.5)
    img2 = tf.multiply(tf.add(img2, 1.0), 0.5)

    img1_hsv = tf.image.rgb_to_hsv(img1)
    img1_h = tf.floor(tf.multiply(img1_hsv[:, :, 0], 255.0))
    img1_s = tf.floor(tf.multiply(img1_hsv[:, :, 1], 255.0))
    img1_v = tf.floor(tf.multiply(img1_hsv[:, :, 2], 255.0))

    hist_1_h = tf.histogram_fixed_width(values=img1_h, nbins=256, value_range=[0.0, 255.0], name='hist_1_h', dtype=tf.int32)
    hist_1_h_norm = tf.multiply(tf.cast(hist_1_h, dtype=tf.float32), 1.0/65536, name='hist_1_h_norm')

    hist_1_s = tf.histogram_fixed_width(values=img1_s, nbins=256, value_range=[0.0, 255.0], name='hist_1_s', dtype=tf.int32)
    hist_1_s_norm = tf.multiply(tf.cast(hist_1_s, dtype=tf.float32), 1.0/65536, name='hist_1_s_norm')

    hist_1_v = tf.histogram_fixed_width(values=img1_v, nbins=256, value_range=[0.0, 255.0], name='hist_1_v', dtype=tf.int32)
    hist_1_v_norm = tf.multiply(tf.cast(hist_1_v, dtype=tf.float32), 1.0/65536, name='hist_1_v_norm')

    img2_hsv = tf.image.rgb_to_hsv(img2)
    img2_h = tf.floor(tf.multiply(img2_hsv[:, :, 0], 255.0))
    img2_s = tf.floor(tf.multiply(img2_hsv[:, :, 1], 255.0))
    img2_v = tf.floor(tf.multiply(img2_hsv[:, :, 2], 255.0))

    hist_2_h = tf.histogram_fixed_width(values=img2_h, nbins=256, value_range=[0.0, 255.0], name='hist_2_h', dtype=tf.int32)
    hist_2_h_norm = tf.multiply(tf.cast(hist_2_h, dtype=tf.float32), 1.0/65536, name='hist_2_h_norm')

    hist_2_s = tf.histogram_fixed_width(values=img2_s, nbins=256, value_range=[0.0, 255.0], name='hist_2_s', dtype=tf.int32)
    hist_2_s_norm = tf.multiply(tf.cast(hist_2_s, dtype=tf.float32), 1.0/65536, name='hist_2_s_norm')

    hist_2_v = tf.histogram_fixed_width(values=img2_v, nbins=256, value_range=[0.0, 255.0], name='hist_2_v', dtype=tf.int32)
    hist_2_v_norm = tf.multiply(tf.cast(hist_2_v, dtype=tf.float32), 1.0/65536, name='hist_2_v_norm')

    index_h = 1 - tf.reduce_sum(tf.minimum(hist_1_h_norm, hist_2_h_norm))
    index_s = 1 - tf.reduce_sum(tf.minimum(hist_1_s_norm, hist_2_s_norm))
    index_v = 1 - tf.reduce_sum(tf.minimum(hist_1_v_norm, hist_2_v_norm))

    return 0.3*index_h + 0.3*index_s + 0.3*index_v  # weights of 0.3 each (note they sum to 0.9, not 1)
#@jit
def color_index_adjust(index):  # index is in [0, 1]; amplify it (roughly x10) through a shifted logistic before weighting
    # return tf.cond(index < tf.constant(0.25), lambda: 0.0, lambda: tf.div(10.0, tf.add(tf.exp(tf.add(tf.multiply(index, -10.0), 5.0)), 1.5)) + 0.3)
    return tf.div(10.0, tf.add(tf.exp(tf.add(tf.multiply(index, -10.0), 5.0)), 1.0)) + 1
#@jit
def ssim_index_adjust(index):
    return tf.div(10.0, tf.add(tf.exp(tf.add(tf.multiply(index, -10.0), 5.0)), 1.5))
    # return tf.multiply(index, 10.0)
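
# Rough shape of the two weighting curves above, evaluated in NumPy for a few index values
# (illustrative sketch only, not part of the original script). color_index_adjust maps
# 0 -> ~1.07, 0.5 -> 6.0 and 1 -> ~10.9, so the weight ramps up as the colour mismatch grows;
# ssim_index_adjust follows the same logistic but saturates near ~6.6. The helper name is
# hypothetical and it is never called.
def _print_adjust_curves():
    for x in (0.0, 0.25, 0.5, 0.75, 1.0):
        color_w = 10.0 / (np.exp(-10.0 * x + 5.0) + 1.0) + 1.0  # same formula as color_index_adjust
        ssim_w = 10.0 / (np.exp(-10.0 * x + 5.0) + 1.5)         # same formula as ssim_index_adjust
        print('index=%.2f  color_weight=%.3f  ssim_weight=%.3f' % (x, color_w, ssim_w))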

def main():
    # if not os.path.exists(args.snapshot_dir):  # create the snapshot folder if it does not exist
    #     os.makedirs(args.snapshot_dir)
    # if not os.path.exists(args.out_dir):       # create the folder for visualised output if it does not exist
    #     os.makedirs(args.out_dir)
    x_datalists, y_datalists = make_train_data_list(args.x_train_data_path, args.y_train_data_path)  # equally sized lists of x-domain and y-domain image paths
    tf.set_random_seed(args.random_seed)  # fix the random seed
    x_img = tf.placeholder(tf.float32, shape=[1, args.image_size, args.image_size, 3], name='x_img')  # input x-domain image
    y_img = tf.placeholder(tf.float32, shape=[1, args.image_size, args.image_size, 3], name='y_img')  # input y-domain image

    buffer_Dx = tf.Variable(tf.truncated_normal([15, 70, 70, 3]), name='buffuer_Dx')
    buffer_Dy = tf.Variable(tf.truncated_normal([15, 70, 70, 3]), name='buffuer_Dy')
    # image tensors are n*row*col*channel
    fake_y = generator(image=x_img, reuse=False, name='generator_x2y')    # generated y-domain image
    fake_x_ = generator(image=fake_y, reuse=False, name='generator_y2x')  # reconstructed x-domain image
    fake_x = generator(image=y_img, reuse=True, name='generator_y2x')     # generated x-domain image
    fake_y_ = generator(image=fake_x, reuse=True, name='generator_x2y')   # reconstructed y-domain image

    dy_fake = discriminator(image=fake_y, reuse=False, name='discriminator_y', buffer_use=False, buffer=buffer_Dy)  # discriminator output for the generated y-domain image
    dx_fake = discriminator(image=fake_x, reuse=False, name='discriminator_x', buffer_use=False, buffer=buffer_Dx)  # discriminator output for the generated x-domain image
    dy_real = discriminator(image=y_img, reuse=True, name='discriminator_y', buffer_use=False)  # discriminator output for the real y-domain image
    dx_real = discriminator(image=x_img, reuse=True, name='discriminator_x', buffer_use=False)  # discriminator output for the real x-domain image

    lr = tf.placeholder(tf.float32, None, name='learning_rate')  # learning rate (fed but unused in this test run)
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # let GPU memory grow instead of pre-allocating all of it
    sess = tf.Session(config=config)  # create the session
    init = tf.global_variables_initializer()
    sess.run(init)  # initialise all variables
    saver = tf.train.Saver()
    saver.restore(sess, '/home/usrp1/djj_cycle_GAN/CycleGAN_color_correction/snapshots/cycle_model-159')  # restore the trained CycleGAN weights
    lrate = 0.0001
    x_out_image = np.ones((1, 256, 256, 3))
    y_out_image = np.ones((1, 256, 256, 3))
    x_image = np.float32(cv2.imread('/home/usrp1/djj_cycle_GAN/img/type_9/test/IMG_0605.JPG'))  # read one x-domain test image
    y_image = np.float32(cv2.imread('/home/usrp1/djj_cycle_GAN/img/type_9/test/IMG_0605.JPG'))  # read one y-domain image (the same test file is used for both domains here)
    x_image_resize_t = cv2.resize(x_image, (256, 256), interpolation=cv2.INTER_AREA)  # resize the x-domain image
    x_image_resize = x_image_resize_t/127.5 - 1.  # normalise to [-1, 1]
    y_image_resize_t = cv2.resize(y_image, (256, 256), interpolation=cv2.INTER_AREA)  # resize the y-domain image
    y_image_resize = y_image_resize_t/127.5 - 1.  # normalise to [-1, 1]
    x_out_image[0, :, :, :] = x_image_resize
    y_out_image[0, :, :, :] = y_image_resize
    feed_dict = {lr: lrate, x_img: x_out_image, y_img: y_out_image}  # build the feed_dict
    fake_y_value, fake_x__value, fake_x_value, fake_y__value = sess.run([fake_y, fake_x_, fake_x, fake_y_], feed_dict=feed_dict)  # run the four network outputs
    write_image = get_write_picture(x_out_image, y_out_image, fake_y_value, fake_x__value, fake_x_value, fake_y__value)  # assemble the visualisation
    cv2.imwrite('/home/usrp1/djj_cycle_GAN/CycleGAN_color_correction/test_img/test.png', write_image)  # save the visualisation

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------