├── Model
│   ├── __init__.py
│   ├── MLP.py
│   └── helpingFunctions_v2.py
├── PACS
│   ├── __init__.py
│   ├── datagenerator.py
│   ├── alex_cnn_top_adv.py
│   ├── alex_cnn_top.py
│   └── alex_cnn_baseline.py
├── MNIST_R
│   ├── __init__.py
│   ├── calculateResults.py
│   ├── cnn_v2_baseline.py
│   ├── mnist_r.py
│   ├── rotate.sh
│   └── cnn_v2.py
├── office
│   ├── __init__.py
│   ├── organizeImages.py
│   ├── visualize.py
│   ├── mlp.sh
│   ├── MLP_baseline.py
│   └── MLP.py
├── sentiment
│   ├── __init__.py
│   ├── cnn_baseline.py
│   ├── cnn_mlp.py
│   ├── cnn_v2.py
│   └── visualize.py
├── GLCM_Testing
│   ├── __init__.py
│   ├── PatternClassifier.py
│   ├── nglcm.py
│   └── cnn_glcm.py
├── MNIST_Pattern
│   ├── __init__.py
│   ├── organizeDFTimages.py
│   ├── mnist.sh
│   ├── cnn_v2_baseline.py
│   ├── visualize.py
│   ├── cnn_v2_mlp.py
│   └── cnn_v2_dft.py
├── dataGeneration
│   ├── __init__.py
│   ├── dataInspection.py
│   ├── add_background_256.py
│   ├── add_pattern.py
│   ├── addBackground.py
│   ├── mnist_loader.py
│   ├── dataGeneration.py
│   └── addBackground_diffpos_28.py
├── simulations
│   ├── __init__.py
│   ├── helpingMethods.py
│   ├── Lasso.py
│   ├── script.py
│   └── HEX_linear.py
├── script
│   ├── dog.jpg
│   └── imageProcessing.py
├── .gitignore
└── README.md
/Model/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | -------------------------------------------------------------------------------- /PACS/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | -------------------------------------------------------------------------------- /MNIST_R/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | -------------------------------------------------------------------------------- /office/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | -------------------------------------------------------------------------------- /sentiment/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | -------------------------------------------------------------------------------- /GLCM_Testing/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | -------------------------------------------------------------------------------- /MNIST_Pattern/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | -------------------------------------------------------------------------------- /dataGeneration/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | -------------------------------------------------------------------------------- /simulations/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | -------------------------------------------------------------------------------- /script/dog.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HaohanWang/HEX_experiments/HEAD/script/dog.jpg -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/* 2 | *.pyc 3 | *.npy 4 | data/* 5 | *.txt 6 | *.png 7 | 
*status* 8 | *.log 9 | *ckpts* 10 | images/* 11 | test/* 12 | results/* 13 | *.pdf 14 | *.jpg 15 | 16 | 17 | -------------------------------------------------------------------------------- /MNIST_R/calculateResults.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | 5 | def checkScore(cat): 6 | scores = [] 7 | for i in range(10): 8 | text = [line.strip() for line in open('../results/MNIST_R/hex_'+str(cat)+'_'+str(i)+'.txt')][-1] 9 | scores.append(float(text.split()[-1])) 10 | return np.mean(scores) 11 | 12 | if __name__ == '__main__': 13 | for i in range(6): 14 | print checkScore(i) -------------------------------------------------------------------------------- /dataGeneration/dataInspection.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | from dataLoader import * 4 | 5 | seed = 0 6 | corr = 0.8 7 | 8 | # n = 500 9 | # p = 1000 10 | # group = 2 11 | # 12 | # Xtrain, Ytrain, Xval, Yval, Xtest, Ytest = loadData(seed, n, p, corr, group) 13 | # 14 | # from matplotlib import pyplot as plt 15 | # 16 | # plt.scatter(xrange(n), Ytrain[:,0]) 17 | # # plt.scatter(xrange(n), Ytest[:,0]+0.1) 18 | # plt.ylim(-0.1, 1.2) 19 | # plt.show() 20 | # 21 | # X = np.append(Xtrain, Xval, 0) 22 | # X = np.append(X, Xtest, 0) 23 | # plt.imshow(X) 24 | # plt.show() 25 | # 26 | # d = np.dot(Xtrain, Xtrain.T) 27 | # np.matrix.sort(d, 0) 28 | # np.matrix.sort(d, 1) 29 | # 30 | # plt.imshow(d) 31 | # plt.show() 32 | 33 | r = np.load('../Model/results_useful.npy') 34 | print r -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Learning Robust Representations by Projecting Superficial Statistics Out 2 | 3 | This repository contains the code used to generate the main experiment results in the paper [Learning Robust Representations by Projecting Superficial Statistics Out](https://openreview.net/pdf?id=rJEjjoR9K7). 4 | 5 | Note that this repository holds the code that can replicate the experiments. The code may look messy (we keep it exactly as it was when the paper was submitted) and is not a good reference for how to use the paper's main contribution. 6 | 7 | **Update**: Please see [here](https://github.com/HaohanWang/HEX_experiments/issues/1) for instructions to replicate the PACS results. 8 | 9 | ### Clear Demonstration 10 | For a clearer demonstration of how to use the main idea of the paper, please visit [HaohanWang/HEX](https://github.com/HaohanWang/HEX) 11 | 12 | ### Contact 13 | - [Haohan Wang](http://www.cs.cmu.edu/~haohanw/) 14 | - [@HaohanWang](https://twitter.com/HaohanWang) 15 | -------------------------------------------------------------------------------- /script/imageProcessing.py: -------------------------------------------------------------------------------- 1 | # -*- encoding=utf-8 -*- 2 | import cv2 3 | import numpy as np 4 | import math 5 | 6 | def run(ngray=16): 7 | img = cv2.imread('dog.jpg', cv2.IMREAD_UNCHANGED) 8 | img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_CUBIC) 9 | 10 | print ("deal data with ngray=%d..." 
% (ngray)) 11 | row = 0 12 | column = 1 13 | direction=np.diag((-1)*np.ones(256*256)) 14 | for i in range(256*256): 15 | x=int(math.floor(i/256)) 16 | y=int(i%256) 17 | if x+row<256 and y+column<256: 18 | direction[i][i+row*256+column]=1 19 | 20 | imgray=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY) 21 | 22 | re = np.copy(imgray).reshape(256*256) 23 | d = np.copy(imgray).reshape(256*256) 24 | 25 | re = np.asarray(1.0 * re * (ngray-1) / re.max(), dtype=np.int16) 26 | d =np.dot(re,direction) 27 | 28 | re = re.reshape([256, 256]) 29 | d = d.reshape([256, 256]) 30 | 31 | re = 256*re/float(re.max()) 32 | d = 256*d/float(d.max()) 33 | 34 | 35 | cv2.imwrite('x.jpg', img) 36 | cv2.imwrite('re.jpg', re) 37 | cv2.imwrite('d.jpg', d) 38 | 39 | if __name__ == '__main__': 40 | run() 41 | -------------------------------------------------------------------------------- /office/organizeImages.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import cv2 4 | import numpy as np 5 | 6 | def loadImages(cn): 7 | results = [] 8 | for dn in ['amazon', 'dslr', 'webcam']: 9 | r = [] 10 | for i in range(1, 10): 11 | filename = '../data/office/Original_images/'+dn+'/images/'+cn+'/frame_000'+str(i) + '.jpg' 12 | print filename 13 | img = cv2.imread(filename, cv2.IMREAD_UNCHANGED) 14 | img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_CUBIC) 15 | r.append(img) 16 | results.append(r) 17 | return results 18 | 19 | def writeMergeImages(cn): 20 | Images = np.zeros([260*3, 260*9, 3]) 21 | results = loadImages(cn) 22 | for i in range(len(results)): 23 | for j in range(len(results[i])): 24 | Images[1+i*260:1+i*260+256, 1+j*260:1+j*260+256,:] = results[i][j] 25 | 26 | cv2.imwrite('resultImages/'+cn+'.jpg', Images) 27 | 28 | 29 | 30 | if __name__ == '__main__': 31 | dic = {'calculator': 5, 'ring_binder': 27, 'printer': 12, 32 | 'keyboard': 30, 'scissors': 26, 'laptop_computer': 7, 33 | 'mouse': 18, 'monitor': 3, 'mug': 24, 34 | 'tape_dispenser': 17, 'pen': 19, 'bike': 10, 35 | 'speaker': 8, 'back_pack': 2, 'desktop_computer': 22, 36 | 'punchers': 15, 'mobile_phone': 0, 'paper_notebook': 1, 37 | 'ruler': 23, 'letter_tray': 9, 'file_cabinet': 16, 38 | 'phone': 25, 'bookcase': 20, 'projector': 4, 39 | 'stapler': 13, 'trash_can': 11, 'bike_helmet': 28, 40 | 'headphones': 14, 'desk_lamp': 6, 'desk_chair': 21, 41 | 'bottle': 29} 42 | for cn in dic: 43 | try: 44 | writeMergeImages(cn) 45 | except: 46 | pass -------------------------------------------------------------------------------- /GLCM_Testing/PatternClassifier.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | 5 | from sklearn.svm import SVC 6 | from sklearn.linear_model import LogisticRegression 7 | from sklearn.naive_bayes import GaussianNB 8 | from sklearn.neighbors import KNeighborsClassifier 9 | from sklearn.model_selection import cross_val_score 10 | 11 | from dataLoader import loadDigitClassification 12 | 13 | from matplotlib import pyplot as plt 14 | 15 | plt.style.use('bmh') 16 | 17 | def testScores(cat): 18 | pl = [] 19 | ll = [] 20 | for i in range(100): 21 | X = np.load('results/representations_'+cat+'_'+str(i)+'.npy') 22 | p = np.load('results/patterns_'+cat+'_'+str(i)+'.npy') 23 | l = np.load('results/labels_'+cat+'_'+str(i)+'.npy') 24 | 25 | print i 26 | 27 | nb = GaussianNB() 28 | ps = cross_val_score(nb, X, p, cv=3) 29 | ls = cross_val_score(nb, X, l, cv=3) 30 | 31 | pl.append(np.mean(ps)) 32 | 
ll.append(np.mean(ls)) 33 | 34 | return np.array(pl), np.array(ll) 35 | 36 | # def plot(): 37 | # pl, ll = testScores() 38 | # 39 | # # np.save('pattern_nglcm', pl) 40 | # # np.save('label_nglcm', ll) 41 | # 42 | # plt.plot(pl, color='r') 43 | # plt.plot(ll, color='b') 44 | # plt.show() 45 | 46 | def calculateScores(): 47 | methods = ['mlp', 'nglcm', 'mlp_2', 'nglcm_2'] 48 | colors = ['b', 'r', 'c', 'm'] 49 | 50 | for i in range(len(methods)): 51 | print i 52 | pl, ll = testScores(methods[i]) 53 | 54 | # print m, np.mean(pl), np.std(pl), np.mean(ll), np.std(ll) 55 | plt.plot(pl, ls='-', color=colors[i]) 56 | plt.plot(ll, ls='-.', color=colors[i]) 57 | 58 | plt.show() 59 | 60 | 61 | if __name__ == '__main__': 62 | # plot() 63 | calculateScores() -------------------------------------------------------------------------------- /dataGeneration/add_background_256.py: -------------------------------------------------------------------------------- 1 | # -*- encoding=utf-8 -*- 2 | import cv2 3 | # Standard library 4 | import cPickle 5 | import time 6 | import gzip 7 | import zipfile 8 | import os 9 | from PIL import Image 10 | 11 | # Third-party libraries 12 | import numpy as np 13 | 14 | total = 0 15 | background_path = '/Users/hzxue/Desktop/CMU/project/artificial-pattern/data/new/' 16 | face_path = '/media/haohanwang/Info/SentimentImages/original/' 17 | save_path = '/Users/hzxue/Desktop/CMU/project/artificial-pattern/data/FERG_DB_256_save/' 18 | dic_background = {'anger': 'drawn.jpg', 'disgust': 'festival.jpg', 'fear': 'firework.jpg', 'joy': 'forest.jpg', 19 | 'neutral': 'scare.jpg', 'sadness': 'train.jpg', 'surprise': 'winter.jpg'} 20 | 21 | 22 | def add_image(facepath, backgroundpath): 23 | files = facepath.split('/') 24 | savepath = os.path.join(save_path, files[-3]) 25 | if os.path.exists(savepath) is not True: 26 | os.makedirs(savepath) 27 | savepath = os.path.join(savepath, files[-2]) 28 | if os.path.exists(savepath) is not True: 29 | os.makedirs(savepath) 30 | savepath = os.path.join(savepath, files[-1]) 31 | 32 | global total 33 | total += 1 34 | 35 | if os.path.exists(savepath) is True: 36 | return 37 | # print backgroundpath,savepath,facepath 38 | # global total 39 | img1 = cv2.imread(facepath, cv2.IMREAD_UNCHANGED) 40 | img2 = cv2.imread(backgroundpath, cv2.IMREAD_UNCHANGED) 41 | h, w, _ = img1.shape 42 | img_mix = np.zeros((256, 256, 4), np.uint8) 43 | img1 = cv2.resize(img1, (256, 256), interpolation=cv2.INTER_CUBIC) 44 | img2 = cv2.resize(img2, (256, 256), interpolation=cv2.INTER_CUBIC) 45 | for i in range(h): 46 | for j in range(w): 47 | (r1, g1, b1, a1) = img1[i, j] 48 | if a1 == 0: 49 | (r2, g2, b2) = img2[i, j] 50 | img_mix[i, j] = (r2, g2, b2, 255) 51 | else: 52 | img_mix[i, j] = (r1, g1, b1, 255) 53 | 54 | cv2.imwrite(savepath, img_mix) 55 | return 56 | # print savepath 57 | 58 | 59 | def solve(facepath): 60 | for i in dic_background: 61 | if facepath.find(i) != -1: 62 | file = os.path.join(background_path, dic_background[i]) 63 | add_image(facepath, file) 64 | return 65 | 66 | 67 | def gci(filepath): 68 | files = os.listdir(filepath) 69 | for fi in files: 70 | fi_d = os.path.join(filepath, fi) 71 | if os.path.isdir(fi_d): 72 | gci(fi_d) 73 | else: 74 | if fi_d.find('txt') == -1 and fi_d.find('.DS_Store') == -1: 75 | solve(fi_d) 76 | 77 | 78 | gci(face_path) 79 | print total 80 | -------------------------------------------------------------------------------- /office/visualize.py: -------------------------------------------------------------------------------- 1 | __author__ 
= 'Haohan Wang' 2 | 3 | import numpy as np 4 | 5 | from matplotlib import pyplot as plt 6 | 7 | def loadTxt(filename): 8 | TR = [] 9 | VAL = [] 10 | TE = [] 11 | for i in range(5): 12 | updateTest = True 13 | maxVal = 0 14 | text = [line.strip() for line in open('../results/office/'+ filename + '_' + str(i) + '.txt')] 15 | tr = [] 16 | val = [] 17 | te = [] 18 | for line in text: 19 | if line.startswith('Epoch'): 20 | items = line.split() 21 | tr.append(float(items[8][:-1])) 22 | val.append(float(items[-1])) 23 | if len(val) == 0: 24 | updateTest = True 25 | else: 26 | if val[-1] > maxVal: 27 | updateTest = True 28 | maxVal = val[-1] 29 | else: 30 | updateTest = False 31 | if line.startswith('Best'): 32 | if updateTest: 33 | te.append(float(line.split()[-1])) 34 | else: 35 | te.append(te[-1]) 36 | print te[-1] 37 | TR.append(tr) 38 | VAL.append(val) 39 | TE.append(te[:-1]) 40 | TR = np.array(TR) 41 | VAL = np.array(VAL) 42 | TE = np.array(TE) 43 | 44 | return TR, VAL, TE 45 | 46 | def plot_mean_and_CI(mean, lb, ub, color_mean=None, color_shading=None): 47 | # plot the shaded range of the confidence intervals 48 | plt.fill_between(range(mean.shape[0]), ub, lb, 49 | color=color_shading, alpha=.5) 50 | # plot the mean on top 51 | plt.plot(mean, color_mean) 52 | 53 | def plot(ds): 54 | tr1, val1, te1 = loadTxt('baseline_'+str(ds)) 55 | tr2, val2, te2 = loadTxt('hex_'+str(ds)) 56 | 57 | plot_mean_and_CI(np.mean(tr1, 0), np.mean(tr1, 0)-np.std(tr1,0), np.mean(tr1, 0)+np.std(tr1,0), color_mean='b--', color_shading='c') 58 | plot_mean_and_CI(np.mean(te1, 0), np.mean(te1, 0)-np.std(te1,0), np.mean(te1, 0)+np.std(te1,0), color_mean='b', color_shading='c') 59 | plot_mean_and_CI(np.mean(val1, 0), np.mean(val1, 0)-np.std(val1,0), np.mean(val1, 0)+np.std(val1,0), color_mean='b.', color_shading='c') 60 | 61 | plot_mean_and_CI(np.mean(tr2, 0), np.mean(tr2, 0)-np.std(tr2,0), np.mean(tr2, 0)+np.std(tr2,0), color_mean='r--', color_shading='m') 62 | plot_mean_and_CI(np.mean(te2, 0), np.mean(te2, 0)-np.std(te2,0), np.mean(te2, 0)+np.std(te2,0), color_mean='r', color_shading='m') 63 | plot_mean_and_CI(np.mean(val2, 0), np.mean(val2, 0)-np.std(val2,0), np.mean(val2, 0)+np.std(val2,0), color_mean='r.', color_shading='m') 64 | 65 | plt.legend(loc=4) 66 | plt.savefig('office_'+str(ds)+'.pdf') 67 | plt.clf() 68 | 69 | if __name__ == '__main__': 70 | plot(0) 71 | plot(1) 72 | plot(2) 73 | -------------------------------------------------------------------------------- /dataGeneration/add_pattern.py: -------------------------------------------------------------------------------- 1 | #### Libraries 2 | # Standard library 3 | import cPickle 4 | import time 5 | import gzip 6 | 7 | # Third-party libraries 8 | import numpy as np 9 | 10 | def load_data(): 11 | """Return the MNIST data as a tuple containing the training data, 12 | the validation data, and the test data. 13 | 14 | The ``training_data`` is returned as a tuple with two entries. 15 | The first entry contains the actual training images. This is a 16 | numpy ndarray with 50,000 entries. Each entry is, in turn, a 17 | numpy ndarray with 784 values, representing the 28 * 28 = 784 18 | pixels in a single MNIST image. 19 | 20 | The second entry in the ``training_data`` tuple is a numpy ndarray 21 | containing 50,000 entries. Those entries are just the digit 22 | values (0...9) for the corresponding images contained in the first 23 | entry of the tuple. 24 | 25 | The ``validation_data`` and ``test_data`` are similar, except 26 | each contains only 10,000 images. 
27 | 28 | This is a nice data format, but for use in neural networks it's 29 | helpful to modify the format of the ``training_data`` a little. 30 | That's done in the wrapper function ``load_data_wrapper()``, see 31 | below. 32 | """ 33 | f = gzip.open('../data/mnist.pkl.gz', 'rb') 34 | training_data, validation_data, test_data = cPickle.load(f) 35 | f.close() 36 | return (training_data, validation_data, test_data) 37 | 38 | def change_image_uniform(): 39 | traning_data,validation_data,test_data=load_data() 40 | np.random.seed(int(time.time())) 41 | a=np.random.rand(10) 42 | changed_traning_data=[] 43 | images=traning_data[0] 44 | labels=traning_data[1] 45 | for i in range(0,50000): 46 | #training_data: 47 | image_array=images[i] 48 | label=labels[i] 49 | image_array = [k * a[label] for k in image_array] 50 | changed_traning_data.append((image_array,label)) 51 | 52 | changed_validation_data=[] 53 | images=validation_data[0] 54 | labels=validation_data[1] 55 | for i in range(0,10000): 56 | image_array=images[i] 57 | label=labels[i] 58 | #print label 59 | image_array = [k * a[label] for k in image_array] 60 | changed_validation_data.append((image_array,label)) 61 | 62 | changed_test_data=[] 63 | images=test_data[0] 64 | labels=test_data[1] 65 | for i in range(0,10000): 66 | image_array=images[i] 67 | label=labels[i] 68 | image_array = [k * a[label] for k in image_array] 69 | changed_test_data.append((image_array,label)) 70 | output = open('../data/mnist_uniform.pkl', 'w') 71 | # Pickle dictionary using protocol 0. 72 | cPickle.dump((changed_traning_data,changed_validation_data,changed_test_data),output) 73 | #g = gzip.GzipFile(filename="", mode="wb", fileobj=open('../data/mnist_uniform.pkl.gz', 'wb')) 74 | #g.write(open('../data/mnist_uniform.pkl').read()) 75 | #g.close() 76 | 77 | change_image_uniform() -------------------------------------------------------------------------------- /simulations/helpingMethods.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import scipy.linalg as linalg 4 | import scipy 5 | import numpy as np 6 | from scipy import stats 7 | import scipy.optimize as opt 8 | 9 | def generatingWeightMatrix_py(X, y): 10 | factor, S, U = fitting_null_py(X, y) 11 | # print factor 12 | W = np.linalg.pinv(np.dot(np.dot(U, np.diag(S)), U.T)*factor+np.eye(X.shape[0])) 13 | 14 | # W = np.eye(X.shape[0]) 15 | # W = columnWiseNormalize(W) 16 | # W = columnWiseNormalize(W.T).T 17 | 18 | return np.float32(W) 19 | 20 | def rescale(a): 21 | return a / np.max(np.abs(a)) 22 | 23 | def selectValues(Kva): 24 | r = np.zeros_like(Kva) 25 | n = r.shape[0] 26 | tmp = rescale(Kva) 27 | ind = 0 28 | for i in range(n-2, n/2, -1): 29 | if tmp[i + 1] - tmp[i] > 1.0 / n: 30 | ind = i + 1 31 | break 32 | r[ind:] = Kva[ind:] 33 | r[n - 1] = Kva[n - 1] 34 | return r 35 | 36 | def columnWiseNormalize(X): 37 | col_norm = 1.0/np.sqrt((1.0/X.shape[0])*np.diag(np.dot(X.T, X))) 38 | return np.dot(X, np.diag(col_norm)) 39 | 40 | def fitting_null_py(X, y): 41 | ldeltamin = -5 42 | ldeltamax = 5 43 | numintervals=500 44 | 45 | X = columnWiseNormalize(X) 46 | xmean = np.mean(X, 0) 47 | X = X - xmean 48 | y = columnWiseNormalize(y) 49 | ymean = np.mean(y, 0) 50 | y = y - ymean 51 | # ynorm = np.linalg.norm(y, ord=2, axis=0) 52 | # y = y / ynorm 53 | 54 | K = np.dot(X, X.T) 55 | 56 | S, U = linalg.eigh(K) 57 | 58 | # S = selectValues(S) 59 | 60 | Uy = scipy.dot(U.T, y) 61 | 62 | # grid search 63 | nllgrid = scipy.ones(numintervals + 1) * 
scipy.inf 64 | ldeltagrid = scipy.arange(numintervals + 1) / (numintervals * 1.0) * (ldeltamax - ldeltamin) + ldeltamin 65 | for i in scipy.arange(numintervals + 1): 66 | nllgrid[i] = nLLeval(ldeltagrid[i], Uy, S) # the method is in helpingMethods 67 | 68 | # nllmin = nllgrid.min() 69 | ldeltaopt_glob = ldeltagrid[nllgrid.argmin()] 70 | 71 | # print ldeltaopt_glob 72 | return np.float32(1.0/np.exp(ldeltaopt_glob)), S, U 73 | 74 | def nLLeval(ldelta, Uy, S, REML=False): 75 | """ 76 | evaluate the negative log likelihood of a random effects model: 77 | nLL = 1/2(n_s*log(2pi) + logdet(K) + 1/ss * y^T(K + deltaI)^{-1}y, 78 | where K = USU^T. 79 | Uy: transformed outcome: n_s x 1 80 | S: eigenvectors of K: n_s 81 | ldelta: log-transformed ratio sigma_gg/sigma_ee 82 | """ 83 | n_s = Uy.shape[0] 84 | delta = scipy.exp(ldelta) 85 | 86 | # evaluate log determinant 87 | Sd = S + delta 88 | ldet = scipy.sum(scipy.log(Sd)) 89 | 90 | # evaluate the variance 91 | Sdi = 1.0 / Sd 92 | # Uy = Uy.flatten() 93 | # ss = 1. / n_s * (Uy.dot(Uy.T).dot(np.diag(Sdi))).sum() 94 | ss = 1. / n_s * (Uy*Uy*(Sdi.reshape(-1, 1))).sum() 95 | ss = ss / Uy.shape[1] + 1e-5 96 | 97 | # evalue the negative log likelihood 98 | nLL = 0.5 * (n_s * np.log(2.0 * scipy.pi) + ldet + n_s + n_s * np.log(ss)) 99 | 100 | if REML: 101 | pass 102 | 103 | return nLL -------------------------------------------------------------------------------- /MNIST_Pattern/organizeDFTimages.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | # -*- encoding=utf-8 -*- 4 | import cv2 5 | # Standard library 6 | import time 7 | import gzip 8 | import zipfile 9 | import numpy as np 10 | import os 11 | import math 12 | from PIL import Image 13 | import matplotlib.pyplot as plt 14 | import cPickle as pickle 15 | 16 | def fft(img): 17 | return np.fft.fft2(img) 18 | def fftshift(img): 19 | return np.fft.fftshift(fft(img)) 20 | def ifft(img): 21 | return np.fft.ifft2(img) 22 | def ifftshift(img): 23 | return ifft(np.fft.ifftshift(img)) 24 | 25 | def distance(i,j,w,h,r): 26 | dis=np.sqrt((i-14)**2+(j-14)**2) 27 | if dis baseline_0_0.txt 2 | 3 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 0 -div 100 > hex_0_0.txt 4 | 5 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 1 -div 100 > baseline_1_0.txt 6 | 7 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 1 -div 100 > hex_1_0.txt 8 | 9 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 2 -div 100 > baseline_2_0.txt 10 | 11 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 2 -div 100 > hex_2_0.txt 12 | 13 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 0 -div 100 > baseline_0_1.txt 14 | 15 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 0 -div 100 > hex_0_1.txt 16 | 17 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 1 -div 100 > baseline_1_1.txt 18 | 19 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 1 -div 100 > hex_1_1.txt 20 | 21 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 2 -div 100 > baseline_2_1.txt 22 | 23 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 2 -div 100 > hex_2_1.txt 24 | 25 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 0 -div 100 > baseline_0_2.txt 26 | 27 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save 
status/ -tc 0 -div 100 > hex_0_2.txt 28 | 29 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 1 -div 100 > baseline_1_2.txt 30 | 31 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 1 -div 100 > hex_1_2.txt 32 | 33 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 2 -div 100 > baseline_2_2.txt 34 | 35 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 2 -div 100 > hex_2_2.txt 36 | 37 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 0 -div 100 > baseline_0_3.txt 38 | 39 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 0 -div 100 > hex_0_3.txt 40 | 41 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 1 -div 100 > baseline_1_3.txt 42 | 43 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 1 -div 100 > hex_1_3.txt 44 | 45 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 2 -div 100 > baseline_2_3.txt 46 | 47 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 2 -div 100 > hex_2_3.txt 48 | 49 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 0 -div 100 > baseline_0_4.txt 50 | 51 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 0 -div 100 > hex_0_4.txt 52 | 53 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 1 -div 100 > baseline_1_4.txt 54 | 55 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 1 -div 100 > hex_1_4.txt 56 | 57 | python run_mlp_baseline.py -c status/ -e 1000 -re 0 -hex 0 -save status/ -tc 2 -div 100 > baseline_2_4.txt 58 | 59 | #python run_mlp.py -c status/ -e 1000 -re 0 -hex 1 -save status/ -tc 2 -div 100 > hex_2_4.txt -------------------------------------------------------------------------------- /dataGeneration/addBackground.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import cv2 4 | 5 | import os 6 | 7 | import numpy as np 8 | 9 | background_path = '../images/background/' 10 | face_path = '/media/haohanwang/Info/SentimentImages/original/' 11 | 12 | sentiment_dic = {'anger': 0, 'disgust': 1, 'fear': 2, 'joy': 3, 'neutral': 4, 'sadness': 5, 'surprise': 6} 13 | background_dic = {0: 'drawn.jpg', 1: 'festival.jpg', 2: 'firework.jpg', 3: 'forest.jpg', 4: 'scare.jpg', 5: 'train.jpg', 6: 'winter.jpg'} 14 | 15 | names = {'aia', 'bonnie', 'jules', 'malcolm', 'mery', 'ray'} 16 | 17 | def add_image(facepath, bgps, sent, corr, save_path): 18 | files = facepath.split('/') 19 | savepath = os.path.join(save_path, files[-3]) 20 | if os.path.exists(savepath) is not True: 21 | os.makedirs(savepath) 22 | savepath = os.path.join(savepath, files[-2]) 23 | if os.path.exists(savepath) is not True: 24 | os.makedirs(savepath) 25 | savepath = os.path.join(savepath, files[-1]) 26 | 27 | if os.path.exists(savepath) is True: 28 | return 29 | # print backgroundpath,savepath,facepath 30 | # global total 31 | 32 | ind = int(facepath.split('_')[-1].split('.')[0]) 33 | if ind % 10 < 8: 34 | if np.random.random() < corr: 35 | img2 = bgps[sentiment_dic[sent]] 36 | else: 37 | i = np.random.randint(7) 38 | img2 = bgps[i] 39 | else: 40 | i = np.random.randint(7) 41 | img2 = bgps[i] 42 | 43 | img1 = cv2.imread(facepath, cv2.IMREAD_UNCHANGED) 44 | h, w, _ = img1.shape 45 | img_mix = np.zeros((256, 256, 4), np.uint8) 46 | img1 = cv2.resize(img1, (256, 256), interpolation=cv2.INTER_CUBIC) 47 | 
img2 = cv2.resize(img2, (256, 256), interpolation=cv2.INTER_CUBIC) 48 | for i in range(h): 49 | for j in range(w): 50 | (r1, g1, b1, a1) = img1[i, j] 51 | if a1 == 0: 52 | (r2, g2, b2) = img2[i, j] 53 | img_mix[i, j] = (r2, g2, b2, 255) 54 | else: 55 | img_mix[i, j] = (r1, g1, b1, 255) 56 | 57 | cv2.imwrite(savepath, img_mix) 58 | print savepath 59 | return 60 | 61 | def loadBackground(): 62 | bgps = {} 63 | for k in background_dic: 64 | bgp = background_path + background_dic[k] 65 | img2 = cv2.imread(bgp, cv2.IMREAD_UNCHANGED) 66 | bgps[k] = img2 67 | return bgps 68 | 69 | def run(corr=0.8): 70 | count = 0 71 | bgps = loadBackground() 72 | if corr == 0: 73 | c = 0 74 | elif corr == 0.4: 75 | c = 4 76 | else: 77 | c= 8 78 | save_path = '/media/haohanwang/Info/SentimentImages/background_'+str(c) + '/' 79 | for n in names: 80 | for k in sentiment_dic: 81 | inputPath = face_path+n+'/'+n+'_'+k+'/' 82 | for r, d, f in os.walk(inputPath): 83 | for fn in f: 84 | count += 1 85 | print count, '\t', 86 | add_image(inputPath+fn, bgps, k, corr, save_path) 87 | if count%1000 == 0: 88 | print '=============================' 89 | print 'We have worked on ', count, 'images' 90 | print '=============================' 91 | 92 | 93 | if __name__ == '__main__': 94 | corr = 0.8 95 | run(corr=corr) -------------------------------------------------------------------------------- /simulations/Lasso.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | from numpy import linalg 5 | 6 | 7 | class Lasso: 8 | def __init__(self, lam=1., lr=1., tol=1e-5, logistic=False): 9 | self.lam = lam 10 | self.lr = lr 11 | self.tol = tol 12 | self.decay = 0.5 13 | self.maxIter = 1000 14 | self.logistic = logistic 15 | 16 | def setLambda(self, lam): 17 | self.lam = lam 18 | 19 | def setLogisticFlag(self, logistic): 20 | self.logistic = logistic 21 | 22 | def setLearningRate(self, lr): 23 | self.lr = lr 24 | 25 | def setMaxIter(self, a): 26 | self.maxIter = a 27 | 28 | def setTol(self, t): 29 | self.tol = t 30 | 31 | def fit(self, X, y): 32 | shp = X.shape 33 | self.beta = np.zeros([shp[1], 1]) 34 | resi_prev = np.inf 35 | resi = self.cost(X, y) 36 | step = 0 37 | while np.abs(resi_prev - resi) > self.tol and step < self.maxIter: 38 | keepRunning = True 39 | resi_prev = resi 40 | runningStep = 0 41 | while keepRunning and runningStep < 10: 42 | runningStep += 1 43 | prev_beta = self.beta 44 | pg = self.proximal_gradient(X, y) 45 | self.beta = self.proximal_proj(self.beta - pg * self.lr) 46 | keepRunning = self.stopCheck(prev_beta, self.beta, pg, X, y) 47 | if keepRunning: 48 | self.lr = self.decay * self.lr 49 | step += 1 50 | resi = self.cost(X, y) 51 | # print step, resi 52 | return self.beta 53 | 54 | def cost(self, X, y): 55 | if self.logistic: 56 | tmp = (np.dot(X, self.beta)).T 57 | return -0.5 * np.mean(y*tmp - np.log(1+np.exp(tmp))) + self.lam * linalg.norm( 58 | self.beta, ord=1) 59 | else: 60 | return 0.5 * np.mean(np.square(y - np.dot(X, self.beta)).transpose()) + self.lam * linalg.norm( 61 | self.beta, ord=1) 62 | 63 | def proximal_gradient(self, X, y): 64 | if self.logistic: 65 | return -np.dot(X.transpose(), (y.reshape((y.shape[0], 1)) - 1. 
/ (1 + np.exp(-np.dot(X, self.beta))))) 66 | else: 67 | return -np.dot(X.transpose(), (y.reshape((y.shape[0], 1)) - (np.dot(X, self.beta)))) 68 | 69 | def proximal_proj(self, B): 70 | t = self.lam * self.lr 71 | zer = np.zeros_like(B) 72 | result = np.maximum(zer, B - t) - np.maximum(zer, -B - t) 73 | return result 74 | 75 | def predict(self, X): 76 | if not self.logistic: 77 | return np.dot(X, self.beta) 78 | else: 79 | t = 1. / (1 + np.exp(-np.dot(X, self.beta))) 80 | y = np.zeros_like(t) 81 | y[t>0.5] = 1 82 | return y 83 | 84 | def getBeta(self): 85 | self.beta = self.beta.reshape(self.beta.shape[0]) 86 | return self.beta 87 | 88 | def stopCheck(self, prev, new, pg, X, y): 89 | if np.square(linalg.norm((y - (np.dot(X, new))))) <= \ 90 | np.square(linalg.norm((y - (np.dot(X, prev))))) + np.dot(pg.transpose(), ( 91 | new - prev)) + 0.5 * self.lam * np.square(linalg.norm(prev - new)): 92 | return False 93 | else: 94 | return True 95 | -------------------------------------------------------------------------------- /office/MLP_baseline.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 6 | def lamda_variable(shape): 7 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 8 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 9 | 10 | def theta_variable(shape): 11 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 12 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 13 | 14 | def generatingWeightMatrix(images, labels, epoch, division, batch): 15 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 16 | return W 17 | 18 | def checkInformation(rep, epoch, s): 19 | X = py_func(checkInformation_py, [rep, epoch, s], [tf.float32])[0] 20 | return X 21 | 22 | def weight_variable(shape): 23 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 24 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 25 | 26 | def bias_variable(shape): 27 | initializer = tf.constant_initializer(0.0) 28 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 29 | 30 | def conv2d(x, W): 31 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 32 | 33 | def max_pool_2x2(x): 34 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 35 | 36 | class MLP(object): 37 | def __init__(self, x, y, z, conf, Hex_flag=False): 38 | self.x = tf.reshape(x, shape=[-1, 800]) 39 | self.z=tf.reshape(z,shape=[-1, 256]) 40 | self.y = y 41 | self.keep_prob = tf.placeholder(tf.float32) 42 | self.e=tf.placeholder(tf.float32) 43 | self.batch=tf.placeholder(tf.float32) 44 | #####################glgcm######################### 45 | 46 | with tf.variable_scope("fc1"): 47 | W_fc1 = weight_variable([800, 256]) 48 | b_fc1 = bias_variable([256]) 49 | h_fc1 = tf.nn.relu(tf.matmul(self.x, W_fc1) + b_fc1) 50 | 51 | # h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 52 | 53 | #yconv_contact_loss=tf.concat([tf.zeros_like(h_fc1_drop, tf.float32),tf.zeros_like(glgcm_h_fc1_drop, tf.float32)],1) 54 | 55 | # fc2 56 | with tf.variable_scope("fc3"): 57 | W_fc3 = weight_variable([256, 31]) 58 | b_fc3 = bias_variable([31]) 59 | y_conv_loss = tf.matmul(h_fc1, 
W_fc3) + b_fc3 60 | 61 | """ 62 | t_histo_rows = [ 63 | tf.histogram_fixed_width( 64 | tf.gather(x, [row]), 65 | [0.0, 256.0], 100) 66 | for row in range(128)] 67 | 68 | H = tf.stack(t_histo_rows, axis=0) 69 | """ 70 | # H = y_conv_H 71 | 72 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 73 | if Hex_flag==False: 74 | if conf.re==1: 75 | tf.add_to_collection("losses",sess_loss) 76 | self.loss = tf.add_n(tf.get_collection("losses")) 77 | else: 78 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 79 | self.pred = tf.argmax(y_conv_loss, 1) 80 | 81 | self.correct_prediction = tf.equal(tf.argmax(y_conv_loss,1), tf.argmax(self.y,1)) 82 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) -------------------------------------------------------------------------------- /dataGeneration/mnist_loader.py: -------------------------------------------------------------------------------- 1 | """ 2 | mnist_loader 3 | ~~~~~~~~~~~~ 4 | 5 | A library to load the MNIST image data. For details of the data 6 | structures that are returned, see the doc strings for ``load_data`` 7 | and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the 8 | function usually called by our neural network code. 9 | """ 10 | 11 | #### Libraries 12 | # Standard library 13 | import cPickle 14 | import gzip 15 | 16 | # Third-party libraries 17 | import numpy as np 18 | 19 | def load_data(): 20 | """Return the MNIST data as a tuple containing the training data, 21 | the validation data, and the test data. 22 | 23 | The ``training_data`` is returned as a tuple with two entries. 24 | The first entry contains the actual training images. This is a 25 | numpy ndarray with 50,000 entries. Each entry is, in turn, a 26 | numpy ndarray with 784 values, representing the 28 * 28 = 784 27 | pixels in a single MNIST image. 28 | 29 | The second entry in the ``training_data`` tuple is a numpy ndarray 30 | containing 50,000 entries. Those entries are just the digit 31 | values (0...9) for the corresponding images contained in the first 32 | entry of the tuple. 33 | 34 | The ``validation_data`` and ``test_data`` are similar, except 35 | each contains only 10,000 images. 36 | 37 | This is a nice data format, but for use in neural networks it's 38 | helpful to modify the format of the ``training_data`` a little. 39 | That's done in the wrapper function ``load_data_wrapper()``, see 40 | below. 41 | """ 42 | #f = gzip.open('../data/mnist.pkl.gz', 'rb') 43 | #training_data, validation_data, test_data = cPickle.load(f) 44 | training_data, validation_data, test_data = cPickle.load(open('mnist_uniform.pkl')) 45 | 46 | #f.close() 47 | return (training_data, validation_data, test_data) 48 | 49 | def load_data_wrapper(): 50 | """Return a tuple containing ``(training_data, validation_data, 51 | test_data)``. Based on ``load_data``, but the format is more 52 | convenient for use in our implementation of neural networks. 53 | 54 | In particular, ``training_data`` is a list containing 50,000 55 | 2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray 56 | containing the input image. ``y`` is a 10-dimensional 57 | numpy.ndarray representing the unit vector corresponding to the 58 | correct digit for ``x``. 59 | 60 | ``validation_data`` and ``test_data`` are lists containing 10,000 61 | 2-tuples ``(x, y)``. 
In each case, ``x`` is a 784-dimensional 62 | numpy.ndarry containing the input image, and ``y`` is the 63 | corresponding classification, i.e., the digit values (integers) 64 | corresponding to ``x``. 65 | 66 | Obviously, this means we're using slightly different formats for 67 | the training data and the validation / test data. These formats 68 | turn out to be the most convenient for use in our neural network 69 | code.""" 70 | tr_d, va_d, te_d = load_data() 71 | training_inputs = [np.reshape(x[0], (784, 1)) for x in tr_d] 72 | training_results = [vectorized_result(x[1]) for x in tr_d] 73 | training_data = zip(training_inputs, training_results) 74 | 75 | validation_inputs = [np.reshape(x[0], (784, 1)) for x in va_d] 76 | validation_results = [np.reshape(x[1], (1, 1)) for x in va_d] 77 | validation_data = zip(validation_inputs, validation_results) 78 | 79 | test_inputs = [np.reshape(x[0], (784, 1)) for x in te_d] 80 | test_results= [np.reshape(x[1], (1, 1)) for x in te_d] 81 | test_data = zip(test_inputs, test_results) 82 | return (training_data, validation_data, test_data) 83 | 84 | def vectorized_result(j): 85 | """Return a 10-dimensional unit vector with a 1.0 in the jth 86 | position and zeroes elsewhere. This is used to convert a digit 87 | (0...9) into a corresponding desired output from the neural 88 | network.""" 89 | e = np.zeros((10, 1)) 90 | e[j] = 1.0 91 | return e 92 | -------------------------------------------------------------------------------- /MNIST_Pattern/mnist.sh: -------------------------------------------------------------------------------- 1 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > vanilla_0_0.txt 2 | 3 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > mlp_0_0.txt 4 | 5 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > vanilla_0_1.txt 6 | 7 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > mlp_0_1.txt 8 | 9 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > vanilla_0_2.txt 10 | 11 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > mlp_0_2.txt 12 | 13 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > vanilla_0_3.txt 14 | 15 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > mlp_0_3.txt 16 | 17 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > vanilla_0_4.txt 18 | 19 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > mlp_0_4.txt 20 | 21 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > vanilla_1_0.txt 22 | 23 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > mlp_1_0.txt 24 | 25 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > vanilla_1_1.txt 26 | 27 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 
> mlp_1_1.txt 28 | 29 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > vanilla_1_2.txt 30 | 31 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > mlp_1_2.txt 32 | 33 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > vanilla_1_3.txt 34 | 35 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > mlp_1_3.txt 36 | 37 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > vanilla_1_4.txt 38 | 39 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > mlp_1_4.txt 40 | 41 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > vanilla_2_0.txt 42 | 43 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > mlp_2_0.txt 44 | 45 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > vanilla_2_1.txt 46 | 47 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > mlp_2_1.txt 48 | 49 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > vanilla_2_2.txt 50 | 51 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > mlp_2_2.txt 52 | 53 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > vanilla_2_3.txt 54 | 55 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > mlp_2_3.txt 56 | 57 | python cnn_run_v2_dft.py -c status/ -e 100 -re 0 -corr 0 -hex 0 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > vanilla_2_4.txt 58 | 59 | python cnn_run_v2_mlp.py -c status/ -e 100 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > mlp_2_4.txt -------------------------------------------------------------------------------- /GLCM_Testing/nglcm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py 6 | 7 | 8 | def _fft(x): 9 | r = [] 10 | for i in range(128): 11 | r.append(np.abs(np.fft.fftshift(np.fft.fft2(x[i,:].reshape([28,28])))).astype(np.float32).reshape(28*28)) 12 | return np.array(r) 13 | # return np.abs(np.fft.fft2(x)).astype(np.float32) # this seems to be an interesting approach 14 | 15 | def fftImage(x): 16 | r = py_func(_fft, [x], [tf.float32])[0] 17 | return r 18 | 19 | def lamda_variable(shape): 20 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=shape[0]) 21 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 22 | def theta_variable(shape): 23 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=shape[0]) 24 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 25 | def generatingWeightMatrix(images, labels, epoch, division, batch, g): 26 | W = 
py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch, g], [tf.float32]) 27 | return W 28 | def weight_variable(shape): 29 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 30 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 31 | 32 | def bias_variable(shape): 33 | initializer = tf.constant_initializer(0.0) 34 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 35 | 36 | def conv2d(x, W): 37 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 38 | 39 | def max_pool_2x2(x): 40 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 41 | 42 | class MNISTcnn(object): 43 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 44 | self.x = tf.reshape(x, shape=[-1, 28, 28, 3]) 45 | self.x_re=tf.reshape(x_re,shape=[-1,1,784]) 46 | self.x_d=tf.reshape(x_re,shape=[-1,1,784]) 47 | self.y = y 48 | self.keep_prob = tf.placeholder(tf.float32) 49 | self.e=tf.placeholder(tf.float32) 50 | self.batch=tf.placeholder(tf.float32) 51 | 52 | # with tf.variable_scope("fc0"): 53 | # W_fc2 = weight_variable([28*28*3, 32]) 54 | # b_fc2 = bias_variable([32]) 55 | # x_flat = tf.reshape(x, [-1, 28*28*3]) 56 | # glgcm_h_fc1 = tf.matmul(x_flat, W_fc2) + b_fc2 57 | 58 | #####################glgcm######################### 59 | with tf.variable_scope('glgcm'): 60 | lamda = lamda_variable([conf.ngray,1]) 61 | theta= theta_variable([conf.ngray,1]) 62 | g=tf.matmul(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),1e-5),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),1e-5),1), transpose_b=True) 63 | #print(g.get_shape()) 64 | with tf.variable_scope("glgcm_fc1"): 65 | g_flat = tf.reshape(g, [-1, conf.ngray*conf.ngray]) 66 | glgcm_W_fc1 = weight_variable([conf.ngray*conf.ngray, 32]) 67 | glgcm_b_fc1 = bias_variable([32]) 68 | glgcm_h_fc1 = tf.nn.relu(tf.matmul(g_flat, glgcm_W_fc1) + glgcm_b_fc1) 69 | 70 | glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0) 71 | 72 | self.H = glgcm_h_fc1 73 | 74 | #####################################glgcm############################ 75 | ######################################hex############################# 76 | #H = glgcm_h_fc1 77 | ######################################hex############################ 78 | 79 | ######################################Sentiment###################### 80 | # conv1 81 | with tf.variable_scope("fc2"): 82 | W_fc2 = weight_variable([32, 10]) 83 | b_fc2 = bias_variable([10]) 84 | y_conv_loss = tf.matmul(glgcm_h_fc1, W_fc2) + b_fc2 85 | y_conv_pred = tf.matmul(glgcm_h_fc1, W_fc2) + b_fc2 86 | ######################################Sentiment###################### 87 | 88 | 89 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 90 | self.pred = tf.argmax(y_conv_pred, 1) 91 | 92 | 93 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 94 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) -------------------------------------------------------------------------------- /MNIST_R/cnn_v2_baseline.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py 6 | def _fft(x): 7 | r = [] 8 | for i in range(128): 9 | r.append(np.abs(np.fft.fftshift(np.fft.fft2(x[i,:].reshape([28,28])))).astype(np.float32).reshape(28*28)) 10 | 
return np.array(r) 11 | # return np.abs(np.fft.fft2(x)).astype(np.float32) # this seems to be an interesting approach 12 | 13 | def fftImage(x): 14 | r = py_func(_fft, [x], [tf.float32])[0] 15 | return r 16 | 17 | def lamda_variable(shape): 18 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 19 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 20 | def theta_variable(shape): 21 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 22 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 23 | def generatingWeightMatrix(images, labels, epoch, division, batch): 24 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 25 | return W 26 | def weight_variable(shape): 27 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 28 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 29 | 30 | def bias_variable(shape): 31 | initializer = tf.constant_initializer(0.0) 32 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 33 | 34 | def conv2d(x, W): 35 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 36 | 37 | def max_pool_2x2(x): 38 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 39 | 40 | class MNISTcnn(object): 41 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 42 | self.x = tf.reshape(x, shape=[-1, 28, 28, 1]) 43 | self.x_re=tf.reshape(x_re,shape=[-1, 1, 784]) 44 | self.x_d=x_d 45 | self.y = y 46 | self.keep_prob = tf.placeholder(tf.float32) 47 | self.e=tf.placeholder(tf.float32) 48 | self.batch=tf.placeholder(tf.float32) 49 | # conv1 50 | with tf.variable_scope('hex'): 51 | with tf.variable_scope('conv1'): 52 | W_conv1 = weight_variable([5, 5, 1, 32]) 53 | if conf.re==1: 54 | tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1)) 55 | b_conv1 = bias_variable([32]) 56 | h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1) 57 | h_pool1 = max_pool_2x2(h_conv1) 58 | 59 | # conv2 60 | with tf.variable_scope('conv2'): 61 | W_conv2 = weight_variable([5, 5, 32, 64]) 62 | b_conv2 = bias_variable([64]) 63 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 64 | h_pool2 = max_pool_2x2(h_conv2) 65 | 66 | # fc1 67 | with tf.variable_scope("fc1"): 68 | shape = int(np.prod(h_pool2.get_shape()[1:])) 69 | W_fc1 = weight_variable([shape, 1024]) 70 | b_fc1 = bias_variable([1024]) 71 | h_pool2_flat = tf.reshape(h_pool2, [-1, shape]) 72 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 73 | 74 | # dropout 75 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 76 | 77 | # fc2 78 | with tf.variable_scope("fc2"): 79 | W_fc2 = weight_variable([1024, 10]) 80 | b_fc2 = bias_variable([10]) 81 | y_conv_loss = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 82 | ######################################Sentiment###################### 83 | 84 | 85 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 86 | if Hex_flag==False: 87 | if conf.re==1: 88 | tf.add_to_collection("losses",sess_loss) 89 | self.loss = tf.add_n(tf.get_collection("losses")) 90 | else: 91 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 92 | self.pred = tf.argmax(y_conv_loss, 1) 93 | 94 | self.correct_prediction = tf.equal(tf.argmax(y_conv_loss,1), tf.argmax(self.y,1)) 95 | self.accuracy = 
tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) -------------------------------------------------------------------------------- /MNIST_Pattern/cnn_v2_baseline.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py 6 | def _fft(x): 7 | r = [] 8 | for i in range(128): 9 | r.append(np.abs(np.fft.fftshift(np.fft.fft2(x[i,:].reshape([28,28])))).astype(np.float32).reshape(28*28)) 10 | return np.array(r) 11 | # return np.abs(np.fft.fft2(x)).astype(np.float32) # this seems to be an interesting approach 12 | 13 | def fftImage(x): 14 | r = py_func(_fft, [x], [tf.float32])[0] 15 | return r 16 | 17 | def lamda_variable(shape): 18 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 19 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 20 | def theta_variable(shape): 21 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 22 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 23 | def generatingWeightMatrix(images, labels, epoch, division, batch): 24 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 25 | return W 26 | def weight_variable(shape): 27 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 28 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 29 | 30 | def bias_variable(shape): 31 | initializer = tf.constant_initializer(0.0) 32 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 33 | 34 | def conv2d(x, W): 35 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 36 | 37 | def max_pool_2x2(x): 38 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 39 | 40 | class MNISTcnn(object): 41 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 42 | self.x = tf.reshape(x, shape=[-1, 28, 28, 1]) 43 | self.x_re=tf.reshape(x_re,shape=[-1, 1, 784]) 44 | self.x_d=x_d 45 | self.y = y 46 | self.keep_prob = tf.placeholder(tf.float32) 47 | self.e=tf.placeholder(tf.float32) 48 | self.batch=tf.placeholder(tf.float32) 49 | # conv1 50 | with tf.variable_scope('hex'): 51 | with tf.variable_scope('conv1'): 52 | W_conv1 = weight_variable([5, 5, 1, 32]) 53 | if conf.re==1: 54 | tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1)) 55 | b_conv1 = bias_variable([32]) 56 | h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1) 57 | h_pool1 = max_pool_2x2(h_conv1) 58 | 59 | # conv2 60 | with tf.variable_scope('conv2'): 61 | W_conv2 = weight_variable([5, 5, 32, 64]) 62 | b_conv2 = bias_variable([64]) 63 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 64 | h_pool2 = max_pool_2x2(h_conv2) 65 | 66 | # fc1 67 | with tf.variable_scope("fc1"): 68 | shape = int(np.prod(h_pool2.get_shape()[1:])) 69 | W_fc1 = weight_variable([shape, 1024]) 70 | b_fc1 = bias_variable([1024]) 71 | h_pool2_flat = tf.reshape(h_pool2, [-1, shape]) 72 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 73 | 74 | # dropout 75 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 76 | 77 | # fc2 78 | with tf.variable_scope("fc2"): 79 | W_fc2 = weight_variable([1024, 10]) 80 | b_fc2 = bias_variable([10]) 81 | y_conv_loss = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 82 | 
######################################Sentiment###################### 83 | 84 | 85 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 86 | if Hex_flag==False: 87 | if conf.re==1: 88 | tf.add_to_collection("losses",sess_loss) 89 | self.loss = tf.add_n(tf.get_collection("losses")) 90 | else: 91 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 92 | self.pred = tf.argmax(y_conv_loss, 1) 93 | 94 | self.correct_prediction = tf.equal(tf.argmax(y_conv_loss,1), tf.argmax(self.y,1)) 95 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) -------------------------------------------------------------------------------- /simulations/script.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | from HEX_linear import Hex_linear 5 | from Lasso import Lasso 6 | 7 | from sklearn.metrics import mean_squared_error as mse, accuracy_score 8 | 9 | from matplotlib import pyplot as plt 10 | 11 | n = 200 12 | p = 10 13 | k = 10 14 | e = 1 15 | corr = 0.8 16 | 17 | logisticFlag = True 18 | 19 | def evaluate(y1, y2, logistic=False): 20 | if not logistic: 21 | return mse(y1, y1) 22 | else: 23 | return accuracy_score(y1.astype(int), y2.astype(int)) 24 | 25 | def binary_transform(y): 26 | m = np.exp(y) 27 | m = m/(1+m) 28 | k = [] 29 | for i in range(m.shape[0]): 30 | k.append(np.random.binomial(1, m[i], 1)[0]) 31 | return np.array(k) 32 | 33 | def predict(X, beta, logistic=False): 34 | if not logistic: 35 | return np.dot(X, beta) 36 | else: 37 | t = 1. / (1 + np.exp(-np.dot(X, beta))) 38 | y = np.zeros_like(t) 39 | y[t>0.5] = 1 40 | return y 41 | 42 | def generateData(): 43 | X1 = np.random.normal(size=[2*n, p]) 44 | X2 = np.random.normal(size=[2*n, p]) 45 | 46 | # X2[:int(n*corr), :] = X1[:int(n*corr), :] 47 | 48 | X = np.append(X1, X2, axis=1) 49 | 50 | # plt.imshow(X) 51 | # plt.show() 52 | 53 | Xtr = X[:n, :] 54 | Xte = X[n:, :] 55 | 56 | beta1 = np.random.random(k) + 1 57 | beta2 = np.random.random(k) + 1 58 | 59 | ytr = (np.dot(Xtr[:,:k], beta1) + np.dot(Xtr[:, p:p+k], beta2))/2 #+ np.random.normal(size=[n]) 60 | yte = np.dot(Xte[:,:k], beta1) #+ np.random.normal(size=[n]) 61 | 62 | ytr = binary_transform(ytr) 63 | yte = binary_transform(yte) 64 | 65 | Z = np.random.normal(size=[n, 2*p]) 66 | Zte = np.dot(Z[:, p:p+k], beta2) #+ np.random.normal(size=[n]) 67 | 68 | Zte = binary_transform(Zte) 69 | 70 | return Xtr, Xte, ytr, yte, Z, Zte, beta1 71 | 72 | def run(): 73 | Xtr, Xte, ytr, yte, Z, Zte, beta1 = generateData() 74 | 75 | print '-----------------' 76 | 77 | m0 = Lasso(lam=0, lr=1e0, logistic=False) 78 | m0.fit(Xtr[:, :p], ytr) 79 | yte0 = predict(Xte[:,:p], m0.getBeta(), logistic=logisticFlag) 80 | zte0 = predict(Z[:,:p], m0.getBeta(), logistic=logisticFlag) 81 | 82 | print '-----------------' 83 | m1 = Lasso(lam=0, lr=1e0, logistic=False) 84 | m1.fit(Xtr, ytr) 85 | yte1 = predict(Xte[:,:p], m1.getBeta()[:p], logistic=logisticFlag) 86 | zte1 = predict(Z[:,:p], m1.getBeta()[:p], logistic=logisticFlag) 87 | # 88 | print '-----------------' 89 | 90 | m2 = Hex_linear(hex_start=np.inf, ignoringIndex=p, lam=0, lr=1e0, project=True, logistic=False) 91 | m2.fit(Xtr, ytr) 92 | yte2 = predict(Xte[:,:p], m2.getBeta()[:p], logistic=logisticFlag) 93 | zte2 = predict(Z[:,:p], m2.getBeta()[:p], logistic=logisticFlag) 94 | 95 | print '-----------------' 96 | 97 | m3 = Hex_linear(hex_start = np.inf, 
ignoringIndex=p, lam=0, lr=1e0, logistic=False) #1e2 works OK 98 | m3.fit(Xtr, ytr) 99 | yte3 = predict(Xte[:,:p], m3.getBeta()[:p], logistic=logisticFlag) 100 | zte3 = predict(Z[:,:p], m3.getBeta()[:p], logistic=logisticFlag) 101 | 102 | # print '================' 103 | # print '--------------' 104 | # print mse(yte, yte0) 105 | # print mse(yte, yte1) 106 | # # print mse(yte, yte2) 107 | # print mse(yte, yte3) 108 | # print '--------------' 109 | # print '================' 110 | # print '--------------' 111 | # print mse(Zte, zte0) 112 | # print mse(Zte, zte1) 113 | # # print mse(yte, yte2) 114 | # print mse(Zte, zte3) 115 | # print '--------------' 116 | # print '================' 117 | # print '--------------' 118 | # print beta1[:10] 119 | # print m0.getBeta()[:20] 120 | # print m1.getBeta()[:20] 121 | # print m2.getBeta()[:20] 122 | # print m3.getBeta()[:20] 123 | # print '--------------' 124 | 125 | a0 = evaluate(yte, yte0, logistic=logisticFlag) 126 | a1 = evaluate(yte, yte1, logistic=logisticFlag) 127 | a2 = evaluate(yte, yte2, logistic=logisticFlag) 128 | a3 = evaluate(yte, yte3, logistic=logisticFlag) 129 | 130 | b0 = evaluate(Zte, zte0, logistic=logisticFlag) 131 | b1 = evaluate(Zte, zte1, logistic=logisticFlag) 132 | b2 = evaluate(Zte, zte2, logistic=logisticFlag) 133 | b3 = evaluate(Zte, zte3, logistic=logisticFlag) 134 | 135 | return a0, a1, a2, a3, b0, b1, b2, b3 136 | 137 | if __name__ == '__main__': 138 | for seed in range(10): 139 | np.random.seed(seed) 140 | print 'Seed', seed, '\t', 141 | m = run() 142 | for a in m: 143 | print a, '\t', 144 | print 145 | # np.random.seed(2) 146 | # m = run() 147 | # for a in m: 148 | # print a, '\t', 149 | -------------------------------------------------------------------------------- /dataGeneration/dataGeneration.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | 5 | def binary_transform(y): 6 | m = np.exp(y) 7 | m = m/(1+m) 8 | k = [] 9 | for i in range(m.shape[0]): 10 | k.append(np.random.binomial(1, m[i], 1)[0]) 11 | return np.array(k) 12 | 13 | def oneHotRepresentation(y): 14 | n = y.shape[0] 15 | r = np.zeros([n, 2]) 16 | for i in range(r.shape[0]): 17 | r[i,y[i]] = 1 18 | return r 19 | 20 | def sampleMultivariateGaussianDiagonalVariance(n, sigma_e, mu=0): 21 | return np.array([np.random.normal(mu, sigma_e) for i in range(n)]) 22 | 23 | def sampleData(n, p, corr, mu=0, sigma=1): 24 | # https://en.wikipedia.org/wiki/Autoregressive_model#Yule.E2.80.93Walker_equations 25 | c = np.random.normal(mu, sigma, size=n) 26 | sigma_e = np.sqrt((sigma ** 2) * (1 - corr ** 2)) 27 | 28 | # Sample the auto-regressive process. 
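# Editor's note (hedged sketch, not part of the original source): the loop below
# implements the AR(1) recursion from the Yule-Walker link above. Each new feature
# column satisfies x_j = corr * x_{j-1} + e_j, with e_j ~ N(mu, sigma_e^2) drawn
# independently per row; with the default mu = 0, choosing
# sigma_e = sigma * sqrt(1 - corr^2) keeps every column at marginal variance
# sigma^2 and gives columns at lag k a correlation of corr^k.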
29 | signal = [c] 30 | for _ in range(1, p): 31 | s = corr * signal[-1] + np.random.normal(mu, sigma_e, size=n) 32 | signal.append(s) 33 | 34 | return np.array(signal).T 35 | 36 | def dataGeneration(n, p): 37 | X = np.random.normal(size=[n, p]) 38 | # b = np.zeros([p]) 39 | # b[:10] = np.random.random(size=10) + 5 40 | # y = np.dot(X, b) + np.random.normal(size=[n]) 41 | 42 | return X 43 | 44 | def dataGeneration_Autoregressive(n, p, corr, group): 45 | p0 = p/group 46 | X = None 47 | for i in range(group): 48 | X0 = sampleData(n, p0, corr=corr) 49 | if i == 0: 50 | X = X0 51 | else: 52 | X = np.append(X, X0, 1) 53 | 54 | return X 55 | 56 | def dataGeneration_SNP(n, p, popNum=5, groupNum=2, 57 | totalGeneration=10, 58 | splitGeneration=8, migrationRate=2, 59 | MAF=0.1, 60 | r1 = 5, 61 | r2 = 0.5 62 | ): 63 | priorCount = np.zeros(shape=[p, 1]) 64 | 65 | samples = np.random.multinomial(n - popNum * 2, [1.0 / popNum] * popNum, size=1)[0] + 2 66 | subSamples = [0]*popNum*groupNum 67 | for i in range(popNum): 68 | for j in range(groupNum-1): 69 | subSamples[i*groupNum+j] = samples[i]/groupNum 70 | subSamples[i*groupNum+groupNum-1] = samples[i] - sum(subSamples[i*groupNum:i*groupNum+groupNum-1]) 71 | 72 | Z = np.zeros([n, len(subSamples)]) 73 | s =-1 74 | for i in range(len(subSamples)): 75 | for j in range(subSamples[i]): 76 | s+=1 77 | Z[s,i] = 1 78 | 79 | X = None 80 | for i in range(Z.shape[1]): 81 | num = len(np.where(Z[:,i]==1)[0]) 82 | xtmp = np.zeros(shape=[num, p]) 83 | 84 | if i%5==0: 85 | for j in range(p): 86 | prob = np.random.random()/10 + 0.45 87 | priorCount[j] = np.random.binomial(2, prob) 88 | 89 | 90 | for j in range(p): 91 | prob = np.random.beta(1+r1*priorCount[j], 1+r1*(2-priorCount[j])) 92 | prior_c = np.random.binomial(2, prob) 93 | for k in range(num): 94 | prob = np.random.beta(1+r2*prior_c, 1+r2*(2-prior_c)) 95 | xtmp[k,j] = np.random.binomial(2, prob) 96 | if i == 0: 97 | X = xtmp 98 | else: 99 | X = np.append(X, xtmp, 0) 100 | 101 | return X.astype(float) 102 | 103 | def discreteMapping(X): 104 | X[X>0] = 2 105 | X[X<0] = 1 106 | return X 107 | 108 | def generateData(seed, n, p, snp=True): 109 | np.random.seed(seed) 110 | Xtrain = dataGeneration_SNP(n=n, p=p) 111 | Xval = dataGeneration_SNP(n=n, p=p) 112 | Xtest = dataGeneration(n=n, p=p) 113 | 114 | # Xtrain = discreteMapping(Xtrain) 115 | # Xval = discreteMapping(Xval) 116 | # Xtest = discreteMapping(Xtest) 117 | 118 | b = np.zeros(p) 119 | for i in range(group): 120 | b[i*p/group] = np.random.random() 121 | Ytrain = oneHotRepresentation(binary_transform(np.dot(Xtrain, b))) 122 | Yval = oneHotRepresentation(binary_transform(np.dot(Xval, b))) 123 | Ytest = oneHotRepresentation(binary_transform(np.dot(Xtest, b))) 124 | 125 | dataPath = '../data/'+str(seed) + '_' + str(n) + '_' + str(p) + '_' + str(group) + '_' 126 | 127 | np.save(dataPath+'Xtrain', Xtrain) 128 | np.save(dataPath+'Xval', Xval) 129 | np.save(dataPath+'Xtest', Xtest) 130 | np.save(dataPath+'Ytrain', Ytrain) 131 | np.save(dataPath+'Yval', Yval) 132 | np.save(dataPath+'Ytest', Ytest) 133 | 134 | if __name__ == '__main__': 135 | for seed in range(10): 136 | n = 5000 137 | p = 1000 138 | group = 100 139 | generateData(seed=seed, n=n, p=p, snp=True) -------------------------------------------------------------------------------- /simulations/HEX_linear.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | from numpy import linalg 5 | from scipy import linalg as 
scialg 6 | from helpingMethods import generatingWeightMatrix_py 7 | 8 | def pred(X, b): 9 | t = 1. / (1 + np.exp(-np.dot(X, b))) 10 | y = np.zeros_like(t) 11 | y[t>0.5] = 1 12 | return y 13 | 14 | class Hex_linear: 15 | def __init__(self, lam=1., lr=1., tol=1e-5, logistic=False, ignoringIndex = 100, hex_start=100, project=False): 16 | self.lam = lam 17 | self.lr = lr 18 | self.tol = tol 19 | self.decay = 0.5 20 | self.maxIter = 1000 21 | self.logistic = logistic 22 | self.hex_start = hex_start 23 | self.ignoringIndex = ignoringIndex 24 | self.project = project 25 | 26 | def setLambda(self, lam): 27 | self.lam = lam 28 | 29 | def setLogisticFlag(self, logistic): 30 | self.logistic = logistic 31 | 32 | def setLearningRate(self, lr): 33 | self.lr = lr 34 | 35 | def setMaxIter(self, a): 36 | self.maxIter = a 37 | 38 | def setTol(self, t): 39 | self.tol = t 40 | 41 | def fit(self, X, y): 42 | shp = X.shape 43 | self.beta = np.zeros([shp[1], 1]) 44 | resi_prev = np.inf 45 | resi = self.cost(X, y) 46 | step = 0 47 | hex_flag = False 48 | while np.abs(resi_prev - resi) > self.tol and step < self.maxIter: 49 | 50 | if hex_flag or (self.project and step>0): 51 | if not self.project: 52 | # W = generatingWeightMatrix_py(pred(X[:,self.ignoringIndex:], self.beta[self.ignoringIndex:]), y.reshape([y.shape[0], 1])) 53 | # W = generatingWeightMatrix_py(np.dot(X[:,self.ignoringIndex:], self.beta[self.ignoringIndex:]), y.reshape([y.shape[0], 1])) 54 | W = generatingWeightMatrix_py(X[:,self.ignoringIndex:], y.reshape([y.shape[0], 1])) 55 | W_half = np.real(scialg.sqrtm(W)) 56 | else: 57 | # T = pred(X[:,self.ignoringIndex:], self.beta[self.ignoringIndex:]) 58 | # T = np.dot(X[:,self.ignoringIndex:], self.beta[self.ignoringIndex:]) 59 | T = X[:,self.ignoringIndex:] 60 | W_half = np.eye(shp[0]) - np.dot(T, np.dot(np.linalg.inv(np.dot(T.T, T)), T.T)) 61 | 62 | Xproj = np.dot(W_half, X) 63 | yproj = np.dot(W_half, y).reshape(y.shape[0]) 64 | else: 65 | Xproj = X 66 | yproj = y 67 | 68 | 69 | keepRunning = True 70 | resi_prev = resi 71 | runningStep = 0 72 | while keepRunning and runningStep < 10: 73 | runningStep += 1 74 | prev_beta = self.beta 75 | pg = self.proximal_gradient(Xproj, yproj) 76 | self.beta = self.proximal_proj(self.beta - pg * self.lr) 77 | keepRunning = self.stopCheck(prev_beta, self.beta, pg, Xproj, yproj) 78 | if keepRunning: 79 | self.lr = self.decay * self.lr 80 | 81 | # print self.beta.T 82 | 83 | step += 1 84 | resi = self.cost(Xproj, yproj) 85 | 86 | if (np.abs(resi_prev - resi)0.5] = 1 120 | return t 121 | 122 | def getBeta(self): 123 | self.beta = self.beta.reshape(self.beta.shape[0]) 124 | return self.beta 125 | 126 | def stopCheck(self, prev, new, pg, X, y): 127 | if np.square(linalg.norm((y - (np.dot(X, new))))) <= \ 128 | np.square(linalg.norm((y - (np.dot(X, prev))))) + np.dot(pg.transpose(), ( 129 | new - prev)) + 0.5 * self.lam * np.square(linalg.norm(prev - new)): 130 | return False 131 | else: 132 | return True 133 | -------------------------------------------------------------------------------- /office/MLP.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 6 | def lamda_variable(shape): 7 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 8 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 9 | 10 | def 
theta_variable(shape): 11 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 12 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 13 | 14 | def generatingWeightMatrix(images, labels, epoch, division, batch): 15 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 16 | return W 17 | 18 | def checkInformation(rep, epoch, s): 19 | X = py_func(checkInformation_py, [rep, epoch, s], [tf.float32])[0] 20 | return X 21 | 22 | def weight_variable(shape): 23 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 24 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 25 | 26 | def bias_variable(shape): 27 | initializer = tf.constant_initializer(0.0) 28 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 29 | 30 | def conv2d(x, W): 31 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 32 | 33 | def max_pool_2x2(x): 34 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 35 | 36 | class MLP(object): 37 | def __init__(self, x, y, z, conf, Hex_flag=False): 38 | self.x = tf.reshape(x, shape=[-1, 800]) 39 | self.z=tf.reshape(z,shape=[-1, 256]) 40 | self.y = y 41 | self.keep_prob = tf.placeholder(tf.float32) 42 | self.e=tf.placeholder(tf.float32) 43 | self.batch=tf.placeholder(tf.float32) 44 | #####################glgcm######################### 45 | 46 | with tf.variable_scope("fc1"): 47 | W_fc1 = weight_variable([800, 256]) 48 | b_fc1 = bias_variable([256]) 49 | h_fc1 = tf.nn.relu(tf.matmul(self.x, W_fc1) + b_fc1) 50 | 51 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 52 | 53 | with tf.variable_scope("fc2"): 54 | W_fc2 = weight_variable([256, 32]) 55 | b_fc2 = bias_variable([32]) 56 | h_fc2 = tf.nn.relu(tf.matmul(self.z, W_fc2) + b_fc2) 57 | 58 | 59 | yconv_contact_loss=tf.concat([h_fc1_drop, h_fc2],1) 60 | #yconv_contact_loss=tf.concat([tf.zeros_like(h_fc1_drop, tf.float32),tf.zeros_like(glgcm_h_fc1_drop, tf.float32)],1) 61 | 62 | pad=tf.zeros_like(h_fc2, tf.float32) 63 | yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 64 | 65 | pad2 = tf.zeros_like(h_fc1, tf.float32) 66 | yconv_contact_H = tf.concat([pad2, h_fc2],1) 67 | 68 | # fc2 69 | with tf.variable_scope("fc3"): 70 | W_fc3 = weight_variable([288, 31]) 71 | b_fc3 = bias_variable([31]) 72 | y_conv_loss = tf.matmul(yconv_contact_loss, W_fc3) + b_fc3 73 | y_conv_pred = tf.matmul(yconv_contact_pred, W_fc3) + b_fc3 74 | y_conv_H = tf.matmul(yconv_contact_H, W_fc3) + b_fc3 75 | 76 | """ 77 | t_histo_rows = [ 78 | tf.histogram_fixed_width( 79 | tf.gather(x, [row]), 80 | [0.0, 256.0], 100) 81 | for row in range(128)] 82 | 83 | H = tf.stack(t_histo_rows, axis=0) 84 | """ 85 | # H = y_conv_H 86 | 87 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 88 | if Hex_flag==False: 89 | if conf.re==1: 90 | tf.add_to_collection("losses",sess_loss) 91 | self.loss = tf.add_n(tf.get_collection("losses")) 92 | else: 93 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 94 | self.pred = tf.argmax(y_conv_pred, 1) 95 | 96 | # H = y_conv_H 97 | # H = tf.argmax(y_conv_H, 1) 98 | # y_H = tf.one_hot(H, depth=7) 99 | 100 | # y_conv_pred = checkInformation(y_conv_pred, self.e, 'hey') 101 | # H = checkInformation(H, self.e, 'ha') 102 | 103 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 104 
| self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 105 | if Hex_flag: 106 | # loss = tf.sqrt(tf.reshape(tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32), [-1, 1]) + 1e-10) 107 | 108 | # y_conv_loss = generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 109 | 110 | y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 111 | y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 112 | # 113 | # I1 = checkInformation(y_conv_loss, self.e, self.batch) 114 | # 115 | # I2 = checkInformation(y_conv_H, self.e, self.batch) 116 | 117 | y_conv_loss = y_conv_loss - \ 118 | tf.matmul(tf.matmul(tf.matmul(y_conv_H, tf.matrix_inverse(tf.matmul(y_conv_H, y_conv_H, transpose_a=True))), y_conv_H, transpose_b=True), y_conv_loss) 119 | 120 | # I3 = checkInformation(y_conv_loss, self.e, self.batch) 121 | 122 | # y_conv_loss = tf.matmul(I1, tf.matmul(I2, tf.matmul(I3, y_conv_loss))) 123 | 124 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 125 | 126 | # self.loss = tf.reduce_mean(tf.multiply(W, tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32))) 127 | 128 | # tf.stop_gradient(W) 129 | # if conf.re==1: 130 | # sess_loss = tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) 131 | # 132 | # tf.add_to_collection("losses",tf.reshape(sess_loss,[])) 133 | # self.loss = tf.add_n(tf.get_collection("losses")) 134 | # else: 135 | # self.loss=tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) -------------------------------------------------------------------------------- /PACS/datagenerator.py: -------------------------------------------------------------------------------- 1 | # Created on Wed May 31 14:48:46 2017 2 | # 3 | # @author: Frederik Kratzert 4 | 5 | """Containes a helper class for image input pipelines in tensorflow.""" 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | import os 10 | from tensorflow.python.framework import dtypes 11 | from tensorflow.python.framework.ops import convert_to_tensor 12 | 13 | IMAGENET_MEAN = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32) 14 | 15 | 16 | class ImageDataGenerator(object): 17 | """Wrapper class around the new Tensorflows dataset pipeline. 18 | 19 | Requires Tensorflow >= version 1.12rc0 20 | """ 21 | 22 | def __init__(self, txt_file, dataroot, mode, batch_size, num_classes, shuffle=True, 23 | buffer_size=1000): 24 | """Create a new ImageDataGenerator. 25 | 26 | Recieves a path string to a text file, which consists of many lines, 27 | where each line has first a path string to an image and seperated by 28 | a space an integer, referring to the class number. Using this data, 29 | this class will create TensrFlow datasets, that can be used to train 30 | e.g. a convolutional neural network. 31 | 32 | Args: 33 | txt_file: Path to the text file. 34 | mode: Either 'training' or 'validation'. Depending on this value, 35 | different parsing functions will be used. 36 | batch_size: Number of images per batch. 37 | num_classes: Number of classes in the dataset. 38 | shuffle: Wether or not to shuffle the data in the dataset and the 39 | initial file list. 40 | buffer_size: Number of images used as buffer for TensorFlows 41 | shuffling of the dataset. 42 | 43 | Raises: 44 | ValueError: If an invalid mode is passed. 
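        Example:
            A hypothetical line of `txt_file` (illustrative only, not taken
            from the original repository), in the format parsed by
            `_read_txt_file` below -- an image path relative to `dataroot`,
            one space, then the integer class id:

                art_painting/dog/pic_001.jpg 3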
45 | 46 | """ 47 | self.dataroot = dataroot 48 | self.txt_file = txt_file 49 | self.num_classes = num_classes 50 | 51 | # retrieve the data from the text file 52 | self._read_txt_file() 53 | 54 | # number of samples in the dataset 55 | self.data_size = len(self.labels) 56 | 57 | # initial shuffling of the file and label lists (together!) 58 | if shuffle: 59 | self._shuffle_lists() 60 | 61 | # convert lists to TF tensor 62 | self.img_paths = convert_to_tensor(self.img_paths, dtype=dtypes.string) 63 | self.labels = convert_to_tensor(self.labels, dtype=dtypes.int32) 64 | 65 | # create dataset 66 | data = tf.data.Dataset.from_tensor_slices((self.img_paths, self.labels)).repeat() 67 | 68 | # distinguish between train/infer. when calling the parsing functions 69 | if mode == 'training': 70 | data = data.map(self._parse_function_train, num_parallel_calls=8) 71 | #output_buffer_size=100*batch_size) 72 | 73 | elif mode == 'inference': 74 | data = data.map(self._parse_function_inference, num_parallel_calls=8) 75 | #output_buffer_size=100*batch_size) 76 | 77 | else: 78 | raise ValueError("Invalid mode '%s'." % (mode)) 79 | 80 | # shuffle the first `buffer_size` elements of the dataset 81 | if shuffle: 82 | data = data.shuffle(buffer_size=buffer_size) 83 | 84 | # create a new dataset with batches of images 85 | data = data.batch(batch_size) 86 | 87 | self.data = data 88 | 89 | def _read_txt_file(self): 90 | """Read the content of the text file and store it into lists.""" 91 | self.img_paths = [] 92 | self.labels = [] 93 | with open(self.txt_file, 'r') as f: 94 | lines = f.readlines() 95 | for line in lines: 96 | items = line.split(' ') 97 | self.img_paths.append(os.path.join(self.dataroot, items[0])) 98 | self.labels.append(int(items[1])) 99 | 100 | def _shuffle_lists(self): 101 | """Conjoined shuffling of the list of paths and labels.""" 102 | path = self.img_paths 103 | labels = self.labels 104 | permutation = np.random.permutation(self.data_size) 105 | self.img_paths = [] 106 | self.labels = [] 107 | for i in permutation: 108 | self.img_paths.append(path[i]) 109 | self.labels.append(labels[i]) 110 | 111 | def _parse_function_train(self, filename, label): 112 | """Input parser for samples of the training set.""" 113 | # convert label number into one-hot-encoding 114 | one_hot = tf.one_hot(label, self.num_classes) 115 | 116 | # load and preprocess the image 117 | img_string = tf.read_file(filename) 118 | img_decoded = tf.image.decode_png(img_string, channels=3) 119 | img_resized = tf.image.resize_images(img_decoded, [227, 227]) 120 | """ 121 | Dataaugmentation comes here. 
122 | """ 123 | img_centered = tf.subtract(img_resized, IMAGENET_MEAN) 124 | 125 | 126 | img_resized2 = tf.image.resize_images(img_decoded, [28, 28]) 127 | img_centered2 = tf.subtract(img_resized2, IMAGENET_MEAN) 128 | gray=tf.image.rgb_to_grayscale(img_centered2) 129 | 130 | # RGB -> BGR 131 | img_bgr = img_centered[:, :, ::-1] 132 | 133 | return img_bgr, gray, one_hot 134 | 135 | def _parse_function_inference(self, filename, label): 136 | """Input parser for samples of the validation/test set.""" 137 | # convert label number into one-hot-encoding 138 | one_hot = tf.one_hot(label, self.num_classes) 139 | 140 | # load and preprocess the image 141 | img_string = tf.read_file(filename) 142 | img_decoded = tf.image.decode_png(img_string, channels=3) 143 | img_resized = tf.image.resize_images(img_decoded, [227, 227]) 144 | img_centered = tf.subtract(img_resized, IMAGENET_MEAN) 145 | 146 | img_resized2 = tf.image.resize_images(img_decoded, [28, 28]) 147 | img_centered2 = tf.subtract(img_resized2, IMAGENET_MEAN) 148 | gray=tf.image.rgb_to_grayscale(img_centered2) 149 | # RGB -> BGR 150 | img_bgr = img_centered[:, :, ::-1] 151 | 152 | return img_bgr,gray, one_hot 153 | -------------------------------------------------------------------------------- /Model/MLP.py: -------------------------------------------------------------------------------- 1 | """ Multilayer Perceptron. 2 | 3 | A Multilayer Perceptron (Neural Network) implementation example using 4 | TensorFlow library. This example is using the MNIST database of handwritten 5 | digits (http://yann.lecun.com/exdb/synthetic/). 6 | 7 | Links: 8 | [MNIST Dataset](http://yann.lecun.com/exdb/synthetic/). 9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | # ------------------------------------------------------------------ 15 | # 16 | # THIS EXAMPLE HAS BEEN RENAMED 'neural_network.py', FOR SIMPLICITY. 
17 | # 18 | # ------------------------------------------------------------------ 19 | 20 | 21 | from __future__ import print_function 22 | 23 | import sys 24 | sys.path.append('../') 25 | 26 | import numpy as np 27 | import tensorflow as tf 28 | from tensorflow import py_func 29 | 30 | from dataGeneration.dataLoader import loadData 31 | from helpingFunctions import generatingWeightMatrix_py 32 | 33 | def generatingWeightMatrix(images, labels): 34 | W = py_func(generatingWeightMatrix_py, [images, labels], [tf.float32])[0] 35 | return W 36 | 37 | def loss(logits, labels, h, HEX=True): 38 | cross_entropy = tf.nn.softmax_cross_entropy_with_logits( 39 | labels=labels, logits=logits, name='cross_entropy_per_example') 40 | if not HEX: 41 | return tf.reduce_mean(cross_entropy) 42 | cross_entropy = tf.sqrt(tf.reshape(cross_entropy, [-1, 1]) + 1e-10) 43 | W = generatingWeightMatrix(h, labels) 44 | tf.stop_gradient(W) 45 | cross_entropy = tf.matmul(tf.matmul(cross_entropy, W, transpose_a=True), cross_entropy) 46 | # cross_entropy = tf.matmul(cross_entropy, cross_entropy, transpose_a=True) 47 | return tf.reduce_mean(cross_entropy) 48 | 49 | # import numpy as np 50 | # np.random.seed(1) 51 | tf.set_random_seed(1) 52 | 53 | # Experiment Setting 54 | 55 | def experiment(seed, HEX_flag): 56 | 57 | n = 500 58 | p = 1000 59 | group = 100 60 | 61 | # Parameters 62 | learning_rate = 1e-3 63 | training_epochs = 100 64 | batch_size = 100 65 | display_step = 1 66 | 67 | # Network Parameters 68 | n_hidden_1 = 50 # 1st layer number of neurons 69 | n_hidden_2 = 10 # 2nd layer number of neurons 70 | n_input = p # MNIST data input (img shape: 28*28) 71 | n_classes = 2 # synthetic 2 classes 72 | 73 | Xtrain, Ytrain, Xval, Yval, Xtest, Ytest = loadData(seed, n, p, group) 74 | 75 | # tf Graph input 76 | X = tf.placeholder("float", [None, n_input]) 77 | Y = tf.placeholder("float", [None, n_classes]) 78 | 79 | # Store layers weight & bias 80 | weights = { 81 | 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1],seed=0)), 82 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2],seed=0)), 83 | 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes],seed=0)) 84 | } 85 | biases = { 86 | 'b1': tf.Variable(tf.random_normal([n_hidden_1],seed=0)), 87 | 'b2': tf.Variable(tf.random_normal([n_hidden_2],seed=0)), 88 | 'out': tf.Variable(tf.random_normal([n_classes],seed=0)) 89 | } 90 | 91 | 92 | # Create model 93 | def multilayer_perceptron(x): 94 | # Hidden fully connected layer with 256 neurons 95 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 96 | # Hidden fully connected layer with 256 neurons 97 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 98 | # Output fully connected layer with a neuron for each class 99 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 100 | return layer_1, layer_2, out_layer 101 | 102 | # Construct model 103 | layer1, layer2, logits = multilayer_perceptron(X) 104 | 105 | # Define loss and optimizer 106 | loss_op = loss(logits, Y, h=layer1, HEX=HEX_flag) 107 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 108 | train_op = optimizer.minimize(loss_op) 109 | # Initializing the variables 110 | init = tf.global_variables_initializer() 111 | 112 | # maxiVal = 0 113 | # saver = tf.train.Saver() 114 | 115 | with tf.Session() as sess: 116 | sess.run(init) 117 | 118 | pred = tf.nn.softmax(logits) # Apply softmax to logits 119 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1)) 120 | # Calculate accuracy 121 | 
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 122 | 123 | # Training cycle 124 | for epoch in range(training_epochs): 125 | avg_cost = 0. 126 | total_batch = int(n/batch_size) 127 | # Loop over all batches 128 | for i in range(total_batch): 129 | # batch_x, batch_y = synthetic.train.next_batch(batch_size) 130 | batch_x = Xtrain[i*batch_size:(i+1)*batch_size,:] 131 | batch_y = Ytrain[i*batch_size:(i+1)*batch_size,:] 132 | # Run optimization op (backprop) and cost op (to get loss value) 133 | _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y}) 134 | # Compute average loss 135 | avg_cost += c / total_batch 136 | # Display logs per epoch step 137 | if epoch % display_step == 0: 138 | print("Epoch:", '%04d' % (epoch+1), "cost={:.9f}".format(avg_cost)) 139 | 140 | val = accuracy.eval({X: Xval, Y: Yval}) 141 | print("\tValidation Accuracy: ={:.9f}".format(val)) 142 | 143 | # if val > maxiVal: 144 | # maxiVal = val 145 | # saver.save(sess, 'current_best') 146 | 147 | print("Optimization Finished!") 148 | 149 | # Test model 150 | 151 | score = accuracy.eval({X: Xtest, Y: Ytest}) 152 | print("Accuracy:", score) 153 | return score 154 | 155 | if __name__ == '__main__': 156 | # results = [] 157 | # for corr in [0.8]: 158 | # for HEX_flag in [False, True]: 159 | # result = [] 160 | # for seed in range(10): 161 | # a = experiment(seed=seed, HEX_flag=HEX_flag) 162 | # result.append(a) 163 | # results.append(result) 164 | # results = np.array(results) 165 | # np.save('results_useful', results) 166 | experiment(seed=3, HEX_flag=True) -------------------------------------------------------------------------------- /MNIST_R/mnist_r.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | 4 | from datetime import datetime 5 | import numpy as np 6 | import imutils 7 | import cv2 8 | #(x_train, y_train), (x_test, y_test) = mnist.load_data() 9 | borderType = cv2.BORDER_CONSTANT 10 | start = datetime.now() 11 | batch_size = 128 12 | num_classes = 10 13 | epochs = 21 14 | def check(img,y): 15 | print(y) 16 | cv2.imshow('1',img) 17 | cv2.waitKey(0) 18 | cv2.destroyAllWindows() 19 | # input image dimensions 20 | img_rows, img_cols = 28, 28 21 | 22 | # the data, split between train and test sets 23 | #(x_train, y_train), (x_test, y_test) = mnist.load_data() 24 | import cPickle as pickle 25 | import gzip 26 | # f = gzip.open('../../../data/mnist.pkl.gz', 'rb') 27 | # #f=gzip.open('/media/student/Data/zexue/MNIST/mnist.pkl.gz','rb') 28 | # training_data, validation_data, test_data = pickle.load(f) 29 | # x_train=training_data[0] 30 | # y_train=training_data[1] 31 | # x_val=validation_data[0] 32 | # y_val=validation_data[1] 33 | # x_test=test_data[0] 34 | # y_test=test_data[1] 35 | # x_train=x_train.reshape(x_train.shape[0],28,28) 36 | # x_test=x_test.reshape(x_test.shape[0],28,28) 37 | # x_val=x_val.reshape(x_val.shape[0],28,28) 38 | # print (x_train.shape) 39 | # print(x_train.astype) 40 | # print(x_val.astype) 41 | 42 | # def rotate(x,y): 43 | # rotatedx = [] 44 | # rotatedy = [] 45 | # rotatedp=[] 46 | # angles=[0,15,45,60,75] 47 | # for dig, lab in zip(x,y): 48 | # i=np.random.randint(5) 49 | # rotated = imutils.rotate(dig, angles[i]) 50 | # rotatedx.append(rotated.reshape(784)) 51 | # rotatedy.append(lab) 52 | # rotatedp.append(i) 53 | # rox=np.array(rotatedx) 54 | # roy=np.array(rotatedy) 55 | # rop=np.array(rotatedp) 56 | # return rox,roy,rop 57 | 58 | # xtrain,ytrain,ptrain=rotate(x_train,y_train) 59 | # print xtrain.shape 
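# Editor's note (hedged summary, not part of the original source): the commented-out
# rotate() above assigned every image a single random angle. The active pipeline
# further below instead builds the leave-one-angle-out MNIST-R splits:
# subSamplingImages() keeps 100 images per class, rotateImg() replicates them at
# each of the five training angles (with an 80/20 train/validation split), and
# loadDataRotate(test=0..5) holds the remaining angle out as the test set.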
60 | #check(x_train[19],ytrain[19]) 61 | 62 | #check(xtrain[19].reshape(28,28),ptrain[19]) 63 | 64 | # xval,yval,pval=rotate(x_val,y_val) 65 | # xtest,ytest,ptest=rotate(x_test,y_test) 66 | # np.save('../../../data/MNIST-r/npy/xtrain.npy',xtrain) 67 | # np.save('../../../data/MNIST-r/npy/ytrain.npy',ytrain) 68 | # np.save('../../../data/MNIST-r/npy/ptrain.npy',ptrain) 69 | # 70 | # np.save('../../../data/MNIST-r/npy/xval.npy',xval) 71 | # np.save('../../../data/MNIST-r/npy/yval.npy',yval) 72 | # np.save('../../../data/MNIST-r/npy/pval.npy',pval) 73 | # 74 | # np.save('../../../data/MNIST-r/npy/xtest.npy',xtest) 75 | # np.save('../../../data/MNIST-r/npy/ytest.npy',ytest) 76 | # np.save('../../../data/MNIST-r/npy/ptest.npy',ptest) 77 | 78 | def oneHotRepresentation(y): 79 | n = y.shape[0] 80 | r = np.zeros([n, 10]) 81 | for i in range(r.shape[0]): 82 | r[i,int(y[i])] = 1 83 | return r 84 | 85 | def rotateImg_TrainVal(x, angles): 86 | rotatedx = [] 87 | rotatedp=[] 88 | for dig in x: 89 | i=np.random.randint(5) 90 | rotated = imutils.rotate(dig, angles[i]) 91 | rotatedx.append(rotated.reshape(784)) 92 | rotatedp.append(i) 93 | rox=np.array(rotatedx) 94 | rop=np.array(rotatedp) 95 | return rox,rop 96 | 97 | def rotateImg_Test(x, angle): 98 | rotatedx = [] 99 | for dig in x: 100 | rotated = imutils.rotate(dig, angle) 101 | rotatedx.append(rotated.reshape(784)) 102 | rox=np.array(rotatedx) 103 | return rox 104 | 105 | def rotateImg(x, angle): 106 | x = x.reshape(x.shape[0],28,28) 107 | rotatedx = [] 108 | for dig in x: 109 | rotated = imutils.rotate(dig, angle) 110 | rotatedx.append(rotated.reshape(28*28)) 111 | rox=np.array(rotatedx) 112 | return rox 113 | 114 | def subSamplingImages(X, y): 115 | indices = np.random.permutation(X.shape[0]) 116 | X = X[indices,:] 117 | y = y[indices] 118 | mx = [] 119 | my = [] 120 | count = [0]*10 121 | for i in range(X.shape[0]): 122 | if count[y[i]] < 100: 123 | count[y[i]] += 1 124 | mx.append(X[i,:]) 125 | my.append(y[i]) 126 | return np.array(mx), np.array(my) 127 | 128 | def loadDataRotate(test=0): 129 | # np.random.seed(0) 130 | 131 | if test == 0: 132 | trainAng = [15, 30, 45, 60, 75] 133 | testAng = 0 134 | elif test == 1: 135 | trainAng = [0, 30, 45, 60, 75] 136 | testAng = 15 137 | elif test == 2: 138 | trainAng = [0, 15, 45, 60, 75] 139 | testAng = 30 140 | elif test == 3: 141 | trainAng = [0, 15, 30, 60, 75] 142 | testAng = 45 143 | elif test == 4: 144 | trainAng = [0, 15, 30, 45, 75] 145 | testAng = 60 146 | else: 147 | trainAng = [0, 15, 30, 45, 60] 148 | testAng = 75 149 | 150 | f = gzip.open('../data/MNIST/mnist.pkl.gz', 'rb') 151 | training_data, validation_data, test_data = pickle.load(f) 152 | 153 | # x_train=training_data[0] 154 | # y_train=training_data[1] 155 | # x_val=validation_data[0] 156 | # y_val=validation_data[1] 157 | # x_test=test_data[0] 158 | # y_test=test_data[1] 159 | # x_train=x_train.reshape(x_train.shape[0],28,28) 160 | # x_test=x_test.reshape(x_test.shape[0],28,28) 161 | # x_val=x_val.reshape(x_val.shape[0],28,28) 162 | # 163 | # 164 | # 165 | # x_train, r_train = rotateImg_TrainVal(x_train, trainAng) 166 | # x_val, r_val = rotateImg_TrainVal(x_val, trainAng) 167 | # x_test = rotateImg_Test(x_test, testAng) 168 | # 169 | # return x_train, oneHotRepresentation(y_train),x_val,oneHotRepresentation(y_val),x_test,oneHotRepresentation(y_test) 170 | 171 | mx, my = subSamplingImages(training_data[0], training_data[1]) 172 | trainValX = None 173 | trainValy = None 174 | for i in trainAng: 175 | if trainValX is None: 176 | 
trainValX = rotateImg(mx, i) 177 | trainValy = my 178 | else: 179 | trainValX = np.append(trainValX, rotateImg(mx, i), 0) 180 | trainValy = np.append(trainValy, my, 0) 181 | 182 | n = trainValX.shape[0] 183 | indices = np.random.permutation(n) 184 | trainX = trainValX[indices[:int(0.8*n)], :] 185 | trainY = trainValy[indices[:int(0.8*n)]] 186 | valX = trainValX[indices[int(0.8*n):], :] 187 | valY = trainValy[indices[int(0.8*n):]] 188 | 189 | testX = rotateImg(mx, testAng) 190 | testY = my 191 | 192 | return trainX, oneHotRepresentation(trainY), valX, oneHotRepresentation(valY), testX, oneHotRepresentation(testY) 193 | 194 | -------------------------------------------------------------------------------- /sentiment/cnn_baseline.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 6 | def lamda_variable(shape): 7 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 8 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 9 | 10 | def theta_variable(shape): 11 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 12 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 13 | 14 | def generatingWeightMatrix(images, labels, epoch, division, batch): 15 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 16 | return W 17 | 18 | def checkInformation(rep, epoch, s): 19 | X = py_func(checkInformation_py, [rep, epoch, s], [tf.float32])[0] 20 | return X 21 | 22 | def weight_variable(shape): 23 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 24 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 25 | 26 | def bias_variable(shape): 27 | initializer = tf.constant_initializer(0.0) 28 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 29 | 30 | def conv2d(x, W): 31 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 32 | 33 | def max_pool_2x2(x): 34 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 35 | 36 | class MNISTcnn(object): 37 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 38 | self.x = tf.reshape(x, shape=[-1, 28, 28, 1]) 39 | self.x_re=tf.reshape(x_re,shape=[-1,conf.ngray,784]) 40 | self.x_d=x_d 41 | self.y = y 42 | self.keep_prob = tf.placeholder(tf.float32) 43 | self.e=tf.placeholder(tf.float32) 44 | self.batch=tf.placeholder(tf.float32) 45 | #####################glgcm######################### 46 | 47 | # with tf.variable_scope('glgcm'): 48 | # lamda = lamda_variable([conf.ngray,1]) 49 | # theta= theta_variable([conf.ngray,1]) 50 | # # index=tf.multiply(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),1e-5),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),1e-5),1)) 51 | # # g=tf.reduce_sum(index,reduction_indices=2) 52 | # g=tf.matmul(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),0),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),0),1), transpose_b=True) 53 | # #print(g.get_shape()) 54 | # 55 | # 56 | # with tf.variable_scope("glgcm_fc1"): 57 | # g_flat = tf.reshape(g, [-1, conf.ngray*conf.ngray]) 58 | # glgcm_W_fc1 = weight_variable([conf.ngray*conf.ngray, 32]) 59 | # glgcm_b_fc1 = bias_variable([32]) 60 | # glgcm_h_fc1 = 
tf.nn.relu(tf.matmul(g_flat, glgcm_W_fc1) + glgcm_b_fc1) 61 | # glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob) 62 | 63 | #####################################glgcm############################ 64 | ######################################hex############################# 65 | # H = glgcm_h_fc1 66 | ######################################hex############################ 67 | 68 | ######################################Sentiment###################### 69 | # conv1 70 | with tf.variable_scope('hex'): 71 | with tf.variable_scope('conv1'): 72 | W_conv1 = weight_variable([5, 5, 1, 32]) 73 | if conf.re==1: 74 | tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1)) 75 | b_conv1 = bias_variable([32]) 76 | h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1) 77 | h_pool1 = max_pool_2x2(h_conv1) 78 | 79 | # conv2 80 | with tf.variable_scope('conv2'): 81 | W_conv2 = weight_variable([5, 5, 32, 64]) 82 | b_conv2 = bias_variable([64]) 83 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 84 | h_pool2 = max_pool_2x2(h_conv2) 85 | 86 | # fc1 87 | with tf.variable_scope("fc1"): 88 | shape = int(np.prod(h_pool2.get_shape()[1:])) 89 | W_fc1 = weight_variable([shape, 1024]) 90 | b_fc1 = bias_variable([1024]) 91 | h_pool2_flat = tf.reshape(h_pool2, [-1, shape]) 92 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 93 | 94 | # h_fc1 = checkInformation(h_fc1, self.e) 95 | 96 | # dropout 97 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 98 | 99 | 100 | # yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1) 101 | # #yconv_contact_loss=tf.concat([tf.zeros_like(h_fc1_drop, tf.float32),tf.zeros_like(glgcm_h_fc1_drop, tf.float32)],1) 102 | # 103 | # pad=tf.zeros_like(glgcm_h_fc1, tf.float32) 104 | # yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 105 | # 106 | # pad2 = tf.zeros_like(h_fc1, tf.float32) 107 | # yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1) 108 | 109 | # fc2 110 | with tf.variable_scope("fc2"): 111 | W_fc2 = weight_variable([1024, 7]) 112 | b_fc2 = bias_variable([7]) 113 | y_conv_loss = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 114 | ######################################Sentiment###################### 115 | 116 | """ 117 | t_histo_rows = [ 118 | tf.histogram_fixed_width( 119 | tf.gather(x, [row]), 120 | [0.0, 256.0], 100) 121 | for row in range(128)] 122 | 123 | H = tf.stack(t_histo_rows, axis=0) 124 | """ 125 | # H = y_conv_H 126 | 127 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 128 | if Hex_flag==False: 129 | if conf.re==1: 130 | tf.add_to_collection("losses",sess_loss) 131 | self.loss = tf.add_n(tf.get_collection("losses")) 132 | else: 133 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 134 | self.pred = tf.argmax(y_conv_loss, 1) 135 | 136 | # H = y_conv_H 137 | # H = tf.argmax(y_conv_H, 1) 138 | # y_H = tf.one_hot(H, depth=7) 139 | 140 | # y_conv_pred = checkInformation(y_conv_pred, self.e, 'hey') 141 | # H = checkInformation(H, self.e, 'ha') 142 | 143 | self.correct_prediction = tf.equal(tf.argmax(y_conv_loss,1), tf.argmax(self.y,1)) 144 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) -------------------------------------------------------------------------------- /Model/helpingFunctions_v2.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import scipy.linalg as linalg 4 | import scipy 5 | import numpy as np 6 | from 
scipy import stats 7 | import scipy.optimize as opt 8 | 9 | Wlist = [] 10 | 11 | def checkInformation_py(X, epoch, s, y): 12 | if epoch > 0: 13 | # print X.shape 14 | print X[0,:], np.argmax(y[0,:]), s 15 | return np.float32(np.diag(np.ones(64))) 16 | 17 | def generatingWeightMatrix_py(Xp, Xc, epoch, division, batch): 18 | 19 | if epoch < division: 20 | return np.float32(Xc) 21 | else: 22 | W = np.eye(Xc.shape[0]) - np.dot(Xp, np.dot(np.linalg.inv(np.dot(Xp.T, Xp)), Xp.T)) 23 | Xc = np.dot(Xc, W) 24 | return np.float32(Xc) 25 | 26 | # lam = 1e-2 27 | # 28 | # if epoch < division: 29 | # #print epoch 30 | # return np.float32(0) 31 | # 32 | # p_pred = np.argmax(X, 1) 33 | # p_prob = np.max(X, 1) 34 | # 35 | # c_pred = np.argmax(y, 1) 36 | # c_prob = np.max(y, 1) 37 | # 38 | # # corr = np.dot(p_pred-np.mean(p_pred), c_pred-np.mean(c_pred))/(np.std(p_pred)*np.std(c_pred)*(X.shape[0])) 39 | # 40 | # a = np.zeros(X.shape[0]) 41 | # 42 | # a[p_pred==c_pred] = 1 43 | # # print np.mean(a), 44 | # accu = np.mean(a) 45 | # print accu, 46 | # a[p_pred==c_pred] = c_prob[p_pred==c_pred]**2 47 | # # if batch == 0: 48 | # # print a 49 | # if accu <=1.0/7 or np.isnan(accu): 50 | # return np.float32(np.ones(X.shape[0])) 51 | # a += 1/accu 52 | # a = 1.0/a 53 | # 54 | # a = (a/np.sum(a))*X.shape[0] 55 | # return np.float32(a) 56 | 57 | def generatingWeightMatrix_py2(X, y, epoch, division, batch): 58 | 59 | # print np.linalg.matrix_rank(X), '\t', np.linalg.matrix_rank(y), 60 | # for i in range(10): 61 | # print np.mean(X[i,:]), np.where(y[i]==1)[0] 62 | 63 | # X = X.reshape([128, 28*28]) #todo: note to change these back 64 | # X = X[:, :5] 65 | 66 | batch = int(batch) 67 | 68 | if epoch < division: 69 | #print epoch 70 | return np.float32(np.eye(X.shape[0])) 71 | else: 72 | # print np.linalg.matrix_rank(X), '\t', np.linalg.matrix_rank(np.dot(X.T, X)) 73 | # 74 | # for i in range(10): 75 | # print np.mean(X[i,:]), np.where(y[i]==1)[0] 76 | # if len(Wlist) == batch: #todo: let's try to change modelling power 77 | # 78 | # factor, S, U = fitting_null_py(X, y) 79 | # W = np.linalg.inv(np.dot(np.dot(U, np.diag(S)), U.T)*factor+np.eye(X.shape[0])) 80 | # 81 | # # W = np.eye(X.shape[0]) 82 | # # W = W/np.mean(W) # this line was not there in the sentiment experiment 83 | # 84 | # # W = columnWiseNormalize(W) 85 | # # W = columnWiseNormalize(W.T).T 86 | # 87 | # # W = np.diag(np.diag(W)) 88 | # 89 | # # W = np.eye(X.shape[0]) - np.dot(X, np.dot(np.linalg.inv(np.dot(X.T, X)), X.T)) 90 | # # W = columnWiseNormalize(W) 91 | # # W = columnWiseNormalize(W.T).T 92 | # # 93 | # 94 | # Wlist.append(W) 95 | # 96 | # return np.float32(Wlist[batch]) 97 | 98 | 99 | factor, S, U = fitting_null_py(X, y) 100 | W = np.linalg.inv(np.dot(np.dot(U, np.diag(S)), U.T)*factor+np.eye(X.shape[0])) 101 | # # 102 | # # # W = np.eye(X.shape[0]) 103 | # # # W = W/np.mean(W) 104 | # # 105 | # # W = columnWiseNormalize(W) 106 | # # W = columnWiseNormalize(W.T).T 107 | # # 108 | # X = X.reshape([X.shape[0], 1]) 109 | # W = np.eye(X.shape[0]) - np.dot(X, np.dot(np.linalg.inv(np.dot(X.T, X)), X.T)) 110 | # # # W = columnWiseNormalize(W) 111 | # # # W = columnWiseNormalize(W.T).T 112 | # 113 | return np.float32(W) 114 | 115 | def rescale(a): 116 | return a / np.max(np.abs(a)) 117 | 118 | def selectValues(Kva): 119 | r = np.zeros_like(Kva) 120 | n = r.shape[0] 121 | tmp = rescale(Kva) 122 | ind = 0 123 | for i in range(n/2, n-2): 124 | if tmp[i + 1] - tmp[i] > 1.0 / n: 125 | ind = i + 1 126 | break 127 | r[ind:] = Kva[ind:] 128 | r[n - 1] = Kva[n - 1] 
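    # Editor's note (hedged, not part of the original source): at this point r keeps
    # only the top of the spectrum. Kva is assumed sorted in ascending order (as
    # scipy.linalg.eigh returns it in fitting_null_py below); the loop above scans
    # the upper half of the rescaled eigenvalues for the first consecutive jump
    # larger than 1/n, everything below that index stays zero, and the largest
    # eigenvalue is always retained. The call to selectValues() in fitting_null_py
    # is currently commented out, so this filter does not affect the shipped runs.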
129 | return r 130 | 131 | def columnWiseNormalize(X): 132 | # col_norm = 1.0/np.sqrt((1.0/X.shape[0])*np.diag(np.dot(X.T, X))) 133 | # return np.dot(X, np.diag(col_norm)) 134 | [n, p] = X.shape 135 | col_norm = np.ones(X.shape[1]) 136 | for i in range(p): 137 | s = (1.0/n)*np.dot(X[:, i].T, X[:,i]) 138 | if s != 0: 139 | col_norm[i] = 1.0/np.sqrt(s) 140 | X[:, i] = X[:,i]*col_norm[i] 141 | return X 142 | 143 | def fitting_null_py(X, y): 144 | ldeltamin = -5 145 | ldeltamax = 5 146 | numintervals=500 147 | 148 | X = X.reshape([X.shape[0], 1]) 149 | # X = columnWiseNormalize(X) 150 | # xmean = np.mean(X, 0) 151 | # X = X - xmean 152 | y = np.argmax(y, axis=1) 153 | # y = y - np.mean(y) 154 | y = y.reshape([y.shape[0], 1]) 155 | # y = columnWiseNormalize(y) 156 | 157 | # print y.T 158 | 159 | # ynorm = np.linalg.norm(y, ord=2, axis=0) 160 | # y = y / ynorm 161 | 162 | K = np.dot(X, X.T) 163 | S, U = linalg.eigh(K) 164 | 165 | # S = selectValues(S) 166 | # print S 167 | # 168 | # print np.linalg.matrix_rank(K) 169 | # 170 | # print len(np.where(S!=0)[0]) 171 | 172 | # from matplotlib import pyplot as plt 173 | # plt.imshow(K) 174 | # plt.savefig('tmp.png') 175 | # plt.clf() 176 | 177 | Uy = scipy.dot(U.T, y) 178 | 179 | # grid search 180 | nllgrid = scipy.ones(numintervals + 1) * scipy.inf 181 | ldeltagrid = scipy.arange(numintervals + 1) / (numintervals * 1.0) * (ldeltamax - ldeltamin) + ldeltamin 182 | for i in scipy.arange(numintervals + 1): 183 | nllgrid[i] = nLLeval(ldeltagrid[i], Uy, S) # the method is in helpingMethods 184 | 185 | # nllmin = nllgrid.min() 186 | ldeltaopt_glob = ldeltagrid[nllgrid.argmin()] 187 | 188 | print ldeltaopt_glob, 189 | return np.float32(1.0/np.exp(ldeltaopt_glob)), S, U 190 | 191 | def nLLeval(ldelta, Uy, S, REML=False): 192 | """ 193 | evaluate the negative log likelihood of a random effects model: 194 | nLL = 1/2(n_s*log(2pi) + logdet(K) + 1/ss * y^T(K + deltaI)^{-1}y, 195 | where K = USU^T. 196 | Uy: transformed outcome: n_s x 1 197 | S: eigenvectors of K: n_s 198 | ldelta: log-transformed ratio sigma_gg/sigma_ee 199 | """ 200 | n_s = Uy.shape[0] 201 | delta = scipy.exp(ldelta) 202 | 203 | # evaluate log determinant 204 | Sd = S + delta 205 | ldet = scipy.sum(scipy.log(Sd)) 206 | 207 | # evaluate the variance 208 | Sdi = 1.0 / Sd 209 | # Uy = Uy.flatten() 210 | # ss = 1. / n_s * (Uy.dot(Uy.T).dot(np.diag(Sdi))).sum() 211 | ss = 1. 
/ n_s * (Uy*Uy*(Sdi.reshape(-1, 1))).sum() 212 | ss = ss / Uy.shape[1] + 1e-5 213 | 214 | # evalue the negative log likelihood 215 | nLL = 0.5 * (n_s * np.log(2.0 * scipy.pi) + ldet + n_s + n_s * np.log(ss)) 216 | 217 | if REML: 218 | pass 219 | 220 | return nLL 221 | -------------------------------------------------------------------------------- /MNIST_Pattern/visualize.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | 5 | from matplotlib import pyplot as plt 6 | 7 | import matplotlib 8 | 9 | font = {'family' : 'normal', 10 | 'weight' : 'bold', 11 | 'size' : 18} 12 | 13 | matplotlib.rc('font', **font) 14 | 15 | def loadTxt(path_filename): 16 | TR = [] 17 | VAL = [] 18 | TE = [] 19 | for i in range(5): 20 | updateTest = True 21 | maxVal = 0 22 | text = [line.strip() for line in open(path_filename + '_' + str(i) + '.txt')] 23 | tr = [] 24 | val = [] 25 | te = [] 26 | for line in text: 27 | if line.startswith('Epoch'): 28 | items = line.split() 29 | tr.append(float(items[8][:-1])) 30 | val.append(float(items[-1])) 31 | if len(val) == 0: 32 | updateTest = True 33 | else: 34 | if val[-1] > maxVal: 35 | updateTest = True 36 | maxVal = val[-1] 37 | else: 38 | updateTest = False 39 | if line.startswith('Best'): 40 | if updateTest: 41 | te.append(float(line.split()[-1])) 42 | else: 43 | te.append(te[-1]) 44 | print te[-1] 45 | TR.append(tr) 46 | VAL.append(val) 47 | TE.append(te[:-1]) 48 | TR = np.array(TR) 49 | VAL = np.array(VAL) 50 | TE = np.array(TE) 51 | 52 | return TR, VAL, TE 53 | 54 | def loadTxtNew(path_filename): 55 | TR = [] 56 | VAL = [] 57 | TE = [] 58 | for i in range(5): 59 | updateTest = True 60 | maxVal = 0 61 | text = [line.strip() for line in open(path_filename+ '_' + str(i) + '.txt')] 62 | tr = [] 63 | val = [] 64 | te = [] 65 | startUpdate = False 66 | for line in text: 67 | if line.startswith('Start'): 68 | startUpdate = True 69 | if startUpdate: 70 | if line.startswith('Epoch'): 71 | items = line.split() 72 | tr.append(float(items[8][:-1])) 73 | val.append(float(items[-1])) 74 | if len(val) == 0: 75 | updateTest = True 76 | else: 77 | if val[-1] > maxVal: 78 | updateTest = True 79 | maxVal = val[-1] 80 | else: 81 | te.append(te[-1]) 82 | if line.startswith('Best'): 83 | if updateTest: 84 | te.append(float(line.split()[-1])) 85 | 86 | TR.append(tr) 87 | VAL.append(val) 88 | TE.append(te[:-1]) 89 | TR = np.array(TR) 90 | VAL = np.array(VAL) 91 | TE = np.array(TE) 92 | 93 | return TR, VAL, TE 94 | 95 | def plot_mean_and_CI(mean, lb, ub, color_mean=None, color_shading=None): 96 | # plot the shaded range of the confidence intervals 97 | plt.fill_between(range(mean.shape[0]), ub, lb, 98 | color=color_shading, alpha=.5) 99 | # plot the mean on top 100 | plt.plot(mean, color_mean) 101 | 102 | def plot(corr=0): 103 | tr1, val1, te1 = loadTxt('baseline_'+str(corr)) 104 | tr2, val2, te2 = loadTxt('hex_'+str(corr)) 105 | 106 | plot_mean_and_CI(np.mean(tr1, 0), np.mean(tr1, 0)-np.std(tr1,0), np.mean(tr1, 0)+np.std(tr1,0), color_mean='b--', color_shading='c') 107 | plot_mean_and_CI(np.mean(te1, 0), np.mean(te1, 0)-np.std(te1,0), np.mean(te1, 0)+np.std(te1,0), color_mean='b', color_shading='c') 108 | plot_mean_and_CI(np.mean(val1, 0), np.mean(val1, 0)-np.std(val1,0), np.mean(val1, 0)+np.std(val1,0), color_mean='b.', color_shading='c') 109 | 110 | plot_mean_and_CI(np.mean(tr2, 0), np.mean(tr2, 0)-np.std(tr2,0), np.mean(tr2, 0)+np.std(tr2,0), color_mean='r--', color_shading='m') 111 | 
plot_mean_and_CI(np.mean(te2, 0), np.mean(te2, 0)-np.std(te2,0), np.mean(te2, 0)+np.std(te2,0), color_mean='r', color_shading='m') 112 | plot_mean_and_CI(np.mean(val2, 0), np.mean(val2, 0)-np.std(val2,0), np.mean(val2, 0)+np.std(val2,0), color_mean='r.', color_shading='m') 113 | 114 | plt.legend(loc=4) 115 | plt.ylim(0.4, 1.05) 116 | plt.savefig('MNIST_Pattern_Confound_'+str(corr)+'.pdf') 117 | plt.clf() 118 | 119 | def resultPlot(): 120 | boxColors = ['darkkhaki', 'royalblue'] 121 | 122 | fig = plt.figure(dpi=350, figsize=(25, 5)) 123 | axs = [0 for i in range(10)] 124 | 125 | newFiles = ['pre', 'info'] 126 | 127 | fileNames = ['baseline', 'mlp', 'vanilla', 'adv', 'hex', 'pre', 'info'] 128 | labelNames = ['B', 'M', 'N', 'A', 'H', 'G', 'I'] 129 | 130 | plt.style.use('bmh') 131 | 132 | for i in range(6): 133 | axs[i] = fig.add_axes([0.075+i*0.15, 0.1, 0.12, 0.7]) 134 | 135 | ts = [] 136 | if i < 3: 137 | for k in range(len(fileNames)): 138 | if fileNames[k] in newFiles: 139 | tr, val, te = loadTxtNew('../results/MNIST_Pattern/'+ fileNames[k]+'_'+str(i)) 140 | else: 141 | tr, val, te = loadTxt('../results/MNIST_Pattern/'+ fileNames[k]+'_'+str(i)) 142 | ts.append(te[:,-1]) 143 | else: 144 | for k in range(len(fileNames)): 145 | if fileNames[k] in newFiles: 146 | tr, val, te = loadTxtNew('../results/MNIST_Pattern_Confound/'+ fileNames[k]+'_'+str(i%3)) 147 | else: 148 | tr, val, te = loadTxt('../results/MNIST_Pattern_Confound/'+ fileNames[k]+'_'+str(i%3)) 149 | ts.append(te[:,-1]) 150 | 151 | # m1 = np.mean(r1) 152 | # s1 = np.std(r1) 153 | # m2 = np.mean(r2) 154 | # s2 = np.std(r2) 155 | 156 | # axs[c].errorbar(x=[0, 1], y=[m1, m2], yerr=[s1, s2]) 157 | 158 | axs[i].boxplot(ts, positions=[j for j in range(len(fileNames))], widths=[0.5 for j in range(len(fileNames))]) 159 | # axs[c].boxplot(r2, positions=[1]) 160 | 161 | axs[i].set_xlim(-0.5, len(fileNames)-0.5) 162 | axs[i].set_ylim(0, 1.1) 163 | 164 | if i == 0: 165 | axs[i].set_ylabel('Accuracy') 166 | axs[i].set_xticklabels(labelNames) 167 | # if c1 == 0: 168 | # axs[c].set_xticks([0, 1], ['NN', 'HEX-NN']) 169 | # else: 170 | # axs[c].get_xaxis().set_visible(False) 171 | if i == 0: 172 | axs[i].title.set_text('original\nindependent') 173 | elif i == 1: 174 | axs[i].title.set_text('random\nindependent') 175 | elif i == 2: 176 | axs[i].title.set_text('radial\nindependent') 177 | elif i == 3: 178 | axs[i].title.set_text('original\ndependent') 179 | elif i == 4: 180 | axs[i].title.set_text('random\ndependent') 181 | elif i == 5: 182 | axs[i].title.set_text('radial\ndependent') 183 | 184 | 185 | # plt.legend(loc="upper center", bbox_to_anchor=(1, 1), fancybox=True, ncol=2) 186 | plt.savefig('fig.pdf', dpi=350, format='pdf') 187 | 188 | if __name__ == '__main__': 189 | # for i in range(3): 190 | # plot(i) 191 | resultPlot() 192 | -------------------------------------------------------------------------------- /dataGeneration/addBackground_diffpos_28.py: -------------------------------------------------------------------------------- 1 | # addBackground_28.py 2 | import cv2 3 | 4 | import os 5 | 6 | import numpy as np 7 | 8 | # background_path = '/Users/hzxue/Desktop/CMU/project/artificial-pattern/data/background/' 9 | # face_path = '/Users/hzxue/Desktop/CMU/project/artificial-pattern/data/original/' 10 | background_path = '../images/background/' 11 | face_path = '/media/haohanwang/Data/SentimentImages/original/' 12 | 13 | sentiment_dic = {'anger': 0, 'disgust': 1, 'fear': 2, 'joy': 3, 'neutral': 4, 'sadness': 5, 'surprise': 6} 14 | background_dic 
= {0: 'drawn.jpg', 1: 'festival.jpg', 2: 'firework.jpg', 3: 'forest.jpg', 4: 'scare.jpg', 5: 'train.jpg', 6: 'winter.jpg'} 15 | 16 | names = {'aia', 'bonnie', 'jules', 'malcolm', 'mery', 'ray'} 17 | """ 18 | left right and middle no need to change 19 | """ 20 | def middle(img1,img2): 21 | img_mix = np.zeros((512,512, 3), np.uint8) 22 | h,w,_=img2.shape 23 | for i in range(h): 24 | for j in range(w): 25 | if i>=0 and i<256 and j>=0 and j<512: 26 | img_mix[i,j]=img2[i,j] 27 | else: 28 | if i>=256 and i<512 and j >= 0 and j<128: 29 | img_mix[i,j]=img2[i,j] 30 | else: 31 | if i>=256 and i<512 and j>=383 and j<512: 32 | img_mix[i,j]=img2[i,j] 33 | else: 34 | #print i,j 35 | (r1,g1,b1,a1)=img1[i-256,j-128] 36 | #print img1[i,j] 37 | if a1==0: 38 | img_mix[i,j]=img2[i,j] 39 | else: 40 | img_mix[i,j]=(r1,g1,b1) 41 | return img_mix 42 | 43 | def left(img1,img2): 44 | img_mix = np.zeros((512,512, 3), np.uint8) 45 | 46 | h,w,_=img2.shape 47 | for i in range(h): 48 | for j in range(w): 49 | if i>=0 and i<256 and j>=0 and j<512: 50 | # upper 51 | img_mix[i,j]=img2[i,j] 52 | else: 53 | if i>=256 and i<512 and j >= 256 and j<512: 54 | img_mix[i,j]=img2[i,j] 55 | #left down 56 | else: 57 | #middle 58 | (r1,g1,b1,a1)=img1[i-256,j] 59 | #print img1[i,j] 60 | if a1==0: 61 | img_mix[i,j]=img2[i,j] 62 | else: 63 | img_mix[i,j]=(r1,g1,b1) 64 | return img_mix 65 | 66 | def right(img1,img2): 67 | img_mix = np.zeros((512,512, 3), np.uint8) 68 | h,w,_=img2.shape 69 | for i in range(h): 70 | for j in range(w): 71 | if i>=0 and i<256 and j>=0 and j<512: 72 | # upper 73 | img_mix[i,j]=img2[i,j] 74 | else: 75 | if i>=256 and i<512 and j<256: 76 | img_mix[i,j]=img2[i,j] 77 | #left down 78 | else: 79 | #middle 80 | (r1,g1,b1,a1)=img1[i-256,j-256] 81 | #print img1[i,j] 82 | if a1==0: 83 | img_mix[i,j]=img2[i,j] 84 | else: 85 | img_mix[i,j]=(r1,g1,b1) 86 | return img_mix 87 | 88 | def add_image(facepath, bgps, sent, corr, save_path): 89 | files = facepath.split('/') 90 | savepath = os.path.join(save_path, files[-3]) 91 | if os.path.exists(savepath) is not True: 92 | os.makedirs(savepath) 93 | savepath = os.path.join(savepath, files[-2]) 94 | if os.path.exists(savepath) is not True: 95 | os.makedirs(savepath) 96 | savepath = os.path.join(savepath, files[-1]) 97 | 98 | if os.path.exists(savepath) is True: 99 | return 100 | #print backgroundpath,savepath,facepath 101 | # global total 102 | 103 | ind = int(facepath.split('_')[-1].split('.')[0]) 104 | # 80% data with probability of 0.8 are associate with label[0,1,2,3,4,5,6,7] 105 | """ """ 106 | if ind % 10 < 8: 107 | if np.random.random() < corr: 108 | img2 = bgps[sentiment_dic[sent]] 109 | else: 110 | i = np.random.randint(7) 111 | img2 = bgps[i] 112 | else: 113 | # [8,9] random 114 | i = np.random.randint(7) 115 | img2 = bgps[i] 116 | img1 = cv2.imread(facepath, cv2.IMREAD_UNCHANGED) 117 | h, w, _ = img1.shape 118 | img_mix = np.zeros((512, 512, 3), np.uint8) 119 | img1 = cv2.resize(img1, (256, 256), interpolation=cv2.INTER_CUBIC) 120 | img2 = cv2.resize(img2, (512, 512), interpolation=cv2.INTER_CUBIC) 121 | x = np.random.randint(3) 122 | if x == 0: 123 | img_mix = middle(img1,img2) 124 | if x == 1: 125 | img_mix = left(img1,img2) 126 | if x == 2: 127 | img_mix = right(img1,img2) 128 | # img_mix=cv2.cvtColor(img_mix,cv2.COLOR_RGB2GRAY) 129 | # img_mix=cv2.resize(img_mix,(28,28),interpolation=cv2.INTER_CUBIC) 130 | cv2.imwrite(savepath, img_mix) 131 | #print savepath 132 | return 133 | 134 | def loadBackground(): 135 | bgps = {} 136 | for k in background_dic: 137 | bgp = 
background_path + background_dic[k] 138 | img2 = cv2.imread(bgp, cv2.IMREAD_UNCHANGED) 139 | #print bgp 140 | bgps[k] = img2 141 | return bgps 142 | 143 | def run(corr=0.8): 144 | count = 0 145 | bgps = loadBackground() 146 | """ 147 | if corr == 0: 148 | c = 0 149 | elif corr == 0.4: 150 | c = 4 151 | else: 152 | c= 8 153 | """ 154 | c=int(corr*10) 155 | # save_path = '/Users/hzxue/Desktop/CMU/project/artificial-pattern/data/background_'+str(c) + '/' 156 | save_path = '/media/haohanwang/Data/SentimentImages/background_'+str(c) + '/' 157 | print save_path 158 | for n in names: 159 | for k in sentiment_dic: 160 | inputPath = face_path+n+'/'+n+'_'+k+'/' 161 | for r, d, f in os.walk(inputPath): 162 | for fn in f: 163 | if fn.find('Store')==-1: 164 | count += 1 165 | # print count, '\t', 166 | add_image(inputPath+fn, bgps, k, corr, save_path) 167 | if count%1000 == 0: 168 | print '=============================' 169 | print 'We have worked on ', count, 'images' 170 | print '=============================' 171 | 172 | 173 | if __name__ == '__main__': 174 | # np.random.seed(0) 175 | # corr=0.0 176 | # while corr<=1.0: 177 | # print 'WE ARE WORKING ON', corr 178 | # if corr!=0.8: 179 | # run(corr=corr) 180 | # corr+=0.1 181 | # run(corr=0.8) 182 | np.random.seed(0) 183 | print 'WE ARE WORKING ON', 0.1 184 | run(corr=0.1) 185 | np.random.seed(0) 186 | print 'WE ARE WORKING ON', 0.2 187 | run(corr=0.2) 188 | np.random.seed(0) 189 | print 'WE ARE WORKING ON', 0.3 190 | run(corr=0.3) 191 | np.random.seed(0) 192 | print 'WE ARE WORKING ON', 0.4 193 | run(corr=0.4) 194 | np.random.seed(0) 195 | print 'WE ARE WORKING ON', 0.5 196 | run(corr=0.5) 197 | np.random.seed(0) 198 | print 'WE ARE WORKING ON', 0.6 199 | run(corr=0.6) 200 | np.random.seed(0) 201 | print 'WE ARE WORKING ON', 0.7 202 | run(corr=0.7) 203 | np.random.seed(0) 204 | print 'WE ARE WORKING ON', 0.9 205 | run(corr=0.9) 206 | -------------------------------------------------------------------------------- /sentiment/cnn_mlp.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 6 | def lamda_variable(shape): 7 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 8 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 9 | 10 | def theta_variable(shape): 11 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 12 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 13 | 14 | def generatingWeightMatrix(images, labels, epoch, division, batch): 15 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 16 | return W 17 | 18 | def checkInformation(rep, epoch, s): 19 | X = py_func(checkInformation_py, [rep, epoch, s], [tf.float32])[0] 20 | return X 21 | 22 | def weight_variable(shape): 23 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 24 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 25 | 26 | def bias_variable(shape): 27 | initializer = tf.constant_initializer(0.0) 28 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 29 | 30 | def conv2d(x, W): 31 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 32 | 33 | def max_pool_2x2(x): 34 | return tf.nn.max_pool(x, 
ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 35 | 36 | class MNISTcnn(object): 37 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 38 | self.x = tf.reshape(x, shape=[-1, 28, 28, 1]) 39 | self.x_re=tf.reshape(x_re,shape=[-1,conf.ngray,784]) 40 | self.x_d=x_d 41 | self.y = y 42 | self.keep_prob = tf.placeholder(tf.float32) 43 | self.e=tf.placeholder(tf.float32) 44 | self.batch=tf.placeholder(tf.float32) 45 | #####################glgcm######################### 46 | 47 | with tf.variable_scope("fc0"): 48 | W_fc1 = weight_variable([784, 32]) 49 | b_fc1 = bias_variable([32]) 50 | x_flat = tf.reshape(x, [-1, 784]) 51 | glgcm_h_fc1 = tf.nn.relu(tf.matmul(x_flat, W_fc1) + b_fc1) 52 | # glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob) 53 | 54 | glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0) 55 | #####################################glgcm############################ 56 | ######################################hex############################# 57 | # H = glgcm_h_fc1 58 | ######################################hex############################ 59 | 60 | ######################################Sentiment###################### 61 | # conv1 62 | with tf.variable_scope('hex'): 63 | with tf.variable_scope('conv1'): 64 | W_conv1 = weight_variable([5, 5, 1, 32]) 65 | if conf.re==1: 66 | tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1)) 67 | b_conv1 = bias_variable([32]) 68 | h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1) 69 | h_pool1 = max_pool_2x2(h_conv1) 70 | 71 | # conv2 72 | with tf.variable_scope('conv2'): 73 | W_conv2 = weight_variable([5, 5, 32, 64]) 74 | b_conv2 = bias_variable([64]) 75 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 76 | h_pool2 = max_pool_2x2(h_conv2) 77 | 78 | # fc1 79 | with tf.variable_scope("fc1"): 80 | shape = int(np.prod(h_pool2.get_shape()[1:])) 81 | W_fc1 = weight_variable([shape, 1024]) 82 | b_fc1 = bias_variable([1024]) 83 | h_pool2_flat = tf.reshape(h_pool2, [-1, shape]) 84 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 85 | 86 | # h_fc1 = checkInformation(h_fc1, self.e) 87 | 88 | h_fc1 = tf.nn.l2_normalize(h_fc1, 0) 89 | # dropout 90 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 91 | 92 | 93 | yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1) 94 | #yconv_contact_loss=tf.concat([tf.zeros_like(h_fc1_drop, tf.float32),tf.zeros_like(glgcm_h_fc1_drop, tf.float32)],1) 95 | 96 | pad=tf.zeros_like(glgcm_h_fc1, tf.float32) 97 | yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 98 | 99 | pad2 = tf.zeros_like(h_fc1, tf.float32) 100 | yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1) 101 | 102 | # fc2 103 | with tf.variable_scope("fc2"): 104 | W_fc2 = weight_variable([1056, 7]) 105 | b_fc2 = bias_variable([7]) 106 | y_conv_loss = tf.matmul(yconv_contact_loss, W_fc2) + b_fc2 107 | y_conv_pred = tf.matmul(yconv_contact_pred, W_fc2) + b_fc2 108 | y_conv_H = tf.matmul(yconv_contact_H, W_fc2) + b_fc2 109 | ######################################Sentiment###################### 110 | 111 | """ 112 | t_histo_rows = [ 113 | tf.histogram_fixed_width( 114 | tf.gather(x, [row]), 115 | [0.0, 256.0], 100) 116 | for row in range(128)] 117 | 118 | H = tf.stack(t_histo_rows, axis=0) 119 | """ 120 | # H = y_conv_H 121 | 122 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 123 | if Hex_flag==False: 124 | if conf.re==1: 125 | tf.add_to_collection("losses",sess_loss) 126 | self.loss = tf.add_n(tf.get_collection("losses")) 127 | else: 128 | 
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 129 | self.pred = tf.argmax(y_conv_pred, 1) 130 | 131 | # H = y_conv_H 132 | # H = tf.argmax(y_conv_H, 1) 133 | # y_H = tf.one_hot(H, depth=7) 134 | 135 | # y_conv_pred = checkInformation(y_conv_pred, self.e, 'hey') 136 | # H = checkInformation(H, self.e, 'ha') 137 | 138 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 139 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 140 | if Hex_flag: 141 | # loss = tf.sqrt(tf.reshape(tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32), [-1, 1]) + 1e-10) 142 | 143 | # y_conv_loss = generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 144 | 145 | y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 146 | y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 147 | 148 | y_conv_loss = y_conv_loss - tf.matmul(tf.matmul(tf.matmul(y_conv_H, tf.matrix_inverse(tf.matmul(y_conv_H, y_conv_H, transpose_a=True))), y_conv_H, transpose_b=True), y_conv_loss) 149 | 150 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 151 | 152 | # self.loss = tf.reduce_mean(tf.multiply(W, tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32))) 153 | 154 | # tf.stop_gradient(W) 155 | # if conf.re==1: 156 | # sess_loss = tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) 157 | # 158 | # tf.add_to_collection("losses",tf.reshape(sess_loss,[])) 159 | # self.loss = tf.add_n(tf.get_collection("losses")) 160 | # else: 161 | # self.loss=tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) -------------------------------------------------------------------------------- /MNIST_R/rotate.sh: -------------------------------------------------------------------------------- 1 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_0.txt 2 | 3 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_1.txt 4 | 5 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_2.txt 6 | 7 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_3.txt 8 | 9 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_4.txt 10 | 11 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_5.txt 12 | 13 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_6.txt 14 | 15 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_7.txt 16 | 17 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_8.txt 18 | 19 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 0 > hex_0_9.txt 20 | 21 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_0.txt 22 | 23 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_1.txt 24 | 25 | 
python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_2.txt 26 | 27 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_3.txt 28 | 29 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_4.txt 30 | 31 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_5.txt 32 | 33 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_6.txt 34 | 35 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_7.txt 36 | 37 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_8.txt 38 | 39 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 1 > hex_1_9.txt 40 | 41 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_0.txt 42 | 43 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_1.txt 44 | 45 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_2.txt 46 | 47 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_3.txt 48 | 49 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_4.txt 50 | 51 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_5.txt 52 | 53 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_6.txt 54 | 55 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_7.txt 56 | 57 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_8.txt 58 | 59 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 2 > hex_2_9.txt 60 | 61 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_0.txt 62 | 63 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_1.txt 64 | 65 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_2.txt 66 | 67 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_3.txt 68 | 69 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_4.txt 70 | 71 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_5.txt 72 | 73 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_6.txt 74 | 75 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_7.txt 76 
| 77 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_8.txt 78 | 79 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 3 > hex_3_9.txt 80 | 81 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_0.txt 82 | 83 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_1.txt 84 | 85 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_2.txt 86 | 87 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_3.txt 88 | 89 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_4.txt 90 | 91 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_5.txt 92 | 93 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_6.txt 94 | 95 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_7.txt 96 | 97 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_8.txt 98 | 99 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 4 > hex_4_9.txt 100 | 101 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_0.txt 102 | 103 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_1.txt 104 | 105 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_2.txt 106 | 107 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_3.txt 108 | 109 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_4.txt 110 | 111 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_5.txt 112 | 113 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_6.txt 114 | 115 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_7.txt 116 | 117 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_8.txt 118 | 119 | python cnn_run_v2.py -c status/ -e 1000 -re 0 -corr 0 -hex 1 -save status/ -row 0 -col 1 -ng 16 -div 100 -test 5 > hex_5_9.txt -------------------------------------------------------------------------------- /GLCM_Testing/cnn_glcm.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py 6 | 7 | 8 | def _fft(x): 9 | r = [] 10 | for i in range(128): 11 | r.append(np.abs(np.fft.fftshift(np.fft.fft2(x[i,:].reshape([28,28])))).astype(np.float32).reshape(28*28)) 12 
| return np.array(r) 13 | # return np.abs(np.fft.fft2(x)).astype(np.float32) # this seems to be an interesting approach 14 | 15 | def fftImage(x): 16 | r = py_func(_fft, [x], [tf.float32])[0] 17 | return r 18 | 19 | def lamda_variable(shape): 20 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=shape[0]) 21 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 22 | def theta_variable(shape): 23 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=shape[0]) 24 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 25 | def generatingWeightMatrix(images, labels, epoch, division, batch, g): 26 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch, g], [tf.float32]) 27 | return W 28 | def weight_variable(shape): 29 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 30 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 31 | 32 | def bias_variable(shape): 33 | initializer = tf.constant_initializer(0.0) 34 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 35 | 36 | def conv2d(x, W): 37 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 38 | 39 | def max_pool_2x2(x): 40 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 41 | 42 | class MNISTcnn(object): 43 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 44 | self.x = tf.reshape(x, shape=[-1, 28, 28, 3]) 45 | self.x_re=tf.reshape(x_re,shape=[-1,1,784]) 46 | self.x_d=tf.reshape(x_re,shape=[-1,1,784]) 47 | self.y = y 48 | self.keep_prob = tf.placeholder(tf.float32) 49 | self.e=tf.placeholder(tf.float32) 50 | self.batch=tf.placeholder(tf.float32) 51 | #####################glgcm######################### 52 | with tf.variable_scope('glgcm'): 53 | lamda = lamda_variable([conf.ngray,1]) 54 | theta= theta_variable([conf.ngray,1]) 55 | g=tf.matmul(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),1e-5),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),1e-5),1), transpose_b=True) 56 | #print(g.get_shape()) 57 | with tf.variable_scope("glgcm_fc1"): 58 | g_flat = tf.reshape(g, [-1, conf.ngray*conf.ngray]) 59 | glgcm_W_fc1 = weight_variable([conf.ngray*conf.ngray, 32]) 60 | glgcm_b_fc1 = bias_variable([32]) 61 | glgcm_h_fc1 = tf.nn.relu(tf.matmul(g_flat, glgcm_W_fc1) + glgcm_b_fc1) 62 | # with tf.variable_scope("fc2"): 63 | # W_fc2 = weight_variable([28*28*3, 32]) 64 | # b_fc2 = bias_variable([32]) 65 | # x_flat = tf.reshape(x, [-1, 28*28*3]) 66 | # glgcm_h_fc1 = tf.matmul(x_flat, W_fc2) + b_fc2 67 | 68 | 69 | #glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob) 70 | 71 | glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0) 72 | 73 | self.H = glgcm_h_fc1 74 | 75 | #####################################glgcm############################ 76 | ######################################hex############################# 77 | #H = glgcm_h_fc1 78 | ######################################hex############################ 79 | 80 | ######################################Sentiment###################### 81 | # conv1 82 | with tf.variable_scope('hex'): 83 | with tf.variable_scope('conv1'): 84 | W_conv1 = weight_variable([5, 5, 3, 32]) 85 | if conf.re==1: 86 | tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1)) 87 | b_conv1 = bias_variable([32]) 88 | h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1) 89 | h_pool1 = max_pool_2x2(h_conv1) 90 | 91 | 
# conv2 92 | with tf.variable_scope('conv2'): 93 | W_conv2 = weight_variable([5, 5, 32, 64]) 94 | b_conv2 = bias_variable([64]) 95 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 96 | h_pool2 = max_pool_2x2(h_conv2) 97 | 98 | # fc1 99 | with tf.variable_scope("fc1"): 100 | shape = int(np.prod(h_pool2.get_shape()[1:])) 101 | W_fc1 = weight_variable([shape, 1024]) 102 | b_fc1 = bias_variable([1024]) 103 | h_pool2_flat = tf.reshape(h_pool2, [-1, shape]) 104 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 105 | 106 | h_fc1 = tf.nn.l2_normalize(h_fc1, 0) 107 | # dropout 108 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 109 | 110 | 111 | yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1) 112 | 113 | pad=tf.zeros_like(glgcm_h_fc1, tf.float32) 114 | yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 115 | 116 | pad2 = tf.zeros_like(h_fc1, tf.float32) 117 | yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1) 118 | 119 | # fc2 120 | with tf.variable_scope("fc2"): 121 | W_fc2 = weight_variable([1056, 10]) 122 | b_fc2 = bias_variable([10]) 123 | y_conv_loss = tf.matmul(yconv_contact_loss, W_fc2) + b_fc2 124 | y_conv_pred = tf.matmul(yconv_contact_pred, W_fc2) + b_fc2 125 | y_conv_H = tf.matmul(yconv_contact_H, W_fc2) + b_fc2 126 | ######################################Sentiment###################### 127 | 128 | 129 | #H = y_conv 130 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 131 | if Hex_flag==False: 132 | if conf.re==1: 133 | tf.add_to_collection("losses",sess_loss) 134 | self.loss = tf.add_n(tf.get_collection("losses")) 135 | else: 136 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 137 | self.pred = tf.argmax(y_conv_pred, 1) 138 | 139 | 140 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 141 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 142 | if Hex_flag: 143 | # loss = tf.sqrt(tf.reshape(tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32), [-1, 1]) + 1e-10) 144 | 145 | # y_conv_loss = generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 146 | # W=generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 147 | # y_conv_loss = y_conv_loss - W 148 | # y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 149 | # y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 150 | 151 | # I1 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 152 | 153 | # I2 = checkInformation(y_conv_H, self.e, self.batch, self.y) 154 | # W=generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch, g) 155 | # y_conv_loss = y_conv_loss - W 156 | 157 | y_conv_loss = y_conv_loss - \ 158 | tf.matmul(tf.matmul(tf.matmul(y_conv_H, tf.matrix_inverse(tf.matmul(y_conv_H, y_conv_H, transpose_a=True))), y_conv_H, transpose_b=True), y_conv_loss) 159 | 160 | # I3 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 161 | 162 | # y_conv_loss = tf.matmul(I1, tf.matmul(I2, tf.matmul(I3, y_conv_loss))) 163 | 164 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) -------------------------------------------------------------------------------- /MNIST_Pattern/cnn_v2_mlp.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 
import generatingWeightMatrix_py, checkInformation_py 6 | 7 | def checkInformation(rep, epoch, s, y): 8 | X = py_func(checkInformation_py, [rep, epoch, s, y], [tf.float32])[0] 9 | return X 10 | 11 | def _fft(x): 12 | r = [] 13 | for i in range(128): 14 | r.append(np.abs(np.fft.fftshift(np.fft.fft2(x[i,:].reshape([28,28])))).astype(np.float32).reshape(28*28)) 15 | return np.array(r) 16 | # return np.abs(np.fft.fft2(x)).astype(np.float32) # this seems to be an interesting approach 17 | 18 | def fftImage(x): 19 | r = py_func(_fft, [x], [tf.float32])[0] 20 | return r 21 | 22 | def lamda_variable(shape): 23 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 24 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 25 | def theta_variable(shape): 26 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 27 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 28 | def generatingWeightMatrix(images, labels, epoch, division, batch): 29 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 30 | return W 31 | def weight_variable(shape): 32 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 33 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 34 | 35 | def bias_variable(shape): 36 | initializer = tf.constant_initializer(0.0) 37 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 38 | 39 | def conv2d(x, W): 40 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 41 | 42 | def max_pool_2x2(x): 43 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 44 | 45 | class MNISTcnn(object): 46 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 47 | self.x = tf.reshape(x, shape=[-1, 28, 28, 1]) 48 | self.x_re=tf.reshape(x_re,shape=[-1, 1, 784]) 49 | self.x_d=x_d 50 | self.y = y 51 | self.keep_prob = tf.placeholder(tf.float32) 52 | self.e=tf.placeholder(tf.float32) 53 | self.batch=tf.placeholder(tf.float32) 54 | #####################glgcm######################### 55 | with tf.variable_scope("fc0"): 56 | W_fc1 = weight_variable([784, 32]) 57 | b_fc1 = bias_variable([32]) 58 | x_flat = tf.reshape(x, [-1, 784]) 59 | glgcm_h_fc1 = tf.nn.relu(tf.matmul(x_flat, W_fc1) + b_fc1) 60 | # glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob) 61 | 62 | glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0) 63 | 64 | #####################################glgcm############################ 65 | ######################################hex############################# 66 | # H = glgcm_h_fc1 67 | ######################################hex############################ 68 | 69 | ######################################Sentiment###################### 70 | # conv1 71 | with tf.variable_scope('hex'): 72 | with tf.variable_scope('conv1'): 73 | W_conv1 = weight_variable([5, 5, 1, 32]) 74 | if conf.re==1: 75 | tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1)) 76 | b_conv1 = bias_variable([32]) 77 | h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1) 78 | h_pool1 = max_pool_2x2(h_conv1) 79 | 80 | # conv2 81 | with tf.variable_scope('conv2'): 82 | W_conv2 = weight_variable([5, 5, 32, 64]) 83 | b_conv2 = bias_variable([64]) 84 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 85 | h_pool2 = max_pool_2x2(h_conv2) 86 | 87 | # fc1 88 | with tf.variable_scope("fc1"): 89 | shape = 
int(np.prod(h_pool2.get_shape()[1:])) 90 | W_fc1 = weight_variable([shape, 1024]) 91 | b_fc1 = bias_variable([1024]) 92 | h_pool2_flat = tf.reshape(h_pool2, [-1, shape]) 93 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 94 | 95 | # dropout 96 | h_fc1 = tf.nn.l2_normalize(h_fc1, 0) 97 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 98 | 99 | 100 | yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1) 101 | #yconv_contact_loss=tf.concat([tf.zeros_like(h_fc1_drop, tf.float32),tf.zeros_like(glgcm_h_fc1_drop, tf.float32)],1) 102 | 103 | pad=tf.zeros_like(glgcm_h_fc1, tf.float32) 104 | yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 105 | 106 | pad2 = tf.zeros_like(h_fc1, tf.float32) 107 | yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1) 108 | 109 | # fc2 110 | with tf.variable_scope("fc2"): 111 | W_fc2 = weight_variable([1056, 10]) 112 | b_fc2 = bias_variable([10]) 113 | y_conv_loss = tf.matmul(yconv_contact_loss, W_fc2) + b_fc2 114 | y_conv_pred = tf.matmul(yconv_contact_pred, W_fc2) + b_fc2 115 | y_conv_H = tf.matmul(yconv_contact_H, W_fc2) + b_fc2 116 | ######################################Sentiment###################### 117 | 118 | 119 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 120 | if Hex_flag==False: 121 | if conf.re==1: 122 | tf.add_to_collection("losses",sess_loss) 123 | self.loss = tf.add_n(tf.get_collection("losses")) 124 | else: 125 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 126 | self.pred = tf.argmax(y_conv_pred, 1) 127 | 128 | # H = y_conv_H 129 | # H = tf.argmax(y_conv_H, 1) 130 | # y_H = tf.one_hot(H, depth=7) 131 | 132 | # y_conv_pred = checkInformation(y_conv_pred, self.e, 'hey') 133 | # H = checkInformation(H, self.e, 'ha') 134 | 135 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 136 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 137 | if Hex_flag: 138 | # loss = tf.sqrt(tf.reshape(tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32), [-1, 1]) + 1e-10) 139 | 140 | # y_conv_loss = generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 141 | 142 | # y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 143 | # y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 144 | 145 | # I1 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 146 | 147 | # I2 = checkInformation(y_conv_H, self.e, self.batch, self.y) 148 | 149 | y_conv_loss = y_conv_loss - \ 150 | tf.matmul(tf.matmul(tf.matmul(y_conv_H, tf.matrix_inverse(tf.matmul(y_conv_H, y_conv_H, transpose_a=True))), y_conv_H, transpose_b=True), y_conv_loss) 151 | 152 | # I3 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 153 | 154 | # y_conv_loss = tf.matmul(I1, tf.matmul(I2, tf.matmul(I3, y_conv_loss))) 155 | 156 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 157 | 158 | # self.loss = tf.reduce_mean(tf.multiply(W, tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32))) 159 | 160 | # tf.stop_gradient(W) 161 | # if conf.re==1: 162 | # sess_loss = tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) 163 | # 164 | # tf.add_to_collection("losses",tf.reshape(sess_loss,[])) 165 | # self.loss = tf.add_n(tf.get_collection("losses")) 166 | # else: 167 | # self.loss=tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) 
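A note on the HEX step that all of the cnn_v2 / cnn_mlp model variants in this dump share: at training time the logits built from the concatenated representation (y_conv_loss) are projected onto the orthogonal complement of the column space of the logits built from the zero-padded superficial branch (y_conv_H), and the softmax cross-entropy loss is taken on the projected logits, while prediction always uses y_conv_pred, in which the superficial half of the representation is zero-padded. Below is a minimal NumPy sketch of that projection only; the names hex_project, F, H and the eps ridge are illustrative assumptions, not part of the repository code.

import numpy as np

def hex_project(F, H, eps=1e-8):
    # F: batch x classes logits from the concatenated representation
    # H: batch x classes logits from the superficial (padded) branch
    # Returns F - H (H^T H)^{-1} H^T F, mirroring the
    # tf.matmul / tf.matrix_inverse chain used in the models above.
    # eps adds a small ridge for numerical stability (an assumption,
    # not present in the original TensorFlow code).
    HtH = H.T.dot(H) + eps * np.eye(H.shape[1])
    return F - H.dot(np.linalg.solve(HtH, H.T.dot(F)))

The training loss in the Hex_flag branch is then the usual softmax cross-entropy, computed on these projected logits.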
-------------------------------------------------------------------------------- /sentiment/cnn_v2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 6 | def lamda_variable(shape): 7 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 8 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 9 | 10 | def theta_variable(shape): 11 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 12 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 13 | 14 | def generatingWeightMatrix(images, labels, epoch, division, batch): 15 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 16 | return W 17 | 18 | def checkInformation(rep, epoch, s): 19 | X = py_func(checkInformation_py, [rep, epoch, s], [tf.float32])[0] 20 | return X 21 | 22 | def weight_variable(shape): 23 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 24 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 25 | 26 | def bias_variable(shape): 27 | initializer = tf.constant_initializer(0.0) 28 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 29 | 30 | def conv2d(x, W): 31 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 32 | 33 | def max_pool_2x2(x): 34 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 35 | 36 | class MNISTcnn(object): 37 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 38 | self.x = tf.reshape(x, shape=[-1, 28, 28, 1]) 39 | self.x_re=tf.reshape(x_re,shape=[-1,conf.ngray,784]) 40 | self.x_d=x_d 41 | self.y = y 42 | self.keep_prob = tf.placeholder(tf.float32) 43 | self.e=tf.placeholder(tf.float32) 44 | self.batch=tf.placeholder(tf.float32) 45 | #####################glgcm######################### 46 | 47 | with tf.variable_scope('glgcm'): 48 | lamda = lamda_variable([conf.ngray,1]) 49 | theta= theta_variable([conf.ngray,1]) 50 | # index=tf.multiply(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),1e-5),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),1e-5),1)) 51 | # g=tf.reduce_sum(index,reduction_indices=2) 52 | g=tf.matmul(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),0),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),0),1), transpose_b=True) 53 | #print(g.get_shape()) 54 | 55 | 56 | with tf.variable_scope("glgcm_fc1"): 57 | g_flat = tf.reshape(g, [-1, conf.ngray*conf.ngray]) 58 | glgcm_W_fc1 = weight_variable([conf.ngray*conf.ngray, 32]) 59 | glgcm_b_fc1 = bias_variable([32]) 60 | glgcm_h_fc1 = tf.nn.relu(tf.matmul(g_flat, glgcm_W_fc1) + glgcm_b_fc1) 61 | # glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob) 62 | 63 | glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0) 64 | #####################################glgcm############################ 65 | ######################################hex############################# 66 | # H = glgcm_h_fc1 67 | ######################################hex############################ 68 | 69 | ######################################Sentiment###################### 70 | # conv1 71 | with tf.variable_scope('hex'): 72 | with tf.variable_scope('conv1'): 73 | W_conv1 = weight_variable([5, 5, 1, 32]) 74 | if conf.re==1: 75 | 
tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1)) 76 | b_conv1 = bias_variable([32]) 77 | h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1) 78 | h_pool1 = max_pool_2x2(h_conv1) 79 | 80 | # conv2 81 | with tf.variable_scope('conv2'): 82 | W_conv2 = weight_variable([5, 5, 32, 64]) 83 | b_conv2 = bias_variable([64]) 84 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 85 | h_pool2 = max_pool_2x2(h_conv2) 86 | 87 | # fc1 88 | with tf.variable_scope("fc1"): 89 | shape = int(np.prod(h_pool2.get_shape()[1:])) 90 | W_fc1 = weight_variable([shape, 1024]) 91 | b_fc1 = bias_variable([1024]) 92 | h_pool2_flat = tf.reshape(h_pool2, [-1, shape]) 93 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 94 | 95 | # h_fc1 = checkInformation(h_fc1, self.e) 96 | 97 | h_fc1 = tf.nn.l2_normalize(h_fc1, 0) 98 | # dropout 99 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 100 | 101 | 102 | yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1) 103 | #yconv_contact_loss=tf.concat([tf.zeros_like(h_fc1_drop, tf.float32),tf.zeros_like(glgcm_h_fc1_drop, tf.float32)],1) 104 | 105 | pad=tf.zeros_like(glgcm_h_fc1, tf.float32) 106 | yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 107 | 108 | pad2 = tf.zeros_like(h_fc1, tf.float32) 109 | yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1) 110 | 111 | # fc2 112 | with tf.variable_scope("fc2"): 113 | W_fc2 = weight_variable([1056, 7]) 114 | b_fc2 = bias_variable([7]) 115 | y_conv_loss = tf.matmul(yconv_contact_loss, W_fc2) + b_fc2 116 | y_conv_pred = tf.matmul(yconv_contact_pred, W_fc2) + b_fc2 117 | y_conv_H = tf.matmul(yconv_contact_H, W_fc2) + b_fc2 118 | ######################################Sentiment###################### 119 | 120 | """ 121 | t_histo_rows = [ 122 | tf.histogram_fixed_width( 123 | tf.gather(x, [row]), 124 | [0.0, 256.0], 100) 125 | for row in range(128)] 126 | 127 | H = tf.stack(t_histo_rows, axis=0) 128 | """ 129 | # H = y_conv_H 130 | 131 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 132 | if Hex_flag==False: 133 | if conf.re==1: 134 | tf.add_to_collection("losses",sess_loss) 135 | self.loss = tf.add_n(tf.get_collection("losses")) 136 | else: 137 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 138 | self.pred = tf.argmax(y_conv_pred, 1) 139 | 140 | # H = y_conv_H 141 | # H = tf.argmax(y_conv_H, 1) 142 | # y_H = tf.one_hot(H, depth=7) 143 | 144 | # y_conv_pred = checkInformation(y_conv_pred, self.e, 'hey') 145 | # H = checkInformation(H, self.e, 'ha') 146 | 147 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 148 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 149 | if Hex_flag: 150 | # loss = tf.sqrt(tf.reshape(tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32), [-1, 1]) + 1e-10) 151 | 152 | # y_conv_loss = generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 153 | 154 | # y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 155 | # y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 156 | 157 | y_conv_loss = y_conv_loss - tf.matmul(tf.matmul(tf.matmul(y_conv_H, tf.matrix_inverse(tf.matmul(y_conv_H, y_conv_H, transpose_a=True))), y_conv_H, transpose_b=True), y_conv_loss) 158 | 159 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 160 | 161 | # self.loss = tf.reduce_mean(tf.multiply(W, 
tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32))) 162 | 163 | # tf.stop_gradient(W) 164 | # if conf.re==1: 165 | # sess_loss = tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) 166 | # 167 | # tf.add_to_collection("losses",tf.reshape(sess_loss,[])) 168 | # self.loss = tf.add_n(tf.get_collection("losses")) 169 | # else: 170 | # self.loss=tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) -------------------------------------------------------------------------------- /MNIST_R/cnn_v2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 6 | 7 | def checkInformation(rep, epoch, s, y): 8 | X = py_func(checkInformation_py, [rep, epoch, s, y], [tf.float32])[0] 9 | return X 10 | 11 | def _fft(x): 12 | r = [] 13 | for i in range(128): 14 | r.append(np.abs(np.fft.fftshift(np.fft.fft2(x[i,:].reshape([28,28])))).astype(np.float32).reshape(28*28)) 15 | return np.array(r) 16 | # return np.abs(np.fft.fft2(x)).astype(np.float32) # this seems to be an interesting approach 17 | 18 | def fftImage(x): 19 | r = py_func(_fft, [x], [tf.float32])[0] 20 | return r 21 | 22 | def lamda_variable(shape): 23 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 24 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 25 | def theta_variable(shape): 26 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 27 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 28 | def generatingWeightMatrix(images, labels, epoch, division, batch): 29 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 30 | return W 31 | def weight_variable(shape): 32 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 33 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 34 | 35 | def bias_variable(shape): 36 | initializer = tf.constant_initializer(0.0) 37 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 38 | 39 | def conv2d(x, W): 40 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 41 | 42 | def max_pool_2x2(x): 43 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 44 | 45 | class MNISTcnn(object): 46 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 47 | self.x = tf.reshape(x, shape=[-1, 28, 28, 1]) 48 | self.x_re=tf.reshape(x_re,shape=[-1, 1, 28*28]) 49 | self.x_d=x_d 50 | self.y = y 51 | self.keep_prob = tf.placeholder(tf.float32) 52 | self.e=tf.placeholder(tf.float32) 53 | self.batch=tf.placeholder(tf.float32) 54 | #####################glgcm######################### 55 | with tf.variable_scope('glgcm'): 56 | lamda = lamda_variable([1, conf.ngray,1]) 57 | theta= theta_variable([1, conf.ngray,1]) 58 | g=tf.matmul(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),0),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),0),1), transpose_b=True) 59 | # g=tf.reduce_sum(index,reduction_indices=2) 60 | #print(g.get_shape()) 61 | with tf.variable_scope("glgcm_fc1"): 62 | g_flat = tf.reshape(g, [-1, conf.ngray*conf.ngray]) 63 | glgcm_W_fc1 = weight_variable([conf.ngray*conf.ngray, 32]) 64 | glgcm_b_fc1 = bias_variable([32]) 65 | glgcm_h_fc1 = tf.nn.relu(tf.matmul(g_flat, 
glgcm_W_fc1) + glgcm_b_fc1) 66 | #glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob) 67 | 68 | glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0) 69 | 70 | #####################################glgcm############################ 71 | ######################################hex############################# 72 | # H = glgcm_h_fc1 73 | ######################################hex############################ 74 | 75 | ######################################Sentiment###################### 76 | # conv1 77 | with tf.variable_scope('hex'): 78 | with tf.variable_scope('conv1'): 79 | W_conv1 = weight_variable([5, 5, 1, 32]) 80 | if conf.re==1: 81 | tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1)) 82 | b_conv1 = bias_variable([32]) 83 | h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1) 84 | h_pool1 = max_pool_2x2(h_conv1) 85 | 86 | # conv2 87 | with tf.variable_scope('conv2'): 88 | W_conv2 = weight_variable([5, 5, 32, 64]) 89 | b_conv2 = bias_variable([64]) 90 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 91 | h_pool2 = max_pool_2x2(h_conv2) 92 | 93 | # fc1 94 | with tf.variable_scope("fc1"): 95 | shape = int(np.prod(h_pool2.get_shape()[1:])) 96 | W_fc1 = weight_variable([shape, 1024]) 97 | b_fc1 = bias_variable([1024]) 98 | h_pool2_flat = tf.reshape(h_pool2, [-1, shape]) 99 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 100 | 101 | # dropout 102 | h_fc1 = tf.nn.l2_normalize(h_fc1, 0) 103 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 104 | 105 | 106 | yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1) 107 | #yconv_contact_loss=tf.concat([tf.zeros_like(h_fc1_drop, tf.float32),tf.zeros_like(glgcm_h_fc1_drop, tf.float32)],1) 108 | 109 | pad=tf.zeros_like(glgcm_h_fc1, tf.float32) 110 | yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 111 | 112 | pad2 = tf.zeros_like(h_fc1, tf.float32) 113 | yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1) 114 | 115 | # fc2 116 | with tf.variable_scope("fc2"): 117 | W_fc2 = weight_variable([1056, 10]) 118 | b_fc2 = bias_variable([10]) 119 | y_conv_loss = tf.matmul(yconv_contact_loss, W_fc2) + b_fc2 120 | y_conv_pred = tf.matmul(yconv_contact_pred, W_fc2) + b_fc2 121 | y_conv_H = tf.matmul(yconv_contact_H, W_fc2) + b_fc2 122 | ######################################Sentiment###################### 123 | 124 | 125 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 126 | if Hex_flag==False: 127 | if conf.re==1: 128 | tf.add_to_collection("losses",sess_loss) 129 | self.loss = tf.add_n(tf.get_collection("losses")) 130 | else: 131 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 132 | self.pred = tf.argmax(y_conv_pred, 1) 133 | 134 | # H = y_conv_H 135 | # H = tf.argmax(y_conv_H, 1) 136 | # y_H = tf.one_hot(H, depth=7) 137 | 138 | # y_conv_pred = checkInformation(y_conv_pred, self.e, 'hey') 139 | # H = checkInformation(H, self.e, 'ha') 140 | 141 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 142 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 143 | if Hex_flag: 144 | # loss = tf.sqrt(tf.reshape(tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32), [-1, 1]) + 1e-10) 145 | 146 | # y_conv_loss = generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 147 | 148 | # y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 149 | # y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 150 | 
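# Note: the next uncommented statement (y_conv_loss = y_conv_loss - ...) is the
# HEX projection step. It subtracts from y_conv_loss its projection onto the
# column space of y_conv_H, i.e. y_conv_loss <- (I - H (H^T H)^{-1} H^T) y_conv_loss
# with H = y_conv_H, so the training loss only uses the part of the joint logits
# that the superficial NGLCM logits cannot already explain.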
151 | # I1 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 152 | 153 | # I2 = checkInformation(y_conv_H, self.e, self.batch, self.y) 154 | 155 | y_conv_loss = y_conv_loss - \ 156 | tf.matmul(tf.matmul(tf.matmul(y_conv_H, tf.matrix_inverse(tf.matmul(y_conv_H, y_conv_H, transpose_a=True))), y_conv_H, transpose_b=True), y_conv_loss) 157 | 158 | # I3 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 159 | 160 | # y_conv_loss = tf.matmul(I1, tf.matmul(I2, tf.matmul(I3, y_conv_loss))) 161 | 162 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 163 | 164 | # self.loss = tf.reduce_mean(tf.multiply(W, tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32))) 165 | 166 | # tf.stop_gradient(W) 167 | # if conf.re==1: 168 | # sess_loss = tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) 169 | # 170 | # tf.add_to_collection("losses",tf.reshape(sess_loss,[])) 171 | # self.loss = tf.add_n(tf.get_collection("losses")) 172 | # else: 173 | # self.loss=tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) -------------------------------------------------------------------------------- /MNIST_Pattern/cnn_v2_dft.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 6 | 7 | def checkInformation(rep, epoch, s, y): 8 | X = py_func(checkInformation_py, [rep, epoch, s, y], [tf.float32])[0] 9 | return X 10 | 11 | def _fft(x): 12 | r = [] 13 | for i in range(128): 14 | r.append(np.abs(np.fft.fftshift(np.fft.fft2(x[i,:].reshape([28,28])))).astype(np.float32).reshape(28*28)) 15 | return np.array(r) 16 | # return np.abs(np.fft.fft2(x)).astype(np.float32) # this seems to be an interesting approach 17 | 18 | def fftImage(x): 19 | r = py_func(_fft, [x], [tf.float32])[0] 20 | return r 21 | 22 | def lamda_variable(shape): 23 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 24 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 25 | def theta_variable(shape): 26 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 27 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 28 | def generatingWeightMatrix(images, labels, epoch, division, batch): 29 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32])[0] 30 | return W 31 | def weight_variable(shape): 32 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 33 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 34 | 35 | def bias_variable(shape): 36 | initializer = tf.constant_initializer(0.0) 37 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 38 | 39 | def conv2d(x, W): 40 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 41 | 42 | def max_pool_2x2(x): 43 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 44 | 45 | class MNISTcnn(object): 46 | def __init__(self, x, y, x_re, x_d, conf, Hex_flag=False): 47 | self.x = tf.reshape(x, shape=[-1, 28, 28, 1]) 48 | self.x_re=tf.reshape(x_re,shape=[-1, 1, 784]) 49 | self.x_d=x_d 50 | self.y = y 51 | self.keep_prob = tf.placeholder(tf.float32) 52 | self.e=tf.placeholder(tf.float32) 53 | 
self.batch=tf.placeholder(tf.float32) 54 | #####################glgcm######################### 55 | with tf.variable_scope('glgcm'): 56 | lamda = lamda_variable([1, conf.ngray,1]) 57 | theta= theta_variable([1, conf.ngray,1]) 58 | g=tf.matmul(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),0),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),0),1), transpose_b=True) 59 | # g=tf.reduce_sum(index,reduction_indices=2) 60 | #print(g.get_shape()) 61 | with tf.variable_scope("glgcm_fc1"): 62 | g_flat = tf.reshape(g, [-1, conf.ngray*conf.ngray]) 63 | glgcm_W_fc1 = weight_variable([conf.ngray*conf.ngray, 32]) 64 | glgcm_b_fc1 = bias_variable([32]) 65 | glgcm_h_fc1 = tf.nn.relu(tf.matmul(g_flat, glgcm_W_fc1) + glgcm_b_fc1) 66 | #glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob) 67 | 68 | glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0) 69 | 70 | #####################################glgcm############################ 71 | ######################################hex############################# 72 | # H = glgcm_h_fc1 73 | ######################################hex############################ 74 | 75 | ######################################Sentiment###################### 76 | # conv1 77 | with tf.variable_scope('hex'): 78 | with tf.variable_scope('conv1'): 79 | W_conv1 = weight_variable([5, 5, 1, 32]) 80 | if conf.re==1: 81 | tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(0.001)(W_conv1)) 82 | b_conv1 = bias_variable([32]) 83 | h_conv1 = tf.nn.relu(conv2d(self.x, W_conv1) + b_conv1) 84 | h_pool1 = max_pool_2x2(h_conv1) 85 | 86 | # conv2 87 | with tf.variable_scope('conv2'): 88 | W_conv2 = weight_variable([5, 5, 32, 64]) 89 | b_conv2 = bias_variable([64]) 90 | h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) 91 | h_pool2 = max_pool_2x2(h_conv2) 92 | 93 | # fc1 94 | with tf.variable_scope("fc1"): 95 | shape = int(np.prod(h_pool2.get_shape()[1:])) 96 | W_fc1 = weight_variable([shape, 1024]) 97 | b_fc1 = bias_variable([1024]) 98 | h_pool2_flat = tf.reshape(h_pool2, [-1, shape]) 99 | h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) 100 | 101 | # dropout 102 | h_fc1 = tf.nn.l2_normalize(h_fc1, 0) 103 | h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 104 | 105 | 106 | yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1) 107 | #yconv_contact_loss=tf.concat([tf.zeros_like(h_fc1_drop, tf.float32),tf.zeros_like(glgcm_h_fc1_drop, tf.float32)],1) 108 | 109 | pad=tf.zeros_like(glgcm_h_fc1, tf.float32) 110 | yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 111 | 112 | pad2 = tf.zeros_like(h_fc1, tf.float32) 113 | yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1) 114 | 115 | # fc2 116 | with tf.variable_scope("fc2"): 117 | W_fc2 = weight_variable([1056, 10]) 118 | b_fc2 = bias_variable([10]) 119 | y_conv_loss = tf.matmul(yconv_contact_loss, W_fc2) + b_fc2 120 | y_conv_pred = tf.matmul(yconv_contact_pred, W_fc2) + b_fc2 121 | y_conv_H = tf.matmul(yconv_contact_H, W_fc2) + b_fc2 122 | ######################################Sentiment###################### 123 | 124 | 125 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 126 | if Hex_flag==False: 127 | if conf.re==1: 128 | tf.add_to_collection("losses",sess_loss) 129 | self.loss = tf.add_n(tf.get_collection("losses")) 130 | else: 131 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 132 | self.pred = tf.argmax(y_conv_pred, 1) 133 | 134 | # H = y_conv_H 135 | # H = tf.argmax(y_conv_H, 1) 136 | # y_H = 
tf.one_hot(H, depth=7) 137 | 138 | # y_conv_pred = checkInformation(y_conv_pred, self.e, 'hey') 139 | # H = checkInformation(H, self.e, 'ha') 140 | 141 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 142 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 143 | if Hex_flag: 144 | # loss = tf.sqrt(tf.reshape(tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32), [-1, 1]) + 1e-10) 145 | 146 | # y_conv_loss = generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 147 | 148 | # y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 149 | # y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 150 | 151 | # I1 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 152 | 153 | # I2 = checkInformation(y_conv_H, self.e, self.batch, self.y) 154 | 155 | y_conv_loss = y_conv_loss - \ 156 | tf.matmul(tf.matmul(tf.matmul(y_conv_H, tf.matrix_inverse(tf.matmul(y_conv_H, y_conv_H, transpose_a=True))), y_conv_H, transpose_b=True), y_conv_loss) 157 | 158 | # I3 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 159 | 160 | # y_conv_loss = tf.matmul(I1, tf.matmul(I2, tf.matmul(I3, y_conv_loss))) 161 | 162 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 163 | 164 | # self.loss = tf.reduce_mean(tf.multiply(W, tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32))) 165 | 166 | # tf.stop_gradient(W) 167 | # if conf.re==1: 168 | # sess_loss = tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) 169 | # 170 | # tf.add_to_collection("losses",tf.reshape(sess_loss,[])) 171 | # self.loss = tf.add_n(tf.get_collection("losses")) 172 | # else: 173 | # self.loss=tf.matmul(tf.matmul(loss, W, transpose_a=True), loss) -------------------------------------------------------------------------------- /PACS/alex_cnn_top_adv.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | 6 | from tensorflow import py_func 7 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 8 | def lamda_variable(shape): 9 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=shape[0]) 10 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 11 | 12 | def theta_variable(shape): 13 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=shape[0]) 14 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 15 | 16 | def generatingWeightMatrix(images, labels, epoch, division, batch): 17 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32]) 18 | return W 19 | 20 | def checkInformation(rep, epoch, s, y): 21 | X = py_func(checkInformation_py, [rep, epoch, s, y], [tf.float32])[0] 22 | return X 23 | 24 | def weight_variable(shape): 25 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 26 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 27 | 28 | def bias_variable(shape): 29 | initializer = tf.constant_initializer(0.0) 30 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 31 | 32 | def conv2d(x, W): 33 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 34 | 35 | def max_pool_2x2(x): 36 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], 
strides=[1, 2, 2, 1], padding='SAME') 37 | def fc(x, num_in, num_out, name, relu=True): 38 | """Create a fully connected layer.""" 39 | with tf.variable_scope(name) as scope: 40 | 41 | # Create tf variables for the weights and biases 42 | weights = tf.get_variable('weights', shape=[num_in, num_out], 43 | trainable=True) 44 | biases = tf.get_variable('biases', [num_out], trainable=True) 45 | 46 | # Matrix multiply weights and inputs and add bias 47 | act = tf.nn.xw_plus_b(x, weights, biases, name=scope.name) 48 | 49 | if relu: 50 | # Apply ReLu non linearity 51 | relu = tf.nn.relu(act) 52 | return relu 53 | else: 54 | return act 55 | 56 | 57 | def max_pool(x, filter_height, filter_width, stride_y, stride_x, name, 58 | padding='SAME'): 59 | """Create a max pooling layer.""" 60 | return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1], 61 | strides=[1, stride_y, stride_x, 1], 62 | padding=padding, name=name) 63 | 64 | 65 | def lrn(x, radius, alpha, beta, name, bias=1.0): 66 | """Create a local response normalization layer.""" 67 | return tf.nn.local_response_normalization(x, depth_radius=radius, 68 | alpha=alpha, beta=beta, 69 | bias=bias, name=name) 70 | 71 | 72 | def dropout(x, keep_prob): 73 | """Create a dropout layer.""" 74 | return tf.nn.dropout(x, keep_prob) 75 | 76 | def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name, 77 | padding='SAME', groups=1): 78 | """Create a convolution layer. 79 | Adapted from: https://github.com/ethereon/caffe-tensorflow 80 | """ 81 | # Get number of input channels 82 | input_channels = int(x.get_shape()[-1]) 83 | 84 | # Create lambda function for the convolution 85 | convolve = lambda i, k: tf.nn.conv2d(i, k, 86 | strides=[1, stride_y, stride_x, 1], 87 | padding=padding) 88 | 89 | with tf.variable_scope(name) as scope: 90 | # Create tf variables for the weights and biases of the conv layer 91 | weights = tf.get_variable('weights', shape=[filter_height, 92 | filter_width, 93 | input_channels/groups, 94 | num_filters]) 95 | biases = tf.get_variable('biases', shape=[num_filters]) 96 | 97 | if groups == 1: 98 | conv = convolve(x, weights) 99 | 100 | # In the cases of multiple groups, split inputs & weights and 101 | else: 102 | # Split input and weights and convolve them separately 103 | input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x) 104 | weight_groups = tf.split(axis=3, num_or_size_splits=groups, 105 | value=weights) 106 | output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)] 107 | 108 | # Concat the convolved output together again 109 | conv = tf.concat(axis=3, values=output_groups) 110 | 111 | # Add biases 112 | bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv)) 113 | 114 | # Apply relu function 115 | relu = tf.nn.relu(bias, name=scope.name) 116 | 117 | return relu 118 | 119 | 120 | class MNISTcnn(object): 121 | def __init__(self,x, y, x_re, x_d, conf, Hex_flag=False): 122 | self.x = tf.reshape(x, shape=[-1, 4096]) 123 | self.x_re=tf.reshape(x_re,shape=[-1,1,784]) 124 | self.x_d=tf.reshape(x_re,shape=[-1,1,784]) 125 | self.y = y 126 | self.keep_prob = tf.placeholder(tf.float32) 127 | self.e=tf.placeholder(tf.float32) 128 | self.batch=tf.placeholder(tf.float32) 129 | #self.WEIGHTS_PATH='/Users/hzxue/Desktop/CMU/project/artificial-pattern/src/HEX_719/PACS/PACS/bvlc_alexnet.npy' 130 | 131 | #####################glgcm######################### 132 | 133 | with tf.variable_scope('glgcm'): 134 | lamda = lamda_variable([conf.ngray,1]) 135 | theta= 
theta_variable([conf.ngray,1]) 136 | g=tf.matmul(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),1e-5),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),1e-5),1), transpose_b=True) 137 | #g=tf.reduce_sum(index,reduction_indices=2) 138 | #print(g.get_shape()) 139 | 140 | 141 | with tf.variable_scope("glgcm_fc1"): 142 | g_flat = tf.reshape(g, [-1, conf.ngray*conf.ngray]) 143 | glgcm_W_fc1 = weight_variable([conf.ngray*conf.ngray, 32]) 144 | glgcm_b_fc1 = bias_variable([32]) 145 | glgcm_h_fc1 = tf.nn.relu(tf.matmul(g_flat, glgcm_W_fc1) + glgcm_b_fc1) 146 | # glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob) 147 | 148 | glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0) 149 | 150 | #####################################glgcm############################ 151 | ######################################hex############################# 152 | # H = glgcm_h_fc1 153 | ######################################hex############################ 154 | 155 | ######################################Sentiment###################### 156 | 157 | rep = tf.nn.l2_normalize(self.x, 0) 158 | 159 | dropout7 = dropout(rep, self.keep_prob) 160 | 161 | # 8th Layer: FC and return unscaled activations 162 | #self.fc8 = fc(dropout7, 4096, self.NUM_CLASSES, relu=False, name='fc8') 163 | # conv2 164 | # dropout 165 | #h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 166 | h_fc1_drop = dropout7 167 | 168 | 169 | yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1) 170 | pad=tf.zeros_like(glgcm_h_fc1, tf.float32) 171 | yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 172 | pad2 = tf.zeros_like(rep, tf.float32) 173 | yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1) 174 | # fc2 175 | with tf.variable_scope("fc2"): 176 | W_fc2 = weight_variable([4128, 7]) 177 | b_fc2 = bias_variable([7]) 178 | y_conv_loss = tf.matmul(yconv_contact_loss, W_fc2) + b_fc2 179 | y_conv_pred = tf.matmul(yconv_contact_pred, W_fc2) + b_fc2 180 | y_conv_H = tf.matmul(yconv_contact_H, W_fc2) + b_fc2 181 | ######################################Sentiment###################### 182 | # H = y_conv_H 183 | # y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 184 | # y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 185 | 186 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_pred)) 187 | self.pred = tf.argmax(y_conv_pred, 1) 188 | 189 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 190 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 191 | 192 | self.loss -= tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_conv_H, logits=y_conv_pred)) -------------------------------------------------------------------------------- /sentiment/visualize.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | 5 | from matplotlib import pyplot as plt 6 | 7 | import matplotlib 8 | 9 | font = {'family' : 'normal', 10 | 'weight' : 'bold', 11 | 'size' : 18} 12 | 13 | matplotlib.rc('font', **font) 14 | 15 | 16 | plt.style.use('bmh') 17 | 18 | def loadTxt(filename): 19 | print 'loading', filename 20 | TR = [] 21 | VAL = [] 22 | TE = [] 23 | for i in range(1, 6): 24 | updateTest = True 25 | maxVal = 0 26 | text = [line.strip() for line in open('../results/sentiment/'+ filename + '_' + str(i) + '.txt')] 27 | tr = [] 28 | val = [] 29 | te = [] 30 | for line in text: 31 | if line.startswith('Epoch'): 32 | items = line.split() 33 | tr.append(float(items[8][:-1])) 34 | 
val.append(float(items[-1])) 35 | if len(val) == 0: 36 | updateTest = True 37 | else: 38 | if val[-1] > maxVal: 39 | updateTest = True 40 | maxVal = val[-1] 41 | else: 42 | updateTest = False 43 | if line.startswith('Best'): 44 | if updateTest: 45 | te.append(float(line.split()[-1])) 46 | else: 47 | te.append(te[-1]) 48 | print te[-1] 49 | TR.append(tr) 50 | VAL.append(val) 51 | TE.append(te[:-1]) 52 | TR = np.array(TR) 53 | VAL = np.array(VAL) 54 | TE = np.array(TE) 55 | 56 | return TR, VAL, TE 57 | 58 | def loadTxtNew(filename): 59 | print 'loading', filename 60 | TR = [] 61 | VAL = [] 62 | TE = [] 63 | for i in range(1, 6): 64 | updateTest = True 65 | maxVal = 0 66 | text = [line.strip() for line in open('../results/sentiment/'+ filename + '_' + str(i) + '.txt')] 67 | tr = [] 68 | val = [] 69 | te = [] 70 | startUpdate = False 71 | for line in text: 72 | if line.startswith('Start'): 73 | startUpdate = True 74 | if startUpdate: 75 | if line.startswith('Epoch'): 76 | items = line.split() 77 | tr.append(float(items[8][:-1])) 78 | val.append(float(items[-1])) 79 | if len(val) == 0: 80 | updateTest = True 81 | else: 82 | if val[-1] > maxVal: 83 | updateTest = True 84 | maxVal = val[-1] 85 | else: 86 | te.append(te[-1]) 87 | if line.startswith('Best'): 88 | if updateTest: 89 | te.append(float(line.split()[-1])) 90 | 91 | TR.append(tr) 92 | VAL.append(val) 93 | TE.append(te[:100]) 94 | TR = np.array(TR) 95 | VAL = np.array(VAL) 96 | TE = np.array(TE) 97 | 98 | return TR, VAL, TE 99 | 100 | def plot_mean_and_CI(mean, lb, ub, color_mean=None, color_shading=None): 101 | # plot the shaded range of the confidence intervals 102 | plt.fill_between(range(mean.shape[0]), ub, lb, 103 | color=color_shading, alpha=.5) 104 | # plot the mean on top 105 | plt.plot(mean, color_mean) 106 | 107 | def plot(corr=0): 108 | plt.style.use('bmh') 109 | tr1, val1, te1 = loadTxt('baseline_'+str(corr)) 110 | tr2, val2, te2 = loadTxt('vanilla_'+str(corr)) 111 | tr3, val3, te3 = loadTxt('mlp_'+str(corr)) 112 | tr4, val4, te4 = loadTxt('adv_'+str(corr)) 113 | tr0, val0, te0 = loadTxt('hex_'+str(corr)) 114 | tr5, val5, te5 = loadTxtNew('pre_'+str(corr)) 115 | tr6, val6, te6 = loadTxtNew('info_'+str(corr)) 116 | 117 | # plot_mean_and_CI(np.mean(tr1, 0), np.mean(tr1, 0)-np.std(tr1,0)/5.0, np.mean(tr1, 0)+np.std(tr1,0)/5.0, color_mean='g--', color_shading='g') 118 | plot_mean_and_CI(np.mean(te1, 0), np.mean(te1, 0)-np.std(te1,0), np.mean(te1, 0)+np.std(te1,0), color_mean='g', color_shading='g') 119 | # plot_mean_and_CI(np.mean(val1, 0), np.mean(val1, 0)-np.std(val1,0)/5.0, np.mean(val1, 0)+np.std(val1,0)/5.0, color_mean='g.', color_shading='g') 120 | 121 | # plot_mean_and_CI(np.mean(tr2, 0), np.mean(tr2, 0)-np.std(tr2,0)/5.0, np.mean(tr2, 0)+np.std(tr2,0)/5.0, color_mean='b--', color_shading='b') 122 | plot_mean_and_CI(np.mean(te2, 0), np.mean(te2, 0)-np.std(te2,0), np.mean(te2, 0)+np.std(te2,0), color_mean='b', color_shading='b') 123 | # plot_mean_and_CI(np.mean(val2, 0), np.mean(val2, 0)-np.std(val2,0)/5.0, np.mean(val2, 0)+np.std(val2,0)/5.0, color_mean='b.', color_shading='b') 124 | 125 | # plot_mean_and_CI(np.mean(tr3, 0), np.mean(tr3, 0)-np.std(tr3,0)/5.0, np.mean(tr3, 0)+np.std(tr3,0)/5.0, color_mean='c--', color_shading='c') 126 | plot_mean_and_CI(np.mean(te3, 0), np.mean(te3, 0)-np.std(te3,0), np.mean(te3, 0)+np.std(te3,0), color_mean='c', color_shading='c') 127 | # plot_mean_and_CI(np.mean(val3, 0), np.mean(val3, 0)-np.std(val3,0)/5.0, np.mean(val3, 0)+np.std(val3,0)/5.0, color_mean='c.', color_shading='c') 128 | 129 
| # plot_mean_and_CI(np.mean(tr4, 0), np.mean(tr4, 0)-np.std(tr4,0)/5.0, np.mean(tr4, 0)+np.std(tr4,0)/5.0, color_mean='m--', color_shading='m') 130 | plot_mean_and_CI(np.mean(te4, 0), np.mean(te4, 0)-np.std(te4,0), np.mean(te4, 0)+np.std(te4,0), color_mean='m', color_shading='m') 131 | # plot_mean_and_CI(np.mean(val4, 0), np.mean(val4, 0)-np.std(val4,0)/5.0, np.mean(val4, 0)+np.std(val4,0)/5.0, color_mean='m.', color_shading='m') 132 | 133 | # plot_mean_and_CI(np.mean(tr0, 0), np.mean(tr0, 0)-np.std(tr0,0)/5.0, np.mean(tr0, 0)+np.std(tr0,0)/5.0, color_mean='r--', color_shading='r') 134 | plot_mean_and_CI(np.mean(te0, 0), np.mean(te0, 0)-np.std(te0,0), np.mean(te0, 0)+np.std(te0,0), color_mean='r', color_shading='r') 135 | # plot_mean_and_CI(np.mean(val0, 0), np.mean(val0, 0)-np.std(val0,0)/5.0, np.mean(val0, 0)+np.std(val0,0)/5.0, color_mean='r.', color_shading='r') 136 | 137 | # plot_mean_and_CI(np.mean(tr5, 0), np.mean(tr5, 0)-np.std(tr5,0)/5.0, np.mean(tr5, 0)+np.std(tr5,0)/5.0, color_mean='y--', color_shading='y') 138 | plot_mean_and_CI(np.mean(te5, 0), np.mean(te5, 0)-np.std(te5,0), np.mean(te5, 0)+np.std(te5,0), color_mean='y', color_shading='y') 139 | # plot_mean_and_CI(np.mean(val5, 0), np.mean(val5, 0)-np.std(val5,0)/5.0, np.mean(val5, 0)+np.std(val5,0)/5.0, color_mean='y.', color_shading='y') 140 | 141 | # plot_mean_and_CI(np.mean(tr6, 0), np.mean(tr6, 0)-np.std(tr6,0)/5.0, np.mean(tr6, 0)+np.std(tr6,0)/5.0, color_mean='k--', color_shading='k') 142 | plot_mean_and_CI(np.mean(te6, 0), np.mean(te6, 0)-np.std(te6,0), np.mean(te6, 0)+np.std(te6,0), color_mean='k', color_shading='k') 143 | # plot_mean_and_CI(np.mean(val6, 0), np.mean(val6, 0)-np.std(val6,0)/5.0, np.mean(val6, 0)+np.std(val6,0)/5.0, color_mean='k.', color_shading='k') 144 | 145 | plt.legend(loc=4) 146 | plt.ylim(0,1.05) 147 | plt.savefig('sentiment_'+str(corr)+'.pdf') 148 | plt.clf() 149 | 150 | def plotLegend(): 151 | plt.style.use('bmh') 152 | methodsName = ['Baseline', 'Ablation M', 'Ablation N', 'Adv', 'HEX', 'DANN', 'InfoDrop'] 153 | colors = ['g', 'c', 'b', 'm', 'r', 'y', 'k'] 154 | 155 | fig = plt.figure(dpi=350, figsize=(20, 1)) 156 | ax = fig.add_axes([0, 0, 0.001, 0.001]) 157 | for i in range(len(colors)): 158 | ax.plot(xrange(10), xrange(10), label=methodsName[i], color=colors[i]) 159 | plt.legend(loc="upper center", bbox_to_anchor=(500, 800), ncol=7) 160 | plt.savefig('legend.pdf') 161 | 162 | 163 | def resultPlot(): 164 | boxColors = ['darkkhaki', 'royalblue'] 165 | 166 | fig = plt.figure(dpi=350, figsize=(25, 9)) 167 | axs = [0 for i in range(10)] 168 | 169 | newFiles = ['pre', 'info'] 170 | 171 | fileNames = ['baseline', 'mlp', 'vanilla', 'adv', 'hex', 'pre', 'info'] 172 | labelNames = ['B', 'M', 'N', 'A', 'H', 'D', 'I'] 173 | 174 | plt.style.use('bmh') 175 | 176 | for i in range(10): 177 | if i < 5: 178 | m = 1 179 | z = i%5 180 | else: 181 | m = 0 182 | z = i%5 183 | axs[i] = fig.add_axes([0.075+z*0.18, 0.1+m*0.45, 0.16, 0.35]) 184 | 185 | ts = [] 186 | for k in range(len(fileNames)): 187 | if fileNames[k] in newFiles: 188 | tr, val, te = loadTxtNew(fileNames[k]+'_'+str(i)) 189 | else: 190 | tr, val, te = loadTxt(fileNames[k]+'_'+str(i)) 191 | ts.append(te[:,-1]) 192 | 193 | # m1 = np.mean(r1) 194 | # s1 = np.std(r1) 195 | # m2 = np.mean(r2) 196 | # s2 = np.std(r2) 197 | 198 | # axs[c].errorbar(x=[0, 1], y=[m1, m2], yerr=[s1, s2]) 199 | 200 | axs[i].boxplot(ts, positions=[j for j in range(len(fileNames))], widths=[0.5 for j in range(len(fileNames))]) 201 | # axs[c].boxplot(r2, positions=[1]) 202 | 203 
| axs[i].set_xlim(-0.5, len(fileNames)-0.5) 204 | axs[i].set_ylim(0.0, 1.1) 205 | 206 | if i == 0 or i == 5: 207 | axs[i].set_ylabel('Accuracy') 208 | 209 | axs[i].set_xticklabels(labelNames) 210 | # if c1 == 0: 211 | # axs[c].set_xticks([0, 1], ['NN', 'HEX-NN']) 212 | # else: 213 | # axs[c].get_xaxis().set_visible(False) 214 | 215 | axs[i].title.set_text(r'$\rho$: '+str(i/float(10))) 216 | 217 | # plt.legend(loc="upper center", bbox_to_anchor=(1, 1), fancybox=True, ncol=2) 218 | plt.savefig('fig.pdf', dpi=350, format='pdf') 219 | 220 | 221 | if __name__ == '__main__': 222 | plotLegend() 223 | # for i in range(10): 224 | # plot(i) 225 | # resultPlot() -------------------------------------------------------------------------------- /PACS/alex_cnn_top.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Haohan Wang' 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | 6 | from tensorflow import py_func 7 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 8 | def lamda_variable(shape): 9 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=shape[0]) 10 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 11 | 12 | def theta_variable(shape): 13 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=shape[0]) 14 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 15 | 16 | def generatingWeightMatrix(images, labels, epoch, division, batch): 17 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32]) 18 | return W 19 | 20 | def checkInformation(rep, epoch, s, y): 21 | X = py_func(checkInformation_py, [rep, epoch, s, y], [tf.float32])[0] 22 | return X 23 | 24 | def weight_variable(shape): 25 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 26 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 27 | 28 | def bias_variable(shape): 29 | initializer = tf.constant_initializer(0.0) 30 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 31 | 32 | def conv2d(x, W): 33 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 34 | 35 | def max_pool_2x2(x): 36 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 37 | def fc(x, num_in, num_out, name, relu=True): 38 | """Create a fully connected layer.""" 39 | with tf.variable_scope(name) as scope: 40 | 41 | # Create tf variables for the weights and biases 42 | weights = tf.get_variable('weights', shape=[num_in, num_out], 43 | trainable=True) 44 | biases = tf.get_variable('biases', [num_out], trainable=True) 45 | 46 | # Matrix multiply weights and inputs and add bias 47 | act = tf.nn.xw_plus_b(x, weights, biases, name=scope.name) 48 | 49 | if relu: 50 | # Apply ReLu non linearity 51 | relu = tf.nn.relu(act) 52 | return relu 53 | else: 54 | return act 55 | 56 | 57 | def max_pool(x, filter_height, filter_width, stride_y, stride_x, name, 58 | padding='SAME'): 59 | """Create a max pooling layer.""" 60 | return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1], 61 | strides=[1, stride_y, stride_x, 1], 62 | padding=padding, name=name) 63 | 64 | 65 | def lrn(x, radius, alpha, beta, name, bias=1.0): 66 | """Create a local response normalization layer.""" 67 | return tf.nn.local_response_normalization(x, depth_radius=radius, 68 | alpha=alpha, beta=beta, 69 | bias=bias, name=name) 70 
| 71 | 72 | def dropout(x, keep_prob): 73 | """Create a dropout layer.""" 74 | return tf.nn.dropout(x, keep_prob) 75 | 76 | def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name, 77 | padding='SAME', groups=1): 78 | """Create a convolution layer. 79 | Adapted from: https://github.com/ethereon/caffe-tensorflow 80 | """ 81 | # Get number of input channels 82 | input_channels = int(x.get_shape()[-1]) 83 | 84 | # Create lambda function for the convolution 85 | convolve = lambda i, k: tf.nn.conv2d(i, k, 86 | strides=[1, stride_y, stride_x, 1], 87 | padding=padding) 88 | 89 | with tf.variable_scope(name) as scope: 90 | # Create tf variables for the weights and biases of the conv layer 91 | weights = tf.get_variable('weights', shape=[filter_height, 92 | filter_width, 93 | input_channels/groups, 94 | num_filters]) 95 | biases = tf.get_variable('biases', shape=[num_filters]) 96 | 97 | if groups == 1: 98 | conv = convolve(x, weights) 99 | 100 | # In the cases of multiple groups, split inputs & weights and 101 | else: 102 | # Split input and weights and convolve them separately 103 | input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x) 104 | weight_groups = tf.split(axis=3, num_or_size_splits=groups, 105 | value=weights) 106 | output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)] 107 | 108 | # Concat the convolved output together again 109 | conv = tf.concat(axis=3, values=output_groups) 110 | 111 | # Add biases 112 | bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv)) 113 | 114 | # Apply relu function 115 | relu = tf.nn.relu(bias, name=scope.name) 116 | 117 | return relu 118 | 119 | 120 | class MNISTcnn(object): 121 | def __init__(self,x, y, x_re, x_d, conf, Hex_flag=False): 122 | self.x = tf.reshape(x, shape=[-1, 4096]) 123 | self.x_re=tf.reshape(x_re,shape=[-1,1,784]) 124 | self.x_d=tf.reshape(x_re,shape=[-1,1,784]) 125 | self.y = y 126 | self.keep_prob = tf.placeholder(tf.float32) 127 | self.e=tf.placeholder(tf.float32) 128 | self.batch=tf.placeholder(tf.float32) 129 | #self.WEIGHTS_PATH='/Users/hzxue/Desktop/CMU/project/artificial-pattern/src/HEX_719/PACS/PACS/bvlc_alexnet.npy' 130 | 131 | #####################glgcm######################### 132 | 133 | with tf.variable_scope('glgcm'): 134 | lamda = lamda_variable([conf.ngray,1]) 135 | theta= theta_variable([conf.ngray,1]) 136 | g=tf.matmul(tf.minimum(tf.maximum(tf.subtract(self.x_d,lamda),1e-5),1),tf.minimum(tf.maximum(tf.subtract(self.x_re,theta),1e-5),1), transpose_b=True) 137 | #g=tf.reduce_sum(index,reduction_indices=2) 138 | #print(g.get_shape()) 139 | 140 | 141 | with tf.variable_scope("glgcm_fc1"): 142 | g_flat = tf.reshape(g, [-1, conf.ngray*conf.ngray]) 143 | glgcm_W_fc1 = weight_variable([conf.ngray*conf.ngray, 32]) 144 | glgcm_b_fc1 = bias_variable([32]) 145 | glgcm_h_fc1 = tf.nn.relu(tf.matmul(g_flat, glgcm_W_fc1) + glgcm_b_fc1) 146 | # glgcm_h_fc1_drop = tf.nn.dropout(glgcm_h_fc1, self.keep_prob) 147 | 148 | glgcm_h_fc1 = tf.nn.l2_normalize(glgcm_h_fc1, 0) 149 | 150 | #####################################glgcm############################ 151 | ######################################hex############################# 152 | # H = glgcm_h_fc1 153 | ######################################hex############################ 154 | 155 | ######################################Sentiment###################### 156 | 157 | rep = tf.nn.l2_normalize(self.x, 0) 158 | 159 | dropout7 = dropout(rep, self.keep_prob) 160 | 161 | # 8th Layer: FC and return unscaled activations 162 | 
#self.fc8 = fc(dropout7, 4096, self.NUM_CLASSES, relu=False, name='fc8') 163 | # conv2 164 | # dropout 165 | #h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob) 166 | h_fc1_drop = dropout7 167 | 168 | 169 | yconv_contact_loss=tf.concat([h_fc1_drop, glgcm_h_fc1],1) 170 | pad=tf.zeros_like(glgcm_h_fc1, tf.float32) 171 | yconv_contact_pred=tf.concat([h_fc1_drop, pad],1) 172 | pad2 = tf.zeros_like(rep, tf.float32) 173 | yconv_contact_H = tf.concat([pad2, glgcm_h_fc1],1) 174 | # fc2 175 | with tf.variable_scope("fc2"): 176 | W_fc2 = weight_variable([4128, 7]) 177 | b_fc2 = bias_variable([7]) 178 | y_conv_loss = tf.matmul(yconv_contact_loss, W_fc2) + b_fc2 179 | y_conv_pred = tf.matmul(yconv_contact_pred, W_fc2) + b_fc2 180 | y_conv_H = tf.matmul(yconv_contact_H, W_fc2) + b_fc2 181 | ######################################Sentiment###################### 182 | # H = y_conv_H 183 | # y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 184 | # y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 185 | 186 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 187 | if Hex_flag==False: 188 | if conf.re==1: 189 | tf.add_to_collection("losses",sess_loss) 190 | self.loss = tf.add_n(tf.get_collection("losses")) 191 | else: 192 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 193 | self.pred = tf.argmax(y_conv_pred, 1) 194 | 195 | self.correct_prediction = tf.equal(tf.argmax(y_conv_pred,1), tf.argmax(self.y,1)) 196 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 197 | if Hex_flag: 198 | # loss = tf.sqrt(tf.reshape(tf.cast(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss), tf.float32), [-1, 1]) + 1e-10) 199 | 200 | # y_conv_loss = generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 201 | # W=generatingWeightMatrix(y_conv_H, y_conv_loss, self.e, conf.div, self.batch) 202 | # y_conv_loss = y_conv_loss - W 203 | # y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 204 | # y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 205 | 206 | # I1 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 207 | 208 | # I2 = checkInformation(y_conv_H, self.e, self.batch, self.y) 209 | 210 | y_conv_loss = y_conv_loss - \ 211 | tf.matmul(tf.matmul(tf.matmul(y_conv_H, tf.matrix_inverse(tf.matmul(y_conv_H, y_conv_H, transpose_a=True))), y_conv_H, transpose_b=True), y_conv_loss) 212 | 213 | # I3 = checkInformation(y_conv_loss, self.e, self.batch, self.y) 214 | 215 | # y_conv_loss = tf.matmul(I1, tf.matmul(I2, tf.matmul(I3, y_conv_loss))) 216 | 217 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) -------------------------------------------------------------------------------- /PACS/alex_cnn_baseline.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from tensorflow import py_func 5 | from Model.helpingFunctions_v2 import generatingWeightMatrix_py, checkInformation_py 6 | def lamda_variable(shape): 7 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 8 | return tf.get_variable("lamda", shape,initializer=initializer, dtype=tf.float32) 9 | 10 | def theta_variable(shape): 11 | initializer = tf.random_uniform_initializer(dtype=tf.float32, minval=0, maxval=16) 12 | return tf.get_variable("theta", shape,initializer=initializer, dtype=tf.float32) 13 | 14 | def generatingWeightMatrix(images, 
labels, epoch, division, batch): 15 | W = py_func(generatingWeightMatrix_py, [images, labels, epoch, division, batch], [tf.float32]) 16 | return W 17 | 18 | def checkInformation(rep, epoch, s): 19 | X = py_func(checkInformation_py, [rep, epoch, s], [tf.float32]) 20 | return X 21 | 22 | def weight_variable(shape): 23 | initializer = tf.truncated_normal_initializer(dtype=tf.float32, stddev=1e-1) 24 | return tf.get_variable("weights", shape,initializer=initializer, dtype=tf.float32) 25 | 26 | def bias_variable(shape): 27 | initializer = tf.constant_initializer(0.0) 28 | return tf.get_variable("biases", shape, initializer=initializer, dtype=tf.float32) 29 | 30 | def conv2d(x, W): 31 | return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 32 | 33 | def max_pool_2x2(x): 34 | return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 35 | def fc(x, num_in, num_out, name, relu=True): 36 | """Create a fully connected layer.""" 37 | with tf.variable_scope(name) as scope: 38 | 39 | # Create tf variables for the weights and biases 40 | weights = tf.get_variable('weights', shape=[num_in, num_out], 41 | trainable=True) 42 | biases = tf.get_variable('biases', [num_out], trainable=True) 43 | 44 | # Matrix multiply weights and inputs and add bias 45 | act = tf.nn.xw_plus_b(x, weights, biases, name=scope.name) 46 | 47 | if relu: 48 | # Apply ReLu non linearity 49 | relu = tf.nn.relu(act) 50 | return relu 51 | else: 52 | return act 53 | 54 | 55 | def max_pool(x, filter_height, filter_width, stride_y, stride_x, name, 56 | padding='SAME'): 57 | """Create a max pooling layer.""" 58 | return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1], 59 | strides=[1, stride_y, stride_x, 1], 60 | padding=padding, name=name) 61 | 62 | 63 | def lrn(x, radius, alpha, beta, name, bias=1.0): 64 | """Create a local response normalization layer.""" 65 | return tf.nn.local_response_normalization(x, depth_radius=radius, 66 | alpha=alpha, beta=beta, 67 | bias=bias, name=name) 68 | 69 | 70 | def dropout(x, keep_prob): 71 | """Create a dropout layer.""" 72 | return tf.nn.dropout(x, keep_prob) 73 | 74 | def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name, 75 | padding='SAME', groups=1): 76 | """Create a convolution layer. 
77 | Adapted from: https://github.com/ethereon/caffe-tensorflow 78 | """ 79 | # Get number of input channels 80 | input_channels = int(x.get_shape()[-1]) 81 | 82 | # Create lambda function for the convolution 83 | convolve = lambda i, k: tf.nn.conv2d(i, k, 84 | strides=[1, stride_y, stride_x, 1], 85 | padding=padding) 86 | 87 | with tf.variable_scope(name) as scope: 88 | # Create tf variables for the weights and biases of the conv layer 89 | weights = tf.get_variable('weights', shape=[filter_height, 90 | filter_width, 91 | input_channels/groups, 92 | num_filters]) 93 | biases = tf.get_variable('biases', shape=[num_filters]) 94 | 95 | if groups == 1: 96 | conv = convolve(x, weights) 97 | 98 | # In the cases of multiple groups, split inputs & weights and 99 | else: 100 | # Split input and weights and convolve them separately 101 | input_groups = tf.split(axis=3, num_or_size_splits=groups, value=x) 102 | weight_groups = tf.split(axis=3, num_or_size_splits=groups, 103 | value=weights) 104 | output_groups = [convolve(i, k) for i, k in zip(input_groups, weight_groups)] 105 | 106 | # Concat the convolved output together again 107 | conv = tf.concat(axis=3, values=output_groups) 108 | 109 | # Add biases 110 | bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv)) 111 | 112 | # Apply relu function 113 | relu = tf.nn.relu(bias, name=scope.name) 114 | 115 | return relu 116 | 117 | 118 | class MNISTcnn(object): 119 | def __init__(self,x, y, x_re, x_d, conf, Hex_flag=False): 120 | self.x = tf.reshape(x, shape=[-1, 227, 227, 3]) 121 | self.x_re=tf.reshape(x_re,shape=[-1,1,784]) 122 | self.x_d=tf.reshape(x_re,shape=[-1,1,784]) 123 | self.y = y 124 | self.keep_prob = tf.placeholder(tf.float32) 125 | self.e=tf.placeholder(tf.float32) 126 | self.batch=tf.placeholder(tf.float32) 127 | self.WEIGHTS_PATH='weights/bvlc_alexnet.npy' 128 | #self.WEIGHTS_PATH='/Users/hzxue/Desktop/CMU/project/artificial-pattern/src/HEX_719/PACS/PACS/bvlc_alexnet.npy' 129 | 130 | #####################glgcm######################### 131 | 132 | conv1 = conv(self.x, 11, 11, 96, 4, 4, padding='VALID', name='conv1') 133 | norm1 = lrn(conv1, 2, 1e-05, 0.75, name='norm1') 134 | pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1') 135 | 136 | # 2nd Layer: Conv (w ReLu) -> Lrn -> Pool with 2 groups 137 | conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2') 138 | norm2 = lrn(conv2, 2, 1e-05, 0.75, name='norm2') 139 | pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2') 140 | 141 | # 3rd Layer: Conv (w ReLu) 142 | conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3') 143 | 144 | # 4th Layer: Conv (w ReLu) splitted into two groups 145 | conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4') 146 | 147 | # 5th Layer: Conv (w ReLu) -> Pool splitted into two groups 148 | conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5') 149 | pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5') 150 | 151 | # 6th Layer: Flatten -> FC (w ReLu) -> Dropout 152 | flattened = tf.reshape(pool5, [-1, 6*6*256]) 153 | fc6 = fc(flattened, 6*6*256, 4096, name='fc6') 154 | dropout6 = dropout(fc6, self.keep_prob) 155 | 156 | # 7th Layer: FC (w ReLu) -> Dropout 157 | fc7 = fc(dropout6, 4096, 4096, name='fc7') 158 | 159 | fc7 = tf.nn.l2_normalize(fc7, 0) 160 | 161 | dropout7 = dropout(fc7, self.keep_prob) 162 | 163 | # 8th Layer: FC and return unscaled activations 164 | #self.fc8 = fc(dropout7, 4096, self.NUM_CLASSES, relu=False, name='fc8') 165 | # conv2 166 | # dropout 167 | #h_fc1_drop = 
tf.nn.dropout(h_fc1, self.keep_prob) 168 | h_fc1_drop = dropout7 169 | 170 | self.rep = fc7 171 | 172 | # fc2 173 | with tf.variable_scope("fc2"): 174 | W_fc2 = weight_variable([4096, 7]) 175 | b_fc2 = bias_variable([7]) 176 | y_conv_loss = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 177 | ######################################Sentiment###################### 178 | # H = y_conv_H 179 | # y_conv_loss = tf.nn.l2_normalize(y_conv_loss, 0) 180 | # y_conv_H = tf.nn.l2_normalize(y_conv_H, 0) 181 | 182 | sess_loss=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 183 | if Hex_flag==False: 184 | if conf.re==1: 185 | tf.add_to_collection("losses",sess_loss) 186 | self.loss = tf.add_n(tf.get_collection("losses")) 187 | else: 188 | self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv_loss)) 189 | self.pred = tf.argmax(y_conv_loss, 1) 190 | 191 | self.correct_prediction = tf.equal(tf.argmax(y_conv_loss,1), tf.argmax(self.y,1)) 192 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) 193 | 194 | def load_initial_weights(self, session): 195 | """Load weights from file into network. 196 | As the weights from http://www.cs.toronto.edu/~guerzhoy/tf_alexnet/ 197 | come as a dict of lists (e.g. weights['conv1'] is a list) and not as 198 | dict of dicts (e.g. weights['conv1'] is a dict with keys 'weights' & 199 | 'biases') we need a special load function 200 | """ 201 | # Load the weights into memory 202 | weights_dict = np.load(self.WEIGHTS_PATH, encoding='bytes').item() 203 | 204 | # Loop over all layer names stored in the weights dict 205 | for op_name in weights_dict: 206 | 207 | # Check if layer should be trained from scratch 208 | if op_name != 'fc8': 209 | 210 | with tf.variable_scope(op_name, reuse=True): 211 | 212 | # Assign weights/biases to their corresponding tf variable 213 | for data in weights_dict[op_name]: 214 | 215 | # Biases 216 | if len(data.shape) == 1: 217 | var = tf.get_variable('biases', trainable=True) 218 | session.run(var.assign(data)) 219 | 220 | # Weights 221 | else: 222 | var = tf.get_variable('weights', trainable=True) 223 | session.run(var.assign(data)) 224 | 225 | --------------------------------------------------------------------------------
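
Note on the HEX step used above: in PACS/alex_cnn_top.py (Hex_flag=True), the logits built from the full representation (y_conv_loss) are projected onto the orthogonal complement of the column space of the logits built from the superficial-statistics branch alone (y_conv_H), so the training loss is computed only on what the textural branch cannot already explain. The following standalone NumPy sketch (not part of the repository) illustrates that projection; the function name hex_project and the small ridge term eps are additions for this sketch only — the TensorFlow code calls tf.matrix_inverse on the Gram matrix directly.

import numpy as np

def hex_project(F_A, F_G, eps=1e-8):
    """Return F_A with the column space of F_G projected out.

    F_A : (batch, classes) logits from the concatenated representation (y_conv_loss).
    F_G : (batch, classes) logits from the superficial-statistics branch (y_conv_H).
    eps : ridge term added here for numerical stability; not present in the TF code.
    """
    gram = np.dot(F_G.T, F_G) + eps * np.eye(F_G.shape[1])
    # Projection matrix onto the column space of F_G
    P = np.dot(np.dot(F_G, np.linalg.inv(gram)), F_G.T)
    # Keep only the component of F_A orthogonal to that column space
    return F_A - np.dot(P, F_A)

if __name__ == '__main__':
    rng = np.random.RandomState(0)
    F_A = rng.randn(32, 7)
    F_G = rng.randn(32, 7)
    F_P = hex_project(F_A, F_G)
    # The projected logits are (numerically) orthogonal to the textural logits.
    print(np.abs(np.dot(F_G.T, F_P)).max())

The adversarial variant earlier in this dump takes the opposite route: instead of projecting, it subtracts the cross-entropy measured on y_conv_H from the loss, pushing the shared classifier to be uninformative when only the textural features are visible.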