├── LICENSE ├── MUMT.py ├── README.md ├── data └── MUMT_data_3x3.mat ├── graphs ├── costVSalpha.png ├── costVSbeta.png ├── gainVSmemsize.png ├── gainVSnum_net.png ├── gainVSnum_net_VM.png ├── gainVSnum_net_kubernetes.png ├── gainVSnum_net_local.png └── timeVSnum_of_dnn.png ├── logs ├── costVSalpha.log ├── costVSbeta.log ├── gainVSmemsize.log ├── gainVSnum_nets.log ├── gainVSnum_nets_KubernetesIBM.log ├── main_dnn_local.log ├── main_dnn_vm.log └── main_dnn_vm_1dnn.log ├── main.py ├── memory.py ├── presentation.pdf ├── report.pdf └── scripts ├── costVSalpha.sbatch ├── costVSbeta.sbatch ├── gainVSmemsize.sbatch ├── gainVSnum_nets.sbatch ├── slurm-18862818.out ├── slurm-18862824.out ├── slurm-18862841.out ├── slurm-18863347.out └── slurm-18889060.out /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 IshJaisia 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 |
--------------------------------------------------------------------------------
/MUMT.py:
--------------------------------------------------------------------------------
1 | # #################################################################
2 | # This file computes the system utility Q, given the sizes of all tasks and the offloading decision.
3 | # #################################################################
4 | import numpy as np
5 | import pandas as pd
6 | class MUMT(object):
7 |     def __init__(self,N,M,rand_seed=1):
8 |         # users and tasks
9 |         self.N,self.M=N,M
10 |         # dataframe index and columns
11 |         self.users=['user%d'%(i+1) for i in range(N)]
12 |         self.DIN=['DIN%d'%(i+1) for i in range(M)]
13 |         self.DOUT=['DOUT%d'%(i+1) for i in range(M)]
14 |         self.Task=['Task%d'%(i+1) for i in range(M)]
15 |         # original dataframes: data-in and data-out
16 |         np.random.seed(rand_seed)
17 |         self.Datain=pd.DataFrame(np.random.randint(10,31,size=(N,M)),index=self.users,columns=self.DIN)
18 |         self.Dataout=pd.DataFrame(np.random.randint(0,1,size=(N,M)),index=self.users,columns=self.DOUT)
19 |         self.Data = pd.concat([self.Datain, self.Dataout], axis=1).reindex(self.Datain.index)  # join_axes is deprecated; reindex on the result gives the same behaviour
20 |         # fixed parameters
21 |         # self.APP denotes the number of CPU cycles per bit required to complete the task
22 |         self.APP,self.fc,self.p,self.a,self.et=1900.,10.*10**9,1,1.5*10**-7,1.42*10**-7
23 |         self.El,self.Tl,self.CUL,self.CDL=3.25*10**-7,4.75*10**-7,100/8,100/8
24 |         # additional parameters
25 |         self.Data['El'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%M]*8*2**20*self.El for i in range(N)], index=self.users)
26 |         # the local energy consumption
27 |         self.Data['et'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%M]*8*2**20*self.et for i in range(N)], index=self.users)
28 |         # transmission energy consumption
29 |         self.Data['d'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%M]*8*2**20 for i in range(N)], index=self.users)
30 |         self.Data['EC'] = self.Data.loc[:,'d']*self.a+self.Data.loc[:,'et']
31 |         # energy consumption of the edge server
32 |         self.Data['TL'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%M]*8*2**20*self.Tl for i in range(N)], index=self.users)
33 |         # local time delay
34 |         self.Data['Tc'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%M]*self.APP*8*2**20/self.fc for i in range(N)], index=self.users)
35 |         # the edge server delay
36 |         self.X=pd.DataFrame(np.random.randint(0,2,size=(N,M)),index=self.users,columns=self.Task)
37 |         self.C=pd.DataFrame(np.random.rand(N,2),index=self.users,columns=['cu','cd'])
38 |         self.XC = pd.concat([self.X, self.C], axis=1).reindex(self.Datain.index)  # join_axes is deprecated; reindex on the result gives the same behaviour
39 |         for i in range(self.N):
40 |             self.XC.loc['user%d'%(i+1),['cu','cd']]=[self.CUL/self.N,self.CDL/self.N]
41 |
42 |
43 |     def compute_Q(self,task_size,M):
44 |         # Given task_size and the offloading decision M, compute the resulting utility.
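        # In words (inferred from the code below): with x_ij the binary offloading decision
        # for task j of user i, the per-user cost is
        #     sum_j [ El_ij*(1 - x_ij) + EC_ij*x_ij ]          (energy: local execution vs. transmit + edge)
        #   + p * max( sum_j TL_ij*(1 - x_ij),                  (delay of the tasks kept local)
        #              sum_j (DIN_ij/cu_i + Tc_ij)*x_ij )       (upload + edge delay of the offloaded tasks)
        # and Q is the sum of this cost over all users.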
45 |         self.Data.iloc[0,0:3]=task_size[0:3]
46 |         self.Data.iloc[1,0:3]=task_size[3:6]
47 |         self.Data.iloc[2,0:3]=task_size[6:9]
48 |
49 |         self.Data['El'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%self.M]*8*2**20*self.El for i in range(self.N)], index=self.users)
50 |         # the local energy consumption
51 |         self.Data['et'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%self.M]*8*2**20*self.et for i in range(self.N)], index=self.users)
52 |         # transmission energy consumption
53 |         self.Data['d'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%self.M]*8*2**20 for i in range(self.N)], index=self.users)
54 |         self.Data['EC'] = self.Data.loc[:,'d']*self.a+self.Data.loc[:,'et']
55 |         # energy consumption of the edge server
56 |         self.Data['TL'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%self.M]*8*2**20*self.Tl for i in range(self.N)], index=self.users)
57 |         # local time delay
58 |         self.Data['Tc'] = pd.Series([self.Data.loc[['user%d'%(i+1)],'DIN1':'DIN%d'%self.M]*self.APP*8*2**20/self.fc for i in range(self.N)], index=self.users)
59 |         # the edge server delay
60 |         self.XC.iloc[0,0:3]=M[:3]
61 |         self.XC.iloc[1,0:3]=M[3:6]
62 |         self.XC.iloc[2,0:3]=M[6:9]
63 |         SUM=0
64 |         for i in range(self.N):
65 |             sum1=(self.Data.loc['user%d'%(i+1),'El']*(1-np.array(self.XC.loc['user%d'%(i+1),'Task1':'Task%d'%self.M]))+self.Data.loc['user%d'%(i+1),'EC']*np.array(self.XC.loc['user%d'%(i+1),'Task1':'Task%d'%self.M])).iloc[0,0:].sum()
66 |             temp1=(self.Data.loc['user%d'%(i+1),'TL']*(1-np.array(self.XC.loc['user%d'%(i+1),'Task1':'Task%d'%self.M]))).iloc[0,0:].sum()
67 |             temp2=((self.Data.loc['user%d'%(i+1),'DIN1':'DIN%d'%self.M]/self.XC.loc['user%d'%(i+1),'cu']+self.Data.loc['user%d'%(i+1),'Tc'])*np.array(self.XC.loc['user%d'%(i+1),'Task1':'Task%d'%self.M])).iloc[0,0:].sum()
68 |             SUM+=sum1+self.p*max(temp1,temp2)
69 |         # integrate energy consumption and time delay
70 |         return SUM
71 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Edge Computing
2 | *To be, or not to be : Edge Computing Version*
3 |
4 | Python code to reproduce our work on a mobile edge computing project, where multiple parallel Deep Neural Networks (DNNs) are used to efficiently generate near-optimal binary offloading decisions. This project includes:
5 |
6 | - [memory.py](memory.py): the DNN structure for DDLO, including the training and testing structures
7 |
8 | - [data](./data): all data are stored in this subdirectory, which includes:
9 |
10 |   - **MUMT_data_3x3.mat**: the training and testing data set, where 3x3 means that there are 3 users, each with 3 tasks.
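    The fields that [main.py](main.py) reads from this file are `task_size` and `gain_min`; a minimal way to inspect them:

    ```python
    import scipy.io as sio

    data = sio.loadmat('./data/MUMT_data_3x3.mat')
    task_size = data['task_size']   # one row per sample: 3 users x 3 tasks = 9 task sizes
    gain_min = data['gain_min']     # optimal (minimum) cost per sample, found by enumeration
    print(task_size.shape, gain_min.shape)
    ```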
11 | 12 | - [main.py](main.py): run this file, inclduing setting system parameters 13 | 14 | - [MUMT.py](MUMT.py): compute system utility Q, provided with the size of all tasks and offloading decision 15 | 16 | ## Required packages 17 | 18 | - Tensorflow 19 | 20 | - numpy 21 | 22 | - scipy 23 | 24 | ## How the code works 25 | 26 | run the file, [main.py](main.py) 27 | -------------------------------------------------------------------------------- /data/MUMT_data_3x3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/data/MUMT_data_3x3.mat -------------------------------------------------------------------------------- /graphs/costVSalpha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/graphs/costVSalpha.png -------------------------------------------------------------------------------- /graphs/costVSbeta.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/graphs/costVSbeta.png -------------------------------------------------------------------------------- /graphs/gainVSmemsize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/graphs/gainVSmemsize.png -------------------------------------------------------------------------------- /graphs/gainVSnum_net.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/graphs/gainVSnum_net.png -------------------------------------------------------------------------------- /graphs/gainVSnum_net_VM.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/graphs/gainVSnum_net_VM.png -------------------------------------------------------------------------------- /graphs/gainVSnum_net_kubernetes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/graphs/gainVSnum_net_kubernetes.png -------------------------------------------------------------------------------- /graphs/gainVSnum_net_local.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/graphs/gainVSnum_net_local.png -------------------------------------------------------------------------------- /graphs/timeVSnum_of_dnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/graphs/timeVSnum_of_dnn.png -------------------------------------------------------------------------------- /logs/main_dnn_vm.log: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/logs/main_dnn_vm.log
--------------------------------------------------------------------------------
/logs/main_dnn_vm_1dnn.log:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/logs/main_dnn_vm_1dnn.log
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | # #################################################################
2 | #
3 | # This file contains the main code to train and test the algorithm. It loads the
4 | # training samples saved in ./data/MUMT_data_3x3.mat, splits the samples into
5 | # two parts (training and testing data constitute 80% and
6 | # 20% respectively), trains the DNNs with the training samples, and finally tests
7 | # them with the test data.
8 | #
9 | # Input: ./data/MUMT_data_3x3.mat
10 | # - There are 20,000 samples saved in ./data/MUMT_data_3x3.mat.
11 | # - The offloading modes in the data samples are generated by enumerating all
12 | #   possible binary combinations of offloading decisions.
13 | # -----------------------------------------------------------------
14 | # | task size                   | task_size |
15 | # -----------------------------------------------------------------
16 | # | optimal value               | gain_min  |
17 | # -----------------------------------------------------------------
18 | # | optimal offloading decision | gain_mode |
19 | # -----------------------------------------------------------------
20 | #
21 | # #################################################################
22 |
23 | import scipy.io as sio  # import scipy.io for .mat file I/O
24 | import numpy as np  # import numpy
25 | import MUMT as MU
26 | from memory import MemoryDNN
27 | import time
28 |
29 | def plot_gain(gain_his,name=None):
30 |     # display data
31 |     import matplotlib.pyplot as plt
32 |     import pandas as pd
33 |     import matplotlib as mpl
34 |
35 |     gain_array = np.asarray(gain_his)
36 |     df = pd.DataFrame(gain_his)
37 |
38 |     mpl.style.use('seaborn')
39 |     fig, ax = plt.subplots(figsize=(15,8))
40 |     rolling_intv = 60
41 |     df_roll=df.rolling(rolling_intv, min_periods=1).mean()
42 |     if name is not None:
43 |         sio.savemat('./data/MUMT(%s)'%name,{'ratio':gain_his})
44 |
45 |     plt.plot(np.arange(len(gain_array))+1, df_roll, 'b')
46 |     plt.fill_between(np.arange(len(gain_array))+1, df.rolling(rolling_intv, min_periods=1).min()[0], df.rolling(rolling_intv, min_periods=1).max()[0], color = 'b', alpha = 0.2)
47 |     plt.ylabel('Gain ratio')
48 |     plt.xlabel('learning steps')
49 |     plt.savefig('test.png')
50 |     plt.show()
51 |
52 | def save_to_txt(gain_his, file_path):
53 |     with open(file_path, 'w') as f:
54 |         for gain in gain_his:
55 |             f.write("%s \n" % gain)
56 |
57 | if __name__ == "__main__":
58 |     '''
59 |     This algorithm generates K candidate offloading modes from the DNNs and chooses the one with
60 |     the lowest cost (largest reward). The chosen mode is stored in the memory, which is
61 |     further used to train the DNNs.
62 |     '''
63 |     N = 20000      # number of samples (time frames)
64 |     net_num = 3    # number of DNNs
65 |     WD_num = 3     # number of WDs (wireless devices)
66 |     task_num = 3   # number of tasks per WD
67 |
68 |     # Load data
69 |     task_size = sio.loadmat('./data/MUMT_data_3x3')['task_size']
70 |     gain = sio.loadmat('./data/MUMT_data_3x3')['gain_min']
71 |
72 |     # generate the train and test data sample index
73 |     # data are split 80:20
74 |     # training data are randomly sampled with duplication if N > total data size
75 |     split_idx = int(.8* len(task_size))
76 |     num_test = min(len(task_size) - split_idx, N - int(.8* N)) # test data size
77 |
78 |     mem = MemoryDNN(net = [WD_num*task_num, 120, 80, WD_num*task_num],
79 |                     net_num=net_num,
80 |                     learning_rate = 0.01,
81 |                     training_interval=10,
82 |                     batch_size=128,
83 |                     memory_size=1024
84 |                     )
85 |
86 |     start_time=time.time()
87 |
88 |     gain_his = []
89 |     gain_his_ratio = []
90 |     knm_idx_his = []
91 |     m_li=[]
92 |     env = MU.MUMT(3,3,rand_seed=1)
93 |     for i in range(N):
94 |         if i % (N//100) == 0:
95 |             print("----------------------------------------------rate of progress:%0.2f"%(i/N))
96 |         if i < N - num_test:
97 |             # training
98 |             i_idx = i % split_idx
99 |         else:
100 |             # test
101 |             i_idx = i - N + num_test + split_idx
102 |         t1 = task_size[i_idx,:]
103 |         # preprocessing (scaling) for better training
104 |         t = t1*10-200
105 |
106 |
107 |         # produce the offloading decisions
108 |         m_list = mem.decode(t)
109 |         m_li.append(m_list)
110 |         r_list = []
111 |         for m in m_list:
112 |             r_list.append(env.compute_Q(t1,m))
113 |
114 |
115 |         # memorize the best (lowest-cost) decision and train the DNNs
116 |         # the training process is included in mem.encode()
117 |         mem.encode(t, m_list[np.argmin(r_list)])
118 |
119 |         # record the best cost and its index
120 |         gain_his.append(np.min(r_list))
121 |         knm_idx_his.append(np.argmin(r_list))
122 |         gain_his_ratio.append(gain[0][i_idx]/gain_his[-1])
123 |
124 |
125 |     total_time=time.time()-start_time
126 |     print('time_cost:%s'%total_time)
127 |     print("gain/max ratio of test: ", sum(gain_his_ratio[-num_test:])/num_test)
128 |     print("The number of net: ", net_num)
129 |     mem.plot_cost(net_num)
130 |     # training cost of the DNNs
131 |     plot_gain(gain_his_ratio,name=None)
132 |     # plot the ratio of the optimal value to the predicted value
133 |
--------------------------------------------------------------------------------
/memory.py:
--------------------------------------------------------------------------------
1 | # #################################################################
2 | # This file contains the memory operations, including the encoding and decoding operations.
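# Typical usage (as in main.py):
#     mem = MemoryDNN(net=[9, 120, 80, 9], net_num=3, ...)
#     m_list = mem.decode(t)                       # each DNN proposes a binary offloading decision
#     costs = [env.compute_Q(t1, m) for m in m_list]
#     mem.encode(t, m_list[np.argmin(costs)])      # store the best decision and (periodically) train
#     mem.plot_cost(net_num)                       # plot the training loss of each DNN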
3 | # ################################################################# 4 | 5 | from __future__ import print_function 6 | import tensorflow as tf 7 | import numpy as np 8 | 9 | 10 | # DNN network for memory 11 | class MemoryDNN: 12 | def __init__( 13 | self, 14 | net, 15 | net_num, 16 | learning_rate = 0.01, 17 | training_interval=10, 18 | batch_size=128, 19 | memory_size=1024, 20 | output_graph=False 21 | ): 22 | # net: [n_input, n_hidden_1st, n_hidded_2ed, n_output] 23 | assert(len(net) is 4) # only 4-layer DNN 24 | 25 | self.net = net 26 | self.net_num=net_num 27 | self.training_interval = training_interval # learn every #training_interval 28 | self.lr = learning_rate 29 | self.batch_size = batch_size 30 | self.memory_size = memory_size 31 | 32 | # stored # memory entry 33 | self.memory_counter = 1 34 | self.m_pred=[] 35 | self.loss=[] 36 | self.train_op=[] 37 | self.cost_his=[[] for i in range(self.net_num)] 38 | 39 | # reset graph 40 | tf.reset_default_graph() 41 | 42 | # initialize zero memory [h, m] 43 | self.memory = np.zeros((self.memory_size, self.net[0]+ self.net[-1])) 44 | # construct memory network 45 | self._build_net() 46 | 47 | self.sess = tf.Session() 48 | 49 | # for tensorboard 50 | if output_graph: 51 | # $ tensorboard --logdir=logs 52 | # tf.train.SummaryWriter soon be deprecated, use following 53 | tf.summary.FileWriter("logs/", self.sess.graph) 54 | 55 | self.sess.run(tf.global_variables_initializer()) 56 | 57 | 58 | def _build_net(self): 59 | def build_layers(h, c_names, net, w_initializer, b_initializer): 60 | with tf.variable_scope('l1'): 61 | w1 = tf.get_variable('w1', [net[0], net[1]], initializer=w_initializer, collections=c_names) 62 | b1 = tf.get_variable('b1', [1, self.net[1]], initializer=b_initializer, collections=c_names) 63 | l1 = tf.nn.relu(tf.matmul(h, w1) + b1) 64 | #print(w1.name) 65 | with tf.variable_scope('l2'): 66 | w2 = tf.get_variable('w2', [net[1], net[2]], initializer=w_initializer, collections=c_names) 67 | b2 = tf.get_variable('b2', [1, net[2]], initializer=b_initializer, collections=c_names) 68 | l2 = tf.nn.relu(tf.matmul(l1, w2) + b2) 69 | 70 | with tf.variable_scope('M'): 71 | w3 = tf.get_variable('w3', [net[2], net[3]], initializer=w_initializer, collections=c_names) 72 | b3 = tf.get_variable('b3', [1, net[3]], initializer=b_initializer, collections=c_names) 73 | out = tf.matmul(l2, w3) + b3 74 | 75 | return out 76 | 77 | # ------------------ build memory_net ------------------ 78 | self.h = tf.placeholder(tf.float32, [None, self.net[0]], name='h') # input 79 | self.m = tf.placeholder(tf.float32, [None, self.net[-1]], name='mode') # for calculating loss 80 | self.is_train = tf.placeholder("bool") # train or evaluate 81 | for i in range(self.net_num): 82 | with tf.variable_scope('memory%d_net'%i): 83 | w_initializer, b_initializer = \ 84 | tf.random_normal_initializer(0., 1/self.net[0]), tf.constant_initializer(0) # config of layers 85 | self.m_pred.append(build_layers(self.h, ['memory%d_net_params'%i, tf.GraphKeys.GLOBAL_VARIABLES], self.net, w_initializer, b_initializer)) 86 | with tf.variable_scope('loss%d'%i): 87 | self.loss.append(tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = self.m, logits = self.m_pred[i]))) 88 | with tf.variable_scope('train%d'%i): 89 | self.train_op.append(tf.train.AdamOptimizer(self.lr, 0.09).minimize(self.loss[i])) 90 | 91 | def remember(self, h, m): 92 | # replace the old memory with new memory 93 | idx = self.memory_counter % self.memory_size 94 | self.memory[idx, :] = np.hstack((h,m)) 95 | 
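# note: idx = memory_counter % memory_size, so once the buffer is full the oldest entries are overwritten first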
96 | self.memory_counter += 1 97 | 98 | def encode(self, h, m): 99 | # encoding the entry 100 | self.remember(h, m) 101 | # train the DNN every 10 step 102 | if self.memory_counter>=512 and self.memory_counter % self.training_interval == 0: 103 | self.learn() 104 | 105 | def learn(self): 106 | # sample batch memory from all memory 107 | sample_index=[] 108 | batch_memory=[] 109 | h_train=[] 110 | m_train=[] 111 | if self.memory_counter > self.memory_size: 112 | for j in range(self.net_num): 113 | sample_index.append(np.random.choice(self.memory_size, size=self.batch_size)) 114 | else: 115 | for j in range(self.net_num): 116 | sample_index.append(np.random.choice(self.memory_counter, size=self.batch_size)) 117 | for j in range(self.net_num): 118 | batch_memory.append(self.memory[sample_index[j], :]) 119 | h_train.append(batch_memory[j][:, 0: self.net[0]]) 120 | m_train.append(batch_memory[j][:, self.net[0]:]) 121 | _, cost = self.sess.run([self.train_op[j], self.loss[j]], 122 | feed_dict={self.h: h_train[j], self.m: m_train[j]}) 123 | assert(cost >0) 124 | self.cost_his[j].append(cost) 125 | 126 | def decode(self, h): 127 | # to have batch dimension when feed into tf placeholder 128 | m_list = [] 129 | h = h[np.newaxis, :] 130 | for k in range(self.net_num): 131 | m_pred = self.sess.run(self.m_pred[k], feed_dict={self.h: h}) 132 | m_list.append(1*(m_pred[0]>0)) 133 | 134 | return m_list 135 | 136 | def plot_cost(self, net_num): 137 | import matplotlib.pyplot as plty 138 | plty.figure(net_num) 139 | colors ="bgrcmykw" 140 | for p in range(self.net_num): 141 | plty.plot(np.arange(len(self.cost_his[p])), self.cost_his[p],colors[np.random.randint(0,8)]) 142 | plty.ylabel('Cost of MemoryDNN') 143 | plty.xlabel('training steps') 144 | # plty.show() 145 | plty.savefig("plotcost_" + str(net_num) + ".png") 146 | -------------------------------------------------------------------------------- /presentation.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/presentation.pdf -------------------------------------------------------------------------------- /report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/utkarshKumr/Offloading-in-edge-computing/632097655f8ac1674aafdfa44df341f105ed86c1/report.pdf -------------------------------------------------------------------------------- /scripts/costVSalpha.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=edge-sim 3 | #SBATCH --nodes=4 4 | #SBATCH --ntasks-per-node=1 5 | #SBATCH --cpus-per-task=1 6 | #SBATCH --mem=8GB 7 | #SBATCH --time=14:00:00 8 | 9 | module purge 10 | module load anaconda2/2019.10 11 | conda init bash 12 | source /home/av2783/.bashrc 13 | conda activate tf1.0v1 14 | 15 | cd /home/av2783/cml/edgeComputing/Alpha 16 | python alpha.py > /home/av2783/cml/edgeComputing/logs/costVSalpha.log 17 | -------------------------------------------------------------------------------- /scripts/costVSbeta.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=edge-sim 3 | #SBATCH --nodes=4 4 | #SBATCH --ntasks-per-node=1 5 | #SBATCH --cpus-per-task=1 6 | #SBATCH --mem=8GB 7 | #SBATCH --time=14:00:00 8 | 9 | module purge 10 | module load anaconda2/2019.10 11 | conda init bash 12 | source 
/home/av2783/.bashrc 13 | conda activate tf1.0v1 14 | 15 | cd /home/av2783/cml/edgeComputing/Beta 16 | python beta.py > /home/av2783/cml/edgeComputing/logs/costVSbeta.log 17 | -------------------------------------------------------------------------------- /scripts/gainVSmemsize.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=edge-sim 3 | #SBATCH --nodes=4 4 | #SBATCH --ntasks-per-node=1 5 | #SBATCH --cpus-per-task=1 6 | #SBATCH --mem=8GB 7 | #SBATCH --time=14:00:00 8 | 9 | module purge 10 | module load anaconda2/2019.10 11 | conda init bash 12 | source /home/av2783/.bashrc 13 | conda activate tf1.0v1 14 | 15 | cd /home/av2783/cml/edgeComputing/ 16 | python main_memsize.py > /home/av2783/cml/edgeComputing/logs/gainVSmemsize.log 17 | 18 | -------------------------------------------------------------------------------- /scripts/gainVSnum_nets.sbatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=edge-sim 3 | #SBATCH --nodes=4 4 | #SBATCH --ntasks-per-node=1 5 | #SBATCH --cpus-per-task=1 6 | #SBATCH --mem=8GB 7 | #SBATCH --time=14:00:00 8 | 9 | module purge 10 | module load anaconda2/2019.10 11 | conda init bash 12 | source /home/av2783/.bashrc 13 | conda activate tf1.0v1 14 | 15 | cd /home/av2783/cml/edgeComputing/ 16 | python main_dnn.py > /home/av2783/cml/edgeComputing/logs/gainVSnum_nets.log 17 | 18 | -------------------------------------------------------------------------------- /scripts/slurm-18862818.out: -------------------------------------------------------------------------------- 1 | no change /share/apps/anaconda2/2019.10/condabin/conda 2 | no change /share/apps/anaconda2/2019.10/bin/conda 3 | no change /share/apps/anaconda2/2019.10/bin/conda-env 4 | no change /share/apps/anaconda2/2019.10/bin/activate 5 | no change /share/apps/anaconda2/2019.10/bin/deactivate 6 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.sh 7 | no change /share/apps/anaconda2/2019.10/etc/fish/conf.d/conda.fish 8 | no change /share/apps/anaconda2/2019.10/shell/condabin/Conda.psm1 9 | no change /share/apps/anaconda2/2019.10/shell/condabin/conda-hook.ps1 10 | no change /share/apps/anaconda2/2019.10/lib/python2.7/site-packages/xontrib/conda.xsh 11 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.csh 12 | no change /home/av2783/.bashrc 13 | No action taken. 14 | Could not open a connection to your authentication agent. 15 | /home/av2783/.bashrc: line 21: hadoop: command not found 16 | /home/av2783/.bashrc: line 22: hadoop: command not found 17 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE3 instructions, but these are available on your machine and could speed up CPU computations. 18 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations. 19 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations. 20 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations. 
21 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations. 22 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX512F instructions, but these are available on your machine and could speed up CPU computations. 23 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations. 24 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:471: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 25 | _np_qint8 = np.dtype([("qint8", np.int8, 1)]) 26 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:472: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 27 | _np_quint8 = np.dtype([("quint8", np.uint8, 1)]) 28 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:473: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 29 | _np_qint16 = np.dtype([("qint16", np.int16, 1)]) 30 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:474: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 31 | _np_quint16 = np.dtype([("quint16", np.uint16, 1)]) 32 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:475: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 33 | _np_qint32 = np.dtype([("qint32", np.int32, 1)]) 34 | /home/av2783/cml/edgeComputing/MUMT.py:21: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 35 | self.Data = pd.concat([self.Datain, self.Dataout], axis=1,join_axes=[self.Datain.index]) 36 | /home/av2783/cml/edgeComputing/MUMT.py:40: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 
37 | self.XC = pd.concat([self.X, self.C], axis=1,join_axes=[self.Datain.index]) 38 | -------------------------------------------------------------------------------- /scripts/slurm-18862824.out: -------------------------------------------------------------------------------- 1 | no change /share/apps/anaconda2/2019.10/condabin/conda 2 | no change /share/apps/anaconda2/2019.10/bin/conda 3 | no change /share/apps/anaconda2/2019.10/bin/conda-env 4 | no change /share/apps/anaconda2/2019.10/bin/activate 5 | no change /share/apps/anaconda2/2019.10/bin/deactivate 6 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.sh 7 | no change /share/apps/anaconda2/2019.10/etc/fish/conf.d/conda.fish 8 | no change /share/apps/anaconda2/2019.10/shell/condabin/Conda.psm1 9 | no change /share/apps/anaconda2/2019.10/shell/condabin/conda-hook.ps1 10 | no change /share/apps/anaconda2/2019.10/lib/python2.7/site-packages/xontrib/conda.xsh 11 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.csh 12 | no change /home/av2783/.bashrc 13 | No action taken. 14 | Could not open a connection to your authentication agent. 15 | /home/av2783/.bashrc: line 21: hadoop: command not found 16 | /home/av2783/.bashrc: line 22: hadoop: command not found 17 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE3 instructions, but these are available on your machine and could speed up CPU computations. 18 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations. 19 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations. 20 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations. 21 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations. 22 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX512F instructions, but these are available on your machine and could speed up CPU computations. 23 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations. 24 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:471: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 25 | _np_qint8 = np.dtype([("qint8", np.int8, 1)]) 26 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:472: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 
27 | _np_quint8 = np.dtype([("quint8", np.uint8, 1)]) 28 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:473: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 29 | _np_qint16 = np.dtype([("qint16", np.int16, 1)]) 30 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:474: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 31 | _np_quint16 = np.dtype([("quint16", np.uint16, 1)]) 32 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:475: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 33 | _np_qint32 = np.dtype([("qint32", np.int32, 1)]) 34 | /home/av2783/cml/edgeComputing/MUMT.py:21: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 35 | self.Data = pd.concat([self.Datain, self.Dataout], axis=1,join_axes=[self.Datain.index]) 36 | /home/av2783/cml/edgeComputing/MUMT.py:40: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 37 | self.XC = pd.concat([self.X, self.C], axis=1,join_axes=[self.Datain.index]) 38 | -------------------------------------------------------------------------------- /scripts/slurm-18862841.out: -------------------------------------------------------------------------------- 1 | no change /share/apps/anaconda2/2019.10/condabin/conda 2 | no change /share/apps/anaconda2/2019.10/bin/conda 3 | no change /share/apps/anaconda2/2019.10/bin/conda-env 4 | no change /share/apps/anaconda2/2019.10/bin/activate 5 | no change /share/apps/anaconda2/2019.10/bin/deactivate 6 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.sh 7 | no change /share/apps/anaconda2/2019.10/etc/fish/conf.d/conda.fish 8 | no change /share/apps/anaconda2/2019.10/shell/condabin/Conda.psm1 9 | no change /share/apps/anaconda2/2019.10/shell/condabin/conda-hook.ps1 10 | no change /share/apps/anaconda2/2019.10/lib/python2.7/site-packages/xontrib/conda.xsh 11 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.csh 12 | no change /home/av2783/.bashrc 13 | No action taken. 14 | Could not open a connection to your authentication agent. 15 | /home/av2783/.bashrc: line 21: hadoop: command not found 16 | /home/av2783/.bashrc: line 22: hadoop: command not found 17 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE3 instructions, but these are available on your machine and could speed up CPU computations. 18 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations. 19 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations. 20 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations. 
21 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations. 22 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX512F instructions, but these are available on your machine and could speed up CPU computations. 23 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations. 24 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:471: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 25 | _np_qint8 = np.dtype([("qint8", np.int8, 1)]) 26 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:472: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 27 | _np_quint8 = np.dtype([("quint8", np.uint8, 1)]) 28 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:473: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 29 | _np_qint16 = np.dtype([("qint16", np.int16, 1)]) 30 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:474: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 31 | _np_quint16 = np.dtype([("quint16", np.uint16, 1)]) 32 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:475: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 33 | _np_qint32 = np.dtype([("qint32", np.int32, 1)]) 34 | /home/av2783/cml/edgeComputing/Alpha/MUMT.py:21: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 35 | self.Data = pd.concat([self.Datain, self.Dataout], axis=1,join_axes=[self.Datain.index]) 36 | /home/av2783/cml/edgeComputing/Alpha/MUMT.py:40: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 
37 | self.XC = pd.concat([self.X, self.C], axis=1,join_axes=[self.Datain.index]) 38 | -------------------------------------------------------------------------------- /scripts/slurm-18863347.out: -------------------------------------------------------------------------------- 1 | no change /share/apps/anaconda2/2019.10/condabin/conda 2 | no change /share/apps/anaconda2/2019.10/bin/conda 3 | no change /share/apps/anaconda2/2019.10/bin/conda-env 4 | no change /share/apps/anaconda2/2019.10/bin/activate 5 | no change /share/apps/anaconda2/2019.10/bin/deactivate 6 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.sh 7 | no change /share/apps/anaconda2/2019.10/etc/fish/conf.d/conda.fish 8 | no change /share/apps/anaconda2/2019.10/shell/condabin/Conda.psm1 9 | no change /share/apps/anaconda2/2019.10/shell/condabin/conda-hook.ps1 10 | no change /share/apps/anaconda2/2019.10/lib/python2.7/site-packages/xontrib/conda.xsh 11 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.csh 12 | no change /home/av2783/.bashrc 13 | No action taken. 14 | Could not open a connection to your authentication agent. 15 | /home/av2783/.bashrc: line 21: hadoop: command not found 16 | /home/av2783/.bashrc: line 22: hadoop: command not found 17 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE3 instructions, but these are available on your machine and could speed up CPU computations. 18 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations. 19 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations. 20 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations. 21 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations. 22 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX512F instructions, but these are available on your machine and could speed up CPU computations. 23 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations. 24 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:471: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 25 | _np_qint8 = np.dtype([("qint8", np.int8, 1)]) 26 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:472: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 
27 | _np_quint8 = np.dtype([("quint8", np.uint8, 1)]) 28 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:473: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 29 | _np_qint16 = np.dtype([("qint16", np.int16, 1)]) 30 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:474: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 31 | _np_quint16 = np.dtype([("quint16", np.uint16, 1)]) 32 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:475: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 33 | _np_qint32 = np.dtype([("qint32", np.int32, 1)]) 34 | /home/av2783/cml/edgeComputing/Beta/MUMT.py:21: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 35 | self.Data = pd.concat([self.Datain, self.Dataout], axis=1,join_axes=[self.Datain.index]) 36 | /home/av2783/cml/edgeComputing/Beta/MUMT.py:40: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 37 | self.XC = pd.concat([self.X, self.C], axis=1,join_axes=[self.Datain.index]) 38 | -------------------------------------------------------------------------------- /scripts/slurm-18889060.out: -------------------------------------------------------------------------------- 1 | no change /share/apps/anaconda2/2019.10/condabin/conda 2 | no change /share/apps/anaconda2/2019.10/bin/conda 3 | no change /share/apps/anaconda2/2019.10/bin/conda-env 4 | no change /share/apps/anaconda2/2019.10/bin/activate 5 | no change /share/apps/anaconda2/2019.10/bin/deactivate 6 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.sh 7 | no change /share/apps/anaconda2/2019.10/etc/fish/conf.d/conda.fish 8 | no change /share/apps/anaconda2/2019.10/shell/condabin/Conda.psm1 9 | no change /share/apps/anaconda2/2019.10/shell/condabin/conda-hook.ps1 10 | no change /share/apps/anaconda2/2019.10/lib/python2.7/site-packages/xontrib/conda.xsh 11 | no change /share/apps/anaconda2/2019.10/etc/profile.d/conda.csh 12 | no change /home/av2783/.bashrc 13 | No action taken. 14 | Could not open a connection to your authentication agent. 15 | /home/av2783/.bashrc: line 21: hadoop: command not found 16 | /home/av2783/.bashrc: line 22: hadoop: command not found 17 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE3 instructions, but these are available on your machine and could speed up CPU computations. 18 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations. 19 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations. 20 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations. 
21 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX2 instructions, but these are available on your machine and could speed up CPU computations. 22 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX512F instructions, but these are available on your machine and could speed up CPU computations. 23 | W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use FMA instructions, but these are available on your machine and could speed up CPU computations. 24 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:471: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 25 | _np_qint8 = np.dtype([("qint8", np.int8, 1)]) 26 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:472: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 27 | _np_quint8 = np.dtype([("quint8", np.uint8, 1)]) 28 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:473: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 29 | _np_qint16 = np.dtype([("qint16", np.int16, 1)]) 30 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:474: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 31 | _np_quint16 = np.dtype([("quint16", np.uint16, 1)]) 32 | /home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/tensorflow/python/framework/dtypes.py:475: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'. 33 | _np_qint32 = np.dtype([("qint32", np.int32, 1)]) 34 | /home/av2783/cml/edgeComputing/MUMT.py:21: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 35 | self.Data = pd.concat([self.Datain, self.Dataout], axis=1,join_axes=[self.Datain.index]) 36 | /home/av2783/cml/edgeComputing/MUMT.py:40: FutureWarning: The join_axes-keyword is deprecated. Use .reindex or .reindex_like on the result to achieve the same functionality. 
37 | self.XC = pd.concat([self.X, self.C], axis=1,join_axes=[self.Datain.index]) 38 | Traceback (most recent call last): 39 | File "main_dnn.py", line 165, in 40 | save_plot() 41 | File "main_dnn.py", line 54, in save_plot 42 | plt.savefig('graphs_local/gainVSnum_net.png') 43 | File "/home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/matplotlib/pyplot.py", line 689, in savefig 44 | res = fig.savefig(*args, **kwargs) 45 | File "/home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/matplotlib/figure.py", line 2094, in savefig 46 | self.canvas.print_figure(fname, **kwargs) 47 | File "/home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/matplotlib/backend_bases.py", line 2075, in print_figure 48 | **kwargs) 49 | File "/home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/matplotlib/backends/backend_agg.py", line 521, in print_png 50 | cbook.open_file_cm(filename_or_obj, "wb") as fh: 51 | File "/home/av2783/.conda/envs/tf1.0v1/lib/python3.5/contextlib.py", line 59, in __enter__ 52 | return next(self.gen) 53 | File "/home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/matplotlib/cbook/__init__.py", line 407, in open_file_cm 54 | fh, opened = to_filehandle(path_or_file, mode, True, encoding) 55 | File "/home/av2783/.conda/envs/tf1.0v1/lib/python3.5/site-packages/matplotlib/cbook/__init__.py", line 392, in to_filehandle 56 | fh = open(fname, flag, encoding=encoding) 57 | FileNotFoundError: [Errno 2] No such file or directory: 'graphs_local/gainVSnum_net.png' 58 | --------------------------------------------------------------------------------
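The traceback above is raised because the figure is saved into a directory ('graphs_local/') that does not exist; matplotlib does not create missing directories. A minimal guard, illustrative only since main_dnn.py is not included in this snapshot:

```python
import os
import matplotlib.pyplot as plt

os.makedirs('graphs_local', exist_ok=True)   # create the output directory if it is missing
plt.savefig('graphs_local/gainVSnum_net.png')
```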