├── README.md
├── ensemble_model.py
├── main_ArrayImperfections.py
└── utils.py
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# DNN-DOA

Deep-neural-network-based direction-of-arrival (DOA) estimation evaluated
under array imperfections (mutual coupling, gain/phase errors, sensor
position errors). A multi-task autoencoder decomposes the array covariance
vector into spatial sub-bands, and parallel DNNs reconstruct the spatial
spectrum; see `main_ArrayImperfections.py`. Requires TensorFlow 1.x, NumPy,
SciPy and Matplotlib.
--------------------------------------------------------------------------------
/ensemble_model.py:
--------------------------------------------------------------------------------

import numpy as np
import tensorflow as tf


class Ensemble_Model:
    """Multi-task autoencoder for spatial filtering plus parallel DNNs for
    spatial spectrum estimation (TensorFlow 1.x, graph mode)."""

    def __init__(self, input_size_sf, hidden_size_sf, output_size_sf, SF_NUM, learning_rate_sf,
                 input_size_ss, hidden_size_ss, output_size_ss, learning_rate_ss,
                 reconstruct_nn_flag, train_sf_flag, train_ss_flag,
                 model_path_nn, model_path_sf, model_path_ss):
        # # autoencoder for spatial filtering
        # placeholders: samples on rows; the graph works on the transposed,
        # feature-major layout
        self.data_train_ = tf.placeholder(tf.float32, shape=[None, input_size_sf])
        self.label_sf_ = tf.placeholder(tf.float32, shape=[None, output_size_sf * SF_NUM])
        self.data_train = tf.transpose(self.data_train_)
        self.label_sf = tf.transpose(self.label_sf_)

        # load saved parameter dictionaries; allow_pickle=True is required to
        # load dict-valued .npy files with NumPy >= 1.16.3
        if not reconstruct_nn_flag:
            var_dict_nn = np.load(model_path_nn, allow_pickle=True).item()
        if not train_sf_flag:
            var_dict_sf = np.load(model_path_sf, allow_pickle=True).item()
        if not train_ss_flag:
            var_dict_ss = np.load(model_path_ss, allow_pickle=True).item()

        # autoencoder parameters
        if reconstruct_nn_flag:
            # encoder parameters
            self.W_ec = tf.Variable(initial_value=tf.random_uniform([hidden_size_sf, input_size_sf], minval=-0.1, maxval=0.1),
                                    trainable=True, name='W_ec')
            self.b_ec = tf.Variable(initial_value=tf.random_uniform([hidden_size_sf, 1], minval=-0.1, maxval=0.1),
                                    trainable=True, name='b_ec')

            # decoder parameters
            self.W_dc = tf.Variable(initial_value=tf.random_uniform([output_size_sf * SF_NUM, hidden_size_sf], minval=-0.1, maxval=0.1),
                                    trainable=True, name='W_dc')
            self.b_dc = tf.Variable(initial_value=tf.random_uniform([output_size_sf * SF_NUM, 1], minval=-0.1, maxval=0.1),
                                    trainable=True, name='b_dc')
        elif train_sf_flag:
            # warm-start from the saved initial model; keep trainable
            self.W_ec = tf.Variable(initial_value=var_dict_nn['W_ec:0'], trainable=True, name='W_ec')
            self.b_ec = tf.Variable(initial_value=var_dict_nn['b_ec:0'], trainable=True, name='b_ec')
            self.W_dc = tf.Variable(initial_value=var_dict_nn['W_dc:0'], trainable=True, name='W_dc')
            self.b_dc = tf.Variable(initial_value=var_dict_nn['b_dc:0'], trainable=True, name='b_dc')
        else:
            # load the trained spatial filter and freeze it
            self.W_ec = tf.Variable(initial_value=var_dict_sf['W_ec:0'], trainable=False, name='W_ec')
            self.b_ec = tf.Variable(initial_value=var_dict_sf['b_ec:0'], trainable=False, name='b_ec')
            self.W_dc = tf.Variable(initial_value=var_dict_sf['W_dc:0'], trainable=False, name='W_dc')
            self.b_dc = tf.Variable(initial_value=var_dict_sf['b_dc:0'], trainable=False, name='b_dc')

        # output prediction
        self.h_sf = tf.matmul(self.W_ec, self.data_train) + self.b_ec
        self.output_pred_sf = tf.matmul(self.W_dc, self.h_sf) + self.b_dc

        # output target
        self.output_target_sf = self.label_sf

        # loss and train op: the mean squared error is rescaled by the output
        # dimension, so the loss equals the per-sample sum of squared errors
        # averaged over the batch
        self.error_sf = self.output_target_sf - self.output_pred_sf
        self.loss_sf = tf.reduce_mean(tf.square(self.error_sf)) * (output_size_sf * SF_NUM)
        if train_sf_flag:
            self.train_op_sf = tf.train.RMSPropOptimizer(learning_rate=learning_rate_sf).minimize(self.loss_sf)
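        # Shape sketch for the spatial-filtering branch (derived from the
        # definitions above; B denotes the batch size):
        #   data_train:     [input_size_sf, B]
        #   h_sf:           [hidden_size_sf, B]            h = W_ec x + b_ec
        #   output_pred_sf: [output_size_sf * SF_NUM, B]   x_hat = W_dc h + b_dc
        # i.e. a linear autoencoder whose decoder emits SF_NUM stacked copies
        # of the input space, one per spatial sub-band.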
        # # spatial spectrum estimation
        self.label_ss_ = tf.placeholder(tf.float32, shape=[None, output_size_ss * SF_NUM])
        self.label_ss = tf.transpose(self.label_ss_)
        # split the autoencoder output into one input block per spatial filter
        self.sf_output_list = []
        for sf_idx in range(SF_NUM):
            sf_output_curr = self.output_pred_sf[sf_idx * output_size_sf : (sf_idx + 1) * output_size_sf]
            self.sf_output_list.append(sf_output_curr)

        # input-to-hidden, hidden-to-hidden, hidden-to-output parameters
        # (only hidden_size_ss[0] and hidden_size_ss[1] are used: two hidden layers)
        self.Whi_list = []
        self.bhi_list = []
        self.Whh_list = []
        self.bhh_list = []
        self.Woh_list = []
        self.boh_list = []
        if reconstruct_nn_flag:
            for sf_idx in range(SF_NUM):
                Whi_curr = tf.Variable(
                    initial_value=tf.random_uniform([hidden_size_ss[0], input_size_ss], minval=-0.1, maxval=0.1),
                    trainable=True, name='Whi_' + str(sf_idx))
                self.Whi_list.append(Whi_curr)
                bhi_curr = tf.Variable(
                    initial_value=tf.random_uniform([hidden_size_ss[0], 1], minval=-0.1, maxval=0.1),
                    trainable=True, name='bhi_' + str(sf_idx))
                self.bhi_list.append(bhi_curr)
                Whh_curr = tf.Variable(
                    initial_value=tf.random_uniform([hidden_size_ss[1], hidden_size_ss[0]], minval=-0.1, maxval=0.1),
                    trainable=True, name='Whh_' + str(sf_idx))
                self.Whh_list.append(Whh_curr)
                bhh_curr = tf.Variable(
                    initial_value=tf.random_uniform([hidden_size_ss[1], 1], minval=-0.1, maxval=0.1),
                    trainable=True, name='bhh_' + str(sf_idx))
                self.bhh_list.append(bhh_curr)
                Woh_curr = tf.Variable(
                    initial_value=tf.random_uniform([output_size_ss, hidden_size_ss[1]], minval=-0.1, maxval=0.1),
                    trainable=True, name='Woh_' + str(sf_idx))
                self.Woh_list.append(Woh_curr)
                boh_curr = tf.Variable(
                    initial_value=tf.random_uniform([output_size_ss, 1], minval=-0.1, maxval=0.1),
                    trainable=True, name='boh_' + str(sf_idx))
                self.boh_list.append(boh_curr)
            # self.Woo = tf.Variable(
            #     initial_value=tf.random_uniform([output_size_ss * SF_NUM, output_size_ss * SF_NUM], minval=-0.1, maxval=0.1),
            #     trainable=True, name='Woo')
            # self.boo = tf.Variable(
            #     initial_value=tf.random_uniform([output_size_ss * SF_NUM, 1], minval=-0.1, maxval=0.1),
            #     trainable=True, name='boo')
        elif train_ss_flag or train_sf_flag:
            # warm-start the sub-networks from the saved initial model
            for sf_idx in range(SF_NUM):
                Whi_curr = tf.Variable(initial_value=var_dict_nn['Whi_' + str(sf_idx) + ':0'],
                                       trainable=True, name='Whi_' + str(sf_idx))
                self.Whi_list.append(Whi_curr)
                bhi_curr = tf.Variable(initial_value=var_dict_nn['bhi_' + str(sf_idx) + ':0'],
                                       trainable=True, name='bhi_' + str(sf_idx))
                self.bhi_list.append(bhi_curr)
                Whh_curr = tf.Variable(initial_value=var_dict_nn['Whh_' + str(sf_idx) + ':0'],
                                       trainable=True, name='Whh_' + str(sf_idx))
                self.Whh_list.append(Whh_curr)
                bhh_curr = tf.Variable(initial_value=var_dict_nn['bhh_' + str(sf_idx) + ':0'],
                                       trainable=True, name='bhh_' + str(sf_idx))
                self.bhh_list.append(bhh_curr)
                Woh_curr = tf.Variable(initial_value=var_dict_nn['Woh_' + str(sf_idx) + ':0'],
                                       trainable=True, name='Woh_' + str(sf_idx))
                self.Woh_list.append(Woh_curr)
                boh_curr = tf.Variable(initial_value=var_dict_nn['boh_' + str(sf_idx) + ':0'],
                                       trainable=True, name='boh_' + str(sf_idx))
                self.boh_list.append(boh_curr)
            # self.Woo = tf.Variable(initial_value=var_dict_nn['Woo:0'], trainable=True, name='Woo')
            # self.boo = tf.Variable(initial_value=var_dict_nn['boo:0'], trainable=True, name='boo')
        else:
            # load the trained sub-networks and freeze them
            for sf_idx in range(SF_NUM):
                Whi_curr = tf.Variable(initial_value=var_dict_ss['Whi_' + str(sf_idx) + ':0'],
                                       trainable=False, name='Whi_' + str(sf_idx))
                self.Whi_list.append(Whi_curr)
                bhi_curr = tf.Variable(initial_value=var_dict_ss['bhi_' + str(sf_idx) + ':0'],
                                       trainable=False, name='bhi_' + str(sf_idx))
                self.bhi_list.append(bhi_curr)
                Whh_curr = tf.Variable(initial_value=var_dict_ss['Whh_' + str(sf_idx) + ':0'],
                                       trainable=False, name='Whh_' + str(sf_idx))
                self.Whh_list.append(Whh_curr)
                bhh_curr = tf.Variable(initial_value=var_dict_ss['bhh_' + str(sf_idx) + ':0'],
                                       trainable=False, name='bhh_' + str(sf_idx))
                self.bhh_list.append(bhh_curr)
                Woh_curr = tf.Variable(initial_value=var_dict_ss['Woh_' + str(sf_idx) + ':0'],
                                       trainable=False, name='Woh_' + str(sf_idx))
                self.Woh_list.append(Woh_curr)
                boh_curr = tf.Variable(initial_value=var_dict_ss['boh_' + str(sf_idx) + ':0'],
                                       trainable=False, name='boh_' + str(sf_idx))
                self.boh_list.append(boh_curr)
            # self.Woo = tf.Variable(initial_value=var_dict_ss['Woo:0'], trainable=False, name='Woo')
            # self.boo = tf.Variable(initial_value=var_dict_ss['boo:0'], trainable=False, name='boo')
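        # Each sub-network k below implements a two-hidden-layer MLP on its
        # filter's reconstruction x_k (a worked form of the code that follows):
        #   o_k = Woh_k tanh(Whh_k tanh(Whi_k x_k + bhi_k) + bhh_k) + boh_k
        # and the SF_NUM outputs are stacked into the full spatial spectrum.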
        # feed-forward: each sub-network maps its filter's reconstruction to
        # one segment of the spatial spectrum
        self.output_ss_ = []
        for sf_idx in range(SF_NUM):
            input_ss_curr = self.sf_output_list[sf_idx]
            Whi = self.Whi_list[sf_idx]
            bhi = self.bhi_list[sf_idx]
            Whh = self.Whh_list[sf_idx]
            bhh = self.bhh_list[sf_idx]
            Woh = self.Woh_list[sf_idx]
            boh = self.boh_list[sf_idx]
            h1_ = tf.matmul(Whi, input_ss_curr) + bhi
            h1 = tf.tanh(h1_)
            h2_ = tf.matmul(Whh, h1) + bhh
            h2 = tf.tanh(h2_)
            output_ss_curr = tf.matmul(Woh, h2) + boh
            self.output_ss_.append(output_ss_curr)
        self.output_ss_ = tf.concat(self.output_ss_, axis=0)
        # self.output_ss = tf.matmul(self.Woo, tf.tanh(self.output_ss_)) + self.boo
        self.output_ss = self.output_ss_

        # loss and optimizer: the l1 norm of the (already non-negative)
        # elementwise squares is the total squared error over the batch; the
        # outer tf.reduce_mean of that scalar is a no-op
        self.error_ss = self.label_ss - self.output_ss
        self.loss_ss = tf.reduce_mean(tf.norm(tf.square(self.error_ss), ord=1))
        if train_ss_flag:
            self.train_op_ss = tf.train.RMSPropOptimizer(learning_rate=learning_rate_ss).minimize(self.loss_ss)
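
if __name__ == "__main__":
    # Minimal smoke test: a hedged usage sketch, not part of the original
    # training pipeline. It assumes TensorFlow 1.x and uses toy sizes chosen
    # here for illustration only (with reconstruct_nn_flag=True no parameter
    # files are read, so the empty model paths are never touched).
    model = Ensemble_Model(input_size_sf=12, hidden_size_sf=6, output_size_sf=12,
                           SF_NUM=2, learning_rate_sf=1e-3,
                           input_size_ss=12, hidden_size_ss=[8, 6, 4],
                           output_size_ss=5, learning_rate_ss=1e-3,
                           reconstruct_nn_flag=True, train_sf_flag=True, train_ss_flag=True,
                           model_path_nn='', model_path_sf='', model_path_ss='')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        spec = sess.run(model.output_ss,
                        feed_dict={model.data_train_: np.random.randn(3, 12)})
        print(spec.shape)  # (output_size_ss * SF_NUM, batch) = (10, 3)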
--------------------------------------------------------------------------------
/main_ArrayImperfections.py:
--------------------------------------------------------------------------------

import numpy as np
import tensorflow as tf  # requires TensorFlow 1.x (graph mode)
import matplotlib.pyplot as plt
import scipy.linalg as la

from ensemble_model import Ensemble_Model
from utils import (generate_training_data_sf_AI, generate_training_data_ss_AI,
                   generate_array_cov_vector_AI, generate_spec_batches, get_DOA_estimate)

# # array signal parameters
fc = 1e9              # carrier frequency (Hz)
c = 3e8               # speed of light (m/s)
M = 10                # number of array sensors
N = 400               # number of snapshots
wavelength = c / fc   # signal wavelength (m)
d = 0.5 * wavelength  # inter-sensor spacing (m)

# # spatial filter training parameters
doa_min = -60         # minimal DOA (degree)
doa_max = 60          # maximal DOA (degree)
grid_sf = 1           # DOA step (degree) for generating different scenarios
GRID_NUM_SF = int((doa_max - doa_min) / grid_sf)
SF_NUM = 6            # number of spatial filters
SF_SCOPE = (doa_max - doa_min) / SF_NUM  # angular scope of each filter (degree)
SNR_sf = 10
NUM_REPEAT_SF = 1     # number of repeated samplings with random noise

noise_flag_sf = 1     # 0: noise-free; 1: noise-present
amp_or_phase = 0      # show filter amplitude or phase: 0 - amplitude; 1 - phase

# # autoencoder parameters
input_size_sf = M * (M - 1)
hidden_size_sf = int(1 / 2 * input_size_sf)
output_size_sf = input_size_sf
batch_size_sf = 32
num_epoch_sf = 1000
learning_rate_sf = 0.001

# # training set parameters
# SS_SCOPE = SF_SCOPE / SF_NUM  # scope of signal directions
step_ss = 1           # DOA step (degree) for generating different scenarios
K_ss = 2              # number of signals
doa_delta = np.array(np.arange(20) + 1) * 0.1 * SF_SCOPE  # inter-signal direction differences: 2, 4, ..., 40 degrees
SNR_ss = np.array([10, 10, 10])
NUM_REPEAT_SS = 10    # number of repeated samplings with random noise

noise_flag_ss = 1     # 0: noise-free; 1: noise-present

# # DNN parameters
grid_ss = 1           # inter-grid angle in the spatial spectrum
NUM_GRID_SS = int((doa_max - doa_min + 0.5 * grid_ss) / grid_ss)  # number of spectrum grids
L = 2                 # number of hidden layers (only the first two entries of hidden_size_ss are used)
input_size_ss = M * (M - 1)
hidden_size_ss = [int(2 / 3 * input_size_ss), int(4 / 9 * input_size_ss), int(1 / 3 * input_size_ss)]
output_size_ss = int(NUM_GRID_SS / SF_NUM)
batch_size_ss = 32
learning_rate_ss = 0.001
num_epoch_ss = 300

# # test data parameters
test_DOA = np.array([31.5, 41.5])
test_K = len(test_DOA)
test_SNR = np.array([10, 10])

# # retrain the networks or not
reconstruct_nn_flag = True
retrain_sf_flag = True
retrain_ss_flag = True

# # file paths of the neural network parameters
model_path_nn = 'initial_model_AI.npy'
model_path_sf = 'spatialfilter_model_AI.npy'
model_path_ss = 'spatialspectrum_model_AI.npy'

# # array imperfection parameters
mc_flag = True        # mutual coupling
ap_flag = False       # amplitude & phase errors
pos_flag = False      # sensor position errors

rmse_path = 'arrayimperf'
if mc_flag:
    rmse_path += '_mc'
if ap_flag:
    rmse_path += '_ap'
if pos_flag:
    rmse_path += '_pos'
rmse_path += '.npy'

Rho = np.arange(11) * 0.1  # imperfection strengths to sweep: 0.0, 0.1, ..., 1.0
num_epoch_test = 1000
RMSE = []
for rho in Rho:
    # mutual coupling matrix
    if mc_flag:
        mc_para = rho * 0.3 * np.exp(1j * 60 / 180 * np.pi)
        MC_coef = mc_para ** np.array(np.arange(M))
        MC_mtx = la.toeplitz(MC_coef)
    else:
        MC_mtx = np.identity(M)
    # amplitude & phase errors (diagonal matrix)
    if ap_flag:
        amp_coef = rho * np.array([0.0, 0.2, 0.2, 0.2, 0.2, 0.2, -0.2, -0.2, -0.2, -0.2])
        phase_coef = rho * np.array([0.0, -30, -30, -30, -30, -30, 30, 30, 30, 30])
        AP_coef = [(1 + amp_coef[idx]) * np.exp(1j * phase_coef[idx] / 180 * np.pi) for idx in range(M)]
        AP_mtx = np.diag(AP_coef)
    else:
        AP_mtx = np.identity(M)
    # sensor position errors (column vector of offsets)
    if pos_flag:
        pos_para_ = rho * np.array([0.0, -1, -1, -1, -1, -1, 1, 1, 1, 1]) * 0.2 * d
        pos_para = np.expand_dims(pos_para_, axis=-1)
    else:
        pos_para = np.zeros([M, 1])
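    # Worked example of the imperfection model at rho = 1 (the values follow
    # directly from the definitions above):
    #   mc_para  = 0.3 * exp(j60°), so MC_coef = [1, 0.3∠60°, 0.09∠120°, 0.027∠180°, ...]
    #   MC_mtx   = la.toeplitz(MC_coef) is Hermitian Toeplitz (SciPy assumes
    #              r = conj(c) when only c is given): coupling decays
    #              geometrically with sensor separation
    #   AP_mtx   = diag((1 + amp_coef) * exp(j * phase_coef * pi / 180)), per-sensor gain/phase errors
    #   pos_para = per-sensor position offsets of up to 0.2 * d along the array axis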
    # # phase 0: (re)construct the ensemble network and save its initial parameters
    if reconstruct_nn_flag:
        tf.reset_default_graph()
        enmod_0 = Ensemble_Model(input_size_sf=input_size_sf,
                                 hidden_size_sf=hidden_size_sf,
                                 output_size_sf=output_size_sf,
                                 SF_NUM=SF_NUM,
                                 learning_rate_sf=learning_rate_sf,
                                 input_size_ss=input_size_ss,
                                 hidden_size_ss=hidden_size_ss,
                                 output_size_ss=output_size_ss,
                                 learning_rate_ss=learning_rate_ss,
                                 reconstruct_nn_flag=True,
                                 train_sf_flag=True,
                                 train_ss_flag=True,
                                 model_path_nn=model_path_nn,
                                 model_path_sf=model_path_sf,
                                 model_path_ss=model_path_ss)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            var_dict_nn = {}
            for var in tf.trainable_variables():
                value = sess.run(var)
                var_dict_nn[var.name] = value
            np.save(model_path_nn, var_dict_nn)

    # # phase 1: train the multi-task autoencoder for spatial filtering
    if retrain_sf_flag:
        # generate the spatial filter training dataset
        data_train_sf = generate_training_data_sf_AI(M, N, d, wavelength, SNR_sf, doa_min, NUM_REPEAT_SF, grid_sf, GRID_NUM_SF,
                                                     output_size_sf, SF_NUM, SF_SCOPE, MC_mtx, AP_mtx, pos_para)

        tf.reset_default_graph()
        enmod_1 = Ensemble_Model(input_size_sf=input_size_sf,
                                 hidden_size_sf=hidden_size_sf,
                                 output_size_sf=output_size_sf,
                                 SF_NUM=SF_NUM,
                                 learning_rate_sf=learning_rate_sf,
                                 input_size_ss=input_size_ss,
                                 hidden_size_ss=hidden_size_ss,
                                 output_size_ss=output_size_ss,
                                 learning_rate_ss=learning_rate_ss,
                                 reconstruct_nn_flag=False,
                                 train_sf_flag=True,
                                 train_ss_flag=False,
                                 model_path_nn=model_path_nn,
                                 model_path_sf=model_path_sf,
                                 model_path_ss=model_path_ss)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            for epoch in range(num_epoch_sf):
                [data_batches, label_batches] = generate_spec_batches(data_train_sf, batch_size_sf, noise_flag_sf)
                for batch_idx in range(len(data_batches)):
                    data_batch = data_batches[batch_idx]
                    label_batch = label_batches[batch_idx]
                    feed_dict = {enmod_1.data_train_: data_batch, enmod_1.label_sf_: label_batch}
                    _, loss = sess.run([enmod_1.train_op_sf, enmod_1.loss_sf], feed_dict=feed_dict)

                print('Epoch: {}, Batch: {}, loss: {:g}'.format(epoch, batch_idx, loss))

            var_dict_sf = {}
            for var in tf.trainable_variables():
                value = sess.run(var)
                var_dict_sf[var.name] = value
            np.save(model_path_sf, var_dict_sf)
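    # Where Ensemble_Model loads its parameters in each phase (summarized
    # from the flag logic in ensemble_model.py):
    #   phase 0  reconstruct_nn_flag=True:                random init, all trainable -> saved to model_path_nn
    #   phase 1  train_sf_flag=True,  train_ss_flag=False: all parameters loaded trainable from model_path_nn -> saved to model_path_sf
    #   phase 2  train_sf_flag=False, train_ss_flag=True:  autoencoder frozen from model_path_sf,
    #            sub-networks warm-started from model_path_nn -> saved to model_path_ss
    #   test     all flags False:                          autoencoder frozen from model_path_sf,
    #            sub-networks frozen from model_path_ss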
    # # phase 2: train the DNNs for spectrum estimation, with the autoencoder parameters fixed
    if retrain_ss_flag:
        # generate the spatial spectrum training dataset
        data_train_ss = generate_training_data_ss_AI(M, N, K_ss, d, wavelength, SNR_ss, doa_min, doa_max, step_ss, doa_delta,
                                                     NUM_REPEAT_SS, grid_ss, NUM_GRID_SS, MC_mtx, AP_mtx, pos_para)

        tf.reset_default_graph()
        enmod_2 = Ensemble_Model(input_size_sf=input_size_sf,
                                 hidden_size_sf=hidden_size_sf,
                                 output_size_sf=output_size_sf,
                                 SF_NUM=SF_NUM,
                                 learning_rate_sf=learning_rate_sf,
                                 input_size_ss=input_size_ss,
                                 hidden_size_ss=hidden_size_ss,
                                 output_size_ss=output_size_ss,
                                 learning_rate_ss=learning_rate_ss,
                                 reconstruct_nn_flag=False,
                                 train_sf_flag=False,
                                 train_ss_flag=True,
                                 model_path_nn=model_path_nn,
                                 model_path_sf=model_path_sf,
                                 model_path_ss=model_path_ss)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            print('spectrum estimating...')

            # train
            for epoch in range(num_epoch_ss):
                [data_batches, label_batches] = generate_spec_batches(data_train_ss, batch_size_ss, noise_flag_ss)
                for batch_idx in range(len(data_batches)):
                    data_batch = data_batches[batch_idx]
                    label_batch = label_batches[batch_idx]
                    feed_dict = {enmod_2.data_train_: data_batch, enmod_2.label_ss_: label_batch}
                    _, loss_ss = sess.run([enmod_2.train_op_ss, enmod_2.loss_ss], feed_dict=feed_dict)

                print('Epoch: {}, Batch: {}, loss: {:g}'.format(epoch, batch_idx, loss_ss))

            var_dict_ss = {}
            for var in tf.trainable_variables():
                value = sess.run(var)
                var_dict_ss[var.name] = value
            np.save(model_path_ss, var_dict_ss)

    # # test: all parameters loaded and frozen
    tf.reset_default_graph()
    enmod_3 = Ensemble_Model(input_size_sf=input_size_sf,
                             hidden_size_sf=hidden_size_sf,
                             output_size_sf=output_size_sf,
                             SF_NUM=SF_NUM,
                             learning_rate_sf=learning_rate_sf,
                             input_size_ss=input_size_ss,
                             hidden_size_ss=hidden_size_ss,
                             output_size_ss=output_size_ss,
                             learning_rate_ss=learning_rate_ss,
                             reconstruct_nn_flag=False,
                             train_sf_flag=False,
                             train_ss_flag=False,
                             model_path_nn=model_path_nn,
                             model_path_sf=model_path_sf,
                             model_path_ss=model_path_ss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print('testing...')

        est_DOA = []
        MSE_rho = np.zeros([test_K, ])
        for epoch in range(num_epoch_test):
            test_cov_vector = generate_array_cov_vector_AI(M, N, d, wavelength, test_DOA, test_SNR, MC_mtx, AP_mtx, pos_para)
            # feed one sample as a row vector to the input placeholder
            data_batch = np.expand_dims(test_cov_vector, axis=0)
            feed_dict = {enmod_3.data_train_: data_batch}
            ss_output = sess.run(enmod_3.output_ss, feed_dict=feed_dict)
            # note: this clipped spectrum is currently unused below;
            # get_DOA_estimate works on the raw network output
            ss_min = np.min(ss_output)
            ss_output_regularized = [ss if ss > -ss_min else [0.0] for ss in ss_output]

            est_DOA_ii = get_DOA_estimate(ss_output, test_DOA, doa_min, grid_ss)
            est_DOA.append(est_DOA_ii)
            MSE_rho += np.square(est_DOA_ii - test_DOA)
    RMSE_rho = np.sqrt(MSE_rho / num_epoch_test)
    RMSE.append(RMSE_rho)

np.save(rmse_path, RMSE)

plt.figure()
for kk in range(test_K):
    RMSE_kk = [rmse[kk] for rmse in RMSE]
    plt.plot(Rho, RMSE_kk, label='source {}'.format(kk + 1))
plt.xlabel('imperfection strength rho')
plt.ylabel('RMSE (degree)')
plt.legend()
plt.show()
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------

import numpy as np


def generate_spec_batches(data_train, batch_size, noise_flag):
    # shuffle the training set and cut it into mini-batches
    if noise_flag == 0:
        data_ = data_train['input_nf']
    else:
        data_ = data_train['input']
    label_ = data_train['target_spec']
    data_len = len(label_)

    # shuffle data
    shuffle_seq = np.random.permutation(range(data_len))
    data = [data_[idx] for idx in shuffle_seq]
    label = [label_[idx] for idx in shuffle_seq]

    # generate batches (the last partial batch is dropped)
    num_batch = int(data_len / batch_size)
    data_batches = []
    label_batches = []
    for batch_idx in range(num_batch):
        batch_start = batch_idx * batch_size
        batch_end = np.min([(batch_idx + 1) * batch_size, data_len])
        data_batch = data[batch_start : batch_end]
        label_batch = label[batch_start : batch_end]
        data_batches.append(data_batch)
        label_batches.append(label_batch)

    return data_batches, label_batches
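
# Worked batching example (hypothetical sizes): with data_len = 100 and
# batch_size = 32, num_batch = 3, so samples 96..99 of the shuffled epoch are
# dropped and the np.min() guard on batch_end is never binding. A fresh
# shuffle happens on every call, i.e. once per epoch.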

def convert_real_to_complex(real_vector):
    # inverse of the [real, imag] stacking used for the covariance vectors
    vector_len = len(real_vector)

    vector_real = real_vector[:int((vector_len + 1) / 2)]
    vector_imag = real_vector[int((vector_len + 1) / 2):]
    complex_vector = vector_real + 1j * vector_imag

    return complex_vector


def generate_array_cov_vector(M, N, d, wavelength, DOA, SNR):
    # simulate an ideal ULA and return the normalized, vectorized upper
    # triangle (off-diagonal part) of the sample covariance matrix
    K = len(DOA)
    add_noise = np.random.randn(M, N) + 1j * np.random.randn(M, N)
    array_signal = 0

    for ki in range(K):
        signal_i = 10 ** (SNR[ki] / 20) * (np.random.randn(1, N) + 1j * np.random.randn(1, N))
        phase_shift_unit = 2 * np.pi * d / wavelength * np.sin(DOA[ki] / 180 * np.pi)
        a_i_ = np.cos(np.array(range(M)) * phase_shift_unit) + 1j * np.sin(np.array(range(M)) * phase_shift_unit)
        a_i = np.expand_dims(a_i_, axis=-1)
        array_signal_i = np.matmul(a_i, signal_i)
        array_signal += array_signal_i

    array_output = array_signal + add_noise

    array_covariance = 1 / N * np.matmul(array_output, array_output.conj().T)
    cov_vector_ = []
    for row_idx in range(M):
        cov_vector_.extend(array_covariance[row_idx, (row_idx + 1):])
    cov_vector_ = np.asarray(cov_vector_)
    cov_vector_ext = np.concatenate([cov_vector_.real, cov_vector_.imag])
    cov_vector = 1 / np.linalg.norm(cov_vector_ext) * cov_vector_ext

    return cov_vector


def spatial_scan(vector_complex, vector_dictionary):
    # correlate a covariance vector against a dictionary of steering bases
    Spatial_Spectrum = []
    NUM_GRID = len(vector_dictionary)
    for grid_idx in range(NUM_GRID):
        basis_real = vector_dictionary[grid_idx]
        basis_complex = convert_real_to_complex(basis_real)
        corr_i_ = np.matmul(basis_complex.conj().T, vector_complex)
        corr_i = np.abs(corr_i_)
        Spatial_Spectrum.extend(corr_i)

    return Spatial_Spectrum


def generate_target_spectrum(DOA, doa_min, grid, NUM_GRID):
    # build the training target: each source contributes weight to the two
    # grid cells adjacent to its true direction (linear interpolation)
    K = len(DOA)
    target_vector = 0
    for ki in range(K):
        doa_i = DOA[ki]
        target_vector_i_ = []
        grid_idx = 0
        while grid_idx < NUM_GRID:
            grid_pre = doa_min + grid * grid_idx
            grid_post = doa_min + grid * (grid_idx + 1)
            if grid_pre <= doa_i and grid_post > doa_i:
                expand_vec = np.array([grid_post - doa_i, doa_i - grid_pre]) / grid
                grid_idx += 2
            else:
                expand_vec = np.array([0.0])
                grid_idx += 1
            target_vector_i_.extend(expand_vec)
        if len(target_vector_i_) >= NUM_GRID:
            target_vector_i = target_vector_i_[:NUM_GRID]
        else:
            expand_vec = np.zeros(NUM_GRID - len(target_vector_i_))
            target_vector_i = target_vector_i_
            target_vector_i.extend(expand_vec)
        target_vector += np.asarray(target_vector_i)
    # target_vector /= K

    return target_vector
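
# Worked example for generate_target_spectrum (parameters as in
# main_ArrayImperfections.py: doa_min = -60, grid = 1, NUM_GRID = 120): a
# source at 31.5° falls in the cell [31, 32), i.e. between grid indices 91 and
# 92, and expand_vec = [(32 - 31.5), (31.5 - 31)] / 1 = [0.5, 0.5], so the
# target is 0.5 at indices 91 and 92 and 0 elsewhere; a source at 31.2° would
# give weights [0.8, 0.2], the nearer grid point receiving the larger weight.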

def generate_target_svm(DOA, doa_min, grid, NUM_GRID):
    # one-hot (SVM) and direction-valued (SVR) targets on the same grid
    K = len(DOA)
    target_svm = np.zeros([NUM_GRID, 1])
    target_svr = np.zeros([NUM_GRID, 1])
    for ki in range(K):
        doa_i = DOA[ki]
        grid_idx = 0
        while grid_idx < NUM_GRID:
            grid_pre = doa_min + grid * grid_idx
            grid_post = doa_min + grid * (grid_idx + 1)
            if grid_pre <= doa_i and grid_post > doa_i:
                target_svm[grid_idx] = 1
                target_svr[grid_idx] = doa_i
            grid_idx += 1

    return target_svm, target_svr


def generate_training_data_sf(M, N, d, wavelength, SNR, doa_min, NUM_REPEAT_SF, grid, GRID_NUM, output_size, SF_NUM, SF_SCOPE):
    # single-source training data for the spatial-filtering autoencoder
    # (ideal array; see generate_training_data_sf_AI for the imperfect array)
    data_train_sf = {}
    data_train_sf['input_nf'] = []
    data_train_sf['input'] = []
    data_train_sf['target_spec'] = []
    for doa_idx in range(GRID_NUM):
        DOA = doa_min + grid * doa_idx

        for rep_idx in range(NUM_REPEAT_SF):
            add_noise = np.random.randn(M, N) + 1j * np.random.randn(M, N)
            array_signal = 0

            signal_i = 10 ** (SNR / 20) * (np.random.randn(1, N) + 1j * np.random.randn(1, N))
            phase_shift_unit = 2 * np.pi * d / wavelength * np.sin(DOA / 180 * np.pi)
            a_i_ = np.cos(np.array(range(M)) * phase_shift_unit) + 1j * np.sin(np.array(range(M)) * phase_shift_unit)
            a_i = np.expand_dims(a_i_, axis=-1)
            array_signal_i = np.matmul(a_i, signal_i)
            array_signal += array_signal_i

            array_output_nf = array_signal + 0 * add_noise  # noise-free output
            array_output = array_signal + 1 * add_noise

            array_covariance_nf = 1 / N * np.matmul(array_output_nf, array_output_nf.conj().T)
            array_covariance = 1 / N * np.matmul(array_output, array_output.conj().T)
            cov_vector_nf_ = []
            cov_vector_ = []
            for row_idx in range(M):
                cov_vector_nf_.extend(array_covariance_nf[row_idx, (row_idx + 1):])
                cov_vector_.extend(array_covariance[row_idx, (row_idx + 1):])
            cov_vector_nf_ = np.asarray(cov_vector_nf_)
            cov_vector_nf_ext = np.concatenate([cov_vector_nf_.real, cov_vector_nf_.imag])
            cov_vector_nf = 1 / np.linalg.norm(cov_vector_nf_ext) * cov_vector_nf_ext
            data_train_sf['input_nf'].append(cov_vector_nf)
            cov_vector_ = np.asarray(cov_vector_)
            cov_vector_ext = np.concatenate([cov_vector_.real, cov_vector_.imag])
            cov_vector = 1 / np.linalg.norm(cov_vector_ext) * cov_vector_ext
            data_train_sf['input'].append(cov_vector)
            # construct the multi-task autoencoder target: the covariance
            # vector placed in the sub-band containing the DOA, zeros elsewhere
            scope_label = int((DOA - doa_min) / SF_SCOPE)
            target_curr_pre = np.zeros([output_size * scope_label, 1])
            target_curr_post = np.zeros([output_size * (SF_NUM - scope_label - 1), 1])
            target_curr = np.expand_dims(cov_vector, axis=-1)
            target = np.concatenate([target_curr_pre, target_curr, target_curr_post], axis=0)
            data_train_sf['target_spec'].append(np.squeeze(target))

    return data_train_sf
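
# Worked example of the sub-band label: with doa_min = -60, SF_SCOPE = 20 and
# SF_NUM = 6, a source at DOA = 31 gives scope_label = int((31 + 60) / 20) = 4,
# so the covariance vector is written into the fifth of the six
# output_size-long target blocks and the other five blocks stay zero.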

def generate_training_data_ss(M, N, K, d, wavelength, SNR_ss, SNR_DIFF, doa_min, doa_max, step, doa_delta, NUM_REPEAT_SS, grid_ss, NUM_GRID_SS):
    # two-source training data for the spectrum-estimation DNNs (ideal array);
    # note: delta_cum_seq has length 2, so this generator assumes K == 2
    data_train_ss = {}
    data_train_ss['input_nf'] = []
    data_train_ss['input'] = []
    data_train_ss['target_spec'] = []
    SNR0 = SNR_ss[0]
    for snr_diff in SNR_DIFF:
        SNR = [SNR0 - k * snr_diff for k in range(K)]
        for delta_idx in range(len(doa_delta)):
            delta_curr = doa_delta[delta_idx]  # inter-signal direction difference
            delta_cum_seq_ = [delta_curr]      # DOA differences w.r.t. the first signal
            delta_cum_seq = np.concatenate([[0], delta_cum_seq_])  # the first signal included
            delta_sum = np.sum(delta_curr)     # direction difference between the first and last signals
            NUM_STEP = int((doa_max - doa_min - delta_sum) / step)  # number of scanning steps

            for step_idx in range(NUM_STEP):
                doa_first = doa_min + step * step_idx
                DOA = delta_cum_seq + doa_first

                for rep_idx in range(NUM_REPEAT_SS):
                    add_noise = np.random.randn(M, N) + 1j * np.random.randn(M, N)
                    array_signal = 0
                    for ki in range(K):
                        signal_i = 10 ** (SNR[ki] / 20) * (np.random.randn(1, N) + 1j * np.random.randn(1, N))
                        phase_shift_unit = 2 * np.pi * d / wavelength * np.sin(DOA[ki] / 180 * np.pi)
                        a_i_ = np.cos(np.array(range(M)) * phase_shift_unit) + 1j * np.sin(
                            np.array(range(M)) * phase_shift_unit)
                        a_i = np.expand_dims(a_i_, axis=-1)
                        array_signal_i = np.matmul(a_i, signal_i)
                        array_signal += array_signal_i

                    array_output_nf = array_signal + 0 * add_noise  # noise-free output
                    array_output = array_signal + 1 * add_noise

                    array_covariance_nf = 1 / N * np.matmul(array_output_nf, array_output_nf.conj().T)
                    array_covariance = 1 / N * np.matmul(array_output, array_output.conj().T)
                    cov_vector_nf_ = []
                    cov_vector_ = []
                    for row_idx in range(M):
                        cov_vector_nf_.extend(array_covariance_nf[row_idx, (row_idx + 1):])
                        cov_vector_.extend(array_covariance[row_idx, (row_idx + 1):])
                    cov_vector_nf_ = np.asarray(cov_vector_nf_)
                    cov_vector_nf_ext = np.concatenate([cov_vector_nf_.real, cov_vector_nf_.imag])
                    cov_vector_nf = 1 / np.linalg.norm(cov_vector_nf_ext) * cov_vector_nf_ext
                    data_train_ss['input_nf'].append(cov_vector_nf)
                    cov_vector_ = np.asarray(cov_vector_)
                    cov_vector_ext = np.concatenate([cov_vector_.real, cov_vector_.imag])
                    cov_vector = 1 / np.linalg.norm(cov_vector_ext) * cov_vector_ext
                    data_train_ss['input'].append(cov_vector)
                    # construct the spatial spectrum target
                    target_spectrum = generate_target_spectrum(DOA, doa_min, grid_ss, NUM_GRID_SS)
                    data_train_ss['target_spec'].append(target_spectrum)

    return data_train_ss


def smooth_spectrum(ss_orig, smooth_width):
    # moving-average smoothing; the window is truncated at the edges but the
    # sum is still divided by smooth_width, so edge values are attenuated
    ss_len = len(ss_orig)
    half_width = int(smooth_width / 2)
    ss_smooth = []
    for idx in range(ss_len):
        start_loc = np.max([0, idx - half_width])
        end_loc = np.min([ss_len, idx + half_width])
        ss_ave_curr = np.sum(ss_orig[start_loc:end_loc]) / smooth_width
        ss_smooth.append(ss_ave_curr)

    return ss_smooth
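
# Worked example for smooth_spectrum: smooth_spectrum([0, 0, 1, 0, 0], 2) uses
# half_width = 1 and windows [idx - 1, idx + 1), giving [0, 0, 0.5, 0.5, 0]:
# the window is left-biased for even widths, and edge sums are still divided
# by smooth_width, which attenuates values near the spectrum boundaries.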

def get_DOA_estimate(spec, DOA, doa_min, grid):
    # decode DOA estimates from a spectrum (a sequence of length-1 rows): find
    # runs of positive values (peaks), take each peak's energy-weighted
    # centroid, then match centroids to the true DOAs
    K = len(DOA)

    # extract peaks from the spectrum
    peaks = []
    peak_flag = False
    peak_start = 0
    peak_end = 0
    idx = 0
    while idx < len(spec):
        if spec[idx][0] > 0:
            if not peak_flag:
                peak_start = idx
                peak_end = idx
            else:
                peak_end += 1
            peak_flag = True
        else:
            if peak_flag:
                peak_curr = np.array([peak_start, peak_end])
                peaks.append(peak_curr)
                peak_flag = False
        idx += 1

    # estimate directions
    K_est = len(peaks)
    peak_doa_list = []
    peak_amp_list = []
    for ki in range(K_est):
        curr_start = peaks[ki][0]
        curr_end = peaks[ki][1]
        curr_spec = [spec[ii][0] for ii in range(curr_start, curr_end + 1)]
        curr_grid = doa_min + grid * np.arange(curr_start, curr_end + 1)
        curr_amp = np.sum(curr_spec)  # peaks are ranked by total energy
        curr_doa = np.sum(curr_spec * curr_grid) / np.sum(curr_spec)
        peak_doa_list.append(curr_doa)
        peak_amp_list.append(curr_amp)

    # output the DOA estimates
    doa_est = []
    if K_est == 0:
        # no peaks found: fall back to the first true DOA for every source
        for ki in range(K):
            doa_est.append(DOA[0])
    elif K_est <= K:
        # at most as many peaks as sources: assign each source its nearest peak centroid
        for ki in range(K):
            doa_i = DOA[ki]
            est_error = [np.abs(peak_doa - doa_i) for peak_doa in peak_doa_list]
            est_idx = np.argmin(est_error)
            doa_est_i = peak_doa_list[est_idx]
            doa_est.append(doa_est_i)
    else:
        # more peaks than sources: keep the K strongest peaks, then assign
        doa_est_ = []
        for ki in range(K):
            est_idx = np.argmax(peak_amp_list)
            doa_est_i = peak_doa_list[est_idx]
            doa_est_.append(doa_est_i)
            peak_amp_list[est_idx] = -1
        for ki in range(K):
            doa_i = DOA[ki]
            est_error = [np.abs(peak_doa - doa_i) for peak_doa in doa_est_]
            est_idx = np.argmin(est_error)
            doa_est_i = doa_est_[est_idx]
            doa_est.append(doa_est_i)

    return doa_est
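
# The three *_AI helpers below share one imperfect-array steering model (this
# restates the code, with p the M x 1 vector of sensor positions):
#   p    = [0, d, ..., (M - 1) d]^T + pos_para
#   a(θ) = MC_mtx · AP_mtx · exp(j · 2π · p · sin(θ) / λ)
# With MC_mtx = AP_mtx = I and pos_para = 0 this reduces to the ideal ULA
# steering vector used by generate_array_cov_vector above.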

def generate_training_data_sf_AI(M, N, d, wavelength, SNR, doa_min, NUM_REPEAT_SF, grid, GRID_NUM, output_size, SF_NUM, SF_SCOPE, MC_mtx, AP_mtx, pos_para):
    # single-source autoencoder training data for the imperfect array
    data_train_sf = {}
    data_train_sf['input_nf'] = []
    data_train_sf['input'] = []
    data_train_sf['target_spec'] = []
    for doa_idx in range(GRID_NUM):
        DOA = doa_min + grid * doa_idx

        for rep_idx in range(NUM_REPEAT_SF):
            add_noise = np.random.randn(M, N) + 1j * np.random.randn(M, N)
            array_signal = 0

            signal_i = 10 ** (SNR / 20) * (np.random.randn(1, N) + 1j * np.random.randn(1, N))
            # phase_shift_unit = 2 * np.pi * d / wavelength * np.sin(DOA / 180 * np.pi)
            array_geom = np.expand_dims(np.array(np.arange(M)), axis=-1) * d + pos_para
            phase_shift_array = 2 * np.pi * array_geom / wavelength * np.sin(DOA / 180 * np.pi)
            a_i = np.cos(phase_shift_array) + 1j * np.sin(phase_shift_array)
            a_i = np.matmul(AP_mtx, a_i)
            a_i = np.matmul(MC_mtx, a_i)
            array_signal_i = np.matmul(a_i, signal_i)
            array_signal += array_signal_i

            array_output_nf = array_signal + 0 * add_noise  # noise-free output
            array_output = array_signal + 1 * add_noise

            array_covariance_nf = 1 / N * np.matmul(array_output_nf, array_output_nf.conj().T)
            array_covariance = 1 / N * np.matmul(array_output, array_output.conj().T)
            cov_vector_nf_ = []
            cov_vector_ = []
            for row_idx in range(M):
                cov_vector_nf_.extend(array_covariance_nf[row_idx, (row_idx + 1):])
                cov_vector_.extend(array_covariance[row_idx, (row_idx + 1):])
            cov_vector_nf_ = np.asarray(cov_vector_nf_)
            cov_vector_nf_ext = np.concatenate([cov_vector_nf_.real, cov_vector_nf_.imag])
            cov_vector_nf = 1 / np.linalg.norm(cov_vector_nf_ext) * cov_vector_nf_ext
            data_train_sf['input_nf'].append(cov_vector_nf)
            cov_vector_ = np.asarray(cov_vector_)
            cov_vector_ext = np.concatenate([cov_vector_.real, cov_vector_.imag])
            cov_vector = 1 / np.linalg.norm(cov_vector_ext) * cov_vector_ext
            data_train_sf['input'].append(cov_vector)
            # construct the multi-task autoencoder target: the covariance
            # vector placed in the sub-band containing the DOA, zeros elsewhere
            scope_label = int((DOA - doa_min) / SF_SCOPE)
            target_curr_pre = np.zeros([output_size * scope_label, 1])
            target_curr_post = np.zeros([output_size * (SF_NUM - scope_label - 1), 1])
            target_curr = np.expand_dims(cov_vector, axis=-1)
            target = np.concatenate([target_curr_pre, target_curr, target_curr_post], axis=0)
            data_train_sf['target_spec'].append(np.squeeze(target))

    return data_train_sf


def generate_training_data_ss_AI(M, N, K, d, wavelength, SNR, doa_min, doa_max, step, doa_delta, NUM_REPEAT_SS, grid_ss, NUM_GRID_SS, MC_mtx, AP_mtx, pos_para):
    # two-source DNN training data for the imperfect array; as in
    # generate_training_data_ss, delta_cum_seq has length 2 (assumes K == 2)
    data_train_ss = {}
    data_train_ss['input_nf'] = []
    data_train_ss['input'] = []
    data_train_ss['target_spec'] = []
    for delta_idx in range(len(doa_delta)):
        delta_curr = doa_delta[delta_idx]  # inter-signal direction difference
        delta_cum_seq_ = [delta_curr]      # DOA differences w.r.t. the first signal
        delta_cum_seq = np.concatenate([[0], delta_cum_seq_])  # the first signal included
        delta_sum = np.sum(delta_curr)     # direction difference between the first and last signals
        NUM_STEP = int((doa_max - doa_min - delta_sum) / step)  # number of scanning steps

        for step_idx in range(NUM_STEP):
            doa_first = doa_min + step * step_idx
            DOA = delta_cum_seq + doa_first

            for rep_idx in range(NUM_REPEAT_SS):
                add_noise = np.random.randn(M, N) + 1j * np.random.randn(M, N)
                array_signal = 0
                for ki in range(K):
                    signal_i = 10 ** (SNR[ki] / 20) * (np.random.randn(1, N) + 1j * np.random.randn(1, N))
                    # phase_shift_unit = 2 * np.pi * d / wavelength * np.sin(DOA / 180 * np.pi)
                    array_geom = np.expand_dims(np.array(np.arange(M)), axis=-1) * d + pos_para
                    phase_shift_array = 2 * np.pi * array_geom / wavelength * np.sin(DOA[ki] / 180 * np.pi)
                    a_i = np.cos(phase_shift_array) + 1j * np.sin(phase_shift_array)
                    a_i = np.matmul(AP_mtx, a_i)
                    a_i = np.matmul(MC_mtx, a_i)
                    array_signal_i = np.matmul(a_i, signal_i)
                    array_signal += array_signal_i

                array_output_nf = array_signal + 0 * add_noise  # noise-free output
                array_output = array_signal + 1 * add_noise

                array_covariance_nf = 1 / N * np.matmul(array_output_nf, array_output_nf.conj().T)
                array_covariance = 1 / N * np.matmul(array_output, array_output.conj().T)
                cov_vector_nf_ = []
                cov_vector_ = []
                for row_idx in range(M):
                    cov_vector_nf_.extend(array_covariance_nf[row_idx, (row_idx + 1):])
                    cov_vector_.extend(array_covariance[row_idx, (row_idx + 1):])
                cov_vector_nf_ = np.asarray(cov_vector_nf_)
                cov_vector_nf_ext = np.concatenate([cov_vector_nf_.real, cov_vector_nf_.imag])
                cov_vector_nf = 1 / np.linalg.norm(cov_vector_nf_ext) * cov_vector_nf_ext
                data_train_ss['input_nf'].append(cov_vector_nf)
                cov_vector_ = np.asarray(cov_vector_)
                cov_vector_ext = np.concatenate([cov_vector_.real, cov_vector_.imag])
                cov_vector = 1 / np.linalg.norm(cov_vector_ext) * cov_vector_ext
                data_train_ss['input'].append(cov_vector)
                # construct the spatial spectrum target
                target_spectrum = generate_target_spectrum(DOA, doa_min, grid_ss, NUM_GRID_SS)
                data_train_ss['target_spec'].append(target_spectrum)

    return data_train_ss


def generate_array_cov_vector_AI(M, N, d, wavelength, DOA, SNR, MC_mtx, AP_mtx, pos_para):
    # covariance vector of the imperfect array for a set of test DOAs
    K = len(DOA)
    add_noise = np.random.randn(M, N) + 1j * np.random.randn(M, N)
    array_signal = 0

    for ki in range(K):
        signal_i = 10 ** (SNR[ki] / 20) * (np.random.randn(1, N) + 1j * np.random.randn(1, N))
        # phase_shift_unit = 2 * np.pi * d / wavelength * np.sin(DOA / 180 * np.pi)
        array_geom = np.expand_dims(np.array(np.arange(M)), axis=-1) * d + pos_para
        phase_shift_array = 2 * np.pi * array_geom / wavelength * np.sin(DOA[ki] / 180 * np.pi)
        a_i = np.cos(phase_shift_array) + 1j * np.sin(phase_shift_array)
        a_i = np.matmul(AP_mtx, a_i)
        a_i = np.matmul(MC_mtx, a_i)
        array_signal_i = np.matmul(a_i, signal_i)
        array_signal += array_signal_i

    array_output = array_signal + add_noise

    array_covariance = 1 / N * np.matmul(array_output, array_output.conj().T)
    cov_vector_ = []
    for row_idx in range(M):
        cov_vector_.extend(array_covariance[row_idx, (row_idx + 1):])
    cov_vector_ = np.asarray(cov_vector_)
    cov_vector_ext = np.concatenate([cov_vector_.real, cov_vector_.imag])
    cov_vector = 1 / np.linalg.norm(cov_vector_ext) * cov_vector_ext

    return cov_vector
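
if __name__ == "__main__":
    # Self-contained sanity check, a usage sketch rather than part of the
    # pipeline. Parameters mirror main_ArrayImperfections.py; decoding an
    # ideal target spectrum should return the true DOAs exactly.
    M, N, fc, c = 10, 400, 1e9, 3e8
    wavelength = c / fc
    d = 0.5 * wavelength
    doa_min, grid, num_grid = -60, 1, 120
    DOA = [31.5, 41.5]
    cov_vector = generate_array_cov_vector(M, N, d, wavelength, DOA, SNR=[10, 10])
    print(len(cov_vector))  # M * (M - 1) = 90 real-valued features
    spec = np.expand_dims(generate_target_spectrum(DOA, doa_min, grid, num_grid), axis=-1)
    print(get_DOA_estimate(spec, DOA, doa_min, grid))  # [31.5, 41.5]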
--------------------------------------------------------------------------------