├── 20200219 ├── DeepMIMO_generator.m ├── construct_DeepMIMO_channel.m ├── deepmu.asv ├── deepmu.m ├── leave_now │ ├── DeepMIMO_Dataset_Generator.m │ ├── LIS-DeepLearning-master │ │ ├── Fig10_data.mat │ │ ├── Fig10_generator.m │ │ ├── Figure10.png │ │ ├── LICENSE.md │ │ ├── Main_fn.m │ │ └── README.md │ └── UPA_codebook_generator.m ├── read_raytracing.m └── sinr_CONSTRAINT.m ├── DeepMIMO_generator.m ├── README.md ├── alternating_optimization.m ├── construct_DeepMIMO_channel.m ├── drl_IRS.m ├── iter_opt_prob_1.m ├── iter_opt_prob_2.m ├── leave_now ├── DeepMIMO_Dataset_Generator.m ├── LIS-DeepLearning-master │ ├── Fig10_data.mat │ ├── Fig10_generator.m │ ├── Figure10.png │ ├── LICENSE.md │ ├── Main_fn.m │ └── README.md └── UPA_codebook_generator.m ├── main_deepIRS.m ├── old_archive ├── resetfcn_power.m ├── resetfcn_throughput.m ├── stepfcn_power.m └── stepfcn_throughput.m ├── read_raytracing.m ├── resetfcn.m ├── sinr_CONSTRAINT.m └── stepfcn.m /20200219/DeepMIMO_generator.m: -------------------------------------------------------------------------------- 1 | % --------- DeepMIMO: A Generic Dataset for mmWave and massive MIMO ------% 2 | % Author: Ahmed Alkhateeb 3 | % Date: Sept. 
5, 2018 4 | % Goal: Encouraging research on ML/DL for mmWave MIMO applications and 5 | % providing a benchmarking tool for the developed algorithms 6 | % ---------------------------------------------------------------------- % 7 | function [DeepMIMO_dataset,params]=DeepMIMO_generator(params,deepmimo_root_path) 8 | 9 | % -------------------------- DeepMIMO Dataset Generation -----------------% 10 | fprintf(' DeepMIMO Dataset Generation started \n') 11 | % Read scenario parameters 12 | load(strcat(deepmimo_root_path,'/RayTracing Scenarios/',params.scenario,'/',params.scenario,'.params.mat')) 13 | params.num_BS=num_BS; 14 | num_rows=max(min(user_grids(:,2),params.active_user_last)-max(user_grids(:,1),params.active_user_first)+1,0); 15 | params.num_user=sum(num_rows.*user_grids(:,3)); % total number of users 16 | 17 | current_grid=min(find(max(params.active_user_first,user_grids(:,2))==user_grids(:,2))); 18 | user_first=sum((max(min(params.active_user_first,user_grids(:,2))-user_grids(:,1)+1,0)).*user_grids(:,3))-user_grids(current_grid,3)+1; 19 | user_last=user_first+params.num_user-1; 20 | 21 | BW=params.bandwidth*1e9; % Bandwidth in Hz 22 | 23 | % Reading ray tracing data 24 | fprintf(' Reading the channel parameters of the ray-tracing scenario %s', params.scenario) 25 | count_done=0; 26 | reverseStr=0; 27 | percentDone = 100 * count_done / length(params.active_BS); 28 | msg = sprintf('- Percent done: %3.1f', percentDone); %Don't forget this semicolon 29 | fprintf([reverseStr, msg]); 30 | reverseStr = repmat(sprintf('\b'), 1, length(msg)); 31 | 32 | for t=1:1:params.num_BS 33 | if sum(t == params.active_BS) ==1 34 | filename_DoD=strcat(deepmimo_root_path,'/RayTracing Scenarios/',params.scenario,'/',params.scenario,'.',int2str(t),'.DoD.mat'); 35 | filename_CIR=strcat(deepmimo_root_path,'/RayTracing Scenarios/',params.scenario,'/',params.scenario,'.',int2str(t),'.CIR.mat'); 36 | filename_Loc=strcat(deepmimo_root_path,'/RayTracing 
Scenarios/',params.scenario,'/',params.scenario,'.Loc.mat'); 37 | [TX{t}.channel_params]=read_raytracing(filename_DoD,filename_CIR,filename_Loc,params.num_paths,user_first,user_last); 38 | 39 | count_done=count_done+1; 40 | percentDone = 100 * count_done / length(params.active_BS); 41 | msg = sprintf('- Percent done: %3.1f', percentDone); %Don't forget this semicolon 42 | fprintf([reverseStr, msg]); 43 | reverseStr = repmat(sprintf('\b'), 1, length(msg)); 44 | end 45 | end 46 | 47 | % Constructing the channel matrices 48 | TX_count=0; 49 | for t=1:1:params.num_BS 50 | if sum(t == params.active_BS) ==1 51 | fprintf('\n Constructing the DeepMIMO Dataset for BS %d', t) 52 | reverseStr=0; 53 | percentDone = 0; 54 | msg = sprintf('- Percent done: %3.1f', percentDone); %Don't forget this semicolon 55 | fprintf([reverseStr, msg]); 56 | reverseStr = repmat(sprintf('\b'), 1, length(msg)); 57 | TX_count=TX_count+1; 58 | for user=1:1:params.num_user 59 | [DeepMIMO_dataset{TX_count}.user{user}.channel]=construct_DeepMIMO_channel(TX{t}.channel_params(user),params.num_ant_x,params.num_ant_y,params.num_ant_z, ... 
60 | BW,params.num_OFDM,params.OFDM_sampling_factor,params.OFDM_limit,params.ant_spacing); 61 | DeepMIMO_dataset{TX_count}.user{user}.loc=TX{t}.channel_params(user).loc; 62 | 63 | percentDone = 100* round(user / params.num_user,2); 64 | msg = sprintf('- Percent done: %3.1f', round(percentDone,2)); %Don't forget this semicolon 65 | fprintf([reverseStr, msg]); 66 | reverseStr = repmat(sprintf('\b'), 1, length(msg)); 67 | end 68 | end 69 | end 70 | 71 | if params.saveDataset==1 72 | sfile_DeepMIMO=strcat(deepmimo_root_path,'/DeepMIMO Dataset/DeepMIMO_dataset.mat'); 73 | save(sfile_DeepMIMO,'DeepMIMO_dataset','-v7.3'); 74 | end 75 | 76 | fprintf('\n DeepMIMO Dataset Generation completed \n') -------------------------------------------------------------------------------- /20200219/construct_DeepMIMO_channel.m: -------------------------------------------------------------------------------- 1 | % --------- DeepMIMO: A Generic Dataset for mmWave and massive MIMO ------% 2 | % Author: Ahmed Alkhateeb 3 | % Date: Sept. 5, 2018 4 | % Goal: Encouraging research on ML/DL for mmWave MIMO applications and 5 | % providing a benchmarking tool for the developed algorithms 6 | % ---------------------------------------------------------------------- % 7 | function [channel]=construct_DeepMIMO_channel(params,num_ant_x,num_ant_y,num_ant_z,BW,... 
8 | ofdm_num_subcarriers,output_subcarrier_downsampling_factor,output_subcarrier_limit,antenna_spacing_wavelength_ratio) 9 | 10 | kd=2*pi*antenna_spacing_wavelength_ratio; 11 | ang_conv=pi/180; 12 | Ts=1/BW; 13 | 14 | Mx_Ind=0:1:num_ant_x-1; 15 | My_Ind=0:1:num_ant_y-1; 16 | Mz_Ind=0:1:num_ant_z-1; 17 | Mxx_Ind=repmat(Mx_Ind,1,num_ant_y*num_ant_z)'; 18 | Myy_Ind=repmat(reshape(repmat(My_Ind,num_ant_x,1),1,num_ant_x*num_ant_y),1,num_ant_z)'; 19 | Mzz_Ind=reshape(repmat(Mz_Ind,num_ant_x*num_ant_y,1),1,num_ant_x*num_ant_y*num_ant_z)'; 20 | M=num_ant_x*num_ant_y*num_ant_z; 21 | 22 | k=0:output_subcarrier_downsampling_factor:output_subcarrier_limit-1; 23 | num_sampled_subcarriers=length(k); 24 | channel=zeros(M,num_sampled_subcarriers); 25 | 26 | for l=1:1:params.num_paths 27 | gamma_x=1j*kd*sin(params.DoD_theta(l)*ang_conv)*cos(params.DoD_phi(l)*ang_conv); 28 | gamma_y=1j*kd*sin(params.DoD_theta(l)*ang_conv)*sin(params.DoD_phi(l)*ang_conv); 29 | gamma_z=1j*kd*cos(params.DoD_theta(l)*ang_conv); 30 | gamma_comb=Mxx_Ind*gamma_x+Myy_Ind*gamma_y + Mzz_Ind*gamma_z; 31 | array_response=exp(gamma_comb); 32 | delay_normalized=params.ToA(l)/Ts; 33 | channel=channel+array_response*sqrt(params.power(l)/ofdm_num_subcarriers)*exp(1j*params.phase(l)*ang_conv)*exp(-1j*2*pi*(k/ofdm_num_subcarriers)*delay_normalized); 34 | end 35 | 36 | end -------------------------------------------------------------------------------- /20200219/deepmu.asv: -------------------------------------------------------------------------------- 1 | %% Code Description and Credits 2 | % This code is to simulate an ML-Driven IRS-aided communication setting, 3 | % where for now, 2 single antenna users communicate with a 2-antenna BS via 4 | % an IRS with M reflecting elements. 5 | 6 | % Downlink for now. 7 | 8 | % This code is aided by that of the DeepMIMO dataset developed by A. 9 | % Alkhateeb et al. 
10 | 11 | % It implements the optimization algorithms of the paper below 12 | % [R1] Qingqing Wu, Rui Zhang, "Intelligent Reflecting Surface Enhanced Wireless 13 | % Network via Joint Active and Passive Beamforming", in IEEE Transactions on 14 | % Wireless Communications, Nov. 2019. 15 | 16 | % This paper optimizes the active beamforming at the BS and the passive 17 | % reflection at the IRS to minimize the total transmit power under SINR 18 | % QoS constraints. 19 | 20 | % It hence trains and tests a deep neural network (DNN) on the optimized outputs 21 | % and compares the results to the optimized benchmark. The DNN results can 22 | % be obtained in a more timely manner than those obtained via optimization. 23 | 24 | %% Simulation Parameters 25 | clear 26 | close all 27 | 28 | % ---------- Base Station (BS) ----------- 29 | % BS is one user with the following row and column indices 30 | Ut_row = 850; % user Ut row number 31 | Ut_element = 90; % user Ut col number 32 | % yet to add its MIMO functionality 33 | N_BS = 5; % Number of BS antennas 34 | %Pt = 100; % Transmit power in dBm 35 | 36 | % ----------- Users ----------- 37 | N_users= 2; % Number of Users 38 | % Users will be randomized from the following region (between the following rows) 39 | % each row contains 181 possible user locations 40 | Ur_rows = [1000 1300]; % user Ur rows 41 | No_user_pairs = (Ur_rows(2)-Ur_rows(1))*181; % Number of (Ut,Ur) user pairs %%%%%%%%% GENERALIZE LATER %%%% 42 | % Which row is ommitted? Starting or ending 1? otherwise No_user_pairs 43 | % should be this value +1 --> Last row is not neglected .. ok .. 
from 44 | % Ur_rows(1) till Ur_rows(2)-1 according to the channel generation code below 45 | RandP_all = randperm(No_user_pairs).'; % Random permutation of the available dataset 46 | 47 | all_users = 1:1:N_users; % vector of all user indices 48 | 49 | int_users_matrix = meshgrid(all_users).'; % indices of interfering users for each user 50 | int_users_matrix(1:N_users+1:N_users^2) = []; 51 | int_users_matrix = reshape(int_users_matrix, N_users-1, N_users).'; 52 | 53 | % ----------- IRS ----------- 54 | % at (one of the dataset BS *locations* - considered passive here of course) 55 | % Also known as LIS: Large Intelligent Surface 56 | % Note: The axes of the antennas match the axes of the ray-tracing scenario 57 | My = 6; % number of LIS reflecting elements across the y axis 58 | Mz = 6; % number of LIS reflecting elements across the z axis 59 | Mx = 1; % number of LIS reflecting elements across the x axis 60 | M = Mx.*My.*Mz; % Total number of LIS reflecting elements 61 | % M_bar=8; % number of active elements, not used here so far 62 | % IRS is assumed at one of the BS locations in the O1 scenario as given by 63 | % params.active_BS. 64 | params.active_BS=3; % active basestation(/s) in the chosen scenario 65 | 66 | % Ray tracing parameters 67 | params.scenario='O1_60'; % DeepMIMO Dataset scenario: http://deepmimo.net/ 68 | L =1; % number of channel paths (L) .. 
L=1 returns a narrowband channel with no ISI 69 | kbeams=1; % select the top kbeams, get their feedback and find the max actual achievable rate 70 | D_Lambda = 0.5; % Antenna spacing relative to the wavelength 71 | BW = 100e6; % Bandwidth in Hz (ex: 100e6 --> 100 MHz) 72 | 73 | % Not used parameters so far since the system is not OFDM 74 | % K_DL=64; % number of subcarriers as input to the Deep Learning model 75 | % Validation_Size = 6200; % Validation dataset Size 76 | % K = 512; % number of subcarriers 77 | K=1; % No OFDM 78 | K_DL =1; 79 | 80 | sim_len = 1e2; % Number of generated different multiuser scenarios 81 | 82 | %% Channel Generation 83 | % Using the DeepMIMO Dataset by Alkhateeb et al. 84 | 85 | % Select which much the code is running on 86 | personal = 1; 87 | if personal == 1 88 | % Personal machine 89 | deepmimo_root_path= 'C:/Khafagy/DeepMIMO'; 90 | code_folder = 'C:/Users/Mohammad/Google Drive (mgkhafagy@aucegypt.edu)/MATLAB Codes'; 91 | elseif personal == 0 92 | % Research Lab Workstation 93 | deepmimo_root_path= 'D:/Khafagy/DeepMIMO'; 94 | code_folder = 'C:/Users/Dr. M-Khafagy/Google Drive/MATLAB Codes'; 95 | end 96 | cd(code_folder) 97 | 98 | % for dataset retrieval and storage from/to local server (not on the cloud) 99 | 100 | % %% Beamforming Codebook (CALLING UPA_codebook_generator) 101 | % disp('=============GENERATING REFLECTION MATRIX CODEBOOK==========='); 102 | % % BF codebook parameters 103 | % over_sampling_x=1; % The beamsteering oversampling factor in the x direction 104 | % over_sampling_y=1; % The beamsteering oversampling factor in the y direction 105 | % over_sampling_z=1; % The beamsteering oversampling factor in the z direction 106 | % 107 | % % Generating the BF codebook 108 | % % [BF_codebook]=sqrt(Mx*My*Mz)*... 
109 | % % UPA_codebook_generator(Mx,My,Mz,over_sampling_x,over_sampling_y,over_sampling_z,D_Lambda); 110 | % % codebook_size=size(BF_codebook,2); 111 | 112 | %% DeepMIMO Dataset Generation (CALLING DeepMIMO_generator) 113 | % DeepMIMO_generator calls read_raytracing then construct_DeepMIMO_channel 114 | % These code files are created by Alkhateeb et al. 115 | disp('===============GENERATING DEEPMIMO DATASET==================='); 116 | %disp('-------------------------------------------------------------'); 117 | %disp([' Calculating for K_DL = ' num2str(K_DL)]); 118 | % ------ Inputs to the DeepMIMO dataset generation code ------------ % 119 | % Note: The axes of the antennas match the axes of the ray-tracing scenario 120 | params.num_ant_x= Mx; % Number of the UPA antenna array on the x-axis 121 | params.num_ant_y= My; % Number of the UPA antenna array on the y-axis 122 | params.num_ant_z= Mz; % Number of the UPA antenna array on the z-axis 123 | params.ant_spacing=D_Lambda; % ratio of the wavelnegth; for half wavelength enter .5 124 | params.bandwidth= BW*1e-9; % The bandiwdth in GHz 125 | params.num_OFDM= K; % Number of OFDM subcarriers 126 | params.OFDM_sampling_factor=1; % The constructed channels will be calculated only at the sampled subcarriers (to reduce the size of the dataset) 127 | params.OFDM_limit=K_DL*1; % Only the first params.OFDM_limit subcarriers will be considered when constructing the channels 128 | params.num_paths=L; % Maximum number of paths to be considered (a value between 1 and 25), e.g., choose 1 if you are only interested in the strongest path 129 | params.saveDataset=0; 130 | disp([' Calculating for L = ' num2str(params.num_paths)]); 131 | 132 | %% BS-IRS Channels 133 | disp('==========Generating Transmit BS-IRS Full Channel============'); 134 | % ------------------ DeepMIMO "Ut" Dataset Generation ----------------- % 135 | params.active_user_first=Ut_row; 136 | params.active_user_last=Ut_row; % Only one active user (but where is 
Ut_element to fully specify the user??) -- see below 137 | DeepMIMO_dataset=DeepMIMO_generator(params,deepmimo_root_path); % Generator function generates data for entire rows 138 | %Ht = single(DeepMIMO_dataset{1}.user{Ut_element}.channel); % Selecting element of interest here 139 | Ht = DeepMIMO_dataset{1}.user{Ut_element}.channel; % Selecting element of interest here 140 | 141 | clear DeepMIMO_dataset 142 | 143 | % ----------- Add BS MIMO Functionality here ------- 144 | % Remember to later randomize the transmitter as well, so that the neural 145 | % network is not a function of a fixed BS-IRS channel 146 | 147 | % Adjust size for now (simply replicate), then fix the MIMO functionality later 148 | %Ht = repmat(Ht,1, N_BS); 149 | Ht = 1e-2/sqrt(2)*(randn(M, N_BS)+1i*randn(M, N_BS)); 150 | 151 | %% IRS - Receiver Channels 152 | disp('===========Generating IRS-Receiver Full Channels============='); 153 | % ------------------ DeepMIMO "Ur" Dataset Generation -----------------% 154 | %initialization 155 | Ur_rows_step = 300; % access the dataset 100 rows at a time 156 | Ur_rows_grid=Ur_rows(1):Ur_rows_step:Ur_rows(2); 157 | Delta_H_max = single(0); 158 | for pp = 1:1:numel(Ur_rows_grid)-1 % loop for Normalizing H 159 | clear DeepMIMO_dataset 160 | params.active_user_first=Ur_rows_grid(pp); 161 | params.active_user_last=Ur_rows_grid(pp+1)-1; 162 | disp(['=== User Row Batch ' num2str(pp) ' out of ' num2str(numel(Ur_rows_grid)-1) ', each holding ' num2str(Ur_rows_step) ' rows =====']) 163 | [DeepMIMO_dataset,params]=DeepMIMO_generator(params,deepmimo_root_path); 164 | for u=1:params.num_user % seems to be hard-coded as rows*181 already 165 | Hr = single(conj(DeepMIMO_dataset{1}.user{u}.channel)); % conjugated since it is now downlink 166 | Delta_H = max(max(abs(Ht.*Hr))); 167 | if Delta_H >= Delta_H_max 168 | Delta_H_max = single(Delta_H); % storing the maximum absolute value of the end-to-end product channel for later normalization 169 | end 170 | end 171 | end 172 | 
clear Delta_H 173 | 174 | %% Loop over different user permutations and store optimized solutions 175 | ML_dataset{sim_len} = {}; % Store channels, locations, and solutions 176 | user_loc{N_users} = {}; 177 | 178 | % Fix seed 179 | rng(1); 180 | 181 | disp('Looping over different multi-user patterns and generating optimized matrices') 182 | for sim_index = 1:sim_len 183 | disp(['=== User pattern ' num2str(sim_index) ' out of ' num2str(sim_len) ' =====']) 184 | ML_dataset{sim_index}.Ht = Ht; % Store transmit (1st hop) channel 185 | 186 | % Select N_users random user indices 187 | clear Hr 188 | Hr{N_users} = []; 189 | user_loc{N_users} = {}; 190 | users = randperm(params.num_user, N_users); 191 | for user_ind = 1:N_users 192 | Hr{user_ind} = DeepMIMO_dataset{1}.user{users(user_ind)}.channel; 193 | user_loc{user_ind} = DeepMIMO_dataset{1}.user{users(user_ind)}.loc; 194 | end 195 | Hr = [Hr{:}]; 196 | 197 | ML_dataset{sim_index}.Hr = Hr; % Store receive (2nd hop) channel 198 | ML_dataset{sim_index}.user_loc = [user_loc{:}]; % Store user_locations 199 | 200 | % Implement Optimization algorithm here 201 | 202 | % Let the direct channel be denoted by Hsd (source to destination) 203 | % Since no direct link is assumed here, put as an all zero matrix 204 | %Hd = zeros(N_BS, N_users); 205 | Hd = 1e-10/sqrt(2)*(randn(N_BS, N_users)+1i*randn(N_BS, N_users)); 206 | 207 | ML_dataset{sim_index}.Hd = Hd; % Store direct channel 208 | 209 | sigma_2_dBm = -80; % Noise variance in dBm 210 | sigma_2 = 10^(sigma_2_dBm/10) * 1e-3; % (in Watts) 211 | 212 | % SINR target 213 | SINR_target_dB = 15; % Check: changing target SINR should change the transmit power (the cvx_optval objective value) 214 | SINR_target = 10^(SINR_target_dB/10); 215 | 216 | 217 | % Alternating optimization algorithm (Algorithm 1 in [R1]) 218 | % Set error threshold for alternating algorithm termination 219 | eps_iter=1e0; 220 | frac_error=1e10; % Initialize fractional error 221 | obj_last = 1e3; % Initialize last 
objective value to a large number 222 | 223 | disp('Running alternating optimization algorithm') 224 | r=1; % iteration index 225 | % Initialize reflection matrix theta 226 | beta_vec = ones(M,1); % Fixed to 1 for now as in the paper 227 | theta_vec = ones(M,1); % 2*pi*rand(M,1); % Uniformly randomized from 0 to 2*pi 228 | theta_mat= diag(beta_vec.*exp(1i*theta_vec)); 229 | 230 | % Check rank criterion for feasbility of the initial theta choice 231 | while ~(rank(Ht'*theta_mat*Hr + Hd) == N_users) % if infeasible choice, randomize and check again 232 | disp('infeasible initial choice of theta, .. reselecting ..') 233 | theta_vec = 2*pi*rand(M,1); % Uniformly randomized from 0 to 2*pi 234 | theta_mat= diag(beta_vec.*exp(1i*theta_vec)); 235 | end 236 | 237 | cvx_status = 'nothing'; % initialize 238 | 239 | while (frac_error > eps_iter) && ~contains(cvx_status,'Infeasible','IgnoreCase',true) 240 | if mod(r,1e2)==0 241 | disp(['Iteration r =' num2str(r)]) 242 | end 243 | 244 | % ==== Optimize W while fixing theta ==== BS Transmit Beamforming 245 | disp('Active Beamformer Design') 246 | 247 | cvx_clear 248 | clear W tau INTERFERENCE obj_fn1 cvx_optval 249 | 250 | cvx_begin 251 | cvx_quiet(true) 252 | cvx_solver SeDuMi %SDPT3 %Mosek % Choose the underlying solver 253 | cvx_precision best % Change the cvx numerical precision 254 | 255 | % Define your optimization variables here 256 | variable W(N_BS,N_users) complex; % add the word binary for binary constraints - see CVX documentation for more options 257 | variable tau nonnegative; % Auxiliary variable 258 | expressions INTERFERENCE(N_users,N_users); 259 | 260 | for k = all_users 261 | int_users = int_users_matrix(k,:); % interfering users 262 | INTERFERENCE(:,k) = [ ... 
263 | W(:,int_users)'*(Ht'*(theta_mat')*(Hr(:,k)) + (Hd(:,k))); 264 | sqrt(sigma_2)]; 265 | end 266 | 267 | % Write the optimization problem 268 | minimize( tau^2 ); 269 | subject to 270 | for k = all_users 271 | {sqrt(SINR_target)*INTERFERENCE(:,k), real(((Hr(:,k)'*theta_mat*Ht + Hd(:,k)')*W(:,k)))} == complex_lorentz(N_users); % SINR CONSTRAINT 272 | end 273 | {W(:), tau} == complex_lorentz(N_BS * N_users); % POWER CONSTRAINT 274 | cvx_end 275 | disp(['CVX Status: ' cvx_status ', CVX_optval = ' num2str(10*log10(cvx_optval*1000)) ' dBm']) 276 | 277 | frac_error = abs(obj_last - cvx_optval)/obj_last *100; 278 | obj_last = cvx_optval; 279 | 280 | achieved_SINR = zeros(1,N_users); 281 | % Actual achieved SINR 282 | for k = all_users 283 | achieved_SINR(k) = (norm((Hr(:,k)'*theta_mat*Ht+ Hd(:,k)')*W(:,k)))^2/(norm(INTERFERENCE(:,k)))^2; 284 | end 285 | %achieved_SINR 286 | 287 | % ==== Optimize theta while fixing W ==== IRS Reflection Matrix 288 | % (P4') in paper 289 | disp('Passive Beamformer Design') 290 | 291 | % Define a, b and R 292 | a = cell(N_users,N_users); 293 | b = cell(N_users,N_users); 294 | R = cell(N_users,N_users); 295 | for k = all_users % looping over all users 296 | int_users = int_users_matrix(k,:); % interfering users 297 | a{k,k}= diag(Hr(:,k)')*Ht*W(:,k); 298 | b{k,k}= Hd(:,k)'*W(:,k); 299 | R{k,k}= [ a{k,k}* (a{k,k}') a{k,k}* (b{k,k}') ; a{k,k}'* b{k,k} 0]; 300 | for m = int_users 301 | a{k,m}= diag(Hr(:,k)')*Ht*W(:,m); 302 | b{k,m}= Hd(:,k)'*W(:,m); 303 | R{k,m}= [ a{k,m}* (a{k,m}') a{k,m}* (b{k,m}') ; a{k,m}'* b{k,m} 0]; 304 | end 305 | end 306 | 307 | cvx_begin sdp 308 | cvx_quiet(true) 309 | cvx_solver SDPT3 %SeDuMi %SDPT3 %Mosek % Choose the underlying solver 310 | cvx_precision best % Change the cvx numerical precision 311 | 312 | variable V(M+1,M+1) complex semidefinite; 313 | variable a_aux(1,N_users) nonnegative; % Auxiliary variables for max sum 314 | %variable a_aux nonnegative; % Auxiliary variables for max min 315 | expressions 
SINR_CONSTR(N_users) desired(N_users) interference(N_users); 316 | 317 | % Define the expressions desired, interference, and SINR_CONSTR in terms of the optimization variables 318 | %sinr_fun_handle = @sinr_CONSTRAINT; 319 | 320 | %[desired, interference, SINR_CONSTR] = sinr_fun_handle(V, b, R, SINR_target, sigma_2, all_users, int_users_matrix); 321 | for k = all_users % looping over all users 322 | int_users = int_users_matrix(k,:); % interfering users 323 | desired(k) = trace(real(R{k,k}*V)) + square_abs(b{k,k}); 324 | interference(k) = 0; 325 | for m = int_users 326 | interference(k) = interference(k) + trace(real(R{k,m}*V)) + square_abs(b{k,m}); 327 | end 328 | SINR_CONSTR(k) = desired(k) - a_aux(k) - SINR_target * (interference(k) + sigma_2); 329 | end 330 | 331 | %all_elements = 1:M+1; 332 | 333 | % Write the optimization problem 334 | maximize( sum(a_aux) ); 335 | subject to 336 | %SINR_CONSTR == nonnegative(N_users) 337 | for k = 1:N_users 338 | desired(k) >= a_aux(k) + SINR_target * (interference(k) + sigma_2); 339 | %sqrt(SINR_CONSTR(k)) >= 0 340 | end 341 | diag(V) == ones(M+1,1); 342 | % Other 2 constraints are already in the definitions of opt variables 343 | %obj_fn2 >= 0; % Dummy constraint to check/prevent the resulting -ve cvx_optval 344 | cvx_end 345 | 346 | disp(['CVX Status: ' cvx_status]) 347 | 348 | if ~contains(cvx_status,'Infeasible','IgnoreCase',true) 349 | disp('Running Gaussian Randomization') 350 | [U,D] = eig(full(V)); % Eigenvalue Decomposition 351 | if rank(full(V)) == 1 352 | v_bar = U*sqrt(D); 353 | theta_vec = angle(v_bar(1:M)/v_bar(M+1)); 354 | v = exp(1i*theta_vec); 355 | theta_mat = diag(v); 356 | 357 | else % Apply Gaussian Randomization 358 | 359 | num_rands = 1e3; % number of randomizations 360 | 361 | % Generate Gaussian random vector ~ CN(0, I) 362 | %gpudev = gpuDevice(); 363 | %reset(gpudev); 364 | r_vec_matrix = (1/sqrt(2))*((mvnrnd(zeros(M+1,1),eye(M+1),num_rands) + 1i * mvnrnd(zeros(M+1,1),eye(M+1), num_rands)).'); 
%gpuArray() 365 | v_bar_matrix = U*sqrt(D)*r_vec_matrix; 366 | 367 | best_index = 0; 368 | best_value = -1e8; 369 | %v_bar_matrix = exp(1i*2*pi*rand(M+1,num_rands)); 370 | 371 | for randmzn_index = 1:num_rands 372 | v_bar_vec = v_bar_matrix(:,randmzn_index); 373 | V_rand = v_bar_vec*(v_bar_vec'); 374 | 375 | [desired, interference, constr_value] = sinr_CONSTRAINT(V_rand, b, R, SINR_target, sigma_2, all_users, int_users_matrix); 376 | 377 | % Check feasibility and best value 378 | feasibility_check = prod( constr_value >= a_aux ); 379 | better_value_check = (sum(constr_value) > best_value); 380 | if feasibility_check && better_value_check 381 | best_index = randmzn_index; 382 | best_value = sum(constr_value); 383 | end 384 | end 385 | 386 | if best_index ~= 0 387 | % select best v_bar that maximizes SINR_CONSTR 388 | v_bar = v_bar_matrix(:,best_index); 389 | theta_vec = angle(v_bar(1:M)/v_bar(M+1)); 390 | v = exp(-1i*theta_vec); 391 | theta_mat = diag(v); 392 | else 393 | cvx_status = 'Infeasible'; 394 | end 395 | disp(['CVX Status after randomization: ' cvx_status]) 396 | end 397 | end 398 | 399 | % Increment iteration index 400 | r = r+1; 401 | end 402 | 403 | ML_dataset{sim_index}.W = W; % Store Transmit Beamformer 404 | ML_dataset{sim_index}.theta = diag(theta_mat); % Store Reflection Matrix Diagonal 405 | ML_dataset{sim_index}.iterations = r-1; 406 | 407 | % ----------- end iterative algorithm ------------------ 408 | 409 | end 410 | save([deepmimo_root_path '/saved_datasets.mat'], 'ML_dataset') 411 | 412 | %% Build Neural Network here 413 | 414 | % For regression neural network, we can directly use newgr 415 | 416 | % Prepare INPUT and OUTPUT matrices 417 | INPUT = zeros(sim_len,2*(M * N_users + M * N_BS + N_BS* N_users)); % The 3 vectorized channel matrices 418 | OUTPUT = zeros(sim_len, 2*(M^2 + N_BS* N_users)); % Vectorized beamformers 419 | iterations = zeros(sim_len,1); 420 | % Generalized Regression Neural Networks in MATLAB 421 | for loop_index = 
1:sim_len 422 | TEMP = ML_dataset{loop_index}; 423 | INPUT(loop_index,:) = [real(TEMP.Ht(:)); imag(TEMP.Ht(:)); 424 | real(TEMP.Hr(:)); imag(TEMP.Hr(:)); 425 | real(TEMP.Hd(:)); imag(TEMP.Hd(:))].'; 426 | OUTPUT(loop_index,:) = [real(TEMP.W(:)); imag(TEMP.W(:)); 427 | real(TEMP.theta(:)); imag(TEMP.theta(:))].'; 428 | iterations(loop_index) = TEMP.iterations; 429 | end 430 | 431 | net = newgrnn(INPUT.',OUTPUT.'); 432 | y = net(INPUT.').'; 433 | 434 | % Training_Size=[2 1e4*(1:.4:3)]; % Training Dataset Size vector 435 | % % Should be made a function of sim_len: the size of our stored optimized 436 | % % data, which will be split into training, testing, and validation 437 | % 438 | % Validation_Size = 6200; % Validation dataset Size 439 | % miniBatchSize = 500; % Size of the minibatch for the Deep Learning 440 | 441 | % disp('======================DL BEAMFORMING========================='); 442 | % % ------------------ Training and Testing Datasets -----------------% 443 | % for dd=1:1:numel(Training_Size) 444 | % disp([' Calculating for Dataset Size = ' num2str(Training_Size(dd))]); 445 | % Training_Ind = RandP_all(1:Training_Size(dd)); 446 | % 447 | % XTrain = single(DL_input_reshaped(:,1,1,Training_Ind)); 448 | % YTrain = single(DL_output_reshaped(1,1,:,Training_Ind)); 449 | % XValidation = single(DL_input_reshaped(:,1,1,Validation_Ind)); 450 | % YValidation = single(DL_output_reshaped(1,1,:,Validation_Ind)); 451 | % YValidation_un = single(DL_output_reshaped_un); 452 | % 453 | % % ------------------ DL Model definition -----------------% 454 | % % Layers 455 | % layers = [ 456 | % % INPUT Layer 457 | % imageInputLayer([size(XTrain,1),1,1],'Name','input') 458 | % 459 | % % Fully Connected Layer 1 with Dropout 460 | % fullyConnectedLayer(size(YTrain,3),'Name','Fully1') 461 | % reluLayer('Name','relu1') 462 | % dropoutLayer(0.5,'Name','dropout1') 463 | % 464 | % % Fully Connected Layer 2 with Dropout 465 | % fullyConnectedLayer(4*size(YTrain,3),'Name','Fully2') 466 
| % reluLayer('Name','relu2') 467 | % dropoutLayer(0.5,'Name','dropout2') 468 | % 469 | % % Fully Connected Layer 3 with Dropout 470 | % fullyConnectedLayer(4*size(YTrain,3),'Name','Fully3') 471 | % reluLayer('Name','relu3') 472 | % dropoutLayer(0.5,'Name','dropout3') 473 | % 474 | % % OUTPUT Layer 475 | % fullyConnectedLayer(size(YTrain,3),'Name','Fully4') 476 | % regressionLayer('Name','outReg')]; 477 | % 478 | % if Training_Size(dd) < miniBatchSize 479 | % validationFrequency = Training_Size(dd); 480 | % else 481 | % validationFrequency = floor(Training_Size(dd)/miniBatchSize); 482 | % end 483 | % VerboseFrequency = validationFrequency; 484 | % 485 | % % Options 486 | % options = trainingOptions('sgdm', ... 487 | % 'MiniBatchSize',miniBatchSize, ... 488 | % 'MaxEpochs',20, ... 489 | % 'InitialLearnRate',1e-1, ... 490 | % 'LearnRateSchedule','piecewise', ... 491 | % 'LearnRateDropFactor',0.5, ... 492 | % 'LearnRateDropPeriod',3, ... 493 | % 'L2Regularization',1e-4,... 494 | % 'Shuffle','every-epoch', ... 495 | % 'ValidationData',{XValidation,YValidation}, ... 496 | % 'ValidationFrequency',validationFrequency, ... 497 | % 'Plots','none', ... % 'training-progress' 498 | % 'Verbose',0, ... % 1 499 | % 'ExecutionEnvironment', 'cpu', ... 
500 | % 'VerboseFrequency',VerboseFrequency); 501 | % 502 | % % ------------- DL Model Training and Prediction -----------------% 503 | % [~,Indmax_OPT]= max(YValidation,[],3); 504 | % Indmax_OPT = squeeze(Indmax_OPT); %Upper bound on achievable rates 505 | % MaxR_OPT = single(zeros(numel(Indmax_OPT),1)); 506 | % 507 | % [trainedNet,traininfo] = trainNetwork(XTrain,YTrain,layers,options); 508 | % 509 | % YPredicted = predict(trainedNet,XValidation); 510 | % 511 | % % --------------------- Achievable Rate --------------------------% <--- change 512 | % [~,Indmax_DL] = maxk(YPredicted,kbeams,2); 513 | % MaxR_DL = single(zeros(size(Indmax_DL,1),1)); %True achievable rates 514 | % for b=1:size(Indmax_DL,1) 515 | % MaxR_DL(b) = max(squeeze(YValidation_un(1,1,Indmax_DL(b,:),b))); 516 | % MaxR_OPT(b) = squeeze(YValidation_un(1,1,Indmax_OPT(b),b)); 517 | % end 518 | % 519 | % % shall be removed 520 | % Rate_OPT(dd) = mean(MaxR_OPT); 521 | % Rate_DL(dd) = mean(MaxR_DL); 522 | % LastValidationRMSE(dd) = traininfo.ValidationRMSE(end); 523 | % 524 | % clear trainedNet traininfo YPredicted 525 | % clear layers options 526 | % end 527 | 528 | %% Plot Figures 529 | % Power Consumption 530 | 531 | % Time/Complexity 532 | -------------------------------------------------------------------------------- /20200219/deepmu.m: -------------------------------------------------------------------------------- 1 | %% Code Description and Credits 2 | % This code is to simulate an ML-Driven IRS-aided communication setting, 3 | % where for now, 2 single antenna users communicate with a 2-antenna BS via 4 | % an IRS with M reflecting elements. 5 | 6 | % Downlink for now. 7 | 8 | % This code is aided by that of the DeepMIMO dataset developed by A. 9 | % Alkhateeb et al. 
10 | 11 | % It implements the optimization algorithms of the paper below 12 | % [R1] Qingqing Wu, Rui Zhang, "Intelligent Reflecting Surface Enhanced Wireless 13 | % Network via Joint Active and Passive Beamforming", in IEEE Transactions on 14 | % Wireless Communications, Nov. 2019. 15 | 16 | % This paper optimizes the active beamforming at the BS and the passive 17 | % reflection at the IRS to minimize the total transmit power under SINR 18 | % QoS constraints. 19 | 20 | % It hence trains and tests a deep neural network (DNN) on the optimized outputs 21 | % and compares the results to the optimized benchmark. The DNN results can 22 | % be obtained in a more timely manner than those obtained via optimization. 23 | 24 | %% Simulation Parameters 25 | clear 26 | close all 27 | 28 | % ---------- Base Station (BS) ----------- 29 | % BS is one user with the following row and column indices 30 | Ut_row = 850; % user Ut row number 31 | Ut_element = 90; % user Ut col number 32 | % yet to add its MIMO functionality 33 | N_BS = 5; % Number of BS antennas 34 | %Pt = 100; % Transmit power in dBm 35 | 36 | % ----------- Users ----------- 37 | N_users= 2; % Number of Users 38 | % Users will be randomized from the following region (between the following rows) 39 | % each row contains 181 possible user locations 40 | Ur_rows = [1000 1300]; % user Ur rows 41 | No_user_pairs = (Ur_rows(2)-Ur_rows(1))*181; % Number of (Ut,Ur) user pairs %%%%%%%%% GENERALIZE LATER %%%% 42 | % Which row is ommitted? Starting or ending 1? otherwise No_user_pairs 43 | % should be this value +1 --> Last row is not neglected .. ok .. 
from 44 | % Ur_rows(1) till Ur_rows(2)-1 according to the channel generation code below 45 | RandP_all = randperm(No_user_pairs).'; % Random permutation of the available dataset 46 | 47 | all_users = 1:1:N_users; % vector of all user indices 48 | 49 | int_users_matrix = meshgrid(all_users).'; % indices of interfering users for each user 50 | int_users_matrix(1:N_users+1:N_users^2) = []; 51 | int_users_matrix = reshape(int_users_matrix, N_users-1, N_users).'; 52 | 53 | % ----------- IRS ----------- 54 | % at (one of the dataset BS *locations* - considered passive here of course) 55 | % Also known as LIS: Large Intelligent Surface 56 | % Note: The axes of the antennas match the axes of the ray-tracing scenario 57 | My = 6; % number of LIS reflecting elements across the y axis 58 | Mz = 6; % number of LIS reflecting elements across the z axis 59 | Mx = 1; % number of LIS reflecting elements across the x axis 60 | M = Mx.*My.*Mz; % Total number of LIS reflecting elements 61 | % M_bar=8; % number of active elements, not used here so far 62 | % IRS is assumed at one of the BS locations in the O1 scenario as given by 63 | % params.active_BS. 64 | params.active_BS=3; % active basestation(/s) in the chosen scenario 65 | 66 | % Ray tracing parameters 67 | params.scenario='O1_60'; % DeepMIMO Dataset scenario: http://deepmimo.net/ 68 | L =1; % number of channel paths (L) .. 
L=1 returns a narrowband channel with no ISI 69 | kbeams=1; % select the top kbeams, get their feedback and find the max actual achievable rate 70 | D_Lambda = 0.5; % Antenna spacing relative to the wavelength 71 | BW = 100e6; % Bandwidth in Hz (ex: 100e6 --> 100 MHz) 72 | 73 | % Not used parameters so far since the system is not OFDM 74 | % K_DL=64; % number of subcarriers as input to the Deep Learning model 75 | % Validation_Size = 6200; % Validation dataset Size 76 | % K = 512; % number of subcarriers 77 | K=1; % No OFDM 78 | K_DL =1; 79 | 80 | sim_len = 1e2; % Number of generated different multiuser scenarios 81 | 82 | %% Channel Generation 83 | % Using the DeepMIMO Dataset by Alkhateeb et al. 84 | 85 | % Select which machine the code is running on 86 | personal = 1; 87 | if personal == 1 88 | % Personal machine 89 | deepmimo_root_path= 'C:/Khafagy/DeepMIMO'; 90 | code_folder = 'C:/Users/Mohammad/Google Drive (mgkhafagy@aucegypt.edu)/MATLAB Codes'; 91 | elseif personal == 0 92 | % Research Lab Workstation 93 | deepmimo_root_path= 'D:/Khafagy/DeepMIMO'; 94 | code_folder = 'C:/Users/Dr. M-Khafagy/Google Drive/MATLAB Codes'; 95 | end 96 | cd(code_folder) 97 | 98 | % for dataset retrieval and storage from/to local server (not on the cloud) 99 | 100 | % %% Beamforming Codebook (CALLING UPA_codebook_generator) 101 | % disp('=============GENERATING REFLECTION MATRIX CODEBOOK==========='); 102 | % % BF codebook parameters 103 | % over_sampling_x=1; % The beamsteering oversampling factor in the x direction 104 | % over_sampling_y=1; % The beamsteering oversampling factor in the y direction 105 | % over_sampling_z=1; % The beamsteering oversampling factor in the z direction 106 | % 107 | % % Generating the BF codebook 108 | % % [BF_codebook]=sqrt(Mx*My*Mz)*... 
109 | % % UPA_codebook_generator(Mx,My,Mz,over_sampling_x,over_sampling_y,over_sampling_z,D_Lambda); 110 | % % codebook_size=size(BF_codebook,2); 111 | 112 | %% DeepMIMO Dataset Generation (CALLING DeepMIMO_generator) 113 | % DeepMIMO_generator calls read_raytracing then construct_DeepMIMO_channel 114 | % These code files are created by Alkhateeb et al. 115 | disp('===============GENERATING DEEPMIMO DATASET==================='); 116 | %disp('-------------------------------------------------------------'); 117 | %disp([' Calculating for K_DL = ' num2str(K_DL)]); 118 | % ------ Inputs to the DeepMIMO dataset generation code ------------ % 119 | % Note: The axes of the antennas match the axes of the ray-tracing scenario 120 | params.num_ant_x= Mx; % Number of the UPA antenna array on the x-axis 121 | params.num_ant_y= My; % Number of the UPA antenna array on the y-axis 122 | params.num_ant_z= Mz; % Number of the UPA antenna array on the z-axis 123 | params.ant_spacing=D_Lambda; % ratio of the wavelength; for half wavelength enter .5 124 | params.bandwidth= BW*1e-9; % The bandwidth in GHz 125 | params.num_OFDM= K; % Number of OFDM subcarriers 126 | params.OFDM_sampling_factor=1; % The constructed channels will be calculated only at the sampled subcarriers (to reduce the size of the dataset) 127 | params.OFDM_limit=K_DL*1; % Only the first params.OFDM_limit subcarriers will be considered when constructing the channels 128 | params.num_paths=L; % Maximum number of paths to be considered (a value between 1 and 25), e.g., choose 1 if you are only interested in the strongest path 129 | params.saveDataset=0; 130 | disp([' Calculating for L = ' num2str(params.num_paths)]); 131 | 132 | %% BS-IRS Channels 133 | disp('==========Generating Transmit BS-IRS Full Channel============'); 134 | % ------------------ DeepMIMO "Ut" Dataset Generation ----------------- % 135 | params.active_user_first=Ut_row; 136 | params.active_user_last=Ut_row; % Only one active user (but where is 
Ut_element to fully specify the user??) -- see below 137 | DeepMIMO_dataset=DeepMIMO_generator(params,deepmimo_root_path); % Generator function generates data for entire rows 138 | %Ht = single(DeepMIMO_dataset{1}.user{Ut_element}.channel); % Selecting element of interest here 139 | Ht = DeepMIMO_dataset{1}.user{Ut_element}.channel; % Selecting element of interest here 140 | 141 | clear DeepMIMO_dataset 142 | 143 | % ----------- Add BS MIMO Functionality here ------- 144 | % Remember to later randomize the transmitter as well, so that the neural 145 | % network is not a function of a fixed BS-IRS channel 146 | 147 | % Adjust size for now (simply replicate), then fix the MIMO functionality later 148 | %Ht = repmat(Ht,1, N_BS); 149 | Ht = 1e-2/sqrt(2)*(randn(M, N_BS)+1i*randn(M, N_BS)); 150 | 151 | %% IRS - Receiver Channels 152 | disp('===========Generating IRS-Receiver Full Channels============='); 153 | % ------------------ DeepMIMO "Ur" Dataset Generation -----------------% 154 | %initialization 155 | Ur_rows_step = 300; % access the dataset Ur_rows_step (300) rows at a time 156 | Ur_rows_grid=Ur_rows(1):Ur_rows_step:Ur_rows(2); 157 | Delta_H_max = single(0); 158 | for pp = 1:1:numel(Ur_rows_grid)-1 % loop for Normalizing H 159 | clear DeepMIMO_dataset 160 | params.active_user_first=Ur_rows_grid(pp); 161 | params.active_user_last=Ur_rows_grid(pp+1)-1; 162 | disp(['=== User Row Batch ' num2str(pp) ' out of ' num2str(numel(Ur_rows_grid)-1) ', each holding ' num2str(Ur_rows_step) ' rows =====']) 163 | [DeepMIMO_dataset,params]=DeepMIMO_generator(params,deepmimo_root_path); 164 | for u=1:params.num_user % seems to be hard-coded as rows*181 already 165 | Hr = single(conj(DeepMIMO_dataset{1}.user{u}.channel)); % conjugated since it is now downlink 166 | Delta_H = max(max(abs(Ht.*Hr))); 167 | if Delta_H >= Delta_H_max 168 | Delta_H_max = single(Delta_H); % storing the maximum absolute value of the end-to-end product channel for later normalization 169 | end 170 | end 171 | end 172 | 
clear Delta_H 173 | 174 | %% Loop over different user permutations and store optimized solutions 175 | ML_dataset{sim_len} = {}; % Store channels, locations, and solutions 176 | user_loc{N_users} = {}; 177 | 178 | % Fix seed 179 | rng(1); 180 | 181 | disp('Looping over different multi-user patterns and generating optimized matrices') 182 | for sim_index = 1:sim_len 183 | disp(['=== User pattern ' num2str(sim_index) ' out of ' num2str(sim_len) ' =====']) 184 | ML_dataset{sim_index}.Ht = Ht; % Store transmit (1st hop) channel 185 | 186 | % Select N_users random user indices 187 | clear Hr 188 | Hr{N_users} = []; 189 | user_loc{N_users} = {}; 190 | users = randperm(params.num_user, N_users); 191 | for user_ind = 1:N_users 192 | Hr{user_ind} = DeepMIMO_dataset{1}.user{users(user_ind)}.channel; 193 | user_loc{user_ind} = DeepMIMO_dataset{1}.user{users(user_ind)}.loc; 194 | end 195 | Hr = [Hr{:}]; 196 | 197 | ML_dataset{sim_index}.Hr = Hr; % Store receive (2nd hop) channel 198 | ML_dataset{sim_index}.user_loc = [user_loc{:}]; % Store user_locations 199 | 200 | % Implement Optimization algorithm here 201 | 202 | % Let the direct channel be denoted by Hsd (source to destination) 203 | %Hd = zeros(N_BS, N_users); 204 | Hd = 1e-10/sqrt(2)*(randn(N_BS, N_users)+1i*randn(N_BS, N_users)); 205 | 206 | ML_dataset{sim_index}.Hd = Hd; % Store direct channel 207 | 208 | sigma_2_dBm = -80; % Noise variance in dBm 209 | sigma_2 = 10^(sigma_2_dBm/10) * 1e-3; % (in Watts) 210 | 211 | % SINR target 212 | SINR_target_dB = 15; % Check: changing target SINR should change the transmit power (the cvx_optval objective value) 213 | SINR_target = 10^(SINR_target_dB/10); 214 | 215 | % Alternating optimization algorithm (Algorithm 1 in [R1]) 216 | % Set error threshold for alternating algorithm termination 217 | eps_iter=1e0; 218 | frac_error=1e10; % Initialize fractional error 219 | obj_last = 1e3; % Initialize last objective value to a large number 220 | 221 | disp('Running alternating 
optimization algorithm') 222 | r=1; % iteration index 223 | % Initialize reflection matrix theta 224 | beta_vec = ones(M,1); % Fixed to 1 for now as in the paper 225 | theta_vec = ones(M,1); % 2*pi*rand(M,1); % Uniformly randomized from 0 to 2*pi 226 | theta_mat= diag(beta_vec.*exp(1i*theta_vec)); 227 | 228 | % Check rank criterion for feasibility of the initial theta choice 229 | while ~(rank(Ht'*theta_mat*Hr + Hd) == N_users) % if infeasible choice, randomize and check again 230 | disp('infeasible initial choice of theta, .. reselecting ..') 231 | theta_vec = 2*pi*rand(M,1); % Uniformly randomized from 0 to 2*pi 232 | theta_mat= diag(beta_vec.*exp(1i*theta_vec)); 233 | end 234 | 235 | cvx_status = 'nothing'; % initialize 236 | 237 | while (frac_error > eps_iter) && ~contains(cvx_status,'Infeasible','IgnoreCase',true) 238 | if mod(r,1e2)==0 239 | disp(['Iteration r =' num2str(r)]) 240 | end 241 | 242 | % ==== Optimize W while fixing theta ==== BS Transmit Beamforming 243 | disp('Active Beamformer Design') 244 | 245 | cvx_clear 246 | clear W tau INTERFERENCE obj_fn1 cvx_optval 247 | 248 | cvx_begin 249 | cvx_quiet(true) 250 | cvx_solver SDPT3 %SDPT3 %Mosek % Choose the underlying solver 251 | cvx_precision best % Change the cvx numerical precision 252 | 253 | % Define your optimization variables here 254 | variable W(N_BS,N_users) complex; % add the word binary for binary constraints - see CVX documentation for more options 255 | variable tau nonnegative; % Auxiliary variable 256 | expressions INTERFERENCE(N_users,N_users); 257 | 258 | for k = all_users 259 | int_users = int_users_matrix(k,:); % interfering users 260 | INTERFERENCE(:,k) = [ ... 
261 | W(:,int_users)'*(Ht'*(theta_mat')*(Hr(:,k)) + (Hd(:,k))); 262 | sqrt(sigma_2)]; 263 | end 264 | 265 | % Write the optimization problem 266 | minimize( tau^2 ); 267 | subject to 268 | for k = all_users 269 | {INTERFERENCE(:,k), sqrt(1/SINR_target)*real(((Hr(:,k)'*theta_mat*Ht + Hd(:,k)')*W(:,k)))} == complex_lorentz(N_users); % SINR CONSTRAINT 270 | end 271 | {W(:), tau} == complex_lorentz(N_BS * N_users); % POWER CONSTRAINT 272 | cvx_end 273 | disp(['CVX Status: ' cvx_status ', CVX_optval = ' num2str(10*log10(cvx_optval*1000)) ' dBm']) 274 | 275 | frac_error = abs(obj_last - cvx_optval)/obj_last *100; 276 | obj_last = cvx_optval; 277 | 278 | achieved_SINR = zeros(1,N_users); 279 | % Actual achieved SINR 280 | for k = all_users 281 | achieved_SINR(k) = (norm((Hr(:,k)'*theta_mat*Ht+ Hd(:,k)')*W(:,k)))^2/(norm(INTERFERENCE(:,k)))^2; 282 | end 283 | % achieved_SINR 284 | % trace(W*(W')) 285 | % cvx_optval 286 | % 10*log10(trace(W*(W'))*1000) 287 | 288 | % ==== Optimize theta while fixing W ==== IRS Reflection Matrix 289 | % (P4') in paper 290 | disp('Passive Beamformer Design') 291 | 292 | % Define a, b and R 293 | a = cell(N_users,N_users); 294 | b = cell(N_users,N_users); 295 | R = cell(N_users,N_users); 296 | for k = all_users % looping over all users 297 | int_users = int_users_matrix(k,:); % interfering users 298 | a{k,k}= diag(Hr(:,k)')*Ht*W(:,k); 299 | b{k,k}= Hd(:,k)'*W(:,k); 300 | R{k,k}= [ a{k,k}* (a{k,k}') a{k,k}* (b{k,k}') ; a{k,k}'* b{k,k} 0]; 301 | for m = int_users 302 | a{k,m}= diag(Hr(:,k)')*Ht*W(:,m); 303 | b{k,m}= Hd(:,k)'*W(:,m); 304 | R{k,m}= [ a{k,m}* (a{k,m}') a{k,m}* (b{k,m}') ; a{k,m}'* b{k,m} 0]; 305 | end 306 | end 307 | 308 | cvx_begin sdp 309 | cvx_quiet(true) 310 | cvx_solver SDPT3 %SeDuMi %SDPT3 %Mosek % Choose the underlying solver 311 | cvx_precision best % Change the cvx numerical precision 312 | 313 | variable V(M+1,M+1) complex semidefinite; 314 | variable a_aux(1,N_users) nonnegative; % Auxiliary variables for max sum 315 | 
%variable a_aux nonnegative; % Auxiliary variables for max min 316 | expressions SINR_CONSTR(N_users) desired(N_users) interference(N_users); 317 | 318 | % Define the expressions desired, interference, and SINR_CONSTR in terms of the optimization variables 319 | %sinr_fun_handle = @sinr_CONSTRAINT; 320 | 321 | %[desired, interference, SINR_CONSTR] = sinr_fun_handle(V, b, R, SINR_target, sigma_2, all_users, int_users_matrix); 322 | for k = all_users % looping over all users 323 | int_users = int_users_matrix(k,:); % interfering users 324 | desired(k) = trace(real(R{k,k}*V)) + square_abs(b{k,k}); 325 | interference(k) = 0; 326 | for m = int_users 327 | interference(k) = interference(k) + trace(real(R{k,m}*V)) + square_abs(b{k,m}); 328 | end 329 | SINR_CONSTR(k) = desired(k) - a_aux(k) - SINR_target * (interference(k) + sigma_2); 330 | end 331 | 332 | %all_elements = 1:M+1; 333 | 334 | % Write the optimization problem 335 | maximize( sum(a_aux) ); 336 | subject to 337 | %SINR_CONSTR == nonnegative(N_users) 338 | for k = 1:N_users 339 | desired(k) >= a_aux(k) + SINR_target * (interference(k) + sigma_2); 340 | %sqrt(SINR_CONSTR(k)) >= 0 341 | end 342 | diag(V) == ones(M+1,1); 343 | % Other 2 constraints are already in the definitions of opt variables 344 | %obj_fn2 >= 0; % Dummy constraint to check/prevent the resulting -ve cvx_optval 345 | cvx_end 346 | 347 | disp(['CVX Status: ' cvx_status]) 348 | 349 | if ~contains(cvx_status,'Infeasible','IgnoreCase',true) 350 | disp('Running Gaussian Randomization') 351 | [U,D] = eig(full(V)); % Eigenvalue Decomposition 352 | if rank(full(V)) == 1 353 | v_bar = U*sqrt(D); 354 | theta_vec = angle(v_bar(1:M)/v_bar(M+1)); 355 | v = exp(-1i*theta_vec); 356 | theta_mat = diag(v); 357 | 358 | else % Apply Gaussian Randomization 359 | 360 | num_rands = 1e3; % number of randomizations 361 | 362 | % Generate Gaussian random vector ~ CN(0, I) 363 | %gpudev = gpuDevice(); 364 | %reset(gpudev); 365 | r_vec_matrix = 
(1/sqrt(2))*((mvnrnd(zeros(M+1,1),eye(M+1),num_rands) + 1i * mvnrnd(zeros(M+1,1),eye(M+1), num_rands)).'); %gpuArray() 366 | v_bar_matrix = U*sqrt(D)*r_vec_matrix; 367 | 368 | best_index = 0; 369 | best_value = -1e8; 370 | %v_bar_matrix = exp(1i*2*pi*rand(M+1,num_rands)); 371 | 372 | for randmzn_index = 1:num_rands 373 | v_bar_vec = v_bar_matrix(:,randmzn_index); 374 | V_rand = v_bar_vec*(v_bar_vec'); 375 | 376 | [~, ~, constr_value] = sinr_CONSTRAINT(V_rand, b, R, SINR_target, sigma_2, all_users, int_users_matrix); 377 | 378 | % Check feasibility and best value 379 | feasibility_check = prod( constr_value >= a_aux ); 380 | better_value_check = (sum(constr_value) > best_value); 381 | if feasibility_check && better_value_check 382 | best_index = randmzn_index; 383 | best_value = sum(constr_value); 384 | end 385 | end 386 | 387 | if best_index ~= 0 388 | % select best v_bar that maximizes SINR_CONSTR 389 | v_bar = v_bar_matrix(:,best_index); 390 | theta_vec = angle(v_bar(1:M)/v_bar(M+1)); 391 | v = exp(-1i*theta_vec); 392 | theta_mat = diag(v); 393 | else 394 | cvx_status = 'Infeasible'; 395 | end 396 | 397 | disp(['CVX Status after randomization: ' cvx_status]) 398 | end 399 | end 400 | 401 | % Increment iteration index 402 | r = r+1; 403 | end 404 | 405 | ML_dataset{sim_index}.W = W; % Store Transmit Beamformer 406 | ML_dataset{sim_index}.theta = diag(theta_mat); % Store Reflection Matrix Diagonal 407 | ML_dataset{sim_index}.iterations = r-1; 408 | 409 | % ----------- end iterative algorithm ------------------ 410 | 411 | end 412 | save([deepmimo_root_path '/saved_datasets.mat'], 'ML_dataset') 413 | 414 | %% Build Neural Network here 415 | 416 | % For regression neural network, we can directly use newgr 417 | 418 | % Prepare INPUT and OUTPUT matrices 419 | INPUT = zeros(sim_len,2*(M * N_users + M * N_BS + N_BS* N_users)); % The 3 vectorized channel matrices 420 | OUTPUT = zeros(sim_len, 2*(M^2 + N_BS* N_users)); % Vectorized beamformers 421 | iterations = 
zeros(sim_len,1); 422 | % Generalized Regression Neural Networks in MATLAB 423 | for loop_index = 1:sim_len 424 | TEMP = ML_dataset{loop_index}; 425 | INPUT(loop_index,:) = [real(TEMP.Ht(:)); imag(TEMP.Ht(:)); 426 | real(TEMP.Hr(:)); imag(TEMP.Hr(:)); 427 | real(TEMP.Hd(:)); imag(TEMP.Hd(:))].'; 428 | OUTPUT(loop_index,:) = [real(TEMP.W(:)); imag(TEMP.W(:)); 429 | real(TEMP.theta(:)); imag(TEMP.theta(:))].'; 430 | iterations(loop_index) = TEMP.iterations; 431 | end 432 | 433 | net = newgrnn(INPUT.',OUTPUT.'); 434 | y = net(INPUT.').'; 435 | 436 | % Training_Size=[2 1e4*(1:.4:3)]; % Training Dataset Size vector 437 | % % Should be made a function of sim_len: the size of our stored optimized 438 | % % data, which will be split into training, testing, and validation 439 | % 440 | % Validation_Size = 6200; % Validation dataset Size 441 | % miniBatchSize = 500; % Size of the minibatch for the Deep Learning 442 | 443 | % disp('======================DL BEAMFORMING========================='); 444 | % % ------------------ Training and Testing Datasets -----------------% 445 | % for dd=1:1:numel(Training_Size) 446 | % disp([' Calculating for Dataset Size = ' num2str(Training_Size(dd))]); 447 | % Training_Ind = RandP_all(1:Training_Size(dd)); 448 | % 449 | % XTrain = single(DL_input_reshaped(:,1,1,Training_Ind)); 450 | % YTrain = single(DL_output_reshaped(1,1,:,Training_Ind)); 451 | % XValidation = single(DL_input_reshaped(:,1,1,Validation_Ind)); 452 | % YValidation = single(DL_output_reshaped(1,1,:,Validation_Ind)); 453 | % YValidation_un = single(DL_output_reshaped_un); 454 | % 455 | % % ------------------ DL Model definition -----------------% 456 | % % Layers 457 | % layers = [ 458 | % % INPUT Layer 459 | % imageInputLayer([size(XTrain,1),1,1],'Name','input') 460 | % 461 | % % Fully Connected Layer 1 with Dropout 462 | % fullyConnectedLayer(size(YTrain,3),'Name','Fully1') 463 | % reluLayer('Name','relu1') 464 | % dropoutLayer(0.5,'Name','dropout1') 465 | % 466 | % % Fully 
Connected Layer 2 with Dropout 467 | % fullyConnectedLayer(4*size(YTrain,3),'Name','Fully2') 468 | % reluLayer('Name','relu2') 469 | % dropoutLayer(0.5,'Name','dropout2') 470 | % 471 | % % Fully Connected Layer 3 with Dropout 472 | % fullyConnectedLayer(4*size(YTrain,3),'Name','Fully3') 473 | % reluLayer('Name','relu3') 474 | % dropoutLayer(0.5,'Name','dropout3') 475 | % 476 | % % OUTPUT Layer 477 | % fullyConnectedLayer(size(YTrain,3),'Name','Fully4') 478 | % regressionLayer('Name','outReg')]; 479 | % 480 | % if Training_Size(dd) < miniBatchSize 481 | % validationFrequency = Training_Size(dd); 482 | % else 483 | % validationFrequency = floor(Training_Size(dd)/miniBatchSize); 484 | % end 485 | % VerboseFrequency = validationFrequency; 486 | % 487 | % % Options 488 | % options = trainingOptions('sgdm', ... 489 | % 'MiniBatchSize',miniBatchSize, ... 490 | % 'MaxEpochs',20, ... 491 | % 'InitialLearnRate',1e-1, ... 492 | % 'LearnRateSchedule','piecewise', ... 493 | % 'LearnRateDropFactor',0.5, ... 494 | % 'LearnRateDropPeriod',3, ... 495 | % 'L2Regularization',1e-4,... 496 | % 'Shuffle','every-epoch', ... 497 | % 'ValidationData',{XValidation,YValidation}, ... 498 | % 'ValidationFrequency',validationFrequency, ... 499 | % 'Plots','none', ... % 'training-progress' 500 | % 'Verbose',0, ... % 1 501 | % 'ExecutionEnvironment', 'cpu', ... 
502 | % 'VerboseFrequency',VerboseFrequency); 503 | % 504 | % % ------------- DL Model Training and Prediction -----------------% 505 | % [~,Indmax_OPT]= max(YValidation,[],3); 506 | % Indmax_OPT = squeeze(Indmax_OPT); %Upper bound on achievable rates 507 | % MaxR_OPT = single(zeros(numel(Indmax_OPT),1)); 508 | % 509 | % [trainedNet,traininfo] = trainNetwork(XTrain,YTrain,layers,options); 510 | % 511 | % YPredicted = predict(trainedNet,XValidation); 512 | % 513 | % % --------------------- Achievable Rate --------------------------% <--- change 514 | % [~,Indmax_DL] = maxk(YPredicted,kbeams,2); 515 | % MaxR_DL = single(zeros(size(Indmax_DL,1),1)); %True achievable rates 516 | % for b=1:size(Indmax_DL,1) 517 | % MaxR_DL(b) = max(squeeze(YValidation_un(1,1,Indmax_DL(b,:),b))); 518 | % MaxR_OPT(b) = squeeze(YValidation_un(1,1,Indmax_OPT(b),b)); 519 | % end 520 | % 521 | % % shall be removed 522 | % Rate_OPT(dd) = mean(MaxR_OPT); 523 | % Rate_DL(dd) = mean(MaxR_DL); 524 | % LastValidationRMSE(dd) = traininfo.ValidationRMSE(end); 525 | % 526 | % clear trainedNet traininfo YPredicted 527 | % clear layers options 528 | % end 529 | 530 | %% Plot Figures 531 | % Power Consumption 532 | 533 | % Time/Complexity 534 | 535 | 536 | -------------------------------------------------------------------------------- /20200219/leave_now/DeepMIMO_Dataset_Generator.m: -------------------------------------------------------------------------------- 1 | % --------- DeepMIMO: A Generic Dataset for mmWave and massive MIMO ------% 2 | % Author: Ahmed Alkhateeb 3 | % Date: Sept. 
5, 2018 4 | % Goal: Encouraging research on ML/DL for mmWave/massive MIMO applications and 5 | % providing a benchmarking tool for the developed algorithms 6 | % ---------------------------------------------------------------------- % 7 | 8 | function [DeepMIMO_dataset,params]=DeepMIMO_Dataset_Generator() 9 | 10 | % ------ Inputs to the DeepMIMO dataset generation code ------------ % 11 | 12 | %------Ray-tracing scenario 13 | params.scenario='O1_60'; % The adopted ray tracing scenarios [check the available scenarios at www.aalkhateeb.net/DeepMIMO.html] 14 | 15 | %------DeepMIMO parameters set 16 | %Active base stations 17 | params.active_BS=[3]; % Includes the numbers of the active BSs (values from 1-18 for 'O1') 18 | 19 | % Active users 20 | params.active_user_first=1000; % The first row of the considered receivers section (check the scenario description for the receiver row map) 21 | params.active_user_last=1300; % The last row of the considered receivers section (check the scenario description for the receiver row map) 22 | 23 | % Number of BS Antenna 24 | params.num_ant_x=1; % Number of the UPA antenna array on the x-axis 25 | params.num_ant_y=32; % Number of the UPA antenna array on the y-axis 26 | params.num_ant_z=8; % Number of the UPA antenna array on the z-axis 27 | % Note: The axes of the antennas match the axes of the ray-tracing scenario 28 | 29 | % Antenna spacing 30 | params.ant_spacing=.5; % ratio of the wavelnegth; for half wavelength enter .5 31 | 32 | % System bandwidth 33 | params.bandwidth=0.5; % The bandiwdth in GHz 34 | 35 | % OFDM parameters 36 | params.num_OFDM=1024; % Number of OFDM subcarriers 37 | params.OFDM_sampling_factor=1; % The constructed channels will be calculated only at the sampled subcarriers (to reduce the size of the dataset) 38 | params.OFDM_limit=64; % Only the first params.OFDM_limit subcarriers will be considered when constructing the channels 39 | 40 | % Number of paths 41 | params.num_paths=5; % Maximum number of paths 
to be considered (a value between 1 and 25), e.g., choose 1 if you are only interested in the strongest path 42 | 43 | params.saveDataset=0; 44 | 45 | % -------------------------- DeepMIMO Dataset Generation -----------------% 46 | [DeepMIMO_dataset,params]=DeepMIMO_generator(params); 47 | 48 | end 49 | -------------------------------------------------------------------------------- /20200219/leave_now/LIS-DeepLearning-master/Fig10_data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CodeCasting/DeepIRS/c84d8ec834a519e379e6bfec1bb8ee474a359066/20200219/leave_now/LIS-DeepLearning-master/Fig10_data.mat -------------------------------------------------------------------------------- /20200219/leave_now/LIS-DeepLearning-master/Fig10_generator.m: -------------------------------------------------------------------------------- 1 | %clearvars 2 | %close all 3 | %clc 4 | 5 | %% Description: 6 | % 7 | % This is the main code for generating Figure 10 in the original article 8 | % mentioned below. 9 | % 10 | % version 1.0 (Last edited: 2019-05-10) 11 | % 12 | % The definitions and equations used in this code refer (mostly) to the 13 | % following publication: 14 | % 15 | % Abdelrahman Taha, Muhammad Alrabeiah, and Ahmed Alkhateeb, "Enabling 16 | % Large Intelligent Surfaces with Compressive Sensing and Deep Learning," 17 | % arXiv e-prints, p. arXiv:1904.10136, Apr 2019. 18 | % [Online]. Available: https://arxiv.org/abs/1904.10136 19 | % 20 | % The DeepMIMO dataset is adopted. 21 | % [Online]. Available: http://deepmimo.net/ 22 | % 23 | % License: This code is licensed under a Creative Commons 24 | % Attribution-NonCommercial-ShareAlike 4.0 International License. 25 | % [Online]. Available: https://creativecommons.org/licenses/by-nc-sa/4.0/ 26 | % If you in any way use this code for research that results in 27 | % publications, please cite our original article mentioned above. 
28 | 29 | %% System Model parameters 30 | 31 | kbeams=1; %select the top kbeams, get their feedback and find the max actual achievable rate 32 | Pt=5; % dB 33 | L =1; % number of channel paths (L) 34 | 35 | % Note: The axes of the antennas match the axes of the ray-tracing scenario 36 | My_ar=[32 64]; % number of LIS reflecting elements across the y axis 37 | Mz_ar=[32 64]; % number of LIS reflecting elements across the z axis 38 | M_bar=8; % number of active elements 39 | K_DL=64; % number of subcarriers as input to the Deep Learning model 40 | Training_Size=[2 1e4*(1:.4:3)]; % Training Dataset Size vector 41 | 42 | % Preallocation of output variables 43 | Rate_DLt=zeros(numel(My_ar),numel(Training_Size)); 44 | Rate_OPTt=zeros(numel(My_ar),numel(Training_Size)); 45 | 46 | %% Figure Data Generation 47 | 48 | for rr = 1:1:numel(My_ar) 49 | save Fig10_data.mat L My_ar Mz_ar M_bar Training_Size K_DL Rate_DLt Rate_OPTt 50 | [Rate_DL,Rate_OPT]=Main_fn(L,My_ar(rr),Mz_ar(rr),M_bar,K_DL,Pt,kbeams,Training_Size); 51 | Rate_DLt(rr,:)=Rate_DL; Rate_OPTt(rr,:)=Rate_OPT; 52 | end 53 | 54 | save Fig10_data.mat L My_ar Mz_ar M_bar Training_Size K_DL Rate_DLt Rate_OPTt 55 | 56 | %% Figure Plot 57 | 58 | %------------- Figure Input Variables ---------------------------% 59 | % M; My_ar; Mz_ar; M_bar; 60 | % Training_Size; Rate_DLt; Rate_OPTt; 61 | 62 | %------------------ Fixed Parameters ----------------------------% 63 | % Full Regression 64 | % L = min = 1 65 | % K = 512, K_DL = max = 64 66 | % M_bar = 8 67 | % random distribution of active elements 68 | 69 | Colour = 'brgmcky'; 70 | 71 | f10 = figure('Name', 'Figure10', 'units','pixels'); 72 | hold on; grid on; box on; 73 | title(['Achievable Rate for different dataset sizes using only ' num2str(M_bar) ' active elements'],'fontsize',12) 74 | xlabel('Deep Learning Training Dataset Size (Thousands of Samples)','fontsize',14) 75 | ylabel('Achievable Rate (bps/Hz)','fontsize',14) 76 | set(gca,'FontSize',13) 77 | if ishandle(f10) 78 
| set(0, 'CurrentFigure', f10) 79 | hold on; grid on; 80 | for rr=1:1:numel(My_ar) 81 | plot((Training_Size*1e-3),Rate_OPTt(rr,:),[Colour(rr) '*--'],'markersize',8,'linewidth',2, 'DisplayName',['Genie-Aided Reflection Beamforming, M = ' num2str(My_ar(rr)) '*' num2str(Mz_ar(rr))]) 82 | plot((Training_Size*1e-3),Rate_DLt(rr,:),[Colour(rr) 's-'],'markersize',8,'linewidth',2, 'DisplayName', ['DL Reflection Beamforming, M = ' num2str(My_ar(rr)) '*' num2str(Mz_ar(rr))]) 83 | end 84 | legend('Location','SouthEast') 85 | legend show 86 | end 87 | drawnow 88 | hold off -------------------------------------------------------------------------------- /20200219/leave_now/LIS-DeepLearning-master/Figure10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CodeCasting/DeepIRS/c84d8ec834a519e379e6bfec1bb8ee474a359066/20200219/leave_now/LIS-DeepLearning-master/Figure10.png -------------------------------------------------------------------------------- /20200219/leave_now/LIS-DeepLearning-master/LICENSE.md: -------------------------------------------------------------------------------- 1 | Attribution-NonCommercial-ShareAlike 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 
14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. 
A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 58 | Public License 59 | 60 | By exercising the Licensed Rights (defined below), You accept and agree 61 | to be bound by the terms and conditions of this Creative Commons 62 | Attribution-NonCommercial-ShareAlike 4.0 International Public License 63 | ("Public License"). To the extent this Public License may be 64 | interpreted as a contract, You are granted the Licensed Rights in 65 | consideration of Your acceptance of these terms and conditions, and the 66 | Licensor grants You such rights in consideration of benefits the 67 | Licensor receives from making the Licensed Material available under 68 | these terms and conditions. 69 | 70 | 71 | Section 1 -- Definitions. 72 | 73 | a. Adapted Material means material subject to Copyright and Similar 74 | Rights that is derived from or based upon the Licensed Material 75 | and in which the Licensed Material is translated, altered, 76 | arranged, transformed, or otherwise modified in a manner requiring 77 | permission under the Copyright and Similar Rights held by the 78 | Licensor. For purposes of this Public License, where the Licensed 79 | Material is a musical work, performance, or sound recording, 80 | Adapted Material is always produced where the Licensed Material is 81 | synched in timed relation with a moving image. 82 | 83 | b. Adapter's License means the license You apply to Your Copyright 84 | and Similar Rights in Your contributions to Adapted Material in 85 | accordance with the terms and conditions of this Public License. 86 | 87 | c. 
BY-NC-SA Compatible License means a license listed at 88 | creativecommons.org/compatiblelicenses, approved by Creative 89 | Commons as essentially the equivalent of this Public License. 90 | 91 | d. Copyright and Similar Rights means copyright and/or similar rights 92 | closely related to copyright including, without limitation, 93 | performance, broadcast, sound recording, and Sui Generis Database 94 | Rights, without regard to how the rights are labeled or 95 | categorized. For purposes of this Public License, the rights 96 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 97 | Rights. 98 | 99 | e. Effective Technological Measures means those measures that, in the 100 | absence of proper authority, may not be circumvented under laws 101 | fulfilling obligations under Article 11 of the WIPO Copyright 102 | Treaty adopted on December 20, 1996, and/or similar international 103 | agreements. 104 | 105 | f. Exceptions and Limitations means fair use, fair dealing, and/or 106 | any other exception or limitation to Copyright and Similar Rights 107 | that applies to Your use of the Licensed Material. 108 | 109 | g. License Elements means the license attributes listed in the name 110 | of a Creative Commons Public License. The License Elements of this 111 | Public License are Attribution, NonCommercial, and ShareAlike. 112 | 113 | h. Licensed Material means the artistic or literary work, database, 114 | or other material to which the Licensor applied this Public 115 | License. 116 | 117 | i. Licensed Rights means the rights granted to You subject to the 118 | terms and conditions of this Public License, which are limited to 119 | all Copyright and Similar Rights that apply to Your use of the 120 | Licensed Material and that the Licensor has authority to license. 121 | 122 | j. Licensor means the individual(s) or entity(ies) granting rights 123 | under this Public License. 124 | 125 | k. 
NonCommercial means not primarily intended for or directed towards 126 | commercial advantage or monetary compensation. For purposes of 127 | this Public License, the exchange of the Licensed Material for 128 | other material subject to Copyright and Similar Rights by digital 129 | file-sharing or similar means is NonCommercial provided there is 130 | no payment of monetary compensation in connection with the 131 | exchange. 132 | 133 | l. Share means to provide material to the public by any means or 134 | process that requires permission under the Licensed Rights, such 135 | as reproduction, public display, public performance, distribution, 136 | dissemination, communication, or importation, and to make material 137 | available to the public including in ways that members of the 138 | public may access the material from a place and at a time 139 | individually chosen by them. 140 | 141 | m. Sui Generis Database Rights means rights other than copyright 142 | resulting from Directive 96/9/EC of the European Parliament and of 143 | the Council of 11 March 1996 on the legal protection of databases, 144 | as amended and/or succeeded, as well as other essentially 145 | equivalent rights anywhere in the world. 146 | 147 | n. You means the individual or entity exercising the Licensed Rights 148 | under this Public License. Your has a corresponding meaning. 149 | 150 | 151 | Section 2 -- Scope. 152 | 153 | a. License grant. 154 | 155 | 1. Subject to the terms and conditions of this Public License, 156 | the Licensor hereby grants You a worldwide, royalty-free, 157 | non-sublicensable, non-exclusive, irrevocable license to 158 | exercise the Licensed Rights in the Licensed Material to: 159 | 160 | a. reproduce and Share the Licensed Material, in whole or 161 | in part, for NonCommercial purposes only; and 162 | 163 | b. produce, reproduce, and Share Adapted Material for 164 | NonCommercial purposes only. 165 | 166 | 2. Exceptions and Limitations. 
For the avoidance of doubt, where 167 | Exceptions and Limitations apply to Your use, this Public 168 | License does not apply, and You do not need to comply with 169 | its terms and conditions. 170 | 171 | 3. Term. The term of this Public License is specified in Section 172 | 6(a). 173 | 174 | 4. Media and formats; technical modifications allowed. The 175 | Licensor authorizes You to exercise the Licensed Rights in 176 | all media and formats whether now known or hereafter created, 177 | and to make technical modifications necessary to do so. The 178 | Licensor waives and/or agrees not to assert any right or 179 | authority to forbid You from making technical modifications 180 | necessary to exercise the Licensed Rights, including 181 | technical modifications necessary to circumvent Effective 182 | Technological Measures. For purposes of this Public License, 183 | simply making modifications authorized by this Section 2(a) 184 | (4) never produces Adapted Material. 185 | 186 | 5. Downstream recipients. 187 | 188 | a. Offer from the Licensor -- Licensed Material. Every 189 | recipient of the Licensed Material automatically 190 | receives an offer from the Licensor to exercise the 191 | Licensed Rights under the terms and conditions of this 192 | Public License. 193 | 194 | b. Additional offer from the Licensor -- Adapted Material. 195 | Every recipient of Adapted Material from You 196 | automatically receives an offer from the Licensor to 197 | exercise the Licensed Rights in the Adapted Material 198 | under the conditions of the Adapter's License You apply. 199 | 200 | c. No downstream restrictions. You may not offer or impose 201 | any additional or different terms or conditions on, or 202 | apply any Effective Technological Measures to, the 203 | Licensed Material if doing so restricts exercise of the 204 | Licensed Rights by any recipient of the Licensed 205 | Material. 206 | 207 | 6. No endorsement. 
Nothing in this Public License constitutes or 208 | may be construed as permission to assert or imply that You 209 | are, or that Your use of the Licensed Material is, connected 210 | with, or sponsored, endorsed, or granted official status by, 211 | the Licensor or others designated to receive attribution as 212 | provided in Section 3(a)(1)(A)(i). 213 | 214 | b. Other rights. 215 | 216 | 1. Moral rights, such as the right of integrity, are not 217 | licensed under this Public License, nor are publicity, 218 | privacy, and/or other similar personality rights; however, to 219 | the extent possible, the Licensor waives and/or agrees not to 220 | assert any such rights held by the Licensor to the limited 221 | extent necessary to allow You to exercise the Licensed 222 | Rights, but not otherwise. 223 | 224 | 2. Patent and trademark rights are not licensed under this 225 | Public License. 226 | 227 | 3. To the extent possible, the Licensor waives any right to 228 | collect royalties from You for the exercise of the Licensed 229 | Rights, whether directly or through a collecting society 230 | under any voluntary or waivable statutory or compulsory 231 | licensing scheme. In all other cases the Licensor expressly 232 | reserves any right to collect such royalties, including when 233 | the Licensed Material is used other than for NonCommercial 234 | purposes. 235 | 236 | 237 | Section 3 -- License Conditions. 238 | 239 | Your exercise of the Licensed Rights is expressly made subject to the 240 | following conditions. 241 | 242 | a. Attribution. 243 | 244 | 1. If You Share the Licensed Material (including in modified 245 | form), You must: 246 | 247 | a. retain the following if it is supplied by the Licensor 248 | with the Licensed Material: 249 | 250 | i. 
identification of the creator(s) of the Licensed 251 | Material and any others designated to receive 252 | attribution, in any reasonable manner requested by 253 | the Licensor (including by pseudonym if 254 | designated); 255 | 256 | ii. a copyright notice; 257 | 258 | iii. a notice that refers to this Public License; 259 | 260 | iv. a notice that refers to the disclaimer of 261 | warranties; 262 | 263 | v. a URI or hyperlink to the Licensed Material to the 264 | extent reasonably practicable; 265 | 266 | b. indicate if You modified the Licensed Material and 267 | retain an indication of any previous modifications; and 268 | 269 | c. indicate the Licensed Material is licensed under this 270 | Public License, and include the text of, or the URI or 271 | hyperlink to, this Public License. 272 | 273 | 2. You may satisfy the conditions in Section 3(a)(1) in any 274 | reasonable manner based on the medium, means, and context in 275 | which You Share the Licensed Material. For example, it may be 276 | reasonable to satisfy the conditions by providing a URI or 277 | hyperlink to a resource that includes the required 278 | information. 279 | 3. If requested by the Licensor, You must remove any of the 280 | information required by Section 3(a)(1)(A) to the extent 281 | reasonably practicable. 282 | 283 | b. ShareAlike. 284 | 285 | In addition to the conditions in Section 3(a), if You Share 286 | Adapted Material You produce, the following conditions also apply. 287 | 288 | 1. The Adapter's License You apply must be a Creative Commons 289 | license with the same License Elements, this version or 290 | later, or a BY-NC-SA Compatible License. 291 | 292 | 2. You must include the text of, or the URI or hyperlink to, the 293 | Adapter's License You apply. You may satisfy this condition 294 | in any reasonable manner based on the medium, means, and 295 | context in which You Share Adapted Material. 296 | 297 | 3. 
You may not offer or impose any additional or different terms 298 | or conditions on, or apply any Effective Technological 299 | Measures to, Adapted Material that restrict exercise of the 300 | rights granted under the Adapter's License You apply. 301 | 302 | 303 | Section 4 -- Sui Generis Database Rights. 304 | 305 | Where the Licensed Rights include Sui Generis Database Rights that 306 | apply to Your use of the Licensed Material: 307 | 308 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 309 | to extract, reuse, reproduce, and Share all or a substantial 310 | portion of the contents of the database for NonCommercial purposes 311 | only; 312 | 313 | b. if You include all or a substantial portion of the database 314 | contents in a database in which You have Sui Generis Database 315 | Rights, then the database in which You have Sui Generis Database 316 | Rights (but not its individual contents) is Adapted Material, 317 | including for purposes of Section 3(b); and 318 | 319 | c. You must comply with the conditions in Section 3(a) if You Share 320 | all or a substantial portion of the contents of the database. 321 | 322 | For the avoidance of doubt, this Section 4 supplements and does not 323 | replace Your obligations under this Public License where the Licensed 324 | Rights include other Copyright and Similar Rights. 325 | 326 | 327 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 328 | 329 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 330 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 331 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 332 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 333 | IMPLIED, STATUTORY, OR OTHER. 
THIS INCLUDES, WITHOUT LIMITATION, 334 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 335 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 336 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 337 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 338 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 339 | 340 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 341 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 342 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 343 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 344 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 345 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 346 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 347 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 348 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 349 | 350 | c. The disclaimer of warranties and limitation of liability provided 351 | above shall be interpreted in a manner that, to the extent 352 | possible, most closely approximates an absolute disclaimer and 353 | waiver of all liability. 354 | 355 | 356 | Section 6 -- Term and Termination. 357 | 358 | a. This Public License applies for the term of the Copyright and 359 | Similar Rights licensed here. However, if You fail to comply with 360 | this Public License, then Your rights under this Public License 361 | terminate automatically. 362 | 363 | b. Where Your right to use the Licensed Material has terminated under 364 | Section 6(a), it reinstates: 365 | 366 | 1. automatically as of the date the violation is cured, provided 367 | it is cured within 30 days of Your discovery of the 368 | violation; or 369 | 370 | 2. upon express reinstatement by the Licensor. 
371 | 372 | For the avoidance of doubt, this Section 6(b) does not affect any 373 | right the Licensor may have to seek remedies for Your violations 374 | of this Public License. 375 | 376 | c. For the avoidance of doubt, the Licensor may also offer the 377 | Licensed Material under separate terms or conditions or stop 378 | distributing the Licensed Material at any time; however, doing so 379 | will not terminate this Public License. 380 | 381 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 382 | License. 383 | 384 | 385 | Section 7 -- Other Terms and Conditions. 386 | 387 | a. The Licensor shall not be bound by any additional or different 388 | terms or conditions communicated by You unless expressly agreed. 389 | 390 | b. Any arrangements, understandings, or agreements regarding the 391 | Licensed Material not stated herein are separate from and 392 | independent of the terms and conditions of this Public License. 393 | 394 | 395 | Section 8 -- Interpretation. 396 | 397 | a. For the avoidance of doubt, this Public License does not, and 398 | shall not be interpreted to, reduce, limit, restrict, or impose 399 | conditions on any use of the Licensed Material that could lawfully 400 | be made without permission under this Public License. 401 | 402 | b. To the extent possible, if any provision of this Public License is 403 | deemed unenforceable, it shall be automatically reformed to the 404 | minimum extent necessary to make it enforceable. If the provision 405 | cannot be reformed, it shall be severed from this Public License 406 | without affecting the enforceability of the remaining terms and 407 | conditions. 408 | 409 | c. No term or condition of this Public License will be waived and no 410 | failure to comply consented to unless expressly agreed to by the 411 | Licensor. 412 | 413 | d. 
Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.

=======================================================================

Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.

Creative Commons may be contacted at creativecommons.org.
--------------------------------------------------------------------------------
/20200219/leave_now/LIS-DeepLearning-master/Main_fn.m:
--------------------------------------------------------------------------------
function [Rate_DL,Rate_OPT]=Main_fn(L,My,Mz,M_bar,K_DL,Pt,kbeams,Training_Size)
%% Description:
%
% This is the function called by the main script for plotting Figure 10
% in the original article mentioned below.
%
% INPUTS:
%   L             - Maximum number of channel paths passed to the DeepMIMO
%                   generator (used as params.num_paths below).
%   My, Mz        - Number of LIS reflecting elements along the y and z axes
%                   (Mx is fixed to 1 below, so M = My*Mz in total).
%   M_bar         - Number of active (sampled) LIS elements whose channels
%                   form the compressed input of the deep learning model.
%   K_DL          - Number of subcarriers used for the deep learning input
%                   (params.OFDM_limit below).
%   Pt            - Transmit power on a dB scale (combined additively with
%                   the Gt/Gr antenna gains in the SNR expressions below).
%   kbeams        - Number of top-scoring predicted beams kept when
%                   evaluating the achievable rate (see the maxk call).
%   Training_Size - Vector of training dataset sizes to sweep over.
%
% OUTPUTS:
%   Rate_DL  - Mean achievable rate of the deep-learning-selected beam(s),
%              one entry per element of Training_Size.
%   Rate_OPT - Mean achievable rate of the genie-aided (optimal) beam,
%              one entry per element of Training_Size.
%
% version 1.0 (Last edited: 2019-05-10)
%
% The definitions and equations used in this code refer (mostly) to the
% following publication:
%
% Abdelrahman Taha, Muhammad Alrabeiah, and Ahmed Alkhateeb, "Enabling
% Large Intelligent Surfaces with Compressive Sensing and Deep Learning,"
% arXiv e-prints, p. arXiv:1904.10136, Apr 2019.
% [Online]. Available: https://arxiv.org/abs/1904.10136
%
% The DeepMIMO dataset is adopted.
% [Online]. Available: http://deepmimo.net/
%
% License: This code is licensed under a Creative Commons
% Attribution-NonCommercial-ShareAlike 4.0 International License.
% [Online]. Available: https://creativecommons.org/licenses/by-nc-sa/4.0/
% If you in any way use this code for research that results in
% publications, please cite our original article mentioned above.

%% System Model Parameters

params.scenario='O1_28'; % DeepMIMO Dataset scenario: http://deepmimo.net/
params.active_BS=3; % active basestation(/s) in the chosen scenario
D_Lambda = 0.5; % Antenna spacing relative to the wavelength
BW = 100e6; % Bandwidth in Hz (ex: 100e6 --> 100 MHz)

% Transmitter (user)
Ut_row = 850; % user Ut row number
Ut_element = 90; % user Ut col number

% Receiver(s) (quantized locations in a region)
Ur_rows = [1000 1300]; % user Ur rows

Validation_Size = 6200; % Validation dataset Size
K = 512; % number of subcarriers
miniBatchSize = 500; % Size of the minibatch for the Deep Learning

% === LIS (one of the BS *locations* - considered passive here of course) ===
% Note: The axes of the antennas match the axes of the ray-tracing scenario
Mx = 1; % number of LIS reflecting elements across the x axis
M = Mx.*My.*Mz; % Total number of LIS reflecting elements

% Preallocation of output variables
Rate_DL = zeros(1,length(Training_Size));
Rate_OPT = Rate_DL;
% NOTE(review): LastValidationRMSE is filled per training size below but is
% never returned or displayed — it appears to be diagnostic only.
LastValidationRMSE = Rate_DL;

%--- Accounting SNR in each rate calculations
%--- Defining Noisy channel measurements
Gt=3; % dBi
Gr=3; % dBi
NF=5; % Noise figure at the User equipment
Process_Gain=10; % Channel estimation processing gain
noise_power_dB=-204+10*log10(BW/K)+NF-Process_Gain; % Noise power in dB
% NOTE(review): the (.)^2 on the dB-gain term squares the Gt+Gr+Pt factor —
% presumably to account for the two-hop Tx-LIS-Rx product channel; confirm
% against the SNR definition in the cited article.
SNR=10^(.1*(-noise_power_dB))*(10^(.1*(Gt+Gr+Pt)))^2; % Signal-to-noise ratio

% channel estimation noise
noise_power_bar=10^(.1*(noise_power_dB))/(10^(.1*(Gt+Gr+Pt)));

No_user_pairs = (Ur_rows(2)-Ur_rows(1))*181; % Number of (Ut,Ur) user pairs
% Which row is omitted? Starting or ending 1? otherwise No_user_pairs
% should be this value +1 --> Last row is not neglected .. ok .. from
% Ur_rows(1) till Ur_rows(2)-1 according to the channel generation code below
RandP_all = randperm(No_user_pairs).'; % Random permutation of the available dataset

%% Starting the code
disp('=============================================================');
disp([' Calculating for M = ' num2str(M)]);
Rand_M_bar_all = randperm(M);

%% Beamforming Codebook (CALLING UPA_codebook_generator)
disp('=============GENERATING REFLECTION MATRIX CODEBOOK===========');
% BF codebook parameters
over_sampling_x=1; % The beamsteering oversampling factor in the x direction
over_sampling_y=1; % The beamsteering oversampling factor in the y direction
over_sampling_z=1; % The beamsteering oversampling factor in the z direction

% Generating the BF codebook
[BF_codebook]=sqrt(Mx*My*Mz)*...
    UPA_codebook_generator(Mx,My,Mz,over_sampling_x,over_sampling_y,over_sampling_z,D_Lambda);
codebook_size=size(BF_codebook,2);

%% DeepMIMO Dataset Generation (CALLING DeepMIMO_Generator)
disp('===============GENERATING DEEPMIMO DATASET===================');
disp('-------------------------------------------------------------');
disp([' Calculating for K_DL = ' num2str(K_DL)]);
% ------ Inputs to the DeepMIMO dataset generation code ------------ %
% Note: The axes of the antennas match the axes of the ray-tracing scenario
params.num_ant_x= Mx; % Number of the UPA antenna array on the x-axis
params.num_ant_y= My; % Number of the UPA antenna array on the y-axis
params.num_ant_z= Mz; % Number of the UPA antenna array on the z-axis
params.ant_spacing=D_Lambda; % ratio of the wavelength; for half wavelength enter .5
params.bandwidth= BW*1e-9; % The bandwidth in GHz
params.num_OFDM= K; % Number of OFDM subcarriers
params.OFDM_sampling_factor=1; % The constructed channels will be calculated only at the sampled subcarriers (to reduce the size of the dataset)
params.OFDM_limit=K_DL*1; % Only the first params.OFDM_limit subcarriers will be considered when constructing the channels
params.num_paths=L; % Maximum number of paths to be considered (a value between 1 and 25), e.g., choose 1 if you are only interested in the strongest path
params.saveDataset=0;
disp([' Calculating for L = ' num2str(params.num_paths)]);
disp('==========Generating Transmitter-IRS Full Channel============');
% ------------------ DeepMIMO "Ut" Dataset Generation ----------------- %
params.active_user_first=Ut_row;
params.active_user_last=Ut_row; % Only one active user (but where is Ut_element to fully specify the user??)
DeepMIMO_dataset=DeepMIMO_generator(params); % Generator function generates data for entire rows
Ht = single(DeepMIMO_dataset{1}.user{Ut_element}.channel); % Selecting element of interest here
clear DeepMIMO_dataset

disp('===========Generating IRS-Receiver Full Channels=============');
% ------------------ DeepMIMO "Ur" Dataset Generation -----------------%
%Validation part for the actual achievable rate perf eval
Validation_Ind = RandP_all(end-Validation_Size+1:end);
[~,VI_sortind] = sort(Validation_Ind);
[~,VI_rev_sortind] = sort(VI_sortind);
%initialization
Ur_rows_step = 100; % access the dataset 100 rows at a time
Ur_rows_grid=Ur_rows(1):Ur_rows_step:Ur_rows(2);
Delta_H_max = single(0);
% First pass over all receiver chunks: only to find the maximum absolute
% value of the end-to-end product channel Ht.*Hr, used later to normalize.
for pp = 1:1:numel(Ur_rows_grid)-1 % loop for Normalizing H
    clear DeepMIMO_dataset
    params.active_user_first=Ur_rows_grid(pp);
    params.active_user_last=Ur_rows_grid(pp+1)-1;
    [DeepMIMO_dataset,params]=DeepMIMO_generator(params);
    for u=1:params.num_user
        Hr = single(conj(DeepMIMO_dataset{1}.user{u}.channel)); % conjugated since it is now downlink
        Delta_H = max(max(abs(Ht.*Hr)));
        if Delta_H >= Delta_H_max
            Delta_H_max = single(Delta_H); % storing the maximum absolute value of the end-to-end product channel for later normalization
        end
    end
end
clear Delta_H


disp('======================Sampling Channels======================');
disp([' Calculating for M_bar = ' num2str(M_bar)]);
% NOTE(review): unique() may return fewer than M_bar indices only if
% Rand_M_bar_all had duplicates; randperm guarantees it does not, so this
% keeps exactly M_bar sampled (active) element indices.
Rand_M_bar =unique(Rand_M_bar_all(1:M_bar));
Ht_bar = reshape(Ht(Rand_M_bar,:),M_bar*K_DL,1);


DL_input = single(zeros(M_bar*K_DL*2,No_user_pairs));
DL_output = single(zeros(No_user_pairs,codebook_size));
DL_output_un= single(zeros(numel(Validation_Ind),codebook_size));

Delta_H_bar_max = single(0);
count=0;
% Second pass: regenerate each receiver chunk and build the DL dataset.
for pp = 1:1:numel(Ur_rows_grid)-1
    clear DeepMIMO_dataset
    disp(['Starting received user access ' num2str(pp)]);
    params.active_user_first=Ur_rows_grid(pp);
    params.active_user_last=Ur_rows_grid(pp+1)-1;
    [DeepMIMO_dataset,params]=DeepMIMO_generator(params); % Why generating again and not sampling from the beginning?


    %% Construct Deep Learning inputs
    % NOTE(review): the nested u/uu loops assume params.num_user is an exact
    % multiple of u_step, and the DL_input/DL_output column arithmetic
    % assumes every chunk yields the same params.num_user — TODO confirm for
    % the chosen Ur_rows / Ur_rows_step (100 rows x 181 users per row here).
    u_step=100;
    Htx=repmat(Ht(:,1),1,u_step);
    Hrx=zeros(M,u_step);
    for u=1:u_step:params.num_user
        for uu=1:1:u_step
            Hr = single(conj(DeepMIMO_dataset{1}.user{u+uu-1}.channel));
            Hr_bar = reshape(Hr(Rand_M_bar,:),M_bar*K_DL,1);
            %--- Constructing the sampled channel
            n1=sqrt(noise_power_bar/2)*(randn(M_bar*K_DL,1)+1j*randn(M_bar*K_DL,1));
            n2=sqrt(noise_power_bar/2)*(randn(M_bar*K_DL,1)+1j*randn(M_bar*K_DL,1));
            H_bar = ((Ht_bar+n1).*(Hr_bar+n2));
            % Stack real and imaginary parts into one real-valued input column
            DL_input(:,u+uu-1+((pp-1)*params.num_user))= reshape([real(H_bar) imag(H_bar)].',[],1);
            Delta_H_bar = max(max(abs(H_bar)));
            if Delta_H_bar >= Delta_H_bar_max
                Delta_H_bar_max = single(Delta_H_bar);
            end
            Hrx(:,uu)=Hr(:,1);
        end
        %--- Actual achievable rate for performance evaluation
        H = Htx.*Hrx;
        H_BF=H.'*BF_codebook;
        SNR_sqrt_var = abs(H_BF);
        for uu=1:1:u_step
            if sum((Validation_Ind == u+uu-1+((pp-1)*params.num_user)))
                count=count+1;
                DL_output_un(count,:) = single(sum(log2(1+(SNR*((SNR_sqrt_var(uu,:)).^2))),1));
            end
        end
        %--- Label for the sampled channel
        R = single(log2(1+(SNR_sqrt_var/Delta_H_max).^2));
        % --- DL output normalization
        Delta_Out_max = max(R,[],2);
        % NOTE(review): if any row of R were all zeros, Rn would silently
        % retain its value from the previous iteration (or be undefined on
        % the first); this relies on the channels being non-degenerate.
        if ~sum(Delta_Out_max == 0)
            Rn=diag(1./Delta_Out_max)*R;
        end
        DL_output(u+((pp-1)*params.num_user):u+((pp-1)*params.num_user)+u_step-1,:) = 1*Rn; %%%%% Normalized %%%%%
    end
end
clear u Delta_H_bar R Rn
%-- Sorting back the DL_output_un
% (rows were appended in dataset order; restore Validation_Ind order)
DL_output_un = DL_output_un(VI_rev_sortind,:);
%--- DL input normalization
DL_input= 1*(DL_input/Delta_H_bar_max); %%%%% Normalized from -1->1 %%%%%

%% DL Beamforming (Using Deep Learning)
disp('======================DL BEAMFORMING=========================');

% ------------------ Training and Testing Datasets -----------------%
% Reshape to the 4-D image-style layout expected by trainNetwork:
% inputs as (features,1,1,samples), outputs as (1,1,classes,samples).
DL_output_reshaped = reshape(DL_output.',1,1,size(DL_output,2),size(DL_output,1));
DL_output_reshaped_un = reshape(DL_output_un.',1,1,size(DL_output_un,2),size(DL_output_un,1));
DL_input_reshaped= reshape(DL_input,size(DL_input,1),1,1,size(DL_input,2));
for dd=1:1:numel(Training_Size)
    disp([' Calculating for Dataset Size = ' num2str(Training_Size(dd))]);
    Training_Ind = RandP_all(1:Training_Size(dd));

    XTrain = single(DL_input_reshaped(:,1,1,Training_Ind));
    YTrain = single(DL_output_reshaped(1,1,:,Training_Ind));
    XValidation = single(DL_input_reshaped(:,1,1,Validation_Ind));
    YValidation = single(DL_output_reshaped(1,1,:,Validation_Ind));
    YValidation_un = single(DL_output_reshaped_un);

    % ------------------ DL Model definition -----------------%
    % Fully-connected regression network: input -> 3 hidden FC+ReLU+dropout
    % layers -> FC output sized to the codebook, trained with MSE loss.
    layers = [
        % INPUT Layer
        imageInputLayer([size(XTrain,1),1,1],'Name','input')

        % Fully Connected Layer 1 with Dropout
        fullyConnectedLayer(size(YTrain,3),'Name','Fully1')
        reluLayer('Name','relu1')
        dropoutLayer(0.5,'Name','dropout1')

        % Fully Connected Layer 2 with Dropout
        fullyConnectedLayer(4*size(YTrain,3),'Name','Fully2')
        reluLayer('Name','relu2')
        dropoutLayer(0.5,'Name','dropout2')

        % Fully Connected Layer 3 with Dropout
        fullyConnectedLayer(4*size(YTrain,3),'Name','Fully3')
        reluLayer('Name','relu3')
        dropoutLayer(0.5,'Name','dropout3')

        % OUTPUT Layer
        fullyConnectedLayer(size(YTrain,3),'Name','Fully4')
        regressionLayer('Name','outReg')];

    % Validate once per epoch-equivalent of minibatches
    if Training_Size(dd) < miniBatchSize
        validationFrequency = Training_Size(dd);
    else
        validationFrequency = floor(Training_Size(dd)/miniBatchSize);
    end

    VerboseFrequency = validationFrequency;
    options = trainingOptions('sgdm', ...
        'MiniBatchSize',miniBatchSize, ...
        'MaxEpochs',20, ...
        'InitialLearnRate',1e-1, ...
        'LearnRateSchedule','piecewise', ...
        'LearnRateDropFactor',0.5, ...
        'LearnRateDropPeriod',3, ...
        'L2Regularization',1e-4,...
        'Shuffle','every-epoch', ...
        'ValidationData',{XValidation,YValidation}, ...
        'ValidationFrequency',validationFrequency, ...
        'Plots','none', ... % 'training-progress'
        'Verbose',0, ... % 1
        'ExecutionEnvironment', 'cpu', ...
        'VerboseFrequency',VerboseFrequency);

    % ------------- DL Model Training and Prediction -----------------%
    [~,Indmax_OPT]= max(YValidation,[],3);
    Indmax_OPT = squeeze(Indmax_OPT); %Upper bound on achievable rates
    MaxR_OPT = single(zeros(numel(Indmax_OPT),1));
    [trainedNet,traininfo] = trainNetwork(XTrain,YTrain,layers,options);
    YPredicted = predict(trainedNet,XValidation);

    % --------------------- Achievable Rate --------------------------%
    % Keep the kbeams highest-scoring predicted beams per validation sample
    [~,Indmax_DL] = maxk(YPredicted,kbeams,2);
    MaxR_DL = single(zeros(size(Indmax_DL,1),1)); %True achievable rates
    for b=1:size(Indmax_DL,1)
        % Best true (un-normalized) rate among the kbeams predicted beams
        MaxR_DL(b) = max(squeeze(YValidation_un(1,1,Indmax_DL(b,:),b)));
        MaxR_OPT(b) = squeeze(YValidation_un(1,1,Indmax_OPT(b),b));
    end
    Rate_OPT(dd) = mean(MaxR_OPT);
    Rate_DL(dd) = mean(MaxR_DL);
    LastValidationRMSE(dd) = traininfo.ValidationRMSE(end);
    clear trainedNet traininfo YPredicted
    clear layers options Rate_DL_Temp MaxR_DL_Temp Highest_Rate
end
end
--------------------------------------------------------------------------------
/20200219/leave_now/LIS-DeepLearning-master/README.md:
-------------------------------------------------------------------------------- 1 | # Enabling Large Intelligent Surfaces with Compressive Sensing and Deep Learning 2 | This is a MATLAB code package related to the following article: 3 | Abdelrahman Taha, Muhammad Alrabeiah, and Ahmed Alkhateeb, “[Enabling Large Intelligent Surfaces with Compressive Sensing and Deep Learning](https://arxiv.org/abs/1904.10136),” arXiv e-prints, p. arXiv:1904.10136, Apr 2019. 4 | # Abstract of the Article 5 | Employing large intelligent surfaces (LISs) is a promising solution for improving the coverage and rate of future wireless systems. These surfaces comprise a massive number of nearly-passive elements that interact with the incident signals, for example by reflecting them, in a smart way that improves the wireless system performance. Prior work focused on the design of the LIS reflection matrices assuming full knowledge of the channels. Estimating these channels at the LIS, however, is a key challenging problem, and is associated with large training overhead given the massive number of LIS elements. This paper proposes efficient solutions for these problems by leveraging tools from compressive sensing and deep learning. First, a novel LIS architecture based on sparse channel sensors is proposed. In this architecture, all the LIS elements are passive except for a few elements that are active (connected to the baseband of the LIS controller). We then develop two solutions that design the LIS reflection matrices with negligible training overhead. In the first approach, we leverage compressive sensing tools to construct the channels at all the LIS elements from the channels seen only at the active elements. These full channels can then be used to design the LIS reflection matrices with no training overhead. 
In the second approach, we develop a deep learning based solution where the LIS learns how to optimally interact with the incident signal given the channels at the active elements, which represent the current state of the environment and transmitter/receiver locations. We show that the achievable rates of the proposed compressive sensing and deep learning solutions approach the upper bound, that assumes perfect channel knowledge, with negligible training overhead and with less than 1% of the elements being active. 6 | # Code Package Content 7 | The main script for generating Figure 10 as shown below, illustrated in the original article, is named `Fig10_generator.m`. 8 | One additional MATLAB function named `Main_fn.m` is called by the main script. Another additional MATLAB function named `UPA_codebook_generator.m` is called by the function `Main_fn.m`. 9 | ![Figure10](https://github.com/Abdelrahman-Taha/LIS-DeepLearning/blob/master/Figure10.png) 10 | The script adopts the publicly available parameterized [DeepMIMO dataset](http://deepmimo.net/ray_tracing.html?i=1) published for deep learning applications in mmWave and massive MIMO systems. The 'O1_28' scenario is adopted for this figure. 11 | 12 | **To reproduce the results, please follow these steps:** 13 | 1. Download all the files of this project and add them to the "DeepMIMO_Dataset_Generation" folder. 14 | (Note that both the DeepMIMO dataset generation files and the source data of the 'O1_28' scenario are available on [this link](https://github.com/DeepMIMO/DeepMIMO-codes)). 15 | 2. Run the file named `Fig10_generator.m` in MATLAB and the script will sequentially execute the following tasks: 16 | 1. Generate the inputs and outputs of the deep learning model. 17 | 2. Build, train, and test the deep learning model. 18 | 3. Process the deep learning outputs and generate the performance results. 
19 | 20 | If you have any questions regarding the code and used dataset, please contact [Abdelrahman Taha](https://sites.google.com/view/abdelrahmantaha). 21 | 22 | # License and Referencing 23 | This code package is licensed under a [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-nc-sa/4.0/). If you in any way use this code for research that results in publications, please cite our original article: 24 | > A. Taha, M. Alrabeiah, and A. Alkhateeb, “[Enabling Large Intelligent Surfaces with Compressive Sensing and Deep Learning](https://arxiv.org/abs/1904.10136),” arXiv e-prints, p. arXiv:1904.10136, Apr 2019. 25 | -------------------------------------------------------------------------------- /20200219/leave_now/UPA_codebook_generator.m: --------------------------------------------------------------------------------
function [F_CB,all_beams]=UPA_codebook_generator(Mx,My,Mz,over_sampling_x,over_sampling_y,over_sampling_z,ant_spacing)
% UPA_CODEBOOK_GENERATOR  Build a 3D beamsteering codebook for a uniform planar array.
%
% INPUTS:
%   Mx, My, Mz                      - number of antennas along x, y, z
%   over_sampling_x/_y/_z           - codebook oversampling factor per axis
%   ant_spacing                     - element spacing in wavelengths (sets kd)
%
% OUTPUTS:
%   F_CB      - (Mx*My*Mz) x (Nx*Ny*Nz) codebook, Kronecker product of the
%               per-axis codebooks (x index varies fastest, z slowest)
%   all_beams - one row [ix iy iz] of per-axis beam indices per codeword

% Phase constant from the element spacing
kd = 2*pi*ant_spacing;

% Element index vectors (column form) along each axis
nx = (0:Mx-1).';
ny = (0:My-1).';
nz = (0:Mz-1).';

% Codebook sizes per axis
Nx = over_sampling_x*Mx;
Ny = over_sampling_y*My;
Nz = over_sampling_z*Mz;

% ---- x axis: Nx quantized steering angles in [0, pi) ----
% The -1e-6 end point excludes pi itself, leaving exactly Nx samples.
ang_x = 0:pi/Nx:pi-1e-6;
F_CBx = sqrt(1/Mx)*exp(-1j*(kd*nx)*cos(ang_x));

% ---- y axis: swept from +20 deg down to -307 deg ----
% NOTE(review): unlike the x/z axes, the y codebook applies the quantized
% angle directly as a phase (no kd and no cosine). This matches the
% upstream code and is kept as-is; confirm against the original design.
span_y = (20+307)*pi/180;
ang_y = 20*pi/180:-span_y/Ny:-307*pi/180+1e-6;
F_CBy = sqrt(1/My)*exp(-1j*ny*ang_y);

% ---- z axis: Nz quantized steering angles in [0, pi) ----
ang_z = 0:pi/Nz:pi-1e-6;
F_CBz = sqrt(1/Mz)*exp(-1j*(kd*nz)*cos(ang_z));

% ---- Overall 3D codebook: kron(z, kron(y, x)) ----
F_CB = kron(F_CBz,kron(F_CBy,F_CBx));

% ---- Per-axis beam indices for every codeword (x fastest, z slowest) ----
ix = 1:Nx;
iy = 1:Ny;
iz = 1:Nz;
Ix = repmat(ix,1,Ny*Nz).';
Iy = repmat(reshape(repmat(iy,Nx,1),1,Nx*Ny),1,Nz).';
Iz = reshape(repmat(iz,Nx*Ny,1),1,Nx*Ny*Nz).';
stacked = cat(3,Ix.',Iy.',Iz.');
all_beams = reshape(stacked,[],3);
end
--------------------------------------------------------------------------------
/20200219/read_raytracing.m: --------------------------------------------------------------------------------
% --------- DeepMIMO: A Generic Dataset for mmWave and massive MIMO ------%
% Author: Ahmed Alkhateeb
% Date: Sept. 5, 2018
% Goal: Encouraging research on ML/DL for mmWave MIMO applications and
% providing a benchmarking tool for the developed algorithms
% ---------------------------------------------------------------------- %

% This function reads the scenario dataset and reorganizes it from its compact representation
%
% INPUTS:
%   filename_DoD - file holding the compact direction-of-departure stream
%   filename_CIR - file holding the compact channel-impulse-response stream
%   filename_Loc - file holding one row per receiver; columns 2:4 are the location
%   num_paths    - maximum number of paths to keep per user
%   user_first   - index of the first user to return
%   user_last    - index of the last user to return
%
% OUTPUT:
%   channel_params - 1 x (user_last-user_first+1) struct array with fields
%                    DoD_phi, DoD_theta, phase, ToA, power, num_paths, loc
function [channel_params]=read_raytracing(filename_DoD,filename_CIR,filename_Loc,num_paths,user_first,user_last)

DoD_array=importdata(filename_DoD);
CIR_array=importdata(filename_CIR);
Loc_array=importdata(filename_Loc);

% First element of the DoD stream is the total number of users in the scenario
total_num_users=DoD_array(1);
pointer=0; % read cursor into the (parallel) DoD/CIR streams

DoD_array(1)=[];
CIR_array(1)=[];

channel_params_all=struct('DoD_phi',[],'DoD_theta',[],'phase',[],'ToA',[],'power',[],'num_paths',[],'loc',[]);

% Both streams hold one variable-length record per user: 2 header values
% followed by 4 values per path. The second header value is the stored path
% count; the first header value is not read here (presumably a receiver id
% -- TODO confirm against the dataset format description).
% Generates for all users then selects the required subset of users
% Inefficient..
for Receiver_Number=1:total_num_users
    max_paths=DoD_array(pointer+2); % number of paths stored for this user
    num_path_limited=min(num_paths,max_paths); % keep at most num_paths of them
    if (max_paths>0)
        Relevant_data_length=max_paths*4;
        Relevant_limited_data_length=num_path_limited*4;

        Relevant_DoD_array=DoD_array(pointer+3:pointer+2+Relevant_data_length);
        Relevant_CIR_array=CIR_array(pointer+3:pointer+2+Relevant_data_length);

        % Each path is a 4-tuple; only tuple entries 2:4 are read below
        % (entry 1 of each tuple is skipped by the 2:4:, 3:4:, 4:4: strides)
        channel_params_all(Receiver_Number).DoD_phi=Relevant_DoD_array(2:4:Relevant_limited_data_length); % Departure Azimuth
        channel_params_all(Receiver_Number).DoD_theta=Relevant_DoD_array(3:4:Relevant_limited_data_length); % Departure Elevation
        channel_params_all(Receiver_Number).phase=Relevant_CIR_array(2:4:Relevant_limited_data_length); % Path Phase
        channel_params_all(Receiver_Number).ToA=Relevant_CIR_array(3:4:Relevant_limited_data_length); % Path Delay
        channel_params_all(Receiver_Number).power=1e-3*(10.^(.1*(30+Relevant_CIR_array(4:4:Relevant_limited_data_length)))); % Path Power -- NOTE(review): the +30 dB offset and 1e-3 factor suggest a dBm-to-Watt conversion; confirm the stored units
        channel_params_all(Receiver_Number).num_paths=num_path_limited;
        channel_params_all(Receiver_Number).loc=Loc_array(Receiver_Number,2:4); % Receiver Location
    else % If no paths exist, emit an empty record but still keep the location
        channel_params_all(Receiver_Number).DoD_phi=[];
        channel_params_all(Receiver_Number).DoD_theta=[];
        channel_params_all(Receiver_Number).phase=[];
        channel_params_all(Receiver_Number).ToA=[];
        channel_params_all(Receiver_Number).power=[];
        channel_params_all(Receiver_Number).num_paths=0;
        channel_params_all(Receiver_Number).loc=Loc_array(Receiver_Number,2:4);
    end
    pointer=pointer+max_paths*4+2; %update pointer value and continue (always advance by the FULL stored record, even when truncated to num_paths)
end

channel_params=channel_params_all(1,user_first:user_last); % Selects the required users

end -------------------------------------------------------------------------------- /20200219/sinr_CONSTRAINT.m: -------------------------------------------------------------------------------- 1 | 
function [desired_fun, interference_fun, constr_fun] = sinr_CONSTRAINT(V, b, R, SINR_target, sigma_2, all_users, int_users_matrix)
% SINR_CONSTRAINT  Evaluate desired power, interference power, and the SINR
% feasibility margin for each user, given a candidate lifted reflection matrix V.
%
% INPUTS:
%   V                - candidate (lifted) reflection matrix
%   b                - cell array; b{k,m} is the direct-link term of user m seen at user k
%   R                - cell array; R{k,m} is the lifted channel matrix of user m seen at user k
%   SINR_target      - required SINR per user
%   sigma_2          - noise power
%   all_users        - vector of user indices to evaluate
%   int_users_matrix - row k lists the users interfering with user k
%
% OUTPUTS (one entry per user index k):
%   desired_fun      - received power of the intended signal
%   interference_fun - total received interference power
%   constr_fun       - desired - SINR_target*(interference + noise); >= 0 means the
%                      SINR constraint of user k is satisfied
for k = all_users
    % Intended-signal power at user k
    desired_fun(k) = trace(real(R{k,k}*V)) + square_abs(b{k,k});
    % Accumulate interference from every user listed in row k
    interference_sum = 0;
    for m = int_users_matrix(k,:)
        interference_sum = interference_sum + trace(real(R{k,m}*V)) + square_abs(b{k,m});
    end
    interference_fun(k) = interference_sum;
    % Feasibility margin of user k's SINR constraint
    constr_fun(k) = desired_fun(k) - SINR_target * (interference_fun(k) + sigma_2);
end
end -------------------------------------------------------------------------------- /DeepMIMO_generator.m: -------------------------------------------------------------------------------- 1 | % --------- DeepMIMO: A Generic Dataset for mmWave and massive MIMO ------% 2 | % Author: Ahmed Alkhateeb 3 | % Date: Sept. 5, 2018 4 | % Goal: Encouraging research on ML/DL for mmWave MIMO applications and 5 | % providing a benchmarking tool for the developed algorithms 6 | % ---------------------------------------------------------------------- % 7 | function [DeepMIMO_dataset,params]=DeepMIMO_generator(params,deepmimo_root_path) 8 | 9 | % -------------------------- DeepMIMO Dataset Generation -----------------% 10 | fprintf(' DeepMIMO Dataset Generation started \n') 11 | % Read scenario parameters 12 | load(strcat(deepmimo_root_path,'/RayTracing Scenarios/',params.scenario,'/',params.scenario,'.params.mat')) 13 | params.num_BS=num_BS; 14 | num_rows=max(min(user_grids(:,2),params.active_user_last)-max(user_grids(:,1),params.active_user_first)+1,0); 15 | params.num_user=sum(num_rows.*user_grids(:,3)); % total number of users 16 | 17 | current_grid=min(find(max(params.active_user_first,user_grids(:,2))==user_grids(:,2))); 18 | user_first=sum((max(min(params.active_user_first,user_grids(:,2))-user_grids(:,1)+1,0)).*user_grids(:,3))-user_grids(current_grid,3)+1; 19 | user_last=user_first+params.num_user-1; 20 | 21 | 
BW=params.bandwidth*1e9; % Bandwidth in Hz 22 | 23 | % Reading ray tracing data 24 | fprintf(' Reading the channel parameters of the ray-tracing scenario %s', params.scenario) 25 | count_done=0; 26 | reverseStr=0; 27 | percentDone = 100 * count_done / length(params.active_BS); 28 | msg = sprintf('- Percent done: %3.1f', percentDone); %Don't forget this semicolon 29 | fprintf([reverseStr, msg]); 30 | reverseStr = repmat(sprintf('\b'), 1, length(msg)); 31 | 32 | for t=1:1:params.num_BS 33 | if sum(t == params.active_BS) ==1 34 | filename_DoD=strcat(deepmimo_root_path,'/RayTracing Scenarios/',params.scenario,'/',params.scenario,'.',int2str(t),'.DoD.mat'); 35 | filename_CIR=strcat(deepmimo_root_path,'/RayTracing Scenarios/',params.scenario,'/',params.scenario,'.',int2str(t),'.CIR.mat'); 36 | filename_Loc=strcat(deepmimo_root_path,'/RayTracing Scenarios/',params.scenario,'/',params.scenario,'.Loc.mat'); 37 | [TX{t}.channel_params]=read_raytracing(filename_DoD,filename_CIR,filename_Loc,params.num_paths,user_first,user_last); 38 | 39 | count_done=count_done+1; 40 | percentDone = 100 * count_done / length(params.active_BS); 41 | msg = sprintf('- Percent done: %3.1f', percentDone); %Don't forget this semicolon 42 | fprintf([reverseStr, msg]); 43 | reverseStr = repmat(sprintf('\b'), 1, length(msg)); 44 | end 45 | end 46 | 47 | % Constructing the channel matrices 48 | TX_count=0; 49 | for t=1:1:params.num_BS 50 | if sum(t == params.active_BS) ==1 51 | fprintf('\n Constructing the DeepMIMO Dataset for BS %d', t) 52 | reverseStr=0; 53 | percentDone = 0; 54 | msg = sprintf('- Percent done: %3.1f', percentDone); %Don't forget this semicolon 55 | fprintf([reverseStr, msg]); 56 | reverseStr = repmat(sprintf('\b'), 1, length(msg)); 57 | TX_count=TX_count+1; 58 | for user=1:1:params.num_user 59 | [DeepMIMO_dataset{TX_count}.user{user}.channel]=construct_DeepMIMO_channel(TX{t}.channel_params(user),params.num_ant_x,params.num_ant_y,params.num_ant_z, ... 
60 | BW,params.num_OFDM,params.OFDM_sampling_factor,params.OFDM_limit,params.ant_spacing); 61 | DeepMIMO_dataset{TX_count}.user{user}.loc=TX{t}.channel_params(user).loc; 62 | 63 | percentDone = 100* round(user / params.num_user,2); 64 | msg = sprintf('- Percent done: %3.1f', round(percentDone,2)); %Don't forget this semicolon 65 | fprintf([reverseStr, msg]); 66 | reverseStr = repmat(sprintf('\b'), 1, length(msg)); 67 | end 68 | end 69 | end 70 | 71 | if params.saveDataset==1 72 | sfile_DeepMIMO=strcat(deepmimo_root_path,'/DeepMIMO Dataset/DeepMIMO_dataset.mat'); 73 | save(sfile_DeepMIMO,'DeepMIMO_dataset','-v7.3'); 74 | end 75 | 76 | fprintf('\n DeepMIMO Dataset Generation completed \n') -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DeepIRS 2 | A repository for the DeepIRS project codes 3 | 4 | main_deepIRS.m is the main file (standing for deep intelligent reflecting surfaces) 5 | 6 | You can run it in MATLAB in a section-by-section fashion. 7 | 8 | It calls several other functions/scripts, some of them are developed by me, and others are borrowed from the code by Al-Khateeb et. al. when generating channels from his DeepMIMO dataset. 9 | 10 | Channels are currently generated simply as circularly-symmetric complex Gaussian to have the direct link as well. Still awaiting the updated dataset from Al-Khateeb to use his ray-traced data. 
11 | -------------------------------------------------------------------------------- /alternating_optimization.m: -------------------------------------------------------------------------------- 1 | %disp('Running alternating optimization algorithm') 2 | % r=1; % iteration index 3 | % Initialize reflection matrix theta 4 | beta_vec = ones(M,1); % Fixed to 1 for now as in the paper 5 | theta_vec = 2*pi*rand(M,1); % Uniformly randomized from 0 to 2*pi 6 | theta_mat= diag(beta_vec.*exp(1i*theta_vec)); 7 | 8 | H = Ht'*(theta_mat')*Hr + Hd; 9 | 10 | % Check rank criterion for feasbility of the initial theta choice 11 | while ~(rank(H) == N_users) % if infeasible choice, randomize and check again 12 | %disp('infeasible initial choice of theta, .. reselecting ..') 13 | theta_vec = 2*pi*rand(M,1); % Uniformly randomized from 0 to 2*pi 14 | theta_mat= diag(beta_vec.*exp(1i*theta_vec)); 15 | H = Ht'*(theta_mat')*Hr + Hd; 16 | end 17 | 18 | cvx_status = 'nothing'; % initialize 19 | 20 | while (frac_error > eps_iter) && ~contains(cvx_status,'Infeasible','IgnoreCase',true) 21 | % if mod(r,1e2)==0 22 | % %disp(['Iteration r =' num2str(r)]) 23 | % end 24 | 25 | H = Ht'*(theta_mat')*Hr + Hd; 26 | 27 | % ==== Optimize W while fixing theta ==== BS Transmit Beamforming 28 | %disp('Active Beamformer Design') 29 | 30 | [W, tau, INTERF, cvx_status, cvx_optval] = iter_opt_prob_1(H,sigma_2,SINR_target,int_users_matrix); 31 | 32 | if cvx_optval==Inf 33 | %disp('Infeasible .. 
passing this iteration') 34 | continue 35 | end 36 | %disp(['CVX Status: ' cvx_status ', CVX_optval = ' num2str(10*log10(cvx_optval*1000)) ' dBm']) 37 | %disp(['CVX Status: ' cvx_status ', CVX_optval = ' num2str(10*log10(trace(W'*W)*1000)) ' dBm']) 38 | 39 | frac_error = abs(obj_last - cvx_optval)/obj_last *100; 40 | obj_last = cvx_optval; 41 | 42 | achieved_SINR = zeros(1,N_users); 43 | % Actual achieved SINR 44 | for k = all_users 45 | achieved_SINR(k) = (norm((H(:,k)')*W(:,k)))^2/(norm(INTERF(:,k)))^2; 46 | end 47 | 48 | 49 | % ==== Optimize theta while fixing W ==== IRS Reflection Matrix 50 | % (P4') in paper 51 | %disp('Passive Beamformer Design') 52 | 53 | [V, a_aux, a, b, R, desired, interference, SINR_CONSTR, cvx_status, cvx_optval] = iter_opt_prob_2(W, Ht,Hr,Hd,sigma_2,SINR_target,int_users_matrix); 54 | 55 | %disp(['CVX Status: ' cvx_status]) 56 | 57 | if ~contains(cvx_status,'Infeasible','IgnoreCase',true) 58 | %disp('Running Gaussian Randomization') 59 | [U,D] = eig(full(V)); % Eigenvalue Decomposition 60 | if rank(full(V)) == 1 61 | v_bar = U*sqrt(D); 62 | theta_vec = angle(v_bar(1:M)/v_bar(M+1)); 63 | v = exp(-1i*theta_vec); 64 | theta_mat = diag(v); 65 | 66 | else % Apply Gaussian Randomization 67 | 68 | num_rands = 1e3; % number of randomizations 69 | 70 | % Generate Gaussian random vector ~ CN(0, I) 71 | %gpudev = gpuDevice(); 72 | %reset(gpudev); 73 | r_vec_matrix = (1/sqrt(2))*((mvnrnd(zeros(M+1,1),eye(M+1),num_rands) + 1i * mvnrnd(zeros(M+1,1),eye(M+1), num_rands)).'); %gpuArray() 74 | v_bar_matrix = U*sqrt(D)*r_vec_matrix; 75 | 76 | best_index = 0; 77 | best_value = -1e8; 78 | %v_bar_matrix = exp(1i*2*pi*rand(M+1,num_rands)); 79 | 80 | for randmzn_index = 1:num_rands 81 | v_bar_vec = v_bar_matrix(:,randmzn_index); 82 | V_rand = v_bar_vec*(v_bar_vec'); 83 | 84 | [~, ~, constr_value] = sinr_CONSTRAINT(V_rand, b, R, SINR_target, sigma_2, all_users, int_users_matrix); 85 | 86 | % Check feasibility and best value 87 | feasibility_check = prod( 
constr_value >= 0 ); 88 | better_value_check = (sum(constr_value) > best_value); 89 | if feasibility_check && better_value_check 90 | best_index = randmzn_index; 91 | best_value = sum(constr_value); 92 | end 93 | end 94 | 95 | if best_index ~= 0 96 | % select best v_bar that maximizes SINR_CONSTR 97 | v_bar = v_bar_matrix(:,best_index); 98 | theta_vec = angle(v_bar(1:M)/v_bar(M+1)); 99 | v = exp(-1i*theta_vec); 100 | theta_mat = diag(v); 101 | else 102 | cvx_status = 'Infeasible'; 103 | end 104 | 105 | %disp(['CVX Status after randomization: ' cvx_status]) 106 | end 107 | end 108 | 109 | % % Increment iteration index 110 | % r = r+1; 111 | end -------------------------------------------------------------------------------- /construct_DeepMIMO_channel.m: -------------------------------------------------------------------------------- 1 | % --------- DeepMIMO: A Generic Dataset for mmWave and massive MIMO ------% 2 | % Author: Ahmed Alkhateeb 3 | % Date: Sept. 5, 2018 4 | % Goal: Encouraging research on ML/DL for mmWave MIMO applications and 5 | % providing a benchmarking tool for the developed algorithms 6 | % ---------------------------------------------------------------------- % 7 | function [channel]=construct_DeepMIMO_channel(params,num_ant_x,num_ant_y,num_ant_z,BW,... 
8 | ofdm_num_subcarriers,output_subcarrier_downsampling_factor,output_subcarrier_limit,antenna_spacing_wavelength_ratio) 9 | 10 | kd=2*pi*antenna_spacing_wavelength_ratio; 11 | ang_conv=pi/180; 12 | Ts=1/BW; 13 | 14 | Mx_Ind=0:1:num_ant_x-1; 15 | My_Ind=0:1:num_ant_y-1; 16 | Mz_Ind=0:1:num_ant_z-1; 17 | Mxx_Ind=repmat(Mx_Ind,1,num_ant_y*num_ant_z)'; 18 | Myy_Ind=repmat(reshape(repmat(My_Ind,num_ant_x,1),1,num_ant_x*num_ant_y),1,num_ant_z)'; 19 | Mzz_Ind=reshape(repmat(Mz_Ind,num_ant_x*num_ant_y,1),1,num_ant_x*num_ant_y*num_ant_z)'; 20 | M=num_ant_x*num_ant_y*num_ant_z; 21 | 22 | k=0:output_subcarrier_downsampling_factor:output_subcarrier_limit-1; 23 | num_sampled_subcarriers=length(k); 24 | channel=zeros(M,num_sampled_subcarriers); 25 | 26 | for l=1:1:params.num_paths 27 | gamma_x=1j*kd*sin(params.DoD_theta(l)*ang_conv)*cos(params.DoD_phi(l)*ang_conv); 28 | gamma_y=1j*kd*sin(params.DoD_theta(l)*ang_conv)*sin(params.DoD_phi(l)*ang_conv); 29 | gamma_z=1j*kd*cos(params.DoD_theta(l)*ang_conv); 30 | gamma_comb=Mxx_Ind*gamma_x+Myy_Ind*gamma_y + Mzz_Ind*gamma_z; 31 | array_response=exp(gamma_comb); 32 | delay_normalized=params.ToA(l)/Ts; 33 | channel=channel+array_response*sqrt(params.power(l)/ofdm_num_subcarriers)*exp(1j*params.phase(l)*ang_conv)*exp(-1j*2*pi*(k/ofdm_num_subcarriers)*delay_normalized); 34 | end 35 | 36 | end -------------------------------------------------------------------------------- /drl_IRS.m: -------------------------------------------------------------------------------- 1 | %% Script Description and Credits 2 | % This script implements the algorithm proposed in the paper preprint: 3 | % Chongwen Huang, Ronghong Mo and Chau Yuen, "Reconfigurable Intelligent 4 | % Surface Assisted Multiuser MISO Systems Exploiting Deep Reinforcement 5 | % Learning" currently available on ARXIV: https://arxiv.org/abs/2002.10072 6 | % Accepted by IEEE JSAC special issue on Multiple Antenna Technologies for Beyond 5G 7 | 8 | % The actor network state is the stacked 
vectorized form of all channels in 9 | % addition to previous action, transmit/received powers as will be specified shortly. 10 | 11 | % The actor network action is a stacked form of the active/passive beamforming matrices. 12 | 13 | % The reward is the resulting sum throughput. 14 | 15 | % Both states and actions are continuous here, thus using DDPG learning agent. 16 | 17 | % Code written by Mohammad Galal Khafagy 18 | % Postdoctoral Researcher, American University in Cairo (AUC) 19 | % March 2020 20 | 21 | % This paper does not take the direct link into account, but we can still 22 | % take it here if we have it in the dataset 23 | % Channel model is a narrow-band model (frequency-flat channel fading), so 24 | % it can be applied to an OFDM subcarrier for example 25 | 26 | % This code is using the Reinforcement Learning Toolbox of MATLAB 27 | disp('---------- Running DDPG --------') 28 | 29 | % Environment and Agent are first created, then trained over the channels. 30 | 31 | %% Simulation Parameters (in Table 1 in the paper) 32 | % close ALL FORCE 33 | % For more info about DDPG agent in MATLAB, see: 34 | % https://www.mathworks.com/help/reinforcement-learning/ug/ddpg-agents.html#mw_086ee5c6-c185-4597-aefc-376207c6c24c 35 | % For other supported Reinforcement Learning agents see: 36 | % https://www.mathworks.com/help/reinforcement-learning/ug/create-agents-for-reinforcement-learning.html 37 | 38 | % ---------- For Separate ACTOR/CRITIC AGENTS ----------------- 39 | used_optmzr = 'adam'; % Used optimizer 40 | used_device = 'cpu'; % gpu or cpu 41 | % Learning and Decay rates 42 | % Actor 43 | u_a = 1e-1; % learning rate for training actor network update 44 | lam_a= 1e-5; % decaying rate for training actor network update 45 | % Target Actor 46 | t_a = 1e-1; % learning rate for target actor network update 47 | % Critic 48 | u_c = 1e-1; % learning rate for training critic network update 49 | lam_c= 1e-5; % decaying rate for training critic network update 50 | % 
Target Critic 51 | t_c = 1e-1; % learning rate for target critic network update 52 | 53 | % ------------- Created DDPG AGENT Options ------------------- 54 | D = 2e5; % Length of replay experience memory window 55 | W_exp = 16; % Number of experiences in the mini-batch 56 | gam = 0.000001; % Discount factor 57 | U = 1; % Number of steps synchronizing target with training network 58 | 59 | % ------------- For DDPG AGENT Training ---------------------- 60 | %N_epis = 5e2; % Number of episodes (changed due to DUPLICATE NAME) 61 | %T = 2e4; % Number of steps per episode 62 | 63 | %% Memory Preallocation 64 | % N_users = size(Hr,2); 65 | % M = size(Ht,1); 66 | % N_BS = size(Ht,2); 67 | 68 | % Channel Observations 69 | chan_obs_len = 2*(M * N_users + M * N_BS + N_BS* N_users); % channel observation (state) length (multiplied by 2 to account for complex nature) 70 | % Action length (number of reflecting elements + size of BS beamforming matrix) 71 | % multiplied by 2 for complex nature 72 | act_len = M+ 2*N_BS* N_users; % 2*(M + N_BS* N_users); 73 | 74 | transmit_pow_len = 2*N_users; 75 | receive_pow_len = 2*N_users^2; 76 | 77 | chan_state_design = 2; 78 | switch chan_state_design 79 | case 1 80 | obs_len = chan_obs_len + transmit_pow_len + receive_pow_len + act_len; 81 | past_action_default = 1e-6*ones(transmit_pow_len + receive_pow_len + act_len,1); 82 | case 2 83 | obs_len = chan_obs_len; 84 | end 85 | 86 | 87 | %% Create Environment 88 | disp('------- Creating Environment --------') 89 | % https://www.mathworks.com/help/reinforcement-learning/matlab-environments.html 90 | % https://www.mathworks.com/help/reinforcement-learning/ug/create-matlab-environments-for-reinforcement-learning.html 91 | % https://www.mathworks.com/help/reinforcement-learning/ug/create-custom-reinforcement-learning-environment-in-matlab.html 92 | 93 | % The environment at a certain state receives the action and outputs the 94 | % new state and the returned reward 95 | 96 | % Our reward here in 
the paper is the total throughput, which we can change 97 | % later to the (negative) sum power if needed 98 | 99 | % ==== Specification of Observation and Action ======= 100 | % Observation (state) specification 101 | % State at time t is specified in the paper to be composed as follows: 102 | % 1- Transmission power in the t^{th} time step. (Real and Imaginary power are separated) 103 | % 2- The received power of all users in the t^{th} time step. 104 | % 3- The previous action in the (t-1)^{th} time step. 105 | % 4- The channels. 106 | 107 | obs_lower_lim = -Inf; 108 | obs_upper_lim = Inf; 109 | obsInfo = rlNumericSpec([obs_len 1], 'LowerLimit', obs_lower_lim, 'UpperLimit',obs_upper_lim); 110 | obsInfo.Name = 'observation'; 111 | obsInfo.Description = 'instantaneously observed channels, transmit and rec. powers, and past action'; 112 | 113 | % Action Specification 114 | x = 0.002; 115 | act_lower_lim = [-x*ones(act_len-M,1); 0*ones(M,1)]; 116 | act_upper_lim = [x*ones(act_len-M,1); 2*pi*ones(M,1)]; 117 | actInfo = rlNumericSpec([act_len, 1], 'LowerLimit', act_lower_lim, 'UpperLimit',act_upper_lim); 118 | actInfo.Name = 'action'; 119 | actInfo.Description = 'stacked active and passive beamformers'; 120 | 121 | ResetHandle = @() resetfcn(H_mat, sigma_2, SINR_target, chan_state_design); 122 | StepHandle = @(action, LoggedSignals) stepfcn(action, LoggedSignals); 123 | 124 | % Create Environment 125 | MU_MISO_IRS_env = rlFunctionEnv(obsInfo,actInfo,StepHandle,ResetHandle); 126 | 127 | % environment is validated! 
validateEnvironment(MU_MISO_IRS_env) 128 | 129 | %% Create Learning Agent (Actor and Critic) 130 | disp('===== Creating DDPG Agent =====') 131 | 132 | % https://www.mathworks.com/help/reinforcement-learning/ug/ddpg-agents.html 133 | % A DDPG agent consists of two agents: an actor and a critic, cooperating 134 | % together to get a better output action 135 | 136 | % ---- Whitening Process for Input Decorrelation --- NOT DONE YET 137 | 138 | %% 1- Create an actor using an rlDeterministicActorRepresentation object. 139 | disp('------- Creating Actor --------') 140 | % 1-a) Actor Network 141 | actor_layers = [ 142 | % INPUT Layer 143 | imageInputLayer([obs_len,1],'Name',obsInfo.Name,'Normalization','none') 144 | % Hidden Fully Connected Layer 1 with/without Dropout 145 | fullyConnectedLayer(act_len,'Name','a_fully1') 146 | tanhLayer('Name','a_tanh1') 147 | %dropoutLayer(0.5,'Name','a_dropout1') 148 | % Batch Normalization Layer 1 149 | %batchNormalizationLayer('Name','a_batchNorm1') 150 | % Hidden Fully Connected Layer 2 with/without Dropout 151 | fullyConnectedLayer(4*act_len,'Name','a_fully2') 152 | tanhLayer('Name','a_tanh2') 153 | %dropoutLayer(0.5,'Name','a_dropout2') 154 | % Batch Normalization Layer 2 155 | %batchNormalizationLayer('Name','a_batchNorm2') 156 | % OUTPUT Layer 157 | fullyConnectedLayer(act_len,'Name',actInfo.Name) 158 | regressionLayer('Name','a_reg_o') 159 | % Power and Modular Normalization Layer still 160 | % scalingLayer 161 | ]; 162 | 163 | actor_net = layerGraph(actor_layers); 164 | 165 | % plot actor network 166 | % plot(actor_net) 167 | 168 | % https://www.mathworks.com/help/reinforcement-learning/ref/rlrepresentationoptions.html 169 | actor_repOpts = rlRepresentationOptions(... 170 | 'Optimizer',used_optmzr,... 171 | 'LearnRate', u_a,... 
172 | 'UseDevice',used_device); 173 | % yet to define the decay rate and differentiate between target and actual 174 | % networks 175 | 176 | % Create actor agent 177 | ACTOR = rlDeterministicActorRepresentation(actor_net,... 178 | obsInfo,... 179 | actInfo,... 180 | 'Observation',obsInfo.Name,... 181 | 'Action',actInfo.Name,... 182 | actor_repOpts); 183 | 184 | %% 1-b) Critic Network 185 | % https://www.mathworks.com/help/reinforcement-learning/ref/rlqvaluerepresentation.html 186 | disp('------- Creating Critic --------') 187 | 188 | % observation path input layer 189 | obsPath = [imageInputLayer([obs_len 1 1], 'Normalization','none','Name',obsInfo.Name) 190 | fullyConnectedLayer(1,'Name','obsout') 191 | ]; 192 | % action path input layers 193 | actPath = [imageInputLayer([act_len 1 1], 'Normalization','none','Name',actInfo.Name) 194 | fullyConnectedLayer(1,'Name','actout') 195 | ]; 196 | % common path 197 | comPath = [additionLayer(2,'Name', 'add') 198 | fullyConnectedLayer(1, 'Name', 'critic_input')]; 199 | critic_net = addLayers(layerGraph(obsPath),actPath); 200 | critic_net = addLayers(critic_net,comPath); 201 | 202 | % connect layers 203 | critic_net = connectLayers(critic_net,'obsout','add/in1'); 204 | critic_net = connectLayers(critic_net,'actout','add/in2'); 205 | 206 | critic_layers = [ 207 | % Hidden Fully Connected Layer 1 with/without Dropout 208 | fullyConnectedLayer(obs_len+act_len,'Name','c_fully1') 209 | tanhLayer('Name','c_tanh1') 210 | %dropoutLayer(0.5,'Name','c_dropout1') 211 | % Batch Normalization Layer 1 212 | %batchNormalizationLayer('Name','c_batchNorm1') 213 | % Hidden Fully Connected Layer 2 with/without Dropout 214 | fullyConnectedLayer(4*(obs_len+act_len),'Name','c_fully2') 215 | tanhLayer('Name','c_tanh2') 216 | %dropoutLayer(0.5,'Name','c_dropout2') 217 | % Batch Normalization Layer 2 218 | % batchNormalizationLayer('Name','c_batchNorm2') 219 | % OUTPUT Layer 220 | fullyConnectedLayer(1,'Name','q_approx') 221 | 
regressionLayer('Name','c_outReg') 222 | ]; 223 | 224 | critic_net = addLayers(critic_net,critic_layers); 225 | 226 | % connect input and rest of layers 227 | critic_net = connectLayers(critic_net,'critic_input','c_fully1'); 228 | 229 | critic_repOpts = rlRepresentationOptions(... 230 | 'Optimizer',used_optmzr,... 231 | 'LearnRate', u_c,... 232 | 'UseDevice',used_device); 233 | % yet to define the decay rate and differentiate between target and actual 234 | % networks 235 | 236 | % Create critic agent 237 | CRITIC = rlQValueRepresentation(critic_net,... 238 | obsInfo,... 239 | actInfo,... 240 | 'Observation',obsInfo.Name,... 241 | 'Action',actInfo.Name,... 242 | critic_repOpts); 243 | 244 | % plot(critic_net) 245 | 246 | %% 3- Specify DDPG Agent options 247 | disp('------- Specifying DDPG Agent Options --------') 248 | DDPG_agent_OPTIONS = rlDDPGAgentOptions(... 249 | 'DiscountFactor',gam, ... 250 | 'ExperienceBufferLength',D,... 251 | 'MiniBatchSize', W_exp,... 252 | 'TargetUpdateFrequency', U,... 253 | 'ResetExperienceBufferBeforeTraining',0,... 254 | 'NumStepsToLookAhead',T,... 
255 | 'SaveExperienceBufferWithAgent',0); 256 | 257 | % Create here ExplorationModel noise object: exploration aspect 258 | DDPG_agent_OPTIONS.NoiseOptions.Variance = [1e-5*ones(2*N_BS* N_users,1); 1e-3*2*pi*ones(M,1)]; 259 | DDPG_agent_OPTIONS.NoiseOptions.VarianceMin= [0*ones(2*N_BS* N_users,1); 1e-4*2*pi*ones(M,1)]; % to always guarantee exploration 260 | DDPG_agent_OPTIONS.NoiseOptions.VarianceDecayRate = 1e-4; 261 | 262 | %% 4- Create DDPG agent 263 | disp('------- Creating DDPG Agent --------') 264 | % https://www.mathworks.com/help/reinforcement-learning/ref/rlddpgagent.html 265 | DDPG_AGENT = rlDDPGAgent(ACTOR,CRITIC,DDPG_agent_OPTIONS); 266 | 267 | %% Let the agent interact with the environment 268 | % https://www.mathworks.com/help/reinforcement-learning/ug/train-reinforcement-learning-agents.html 269 | % https://www.mathworks.com/help/reinforcement-learning/ref/rl.agent.rlqagent.train.html 270 | disp('------- Training Agent --------') 271 | 272 | % Training Options 273 | DDPG_train_options = rlTrainingOptions(... 274 | 'MaxEpisodes',N_epis,... 275 | 'MaxStepsPerEpisode',T,... 276 | 'StopTrainingCriteria', 'GlobalStepCount',... 277 | 'StopTrainingValue', 5e5,...%'UseParallel', true,...%'Parallelization', 'async',... 278 | 'ScoreAveragingWindowLength',T,... 279 | 'Verbose', true,... 280 | 'Plots', 'training-progress',... 281 | 'StopOnError', 'off'); 282 | 283 | % DDPG_train_options.ParallelizationOptions.Mode = 284 | 285 | % Train DDPG Agent 286 | trainStats = train(DDPG_AGENT,... % Agent 287 | MU_MISO_IRS_env,... 
function [W, tau, INTERFERENCE, cvx_status, cvx_optval] = iter_opt_prob_1(H,sigma_2,SINR_target,int_users_matrix)
% ITER_OPT_PROB_1  BS precoder design step of the alternating optimization.
%
% For a fixed effective channel H, finds the transmit precoding matrix W
% that minimizes total transmit power subject to a per-user SINR target,
% formulated as a second-order cone program and solved with CVX.
%
% Inputs:
%   H                - N_BS x N_users effective downlink channel matrix
%   sigma_2          - receiver noise power
%   SINR_target      - required SINR (linear scale) for every user
%   int_users_matrix - N_users x (N_users-1); row k lists the users that
%                      interfere with user k
%
% Outputs:
%   W            - N_BS x N_users optimized precoding matrix
%   tau          - auxiliary variable; the objective is tau^2
%   INTERFERENCE - per-user stacked interference-plus-noise vectors
%   cvx_status   - CVX solver status string
%   cvx_optval   - optimal objective value reported by CVX

N_BS    = size(H,1);   % number of BS antennas
N_users = size(H,2);   % number of served users

% Reset any leftover CVX state from a previous (possibly failed) solve.
cvx_clear
clear W tau INTERFERENCE cvx_optval

cvx_begin
    cvx_quiet(true)
    cvx_solver sedumi     % underlying solver (alternatives: SDPT3, Mosek)
    cvx_precision best    % tighten the CVX numerical precision

    % Optimization variables
    variable W(N_BS,N_users) complex;   % precoding matrix
    variable tau nonnegative;           % auxiliary power variable
    expression INTERFERENCE(N_users,N_users);

    user_set = 1:N_users;

    % Column u stacks the interference seen by user u on top of the noise
    % standard deviation, so its norm is sqrt(interference + noise power).
    for u = user_set
        interferers = int_users_matrix(u,:);   % users interfering with user u
        INTERFERENCE(:,u) = [W(:,interferers)'*H(:,u); sqrt(sigma_2)];
    end

    % Power minimization under per-user SINR constraints
    minimize( tau^2 );
    subject to
        for u = user_set
            % SINR constraint expressed as complex Lorentz (SOC) cone membership
            {INTERFERENCE(:,u), sqrt(1/SINR_target)*real(((H(:,u)')*W(:,u)))} == complex_lorentz(N_users);
        end
        % Total transmit power constraint: ||W(:)|| <= tau
        {W(:), tau} == complex_lorentz(N_BS * N_users);
cvx_end
end
function [V, a_aux, a, b, R, desired, interference, SINR_CONSTR, cvx_status, cvx_optval] = iter_opt_prob_2(W, Ht,Hr,Hd,sigma_2,SINR_target,int_users_matrix)
% ITER_OPT_PROB_2  IRS phase-shift design step of the alternating optimization.
%
% For fixed BS precoders W, solves the semidefinite relaxation (SDR) over
% the lifted reflection matrix V, maximizing the sum of per-user SINR
% slack variables a_aux subject to the SINR targets and unit-diagonal
% constraints on V.
%
% Inputs:
%   W                - N_BS x N_users fixed precoding matrix
%   Ht               - M x N_BS BS-to-IRS channel
%   Hr               - M x N_users IRS-to-user channels
%   Hd               - N_BS x N_users direct BS-to-user channels
%   sigma_2          - receiver noise power
%   SINR_target      - per-user SINR requirement (linear scale)
%   int_users_matrix - row k lists the users interfering with user k
%
% Outputs:
%   V            - (M+1)x(M+1) PSD solution of the relaxation
%   a_aux        - per-user SINR slack variables (objective terms)
%   a, b, R      - cell arrays of per-user-pair channel/lifting terms
%   desired      - per-user desired-signal expressions
%   interference - per-user aggregate interference expressions
%   SINR_CONSTR  - per-user SINR constraint expressions (>= 0 when feasible)
%   cvx_status, cvx_optval - CVX solver status and optimal value

N_users = size(Hd,2);   % number of users
M = size(Ht,1);         % number of IRS reflecting elements

all_users = 1:N_users;

% Per user pair (k,m): a{k,m} is the cascaded BS-IRS-user-k channel under
% precoder m, b{k,m} the direct-path term, and R{k,m} the lifted coupling
% matrix used in the SDR.
a = cell(N_users,N_users);
b = cell(N_users,N_users);
R = cell(N_users,N_users);
for k = all_users
    intf = int_users_matrix(k,:);   % users interfering with user k
    for m = [k intf]                % own precoder first, then interferers
        a{k,m} = diag(Hr(:,k)')*Ht*W(:,m);
        b{k,m} = Hd(:,k)'*W(:,m);
        R{k,m} = [ a{k,m}*(a{k,m}')  a{k,m}*(b{k,m}') ; a{k,m}'*b{k,m}  0 ];
    end
end

cvx_clear

cvx_begin sdp
    cvx_quiet(true)
    cvx_solver SDPT3     % underlying solver (alternatives: SeDuMi, Mosek)
    cvx_precision best   % tighten the CVX numerical precision

    variable V(M+1,M+1) complex semidefinite;
    variable a_aux(1,N_users) nonnegative;   % SINR slack, one per user (sum is maximized)
    expressions SINR_CONSTR(N_users) desired(N_users) interference(N_users);

    % Build the desired-signal / interference expressions in terms of V.
    for k = all_users
        intf = int_users_matrix(k,:);
        desired(k) = trace(real(R{k,k}*V)) + square_abs(b{k,k});
        interference(k) = 0;
        for m = intf
            interference(k) = interference(k) + trace(real(R{k,m}*V)) + square_abs(b{k,m});
        end
        SINR_CONSTR(k) = desired(k) - a_aux(k) - SINR_target * (interference(k) + sigma_2);
    end

    maximize( sum(a_aux) );
    subject to
        for k = 1:N_users
            % Per-user SINR target with slack a_aux(k)
            desired(k) >= a_aux(k) + SINR_target * (interference(k) + sigma_2);
        end
        % Relaxed unit-modulus condition: all diagonal entries of V equal one
        diag(V) == ones(M+1,1);
cvx_end
end
% --------- DeepMIMO: A Generic Dataset for mmWave and massive MIMO ------%
% Author: Ahmed Alkhateeb
% Goal: Encouraging research on ML/DL for mmWave/massive MIMO applications
%       and providing a benchmarking tool for the developed algorithms
% ---------------------------------------------------------------------- %

function [DeepMIMO_dataset,params]=DeepMIMO_Dataset_Generator()
% DEEPMIMO_DATASET_GENERATOR  Configure and generate a DeepMIMO dataset.
%
% Fills the params struct with the ray-tracing scenario selection, the
% active BS/user ranges, the BS antenna-array geometry and the OFDM
% settings, then delegates dataset construction to DeepMIMO_generator.
%
% Outputs:
%   DeepMIMO_dataset - the generated channel dataset
%   params           - the parameter struct (possibly augmented by the generator)

% ------ Inputs to the DeepMIMO dataset generation code ------------ %

% Ray-tracing scenario (available scenarios: www.aalkhateeb.net/DeepMIMO.html)
params.scenario='O1_60';

% Active base stations (values from 1-18 for 'O1')
params.active_BS=[3];

% Active users: first/last rows of the considered receiver section
% (check the scenario description for the receiver row map)
params.active_user_first=1000;
params.active_user_last=1300;

% Number of UPA antennas along each axis at the BS
% Note: the antenna axes match the axes of the ray-tracing scenario
params.num_ant_x=1;
params.num_ant_y=32;
params.num_ant_z=8;

% Antenna spacing as a ratio of the wavelength (.5 = half wavelength)
params.ant_spacing=.5;

% System bandwidth in GHz
params.bandwidth=0.5;

% OFDM parameters
params.num_OFDM=1024;            % number of OFDM subcarriers
params.OFDM_sampling_factor=1;   % channels computed only at the sampled subcarriers
params.OFDM_limit=64;            % only the first OFDM_limit subcarriers are kept

% Maximum number of channel paths (a value between 1 and 25);
% choose 1 if you are only interested in the strongest path
params.num_paths=5;

params.saveDataset=0;

% -------------------------- DeepMIMO Dataset Generation -----------------%
% NOTE(review): the top-level DeepMIMO_generator.m in this repository takes
% a second argument (deepmimo_root_path); this legacy call assumes the
% single-argument variant — confirm which generator is on the MATLAB path.
[DeepMIMO_dataset,params]=DeepMIMO_generator(params);

end
%% System Model parameters
% Script body of Fig10_generator.m: sweeps IRS sizes and training-set
% sizes, calls Main_fn for each configuration, and plots achievable rates.

kbeams=1;        % select the top kbeams, get their feedback and find the max actual achievable rate
Pt=5;            % transmit power (dB)
L =1;            % number of channel paths (L)

% Note: The axes of the antennas match the axes of the ray-tracing scenario
My_ar=[32 64];   % number of LIS reflecting elements across the y axis
Mz_ar=[32 64];   % number of LIS reflecting elements across the z axis
M_bar=8;         % number of active elements
K_DL=64;         % number of subcarriers as input to the Deep Learning model
Training_Size=[2 1e4*(1:.4:3)];   % training dataset size vector

% Preallocation of output variables
Rate_DLt=zeros(numel(My_ar),numel(Training_Size));
Rate_OPTt=zeros(numel(My_ar),numel(Training_Size));

%% Figure Data Generation

for rr = 1:numel(My_ar)
    % Checkpoint current results before each (long-running) Main_fn call
    save Fig10_data.mat L My_ar Mz_ar M_bar Training_Size K_DL Rate_DLt Rate_OPTt
    [Rate_DL,Rate_OPT]=Main_fn(L,My_ar(rr),Mz_ar(rr),M_bar,K_DL,Pt,kbeams,Training_Size);
    Rate_DLt(rr,:)=Rate_DL; Rate_OPTt(rr,:)=Rate_OPT;
end

save Fig10_data.mat L My_ar Mz_ar M_bar Training_Size K_DL Rate_DLt Rate_OPTt

%% Figure Plot

% One colour per IRS-size configuration
Colour = 'brgmcky';

f10 = figure('Name', 'Figure10', 'units','pixels');
hold on; grid on; box on;
title(['Achievable Rate for different dataset sizes using only ' num2str(M_bar) ' active elements'],'fontsize',12)
xlabel('Deep Learning Training Dataset Size (Thousands of Samples)','fontsize',14)
ylabel('Achievable Rate (bps/Hz)','fontsize',14)
set(gca,'FontSize',13)
if ishandle(f10)
    set(0, 'CurrentFigure', f10)
    hold on; grid on;
    for rr=1:numel(My_ar)
        plot((Training_Size*1e-3),Rate_OPTt(rr,:),[Colour(rr) '*--'],'markersize',8,'linewidth',2, 'DisplayName',['Genie-Aided Reflection Beamforming, M = ' num2str(My_ar(rr)) '*' num2str(Mz_ar(rr))])
        plot((Training_Size*1e-3),Rate_DLt(rr,:),[Colour(rr) 's-'],'markersize',8,'linewidth',2, 'DisplayName', ['DL Reflection Beamforming, M = ' num2str(My_ar(rr)) '*' num2str(Mz_ar(rr))])
    end
    legend('Location','SouthEast')
    legend show
end
drawnow
hold off
14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. 
A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 58 | Public License 59 | 60 | By exercising the Licensed Rights (defined below), You accept and agree 61 | to be bound by the terms and conditions of this Creative Commons 62 | Attribution-NonCommercial-ShareAlike 4.0 International Public License 63 | ("Public License"). To the extent this Public License may be 64 | interpreted as a contract, You are granted the Licensed Rights in 65 | consideration of Your acceptance of these terms and conditions, and the 66 | Licensor grants You such rights in consideration of benefits the 67 | Licensor receives from making the Licensed Material available under 68 | these terms and conditions. 69 | 70 | 71 | Section 1 -- Definitions. 72 | 73 | a. Adapted Material means material subject to Copyright and Similar 74 | Rights that is derived from or based upon the Licensed Material 75 | and in which the Licensed Material is translated, altered, 76 | arranged, transformed, or otherwise modified in a manner requiring 77 | permission under the Copyright and Similar Rights held by the 78 | Licensor. For purposes of this Public License, where the Licensed 79 | Material is a musical work, performance, or sound recording, 80 | Adapted Material is always produced where the Licensed Material is 81 | synched in timed relation with a moving image. 82 | 83 | b. Adapter's License means the license You apply to Your Copyright 84 | and Similar Rights in Your contributions to Adapted Material in 85 | accordance with the terms and conditions of this Public License. 86 | 87 | c. 
BY-NC-SA Compatible License means a license listed at 88 | creativecommons.org/compatiblelicenses, approved by Creative 89 | Commons as essentially the equivalent of this Public License. 90 | 91 | d. Copyright and Similar Rights means copyright and/or similar rights 92 | closely related to copyright including, without limitation, 93 | performance, broadcast, sound recording, and Sui Generis Database 94 | Rights, without regard to how the rights are labeled or 95 | categorized. For purposes of this Public License, the rights 96 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 97 | Rights. 98 | 99 | e. Effective Technological Measures means those measures that, in the 100 | absence of proper authority, may not be circumvented under laws 101 | fulfilling obligations under Article 11 of the WIPO Copyright 102 | Treaty adopted on December 20, 1996, and/or similar international 103 | agreements. 104 | 105 | f. Exceptions and Limitations means fair use, fair dealing, and/or 106 | any other exception or limitation to Copyright and Similar Rights 107 | that applies to Your use of the Licensed Material. 108 | 109 | g. License Elements means the license attributes listed in the name 110 | of a Creative Commons Public License. The License Elements of this 111 | Public License are Attribution, NonCommercial, and ShareAlike. 112 | 113 | h. Licensed Material means the artistic or literary work, database, 114 | or other material to which the Licensor applied this Public 115 | License. 116 | 117 | i. Licensed Rights means the rights granted to You subject to the 118 | terms and conditions of this Public License, which are limited to 119 | all Copyright and Similar Rights that apply to Your use of the 120 | Licensed Material and that the Licensor has authority to license. 121 | 122 | j. Licensor means the individual(s) or entity(ies) granting rights 123 | under this Public License. 124 | 125 | k. 
NonCommercial means not primarily intended for or directed towards 126 | commercial advantage or monetary compensation. For purposes of 127 | this Public License, the exchange of the Licensed Material for 128 | other material subject to Copyright and Similar Rights by digital 129 | file-sharing or similar means is NonCommercial provided there is 130 | no payment of monetary compensation in connection with the 131 | exchange. 132 | 133 | l. Share means to provide material to the public by any means or 134 | process that requires permission under the Licensed Rights, such 135 | as reproduction, public display, public performance, distribution, 136 | dissemination, communication, or importation, and to make material 137 | available to the public including in ways that members of the 138 | public may access the material from a place and at a time 139 | individually chosen by them. 140 | 141 | m. Sui Generis Database Rights means rights other than copyright 142 | resulting from Directive 96/9/EC of the European Parliament and of 143 | the Council of 11 March 1996 on the legal protection of databases, 144 | as amended and/or succeeded, as well as other essentially 145 | equivalent rights anywhere in the world. 146 | 147 | n. You means the individual or entity exercising the Licensed Rights 148 | under this Public License. Your has a corresponding meaning. 149 | 150 | 151 | Section 2 -- Scope. 152 | 153 | a. License grant. 154 | 155 | 1. Subject to the terms and conditions of this Public License, 156 | the Licensor hereby grants You a worldwide, royalty-free, 157 | non-sublicensable, non-exclusive, irrevocable license to 158 | exercise the Licensed Rights in the Licensed Material to: 159 | 160 | a. reproduce and Share the Licensed Material, in whole or 161 | in part, for NonCommercial purposes only; and 162 | 163 | b. produce, reproduce, and Share Adapted Material for 164 | NonCommercial purposes only. 165 | 166 | 2. Exceptions and Limitations. 
For the avoidance of doubt, where 167 | Exceptions and Limitations apply to Your use, this Public 168 | License does not apply, and You do not need to comply with 169 | its terms and conditions. 170 | 171 | 3. Term. The term of this Public License is specified in Section 172 | 6(a). 173 | 174 | 4. Media and formats; technical modifications allowed. The 175 | Licensor authorizes You to exercise the Licensed Rights in 176 | all media and formats whether now known or hereafter created, 177 | and to make technical modifications necessary to do so. The 178 | Licensor waives and/or agrees not to assert any right or 179 | authority to forbid You from making technical modifications 180 | necessary to exercise the Licensed Rights, including 181 | technical modifications necessary to circumvent Effective 182 | Technological Measures. For purposes of this Public License, 183 | simply making modifications authorized by this Section 2(a) 184 | (4) never produces Adapted Material. 185 | 186 | 5. Downstream recipients. 187 | 188 | a. Offer from the Licensor -- Licensed Material. Every 189 | recipient of the Licensed Material automatically 190 | receives an offer from the Licensor to exercise the 191 | Licensed Rights under the terms and conditions of this 192 | Public License. 193 | 194 | b. Additional offer from the Licensor -- Adapted Material. 195 | Every recipient of Adapted Material from You 196 | automatically receives an offer from the Licensor to 197 | exercise the Licensed Rights in the Adapted Material 198 | under the conditions of the Adapter's License You apply. 199 | 200 | c. No downstream restrictions. You may not offer or impose 201 | any additional or different terms or conditions on, or 202 | apply any Effective Technological Measures to, the 203 | Licensed Material if doing so restricts exercise of the 204 | Licensed Rights by any recipient of the Licensed 205 | Material. 206 | 207 | 6. No endorsement. 
Nothing in this Public License constitutes or 208 | may be construed as permission to assert or imply that You 209 | are, or that Your use of the Licensed Material is, connected 210 | with, or sponsored, endorsed, or granted official status by, 211 | the Licensor or others designated to receive attribution as 212 | provided in Section 3(a)(1)(A)(i). 213 | 214 | b. Other rights. 215 | 216 | 1. Moral rights, such as the right of integrity, are not 217 | licensed under this Public License, nor are publicity, 218 | privacy, and/or other similar personality rights; however, to 219 | the extent possible, the Licensor waives and/or agrees not to 220 | assert any such rights held by the Licensor to the limited 221 | extent necessary to allow You to exercise the Licensed 222 | Rights, but not otherwise. 223 | 224 | 2. Patent and trademark rights are not licensed under this 225 | Public License. 226 | 227 | 3. To the extent possible, the Licensor waives any right to 228 | collect royalties from You for the exercise of the Licensed 229 | Rights, whether directly or through a collecting society 230 | under any voluntary or waivable statutory or compulsory 231 | licensing scheme. In all other cases the Licensor expressly 232 | reserves any right to collect such royalties, including when 233 | the Licensed Material is used other than for NonCommercial 234 | purposes. 235 | 236 | 237 | Section 3 -- License Conditions. 238 | 239 | Your exercise of the Licensed Rights is expressly made subject to the 240 | following conditions. 241 | 242 | a. Attribution. 243 | 244 | 1. If You Share the Licensed Material (including in modified 245 | form), You must: 246 | 247 | a. retain the following if it is supplied by the Licensor 248 | with the Licensed Material: 249 | 250 | i. 
identification of the creator(s) of the Licensed 251 | Material and any others designated to receive 252 | attribution, in any reasonable manner requested by 253 | the Licensor (including by pseudonym if 254 | designated); 255 | 256 | ii. a copyright notice; 257 | 258 | iii. a notice that refers to this Public License; 259 | 260 | iv. a notice that refers to the disclaimer of 261 | warranties; 262 | 263 | v. a URI or hyperlink to the Licensed Material to the 264 | extent reasonably practicable; 265 | 266 | b. indicate if You modified the Licensed Material and 267 | retain an indication of any previous modifications; and 268 | 269 | c. indicate the Licensed Material is licensed under this 270 | Public License, and include the text of, or the URI or 271 | hyperlink to, this Public License. 272 | 273 | 2. You may satisfy the conditions in Section 3(a)(1) in any 274 | reasonable manner based on the medium, means, and context in 275 | which You Share the Licensed Material. For example, it may be 276 | reasonable to satisfy the conditions by providing a URI or 277 | hyperlink to a resource that includes the required 278 | information. 279 | 3. If requested by the Licensor, You must remove any of the 280 | information required by Section 3(a)(1)(A) to the extent 281 | reasonably practicable. 282 | 283 | b. ShareAlike. 284 | 285 | In addition to the conditions in Section 3(a), if You Share 286 | Adapted Material You produce, the following conditions also apply. 287 | 288 | 1. The Adapter's License You apply must be a Creative Commons 289 | license with the same License Elements, this version or 290 | later, or a BY-NC-SA Compatible License. 291 | 292 | 2. You must include the text of, or the URI or hyperlink to, the 293 | Adapter's License You apply. You may satisfy this condition 294 | in any reasonable manner based on the medium, means, and 295 | context in which You Share Adapted Material. 296 | 297 | 3. 
You may not offer or impose any additional or different terms 298 | or conditions on, or apply any Effective Technological 299 | Measures to, Adapted Material that restrict exercise of the 300 | rights granted under the Adapter's License You apply. 301 | 302 | 303 | Section 4 -- Sui Generis Database Rights. 304 | 305 | Where the Licensed Rights include Sui Generis Database Rights that 306 | apply to Your use of the Licensed Material: 307 | 308 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 309 | to extract, reuse, reproduce, and Share all or a substantial 310 | portion of the contents of the database for NonCommercial purposes 311 | only; 312 | 313 | b. if You include all or a substantial portion of the database 314 | contents in a database in which You have Sui Generis Database 315 | Rights, then the database in which You have Sui Generis Database 316 | Rights (but not its individual contents) is Adapted Material, 317 | including for purposes of Section 3(b); and 318 | 319 | c. You must comply with the conditions in Section 3(a) if You Share 320 | all or a substantial portion of the contents of the database. 321 | 322 | For the avoidance of doubt, this Section 4 supplements and does not 323 | replace Your obligations under this Public License where the Licensed 324 | Rights include other Copyright and Similar Rights. 325 | 326 | 327 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 328 | 329 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 330 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 331 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 332 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 333 | IMPLIED, STATUTORY, OR OTHER. 
THIS INCLUDES, WITHOUT LIMITATION, 334 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 335 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 336 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 337 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 338 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 339 | 340 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 341 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 342 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 343 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 344 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 345 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 346 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 347 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 348 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 349 | 350 | c. The disclaimer of warranties and limitation of liability provided 351 | above shall be interpreted in a manner that, to the extent 352 | possible, most closely approximates an absolute disclaimer and 353 | waiver of all liability. 354 | 355 | 356 | Section 6 -- Term and Termination. 357 | 358 | a. This Public License applies for the term of the Copyright and 359 | Similar Rights licensed here. However, if You fail to comply with 360 | this Public License, then Your rights under this Public License 361 | terminate automatically. 362 | 363 | b. Where Your right to use the Licensed Material has terminated under 364 | Section 6(a), it reinstates: 365 | 366 | 1. automatically as of the date the violation is cured, provided 367 | it is cured within 30 days of Your discovery of the 368 | violation; or 369 | 370 | 2. upon express reinstatement by the Licensor. 
371 | 372 | For the avoidance of doubt, this Section 6(b) does not affect any 373 | right the Licensor may have to seek remedies for Your violations 374 | of this Public License. 375 | 376 | c. For the avoidance of doubt, the Licensor may also offer the 377 | Licensed Material under separate terms or conditions or stop 378 | distributing the Licensed Material at any time; however, doing so 379 | will not terminate this Public License. 380 | 381 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 382 | License. 383 | 384 | 385 | Section 7 -- Other Terms and Conditions. 386 | 387 | a. The Licensor shall not be bound by any additional or different 388 | terms or conditions communicated by You unless expressly agreed. 389 | 390 | b. Any arrangements, understandings, or agreements regarding the 391 | Licensed Material not stated herein are separate from and 392 | independent of the terms and conditions of this Public License. 393 | 394 | 395 | Section 8 -- Interpretation. 396 | 397 | a. For the avoidance of doubt, this Public License does not, and 398 | shall not be interpreted to, reduce, limit, restrict, or impose 399 | conditions on any use of the Licensed Material that could lawfully 400 | be made without permission under this Public License. 401 | 402 | b. To the extent possible, if any provision of this Public License is 403 | deemed unenforceable, it shall be automatically reformed to the 404 | minimum extent necessary to make it enforceable. If the provision 405 | cannot be reformed, it shall be severed from this Public License 406 | without affecting the enforceability of the remaining terms and 407 | conditions. 408 | 409 | c. No term or condition of this Public License will be waived and no 410 | failure to comply consented to unless expressly agreed to by the 411 | Licensor. 412 | 413 | d. 
Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.

=======================================================================

Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.

Creative Commons may be contacted at creativecommons.org.
--------------------------------------------------------------------------------
/leave_now/LIS-DeepLearning-master/Main_fn.m:
--------------------------------------------------------------------------------
function [Rate_DL,Rate_OPT]=Main_fn(L,My,Mz,M_bar,K_DL,Pt,kbeams,Training_Size)
%% Description:
%
% This is the function called by the main script for plotting Figure 10
% in the original article mentioned below.
%
% Inputs:
%   L             - maximum number of ray-tracing paths per channel
%   My, Mz        - LIS reflecting elements along the y and z axes (Mx is fixed to 1 below)
%   M_bar         - number of active (sensing) LIS elements to sample
%   K_DL          - number of subcarriers used for the deep-learning channels
%   Pt            - transmit power in dB (combined with Gt/Gr antenna gains below)
%   kbeams        - number of top predicted beams evaluated per user (maxk)
%   Training_Size - vector of training-set sizes; one model is trained per entry
% Outputs:
%   Rate_DL  - mean achievable rate of the DL-selected beam, per training size
%   Rate_OPT - mean achievable rate of the genie-aided best beam (upper bound)
%
% version 1.0 (Last edited: 2019-05-10)
%
% The definitions and equations used in this code refer (mostly) to the
% following publication:
%
% Abdelrahman Taha, Muhammad Alrabeiah, and Ahmed Alkhateeb, "Enabling
% Large Intelligent Surfaces with Compressive Sensing and Deep Learning,"
% arXiv e-prints, p. arXiv:1904.10136, Apr 2019.
% [Online]. Available: https://arxiv.org/abs/1904.10136
%
% The DeepMIMO dataset is adopted.
% [Online]. Available: http://deepmimo.net/
%
% License: This code is licensed under a Creative Commons
% Attribution-NonCommercial-ShareAlike 4.0 International License.
% [Online]. Available: https://creativecommons.org/licenses/by-nc-sa/4.0/
% If you in any way use this code for research that results in
% publications, please cite our original article mentioned above.

%% System Model Parameters

params.scenario='O1_28';         % DeepMIMO Dataset scenario: http://deepmimo.net/
params.active_BS=3;              % active basestation(/s) in the chosen scenario
D_Lambda = 0.5;                  % Antenna spacing relative to the wavelength
BW = 100e6;                      % Bandwidth in Hz (ex: 100e6 --> 100 MHz)

% Transmitter (user)
Ut_row = 850;                    % user Ut row number
Ut_element = 90;                 % user Ut col number

% Receiver(s) (quantized locations in a region)
Ur_rows = [1000 1300];           % user Ur rows

Validation_Size = 6200;          % Validation dataset Size
K = 512;                         % number of subcarriers
miniBatchSize = 500;             % Size of the minibatch for the Deep Learning

% === LIS (one of the BS *locations* - considered passive here of course) ===
% Note: The axes of the antennas match the axes of the ray-tracing scenario
Mx = 1;                          % number of LIS reflecting elements across the x axis
M = Mx.*My.*Mz;                  % Total number of LIS reflecting elements

% Preallocation of output variables
Rate_DL = zeros(1,length(Training_Size));
Rate_OPT = Rate_DL;
LastValidationRMSE = Rate_DL;

%--- Accounting SNR in each rate calculations
%--- Defining Noisy channel measurements
Gt=3;                            % transmit antenna gain, dBi
Gr=3;                            % receive antenna gain, dBi
NF=5;                            % Noise figure at the User equipment
Process_Gain=10;                 % Channel estimation processing gain
noise_power_dB=-204+10*log10(BW/K)+NF-Process_Gain; % Noise power in dB per subcarrier
% NOTE(review): the gain factor is squared here, presumably because the
% end-to-end link is the product of two channel segments (Tx-LIS and
% LIS-Rx) -- confirm against Eq. definitions in the referenced article.
SNR=10^(.1*(-noise_power_dB))*(10^(.1*(Gt+Gr+Pt)))^2; % Signal-to-noise ratio

% channel estimation noise (variance of the per-measurement noise below)
noise_power_bar=10^(.1*(noise_power_dB))/(10^(.1*(Gt+Gr+Pt)));

No_user_pairs = (Ur_rows(2)-Ur_rows(1))*181; % Number of (Ut,Ur) user pairs; 181 users per row
% Which row is omitted? Starting or ending 1? otherwise No_user_pairs
% should be this value +1 --> Last row is not neglected .. ok .. from
% Ur_rows(1) till Ur_rows(2)-1 according to the channel generation code below
RandP_all = randperm(No_user_pairs).'; % Random permutation of the available dataset

%% Starting the code
disp('=============================================================');
disp([' Calculating for M = ' num2str(M)]);
Rand_M_bar_all = randperm(M);    % random ordering of elements; first M_bar become active sensors

%% Beamforming Codebook (CALLING UPA_codebook_generator)
disp('=============GENERATING REFLECTION MATRIX CODEBOOK===========');
% BF codebook parameters
over_sampling_x=1;               % The beamsteering oversampling factor in the x direction
over_sampling_y=1;               % The beamsteering oversampling factor in the y direction
over_sampling_z=1;               % The beamsteering oversampling factor in the z direction

% Generating the BF codebook (scaled so each column has unit-magnitude entries)
[BF_codebook]=sqrt(Mx*My*Mz)*...
    UPA_codebook_generator(Mx,My,Mz,over_sampling_x,over_sampling_y,over_sampling_z,D_Lambda);
codebook_size=size(BF_codebook,2);

%% DeepMIMO Dataset Generation (CALLING DeepMIMO_Generator)
disp('===============GENERATING DEEPMIMO DATASET===================');
disp('-------------------------------------------------------------');
disp([' Calculating for K_DL = ' num2str(K_DL)]);
% ------ Inputs to the DeepMIMO dataset generation code ------------ %
% Note: The axes of the antennas match the axes of the ray-tracing scenario
params.num_ant_x= Mx;            % Number of the UPA antenna array on the x-axis
params.num_ant_y= My;            % Number of the UPA antenna array on the y-axis
params.num_ant_z= Mz;            % Number of the UPA antenna array on the z-axis
params.ant_spacing=D_Lambda;     % ratio of the wavelength; for half wavelength enter .5
params.bandwidth= BW*1e-9;       % The bandwidth in GHz
params.num_OFDM= K;              % Number of OFDM subcarriers
params.OFDM_sampling_factor=1;   % The constructed channels will be calculated only at the sampled subcarriers (to reduce the size of the dataset)
params.OFDM_limit=K_DL*1;        % Only the first params.OFDM_limit subcarriers will be considered when constructing the channels
params.num_paths=L;              % Maximum number of paths to be considered (a value between 1 and 25), e.g., choose 1 if you are only interested in the strongest path
params.saveDataset=0;
disp([' Calculating for L = ' num2str(params.num_paths)]);
disp('==========Generating Transmitter-IRS Full Channel============');
% ------------------ DeepMIMO "Ut" Dataset Generation ----------------- %
params.active_user_first=Ut_row;
params.active_user_last=Ut_row;  % Only one active row; Ut_element selects the column within it below
DeepMIMO_dataset=DeepMIMO_generator(params); % Generator function generates data for entire rows
Ht = single(DeepMIMO_dataset{1}.user{Ut_element}.channel); % Selecting element of interest here
clear DeepMIMO_dataset

disp('===========Generating IRS-Receiver Full Channels=============');
% ------------------ DeepMIMO "Ur" Dataset Generation -----------------%
%Validation part for the actual achievable rate perf eval
Validation_Ind = RandP_all(end-Validation_Size+1:end);
[~,VI_sortind] = sort(Validation_Ind);
[~,VI_rev_sortind] = sort(VI_sortind);   % inverse permutation; used to restore random order after the sequential sweep below
%initialization
Ur_rows_step = 100;              % access the dataset 100 rows at a time
Ur_rows_grid=Ur_rows(1):Ur_rows_step:Ur_rows(2);
Delta_H_max = single(0);
% First pass: only find the global max |Ht.*Hr| for later normalization.
for pp = 1:1:numel(Ur_rows_grid)-1 % loop for Normalizing H
    clear DeepMIMO_dataset
    params.active_user_first=Ur_rows_grid(pp);
    params.active_user_last=Ur_rows_grid(pp+1)-1;
    [DeepMIMO_dataset,params]=DeepMIMO_generator(params);
    for u=1:params.num_user
        Hr = single(conj(DeepMIMO_dataset{1}.user{u}.channel)); % conjugated since it is now downlink
        Delta_H = max(max(abs(Ht.*Hr)));
        if Delta_H >= Delta_H_max
            Delta_H_max = single(Delta_H); % storing the maximum absolute value of the end-to-end product channel for later normalization
        end
    end
end
clear Delta_H


disp('======================Sampling Channels======================');
disp([' Calculating for M_bar = ' num2str(M_bar)]);
Rand_M_bar =unique(Rand_M_bar_all(1:M_bar));  % indices of the active sensing elements
Ht_bar = reshape(Ht(Rand_M_bar,:),M_bar*K_DL,1);


DL_input = single(zeros(M_bar*K_DL*2,No_user_pairs));   % stacked real/imag sampled channels
DL_output = single(zeros(No_user_pairs,codebook_size)); % per-beam normalized rate labels
DL_output_un= single(zeros(numel(Validation_Ind),codebook_size)); % un-normalized rates, validation set only

Delta_H_bar_max = single(0);
count=0;
% Second pass: build DL inputs (noisy sampled channels) and outputs (beam rates).
for pp = 1:1:numel(Ur_rows_grid)-1
    clear DeepMIMO_dataset
    disp(['Starting received user access ' num2str(pp)]);
    params.active_user_first=Ur_rows_grid(pp);
    params.active_user_last=Ur_rows_grid(pp+1)-1;
    [DeepMIMO_dataset,params]=DeepMIMO_generator(params); % Why generating again and not sampling from the beginning?


    %% Construct Deep Learning inputs
    % NOTE(review): the column index u+uu-1+((pp-1)*params.num_user)
    % assumes params.num_user is identical for every pp (100 rows * 181
    % users); verify DeepMIMO_generator returns a constant count per pass.
    u_step=100;                  % users processed per inner batch
    Htx=repmat(Ht(:,1),1,u_step); % first-subcarrier Tx-LIS channel, tiled per batch
    Hrx=zeros(M,u_step);
    for u=1:u_step:params.num_user
        for uu=1:1:u_step
            Hr = single(conj(DeepMIMO_dataset{1}.user{u+uu-1}.channel));
            Hr_bar = reshape(Hr(Rand_M_bar,:),M_bar*K_DL,1);
            %--- Constructing the sampled channel (independent noise on each hop)
            n1=sqrt(noise_power_bar/2)*(randn(M_bar*K_DL,1)+1j*randn(M_bar*K_DL,1));
            n2=sqrt(noise_power_bar/2)*(randn(M_bar*K_DL,1)+1j*randn(M_bar*K_DL,1));
            H_bar = ((Ht_bar+n1).*(Hr_bar+n2));
            DL_input(:,u+uu-1+((pp-1)*params.num_user))= reshape([real(H_bar) imag(H_bar)].',[],1);
            Delta_H_bar = max(max(abs(H_bar)));
            if Delta_H_bar >= Delta_H_bar_max
                Delta_H_bar_max = single(Delta_H_bar);
            end
            Hrx(:,uu)=Hr(:,1);
        end
        %--- Actual achievable rate for performance evaluation
        H = Htx.*Hrx;            % end-to-end cascaded channel, first subcarrier
        H_BF=H.'*BF_codebook;    % response of every codebook beam for each user
        SNR_sqrt_var = abs(H_BF);
        for uu=1:1:u_step
            if sum((Validation_Ind == u+uu-1+((pp-1)*params.num_user)))
                count=count+1;
                DL_output_un(count,:) = single(sum(log2(1+(SNR*((SNR_sqrt_var(uu,:)).^2))),1));
            end
        end
        %--- Label for the sampled channel
        R = single(log2(1+(SNR_sqrt_var/Delta_H_max).^2));
        % --- DL output normalization (each row scaled by its own max)
        % NOTE(review): if any row max is exactly 0 this batch, Rn is NOT
        % recomputed and the previous batch's Rn is written below -- looks
        % like a latent bug; confirm rows can never be all-zero.
        Delta_Out_max = max(R,[],2);
        if ~sum(Delta_Out_max == 0)
            Rn=diag(1./Delta_Out_max)*R;
        end
        DL_output(u+((pp-1)*params.num_user):u+((pp-1)*params.num_user)+u_step-1,:) = 1*Rn; %%%%% Normalized %%%%%
    end
end
clear u Delta_H_bar R Rn
%-- Sorting back the DL_output_un
DL_output_un = DL_output_un(VI_rev_sortind,:);
%--- DL input normalization
DL_input= 1*(DL_input/Delta_H_bar_max); %%%%% Normalized from -1->1 %%%%%

%% DL Beamforming (Using Deep Learning)
disp('======================DL BEAMFORMING=========================');

% ------------------ Training and Testing Datasets -----------------%
% Reshape to the 4-D (H,W,C,N) layout expected by trainNetwork.
DL_output_reshaped = reshape(DL_output.',1,1,size(DL_output,2),size(DL_output,1));
DL_output_reshaped_un = reshape(DL_output_un.',1,1,size(DL_output_un,2),size(DL_output_un,1));
DL_input_reshaped= reshape(DL_input,size(DL_input,1),1,1,size(DL_input,2));
for dd=1:1:numel(Training_Size)
    disp([' Calculating for Dataset Size = ' num2str(Training_Size(dd))]);
    Training_Ind = RandP_all(1:Training_Size(dd));

    XTrain = single(DL_input_reshaped(:,1,1,Training_Ind));
    YTrain = single(DL_output_reshaped(1,1,:,Training_Ind));
    XValidation = single(DL_input_reshaped(:,1,1,Validation_Ind));
    YValidation = single(DL_output_reshaped(1,1,:,Validation_Ind));
    YValidation_un = single(DL_output_reshaped_un);

    % ------------------ DL Model definition -----------------%
    % MLP regressor: input -> FC/ReLU/Dropout x3 -> FC -> regression.
    layers = [
        % INPUT Layer
        imageInputLayer([size(XTrain,1),1,1],'Name','input')

        % Fully Connected Layer 1 with Dropout
        fullyConnectedLayer(size(YTrain,3),'Name','Fully1')
        reluLayer('Name','relu1')
        dropoutLayer(0.5,'Name','dropout1')

        % Fully Connected Layer 2 with Dropout
        fullyConnectedLayer(4*size(YTrain,3),'Name','Fully2')
        reluLayer('Name','relu2')
        dropoutLayer(0.5,'Name','dropout2')

        % Fully Connected Layer 3 with Dropout
        fullyConnectedLayer(4*size(YTrain,3),'Name','Fully3')
        reluLayer('Name','relu3')
        dropoutLayer(0.5,'Name','dropout3')

        % OUTPUT Layer
        fullyConnectedLayer(size(YTrain,3),'Name','Fully4')
        regressionLayer('Name','outReg')];

    % Validate once per epoch-equivalent of minibatches.
    if Training_Size(dd) < miniBatchSize
        validationFrequency = Training_Size(dd);
    else
        validationFrequency = floor(Training_Size(dd)/miniBatchSize);
    end

    VerboseFrequency = validationFrequency;
    options = trainingOptions('sgdm', ...
        'MiniBatchSize',miniBatchSize, ...
        'MaxEpochs',20, ...
        'InitialLearnRate',1e-1, ...
        'LearnRateSchedule','piecewise', ...
        'LearnRateDropFactor',0.5, ...
        'LearnRateDropPeriod',3, ...
        'L2Regularization',1e-4,...
        'Shuffle','every-epoch', ...
        'ValidationData',{XValidation,YValidation}, ...
        'ValidationFrequency',validationFrequency, ...
        'Plots','none', ... % 'training-progress'
        'Verbose',0, ... % 1
        'ExecutionEnvironment', 'cpu', ...
        'VerboseFrequency',VerboseFrequency);

    % ------------- DL Model Training and Prediction -----------------%
    [~,Indmax_OPT]= max(YValidation,[],3);
    Indmax_OPT = squeeze(Indmax_OPT); %Upper bound on achievable rates
    MaxR_OPT = single(zeros(numel(Indmax_OPT),1));
    [trainedNet,traininfo] = trainNetwork(XTrain,YTrain,layers,options);
    YPredicted = predict(trainedNet,XValidation);

    % --------------------- Achievable Rate --------------------------%
    % Evaluate the best of the kbeams top-scoring predicted beams against
    % the genie-aided best beam, both on the UN-normalized rates.
    [~,Indmax_DL] = maxk(YPredicted,kbeams,2);
    MaxR_DL = single(zeros(size(Indmax_DL,1),1)); %True achievable rates
    for b=1:size(Indmax_DL,1)
        MaxR_DL(b) = max(squeeze(YValidation_un(1,1,Indmax_DL(b,:),b)));
        MaxR_OPT(b) = squeeze(YValidation_un(1,1,Indmax_OPT(b),b));
    end
    Rate_OPT(dd) = mean(MaxR_OPT);
    Rate_DL(dd) = mean(MaxR_DL);
    LastValidationRMSE(dd) = traininfo.ValidationRMSE(end);
    clear trainedNet traininfo YPredicted
    clear layers options Rate_DL_Temp MaxR_DL_Temp Highest_Rate
end
end
--------------------------------------------------------------------------------
/leave_now/LIS-DeepLearning-master/README.md:
--------------------------------------------------------------------------------
| # Enabling Large Intelligent Surfaces with Compressive Sensing and Deep Learning 2 | This is a MATLAB code package related to the following article: 3 | Abdelrahman Taha, Muhammad Alrabeiah, and Ahmed Alkhateeb, “[Enabling Large Intelligent Surfaces with Compressive Sensing and Deep Learning](https://arxiv.org/abs/1904.10136),” arXiv e-prints, p. arXiv:1904.10136, Apr 2019. 4 | # Abstract of the Article 5 | Employing large intelligent surfaces (LISs) is a promising solution for improving the coverage and rate of future wireless systems. These surfaces comprise a massive number of nearly-passive elements that interact with the incident signals, for example by reflecting them, in a smart way that improves the wireless system performance. Prior work focused on the design of the LIS reflection matrices assuming full knowledge of the channels. Estimating these channels at the LIS, however, is a key challenging problem, and is associated with large training overhead given the massive number of LIS elements. This paper proposes efficient solutions for these problems by leveraging tools from compressive sensing and deep learning. First, a novel LIS architecture based on sparse channel sensors is proposed. In this architecture, all the LIS elements are passive except for a few elements that are active (connected to the baseband of the LIS controller). We then develop two solutions that design the LIS reflection matrices with negligible training overhead. In the first approach, we leverage compressive sensing tools to construct the channels at all the LIS elements from the channels seen only at the active elements. These full channels can then be used to design the LIS reflection matrices with no training overhead. 
In the second approach, we develop a deep learning based solution where the LIS learns how to optimally interact with the incident signal given the channels at the active elements, which represent the current state of the environment and transmitter/receiver locations. We show that the achievable rates of the proposed compressive sensing and deep learning solutions approach the upper bound, that assumes perfect channel knowledge, with negligible training overhead and with less than 1% of the elements being active. 6 | # Code Package Content 7 | The main script for generating Figure 10 as shown below, illustrated in the original article, is named `Fig10_generator.m`. 8 | One additional MATLAB function named `Main_fn.m` is called by the main script. Another additional MATLAB function named `UPA_codebook_generator.m` is called by the function `Main_fn.m`. 9 | ![Figure10](https://github.com/Abdelrahman-Taha/LIS-DeepLearning/blob/master/Figure10.png) 10 | The script adopts the publicly available parameterized [DeepMIMO dataset](http://deepmimo.net/ray_tracing.html?i=1) published for deep learning applications in mmWave and massive MIMO systems. The 'O1_28' scenario is adopted for this figure. 11 | 12 | **To reproduce the results, please follow these steps:** 13 | 1. Download all the files of this project and add them to the "DeepMIMO_Dataset_Generation" folder. 14 | (Note that both the DeepMIMO dataset generation files and the source data of the 'O1_28' scenario are available on [this link](https://github.com/DeepMIMO/DeepMIMO-codes)). 15 | 2. Run the file named `Fig10_generator.m` in MATLAB and the script will sequentially execute the following tasks: 16 | 1. Generate the inputs and outputs of the deep learning model. 17 | 2. Build, train, and test the deep learning model. 18 | 3. Process the deep learning outputs and generate the performance results. 
If you have any questions regarding the code and used dataset, please contact [Abdelrahman Taha](https://sites.google.com/view/abdelrahmantaha).

# License and Referencing
This code package is licensed under a [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-nc-sa/4.0/). If you in any way use this code for research that results in publications, please cite our original article:
> A. Taha, M. Alrabeiah, and A. Alkhateeb, “[Enabling Large Intelligent Surfaces with Compressive Sensing and Deep Learning](https://arxiv.org/abs/1904.10136),” arXiv e-prints, p. arXiv:1904.10136, Apr 2019.
--------------------------------------------------------------------------------
/leave_now/UPA_codebook_generator.m:
--------------------------------------------------------------------------------
function [F_CB,all_beams]=UPA_codebook_generator(Mx,My,Mz,over_sampling_x,over_sampling_y,over_sampling_z,ant_spacing)

% Code by A. Alkhateeb et al.
% Commented by M. G. Khafagy

% Builds a 3D DFT-style beamsteering codebook for a uniform planar array
% as the Kronecker product of per-axis steering codebooks.

% INPUT:
% 1) Number of antennas in (x,y,z) dimensions,
% 2) Oversampling in (x,y,z),
% 3) antenna spacing (to calculate the constant kd).

% OUTPUT:
% 1) F_CB: 3D Codebook, size (Mx*My*Mz) x (codebook_size_x*codebook_size_y*codebook_size_z),
%    each column a unit-norm steering vector
% 2) all_beams: 3D Indices, one row [ix iy iz] of per-axis beam indices per codebook column

% Constant: kd = 2*pi*d/lambda (ant_spacing is already d/lambda)
kd=2*pi*ant_spacing;

% Index Vectors (0-based antenna indices per axis)
antx_index=0:1:Mx-1;
anty_index=0:1:My-1;
antz_index=0:1:Mz-1;

% M=Mx*My*Mz; % Total number of antennas in the UPA

% Defining the RF beamforming codebook (oversampling multiplies beam count)
codebook_size_x=over_sampling_x*Mx;
codebook_size_y=over_sampling_y*My;
codebook_size_z=over_sampling_z*Mz;

% ============= X Axis =============
theta_qx=0:pi/codebook_size_x:pi-1e-6; % quantized beamsteering angles over [0, pi)
% Why not theta_qx=0:pi/codebook_size_x:pi-pi/codebook_size_x
% It assumes pi/codebook_size_x will be always greater than 1e-6 .. ok
F_CBx=zeros(Mx,codebook_size_x);
for i=1:1:length(theta_qx) % For each beamsteering angle in the x direction
    F_CBx(:,i)=sqrt(1/Mx)*exp(-1j*kd*antx_index'*cos(theta_qx(i))); % calculate the reflection vector in x direction
end
% ============= Y Axis =============
% NOTE(review): the y axis is quantized over a fixed angular span from
% +20 deg down to -307 deg, phase applied directly (no kd*cos(.) mapping)
range_y=(20+307)*pi/180;
theta_qy=20*pi/180:-range_y/codebook_size_y:-307*pi/180+1e-6; % quantized beamsteering angles
F_CBy=zeros(My,codebook_size_y);
for i=1:1:length(theta_qy) % For each beamsteering angle in the y direction
    F_CBy(:,i)=sqrt(1/My)*exp(-1j*anty_index'*theta_qy(i)); % calculate the reflection vector in y direction
    % ##############################################################
    % ###################### WHY NO kd HERE ########################
    % ################### DIFFERENT CALCULATION ####################
    % NOTE(review): theta_qy is used as the inter-element phase itself,
    % unlike the x/z axes where the phase is kd*cos(theta) -- confirm
    % this is intentional before reusing this function elsewhere.
end
% ============= Z Axis =============
theta_qz=0:pi/codebook_size_z:pi-1e-6; % quantized beamsteering angles over [0, pi)
F_CBz=zeros(Mz,codebook_size_z);
for i=1:1:length(theta_qz) % For each beamsteering angle in the z direction
    F_CBz(:,i)=sqrt(1/Mz)*exp(-1j*kd*antz_index'*cos(theta_qz(i))); % calculate the reflection vector in z direction
end

% ============= 3D codebook =============
% Kronecker structure: x index varies fastest, then y, then z.
F_CBxy=kron(F_CBy,F_CBx);
F_CB=kron(F_CBz,F_CBxy);


% ============= 3D Indices =============
% Per-axis beam index triplets, ordered to match the Kronecker column order above.
beams_x=1:1:codebook_size_x;
beams_y=1:1:codebook_size_y;
beams_z=1:1:codebook_size_z;

Mxx_Ind=repmat(beams_x,1,codebook_size_y*codebook_size_z)';
Myy_Ind=repmat(reshape(repmat(beams_y,codebook_size_x,1),1,codebook_size_x*codebook_size_y),1,codebook_size_z)';
Mzz_Ind=reshape(repmat(beams_z,codebook_size_x*codebook_size_y,1),1,codebook_size_x*codebook_size_y*codebook_size_z)';

Tx=cat(3,Mxx_Ind',Myy_Ind',Mzz_Ind');
all_beams=reshape(Tx,[],3);
end
--------------------------------------------------------------------------------
/old_archive/resetfcn_power.m:
--------------------------------------------------------------------------------
% Reset function for the MU-MISO IRS environment
% https://www.mathworks.com/help/reinforcement-learning/ug/create-custom-reinforcement-learning-environment-in-matlab.html
%
% Resets the custom RL environment: stores all training channels and
% per-user noise/SINR settings in LoggedSignals, initializes the BS
% beamformer and IRS phases, and returns the initial observation.
%
% Inputs:
%   Hd_mat, Hr_mat, Ht_mat - 3-D stacks of direct (BS-user), IRS-user and
%                            BS-IRS channels; third dim indexes the sample
%   sigma_2                - noise variance for all users
%   SINR_threshold         - SINR threshold for all users
%   state_design           - 1: full state (powers+channels+action), 2: channels only
% Outputs:
%   InitialObservation     - the initial state vector
%   LoggedSignals          - environment state carried into the step function

function [InitialObservation,LoggedSignals] = resetfcn_power(Hd_mat, Hr_mat, Ht_mat, sigma_2, SINR_threshold, state_design)

% -------------- Initialized Logged Signals ------------------
LoggedSignals.chan_index = 1; % Initialize channel/step index
LoggedSignals.StateDes = state_design;
LoggedSignals.sigma_2 = sigma_2; % Noise variance for all users
LoggedSignals.SINR_threshold = SINR_threshold; % SINR threshold for all users
% Store ALL training channels in Logged Signals (3-dimensional matrices)
LoggedSignals.Hd_mat = Hd_mat;
LoggedSignals.Hr_mat = Hr_mat;
LoggedSignals.Ht_mat = Ht_mat;

% Prepare first channels
Hd = LoggedSignals.Hd_mat(:,:,LoggedSignals.chan_index);
Hr = LoggedSignals.Hr_mat(:,:,LoggedSignals.chan_index);
Ht = LoggedSignals.Ht_mat(:,:,LoggedSignals.chan_index);

N_users = size(Hr,2); M = size(Ht,1); N_BS = size(Ht,2);

LoggedSignals.new_chan_obs.Ht = Ht;
LoggedSignals.new_chan_obs.Hr = Hr;
LoggedSignals.new_chan_obs.Hd = Hd;


% ---------- Action Initialization ----------
% BS beamforming initialization (identity mapping: user k served by antenna k)
W = eye(N_BS, N_users);
W_vec = W(:);
W_realimag_vec = [real(W_vec); imag(W_vec)];
% IRS reflection coefficients initialization (all phases set to pi)
%theta_vec = ones(M,1); %theta_realimag_vec = [real(theta_vec); imag(theta_vec)]; %theta_mat = diag(theta_vec);
theta_vec = pi*ones(M,1);
theta_mat = diag(exp(1i*theta_vec));
% Action layout: [real(W); imag(W); theta phases] -- must match stepfcn_power
action = [W_realimag_vec; theta_vec];
% Real/imaginary transmitted power for each user
transmit_pow = [diag(real(W)'*real(W)); diag(imag(W)'*imag(W))];

% Real/imaginary received power at each user
% Effective downlink channel: IRS cascade (Ht'*theta'*Hr) plus direct link Hd
H_W = W'*(Ht'*(theta_mat')*Hr + Hd);
H_real_imag_vec = [real(H_W(:)); imag(H_W(:))];
receive_pow = H_real_imag_vec.^2;
% Channel observation vector (stacked real/imag parts of all three channels)
chan_obs = [real(Ht(:)); imag(Ht(:)); real(Hr(:)); imag(Hr(:)); real(Hd(:)); imag(Hd(:))];


% Calculate interferer indices matrix: row k lists all users except k
int_users_matrix = meshgrid(1:N_users).';
int_users_matrix(1:N_users+1:N_users^2) = []; % drop the diagonal (self indices)
int_users_matrix = reshape(int_users_matrix, N_users-1, N_users).';


% -------------- Initialized Logged Signals ------------------
switch LoggedSignals.StateDes
    case 1
        LoggedSignals.State = [transmit_pow; receive_pow; chan_obs; action];
    case 2
        LoggedSignals.State = chan_obs;
end
LoggedSignals.int_users_matrix = int_users_matrix; % Interferer indices matrix
% LoggedSignals.action = action;

% Return initial observation
InitialObservation = LoggedSignals.State;

end
--------------------------------------------------------------------------------
/old_archive/resetfcn_throughput.m:
--------------------------------------------------------------------------------
% Reset function for the MU-MISO IRS environment
% https://www.mathworks.com/help/reinforcement-learning/ug/create-custom-reinforcement-learning-environment-in-matlab.html
%
% Variant that draws fresh random (Rayleigh-style) channels on every reset
% instead of indexing a pre-generated channel set.
%
% Inputs:
%   N_BS, N_users, M - numbers of BS antennas, users, IRS elements
%   sigma_2          - noise variance for all users
% Outputs:
%   InitialObservation, LoggedSignals - as in resetfcn_power

function [InitialObservation,LoggedSignals] = resetfcn_throughput(N_BS, N_users, M, sigma_2)


% Initialize Channel Index to 1
LoggedSignals.chan_index = 1; % Store new channel index
% Prepare first channels (scales 1e-4 / 1e-2 set the direct vs. IRS path gains)

Hd = 1e-4/sqrt(2)*(randn(N_BS, N_users)+1i*randn(N_BS, N_users));
Hr = 1e-2/sqrt(2)*(randn(M, N_users)+1i*randn(M, N_users));
Ht = 1e-2/sqrt(2)*(randn(M, N_BS)+1i*randn(M, N_BS));

LoggedSignals.new_chan_obs.Ht = Ht;
LoggedSignals.new_chan_obs.Hr = Hr;
LoggedSignals.new_chan_obs.Hd = Hd;

%LoggedSignals.new_chan_obs.Ht = Ht(LoggedSignals.chan_index); % check indices
%LoggedSignals.new_chan_obs.Hr = Hr(LoggedSignals.chan_index); % check indices
%LoggedSignals.new_chan_obs.Hd = Hd(LoggedSignals.chan_index); % check indices


% ---------- Action Initialization ----------
% BS beamforming initialization
W = eye(N_BS, N_users);
W_vec = W(:);
W_realimag_vec = [real(W_vec); imag(W_vec)];
% IRS reflection coefficients initialization
% NOTE(review): here the action carries real/imag reflection coefficients
% (all ones), unlike resetfcn_power which carries phase angles -- the two
% reset functions define different action layouts; keep matched with the
% corresponding step function.
theta_vec = ones(M,1);
theta_realimag_vec = [real(theta_vec); imag(theta_vec)];
theta_mat = diag(theta_vec);
Action = [W_realimag_vec; theta_realimag_vec];


transmit_pow = [diag(real(W)'*real(W)); diag(imag(W)'*imag(W))];

% Effective downlink channel: IRS cascade plus direct link
H_W = W'*(Ht'*(theta_mat')*Hr + Hd);
H_real_imag_vec = [real(H_W(:)); imag(H_W(:))];
receive_pow = H_real_imag_vec.^2;

chan_obs = [ real(Ht(:)); imag(Ht(:));
    real(Hr(:)); imag(Hr(:));
    real(Hd(:)); imag(Hd(:))];

% Return initial environment state variables as logged signals.
LoggedSignals.State = [transmit_pow; receive_pow; chan_obs; Action];
InitialObservation = LoggedSignals.State;

all_users = 1:1:N_users; % vector of all user indices

int_users_matrix = meshgrid(all_users).'; % indices of interfering users for each user
int_users_matrix(1:N_users+1:N_users^2) = []; % drop the diagonal (self indices)
int_users_matrix = reshape(int_users_matrix, N_users-1, N_users).';
LoggedSignals.int_users_matrix = int_users_matrix;
LoggedSignals.Action = Action;
LoggedSignals.sigma_2 = sigma_2;

end
--------------------------------------------------------------------------------
/old_archive/stepfcn_power.m:
--------------------------------------------------------------------------------
% Code Description
% ----------------
% https://www.mathworks.com/help/reinforcement-learning/ug/create-custom-reinforcement-learning-environment-in-matlab.html
% https://www.mathworks.com/help/reinforcement-learning/ug/define-reward-signals.html

% Input is the action taken by the actor agent in DDPG
% This step function calculates the new state/observation and reward
% due to taken (input) action which the agent has just taken.
9 | 10 | function [new_observation,Reward,IsDone,LoggedSignals] = stepfcn_power(action, LoggedSignals) 11 | 12 | % -------------- Extract Logged Signals ------------------ 13 | sigma_2 = LoggedSignals.sigma_2; % noise variance 14 | % Extract current channel from logged signals 15 | Ht = LoggedSignals.new_chan_obs.Ht; 16 | Hr = LoggedSignals.new_chan_obs.Hr; 17 | Hd = LoggedSignals.new_chan_obs.Hd; 18 | % Numbers of Users, IRS reflecting elements, and BS Antennas 19 | N_users = size(Hr,2); M = size(Ht,1); N_BS = size(Ht,2); 20 | 21 | % Extract BS beamformer from taken action 22 | W = reshape(action(1:N_BS*N_users)+ 1i*action(N_BS*N_users+1:2*N_BS*N_users), N_BS, N_users); 23 | % Extract IRS reflection vector from taken action 24 | theta_vec = action(2*N_BS*N_users+1:2*N_BS*N_users+M); 25 | theta_mat = diag(exp(1i*theta_vec)); 26 | 27 | % Extract past action from Logged signals 28 | % past_action = LoggedSignals.Action; 29 | % Calculate transmit power for each user (stacking real and imag powers) 30 | transmit_pow = [diag(real(W)'*real(W)); diag(imag(W)'*imag(W))]; 31 | % Calculate received power for each user (also stacking real and imag) 32 | H_W = W'*(Ht'*(theta_mat')*Hr + Hd); 33 | H_real_imag_vec = [real(H_W(:)); imag(H_W(:))]; 34 | receive_pow = H_real_imag_vec.^2; 35 | 36 | % Channel observation 37 | chan_obs = [real(Ht(:)); imag(Ht(:)); real(Hr(:)); imag(Hr(:)); real(Hd(:)); imag(Hd(:))]; 38 | 39 | switch LoggedSignals.StateDes 40 | case 1 41 | new_observation = [transmit_pow; receive_pow; chan_obs; action]; 42 | case 2 43 | new_observation = chan_obs; 44 | end 45 | 46 | int_users_matrix = LoggedSignals.int_users_matrix; 47 | % Calculate and return reward 48 | H = Ht'*(theta_mat')*Hr + Hd; 49 | 50 | SINR = zeros(N_users,1); 51 | for user_ind = 1 : N_users 52 | desired = W(:,user_ind)'*H(:,user_ind); 53 | int_users = int_users_matrix(user_ind,:); % interfering user indices 54 | interf = [W(:,int_users)'*H(:,user_ind); sqrt(sigma_2)]; 55 | SINR(user_ind) = 
norm(desired,2)^2/norm(interf,2)^2; 56 | end 57 | 58 | IsDone = min(SINR)0) 29 | Relevant_data_length=max_paths*4; 30 | Relevant_limited_data_length=num_path_limited*4; 31 | 32 | Relevant_DoD_array=DoD_array(pointer+3:pointer+2+Relevant_data_length); 33 | Relevant_CIR_array=CIR_array(pointer+3:pointer+2+Relevant_data_length); 34 | 35 | channel_params_all(Receiver_Number).DoD_phi=Relevant_DoD_array(2:4:Relevant_limited_data_length); % Departure Azimuth 36 | channel_params_all(Receiver_Number).DoD_theta=Relevant_DoD_array(3:4:Relevant_limited_data_length); % Departure Elevation 37 | channel_params_all(Receiver_Number).phase=Relevant_CIR_array(2:4:Relevant_limited_data_length); % Path Phase 38 | channel_params_all(Receiver_Number).ToA=Relevant_CIR_array(3:4:Relevant_limited_data_length); % Path Delay 39 | channel_params_all(Receiver_Number).power=1e-3*(10.^(.1*(30+Relevant_CIR_array(4:4:Relevant_limited_data_length)))); % Path Power 40 | channel_params_all(Receiver_Number).num_paths=num_path_limited; 41 | channel_params_all(Receiver_Number).loc=Loc_array(Receiver_Number,2:4); % Receiver Location 42 | else % If no paths exist 43 | channel_params_all(Receiver_Number).DoD_phi=[]; 44 | channel_params_all(Receiver_Number).DoD_theta=[]; 45 | channel_params_all(Receiver_Number).phase=[]; 46 | channel_params_all(Receiver_Number).ToA=[]; 47 | channel_params_all(Receiver_Number).power=[]; 48 | channel_params_all(Receiver_Number).num_paths=0; 49 | channel_params_all(Receiver_Number).loc=Loc_array(Receiver_Number,2:4); 50 | end 51 | pointer=pointer+max_paths*4+2; %update pointer value and continue 52 | end 53 | 54 | channel_params=channel_params_all(1,user_first:user_last); % Selects the required users 55 | 56 | end -------------------------------------------------------------------------------- /resetfcn.m: -------------------------------------------------------------------------------- 1 | % Reset function for the MU-MISO IRS environment 2 | % 
% Reset function for the MU-MISO IRS environment
% https://www.mathworks.com/help/reinforcement-learning/ug/create-custom-reinforcement-learning-environment-in-matlab.html
%
% Builds the initial observation vector and the LoggedSignals struct that
% the step function carries between steps.
%
% Inputs:
%   H_mat          - struct of stacked channels Hd_mat, Hr_mat, Ht_mat,
%                    indexed along the 3rd dimension by channel realization
%   sigma_2        - noise variance (common to all users)
%   SINR_threshold - SINR threshold (common to all users)
%   state_design   - 1: powers + channels + action as state; 2: channels only
function [InitialObservation,LoggedSignals] = resetfcn(H_mat, sigma_2, SINR_threshold, state_design)

% -------------- Initialize Logged Signals ------------------
% NOTE(fix): the original guarded this with
%   if exist('LoggedSignals.chan_index','var') == 0
% which can never detect a prior value: exist() takes a bare variable name
% (not a struct field), and LoggedSignals is a fresh local on every call,
% so the test was always true and the assignment always executed. The dead
% guard is removed; runtime behavior is unchanged.
LoggedSignals.chan_index = 1;
LoggedSignals.step_index = 1;
LoggedSignals.StateDes = state_design;
LoggedSignals.sigma_2 = sigma_2;                 % Noise variance for all users
LoggedSignals.SINR_threshold = SINR_threshold;   % SINR threshold for all users

% Slice out the current channel realization
new_chan_obs.Hd = H_mat.Hd_mat(:,:,LoggedSignals.chan_index);
new_chan_obs.Hr = H_mat.Hr_mat(:,:,LoggedSignals.chan_index);
new_chan_obs.Ht = H_mat.Ht_mat(:,:,LoggedSignals.chan_index);
LoggedSignals.new_chan_obs = new_chan_obs;

% Prepare first channels
Hd = new_chan_obs.Hd;
Hr = new_chan_obs.Hr;
Ht = new_chan_obs.Ht;

% Numbers of users, IRS reflecting elements, and BS antennas
N_users = size(Hr,2); M = size(Ht,1); N_BS = size(Ht,2);

% ---------- Action Initialization ----------
% BS beamforming initialization
W = eye(N_BS, N_users);
W_vec = W(:);
W_realimag_vec = [real(W_vec); imag(W_vec)];
% IRS reflection coefficients initialization (phases, applied as exp(1i*theta))
theta_vec = pi*ones(M,1);
theta_mat = diag(exp(1i*theta_vec));
action = [W_realimag_vec; theta_vec];

% Real/imaginary transmitted power for each user
transmit_pow = [diag(real(W)'*real(W)); diag(imag(W)'*imag(W))];

% Real/imaginary received power at each user
H_W = W'*(Ht'*(theta_mat')*Hr + Hd);
H_real_imag_vec = [real(H_W(:)); imag(H_W(:))];
receive_pow = H_real_imag_vec.^2;

% Channel observation vector (stacked real/imag parts of all channels)
chan_obs = [real(Ht(:)); imag(Ht(:)); real(Hr(:)); imag(Hr(:)); real(Hd(:)); imag(Hd(:))];

% Interferer indices matrix: row k lists the N_users-1 users interfering with user k
int_users_matrix = meshgrid(1:N_users).';
int_users_matrix(1:N_users+1:N_users^2) = [];   % drop the diagonal (self)
int_users_matrix = reshape(int_users_matrix, N_users-1, N_users).';

% -------------- Assemble initial state ------------------
switch LoggedSignals.StateDes
    case 1
        LoggedSignals.State = [transmit_pow; receive_pow; chan_obs; action];
    case 2
        LoggedSignals.State = chan_obs;
end
LoggedSignals.int_users_matrix = int_users_matrix; % Interferer indices matrix

% Return initial observation
InitialObservation = LoggedSignals.State;
end

% SINR constraint helper for the optimization-based baseline.
% NOTE(review): square_abs is a CVX library function, so this is meant to be
% evaluated inside a CVX problem with V (and possibly b) as CVX expressions
% - confirm against the caller.
%
% Inputs:
%   V                - lifted IRS reflection matrix variable
%   b, R             - per-user-pair terms of the SDR reformulation (cell arrays)
%   SINR_target      - required SINR per user
%   sigma_2          - noise variance
%   all_users        - vector of user indices to loop over
%   int_users_matrix - row k lists the users interfering with user k
% Outputs (one entry per user): desired power, aggregate interference power,
%   and the constraint value desired - SINR_target*(interference + sigma_2),
%   which is feasible when nonnegative.
function [desired_fun, interference_fun, constr_fun] = sinr_CONSTRAINT(V, b, R, SINR_target, sigma_2, all_users, int_users_matrix)
for k = all_users % looping over all users
    int_users = int_users_matrix(k,:); % interfering users
    desired_fun(k) = trace(real(R{k,k}*V)) + square_abs(b{k,k});
    interference_fun(k) = 0;
    for m = int_users
        interference_fun(k) = interference_fun(k) + trace(real(R{k,m}*V)) + square_abs(b{k,m});
    end
    constr_fun(k) = desired_fun(k) - SINR_target * (interference_fun(k) + sigma_2);
end
end
% Step function for the MU-MISO IRS environment.
% https://www.mathworks.com/help/reinforcement-learning/ug/create-custom-reinforcement-learning-environment-in-matlab.html
% https://www.mathworks.com/help/reinforcement-learning/ug/define-reward-signals.html
%
% Given the action taken by the DDPG actor, computes the next observation,
% the reward (reciprocal of total transmit power), the IsDone flag, and the
% updated LoggedSignals carried to the next step.
function [new_observation,Reward,IsDone,LoggedSignals] = stepfcn(action, LoggedSignals)

% -------------- Extract logged signals ------------------
noise_var = LoggedSignals.sigma_2;     % noise variance
Ht = LoggedSignals.new_chan_obs.Ht;    % BS-to-IRS channel
Hr = LoggedSignals.new_chan_obs.Hr;    % IRS-to-users channel
Hd = LoggedSignals.new_chan_obs.Hd;    % direct BS-to-users channel

% Numbers of users, IRS reflecting elements, and BS antennas
N_users = size(Hr,2);
M       = size(Ht,1);
N_BS    = size(Ht,2);

% -------------- Decode the action vector ------------------
% First 2*N_BS*N_users entries: real then imaginary parts of the beamformer
n_bf = N_BS*N_users;
W = reshape(action(1:n_bf) + 1i*action(n_bf+1:2*n_bf), N_BS, N_users);
% Next M entries: IRS phase shifts (applied as exp(1i*phase))
phases = action(2*n_bf+1:2*n_bf+M);
theta_mat = diag(exp(1i*phases));

% Per-user transmit power, real and imaginary parts stacked
transmit_pow = [diag(real(W)'*real(W)); diag(imag(W)'*imag(W))];

% Effective channel: IRS-reflected path plus direct path
H = Ht'*(theta_mat')*Hr + Hd;

% Per-user received power, real and imaginary parts stacked
H_W = W'*H;
rx_stacked = [real(H_W(:)); imag(H_W(:))];
receive_pow = rx_stacked.^2;

% Stacked real/imag channel observation
chan_obs = [real(Ht(:)); imag(Ht(:)); real(Hr(:)); imag(Hr(:)); real(Hd(:)); imag(Hd(:))];

% Assemble the observation according to the configured state design
if LoggedSignals.StateDes == 1
    new_observation = [transmit_pow; receive_pow; chan_obs; action];
elseif LoggedSignals.StateDes == 2
    new_observation = chan_obs;
end

% -------------- Per-user SINR ------------------
int_users_matrix = LoggedSignals.int_users_matrix;
SINR = zeros(N_users,1);
for k = 1:N_users
    sig = W(:,k)'*H(:,k);                        % desired signal term
    interferers = int_users_matrix(k,:);         % interfering user indices
    interf = [W(:,interferers)'*H(:,k); sqrt(noise_var)];
    SINR(k) = norm(sig,2)^2/norm(interf,2)^2;
end

% Episode never terminates here (threshold check deliberately disabled)
IsDone = 0; %min(SINR)>LoggedSignals.SINR_threshold;

% Reward: reciprocal of the total transmit power
Reward = (1/sum(transmit_pow)); %*(min(SINR)>LoggedSignals.SINR_threshold)

% -------------- Update logged signals ------------------
LoggedSignals.step_index = LoggedSignals.step_index + 1; % advance step counter
LoggedSignals.State = new_observation;                   % store current state
end