├── README.md ├── matlab ├── HC-CE-Algorithm │ ├── HybridPrecoding.m │ ├── MyReshape.m │ ├── channel_estimation_test.m │ ├── channel_estimation_train.m │ └── power_allocation.m ├── channel_gen.m ├── codebook_bs.mat ├── codebook_ms.mat ├── gen_testdata.m ├── gen_traindata.m ├── get_rate.m └── traditional-HBF │ ├── FD_method.m │ ├── Yuwei_method.m │ ├── yuweiA1.m │ └── yuweiA2.m └── python ├── main.py ├── model ├── CE_HBF_Net.h5 ├── CE_HBF_Net_V1.h5 ├── CE_HBF_Net_V2.h5 └── HBF_Net.h5 ├── model_gen.py └── utils.py /README.md: -------------------------------------------------------------------------------- 1 | # Channel-Estimation-and-Hybrid-Precoding-for-Millimeter-Wave-Systems-Based-on-Deep-Learning 2 | 3 | 1. Put the _"matlab"_ and _"python"_ folders in a root directory 4 | 5 | 2. The _"matlab"_ folder contains traditional HBF algorithm, channel estimation algorithm and data generation code. The _"python"_ folder contains the defined neural network models and the trained models. 6 | 7 | 3. If you want to test the HBF-Net and CE-HBF-Net directly, you can 8 | * Run _"matlab/channel_gen.m"_ to generate test channel. Run _"matlab/gen_testdata.m"_ to generate test dataset 9 | * You can also click [here](https://pan.baidu.com/s/1y6R4lY5XtMC_8MapTThHYQ) (Extraction code: om9r) to download the data set without generating a new test data set. 10 | * The trained models are saved in _"python/model"_. Run "python/main.py" in test mode (train_flag=0), you can test the performance of HBF-Net and CE-HBF-Net. 11 | 12 | 4. 
If you want to retrain the HBF-Net and CE-HBF-Net, you can 13 | * Run _"matlab/gen_traindata.m"_ to generate training data set 14 | * Set _"python/main.py"_ to training mode (train_flag=1), you can retrain the corresponding neural network model 15 | 16 | **Pay attention to the correspondence between the _saved data path_ and the _load data path_** 17 | -------------------------------------------------------------------------------- /matlab/HC-CE-Algorithm/HybridPrecoding.m: -------------------------------------------------------------------------------- 1 | function [F]=HybridPrecoding(Fopt,Num_Antennas,Num_RFchains,Qbit) 2 | 3 | % System parameters 4 | Kd=pi; % Assuming: K=2pi/lambda, D=lambda/2 5 | Num_Directions=128; 6 | Step=2*pi/Num_Directions; 7 | Antennas_index=0:1:Num_Antennas-1; 8 | Theta_Quantized=0:Step:2*pi-.00001; 9 | 10 | % RF codebook 11 | for i=1:1:length(Theta_Quantized) 12 | Steering_Vec(:,i)=sqrt(1/Num_Antennas)*exp(1j*Antennas_index*Theta_Quantized(i)); 13 | end 14 | 15 | % Initialization 16 | Fres=Fopt; % Residual precoding matrix 17 | Frf=[]; % To carry the RF precoders 18 | Steering_VecX=Steering_Vec; % The RF beamforming codebook 19 | 20 | for m=1:1:Num_RFchains 21 | % Selecting the best RF beamforming vector 22 | Epsi=Steering_VecX'*Fres; 23 | [val,Ind_Direction]=max(diag(Epsi*Epsi')); 24 | Frf=[Frf Steering_Vec(:,Ind_Direction)]; 25 | 26 | % Gram-Schmidt Procedure 27 | E=Steering_VecX(:,Ind_Direction); 28 | Proj_Prev_Directions=E*(E'*Steering_VecX/(E'*E)); 29 | Steering_VecX=Steering_VecX-Proj_Prev_Directions; % Updating the dictionary 30 | 31 | % Digital precoding 32 | try 33 | Fbb=Frf\Fopt; 34 | catch 35 | Fbb = 1; 36 | end 37 | Fres=(Fres-Frf*Fbb)/sqrt(trace((Fres-Frf*Fbb)'*(Fres-Frf*Fbb))); 38 | end 39 | 40 | % Precoding vectors normalization 41 | for i=1:1:size(Fopt,2) 42 | Fbb(:,i)=Fbb(:,i)/sqrt(trace((Frf*Fbb(:,i))'*(Frf*Fbb(:,i)))); 43 | end 44 | 45 | % The final hybrid precoding matrix 46 | F=Frf*Fbb; 47 | end 
-------------------------------------------------------------------------------- /matlab/HC-CE-Algorithm/MyReshape.m: -------------------------------------------------------------------------------- 1 | function new_array=MyReshape(array) 2 | new_array=[]; 3 | [S,~,Num_paths]=size(array); 4 | for ii=1:1:Num_paths 5 | tmp=[]; 6 | for kk=1:1:S 7 | tmp=[tmp,array(kk,:,ii)]; 8 | end 9 | new_array=[new_array,tmp]; 10 | end -------------------------------------------------------------------------------- /matlab/HC-CE-Algorithm/channel_estimation_test.m: -------------------------------------------------------------------------------- 1 | function [ecsi,data,index_bs,index_ms,nmse] = channel_estimation_test(pcsi,pnr_dB,ITER, Num_paths, Lest,Nrf, Nt, Nr,codebook_bs,codebook_ms) 2 | 3 | %% ------------------------System Parameters--------------------------------- 4 | 5 | BSAntennas_Index=0:1:Nt-1; % Indices of the BS Antennas 6 | Num_BS_RFchains=Nrf; % BS RF chains 7 | MSAntennas_Index=0:1:Nr-1; % Indices of the MS Antennas 8 | Num_MS_RFchains=2; % MS RF chains 9 | 10 | %% ---------------- Channel Estimation Algorithm Parameters------------------ 11 | 12 | G_BS=96; % Required resolution for BS AoD 13 | G_MS=6; % Required resolution for MS AoA 14 | 15 | K_BS=2; % Number of Beamforming vectors per stage 16 | K_MS=2; 17 | 18 | S=floor(log(G_BS/Lest)/log(K_BS)); % Number of iterations 19 | 20 | % Beamsteering vectors generation 21 | for g=1:1:G_BS 22 | AbG(:,g)=sqrt(1/Nt)*exp(1j*(2*pi)*BSAntennas_Index*((g-1)/G_BS)); 23 | end 24 | % Am generation 25 | for g=1:1:G_MS 26 | AmG(:,g)=sqrt(1/Nr)*exp(1j*(2*pi)*MSAntennas_Index*((g-1)/G_MS)); 27 | end 28 | 29 | %% -------------------------------------------------------------------------- 30 | 31 | ecsi = zeros(ITER,Nr,Nt); 32 | error_nmse = zeros(ITER,1); 33 | data=zeros([ITER,(S-1)*2*Lest*Lest+36*3]); 34 | index_bs=zeros([ITER,S*Lest*3]); 35 | index_ms=zeros([ITER,Lest]); 36 | 37 | %% ---------------------start 
estimation------------------------------------------------------ 38 | 39 | t1 = clock; 40 | for iter=1:1:ITER 41 | 42 | data_tmp=zeros([S-1,Lest*2,Lest]); % save recived data during DOA estimation 43 | data1_tmp=zeros([1,Lest*2*6,Lest]); % save recived data during DOA estimation 44 | 45 | if mod(iter,2000)==0 46 | iter 47 | end 48 | 49 | %% ------------------------------Load Channel ------------------------------------------- 50 | Channel= pcsi(iter,:,:) ; 51 | Channel=squeeze(Channel); 52 | pnr=10^(0.1*pnr_dB); 53 | No=1/pnr; 54 | %% ------------------------------------------------------------------------- 55 | 56 | %Algorithm parameters initialization 57 | KB_final=[]; % To keep the indecis of the estimated AoDs 58 | KM_final=[]; % To keep the indecis of the estimated AoAs 59 | yv_for_path_estimation=zeros(K_BS*Lest,1); % To keep received vectors 60 | 61 | for path=1:1:Lest % An iterations for each path 62 | KB_star=1:1:K_BS*Lest; % Best AoD ranges for the next stage 63 | KM_star=1:1:K_MS*Lest; % Best AoA ranges for the next stage 64 | 65 | %% --------------------------------------------level=1----------------------------------- 66 | level=1; 67 | % Noise calculations 68 | W_HP=codebook_ms; 69 | W_HP=reshape(W_HP,[Nr,2*Lest]); 70 | Noise=W_HP'*(sqrt(No/2)*(randn(Nr,K_BS*Lest)+1j*randn(Nr,K_BS*Lest))); 71 | F_HP=codebook_bs(level,:,KB_star); 72 | F_HP=reshape(F_HP,[Nt,2*Lest]); 73 | Y=W_HP'*Channel*F_HP+Noise; 74 | 75 | 76 | yv=reshape(Y,K_BS*K_MS*Lest^2,1); % vectorized received signal 77 | data1_tmp(1,:,path)=yv; 78 | % Subtracting the contribution of previously estimated paths 79 | for i=1:1:length(KB_final) 80 | A1=transpose(F_HP)*conj(AbG(:,KB_final(i)+1)); 81 | A2=W_HP'*AmG(:,KM_final(i)+1); 82 | Prev_path_cont=kron(A1,A2); 83 | Alp=Prev_path_cont'*yv; 84 | yv=yv-Alp*Prev_path_cont/(Prev_path_cont'*Prev_path_cont); 85 | end 86 | 87 | % Maximum power angles estimation 88 | Y_tmp=reshape(yv,K_MS*Lest*K_BS*Lest,1); 89 | Y=reshape(yv,K_MS*Lest,K_BS*Lest); 90 | 
91 | %MS 92 | [val mX]=sort(abs(Y_tmp)); 93 | Max=max(val); 94 | [KM_max KB_temp]=find(abs(Y)==Max); 95 | KM_hist(path)=KM_star(KM_max); 96 | 97 | %BS 98 | [~,mX]=sort(abs(Y(KM_max,:)),'descend'); 99 | index_bs_tmp(level,:,path)=KB_star(mX(1:Lest)); 100 | mx_id=1; 101 | for kk=path:1:Lest 102 | KB_hist(kk,level)=KB_star(mX(mx_id)); 103 | mx_id=mx_id+1; 104 | end 105 | 106 | % Adjusting the directions of the next stage (The adaptive search) 107 | for ln=1:1:Lest 108 | KB_star((ln-1)*K_BS+1:ln*K_BS)=(KB_hist(ln,level)-1)*K_BS+1:1:(KB_hist(ln,level))*K_BS; 109 | end 110 | KM_star=KM_hist(path); 111 | 112 | %% ---------------------------------------the other levels------------------------------------------------ 113 | 114 | for level=2:1:S 115 | 116 | % Noise calculations 117 | W_HP=codebook_ms(1,:,KM_star); 118 | W_HP=reshape(W_HP,[Nr,1]); 119 | Noise=W_HP'*(sqrt(No/2)*(randn(Nr,K_BS*Lest)+1j*randn(Nr,K_BS*Lest))); 120 | 121 | % Received signal 122 | F_HP=codebook_bs(level,:,KB_star); 123 | F_HP=reshape(F_HP,[Nt,2*Lest]); 124 | Y=W_HP'*Channel*F_HP+Noise; 125 | 126 | yv=reshape(Y,K_BS*Lest,1); % vectorized received signal 127 | data_tmp(level-1,:,path)=yv; 128 | if(level==S) 129 | yv_for_path_estimation=yv_for_path_estimation+yv; 130 | end 131 | 132 | % Subtracting the contribution of previously estimated paths 133 | for i=1:1:length(KB_final) 134 | A1=transpose(F_HP)*conj(AbG(:,KB_final(i)+1)); 135 | A2=W_HP'*AmG(:,KM_final(i)+1); 136 | Prev_path_cont=kron(A1,A2); 137 | Alp=Prev_path_cont'*yv; 138 | yv=yv-Alp*Prev_path_cont/(Prev_path_cont'*Prev_path_cont); 139 | end 140 | 141 | % Maximum power angles estimation 142 | [~,mX]=sort(abs(Y),'descend'); 143 | index_bs_tmp(level,:,path)=KB_star(mX(1:Lest)); 144 | mx_id=1; 145 | for kk=path:1:Lest 146 | KB_hist(kk,level)=KB_star(mX(mx_id)); 147 | mx_id=mx_id+1; 148 | end 149 | 150 | 151 | % Final AoAs/AoDs 152 | if(level==S) 153 | KB_final=[KB_final KB_star(mX(1))-1]; 154 | KM_final=[KM_final KM_star-1]; 155 | 
W_paths(path,:,:)=W_HP; 156 | F_paths(path,:,:)=F_HP; 157 | end 158 | 159 | % Adjusting the directions of the next stage (The adaptive search) 160 | for ln=1:1:Lest 161 | KB_star((ln-1)*K_BS+1:ln*K_BS)=(KB_hist(ln,level)-1)*K_BS+1:1:(KB_hist(ln,level))*K_BS; 162 | end 163 | 164 | 165 | end % -- End of estimating the lth path 166 | 167 | 168 | end %--- End of estimation of the channel 169 | 170 | %% --------------- Reconstructe the estimated channel------------------ 171 | 172 | % -----------------------------Estimated angles---------------------- 173 | AoD_est=2*pi*KB_final/G_BS; 174 | AoA_est=2*pi*KM_final/G_MS; 175 | 176 | % ---------------------------Estimated paths-------------------------- 177 | Wx=zeros(Nr,1); 178 | Fx=zeros(Nt,K_BS*Lest); 179 | Epsix=zeros(K_BS*Lest,Lest); 180 | 181 | for path=1:1:Lest 182 | Epsi=[]; 183 | Wx(:,:)=W_paths(path,:,:); 184 | Fx(:,:)=F_paths(path,:,:); 185 | for i=1:1:length(KB_final) 186 | A1=transpose(Fx)*conj(AbG(:,KB_final(i)+1)); 187 | A2=Wx'*AmG(:,KM_final(i)+1); 188 | E=kron(A1,A2); 189 | Epsi=[Epsi E]; 190 | end 191 | Epsix=Epsix+Epsi; 192 | end 193 | alpha_est=Epsix\yv_for_path_estimation; 194 | 195 | %--------------- Reconstructe the estimated channel------------------ 196 | Channel_est=zeros(Nr,Nt); 197 | for path=1:1:Lest 198 | Abh_est(:,path)=sqrt(1/Nt)*exp(1j*BSAntennas_Index*AoD_est(path)); 199 | Amh_est(:,path)=sqrt(1/Nr)*exp(1j*MSAntennas_Index*AoA_est(path)); 200 | Channel_est=Channel_est+alpha_est(path)*Amh_est(:,path)*Abh_est(:,path)'; 201 | end 202 | 203 | %% ---------------Save data-------------------- 204 | ecsi(iter,:,:) = Channel_est; 205 | error_nmse(iter)=(norm(Channel_est-Channel,'fro')/norm(Channel,'fro'))^2; 206 | data_tmp=MyReshape(data_tmp); 207 | data1_tmp=MyReshape(data1_tmp); 208 | data(iter,:)=[data_tmp,data1_tmp]; 209 | index_bs(iter,:)=MyReshape(index_bs_tmp); 210 | index_ms(iter,:)=reshape(KM_hist,[3,1]); 211 | 212 | end 213 | nmse=mean(error_nmse); 214 | end 
-------------------------------------------------------------------------------- /matlab/HC-CE-Algorithm/channel_estimation_train.m: -------------------------------------------------------------------------------- 1 | function [pcsi,ecsi,data,index_bs,index_ms,nmse] = channel_estimation_train(pnr_dB,ITER, Num_paths, Lest,Nrf, Nt, Nr,codebook_bs,codebook_ms) 2 | 3 | %% ------------------------System Parameters--------------------------------- 4 | 5 | BSAntennas_Index=0:1:Nt-1; % Indices of the BS Antennas 6 | Num_BS_RFchains=Nrf; % BS RF chains 7 | MSAntennas_Index=0:1:Nr-1; % Indices of the MS Antennas 8 | Num_MS_RFchains=2; % MS RF chains 9 | 10 | %% ---------------- Channel Estimation Algorithm Parameters------------------ 11 | 12 | G_BS=96; % Required resolution for BS AoD 13 | G_MS=6; % Required resolution for MS AoA 14 | 15 | K_BS=2; % Number of Beamforming vectors per stage 16 | K_MS=2; 17 | 18 | S=floor(log(G_BS/Lest)/log(K_BS)); % Number of iterations 19 | 20 | % Beamsteering vectors generation 21 | for g=1:1:G_BS 22 | AbG(:,g)=sqrt(1/Nt)*exp(1j*(2*pi)*BSAntennas_Index*((g-1)/G_BS)); 23 | end 24 | % Am generation 25 | for g=1:1:G_MS 26 | AmG(:,g)=sqrt(1/Nr)*exp(1j*(2*pi)*MSAntennas_Index*((g-1)/G_MS)); 27 | end 28 | 29 | %% -------------------------------------------------------------------------- 30 | 31 | pcsi = zeros(ITER,Nr,Nt); 32 | ecsi = zeros(ITER,Nr,Nt); 33 | error_nmse = zeros(ITER,1); 34 | data=zeros([ITER,(S-1)*2*Lest*Lest+36*3]); 35 | index_bs=zeros([ITER,S*Lest*3]); 36 | index_ms=zeros([ITER,Lest]); 37 | 38 | %% ---------------------start estimation------------------------------------------------------ 39 | 40 | t1 = clock; 41 | for iter=1:1:ITER 42 | 43 | data_tmp=zeros([S-1,Lest*2,Lest]); % save recived data during DOA estimation 44 | data1_tmp=zeros([1,Lest*2*6,Lest]); % save recived data during DOA estimation 45 | 46 | if mod(iter,2000)==0 47 | iter 48 | end 49 | 50 | %% ------------------------------ Channel Generation 
------------------------------------------- 51 | % Channel parameters (angles of arrival and departure and path gains) 52 | AoD=2*pi*rand(1,Num_paths); 53 | AoA=2*pi*rand(1,Num_paths); 54 | alpha=(sqrt(1/2)*sqrt(1/Num_paths)*(randn(1,Num_paths)+1j*randn(1,Num_paths))); 55 | Channel=zeros(Nr,Nt); 56 | for path=1:1:Num_paths 57 | Abh(:,path)=sqrt(1/Nt)*exp(1j*BSAntennas_Index*AoD(path)); 58 | Amh(:,path)=sqrt(1/Nr)*exp(1j*MSAntennas_Index*AoA(path)); 59 | Channel=Channel+sqrt(Nt*Nr)*alpha(path)*Amh(:,path)*Abh(:,path)'; 60 | end 61 | pcsi(iter,:,:) = Channel; 62 | pnr=10^(0.1*pnr_dB); 63 | No=1/pnr; 64 | 65 | %% ------------------------------------------------------------------------- 66 | 67 | %Algorithm parameters initialization 68 | KB_final=[]; % To keep the indecis of the estimated AoDs 69 | KM_final=[]; % To keep the indecis of the estimated AoAs 70 | yv_for_path_estimation=zeros(K_BS*Lest,1); % To keep received vectors 71 | 72 | for path=1:1:Lest % An iterations for each path 73 | KB_star=1:1:K_BS*Lest; % Best AoD ranges for the next stage 74 | KM_star=1:1:K_MS*Lest; % Best AoA ranges for the next stage 75 | 76 | %% --------------------------------------------level=1----------------------------------- 77 | level=1; 78 | % Noise calculations 79 | W_HP=codebook_ms; 80 | W_HP=reshape(W_HP,[Nr,2*Lest]); 81 | Noise=W_HP'*(sqrt(No/2)*(randn(Nr,K_BS*Lest)+1j*randn(Nr,K_BS*Lest))); 82 | F_HP=codebook_bs(level,:,KB_star); 83 | F_HP=reshape(F_HP,[Nt,2*Lest]); 84 | Y=W_HP'*Channel*F_HP+Noise; 85 | 86 | 87 | yv=reshape(Y,K_BS*K_MS*Lest^2,1); % vectorized received signal 88 | data1_tmp(1,:,path)=yv; 89 | % Subtracting the contribution of previously estimated paths 90 | for i=1:1:length(KB_final) 91 | A1=transpose(F_HP)*conj(AbG(:,KB_final(i)+1)); 92 | A2=W_HP'*AmG(:,KM_final(i)+1); 93 | Prev_path_cont=kron(A1,A2); 94 | Alp=Prev_path_cont'*yv; 95 | yv=yv-Alp*Prev_path_cont/(Prev_path_cont'*Prev_path_cont); 96 | end 97 | 98 | % Maximum power angles estimation 99 | 
Y_tmp=reshape(yv,K_MS*Lest*K_BS*Lest,1); 100 | Y=reshape(yv,K_MS*Lest,K_BS*Lest); 101 | 102 | %MS 103 | [val mX]=sort(abs(Y_tmp)); 104 | Max=max(val); 105 | [KM_max KB_temp]=find(abs(Y)==Max); 106 | KM_hist(path)=KM_star(KM_max); 107 | 108 | %BS 109 | [~,mX]=sort(abs(Y(KM_max,:)),'descend'); 110 | index_bs_tmp(level,:,path)=KB_star(mX(1:Lest)); 111 | mx_id=1; 112 | for kk=path:1:Lest 113 | KB_hist(kk,level)=KB_star(mX(mx_id)); 114 | mx_id=mx_id+1; 115 | end 116 | 117 | % Adjusting the directions of the next stage (The adaptive search) 118 | for ln=1:1:Lest 119 | KB_star((ln-1)*K_BS+1:ln*K_BS)=(KB_hist(ln,level)-1)*K_BS+1:1:(KB_hist(ln,level))*K_BS; 120 | end 121 | KM_star=KM_hist(path); 122 | 123 | %% ---------------------------------------the other levels------------------------------------------------ 124 | 125 | for level=2:1:S 126 | 127 | % Noise calculations 128 | W_HP=codebook_ms(1,:,KM_star); 129 | W_HP=reshape(W_HP,[Nr,1]); 130 | Noise=W_HP'*(sqrt(No/2)*(randn(Nr,K_BS*Lest)+1j*randn(Nr,K_BS*Lest))); 131 | 132 | % Received signal 133 | F_HP=codebook_bs(level,:,KB_star); 134 | F_HP=reshape(F_HP,[Nt,2*Lest]); 135 | Y=W_HP'*Channel*F_HP+Noise; 136 | 137 | yv=reshape(Y,K_BS*Lest,1); % vectorized received signal 138 | data_tmp(level-1,:,path)=yv; 139 | if(level==S) 140 | yv_for_path_estimation=yv_for_path_estimation+yv; 141 | end 142 | 143 | % Subtracting the contribution of previously estimated paths 144 | for i=1:1:length(KB_final) 145 | A1=transpose(F_HP)*conj(AbG(:,KB_final(i)+1)); 146 | A2=W_HP'*AmG(:,KM_final(i)+1); 147 | Prev_path_cont=kron(A1,A2); 148 | Alp=Prev_path_cont'*yv; 149 | yv=yv-Alp*Prev_path_cont/(Prev_path_cont'*Prev_path_cont); 150 | end 151 | 152 | % Maximum power angles estimation 153 | [~,mX]=sort(abs(Y),'descend'); 154 | index_bs_tmp(level,:,path)=KB_star(mX(1:Lest)); 155 | mx_id=1; 156 | for kk=path:1:Lest 157 | KB_hist(kk,level)=KB_star(mX(mx_id)); 158 | mx_id=mx_id+1; 159 | end 160 | 161 | 162 | % Final AoAs/AoDs 163 | if(level==S) 164 
| KB_final=[KB_final KB_star(mX(1))-1]; 165 | KM_final=[KM_final KM_star-1]; 166 | W_paths(path,:,:)=W_HP; 167 | F_paths(path,:,:)=F_HP; 168 | end 169 | 170 | % Adjusting the directions of the next stage (The adaptive search) 171 | for ln=1:1:Lest 172 | KB_star((ln-1)*K_BS+1:ln*K_BS)=(KB_hist(ln,level)-1)*K_BS+1:1:(KB_hist(ln,level))*K_BS; 173 | end 174 | 175 | 176 | end % -- End of estimating the lth path 177 | 178 | 179 | end %--- End of estimation of the channel 180 | 181 | %% --------------- Reconstructe the estimated channel------------------ 182 | 183 | % -----------------------------Estimated angles---------------------- 184 | AoD_est=2*pi*KB_final/G_BS; 185 | AoA_est=2*pi*KM_final/G_MS; 186 | 187 | % ---------------------------Estimated paths-------------------------- 188 | Wx=zeros(Nr,1); 189 | Fx=zeros(Nt,K_BS*Lest); 190 | Epsix=zeros(K_BS*Lest,Lest); 191 | 192 | for path=1:1:Lest 193 | Epsi=[]; 194 | Wx(:,:)=W_paths(path,:,:); 195 | Fx(:,:)=F_paths(path,:,:); 196 | for i=1:1:length(KB_final) 197 | A1=transpose(Fx)*conj(AbG(:,KB_final(i)+1)); 198 | A2=Wx'*AmG(:,KM_final(i)+1); 199 | E=kron(A1,A2); 200 | Epsi=[Epsi E]; 201 | end 202 | Epsix=Epsix+Epsi; 203 | end 204 | alpha_est=Epsix\yv_for_path_estimation; 205 | 206 | %--------------- Reconstructe the estimated channel------------------ 207 | Channel_est=zeros(Nr,Nt); 208 | for path=1:1:Lest 209 | Abh_est(:,path)=sqrt(1/Nt)*exp(1j*BSAntennas_Index*AoD_est(path)); 210 | Amh_est(:,path)=sqrt(1/Nr)*exp(1j*MSAntennas_Index*AoA_est(path)); 211 | Channel_est=Channel_est+alpha_est(path)*Amh_est(:,path)*Abh_est(:,path)'; 212 | end 213 | 214 | %% ---------------Save data-------------------- 215 | ecsi(iter,:,:) = Channel_est; 216 | error_nmse(iter)=(norm(Channel_est-Channel,'fro')/norm(Channel,'fro'))^2; 217 | data_tmp=MyReshape(data_tmp); 218 | data1_tmp=MyReshape(data1_tmp); 219 | data(iter,:)=[data_tmp,data1_tmp]; 220 | index_bs(iter,:)=MyReshape(index_bs_tmp); 221 | index_ms(iter,:)=reshape(KM_hist,[3,1]); 222 
| 223 | end 224 | nmse=mean(error_nmse); 225 | end -------------------------------------------------------------------------------- /matlab/HC-CE-Algorithm/power_allocation.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LQJecho/Channel-Estimation-and-Hybrid-Precoding-for-Millimeter-Wave-Systems-Based-on-Deep-Learning/cd79577896e1658baa763c9a4914c3fd9e9f145d/matlab/HC-CE-Algorithm/power_allocation.m -------------------------------------------------------------------------------- /matlab/channel_gen.m: -------------------------------------------------------------------------------- 1 | clc 2 | close all 3 | clear 4 | 5 | ITER=10000; 6 | Num_MS_Antennas=2; 7 | Num_BS_Antennas=64; 8 | Num_paths=4; 9 | BSAntennas_Index=0:1:Num_BS_Antennas-1; % Indices of the BS Antennas 10 | MSAntennas_Index=0:1:Num_MS_Antennas-1; % Indices of the MS Antennas 11 | 12 | pcsi=zeros(ITER,Num_MS_Antennas,Num_BS_Antennas); 13 | for iter=1:1:ITER 14 | AoD=2*pi*rand(1,Num_paths); 15 | AoA=2*pi*rand(1,Num_paths); 16 | alpha=(sqrt(1/2)*sqrt(1/Num_paths)*(randn(1,Num_paths)+1j*randn(1,Num_paths))); 17 | 18 | % Channel construction 19 | Channel=zeros(Num_MS_Antennas,Num_BS_Antennas); 20 | for l=1:1:Num_paths 21 | Abh(:,l)=sqrt(1/Num_BS_Antennas)*exp(1j*BSAntennas_Index*AoD(l)); 22 | Amh(:,l)=sqrt(1/Num_MS_Antennas)*exp(1j*MSAntennas_Index*AoA(l)); 23 | Channel=Channel+sqrt(Num_BS_Antennas*Num_MS_Antennas)*alpha(l)*Amh(:,l)*Abh(:,l)'; 24 | end 25 | pcsi(iter,:,:) = Channel; 26 | end 27 | 28 | save('dataset_L4/channel.mat','pcsi'); -------------------------------------------------------------------------------- /matlab/codebook_bs.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LQJecho/Channel-Estimation-and-Hybrid-Precoding-for-Millimeter-Wave-Systems-Based-on-Deep-Learning/cd79577896e1658baa763c9a4914c3fd9e9f145d/matlab/codebook_bs.mat 
-------------------------------------------------------------------------------- /matlab/codebook_ms.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LQJecho/Channel-Estimation-and-Hybrid-Precoding-for-Millimeter-Wave-Systems-Based-on-Deep-Learning/cd79577896e1658baa763c9a4914c3fd9e9f145d/matlab/codebook_ms.mat -------------------------------------------------------------------------------- /matlab/gen_testdata.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | close all 4 | 5 | addpath('./HC-CE-Algorithm') 6 | addpath('./traditional-HBF') 7 | 8 | 9 | Nt = 64; 10 | Nr = 2; 11 | Nrf = 2; 12 | Ns = 2; % the number of data streams 13 | Nloop = 1000; 14 | Lest = 3; % Number of phase shifters quantization bits 15 | L=3; 16 | 17 | file=load('codebook_bs.mat'); 18 | ompcb=file.OMP; 19 | mocb=file.MO; 20 | dbfcb=file.DBF; 21 | 22 | file=load('codebook_ms.mat'); 23 | codebook_ms=file.DBF; 24 | 25 | dir='./dataset/channel_test.mat'; 26 | file=load(dir); 27 | pcsi=file.pcsi; 28 | 29 | pnr_array=0; 30 | snr_array = -10:2:2; 31 | for pnr_id=1:1:length(pnr_array) 32 | pnr=pnr_array(pnr_id); 33 | [ecsi_omp,data,index_bs,index_ms,nmse_omp] = channel_estimation_test(pcsi,pnr,Nloop, L, Lest,Nrf, Nt, Nr,ompcb,codebook_ms); 34 | [ecsi,data,index_bs,index_ms,nmse] = channel_estimation_test(pcsi,pnr,Nloop, L, Lest,Nrf, Nt, Nr,mocb,codebook_ms); 35 | 36 | for snr_id = 1 : length(snr_array) 37 | noise_power = 1 / 10^(snr_array(snr_id)/10); % Noise Power 38 | t1 = clock; 39 | for n = 1 : Nloop 40 | H = squeeze(pcsi(n,:,:)); 41 | 42 | Hest_omp = squeeze(ecsi_omp(n,:,:)); 43 | [V_omp] = Yuwei_method( Hest_omp,Ns,Nrf,noise_power,Nt ); 44 | rate_omp_array(snr_id, n) = get_rate(V_omp,noise_power,Ns,H); 45 | 46 | Hest_mo = squeeze(ecsi(n,:,:)); 47 | [V_mo] = Yuwei_method( Hest_mo,Ns,Nrf,noise_power,Nt ); 48 | rate_mo_array(snr_id, n) = get_rate(V_mo,noise_power,Ns,H); 49 
| 50 | Hest_dbf = squeeze(ecsi_dbf(n,:,:)); 51 | [V_dbf] = Yuwei_method( Hest_dbf,Ns,Nrf,noise_power,Nt ); 52 | rate_dbf_array(snr_id, n) = get_rate(V_dbf,noise_power,Ns,H); 53 | 54 | if (n == 20) 55 | mytoc(t1, Nloop*length(snr_array)); 56 | end 57 | 58 | end 59 | 60 | end 61 | 62 | rate_omp = real(mean(rate_omp_array,2)); 63 | rate_mo = real(mean(rate_mo_array,2)); 64 | rate_dbf = real(mean(rate_dbf_array,2)); 65 | 66 | 67 | dir=['./dataset/pnr',num2str(pnr),'_test.mat']; 68 | save(dir,'pnr','snr_array','rate_omp','rate_mo','ecsi','pcsi','data','index_bs','index_ms','nmse','nmse_omp') 69 | 70 | end 71 | 72 | -------------------------------------------------------------------------------- /matlab/gen_traindata.m: -------------------------------------------------------------------------------- 1 | clear 2 | clc 3 | close all 4 | 5 | 6 | addpath('./HC-CE-Algorithm') 7 | 8 | 9 | Nt = 64; 10 | Nr = 2; 11 | Nrf = 2; 12 | Ns = 2; % the number of data streams 13 | Nloop = 1000000; 14 | L=3; 15 | Lest = 3; 16 | 17 | 18 | file=load('codebook_bs.mat'); 19 | codebook_bs=file.MO; 20 | file=load('codebook_ms.mat'); 21 | codebook_ms=file.DBF; 22 | 23 | 24 | pnr_array=0; 25 | for pnr_id=1:1:length(pnr_array) 26 | pnr=pnr_array(pnr_id); 27 | [pcsi, ecsi,data,index_bs,index_ms,nmse] = channel_estimation_train(pnr,Nloop, L, Lest,Nrf, Nt, Nr,codebook_bs,codebook_ms); 28 | dir=['./dataset/pnr',num2str(pnr),'_train.mat']; 29 | save(dir,'pnr','pcsi','ecsi','index_bs','index_ms','data','nmse') 30 | end 31 | 32 | 33 | -------------------------------------------------------------------------------- /matlab/get_rate.m: -------------------------------------------------------------------------------- 1 | function rate = get_rate(V_equal, Vn, Ns, H) 2 | %get the rate (SE) for equivalent V and W 3 | rate = log2(det(eye(Ns) + 1/Vn * H * V_equal * V_equal' * H')); 4 | -------------------------------------------------------------------------------- /matlab/traditional-HBF/FD_method.m: 
-------------------------------------------------------------------------------- 1 | function [V_FD, W_FD] = FD_method(H,Ns) 2 | % traditional SVD algorithm for rate maximization 3 | [U,~,V] = svd(H); 4 | V_FD = V(:,1:Ns); 5 | %power constraint 6 | V_FD = V_FD / norm(V_FD,'fro'); 7 | W_FD=U(:,1:Ns); 8 | W_FD=W_FD/norm(W_FD,'fro'); 9 | 10 | -------------------------------------------------------------------------------- /matlab/traditional-HBF/Yuwei_method.m: -------------------------------------------------------------------------------- 1 | function [V] = Yuwei_method( H,Ns,Nrf,Vn,Nt ) 2 | % the YUWEI algorithm for both narrowband and broadband 3 | % cite the paper Hybrid digital and analog beamforming design for large-scale antenna arrays 4 | V_RF = yuweiA1(Vn, H, Nrf, Nt); 5 | try 6 | Q = (V_RF'*V_RF); 7 | T = Q^(-0.5); 8 | L = H*V_RF*T; 9 | [~,D,V] = svd(L); 10 | [~,IX] = sort(diag(D),'descend'); 11 | M = V(:,IX); 12 | U = M(:,1:Ns); 13 | V_D = T*U; 14 | catch 15 | V_D = eye(Nrf); 16 | end 17 | V_D = V_D/norm(V_RF*V_D,'fro'); 18 | V=V_RF*V_D; 19 | end 20 | 21 | 22 | -------------------------------------------------------------------------------- /matlab/traditional-HBF/yuweiA1.m: -------------------------------------------------------------------------------- 1 | function V_RF = yuweiA1(Vn, H, Nrf, Nt) 2 | 3 | V_RF = ones(Nt,Nrf); 4 | F = H'*H; 5 | g = 1/Nrf/Nt; 6 | a = g/Vn; 7 | 8 | for Nloop = 1:10 9 | for j = 1:Nrf 10 | VRF = V_RF; 11 | VRF(:,j)=[]; 12 | C = eye(Nrf-1)+a*VRF'*F*VRF; 13 | G = a*F-a^2*F*VRF*C^(-1)*VRF'*F; 14 | for i = 1:Nt 15 | for l = 1:Nt 16 | if i~=l 17 | x(l)=G(i,l)*V_RF(l,j); 18 | end 19 | end 20 | n = sum(x); 21 | if n ==0 22 | V_RF(i,j)=1; 23 | else 24 | V_RF(i,j)=n/abs(n); 25 | end 26 | end 27 | end 28 | end -------------------------------------------------------------------------------- /matlab/traditional-HBF/yuweiA2.m: -------------------------------------------------------------------------------- 1 | function W_RF = 
yuweiA2(V_D,V_RF, Vn, H, Nrf, Nr) 2 | 3 | W_RF = ones(Nr,Nrf); 4 | F = H*V_RF*V_D*V_D'*V_RF'*H'; 5 | g = 1/Nr; 6 | a = g/Vn; 7 | 8 | for Nloop = 1:10 9 | for j = 1:Nrf 10 | VRF = W_RF; 11 | VRF(:,j)=[]; 12 | C = eye(Nrf-1)+a*VRF'*F*VRF; 13 | G = a*F-a^2*F*VRF*C^(-1)*VRF'*F; 14 | for i = 1:Nr 15 | for l = 1:Nr 16 | if i~=l 17 | x(l)=G(i,l)*W_RF(l,j); 18 | end 19 | end 20 | n = sum(x); 21 | if n ==0 22 | W_RF(i,j)=1; 23 | else 24 | W_RF(i,j)=n/abs(n); 25 | end 26 | end 27 | end 28 | end -------------------------------------------------------------------------------- /python/main.py: -------------------------------------------------------------------------------- 1 | from model_gen import * 2 | 3 | con = tf.ConfigProto() 4 | con.gpu_options.allow_growth = True 5 | tf.keras.backend.set_session(tf.Session(config=con)) 6 | 7 | 8 | def run_hbf_net(): 9 | model = model_hbf_net() 10 | save_dir = './model/HBF_Net.h5' 11 | reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, min_lr=0.00005) 12 | checkpoint = callbacks.ModelCheckpoint(save_dir, monitor='val_loss', 13 | verbose=1, save_best_only=True, mode='min', save_weights_only=True) 14 | H, Hest, Noise_power, Nloop = load_data_hbfnet(pnr, load_trainset_flag, big_trainset_flag=0, L=L) 15 | if train_flag == 1: 16 | model.fit(x=[Hest, H, Noise_power], y=H, batch_size=bs, 17 | epochs=epoch, verbose=2, validation_split=0.1, callbacks=[reduce_lr, checkpoint]) 18 | if train_flag == 0: 19 | rate = [] 20 | model.load_weights(save_dir) 21 | for snr in test_snr_array: 22 | Noise_power = 1 / np.power(10, np.ones([Nloop, 1]) * snr / 10) 23 | y = model.evaluate(x=[Hest, H, Noise_power], y=H, batch_size=10000) 24 | print(snr, y) 25 | rate.append(-y) 26 | print(rate) 27 | def run_ce_hbf_net(): 28 | model = model_ce_hbf_net() 29 | save_dir = './model/CE_HBF_Net.h5' 30 | 31 | reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, min_lr=0.00005) 32 | checkpoint = 
callbacks.ModelCheckpoint(save_dir, monitor='val_loss', 33 | verbose=1, save_best_only=True, mode='min', save_weights_only=True) 34 | Nloop, H, data, index_bs, index_ms, Noise_power = load_data_cehbfnet(pnr, load_trainset_flag, L=L) 35 | if train_flag == 1: 36 | model.fit(x=[data, index_bs, index_ms, H, Noise_power], y=H, batch_size=bs, 37 | epochs=epoch, verbose=2, validation_split=0.1, callbacks=[reduce_lr, checkpoint]) 38 | if train_flag == 0: 39 | rate = [] 40 | model.load_weights(save_dir) 41 | for snr in test_snr_array: 42 | Noise_power = 1 / np.power(10, np.ones([Nloop, 1]) * snr / 10) 43 | y = model.evaluate(x=[data, index_bs, index_ms, H, Noise_power], y=H, batch_size=10000) 44 | print(snr, y) 45 | rate.append(-y) 46 | print(rate) 47 | 48 | def run_ce_hbf_net_v1(): 49 | model = model_ce_hbf_net_v1() 50 | save_dir = './model/CE_HBF_Net_V1.h5' 51 | reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, min_lr=0.00005) 52 | checkpoint = callbacks.ModelCheckpoint(save_dir, monitor='val_loss', 53 | verbose=1, save_best_only=True, mode='min', save_weights_only=True) 54 | Nloop, H, data, index_bs, index_ms, Noise_power = load_data_cehbfnet(pnr, load_trainset_flag, L=L) 55 | if train_flag == 1: 56 | model.fit(x=[data, H, Noise_power], y=H, batch_size=bs, 57 | epochs=epoch, verbose=2, validation_split=0.1, callbacks=[reduce_lr, checkpoint]) 58 | if train_flag == 0: 59 | rate = [] 60 | model.load_weights(save_dir) 61 | for snr in test_snr_array: 62 | Noise_power = 1 / np.power(10, np.ones([Nloop, 1]) * snr / 10) 63 | y = model.evaluate(x=[data, H, Noise_power], y=H, batch_size=10000) 64 | print(snr, y) 65 | rate.append(-y) 66 | print(rate) 67 | def run_ce_hbf_net_v2(): 68 | model = model_ce_hbf_net_v2() 69 | save_dir = './model/CE_HBF_Net_V2.h5' 70 | 71 | reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=20, min_lr=0.00005) 72 | checkpoint = callbacks.ModelCheckpoint(save_dir, monitor='val_loss', 73 | 
verbose=1, save_best_only=True, mode='min', save_weights_only=True) 74 | Nloop, H, data, index_bs, index_ms, Noise_power = load_data_cehbfnet_v(load_trainset_flag, v=2) 75 | 76 | if train_flag == 1: 77 | model.fit(x=[data, index_bs, index_ms, H, Noise_power], y=H, batch_size=bs, 78 | epochs=epoch, verbose=2, validation_split=0.1, callbacks=[reduce_lr, checkpoint]) 79 | if train_flag == 0: 80 | rate = [] 81 | model.load_weights(save_dir) 82 | for snr in test_snr_array: 83 | Noise_power = 1 / np.power(10, np.ones([Nloop, 1]) * snr / 10) 84 | y = model.evaluate(x=[data, index_bs, index_ms, H, Noise_power], y=H, batch_size=5000) 85 | print(snr, y) 86 | rate.append(-y) 87 | print(rate) 88 | 89 | 90 | 91 | epoch = 100 92 | bs=2048 * 3 93 | train_flag = 0 94 | load_trainset_flag =train_flag 95 | L=3 96 | pnr= 0 97 | 98 | run_hbf_net() 99 | run_ce_hbf_net() 100 | run_ce_hbf_net_v1() 101 | run_ce_hbf_net_v2() 102 | 103 | 104 | -------------------------------------------------------------------------------- /python/model/CE_HBF_Net.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LQJecho/Channel-Estimation-and-Hybrid-Precoding-for-Millimeter-Wave-Systems-Based-on-Deep-Learning/cd79577896e1658baa763c9a4914c3fd9e9f145d/python/model/CE_HBF_Net.h5 -------------------------------------------------------------------------------- /python/model/CE_HBF_Net_V1.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LQJecho/Channel-Estimation-and-Hybrid-Precoding-for-Millimeter-Wave-Systems-Based-on-Deep-Learning/cd79577896e1658baa763c9a4914c3fd9e9f145d/python/model/CE_HBF_Net_V1.h5 -------------------------------------------------------------------------------- /python/model/CE_HBF_Net_V2.h5: -------------------------------------------------------------------------------- 
from utils import *

def channel_attention(input_feature, ratio=1):
    """CBAM-style channel attention.

    Squeezes `input_feature` with both global average- and max-pooling,
    pushes each through a shared two-layer MLP, sums the results, applies a
    sigmoid gate, and rescales the input channel-wise.

    Args:
        input_feature: 4-D tensor, channels-last (the code reads shape[-1]).
        ratio: bottleneck reduction ratio of the shared MLP.

    Returns:
        Tensor of the same shape as `input_feature`, channel-reweighted.
    """
    # channel_axis = 1 if K.image_data_format() == "channels_last" else -1
    channel = input_feature.shape[-1]

    # The two Dense layers are shared between the avg- and max-pool branches.
    shared_layer_one = Dense(channel//ratio,
                             activation='relu',
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')
    shared_layer_two = Dense(channel,
                             kernel_initializer='he_normal',
                             use_bias=True,
                             bias_initializer='zeros')

    avg_pool = GlobalAveragePooling2D()(input_feature)
    avg_pool = Reshape((1, 1, channel))(avg_pool)
    # assert avg_pool._keras_shape[1:] == (1,1,channel)
    avg_pool = shared_layer_one(avg_pool)
    # assert avg_pool._keras_shape[1:] == (1,1,channel//ratio)
    avg_pool = shared_layer_two(avg_pool)
    # assert avg_pool._keras_shape[1:] == (1,1,channel)

    max_pool = GlobalMaxPooling2D()(input_feature)
    max_pool = Reshape((1, 1, channel))(max_pool)
    # assert max_pool._keras_shape[1:] == (1,1,channel)
    max_pool = shared_layer_one(max_pool)
    # assert max_pool._keras_shape[1:] == (1,1,channel//ratio)
    max_pool = shared_layer_two(max_pool)
    # assert max_pool._keras_shape[1:] == (1,1,channel)

    cbam_feature = Add()([avg_pool, max_pool])
    cbam_feature = Activation('sigmoid')(cbam_feature)

    # if K.image_data_format() == "channels_first":
    #     cbam_feature = Permute((3, 1, 2))(cbam_feature)

    return multiply([input_feature, cbam_feature])

def dense_unit_dropout(input_tensor, nn, drop_out_rate):
    """Dense(nn) -> BatchNorm -> ReLU -> Dropout building block."""
    out_tensor = Dense(nn)(input_tensor)
    out_tensor = BatchNormalization()(out_tensor)
    out_tensor = tf.nn.relu(out_tensor)
    out_tensor = Dropout(drop_out_rate)(out_tensor)
    return out_tensor

def model_hbf_net():
    """Build HBF-Net: estimated CSI + noise power -> hybrid precoder.

    The network emits analog phases (`phase`) and 8 real baseband weights
    (`vbb`); `rate_func` computes the achievable rate from the *perfect* CSI
    input, and `loss_func` trains on its negative, so `perfect_CSI` is used
    only inside the loss.  Model output is the normalized hybrid precoder.
    """
    perfect_CSI = Input(name='perfect_CSI', shape=(Nr, Nt,), dtype=tf.complex64)
    estimated_CSI = Input(shape=(Nt, Nr, 2,), dtype=tf.float32)  # real/imag planes
    Noise_power_input = Input(name='Noise_power_input', shape=(1,), dtype=tf.float32)

    tmp = BatchNormalization()(estimated_CSI)

    tmp = Conv2D(4*Nt, (3, 2), activation='relu')(tmp)
    # tmp=AvgPool2D((2,1),strides=2)(tmp)
    tmp = BatchNormalization()(tmp)
    tmp = channel_attention(tmp)
    tmp = channel_attention(tmp)

    tmp = Conv2D(2*Nt, (3, 1), activation='relu')(tmp)
    # tmp=AvgPool2D((2,1),strides=2)(tmp)
    tmp = BatchNormalization()(tmp)
    tmp = channel_attention(tmp)
    tmp = channel_attention(tmp)

    tmp = Flatten()(tmp)
    tmp = concatenate([tmp, Noise_power_input], axis=1)
    tmp = BatchNormalization()(tmp)
    # tmp=dense_unit_dropout(tmp,512,0.3)
    # tmp=BatchNormalization()(tmp)
    phase = Dense(Nrf*Nt, 'relu')(tmp)   # second positional arg is activation
    vbb = Dense(8, name='vbb')(tmp)      # 2x2 complex BB precoder as 8 reals
    vrf = Lambda(phase2vrf, name='vrf')(phase)
    hbf = Lambda(hbf_func)([vbb, vrf])
    rate = Lambda(rate_func, dtype=tf.float32, output_shape=(1,))([perfect_CSI, vbb, phase, Noise_power_input])
    model = Model(inputs=[estimated_CSI, perfect_CSI, Noise_power_input], outputs=hbf)
    model.compile(optimizer='adam', loss=loss_func(rate))
    model.summary()
    return model

def model_ce_hbf_net():
    """Build CE-HBF-Net: raw pilot observations -> hybrid precoder.

    Inputs are the received-signal vector R (180 complex values split into
    real/imag), the selected beam indices at BS (45) and MS (3), the perfect
    CSI (loss only) and the noise power.  An attention Dense layer reweights
    the raw data before the conv trunk.
    """
    train_data = Input(shape=(180*2,), dtype=tf.float32)
    data_temp = BatchNormalization()(train_data)
    train_index_bs = Input(shape=(45,), dtype=tf.float32)
    index_bs_temp = BatchNormalization()(train_index_bs)
    train_index_ms = Input(shape=(3,), dtype=tf.float32)
    index_ms_temp = BatchNormalization()(train_index_ms)
    perfect_CSI = Input(name='perfect_CSI', shape=(Nr, Nt,), dtype=tf.complex64)
    Noise_power_input = Input(shape=(1,), dtype=tf.float32)

    imperfect_CSI = concatenate([data_temp, index_bs_temp, index_ms_temp, Noise_power_input])
    imperfect_CSI = BatchNormalization()(imperfect_CSI)
    # Element-wise attention over the raw (un-normalized) received signal.
    attention = dense_unit_dropout(imperfect_CSI, 180 * 2, 0)
    data_temp = Multiply()([attention, train_data])
    imperfect_CSI = concatenate([data_temp, index_bs_temp, index_ms_temp, Noise_power_input])

    num_channel = 6
    tmp = dense_unit_dropout(imperfect_CSI, Nt * Nr * num_channel, 0)
    tmp = Reshape((Nt, Nr, num_channel))(tmp)

    tmp = Conv2D(8 * Nt, (3, 2), activation='relu')(tmp)
    tmp = AvgPool2D((2, 1), strides=2)(tmp)
    tmp = BatchNormalization()(tmp)
    tmp = channel_attention(tmp)

    tmp = Conv2D(6 * Nt, (3, 1), activation='relu')(tmp)
    tmp = AvgPool2D((2, 1), strides=2)(tmp)
    tmp = BatchNormalization()(tmp)
    tmp = channel_attention(tmp)

    tmp = Flatten()(tmp)
    tmp = concatenate([tmp, Noise_power_input], axis=1)
    tmp = BatchNormalization()(tmp)
    phase = Dense(Nrf * Nt, 'relu')(tmp)
    vbb = Dense(8, name='vbb')(tmp)
    vrf = Lambda(phase2vrf, name='vrf')(phase)
    hbf = Lambda(hbf_func)([vbb, vrf])
    rate = Lambda(rate_func, dtype=tf.float32, output_shape=(1,))([perfect_CSI, vbb, phase, Noise_power_input])
    model = Model(inputs=[train_data, train_index_bs, train_index_ms, perfect_CSI, Noise_power_input], outputs=hbf)
    model.compile(optimizer='adam', loss=loss_func(rate))
    # optimizer = optimizers.adam(lr=0.00001)
    # model.compile(optimizer=optimizer, loss=loss_func(rate))

    model.summary()
    return model

# Input only SNR and R (labeled as 'HCNet-V1')
def model_ce_hbf_net_v1():
    """Build HCNet-V1: same trunk as CE-HBF-Net but without beam indices."""
    train_data = Input(shape=(180*2,), dtype=tf.float32)
    data_temp = BatchNormalization()(train_data)

    perfect_CSI = Input(name='perfect_CSI', shape=(Nr, Nt,), dtype=tf.complex64)
    Noise_power_input = Input(shape=(1,), dtype=tf.float32)

    imperfect_CSI = concatenate([data_temp, Noise_power_input])
    imperfect_CSI = BatchNormalization()(imperfect_CSI)
    attention = dense_unit_dropout(imperfect_CSI, 180 * 2, 0)
    data_temp = Multiply()([attention, train_data])
    imperfect_CSI = concatenate([data_temp, Noise_power_input])

    num_channel = 6
    tmp = dense_unit_dropout(imperfect_CSI, Nt * Nr * num_channel, 0)
    tmp = Reshape((Nt, Nr, num_channel))(tmp)

    tmp = Conv2D(8 * Nt, (3, 2), activation='relu')(tmp)
    tmp = AvgPool2D((2, 1), strides=2)(tmp)
    tmp = BatchNormalization()(tmp)
    tmp = channel_attention(tmp)

    tmp = Conv2D(6 * Nt, (3, 1), activation='relu')(tmp)
    tmp = AvgPool2D((2, 1), strides=2)(tmp)
    tmp = BatchNormalization()(tmp)
    tmp = channel_attention(tmp)

    tmp = Flatten()(tmp)
    tmp = concatenate([tmp, Noise_power_input], axis=1)
    tmp = BatchNormalization()(tmp)
    phase = Dense(Nrf * Nt, 'relu')(tmp)
    vbb = Dense(8)(tmp)
    vrf = Lambda(phase2vrf)(phase)
    hbf = Lambda(hbf_func)([vbb, vrf])
    rate = Lambda(rate_func, dtype=tf.float32, output_shape=(1,))([perfect_CSI, vbb, phase, Noise_power_input])
    model = Model(inputs=[train_data, perfect_CSI, Noise_power_input], outputs=hbf)
    model.compile(optimizer='adam', loss=loss_func(rate))
    # optimizer = optimizers.adam(lr=0.00001)
    # model.compile(optimizer=optimizer, loss=loss_func(rate))

    model.summary()
    return model
# Input only SNR, R and Isel in the last inner iteration of every outer iteration (labeled as 'HCNet-V2')
def model_ce_hbf_net_v2():
    """Build HCNet-V2: reduced inputs (36 data values, 9+3 beam indices)."""
    train_data = Input(shape=(36,), dtype=tf.float32)
    data_temp = BatchNormalization()(train_data)
    train_index_bs = Input(shape=(9,), dtype=tf.float32)
    index_bs_temp = BatchNormalization()(train_index_bs)
    train_index_ms = Input(shape=(3,), dtype=tf.float32)
    index_ms_temp = BatchNormalization()(train_index_ms)
    perfect_CSI = Input(name='perfect_CSI', shape=(Nr, Nt,), dtype=tf.complex64)
    Noise_power_input = Input(shape=(1,), dtype=tf.float32)

    imperfect_CSI = concatenate([data_temp, index_bs_temp, index_ms_temp, Noise_power_input])
    imperfect_CSI = BatchNormalization()(imperfect_CSI)
    # Element-wise attention over the raw (un-normalized) received signal.
    attention = dense_unit_dropout(imperfect_CSI, 36, 0)
    data_temp = Multiply()([attention, train_data])
    imperfect_CSI = concatenate([data_temp, index_bs_temp, index_ms_temp, Noise_power_input])

    num_channel = 6
    tmp = dense_unit_dropout(imperfect_CSI, Nt * Nr * num_channel, 0)
    tmp = Reshape((Nt, Nr, num_channel))(tmp)

    tmp = Conv2D(8 * Nt, (3, 2), activation='relu')(tmp)  # 8
    tmp = AvgPool2D((2, 1), strides=2)(tmp)
    tmp = BatchNormalization()(tmp)
    tmp = channel_attention(tmp)

    tmp = Conv2D(6 * Nt, (3, 1), activation='relu')(tmp)  # 6
    tmp = AvgPool2D((2, 1), strides=2)(tmp)
    tmp = BatchNormalization()(tmp)
    tmp = channel_attention(tmp)

    tmp = Flatten()(tmp)
    tmp = concatenate([tmp, Noise_power_input], axis=1)
    tmp = BatchNormalization()(tmp)
    phase = Dense(Nrf * Nt, 'relu')(tmp)
    vbb = Dense(8)(tmp)
    vrf = Lambda(phase2vrf)(phase)
    hbf = Lambda(hbf_func)([vbb, vrf])
    rate = Lambda(rate_func, dtype=tf.float32, output_shape=(1,))([perfect_CSI, vbb, phase, Noise_power_input])
    model = Model(inputs=[train_data, train_index_bs, train_index_ms, perfect_CSI, Noise_power_input], outputs=hbf)
    model.compile(optimizer='adam', loss=loss_func(rate))
    # optimizer = optimizers.adam(lr=0.00001)
    # model.compile(optimizer=optimizer, loss=loss_func(rate))

    model.summary()
    return model


# =========================== python/utils.py ================================
from tensorflow.python.keras import *
from tensorflow.python.keras import backend
import tensorflow as tf
import scipy.io as sio
from tensorflow.python.keras.layers import *
import h5py
import numpy as np
import math


# --------global parameters-------------
Nt = 64   # BS antennas
Nr = 2    # MS antennas
Nrf = 2   # RF chains
Ns = 2    # data streams

# test snr range
test_snr_begin = -10
test_snr_end = 2
test_snr_array = range(test_snr_begin, test_snr_end + 1, 2)
# train snr range
snr_begin = -10
snr_end = 10


def load_data_cehbfnet_v(load_trainset_flag, v, L=3, pnr=0):
    """Load the CE-HBF dataset for the reduced-input model variants.

    Args:
        load_trainset_flag: 1 -> load the two training .mat files, 0 -> test file.
        v: variant selector; 2 keeps only the last inner iteration of every
           outer iteration, 3 keeps only the first outer iteration.
        L, pnr: dataset path parameters.

    Returns:
        (Nloop, H, data, index_bs, index_ms, Noise_power) as float32/complex64
        numpy arrays.
    """
    def load_from_dir(path, v):
        # Open read-only and close the handle deterministically
        # (the original h5py.File(dir) was never closed).
        with h5py.File(path, 'r') as feature:
            H = feature['pcsi'][:]
            data = feature['data'][:]
            index_bs = feature['index_bs'][:]
            index_ms = feature['index_ms'][:]
        H = np.transpose(H, (2, 1, 0))
        H = H['real'] + 1j * H['imag']
        data = np.squeeze(data)
        data = np.transpose(data, (1, 0))
        data = data['real'] + 1j * data['imag']
        index_bs = np.transpose(index_bs, (1, 0))
        index_ms = np.transpose(index_ms, (1, 0))

        # np.int was removed in NumPy 1.24; the builtin is equivalent here.
        Nloop = int(H.shape[0])
        data = np.reshape(data, [Nloop, -1])
        index_bs = np.reshape(index_bs, [Nloop, -1])
        index_ms = np.reshape(index_ms, [Nloop, -1])
        # NOTE(review): randint's high bound is exclusive, so snr_end itself
        # is never drawn for training -- confirm this is intended.
        Noise_power = 1 / np.power(10, np.random.randint(snr_begin, snr_end, [Nloop, 1]) / 10)

        if v == 2:
            # Input only SNR, R and Isel in the last inner iteration of every
            # outer iteration (labeled as 'HCNet-V2')
            data = np.concatenate([data[:, 54:60], data[:, 114:120], data[:, 174:180]], axis=1)
            data = np.concatenate([np.real(data), np.imag(data)], 1)
            index_bs = np.concatenate([index_bs[:, 12:15], index_bs[:, 27:30], index_bs[:, 42:45]], axis=1)
        if v == 3:
            # Input only SNR, R, and Isel in the first outer iteration (labeled as 'HCNet-V3')
            data = data[:, 0:60]
            data = np.concatenate([np.real(data), np.imag(data)], 1)
            index_bs = index_bs[:, 0:15]
            index_ms = index_ms[:, 0]

        return Nloop, H, data, index_bs, index_ms, Noise_power

    if load_trainset_flag == 0:
        path = f'../matlab/dataset_L{L}/snr{test_snr_begin}_{test_snr_end}/pnr{pnr}_test.mat'
        Nloop, H, data, index_bs, index_ms, Noise_power = load_from_dir(path, v)
    if load_trainset_flag == 1:
        # -------------------train set----------------------------
        path1 = f'../matlab/dataset_L{L}/pnr{pnr}_train1.mat'
        Nloop1, H1, data1, index_bs1, index_ms1, Noise_power1 = load_from_dir(path1, v)
        path2 = f'../matlab/dataset_L{L}/pnr{pnr}_train2.mat'
        Nloop2, H2, data2, index_bs2, index_ms2, Noise_power2 = load_from_dir(path2, v)
        H = np.concatenate([H1, H2], axis=0)
        data = np.concatenate([data1, data2], axis=0)
        index_bs = np.concatenate([index_bs1, index_bs2], axis=0)
        index_ms = np.concatenate([index_ms1, index_ms2], axis=0)
        Noise_power = np.concatenate([Noise_power1, Noise_power2], axis=0)
        Nloop = Nloop1 + Nloop2
    H = H.astype(np.complex64)
    data = data.astype(np.float32)
    index_bs = index_bs.astype(np.float32)
    index_ms = index_ms.astype(np.float32)
    Noise_power = Noise_power.astype(np.float32)
    return Nloop, H, data, index_bs, index_ms, Noise_power


def load_data_from_dir_hbfnet(path):
    """Load one .mat file for HBF-Net: perfect CSI, estimated CSI (as
    real/imag channel planes) and randomly drawn training noise powers."""
    Hest_flag = 'ecsi'
    with h5py.File(path, 'r') as feature:
        H = feature['pcsi'][:]
        Hest = feature[Hest_flag][:]
    H = np.squeeze(H)
    H = np.transpose(H, (2, 1, 0))
    H = H['real'] + 1j * H['imag']
    Hest = Hest['real'] + 1j * Hest['imag']
    Hest = np.expand_dims(Hest, axis=3)
    Hest = np.concatenate([np.real(Hest), np.imag(Hest)], 3)
    Hest = np.transpose(Hest, (2, 0, 1, 3))
    Nloop = int(H.shape[0])  # np.int removed in NumPy 1.24
    Noise_power = 1 / np.power(10, np.random.randint(snr_begin, snr_end, [Nloop, 1]) / 10)
    return H, Hest, Noise_power, Nloop


def load_data_hbfnet(pnr, load_trainset_flag, big_trainset_flag=1, L=3):
    """Load the HBF-Net train or test set (optionally both train halves)."""
    if load_trainset_flag == 0:
        path = f'../matlab/dataset_L{L}/snr{test_snr_begin}_{test_snr_end}/pnr{pnr}_test.mat'
        H, Hest, Noise_power, Nloop = load_data_from_dir_hbfnet(path)
    if load_trainset_flag == 1:
        if big_trainset_flag == 1:
            path1 = f'../matlab/dataset_L{L}/pnr{pnr}_train1.mat'
            H1, Hest1, Noise_power1, Nloop1 = load_data_from_dir_hbfnet(path1)
            path2 = f'../matlab/dataset_L{L}/pnr{pnr}_train2.mat'
            H2, Hest2, Noise_power2, Nloop2 = load_data_from_dir_hbfnet(path2)
            H = np.concatenate([H1, H2], axis=0)
            Hest = np.concatenate([Hest1, Hest2], axis=0)
            Noise_power = np.concatenate([Noise_power1, Noise_power2], axis=0)
            Nloop = Nloop1 + Nloop2
        else:
            path = f'../matlab/dataset_L{L}/pnr{pnr}_train1.mat'
            H, Hest, Noise_power, Nloop = load_data_from_dir_hbfnet(path)
    H = H.astype(np.complex64)
    Noise_power = Noise_power.astype(np.float32)
    return H, Hest, Noise_power, Nloop


def load_data_from_dir_cehbfnet(path):
    """Load one .mat file for CE-HBF-Net (full pilot observations)."""
    with h5py.File(path, 'r') as feature:
        H = feature['pcsi'][:]
        data = feature['data'][:]
        index_bs = feature['index_bs'][:]
        index_ms = feature['index_ms'][:]
    H = np.transpose(H, (2, 1, 0))
    H = H['real'] + 1j * H['imag']
    data = np.squeeze(data)
    data = np.transpose(data, (1, 0))
    data = data['real'] + 1j * data['imag']
    index_bs = np.transpose(index_bs, (1, 0))
    index_ms = np.transpose(index_ms, (1, 0))

    Nloop = int(H.shape[0])  # np.int removed in NumPy 1.24
    data = np.concatenate([np.real(data), np.imag(data)], 1)
    data = np.reshape(data, [Nloop, -1])
    index_bs = np.reshape(index_bs, [Nloop, -1])
    index_ms = np.reshape(index_ms, [Nloop, -1])
    Noise_power = 1 / np.power(10, np.random.randint(snr_begin, snr_end, [Nloop, 1]) / 10)
    return Nloop, H, data, index_bs, index_ms, Noise_power


def load_data_cehbfnet(pnr, load_trainset_flag, big_trainset_flag=1, L=3):
    """Load the CE-HBF-Net train or test set (optionally both train halves)."""
    if load_trainset_flag == 0:
        path = f'../matlab/dataset_L{L}/snr{test_snr_begin}_{test_snr_end}/pnr{pnr}_test.mat'
        Nloop, H, data, index_bs, index_ms, Noise_power = load_data_from_dir_cehbfnet(path)
    if load_trainset_flag == 1:
        # -------------------train set----------------------------
        if big_trainset_flag == 1:
            path1 = f'../matlab/dataset_L{L}/pnr{pnr}_train1.mat'
            Nloop1, H1, data1, index_bs1, index_ms1, Noise_power1 = load_data_from_dir_cehbfnet(path1)
            path2 = f'../matlab/dataset_L{L}/pnr{pnr}_train2.mat'
            Nloop2, H2, data2, index_bs2, index_ms2, Noise_power2 = load_data_from_dir_cehbfnet(path2)
            H = np.concatenate([H1, H2], axis=0)
            data = np.concatenate([data1, data2], axis=0)
            index_bs = np.concatenate([index_bs1, index_bs2], axis=0)
            index_ms = np.concatenate([index_ms1, index_ms2], axis=0)
            Noise_power = np.concatenate([Noise_power1, Noise_power2], axis=0)
            Nloop = Nloop1 + Nloop2
        else:
            path = f'../matlab/dataset_L{L}/pnr{pnr}_train1.mat'
            Nloop, H, data, index_bs, index_ms, Noise_power = load_data_from_dir_cehbfnet(path)
    H = H.astype(np.complex64)
    data = data.astype(np.float32)
    index_bs = index_bs.astype(np.float32)
    index_ms = index_ms.astype(np.float32)
    Noise_power = Noise_power.astype(np.float32)
    return Nloop, H, data, index_bs, index_ms, Noise_power


def phase2vrf(phase):
    """Map real phases to unit-modulus complex analog precoder entries."""
    v_real = tf.cos(phase)
    v_imag = tf.sin(phase)
    vrf = tf.cast(tf.complex(v_real, v_imag), tf.complex64)
    return vrf


def hbf_func(temp):
    """Combine 8 real baseband weights (2x2 complex Vbb) with the analog
    precoder Vrf (two length-Nt rows) into the normalized hybrid precoder.

    Returns a (batch, 2*Nt) complex tensor with unit total power per sample.
    """
    vbb, vrf = temp
    # Unpack the 2x2 complex baseband matrix from 8 interleaved reals.
    vbb11 = tf.complex(tf.slice(vbb, [0, 0], [-1, 1]), tf.slice(vbb, [0, 1], [-1, 1]))
    vbb12 = tf.complex(tf.slice(vbb, [0, 2], [-1, 1]), tf.slice(vbb, [0, 3], [-1, 1]))
    vbb21 = tf.complex(tf.slice(vbb, [0, 4], [-1, 1]), tf.slice(vbb, [0, 5], [-1, 1]))
    vbb22 = tf.complex(tf.slice(vbb, [0, 6], [-1, 1]), tf.slice(vbb, [0, 7], [-1, 1]))
    vrf1 = tf.slice(vrf, [0, 0], [-1, Nt])
    vrf2 = tf.slice(vrf, [0, Nt], [-1, Nt])
    vhbf1 = vbb11 * vrf1 + vbb12 * vrf2
    vhbf2 = vbb21 * vrf1 + vbb22 * vrf2
    vhbf = tf.concat([vhbf1, vhbf2], axis=1)
    # sum power constraint
    vhbf1 = tf.divide(vhbf1, tf.norm(vhbf, axis=1, keepdims=True))
    vhbf2 = tf.divide(vhbf2, tf.norm(vhbf, axis=1, keepdims=True))
    vhbf = tf.concat([vhbf1, vhbf2], axis=1)
    return vhbf


def rate_func(temp):
    """Negative achievable rate (bits/s/Hz) of the hybrid precoder over the
    perfect channel h; used as the training loss via loss_func."""
    h, vbb, phase, noise_power = temp
    vrf = phase2vrf(phase)
    hbf = hbf_func([vbb, vrf])
    v = tf.reshape(hbf, (-1, 2, Nt))
    noise_power = tf.expand_dims(noise_power, 2)
    noise_power = tf.tile(noise_power, [1, 2, 2])
    noise_power = tf.cast(noise_power, tf.complex64)
    # (H V^T)(conj(V) conj(H)^T): 2x2 received-signal covariance per sample.
    sig_power = tf.matmul(tf.matmul(h, tf.transpose(v, (0, 2, 1))),
                          tf.matmul(tf.conj(v), tf.conj(tf.transpose(h, (0, 2, 1)))))
    ones = tf.eye(num_rows=Ns, num_columns=2, batch_shape=tf.shape(h)[0:1], dtype=tf.complex64)
    rate = tf.math.log(tf.linalg.det(ones + sig_power / noise_power)) / tf.cast(tf.math.log(2.0), tf.complex64)
    rate = tf.cast(rate, tf.float32)
    # rate=tf.reshape(rate,(-1,1))
    return -rate


def loss_func(rate):
    """Wrap a precomputed (negative-)rate tensor as a Keras loss; the
    returned closure ignores y_true / y_pred entirely."""
    def final_rate(y_t, y_p):
        return rate
    return final_rate