├── .gitignore ├── AWGN_Channel_Transmission ├── AWGN_Discrete_Density_Evolution.py ├── AWGN_Quantizer_BPSK.py ├── AWGN_channel.py ├── LDPC_Transmitter.py ├── Transmitter.py ├── __init__.py ├── kernels_quanti.cl └── kernels_quanti_template.cl ├── Continous_LDPC_Decoding ├── __init__.py ├── bp_decoder_irreg.py ├── kernels_min_and_BP.cl └── min_sum_decoder_irreg.py ├── Discrete_LDPC_decoding ├── Discrete_Density_Evolution.py ├── Discrete_Density_Evolution_irreg.py ├── GF2MatrixMul_c.c ├── GF2MatrixMul_c.pyx ├── Information_Matching.py ├── LDPC_encoder.py ├── __init__.py ├── discrete_LDPC_decoder.py ├── discrete_LDPC_decoder_irreg.py ├── kernels_template.cl └── kernels_template_irreg.cl ├── Irregular_LDPC_Decoding ├── DVB-S2 │ ├── BER_simulation_OpenCL.py │ ├── BER_simulation_OpenCL_enc.py │ ├── BER_simulation_OpenCL_min_sum.py │ └── decoder_config_generation.py └── WLAN │ ├── BER_simulation_OpenCL.py │ ├── BER_simulation_OpenCL_enc.py │ ├── BER_simulation_OpenCL_min_sum.py │ ├── BER_simulation_OpenCL_quant_BP.py │ ├── decoder_config_generation.py │ └── generate_802.11_matrix.py ├── LICENSE ├── README.md ├── Regular_LDPC_Decoding └── BPSK │ ├── BER_simulation_OpenCL.py │ ├── BER_simulation_OpenCL_min_sum.py │ └── decoder_config_generation.py └── setup_mult_c.py /.gitignore: -------------------------------------------------------------------------------- 1 | OpenCl_tools/matmul.py 2 | .idea/ 3 | *.pyc 4 | Irregular_LDPC_Decoding/WLAN/decoder_config_EbN0_gen_0.7_16cas.pkl 5 | Irregular_LDPC_Decoding/WLAN/decoder_config_EbN0_gen_0.7_32no_match.pkl 6 | Irregular_LDPC_Decoding/WLAN/decoder_config_EbN0_gen_0.8_32no_match.pkl 7 | Irregular_LDPC_Decoding/WLAN/decoder_config_EbN0_gen_0.95_32no_match.pkl 8 | Irregular_LDPC_Decoding/WLAN/decoder_config_EbN0_gen_0.9_32no_match.pkl 9 | Irregular_LDPC_Decoding/WLAN/decoder_config_EbN0_gen_1.0_32no_match.pkl 10 | *.pkl 11 | 
-------------------------------------------------------------------------------- /AWGN_Channel_Transmission/AWGN_Discrete_Density_Evolution.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | 3 | import scipy as sci 4 | 5 | from AWGN_Channel_Transmission.AWGN_Quantizer_BPSK import AWGN_Channel_Quantizer as AWGN_Channel_Quantizer_BPSK 6 | from AWGN_Channel_Transmission.AWGN_Quantizer_Mary import AwgnChannelQuantizer as AWGN_Channel_Quantizer_QAM 7 | from AWGN_Channel_Transmission.AWGN_Quantizer_Mary import AwgnChannelQuantizer_MPSK as AWGN_Channel_Quantizer_MPSK 8 | from Discrete_LDPC_decoding.Discrete_Density_Evolution import Discrete_Density_Evolution_class as discrete_DE 9 | from Discrete_LDPC_decoding.Discrete_Density_Evolution_irreg import \ 10 | Discrete_Density_Evolution_class_irregular as discrete_DE_irregular 11 | from Discrete_LDPC_decoding.Information_Matching import * 12 | 13 | __author__ = "Maximilian Stark" 14 | __copyright__ = "05.07.2016, Institute of Communications, University of Technology Hamburg" 15 | __credits__ = ["Maximilian Stark"] 16 | __version__ = "1.0" 17 | __email__ = "maximilian.stark@tuhh.de" 18 | __status__ = "Production" 19 | __name__ = "Discrete Density Evolution for a given noise level of an AWGN channel" 20 | __doc__ = """ This module contains classes which combine AWGN channel quantizers and discrete Density Evolution. 21 | They can be used to generate and save lookup tables for a certain design-Eb/N0. Different classes are 22 | found by inheritance of the base class, i.e. AWGN_Discrete_Density_Evolution_class to support different 23 | modulation schemes and irregular LDPC codes. 24 | """ 25 | 26 | class AWGN_Discrete_Density_Evolution_class: 27 | """ Generates a discrete LDPC decoder for a AWGN channel and a regular LDPC code for a certain design-Eb/N0. 28 | 29 | The assumed modulation is BPSK which is considered in the quantizer design. 
30 | Attributes: 31 | sigma_n2: noise variance corresponding to the desired design-Eb/N0 of the decoder 32 | AD_max_abs: limits of the quantizer 33 | cardinality_Y_channel: number of steps used for the fine quantization of the input distribution of the quantizer 34 | cardinality_T_channel: cardinality of the compression variable representing the quantizer output 35 | 36 | cardinality_T_decoder_ops: cardinality of the compression variables inside the decoder 37 | 38 | d_c: check node degree 39 | d_v: variable node degree 40 | 41 | imax: maximum number of iterations 42 | nror: number of runs of the Information Bottleneck algorithm 43 | 44 | Trellis_checknodevector_a: vectorized version of the trellis which holds the resulting outputs for a certain 45 | input and iteration at a check node 46 | Trellis_varnodevector_a: vectorized version of the trellis which holds the resulting outputs for a certain 47 | input and iteration at a variable node 48 | """ 49 | def __init__(self, sigma_n2_, AD_max_abs_,cardinality_Y_channel_, cardinality_T_channel_, 50 | cardinality_T_decoder_ops_,d_v_, d_c_, i_max_, nror_): 51 | """Inits the AWGN_Discrete_Density_Evolution_class with the following arguments 52 | 53 | Args: 54 | sigma_n2_: noise variance corresponding to the desired design-Eb/N0 of the decoder 55 | AD_max_abs_: limits of the quantizer 56 | cardinality_Y_channel_: number of steps used for the fine quantization of the input distribution of the quantizer 57 | cardinality_T_channel_: cardinality of the compression variable representing the quantizer output 58 | 59 | cardinality_T_decoder_ops_: cardinality of the compression variables inside the decoder 60 | 61 | d_c_: check node degree 62 | d_v_: variable node degree 63 | 64 | i_max_: maximum number of iterations 65 | nror_: number of runs of the Information Bottleneck algorithm 66 | """ 67 | # copy input arguments to class attributes 68 | self.sigma_n2 = sigma_n2_ 69 | self.AD_max_abs = AD_max_abs_ 70 | 71 | 
self.cardinality_Y_channel = cardinality_Y_channel_ 72 | self.cardinality_T_channel = cardinality_T_channel_ 73 | self.cardinality_T_decoder_ops = cardinality_T_decoder_ops_ 74 | 75 | self.d_v = d_v_ 76 | self.d_c = d_c_ 77 | 78 | R_c = 1 - self.d_v / self.d_c 79 | if R_c > 0: 80 | self.EbN0 = -10 * np.log10(self.sigma_n2 * 2 * R_c) 81 | 82 | self.imax = i_max_ 83 | self.nror = nror_ 84 | 85 | self.build_quantizer() 86 | 87 | 88 | self.Trellis_checknodevector_a = 0 89 | self.Trellis_varnodevector_a = 0 90 | 91 | def set_code_parameters(self): 92 | """Analysis of the given parity check matrix. 93 | Determines node-degree distribution, edge-degree distribution and code rate 94 | """ 95 | self.degree_checknode_nr = ((self.H_sparse).sum(1)).astype(np.int).A[:, 0] # which check node has which degree? 96 | self.degree_varnode_nr = ((self.H_sparse).sum(0)).astype(np.int).A[0, 97 | :] # which variable node has which degree? 98 | 99 | self.N_v = self.H_sparse.shape[1] # How many variable nodes are present? 100 | self.N_c = self.H_sparse.shape[0] # How many checknodes are present? 
101 | 102 | self.d_c_max = self.degree_checknode_nr.max() 103 | self.d_v_max = self.degree_varnode_nr.max() 104 | 105 | self.codeword_len = self.H_sparse.shape[1] 106 | row_sum = self.H_sparse.sum(0).A[0, :] 107 | col_sum = self.H_sparse.sum(1).A[:, 0] 108 | d_v_dist_val = np.unique(row_sum) 109 | d_v_dist = np.zeros(int(d_v_dist_val.max())) 110 | 111 | for d_v in np.sort(d_v_dist_val).astype(np.int): 112 | d_v_dist[d_v - 1] = (row_sum == d_v).sum() 113 | 114 | d_v_dist = d_v_dist / d_v_dist.sum() 115 | 116 | d_c_dist_val = np.unique(col_sum) 117 | d_c_dist = np.zeros(int(d_c_dist_val.max())) 118 | 119 | for d_c in np.sort(d_c_dist_val).astype(np.int): 120 | d_c_dist[d_c - 1] = (col_sum == d_c).sum() 121 | 122 | d_c_dist = d_c_dist / d_c_dist.sum() 123 | 124 | nom = np.dot(d_v_dist, np.arange(d_v_dist_val.max()) + 1) 125 | den = np.dot(d_c_dist, np.arange(d_c_dist_val.max()) + 1) 126 | 127 | self.lambda_vec = convert_node_to_edge_degree(d_v_dist) 128 | self.rho_vec = convert_node_to_edge_degree(d_c_dist) 129 | 130 | self.R_c = 1 - nom / den 131 | 132 | def alistToNumpy(self, lines): 133 | """Converts a parity-check matrix in AList format to a 0/1 numpy array. The argument is a 134 | list-of-lists corresponding to the lines of the AList format, already parsed to integers 135 | if read from a text file. 136 | The AList format is introduced on http://www.inference.phy.cam.ac.uk/mackay/codes/alist.html. 137 | This method supports a "reduced" AList format where lines 3 and 4 (containing column and row 138 | weights, respectively) and the row-based information (last part of the Alist file) are omitted. 
139 | Example: 140 | >>> alistToNumpy([[3,2], [2, 2], [1,1,2], [2,2], [1], [2], [1,2], [1,2,3,4]]) 141 | array([[1, 0, 1], 142 | [0, 1, 1]]) 143 | """ 144 | 145 | nCols, nRows = lines[0] 146 | if len(lines[2]) == nCols and len(lines[3]) == nRows: 147 | startIndex = 4 148 | else: 149 | startIndex = 2 150 | matrix = np.zeros((nRows, nCols), dtype=np.int) 151 | for col, nonzeros in enumerate(lines[startIndex:startIndex + nCols]): 152 | for rowIndex in nonzeros: 153 | if rowIndex != 0: 154 | matrix[rowIndex - 1, col] = 1 155 | 156 | return matrix 157 | 158 | def load_sparse_csr(self, filename): 159 | """Performs loading of a sparse parity check matrix which is stored in a *.npy file.""" 160 | loader = np.load(filename) 161 | return sci.sparse.csr_matrix((loader['data'], loader['indices'], loader['indptr']), 162 | shape=loader['shape']) 163 | 164 | def load_check_mat(self, filename): 165 | """Performs loading of a predefined parity check matrix.""" 166 | if filename.endswith('.npy') or filename.endswith('.npz'): 167 | if filename.endswith('.npy'): 168 | H = np.load(filename) 169 | H_sparse = sci.sparse.csr_matrix(H) 170 | else: 171 | H_sparse = self.load_sparse_csr(filename) 172 | else: 173 | arrays = [np.array(list(map(int, line.split()))) for line in open(filename)] 174 | H = self.alistToNumpy(arrays) 175 | H_sparse = sci.sparse.csr_matrix(H) 176 | return H_sparse 177 | 178 | def build_quantizer(self): 179 | """Generates instance of a quantizer for BPSK and an AWGN channel for the given characteristics.""" 180 | quanti = AWGN_Channel_Quantizer_BPSK(self.sigma_n2,self.AD_max_abs,self.cardinality_T_channel,self.cardinality_Y_channel) 181 | self.p_x_and_t_input = quanti.p_x_and_t 182 | 183 | def run_discrete_density_evolution(self): 184 | """Performs the discrete density evolution using the input distributions obtained from the quantizer. 185 | The resulting trellis diagram is stored in a vector that can be used for the real decoder later. 
186 | """ 187 | DDE_inst = discrete_DE(self.p_x_and_t_input, self.cardinality_T_decoder_ops, 188 | self.d_v, self.d_c, self.imax, self.nror) 189 | 190 | DDE_inst.run_discrete_Density_Evolution() 191 | 192 | self.Trellis_checknodevector_a = DDE_inst.Trellis_checknodevector_a 193 | self.Trellis_varnodevector_a = DDE_inst.Trellis_varnodevector_a 194 | 195 | self.DDE_inst_data = DDE_inst.__dict__ 196 | 197 | def save_config(self,text=''): 198 | """Saves the instance.""" 199 | #timestr = time.strftime("%Y%m%d-%H%M%S") 200 | timestr ='' 201 | 202 | output = open('decoder_config_EbN0_gen_' + str(self.EbN0) + '_' + str( 203 | self.cardinality_T_decoder_ops) + timestr + text + '.pkl', 'wb') 204 | 205 | # Pickle dictionary using protocol -1. 206 | pickle.dump(self.__dict__, output, protocol=-1) 207 | 208 | class AWGN_Discrete_Density_Evolution_class_irregular(AWGN_Discrete_Density_Evolution_class): 209 | """Inherited from base class AWGN_Discrete_Density_Evolution_class. 210 | 211 | Generalization for irregular codes. Thus a new discrete density evolution schemes is used. 212 | 213 | Attributes: 214 | filename_H: filename of the parity check matrix of the considered code 215 | H_sparse: corresponding parity check matrix for the considered code 216 | 217 | matching_vector_varnode: holds the deterministic mapping found when performing message alginment for a 218 | variable noce 219 | matching_vector_checknode: holds the deterministic mapping found when performing message alginment for a 220 | check node 221 | 222 | match: boolean indicating if alignment should be used or not 223 | """ 224 | def __init__(self, sigma_n2_, AD_max_abs_,cardinality_Y_channel_, cardinality_T_channel_, 225 | cardinality_T_decoder_ops_,filename_H_, i_max_, nror_,match=True): 226 | """Inits AWGN_Discrete_Density_Evolution_class_irregular class. 
227 | 228 | Args: 229 | filename_H_: filename of parity check matrix 230 | match: boolean indicating if alignment should be used or not 231 | """ 232 | self.filename_H = filename_H_ 233 | self.H_sparse = self.load_check_mat(self.filename_H) 234 | self.set_code_parameters() 235 | AWGN_Discrete_Density_Evolution_class.__init__(self, sigma_n2_, 236 | AD_max_abs_, cardinality_Y_channel_, cardinality_T_channel_, 237 | cardinality_T_decoder_ops_, self.d_v_max, self.d_c_max , 238 | i_max_, nror_) 239 | 240 | self.EbN0 = -10 * np.log10(self.sigma_n2 * 2 * self.R_c) 241 | self.match = match 242 | 243 | def run_discrete_density_evolution(self): 244 | """Runs discrete density evolution for irregular codes 245 | 246 | Returns also two matching vectors describing the deterministic transformation obtained by message alingment 247 | """ 248 | DDE_inst = discrete_DE_irregular(self.p_x_and_t_input, self.cardinality_T_decoder_ops, 249 | self.lambda_vec, self.rho_vec, self.imax, self.nror, match=self.match) 250 | 251 | DDE_inst.run_discrete_Density_Evolution() 252 | 253 | self.Trellis_checknodevector_a = DDE_inst.Trellis_checknodevector_a 254 | self.Trellis_varnodevector_a = DDE_inst.Trellis_varnodevector_a 255 | self.matching_vector_checknode = DDE_inst.matching_vector_checknode 256 | self.matching_vector_varnode = DDE_inst.matching_vector_varnode 257 | 258 | 259 | self.DDE_inst_data = DDE_inst.__dict__ 260 | 261 | class AWGN_Discrete_Density_Evolution_class_irregular_QAM(AWGN_Discrete_Density_Evolution_class_irregular): 262 | """Inherited from base class AWGN_Discrete_Density_Evolution_class_irregular. 263 | 264 | Adapted version for an irregular LDPC code, an AWGN channel and QAM modulation. 265 | Thus, the quantizer is replaced. 
266 | """ 267 | 268 | def __init__(self, sigma_n2_, 269 | AD_max_abs_, 270 | cardinality_Y_channel_, 271 | cardinality_T_channel_, 272 | cardinality_T_decoder_ops_, 273 | filename_H_, 274 | encoding_table, 275 | sqrt_M, 276 | i_max_, 277 | nror_, 278 | match=True): 279 | """Inits AWGN_Discrete_Density_Evolution_class_irregular_QAM class""" 280 | 281 | self.encoding_table = encoding_table 282 | self.sqrt_M = sqrt_M 283 | self.num_bits = int(np.log2(sqrt_M) * 2) 284 | 285 | AWGN_Discrete_Density_Evolution_class_irregular.__init__(self,sigma_n2_, 286 | AD_max_abs_, 287 | cardinality_Y_channel_, 288 | cardinality_T_channel_, 289 | cardinality_T_decoder_ops_, 290 | filename_H_, 291 | i_max_, 292 | nror_, 293 | match) 294 | 295 | 296 | 297 | self.EbN0 = -10 * np.log10(self.sigma_n2 * self.R_c * self.num_bits) 298 | 299 | def build_quantizer(self): 300 | """Generates a quantizer of the AWGN channel output where the used modulation scheme is QAM.""" 301 | 302 | quanti = AWGN_Channel_Quantizer_QAM(self.sigma_n2, 303 | self.AD_max_abs, 304 | self.cardinality_T_channel, 305 | self.cardinality_Y_channel, 306 | self.encoding_table, 307 | sqrt_M=self.sqrt_M ) 308 | self.p_x_and_t_input = quanti.p_b_and_u_matched 309 | 310 | class AWGN_Discrete_Density_Evolution_class_QAM(AWGN_Discrete_Density_Evolution_class): 311 | """Inherited from base class AWGN_Discrete_Density_Evolution_class. 312 | 313 | Adapted version for an regular LDPC code, an AWGN channel and QAM modulation. 314 | Thus, the quantizer is replaced. 
315 | """ 316 | 317 | def __init__(self, sigma_n2_, AD_max_abs_,cardinality_Y_channel_, cardinality_T_channel_, 318 | cardinality_T_decoder_ops_,filename_H_, 319 | encoding_table,sqrt_M,i_max_, nror_,match=True): 320 | 321 | self.match = match 322 | self.filename_H = filename_H_ 323 | self.H_sparse = self.load_check_mat(self.filename_H) 324 | self.set_code_parameters() 325 | self.encoding_table = encoding_table 326 | self.sqrt_M = sqrt_M 327 | self.num_bits = int(np.log2(sqrt_M) * 2) 328 | 329 | AWGN_Discrete_Density_Evolution_class.__init__(self, sigma_n2_, 330 | AD_max_abs_, cardinality_Y_channel_, cardinality_T_channel_, 331 | cardinality_T_decoder_ops_, self.d_v_max, self.d_c_max, 332 | i_max_, nror_) 333 | 334 | 335 | 336 | self.EbN0 = -10 * np.log10(self.sigma_n2 * self.R_c * self.num_bits) 337 | pass 338 | 339 | 340 | def build_quantizer(self): 341 | """Generates a quantizer of the AWGN channel output where the used modulation scheme is QAM.""" 342 | 343 | quanti = AWGN_Channel_Quantizer_QAM(self.sigma_n2, 344 | self.AD_max_abs, 345 | self.cardinality_T_channel, 346 | self.cardinality_Y_channel, 347 | self.encoding_table, 348 | sqrt_M=self.sqrt_M ) 349 | if self.match: 350 | self.p_x_and_t_input = quanti.p_b_and_u_matched 351 | else: 352 | self.p_x_and_t_input = quanti.p_b_and_u_matched_no_match 353 | 354 | class AWGN_Discrete_Density_Evolution_class_irregular_MPSK(AWGN_Discrete_Density_Evolution_class_irregular): 355 | """Inherited from base class AWGN_Discrete_Density_Evolution_class_irregular. 356 | 357 | Adapted version for an irregular LDPC code, an AWGN channel and MPSK modulation. 358 | Thus, the quantizer is replaced. 
359 | """ 360 | 361 | def __init__(self, sigma_n2_, 362 | AD_max_abs_, 363 | cardinality_Y_channel_, 364 | cardinality_T_channel_, 365 | cardinality_T_decoder_ops_, 366 | filename_H_, 367 | encoding_table, 368 | M, 369 | i_max_, 370 | nror_, 371 | match=True): 372 | 373 | self.encoding_table = encoding_table 374 | self.M = M 375 | self.num_bits = int(np.log2(M)) 376 | 377 | self.filename_H = filename_H_ 378 | self.H_sparse = self.load_check_mat(self.filename_H) 379 | self.set_code_parameters() 380 | 381 | AWGN_Discrete_Density_Evolution_class.__init__(self, sigma_n2_, 382 | AD_max_abs_, cardinality_Y_channel_, cardinality_T_channel_, 383 | cardinality_T_decoder_ops_, self.d_v_max, self.d_c_max, 384 | i_max_, nror_) 385 | self.match = match 386 | self.EbN0 = -10 * np.log10(self.sigma_n2 * self.R_c * self.num_bits) 387 | 388 | def build_quantizer(self): 389 | """Generates a quantizer of the AWGN channel output where the used modulation scheme is 8 PSK.""" 390 | 391 | quanti = AWGN_Channel_Quantizer_MPSK(self.sigma_n2, 392 | self.AD_max_abs, 393 | self.cardinality_T_channel, 394 | self.cardinality_Y_channel, 395 | self.encoding_table, 396 | M=self.M) 397 | 398 | self.p_x_and_t_input = quanti.p_b_and_u_matched -------------------------------------------------------------------------------- /AWGN_Channel_Transmission/AWGN_Quantizer_BPSK.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from information_bottleneck.information_bottleneck_algorithms.symmetric_sIB import symmetric_sIB 3 | from scipy.stats import norm 4 | 5 | try: 6 | import pyopencl as cl 7 | import pyopencl.array as cl_array 8 | from pyopencl.clrandom import rand as clrand 9 | except ImportError: 10 | Warning("PyOpenCl not installed") 11 | import os 12 | from mako.template import Template 13 | 14 | __author__ = "Maximilian Stark" 15 | __copyright__ = "09.08.2016, Institute of Communications, University of Technology Hamburg" 16 | __credits__ = 
["Maximilian Stark"] 17 | __version__ = "1.0" 18 | __email__ = "maximilian.stark@tuhh.de" 19 | __status__ = "Production" 20 | __name__ = "AWGN Channel Quantizer" 21 | __doc__ = """This modules contains a class generating a quantizer for an AWGN channel output for BPSK modulation""" 22 | 23 | 24 | class AWGN_Channel_Quantizer: 25 | """Implementation of an information optimum quantizer unit assuming BPSK transmission. 26 | 27 | The quantizer is generated using the symmetric, sequential information bottleneck algorithm. 28 | This class supports OpenCL for faster quantization and even direct quantization and sample generation on the GPU 29 | (cf. quantize direct). 30 | Although it is theoretical correct to quantize directly, it is preferable to create a more realistic 31 | communication chain including an encoder and modulator in your system instead of using this direct quantization approach. 32 | 33 | Attributes: 34 | sigma_n2: noise variance corresponding to the desired design-Eb/N0 of the decoder 35 | AD_max_abs: limits of the quantizer 36 | cardinality_Y: number of steps used for the fine quantization of the input distribution of the quantizer 37 | cardinality_T: cardinality of the compression variable representing the quantizer output 38 | 39 | limits: borders of the quantizer regions 40 | y_vec: fine quantization of the input domain 41 | delta: spacing between two values in the quantized input domain (cf. 
y_vec) 42 | 43 | x_vec: position of the means of the involved Gaussians 44 | 45 | """ 46 | def __init__(self, sigma_n2_, AD_max_abs_, cardinality_T_, cardinality_Y_, dont_calc = False): 47 | """Inits the quantizer class.""" 48 | self.nror = 5 49 | self.limits = np.zeros(cardinality_T_) 50 | 51 | self.sigma_n2 = sigma_n2_ 52 | self.cardinality_T = cardinality_T_ 53 | self.cardinality_Y = cardinality_Y_ 54 | self.AD_max_abs = AD_max_abs_ 55 | 56 | self.y_vec = np.linspace(-self.AD_max_abs, +self.AD_max_abs, self.cardinality_Y) 57 | self.x_vec = np.array([-1, 1]) 58 | self.delta = self.y_vec[1] - self.y_vec[0] 59 | if not dont_calc: 60 | self.calc_quanti() 61 | 62 | def calc_quanti(self): 63 | """Determines the information optimum quantizer for the given input distribution""" 64 | 65 | # calculate p_xy based on sigma_n2 and AD_max_abs; 66 | # init as normal with mean + 1 67 | p_y_given_x_equals_zero = norm.pdf(self.y_vec, loc=1, scale=np.sqrt(self.sigma_n2)) * self.delta 68 | 69 | # truncate t account for distortion introduced by the quantizer limits then 70 | p_y_given_x_equals_zero[-1] += self.gaussian_over_prob(self.AD_max_abs, 1) 71 | p_y_given_x_equals_zero[0] += self.gaussian_under_prob(-self.AD_max_abs, 1) 72 | 73 | # flip distribution, which realizes mean -1 or a transmitted bit = 1 74 | p_y_given_x_equals_one = p_y_given_x_equals_zero[::-1] 75 | 76 | self.p_xy = 0.5 * np.hstack((p_y_given_x_equals_zero[:,np.newaxis], p_y_given_x_equals_one[:,np.newaxis])) 77 | 78 | self.p_xy = self.p_xy / self.p_xy.sum() #normalize for munerical stability 79 | 80 | # run the symmetric sequential Information Bottleneck algorithm 81 | IB_class = symmetric_sIB(self.p_xy, self.cardinality_T, self.nror) 82 | IB_class.run_IB_algo() 83 | 84 | # store the results 85 | [self.p_t_given_y, self.p_x_given_t, self.p_t] = IB_class.get_results() 86 | 87 | # calculate 88 | # p(t | X = 0)=p(X=0 | t) 89 | # p(t) / p(X=0) 90 | self.p_x_given_t = self.p_x_given_t / 
self.p_x_given_t.sum(1)[:,np.newaxis] 91 | self.p_x_and_t = self.p_x_given_t * self.p_t[:,np.newaxis] 92 | p_t_given_x_equals_zero = self.p_x_and_t[:, 0] / 0.5 93 | 94 | self.cdf_t_given_x_equals_zero = np.append([0], np.cumsum(p_t_given_x_equals_zero)) 95 | 96 | self.output_LLRs = np.log(self.p_x_and_t[:, 0] / self.p_x_and_t[:, 1]) 97 | self.calc_limits() 98 | 99 | @classmethod 100 | def from_generated(cls, cdf_t_given_x_equals_zero_): 101 | cdf_t_given_x_equals_zero = cdf_t_given_x_equals_zero_ 102 | return cls(cdf_t_given_x_equals_zero,) 103 | 104 | def gaussian_over_prob(self, x, mu): 105 | """Compensates the ignored probability mass caused by fixing the region to +- AD_abs_max.""" 106 | 107 | prob = norm.sf((x-mu+self.delta/2)/np.sqrt(self.sigma_n2)) 108 | return prob 109 | 110 | def gaussian_under_prob(self, x, mu): 111 | """Compensates the ignored probability mass caused by fixing the region to +- AD_abs_max.""" 112 | 113 | prob = 1-self.gaussian_over_prob(x-self.delta,mu) 114 | return prob 115 | 116 | def calc_limits(self): 117 | """Calculates the limits of the quantizer borders""" 118 | 119 | for i in range(self.cardinality_T): 120 | cur_vec = (self.p_t_given_y[:, i] == 1).nonzero() 121 | self.limits[i] = self.y_vec[cur_vec[0].min()] 122 | 123 | self.limits[int(self.cardinality_T/2)] = 0 124 | #self.limits[-1]=self.AD_max_abs 125 | 126 | def quantize_direct(self, input_bits): 127 | """Direct quantization without the need of a channel in between since the inversion method is used. 128 | The clusters are directly sampled. 
129 | """ 130 | # create uniform samples 131 | rand_u = np.random.rand(input_bits.shape[0],input_bits.shape[1]) 132 | 133 | # create samples ~ p(t | X = 0) using inversion method 134 | if input_bits.shape[1] > 1: 135 | output_integers = ((np.repeat(rand_u[:,:,np.newaxis], self.cardinality_T+1, axis=2)-self.cdf_t_given_x_equals_zero) > 0).sum(2)-1 136 | output_integers[input_bits.astype(bool)] = self.cardinality_T - 1 - output_integers[input_bits.astype(bool)] 137 | else: 138 | output_integers = ((rand_u - self.cdf_t_given_x_equals_zero) > 0).sum(1) - 1 139 | # "mirror" a sample, when the input bit is 1, otherwise do nothing. 140 | output_integers[input_bits.astype(bool)[:, 0]] = self.cardinality_T - 1 - output_integers[ 141 | input_bits.astype(bool)[:, 0]] 142 | 143 | return output_integers 144 | 145 | def quantize_on_host(self,x): 146 | """Quantizes the received samples on the local machine""" 147 | if x.shape[1] > 1: 148 | cluster = ((np.repeat(x[:,:,np.newaxis], self.cardinality_T, axis=2)-self.limits) > 0).sum(2)-1 149 | cluster[cluster == -1] = 0 150 | else: 151 | cluster = np.sum((x - self.limits) > 0, 1) -1 152 | cluster[cluster==-1] = 0 153 | 154 | return cluster 155 | 156 | def init_OpenCL_quanti(self, N_var,msg_at_time,return_buffer_only=False): 157 | """Inits the OpenCL context and transfers all static data to the device""" 158 | 159 | self.context = cl.create_some_context() 160 | 161 | print(self.context.get_info(cl.context_info.DEVICES)) 162 | path = os.path.split(os.path.abspath(__file__)) 163 | kernelsource = open(os.path.join(path[0], 'kernels_quanti_template.cl')).read() 164 | 165 | tpl = Template(kernelsource) 166 | rendered_tp = tpl.render(Nvar=N_var) 167 | 168 | self.program = cl.Program(self.context, str(rendered_tp)).build() 169 | 170 | self.return_buffer_only = return_buffer_only 171 | 172 | # Set up OpenCL 173 | self.queue = cl.CommandQueue(self.context) 174 | self.quantize = self.program.quantize 175 | 
self.quantize.set_scalar_arg_dtypes([np.int32, None, None, None]) 176 | self.quantize_LLR = self.program.quantize_LLR 177 | self.quantize_LLR.set_scalar_arg_dtypes([np.int32, None, None, None,None]) 178 | self.limit_buff = cl_array.to_device(self.queue, self.cdf_t_given_x_equals_zero.astype(np.float64)) 179 | self.cluster_buff = cl_array.empty(self.queue, (N_var, msg_at_time), dtype=np.int32) 180 | self.LLR_buff = cl_array.empty(self.queue, (N_var, msg_at_time), dtype=np.float64) 181 | self.LLR_values_buff = cl_array.to_device(self.queue, self.output_LLRs.astype(np.float64)) 182 | 183 | def quantize_OpenCL(self, x): 184 | """Quantizes the received distorted samples on the graphic card""" 185 | 186 | # Create OpenCL buffers 187 | 188 | x_buff = cl_array.to_device(self.queue,x.astype(np.float64) ) 189 | limit_buff = cl_array.to_device(self.queue, self.limits.astype(np.float64)) 190 | cluster_buff = cl_array.empty_like(x_buff.astype(np.int32)) 191 | 192 | self.quantize(self.queue, x.shape, None, self.cardinality_T, x_buff.data, limit_buff.data, cluster_buff.data) 193 | self.queue.finish() 194 | 195 | if self.return_buffer_only: 196 | return cluster_buff 197 | else: 198 | clusters = cluster_buff.get() 199 | return clusters 200 | 201 | def quantize_direct_OpenCL(self,N_var,msg_at_time): 202 | """Direct quantization without the need of a channel in between since the inversion method is used. 203 | The clusters are directly sampled. In this scenario the all-zeros codeword is considered such that no data 204 | needs to be transferred to the graphic card. 
205 | 206 | """ 207 | 208 | #rand_u_buff = clrand(self.queue, (N_var,msg_at_time), dtype=np.float64, a=0, b=1) 209 | 210 | rand_u = np.random.rand(N_var,msg_at_time) 211 | 212 | # Create OpenCL buffers 213 | 214 | rand_u_buff = cl_array.to_device(self.queue,rand_u.astype(np.float64) ) 215 | 216 | 217 | 218 | self.quantize(self.queue, (N_var,msg_at_time), None, self.cardinality_T+1, rand_u_buff.data, 219 | self.limit_buff.data, self.cluster_buff.data) 220 | 221 | 222 | self.queue.finish() 223 | 224 | if self.return_buffer_only: 225 | return self.cluster_buff 226 | else: 227 | clusters = self.cluster_buff.get() 228 | return clusters 229 | 230 | def quantize_direct_OpenCL_LLR(self,N_var,msg_at_time): 231 | """ Returns the LLRs of the sampled cluster indices. These indices correspond to the quantized outputs which 232 | are found directly on the graphic card using the inversion method. """ 233 | 234 | rand_u = np.random.rand(N_var,msg_at_time) 235 | 236 | # Create OpenCL buffers 237 | rand_u_buff = cl_array.to_device(self.queue,rand_u.astype(np.float64) ) 238 | 239 | self.quantize_LLR(self.queue, (N_var,msg_at_time), None, self.cardinality_T+1, rand_u_buff.data, 240 | self.limit_buff.data, self.LLR_values_buff.data, self.LLR_buff.data) 241 | 242 | self.queue.finish() 243 | 244 | if self.return_buffer_only: 245 | return self.LLR_buff 246 | else: 247 | LLRs = self.LLR_buff.get() 248 | return LLRs 249 | 250 | -------------------------------------------------------------------------------- /AWGN_Channel_Transmission/AWGN_channel.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | __author__ = "Maximilian Stark" 4 | __copyright__ = "09.08.2016, Institute of Communications, University of Technology Hamburg" 5 | __credits__ = ["Maximilian Stark"] 6 | __version__ = "1.0" 7 | __email__ = "maximilian.stark@tuhh.de" 8 | __status__ = "Production" 9 | __name__ = "AWGN Channel" 10 | __doc__ = """Module holds the 
AWGN_channel class which can be used for simulation of an AWGN channel with real or 11 | complex noise.""" 12 | 13 | class AWGN_channel: 14 | """ Class implements an additive white Gaussian noise channel 15 | 16 | The added noise can either be real or complex depending on the arguments of the constructor. 17 | The default value is real. 18 | 19 | Attributes: 20 | sigma_n2: a double setting noise variance 21 | complex: a boolean value indicating if noise is complex or not 22 | """ 23 | def __init__(self, sigma_n2_, complex =False): 24 | """Inits the AWGN_channel class 25 | Args: 26 | sigma_n2_: noise variance specified by user 27 | complex: default is false, indicating if noise is complex 28 | """ 29 | self.sigma_n2 = sigma_n2_ 30 | self.complex = complex 31 | 32 | def transmission(self, input): 33 | """Performs the transmission of an input stream over an AWGN channel 34 | Args: 35 | input: sequence of symbols as numpy array or scalar 36 | Returns: 37 | output: summation of noise and input 38 | """ 39 | if self.complex: 40 | noise = np.sqrt(self.sigma_n2/2) * np.random.randn(input.shape[0], input.shape[1]) + \ 41 | 1j * np.sqrt(self.sigma_n2/2) * np.random.randn(input.shape[0], input.shape[1]) 42 | 43 | else: 44 | noise = np.sqrt(self.sigma_n2) * np.random.randn(input.shape[0],input.shape[1]) 45 | 46 | output = input + noise 47 | 48 | return output 49 | -------------------------------------------------------------------------------- /AWGN_Channel_Transmission/LDPC_Transmitter.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy as sci 3 | 4 | from Discrete_LDPC_decoding.LDPC_encoder import LDPCEncoder 5 | 6 | __author__ = "Maximilian Stark" 7 | __copyright__ = "10.08.2016, Institute of Communications, University of Technology Hamburg" 8 | __credits__ = ["Maximilian Stark"] 9 | __version__ = "1.0" 10 | __email__ = "maximilian.stark@tuhh.de" 11 | __status__ = "Production" 12 | __name__ = "LDPC 
encoded BPSK Transmitter" 13 | __doc__ = """This class generates a random sequence of BPSK symbols. These symbols are encoded using a given LDPC code 14 | parity check matrix.""" 15 | 16 | class LDPC_BPSK_Transmitter: 17 | 18 | def __init__(self, filename_H_, msg_at_time=1): 19 | self.filename_H = filename_H_ 20 | self.H_sparse = self.load_check_mat(self.filename_H) 21 | 22 | self.encoder = LDPCEncoder(self.filename_H) 23 | 24 | # analyze the H matrix and set all decoder variables 25 | self.set_code_parameters() 26 | 27 | self.data_len = (self.R_c * self.codeword_len).astype(int) 28 | 29 | self.last_transmitted_bits = [] 30 | self.msg_at_time = msg_at_time 31 | 32 | def set_code_parameters(self): 33 | self.degree_checknode_nr = ((self.H_sparse).sum(1)).astype(np.int).A[:, 0] # which check node has which degree? 34 | self.degree_varnode_nr = ((self.H_sparse).sum(0)).astype(np.int).A[0, 35 | :] # which variable node has which degree? 36 | 37 | self.N_v = self.H_sparse.shape[1] # How many variable nodes are present? 38 | self.N_c = self.H_sparse.shape[0] # How many checknodes are present? 
39 | 40 | self.d_c_max = self.degree_checknode_nr.max() 41 | self.d_v_max = self.degree_varnode_nr.max() 42 | 43 | self.codeword_len = self.H_sparse.shape[1] 44 | row_sum = self.H_sparse.sum(0).A[0, :] 45 | col_sum = self.H_sparse.sum(1).A[:, 0] 46 | d_v_dist_val = np.unique(row_sum) 47 | d_v_dist = np.zeros(int(d_v_dist_val.max())) 48 | 49 | for d_v in np.sort(d_v_dist_val).astype(np.int): 50 | d_v_dist[d_v - 1] = (row_sum == d_v).sum() 51 | d_v_dist = d_v_dist / d_v_dist.sum() 52 | 53 | d_c_dist_val = np.unique(col_sum) 54 | d_c_dist = np.zeros(int(d_c_dist_val.max())) 55 | 56 | for d_c in np.sort(d_c_dist_val).astype(np.int): 57 | d_c_dist[d_c - 1] = (col_sum == d_c).sum() 58 | 59 | d_c_dist = d_c_dist / d_c_dist.sum() 60 | nom = np.dot(d_v_dist, np.arange(d_v_dist_val.max()) + 1) 61 | den = np.dot(d_c_dist, np.arange(d_c_dist_val.max()) + 1) 62 | 63 | self.R_c = 1 - nom / den 64 | 65 | def alistToNumpy(self, lines): 66 | """Converts a parity-check matrix in AList format to a 0/1 numpy array. The argument is a 67 | list-of-lists corresponding to the lines of the AList format, already parsed to integers 68 | if read from a text file. 69 | The AList format is introduced on http://www.inference.phy.cam.ac.uk/mackay/codes/alist.html. 70 | This method supports a "reduced" AList format where lines 3 and 4 (containing column and row 71 | weights, respectively) and the row-based information (last part of the Alist file) are omitted. 
72 | Example: 73 | >>> alistToNumpy([[3,2], [2, 2], [1,1,2], [2,2], [1], [2], [1,2], [1,2,3,4]]) 74 | array([[1, 0, 1], 75 | [0, 1, 1]]) 76 | """ 77 | 78 | nCols, nRows = lines[0] 79 | if len(lines[2]) == nCols and len(lines[3]) == nRows: 80 | startIndex = 4 81 | else: 82 | startIndex = 2 83 | matrix = np.zeros((nRows, nCols), dtype=np.int) 84 | for col, nonzeros in enumerate(lines[startIndex:startIndex + nCols]): 85 | for rowIndex in nonzeros: 86 | if rowIndex != 0: 87 | matrix[rowIndex - 1, col] = 1 88 | 89 | return matrix 90 | 91 | def load_sparse_csr(self, filename): 92 | loader = np.load(filename) 93 | return sci.sparse.csr_matrix((loader['data'], loader['indices'], loader['indptr']), 94 | shape=loader['shape']) 95 | 96 | def load_check_mat(self, filename): 97 | if filename.endswith('.npy') or filename.endswith('.npz'): 98 | if filename.endswith('.npy'): 99 | H = np.load(filename) 100 | H_sparse = sci.sparse.csr_matrix(H) 101 | else: 102 | H_sparse = self.load_sparse_csr(filename) 103 | else: 104 | arrays = [np.array(list(map(int, line.split()))) for line in open(filename)] 105 | H = self.alistToNumpy(arrays) 106 | H_sparse = sci.sparse.csr_matrix(H) 107 | return H_sparse 108 | 109 | def transmit(self): 110 | 111 | uncoded_msgs = np.random.randint(0,2, (self.data_len, self.msg_at_time)) 112 | 113 | #uncoded_msgs = np.zeros( (self.data_len, self.msg_at_time) ) 114 | encoded_msgs = np.zeros((self.codeword_len, self.msg_at_time)) 115 | 116 | 117 | for i in range(self.msg_at_time): 118 | 119 | encoded_msgs[:, i]=self.encoder.encode_c(uncoded_msgs[:, i]) 120 | 121 | self.last_transmitted_bits = uncoded_msgs 122 | 123 | data = self.BPSK_mapping(encoded_msgs) 124 | 125 | return data 126 | 127 | def BPSK_mapping(self, X): 128 | 129 | data = np.ones((self.codeword_len, self.msg_at_time)) 130 | data[X == 1] = -1 131 | 132 | return data 133 | 134 | class LDPC_QAM_Transmitter(LDPC_BPSK_Transmitter): 135 | def __init__(self, filename_H_, encoding_table, sqrt_M, 
msg_at_time=1): 136 | 137 | LDPC_BPSK_Transmitter.__init__(self,filename_H_, msg_at_time) 138 | 139 | self.encoding_table = encoding_table 140 | self.sqrt_M = sqrt_M 141 | self.num_bits = int(np.log2(sqrt_M) * 2) 142 | 143 | self.amplitude_values = np.zeros(self.sqrt_M) 144 | self.determine_amplitudes_for_encoding_values() 145 | 146 | def transmit(self): 147 | uncoded_msgs = np.random.randint(0, 2, (self.data_len, self.msg_at_time)) 148 | 149 | encoded_msgs = np.zeros((self.codeword_len, self.msg_at_time)) 150 | data = np.zeros((int(self.codeword_len/self.num_bits), self.msg_at_time),dtype=complex) 151 | 152 | for i in range(self.msg_at_time): 153 | encoded_msgs[:, i] = self.encoder.encode_c(uncoded_msgs[:, i]) 154 | data[:, i] = self.QAM_mapping(encoded_msgs[:,i])[:,0] 155 | 156 | self.last_transmitted_bits = uncoded_msgs 157 | 158 | return data 159 | 160 | def QAM_mapping(self, X): 161 | 162 | data_real=np.reshape(self.amplitude_values[((np.reshape(X.T,(-1, self.num_bits )) [:,:int(self.num_bits/2)] * 163 | 2**np.arange( self.num_bits/2 )[::-1]).sum(1)).astype(np.int)],(-1,1)) 164 | 165 | data_imag=np.reshape(self.amplitude_values[((np.reshape(X.T,(-1, self.num_bits )) [:,int(self.num_bits/2):] * 166 | 2**np.arange( self.num_bits/2 )[::-1]).sum(1)).astype(np.int)],(-1,1)) 167 | 168 | data = (data_real + 1j*data_imag) * self.d_min/2 169 | return data 170 | 171 | def determine_amplitudes_for_encoding_values(self): 172 | natural_values = ((self.encoding_table * 2**np.arange( self.num_bits/2 )[::-1]).sum(1)).astype(np.int) 173 | 174 | self.amplitude_values[natural_values] = np.arange(-self.sqrt_M+1,self.sqrt_M,2) 175 | self.d_min = np.sqrt(6/ (self.sqrt_M**2 -1) ) 176 | 177 | class LDPC_MPSK_Transmitter(LDPC_BPSK_Transmitter): 178 | def __init__(self, filename_H_, encoding_table, M, msg_at_time=1): 179 | LDPC_BPSK_Transmitter.__init__(self, filename_H_, msg_at_time) 180 | 181 | self.encoding_table = encoding_table 182 | self.M = M 183 | self.num_bits = 
int(np.log2(M)) 184 | 185 | self.phase_values = np.zeros(self.M, dtype=complex) 186 | self.determine_phase_for_encoding_values() 187 | 188 | def transmit(self): 189 | uncoded_msgs = np.random.randint(0, 2, (self.data_len, self.msg_at_time)) 190 | #uncoded_msgs = np.zeros((self.data_len, self.msg_at_time)) 191 | 192 | encoded_msgs = np.zeros((self.codeword_len, self.msg_at_time)) 193 | data = np.zeros((int(self.codeword_len / self.num_bits), self.msg_at_time), dtype=complex) 194 | 195 | for i in range(self.msg_at_time): 196 | encoded_msgs[:, i] = self.encoder.encode_c(uncoded_msgs[:, i]) 197 | data[:, i] = self.MPSK_mapping(encoded_msgs[:, i])[:, 0] 198 | 199 | self.last_transmitted_bits = uncoded_msgs 200 | 201 | return data 202 | 203 | def MPSK_mapping(self, X): 204 | data = np.reshape( 205 | self.phase_values[((np.reshape(X.T, (-1, self.num_bits))[:, :int(self.num_bits)] * 206 | 2 ** np.arange(self.num_bits)[::-1]).sum(1)).astype(np.int)], (-1, 1)) 207 | #print(np.angle(data,deg=True)) 208 | return data 209 | 210 | def determine_phase_for_encoding_values(self): 211 | natural_values = ((self.encoding_table * 2 ** np.arange(self.num_bits)[::-1]).sum(1)).astype(np.int) 212 | 213 | angle_ind = np.arange(self.M) 214 | angles = 2 * np.pi/self.M * angle_ind 215 | self.phase_values[natural_values] = 1 * np.exp(1j * angles) 216 | -------------------------------------------------------------------------------- /AWGN_Channel_Transmission/Transmitter.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | __author__ = "Maximilian Stark" 4 | __copyright__ = "10.08.2016, Institute of Communications, University of Technology Hamburg" 5 | __credits__ = ["Maximilian Stark"] 6 | __version__ = "1.0" 7 | __email__ = "maximilian.stark@tuhh.de" 8 | __status__ = "Production" 9 | __name__ = "LDPC encoded BPSK Transmitter" 10 | __doc__ = """This module contains classes which can generate a random sequence of symbols for BPSK, 
QAM, MPSK. 11 | These symbols are encoded using a given LDPC code parity check matrix.""" 12 | 13 | 14 | class BPSK_Transmitter: 15 | def __init__(self, sequence_len, msg_at_time=1): 16 | 17 | self.data_len = int(sequence_len) 18 | 19 | self.last_transmitted_bits = [] 20 | self.msg_at_time = msg_at_time 21 | 22 | def transmit(self): 23 | 24 | uncoded_msgs = np.random.randint(0,2, (self.data_len, self.msg_at_time)) 25 | 26 | self.last_transmitted_bits = uncoded_msgs 27 | 28 | data = self.BPSK_mapping(uncoded_msgs) 29 | 30 | return data 31 | 32 | def BPSK_mapping(self, X): 33 | 34 | data = np.ones((self.codeword_len, self.msg_at_time)) 35 | data[X == 1] = -1 36 | 37 | return data 38 | 39 | 40 | class QAM_Transmitter(BPSK_Transmitter): 41 | def __init__(self, sequence_len, encoding_table, sqrt_M, msg_at_time=1): 42 | 43 | BPSK_Transmitter.__init__(self,sequence_len, msg_at_time) 44 | 45 | self.encoding_table = encoding_table 46 | self.sqrt_M = sqrt_M 47 | self.num_bits = int(np.log2(sqrt_M) * 2) 48 | 49 | self.amplitude_values = np.zeros(self.sqrt_M) 50 | self.determine_amplitudes_for_encoding_values() 51 | 52 | def transmit(self): 53 | uncoded_msgs = np.random.randint(0, 2, (self.data_len, self.msg_at_time)) 54 | 55 | data = np.zeros((int(self.data_len/self.num_bits), self.msg_at_time),dtype=complex) 56 | 57 | for i in range(self.msg_at_time): 58 | data[:, i] = self.QAM_mapping(uncoded_msgs[:,i])[:,0] 59 | 60 | self.last_transmitted_bits = uncoded_msgs 61 | 62 | return data 63 | 64 | def QAM_mapping(self, X): 65 | 66 | data_real=np.reshape(self.amplitude_values[((np.reshape(X.T,(-1, self.num_bits )) [:,:int(self.num_bits/2)] * 67 | 2**np.arange( self.num_bits/2 )[::-1]).sum(1)).astype(np.int)],(-1,1)) 68 | 69 | data_imag=np.reshape(self.amplitude_values[((np.reshape(X.T,(-1, self.num_bits )) [:,int(self.num_bits/2):] * 70 | 2**np.arange( self.num_bits/2 )[::-1]).sum(1)).astype(np.int)],(-1,1)) 71 | 72 | 73 | data = (data_real + 1j*data_imag) * self.d_min/2 74 | 
return data 75 | 76 | def determine_amplitudes_for_encoding_values(self): 77 | natural_values = ((self.encoding_table * 2**np.arange( self.num_bits/2 )[::-1]).sum(1)).astype(np.int) 78 | 79 | self.amplitude_values[natural_values] = np.arange(-self.sqrt_M+1,self.sqrt_M,2) 80 | self.d_min = np.sqrt(6/ (self.sqrt_M**2 -1) ) 81 | 82 | 83 | class MPSK_Tranmitter(BPSK_Transmitter): 84 | def __init__(self, sequence_len, encoding_table, M, msg_at_time=1): 85 | BPSK_Transmitter.__init__(self, sequence_len, msg_at_time) 86 | 87 | self.encoding_table = encoding_table 88 | self.M = M 89 | self.num_bits = int(np.log2(M)) 90 | 91 | self.phase_values = np.zeros(self.M, dtype=complex) 92 | self.determine_phase_for_encoding_values() 93 | 94 | def transmit(self): 95 | uncoded_msgs = np.random.randint(0, 2, (self.data_len, self.msg_at_time)) 96 | 97 | data = np.zeros((int(self.data_len / self.num_bits), self.msg_at_time), dtype=complex) 98 | 99 | for i in range(self.msg_at_time): 100 | data[:, i] = self.MPSK_mapping(uncoded_msgs[:, i])[:, 0] 101 | 102 | self.last_transmitted_bits = uncoded_msgs 103 | 104 | return data 105 | 106 | def MPSK_mapping(self, X): 107 | data = np.reshape( 108 | self.phase_values[((np.reshape(X.T, (-1, self.num_bits))[:, :int(self.num_bits)] * 109 | 2 ** np.arange(self.num_bits)[::-1]).sum(1)).astype(np.int)], (-1, 1)) 110 | #print(np.angle(data,deg=True)) 111 | return data 112 | 113 | def determine_phase_for_encoding_values(self): 114 | natural_values = ((self.encoding_table * 2 ** np.arange(self.num_bits)[::-1]).sum(1)).astype(np.int) 115 | 116 | angle_ind = np.arange(self.M) 117 | angles = 2 * np.pi/self.M * angle_ind 118 | self.phase_values[natural_values] = 1 * np.exp(1j * angles) 119 | -------------------------------------------------------------------------------- /AWGN_Channel_Transmission/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mx-strk/InformationBottleneckDecodingLDPC/033e496b1b5492c9b8f0987f438373104095a2c6/AWGN_Channel_Transmission/__init__.py -------------------------------------------------------------------------------- /AWGN_Channel_Transmission/kernels_quanti.cl: -------------------------------------------------------------------------------- 1 | 2 | __kernel void quantize(const int cardinality_T, 3 | global const float* channel_values, 4 | global const float* limits, 5 | global int* clusters) 6 | { 7 | int gid0 = get_global_id(0); 8 | int gid1 = get_global_id(1); 9 | 10 | int cluster_val; 11 | cluster_val = 0; 12 | //float lim[16]; 13 | 14 | //for (int k = 0; k < cardinality_T; k++) 15 | //lim[k] = limits[k]; 16 | 17 | for (int w=1;w!=cardinality_T;w++) 18 | { 19 | if (( channel_values[gid0+gid1*10000]-limits[w]) > 0) { 20 | cluster_val = cluster_val + 1; 21 | } 22 | 23 | } 24 | clusters[gid0+gid1*10000] = cluster_val; 25 | 26 | } -------------------------------------------------------------------------------- /AWGN_Channel_Transmission/kernels_quanti_template.cl: -------------------------------------------------------------------------------- 1 | 2 | __kernel void quantize(const int cardinality_T, 3 | global const double* channel_values, 4 | global const double* limits, 5 | global int* clusters) 6 | { 7 | int gid0 = get_global_id(0); 8 | int gid1 = get_global_id(1); 9 | 10 | int cluster_val; 11 | cluster_val = 0; 12 | int Nvar = ${Nvar} ; 13 | //double lim[17]; 14 | 15 | //for (int k = 0; k < cardinality_T; k++) 16 | // lim[k] = limits[k]; 17 | 18 | for (int w=1;w!=cardinality_T;w++) 19 | { 20 | if (( channel_values[gid0+gid1*Nvar]-limits[w]) > 0) { 21 | cluster_val = cluster_val + 1; 22 | } 23 | 24 | } 25 | clusters[gid0+gid1*Nvar] = cluster_val; 26 | 27 | } 28 | 29 | __kernel void quantize_LLR(const int cardinality_T, 30 | global const double* channel_values, 31 | global const double* limits, 32 | global const double* LLR_vector, 33 | global 
double* LLRs) 34 | { 35 | int gid0 = get_global_id(0); 36 | int gid1 = get_global_id(1); 37 | 38 | int cluster_val; 39 | cluster_val = 0; 40 | int Nvar = ${Nvar} ; 41 | 42 | 43 | for (int w=1;w!=cardinality_T;w++) 44 | { 45 | if (( channel_values[gid0+gid1*Nvar]-limits[w]) > 0) { 46 | cluster_val = cluster_val + 1; 47 | } 48 | 49 | } 50 | LLRs[gid0+gid1*Nvar] = LLR_vector[cluster_val]; 51 | 52 | } -------------------------------------------------------------------------------- /Continous_LDPC_Decoding/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mx-strk/InformationBottleneckDecodingLDPC/033e496b1b5492c9b8f0987f438373104095a2c6/Continous_LDPC_Decoding/__init__.py -------------------------------------------------------------------------------- /Continous_LDPC_Decoding/bp_decoder_irreg.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import pyopencl as cl 5 | import pyopencl.array as cl_array 6 | import scipy as sci 7 | from mako.template import Template 8 | from pyopencl.reduction import get_sum_kernel 9 | 10 | __author__ = "Maximilian Stark" 11 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 12 | __credits__ = ["Maximilian Stark", "Jan Lewandowsky"] 13 | __version__ = "1.0" 14 | __email__ = "maximilian.stark@tuhh.de" 15 | __status__ = "Production" 16 | __name__ = "Belief Propagation Decoder" 17 | __doc__ = """This module holds a class which implements a belief propagation decoder.""" 18 | 19 | class BeliefPropagationDecoderClassIrregular: 20 | """This class implements a belief propagation decoder 21 | """ 22 | 23 | def __init__(self, filename, imax_, cardinality_T_channel_,msg_at_time_): 24 | 25 | # initialize parameters 26 | self.H = self.load_check_mat(filename) 27 | 28 | self.imax = imax_ 29 | 30 | # Quantizer parameters 31 | self.cardinality_T_channel = 
cardinality_T_channel_ 32 | 33 | # analyze the H matrix and set all decoder variables 34 | self.degree_checknode_nr = ((self.H_sparse).sum(1)).astype(np.int).A[:, 0] # which check node has which degree? 35 | 36 | self.degree_varnode_nr = ((self.H_sparse).sum(0)).astype(np.int).A[0, 37 | :] # which variable node has which degree? 38 | self.N_v = self.H.shape[1] # How many variable nodes are present? 39 | self.N_c = self.H.shape[0] # How many checknodes are present? 40 | 41 | self.d_c_max = self.degree_checknode_nr.max() 42 | self.d_v_max = self.degree_varnode_nr.max() 43 | 44 | self.codeword_len = self.H.shape[1] 45 | row_sum = self.H.sum(0) 46 | col_sum = self.H.sum(1) 47 | d_v_dist_val = np.unique(row_sum) 48 | d_v_dist = np.zeros(int(d_v_dist_val.max())) 49 | 50 | for d_v in np.sort(d_v_dist_val).astype(np.int): 51 | d_v_dist[d_v - 1] = (row_sum == d_v).sum() 52 | d_v_dist = d_v_dist / d_v_dist.sum() 53 | 54 | d_c_dist_val = np.unique(col_sum) 55 | d_c_dist = np.zeros(int(d_c_dist_val.max())) 56 | 57 | for d_c in np.sort(d_c_dist_val).astype(np.int): 58 | d_c_dist[d_c - 1] = (col_sum == d_c).sum() 59 | 60 | d_c_dist = d_c_dist / d_c_dist.sum() 61 | nom = np.dot(d_v_dist, np.arange(d_v_dist_val.max()) + 1) 62 | den = np.dot(d_c_dist, np.arange(d_c_dist_val.max()) + 1) 63 | 64 | self.R_c = 1 - nom / den 65 | 66 | self.data_len = (self.R_c * self.codeword_len).astype(int) 67 | 68 | self.msg_at_time = msg_at_time_ 69 | self.map_node_connections() 70 | 71 | def load_sparse_csr(self, filename): 72 | loader = np.load(filename) 73 | return sci.sparse.csr_matrix((loader['data'], loader['indices'], loader['indptr']), 74 | shape=loader['shape']) 75 | 76 | def load_check_mat(self, filename): 77 | if filename.endswith('.npy') or filename.endswith('.npz'): 78 | if filename.endswith('.npy'): 79 | H = np.load(filename) 80 | self.H_sparse = sci.sparse.csr_matrix(H) 81 | else: 82 | self.H_sparse = self.load_sparse_csr(filename) 83 | H = self.H_sparse.toarray() 84 | else: 85 | 
arrays = [np.array(list(map(int, line.split()))) for line in open(filename)] 86 | H = self.alistToNumpy(arrays) 87 | self.H_sparse = sci.sparse.csr_matrix(H) 88 | return H 89 | 90 | def map_node_connections(self): 91 | """ The overall idea of this function is to store the connections between var- and check nodes in a new structure 92 | namely two vectors. This vectors are called inboxes, because the entries should be seen as memory for incoming 93 | messages. Therefore it is important to track which check node output rights in which var node input and vince 94 | versa. """ 95 | 96 | self.inbox_memory_start_checknodes = np.append([0], np.cumsum(self.degree_checknode_nr[:-1]) ).astype(np.int) 97 | self.inbox_memory_start_varnodes = np.append([0], np.cumsum(self.degree_varnode_nr[:-1]) ).astype(np.int) 98 | 99 | # At first it is determined which check node delivers to which var node 100 | # This information is encoded in the non-zero columns of each row 101 | # non-zero return the indices in the desired way. 102 | 103 | self.customers_checknode_nr = self.H_sparse.indices 104 | 105 | # Now it is determined which var node delivers to which check node 106 | # This information is encoded in the non-zero rows of each column 107 | # non-zero return the indices in the desired way. 108 | self.customers_varnode_nr = (self.H_sparse.tocsc() ).indices 109 | 110 | # now we now the connections but, since one node has multiple inputs the node number is node enough. 111 | # An additional offset vector needs to be defined. If one node was already connected, then the memory box is 112 | # filled. Performing cumsum on the rows only allows to generate this offset vector at check nodes destinations. 
113 | 114 | H_copy = self.H_sparse.tocsc().copy() 115 | for i in range(self.N_v): 116 | H_copy.data[H_copy.indptr[i] : H_copy.indptr[i+1] ] = \ 117 | np.arange(H_copy.indptr[i+1]-H_copy.indptr[i]) 118 | 119 | self.offset_at_dest_checknodes = H_copy.tocsr().data 120 | 121 | 122 | self.target_memory_cells_checknodes = (self.inbox_memory_start_varnodes[self.customers_checknode_nr] + \ 123 | self.offset_at_dest_checknodes).astype(np.int) 124 | 125 | 126 | H_copy = self.H_sparse.copy() 127 | for i in range(self.N_c): 128 | H_copy.data[H_copy.indptr[i] : H_copy.indptr[i+1] ] = \ 129 | np.arange(H_copy.indptr[i+1]-H_copy.indptr[i]) 130 | 131 | self.offset_at_dest_varnodes = H_copy.tocsc().data 132 | 133 | self.target_memory_cells_varnodes = (self.inbox_memory_start_checknodes[self.customers_varnode_nr] + \ 134 | self.offset_at_dest_varnodes).astype(np.int) 135 | 136 | 137 | self.inbox_memory_checknodes = np.zeros((self.degree_checknode_nr.sum().astype(np.int),self.msg_at_time)) 138 | self.inbox_memory_varnodes = np.zeros((self.degree_varnode_nr.sum().astype(np.int),self.msg_at_time)) 139 | self.memory_channel_values = np.zeros(self.N_v) 140 | 141 | def alistToNumpy(self, lines): 142 | """Converts a parity-check matrix in AList format to a 0/1 numpy array. The argument is a 143 | list-of-lists corresponding to the lines of the AList format, already parsed to integers 144 | if read from a text file. 145 | The AList format is introduced on http://www.inference.phy.cam.ac.uk/mackay/codes/alist.html. 146 | This method supports a "reduced" AList format where lines 3 and 4 (containing column and row 147 | weights, respectively) and the row-based information (last part of the Alist file) are omitted. 
148 | Example: 149 | >>> alistToNumpy([[3,2], [2, 2], [1,1,2], [2,2], [1], [2], [1,2], [1,2,3,4]]) 150 | array([[1, 0, 1], 151 | [0, 1, 1]]) 152 | """ 153 | 154 | nCols, nRows = lines[0] 155 | if len(lines[2]) == nCols and len(lines[3]) == nRows: 156 | startIndex = 4 157 | else: 158 | startIndex = 2 159 | matrix = np.zeros((nRows, nCols), dtype=np.int) 160 | for col, nonzeros in enumerate(lines[startIndex:startIndex + nCols]): 161 | for rowIndex in nonzeros: 162 | if rowIndex != 0: 163 | matrix[rowIndex - 1, col] = 1 164 | 165 | return matrix 166 | 167 | def init_OpenCL_decoding(self,msg_at_time_, context_=False): 168 | if not context_: 169 | self.context = cl.create_some_context() 170 | else: 171 | self.context = context_ 172 | 173 | print(self.context.get_info(cl.context_info.DEVICES)) 174 | path = os.path.split(os.path.abspath(__file__)) 175 | 176 | kernelsource = open(os.path.join(path[0], "kernels_min_and_BP.cl")).read() 177 | tpl = Template(kernelsource) 178 | rendered_tp = tpl.render(cn_degree=self.d_c_max, vn_degree=self.d_v_max, msg_at_time=msg_at_time_) 179 | 180 | self.program = cl.Program(self.context, str(rendered_tp)).build() 181 | 182 | self.queue = cl.CommandQueue(self.context) 183 | 184 | self.inbox_memory_start_varnodes_buffer = cl_array.to_device(self.queue, 185 | self.inbox_memory_start_varnodes.astype(np.int32)) 186 | 187 | self.inbox_memory_start_checknodes_buffer = cl_array.to_device(self.queue, 188 | self.inbox_memory_start_checknodes.astype(np.int32)) 189 | 190 | self.degree_varnode_nr_buffer = cl_array.to_device(self.queue, self.degree_varnode_nr.astype(np.int32)) 191 | 192 | self.degree_checknode_nr_buffer = cl_array.to_device(self.queue, self.degree_checknode_nr.astype(np.int32)) 193 | 194 | self.target_memorycells_varnodes_buffer = cl_array.to_device(self.queue, 195 | self.target_memory_cells_varnodes.astype(np.int32)) 196 | self.target_memorycells_checknodes_buffer = cl_array.to_device(self.queue, 197 | 
self.target_memory_cells_checknodes.astype(np.int32)) 198 | 199 | 200 | self.checknode_inbox_buffer = cl_array.empty(self.queue, self.inbox_memory_checknodes.shape, dtype=np.float64) 201 | 202 | self.varnode_inbox_buffer = cl_array.empty(self.queue, self.inbox_memory_varnodes.shape, dtype=np.float64) 203 | 204 | self.syndrom_buffer = cl_array.empty(self.queue, 205 | (self.degree_checknode_nr.shape[0], self.inbox_memory_varnodes.shape[-1]), dtype=np.int32) 206 | 207 | self.krnl = get_sum_kernel(self.context, None, 208 | dtype_in=self.varnode_inbox_buffer.dtype) # varnode_output_buffer.dtype ) 209 | 210 | # define programs 211 | self.send_prog = self.program.send_channel_values_to_checknode_inbox 212 | 213 | self.varnode_update_prog = self.program.varnode_update 214 | 215 | self.checknode_update_prog = self.program.checknode_update 216 | 217 | self.calc_syndrom_prog = self.program.calc_syndrome 218 | 219 | self.varoutput_prog = self.program.calc_varnode_output 220 | 221 | def decode_OpenCL_belief_propagation(self, received_blocks,buffer_in=False,return_buffer=False): 222 | # Set up OpenCL 223 | if buffer_in: 224 | channel_values_buffer = received_blocks 225 | else: 226 | channel_values_buffer = cl_array.to_device(self.queue,received_blocks.astype(np.float64)) 227 | 228 | varnode_output_buffer = cl_array.empty(self.queue, received_blocks.shape, dtype=np.float64) 229 | 230 | 231 | self.send_prog(self.queue, received_blocks.shape, None, 232 | channel_values_buffer.data, 233 | self.inbox_memory_start_varnodes_buffer.data, 234 | self.degree_varnode_nr_buffer.data, 235 | self.target_memorycells_varnodes_buffer.data, 236 | self.checknode_inbox_buffer.data) 237 | self.queue.finish() 238 | syndrome_zero = False 239 | i_num = 1 240 | 241 | while (i_num>> alistToNumpy([[3,2], [2, 2], [1,1,2], [2,2], [1], [2], [1,2], [1,2,3,4]]) 100 | array([[1, 0, 1], 101 | [0, 1, 1]]) 102 | """ 103 | 104 | nCols, nRows = lines[0] 105 | if len(lines[2]) == nCols and len(lines[3]) == nRows: 106 
| startIndex = 4 107 | else: 108 | startIndex = 2 109 | matrix = np.zeros((nRows, nCols), dtype=np.int) 110 | for col, nonzeros in enumerate(lines[startIndex:startIndex + nCols]): 111 | for rowIndex in nonzeros: 112 | if rowIndex != 0: 113 | matrix[rowIndex - 1, col] = 1 114 | 115 | return matrix 116 | 117 | def map_node_connections(self): 118 | """ The overall idea of this function is to store the connections between var- and check nodes in a new structure 119 | namely two vectors. This vectors are called inboxes, because the entries should be seen as memory for incoming 120 | messages. Therefore it is important to track which check node output rights in which var node input and vince 121 | versa. """ 122 | 123 | self.inbox_memory_start_checknodes = np.append([0], np.cumsum(self.degree_checknode_nr[:-1]) ).astype(np.int) 124 | self.inbox_memory_start_varnodes = np.append([0], np.cumsum(self.degree_varnode_nr[:-1]) ).astype(np.int) 125 | 126 | # At first it is determined which check node delivers to which var node 127 | # This information is encoded in the non-zero columns of each row 128 | # non-zero return the indices in the desired way. 129 | 130 | self.customers_checknode_nr = self.H_sparse.indices 131 | 132 | # Now it is determined which var node delivers to which check node 133 | # This information is encoded in the non-zero rows of each column 134 | # non-zero return the indices in the desired way. 135 | 136 | self.customers_varnode_nr = (self.H_sparse.tocsc() ).indices 137 | 138 | # now we now the connections but, since one node has multiple inputs the node number is node enough. 139 | # An additional offset vector needs to be defined. If one node was already connected, then the memory box is 140 | # filled. Performing cumsum on the rows only allows to generate this offset vector at check nodes destinations. 
141 | 142 | H_copy = self.H_sparse.tocsc().copy() 143 | for i in range(self.N_v): 144 | H_copy.data[H_copy.indptr[i] : H_copy.indptr[i+1] ] = \ 145 | np.arange(H_copy.indptr[i+1]-H_copy.indptr[i]) 146 | 147 | self.offset_at_dest_checknodes = H_copy.tocsr().data 148 | self.target_memory_cells_checknodes = (self.inbox_memory_start_varnodes[self.customers_checknode_nr] + \ 149 | self.offset_at_dest_checknodes).astype(np.int) 150 | 151 | H_copy = self.H_sparse.copy() 152 | for i in range(self.N_c): 153 | H_copy.data[H_copy.indptr[i] : H_copy.indptr[i+1] ] = \ 154 | np.arange(H_copy.indptr[i+1]-H_copy.indptr[i]) 155 | 156 | self.offset_at_dest_varnodes = H_copy.tocsc().data 157 | 158 | self.target_memory_cells_varnodes = (self.inbox_memory_start_checknodes[self.customers_varnode_nr] + \ 159 | self.offset_at_dest_varnodes).astype(np.int) 160 | 161 | 162 | self.inbox_memory_checknodes = np.zeros((self.degree_checknode_nr.sum().astype(np.int),self.msg_at_time)) 163 | self.inbox_memory_varnodes = np.zeros((self.degree_varnode_nr.sum().astype(np.int),self.msg_at_time)) 164 | self.memory_channel_values = np.zeros(self.N_v) 165 | 166 | 167 | def init_OpenCL_decoding(self,msg_at_time_, context_=False): 168 | if not context_: 169 | self.context = cl.create_some_context() 170 | else: 171 | self.context = context_ 172 | path = os.path.split(os.path.abspath(__file__)) 173 | 174 | kernelsource = open(os.path.join(path[0], "kernels_min_and_BP.cl")).read() 175 | tpl = Template(kernelsource) 176 | rendered_tp = tpl.render(cn_degree=self.d_c_max, vn_degree=self.d_v_max, msg_at_time=msg_at_time_) 177 | 178 | self.program = cl.Program(self.context, str(rendered_tp)).build() 179 | 180 | self.queue = cl.CommandQueue(self.context) 181 | 182 | self.inbox_memory_start_varnodes_buffer = cl_array.to_device(self.queue, 183 | self.inbox_memory_start_varnodes.astype(np.int32)) 184 | 185 | self.inbox_memory_start_checknodes_buffer = cl_array.to_device(self.queue, 186 | 
self.inbox_memory_start_checknodes.astype(np.int32)) 187 | 188 | self.degree_varnode_nr_buffer = cl_array.to_device(self.queue, self.degree_varnode_nr.astype(np.int32)) 189 | 190 | self.degree_checknode_nr_buffer = cl_array.to_device(self.queue, self.degree_checknode_nr.astype(np.int32)) 191 | 192 | self.target_memorycells_varnodes_buffer = cl_array.to_device(self.queue, 193 | self.target_memory_cells_varnodes.astype(np.int32)) 194 | self.target_memorycells_checknodes_buffer = cl_array.to_device(self.queue, 195 | self.target_memory_cells_checknodes.astype(np.int32)) 196 | 197 | 198 | self.checknode_inbox_buffer = cl_array.empty(self.queue, self.inbox_memory_checknodes.shape, dtype=np.float64) 199 | 200 | self.varnode_inbox_buffer = cl_array.empty(self.queue, self.inbox_memory_varnodes.shape, dtype=np.float64) 201 | 202 | self.syndrom_buffer = cl_array.empty(self.queue, 203 | (self.degree_checknode_nr.shape[0], self.inbox_memory_varnodes.shape[-1]), dtype=np.int32) 204 | 205 | self.krnl = get_sum_kernel(self.context, None, 206 | dtype_in=self.varnode_inbox_buffer.dtype) # varnode_output_buffer.dtype ) 207 | 208 | 209 | # define programs 210 | self.send_prog = self.program.send_channel_values_to_checknode_inbox 211 | 212 | self.varnode_update_prog = self.program.varnode_update 213 | 214 | self.checknode_update_prog_min_sum = self.program.checknode_update_minsum 215 | 216 | self.calc_syndrom_prog = self.program.calc_syndrome 217 | 218 | self.varoutput_prog = self.program.calc_varnode_output 219 | 220 | 221 | def decode_OpenCL_min_sum(self, received_blocks,buffer_in=False,return_buffer=False): 222 | # Set up OpenCL 223 | if buffer_in: 224 | channel_values_buffer = received_blocks 225 | else: 226 | channel_values_buffer = cl_array.to_device(self.queue,received_blocks.astype(np.float64)) 227 | 228 | varnode_output_buffer = cl_array.empty(self.queue, received_blocks.shape, dtype=np.float64) 229 | 230 | 231 | self.send_prog(self.queue, received_blocks.shape, None, 232 | 
channel_values_buffer.data, 233 | self.inbox_memory_start_varnodes_buffer.data, 234 | self.degree_varnode_nr_buffer.data, 235 | self.target_memorycells_varnodes_buffer.data, 236 | self.checknode_inbox_buffer.data) 237 | self.queue.finish() 238 | syndrome_zero = False 239 | i_num = 1 240 | 241 | 242 | while (i_num Z1 42 | and X -> T0 43 | 44 | we are interested in a transformation z0=f(t0) (equivalently: deterministic p(z0|t0)), such that 45 | I(X;Z0) \approx I(X;Y0) and 46 | p(x|z0) \approx p(x|t0) for z0=y0 47 | 48 | 49 | param: 50 | cardinality_Y - number of cluster to optimize 51 | p_x_given_t0 - distribution to transform 52 | p_x_given_z1 - reference distribution for transformation 53 | 54 | return: 55 | 56 | """ 57 | z0_stars = np.zeros(cardinality_Y).astype(np.int) 58 | p_x_given_t0 = p_x_and_t0 / p_x_and_t0.sum(1)[:, np.newaxis] 59 | p_t0 = p_x_and_t0.sum(1) 60 | p_x_given_z1 = p_x_and_z1 / p_x_and_z1.sum(1)[:, np.newaxis] 61 | 62 | for t0 in range( int(cardinality_Y) ): 63 | z0_stars[t0] = np.argmin(kl_divergence(p_x_given_t0[t0,:],p_x_given_z1) ) 64 | 65 | 66 | p_star_z0_given_t0 = np.zeros((cardinality_Y, cardinality_Y)) 67 | for j, z0_star in enumerate(z0_stars): 68 | p_star_z0_given_t0[j, z0_star] = 1 69 | 70 | p_star_z0 = np.zeros(cardinality_Y) 71 | for t0, z0 in enumerate(z0_stars): 72 | p_star_z0[z0] += p_t0[t0] 73 | 74 | p_x_given_z0_stars = 1/(p_star_z0[:,np.newaxis]+1e-80) * np.dot( p_star_z0_given_t0.transpose(), p_x_and_t0) 75 | p_x_and_z0_stars = p_x_given_z0_stars * p_star_z0[:,np.newaxis] 76 | 77 | return p_x_given_z0_stars, p_x_and_z0_stars,p_star_z0, z0_stars , p_star_z0_given_t0 78 | 79 | -------------------------------------------------------------------------------- /Discrete_LDPC_decoding/LDPC_encoder.py: -------------------------------------------------------------------------------- 1 | import Discrete_LDPC_decoding.GF2MatrixMul_c as GF2MatrixMul_c 2 | import numpy as np 3 | import scipy.sparse as sp 4 | 5 | __author__ = "Maximilian 
Stark" 6 | __copyright__ = "05.07.2016, Institute of Communications, University of Technology Hamburg" 7 | __credits__ = ["Maximilian Stark"] 8 | __version__ = "1.0" 9 | __email__ = "maximilian.stark@tuhh.de" 10 | __status__ = "Production" 11 | __name__ = "LDPC Encoder" 12 | __doc__ = "This class implements an LDPC encoder." 13 | 14 | 15 | class LDPCEncoder: 16 | """This class implements an LDPC encoder. The constructor takes the path to a saved Parity Check Matrix as input. The 17 | file should be in the alist Format. Similar to the LDPCencoder from the Matlab communication toolbox the last 18 | N−K columns in the parity check matrix must be an invertible matrix in GF(2). 19 | This is because the encoding is done only based on parity check matrix, by evaluating a_k' = inv(H_k)*H_l*a_L. 20 | Input X must be a numeric or logical column vector with length equal K. The length of the encoded data output 21 | vector, Y, is N. It is a solution to the parity-check equation, with the first K bits equal to the input, X.""" 22 | 23 | def __init__(self, filename, alist_file = True): 24 | #if alist_file: 25 | # self.H = self.load_check_mat(filename) 26 | #else: 27 | # self.H = np.load(filename) 28 | 29 | # if sp.issparse(self.H): 30 | # self.H = (self.H).toarray() 31 | # self.H_sparse = sp.csr_matrix(self.H) 32 | # self.setParityCheckMatrix(self.H) 33 | 34 | self.H_sparse = self.load_check_mat(filename) 35 | self.setParityCheckMatrix(self.H_sparse) 36 | 37 | 38 | 39 | 40 | def alistToNumpy(self,lines): 41 | """Converts a parity-check matrix in AList format to a 0/1 numpy array. The argument is a 42 | list-of-lists corresponding to the lines of the AList format, already parsed to integers 43 | if read from a text file. 44 | The AList format is introduced on http://www.inference.phy.cam.ac.uk/mackay/codes/alist.html. 
45 | This method supports a "reduced" AList format where lines 3 and 4 (containing column and row 46 | weights, respectively) and the row-based information (last part of the Alist file) are omitted. 47 | Example: 48 | >>> alistToNumpy([[3,2], [2, 2], [1,1,2], [2,2], [1], [2], [1,2], [1,2,3,4]]) 49 | array([[1, 0, 1], 50 | [0, 1, 1]]) 51 | """ 52 | 53 | nCols, nRows = lines[0] 54 | if len(lines[2]) == nCols and len(lines[3]) == nRows: 55 | startIndex = 4 56 | else: 57 | startIndex = 2 58 | matrix = np.zeros((nRows, nCols), dtype=np.int) 59 | for col, nonzeros in enumerate(lines[startIndex:startIndex + nCols]): 60 | for rowIndex in nonzeros: 61 | if rowIndex != 0: 62 | matrix[rowIndex - 1, col] = 1 63 | 64 | return matrix 65 | 66 | def load_sparse_csr(self,filename): 67 | loader = np.load(filename) 68 | return sp.csr_matrix((loader['data'], loader['indices'], loader['indptr']), 69 | shape=loader['shape']) 70 | 71 | def load_check_mat(self, filename): 72 | if filename.endswith('.npy') or filename.endswith('.npz'): 73 | if filename.endswith('.npy'): 74 | H = np.load(filename) 75 | H = sp.csr_matrix(H) 76 | else: 77 | H = self.load_sparse_csr(filename) 78 | 79 | else: 80 | arrays = [np.array(list(map(int, line.split()))) for line in open(filename)] 81 | H = self.alistToNumpy(arrays) 82 | H = sp.csr_matrix(H) 83 | 84 | return H 85 | 86 | def encode(self, X): 87 | 88 | EncodingMethod = self.EncodingMethod.copy() 89 | 90 | if self.RowOrder[0] >= 0: 91 | # only if the last (N-K) columns of H are not triangular or if they are lower/upper triangular along the 92 | # antidiagonal 93 | # Todo 94 | pass 95 | else: 96 | pass 97 | 98 | # compute matrix product between first K_columns of H and information bits. 
99 | result = self.GF2MatrixMul(X, np.zeros(self.NumParityBits, dtype=int), self.NumInfoBits, self.MatrixA_RowIndices, 100 | self.MatrixA_RowStartLoc, self.MatrixA_ColumnSum, 1) 101 | 102 | # need to perform another substitution if last (N-K) columns are not triangular 103 | if EncodingMethod == 0: 104 | # forward substitution for lower triangular matrix obtained from factorization in GF(2) 105 | result = self.GF2MatrixMul(result, result, self.NumParityBits, self.MatrixL_RowIndices, 106 | self.MatrixL_RowStartLoc, self.MatrixL_ColumnSum, 1) 107 | # now we need to perform backward substitution since B will be upper triangular 108 | EncodingMethod = -1 109 | 110 | if self.RowOrder[0] >= 0: 111 | #first version loop 112 | #for counter in range(self.NumParityBits): 113 | # result[counter] = result[self.RowOrder[counter]] 114 | # second option 115 | result = result[self.RowOrder] 116 | 117 | # Solve for the Parity Check Bits. 118 | # Common step for all shapes. 119 | parity_check_bits = self.GF2MatrixMul(result, result, self.NumParityBits, self.MatrixB_RowIndices, 120 | self.MatrixB_RowStartLoc, self.MatrixB_ColumnSum, EncodingMethod) 121 | 122 | codeword = np.append(X, parity_check_bits) 123 | return codeword 124 | 125 | def encode_c(self, X): 126 | EncodingMethod = self.EncodingMethod 127 | 128 | if self.RowOrder[0] >= 0: 129 | # only if the last (N-K) coloums of H are not triangula or if they are lower/upper triangular along the 130 | # antidiagonal 131 | # Todo 132 | pass 133 | else: 134 | pass 135 | 136 | # compute matrix product between first K_columns of H and information bits. 
137 | result = GF2MatrixMul_c.GF2MatrixMul_c(X.astype(np.int32).copy(), np.zeros(self.NumParityBits, dtype=np.int32), 138 | self.NumInfoBits, self.MatrixA_RowIndices,self.MatrixA_RowStartLoc, 139 | self.MatrixA_ColumnSum, 1) 140 | 141 | # need to perform another substitution if last (N-K) columns are not triangular 142 | if EncodingMethod == 0: 143 | # forward substitution for lower triangular matrix obtained from factorization in GF(2) 144 | result = GF2MatrixMul_c.GF2MatrixMul_c(result, result, self.NumParityBits, self.MatrixL_RowIndices, 145 | self.MatrixL_RowStartLoc, self.MatrixL_ColumnSum, 1) 146 | # now we need to perform backward substitution since B will be upper triangular 147 | EncodingMethod = -1 148 | 149 | if self.RowOrder[0] >= 0: 150 | #first version loop 151 | #for counter in range(self.NumParityBits): 152 | # result[counter] = result[self.RowOrder[counter]] 153 | # second option 154 | result = result[self.RowOrder] 155 | 156 | # Solve for the Parity Check Bits. 157 | # Common step for all shapes. 
158 | parity_check_bits = GF2MatrixMul_c.GF2MatrixMul_c(result, result, self.NumParityBits, self.MatrixB_RowIndices, 159 | self.MatrixB_RowStartLoc, self.MatrixB_ColumnSum, EncodingMethod) 160 | 161 | codeword = np.append(X, parity_check_bits) 162 | return codeword 163 | 164 | def GF2MatrixMul(self, source, dest, srclen ,RowIndices, RowLoc, ColumnSum, direction): 165 | """ example: 166 | source: InformationBits 167 | dest: MatrixProductbuffer (return value) 168 | srclen: NumInfoBits 169 | RowIndices: A_RowIndices (of matrix A which is the H(:,1:K) 170 | RowLoc: A_RowStartLoc 171 | CoulumnSum 172 | direction: 1 or -1 forward backward substitution 173 | """ 174 | 175 | if direction == 1: 176 | columnindex = 0 # Start from the first column for forward substitution 177 | else: 178 | columnindex = srclen - 1 # Start from the last column for backward substitution 179 | 180 | 181 | for col_counter in range(srclen): 182 | if not source[columnindex] == 0: 183 | for row_counter in range(ColumnSum[columnindex]): 184 | rowindex = RowIndices[RowLoc[columnindex] + row_counter] 185 | dest[rowindex] = 1 - dest[rowindex] 186 | 187 | columnindex += direction 188 | 189 | 190 | return dest 191 | 192 | 193 | def setParityCheckMatrix(self,H): 194 | params = self.getLDPCEncoderParamters(H) 195 | self.storedParityCheckMatrix = H 196 | 197 | def getLDPCEncoderParamters(self,H): 198 | self.N = H.shape[1] 199 | self.K = self.N -H.shape[0] 200 | 201 | #extract last (N-K) columns of parity check matrix 202 | last_Part = H[:,self.K:] 203 | 204 | # check if last_Part is triangular 205 | shape = self.isfulldiagtriangular(last_Part) 206 | 207 | if shape == 1: 208 | algo = 'Forward Substitution' 209 | rowOrder = np.array([-1]) # Don't need to reverse the order 210 | elif shape == -1: 211 | algo = 'Backward Substitution' 212 | rowOrder = np.array([-1]) # Don't need to reverse the order 213 | else: 214 | # Reverse the order of rows in last_Part, but keep lastPart, since if PB is not triangular 215 | 
# we need to factorize it in GF(2) 216 | Reversed_last = last_Part[::-1,:].copy() 217 | rev_shape = self.isfulldiagtriangular(Reversed_last) 218 | if rev_shape == 1: 219 | algo = 'Forward Substitution' 220 | rowOrder = np.arange((self.N-self.K))[::-1] 221 | last_Part = Reversed_last 222 | elif rev_shape == -1: 223 | algo = 'Backward Substitution' 224 | rowOrder = np.arange((self.N-self.K))[::-1] 225 | last_Part = Reversed_last 226 | else: 227 | algo = 'Matrix Inverse' 228 | 229 | # now we preallocate variable for the encode function 230 | self.MatrixL_RowIndices = np.int32(0) 231 | self.MatrixL_ColumnSum = np.int32(0) 232 | self.MatrixL_RowStartLoc = np.int32(0) 233 | 234 | if algo == 'Forward Substitution': 235 | self.EncodingMethod = np.int8(1) 236 | #P = np.tril(last_Part, -1) # remove diagonal 237 | P = sp.tril(last_Part, -1) # remove diagonal 238 | elif algo == 'Backward Substitution': 239 | self.EncodingMethod = np.int8(1) 240 | #P = np.triu(last_Part, 1) # remove diagonal 241 | P = sp.triu(last_Part, 1) # remove diagonal 242 | else: 243 | # algo is 'Matrix Inverse' so we need to work a bit. So we factorize in GF(2) first. 
244 | PL, last_Part, rowOrder, invertible = self.gf2factorize(last_Part.toarray()) 245 | 246 | if not invertible: 247 | print('Not invertible Matrix') 248 | self.EncodingMethod = np.int8(0) 249 | #self.MatrixL_RowIndices, self.MatrixL_RowStartLoc, self.MatrixL_ColumnSum = \ 250 | # self.ConvertMatrixFormat(np.tril(PL, -1)) 251 | 252 | self.MatrixL_RowIndices, self.MatrixL_RowStartLoc, self.MatrixL_ColumnSum = \ 253 | self.ConvertMatrixFormat(sp.tril(PL, -1)) 254 | 255 | last_Part = last_Part[rowOrder, :] 256 | #P = np.triu(last_Part, 1) 257 | P = sp.triu(last_Part, 1) 258 | 259 | # Update all internal data structures for the encoding 260 | self.RowOrder = np.int32(rowOrder) 261 | 262 | self.MatrixA_RowIndices, self.MatrixA_RowStartLoc, self.MatrixA_ColumnSum = self.ConvertMatrixFormat(H[:, :self.K]) 263 | self.MatrixB_RowIndices, self.MatrixB_RowStartLoc, self.MatrixB_ColumnSum = self.ConvertMatrixFormat(P) 264 | 265 | # Update all external properties. 266 | self.NumInfoBits = self.K 267 | self.NumParityBits = self.N - self.K 268 | self.BlockLength = self.N 269 | self.EncodingAlgorithm = algo 270 | 271 | def ConvertMatrixFormat(self, X): 272 | """Create an alternative representation of zero-one matrix""" 273 | 274 | # j, i = np.nonzero(np.transpose(X.toarray())) 275 | # RowIndices = np.int32(i) 276 | # ColumnSum = np.int32((X.toarray()).sum(0)) 277 | # # For each row find the corresponding row indices start in RowIndicies. 278 | # CumulativeSum = np.cumsum(np.double(ColumnSum)) 279 | # RowStartLoc = np.int32(np.append([0], CumulativeSum[:-1])) 280 | 281 | RowIndices = ((X.tocsc()).indices).astype(np.int32) 282 | RowStartLoc = np.int32((X.tocsc()).indptr[:-1]) 283 | ColumnSum = np.int32((X.tocsc().sum(0)).A[0,:]) 284 | 285 | return RowIndices, RowStartLoc, ColumnSum 286 | 287 | def gf2factorize(self,X): 288 | """This function factorizes a square matrix in GF(2) using Gaussian elimination. 289 | X= A * B using modulo 2 arithmetic. 290 | X may be sparse. 
291 | A and B will be sparse 292 | 293 | A is always lower triangular. If X is invertible in GF(2), then B(chosen_pivot,:) is upper triangular and 294 | invertible. 295 | """ 296 | 297 | n = X.shape[0] 298 | if not n == X.shape[1]: 299 | print("error non square matrix") 300 | 301 | Y1 = np.eye(n,n,0,bool) 302 | Y2 = np.zeros([n,n]).astype(bool) 303 | Y2[np.nonzero(X)] = 1 304 | chosen_pivots = np.zeros(n).astype(int) 305 | invertible = True 306 | 307 | for col in range(n): 308 | candidate_rows = Y2[:, col].copy() 309 | 310 | candidate_rows[chosen_pivots[:col]] = 0 # never use a chosen pivot 311 | candidate_rows = np.nonzero(candidate_rows)[0] 312 | 313 | if candidate_rows.size ==0: 314 | invertible = False # not invertible 315 | break 316 | else: 317 | pivot = candidate_rows[0] # chose first candidate as pivot 318 | chosen_pivots[col] = pivot # record pivot 319 | # find all nonzero elements in pivot row and xor with corresponding other candidate rows 320 | columnind = np.nonzero(Y2[pivot, :]) 321 | 322 | # subtraction step. but is NOT in GF2 323 | 324 | 325 | Y2[candidate_rows[1:, np.newaxis], columnind] = \ 326 | np.logical_not(Y2[candidate_rows[1:, np.newaxis], columnind]) 327 | 328 | 329 | Y1[candidate_rows[1:], pivot] = 1 330 | 331 | A = sp.csr_matrix(Y1) 332 | B = sp.csr_matrix(Y2) 333 | #A = Y1 334 | #B = Y2 335 | 336 | #if not invertible return empty pivot 337 | if not invertible: 338 | chosen_pivots = np.zeros(n).astype(int) 339 | 340 | return A, B, chosen_pivots, invertible 341 | 342 | def isfulldiagtriangular(self,X): 343 | """X must be a square logical matrix. 
344 | shape = 1 if X is ower triangular and has a full diagonal 345 | shape = -1 if X is upper triangular and has a full diagonal 346 | shape = 0""" 347 | 348 | N = X.shape[0] 349 | NumNonZeros = (X != 0).sum() 350 | #if not np.all(np.diagonal(X)): 351 | if not np.all(X.diagonal()): 352 | shape = 0 353 | else: 354 | #NumNonzerosInLowerPart = (np.tril(X) != 0).sum() 355 | NumNonzerosInLowerPart = (sp.tril(X) != 0).sum() 356 | if NumNonzerosInLowerPart == NumNonZeros: 357 | shape = 1 # X is lower triangular 358 | elif NumNonzerosInLowerPart == N: 359 | shape = -1 360 | else: 361 | shape = 0 362 | return shape 363 | 364 | 365 | -------------------------------------------------------------------------------- /Discrete_LDPC_decoding/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mx-strk/InformationBottleneckDecodingLDPC/033e496b1b5492c9b8f0987f438373104095a2c6/Discrete_LDPC_decoding/__init__.py -------------------------------------------------------------------------------- /Discrete_LDPC_decoding/kernels_template.cl: -------------------------------------------------------------------------------- 1 | //#pragma OPENCL EXTENSION cl_khr_fp64: enable 2 | #define CN_DEGREE ${ cn_degree } 3 | #define VN_DEGREE ${ vn_degree } 4 | 5 | void kernel test_kernel() 6 | { 7 | int gid=get_global_id(0); 8 | int lid=get_local_id(0); 9 | 10 | 11 | } 12 | 13 | void kernel send_channel_values_to_checknode_inbox(global const int* channel_values, 14 | global const int* inbox_memory_start_varnodes, 15 | global const int* degree_varnode_nr, 16 | global const int* target_memorycells_varnodes, 17 | global int* checknode_inbox 18 | ) 19 | { 20 | int gid1=get_global_id(0); 21 | int gid2=get_global_id(1); 22 | 23 | 24 | for(int w=0;w!=degree_varnode_nr[gid1];w++) 25 | { 26 | int target_memcell=target_memorycells_varnodes[inbox_memory_start_varnodes[gid1]+w]; 27 | checknode_inbox[target_memcell*${ msg_at_time 
}+gid2]=channel_values[gid1*${ msg_at_time }+gid2]; 28 | } 29 | 30 | } 31 | 32 | void kernel checknode_update_iter0(global const int* checknode_inbox, 33 | global const int* inbox_memory_start_checknodes, 34 | global const int* degree_checknode_nr, 35 | global const int* target_memorycells_checknodes, 36 | global int* varnode_inbox, 37 | int cardinality_T_channel, 38 | int cardinality_T_decoder_ops, 39 | global const int* Trellis_checknodevector_a 40 | ) 41 | 42 | { 43 | int gid1=get_global_id(0); 44 | int gid2=get_global_id(1); 45 | 46 | int lid=get_local_id(0); 47 | int local_size=get_local_size(0); 48 | 49 | private int current_msgs[CN_DEGREE]; 50 | 51 | int node_degree = degree_checknode_nr[gid1]; 52 | 53 | /*int size_of_lookup_vector_this_iter=cardinality_T_channel*cardinality_T_channel 54 | +(node_degree-3)*cardinality_T_decoder_ops*cardinality_T_channel; 55 | */ 56 | 57 | //read lookup vector for the current operation to the local memory 58 | 59 | int msgs_start_idx=inbox_memory_start_checknodes[gid1]; 60 | 61 | 62 | for (int w=0;w!=node_degree;w++) 63 | { 64 | int target_memcell=target_memorycells_checknodes[inbox_memory_start_checknodes[gid1]+w]; 65 | 66 | 67 | int pos=0; 68 | for(int v=0;v!=node_degree;v++) 69 | { 70 | if(v!=w) { 71 | 72 | 73 | current_msgs[pos]=checknode_inbox[ (msgs_start_idx+v)*${ msg_at_time }+gid2]; 74 | pos++; 75 | } 76 | } 77 | 78 | int t_0=Trellis_checknodevector_a[current_msgs[0]*cardinality_T_channel+current_msgs[1]]; 79 | 80 | int t_lm1=t_0; 81 | for(int l=1;l target_error_rate) and (EbN0_dB < EbN0_dB_max_value): 146 | if BER_vector[-1] < BER_go_on_in_smaller_steps: 147 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth) 148 | else: 149 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth) 150 | 151 | BER_vector = np.append(BER_vector, 0) 152 | else: 153 | ready = True 154 | 155 | 156 | 157 | #Plot 158 | plt.figure() 159 | 
plt.semilogy(EbN0_dB_vector,BER_vector) 160 | plt.xlabel('Eb/N0') 161 | plt.ylabel('Bit Error Rate ') 162 | plt.grid(True) 163 | plt.show() 164 | 165 | np.savez(os.path.join(pathname,'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 166 | 167 | plt.savefig(os.path.join(pathname,'BER_figure.pgf')) 168 | plt.savefig(os.path.join(pathname,'BER_figure.pdf')) 169 | 170 | res_dict = {"EbN0_dB_vector" : EbN0_dB_vector, "BER_vector":BER_vector 171 | , "decoder_name":decoder_name} 172 | 173 | sio.savemat(os.path.join(pathname,'BER_results.mat'),res_dict) 174 | push = pb.push_note("Simulation done!", filename) 175 | -------------------------------------------------------------------------------- /Irregular_LDPC_Decoding/DVB-S2/BER_simulation_OpenCL_enc.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import pickle 3 | import time 4 | 5 | import matplotlib as mpl 6 | import numpy as np 7 | import scipy.io as sio 8 | from AWGN_Channel_Transmission.LDPC_Transmitter import LDPC_BPSK_Transmitter as transmitter 9 | 10 | from AWGN_Channel_Transmission.AWGN_Quantizer_BPSK import AWGN_Channel_Quantizer 11 | from AWGN_Channel_Transmission.AWGN_channel import AWGN_channel 12 | from Discrete_LDPC_decoding.discrete_LDPC_decoder_irreg import Discrete_LDPC_Decoder_class_irregular as LDPC_Decoder 13 | 14 | mpl.use("pgf") 15 | pgf_with_pdflatex = { 16 | "pgf.texsystem": "pdflatex", 17 | "text.usetex": True, 18 | "font.family": "serif" 19 | } 20 | mpl.rcParams.update(pgf_with_pdflatex) 21 | import matplotlib.pyplot as plt 22 | 23 | 24 | __author__ = "Maximilian Stark" 25 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 26 | __credits__ = ["Maximilian Stark"] 27 | __version__ = "1.0" 28 | __email__ = "maximilian.stark@tuhh.de" 29 | __status__ = "Production" 30 | __name__ = "Simulation Env with encoder" 31 | __doc__ = """This script sets up a proper simulation environment to 
analyse a purely discrete decoder that works only 32 | on integers. The BER performance of the chosen decoder can be stored and compared.""" 33 | 34 | 35 | # Choose the correct context 36 | #os.environ['PYOPENCL_CTX'] = '1' # CPU 37 | os.environ['PYOPENCL_CTX'] = '0' # GraKA 38 | #os.environ['PYOPENCL_COMPILER_OUTPUT']='1' 39 | np.seterr(all='raise') 40 | 41 | # Load stored data 42 | filepath ="../../LDPC_codes/irregular_codes/DVB_S2_0.5.npz" 43 | 44 | #decoder_name = 'decoder_config_EbN0_gen_0.7_16no_match.pkl' 45 | match='true' #'############## ACHTUNG ######################' 46 | 47 | 48 | for val in ['0.6_16adapt71']: 49 | #decoder_name = 'decoder_config_EbN0_gen_'+str(val)+'_16.pkl' 50 | decoder_name = 'decoder_config_EbN0_gen_'+val+'.pkl' 51 | pkl_file = open(decoder_name, 'rb') 52 | generated_decoder = pickle.load(pkl_file) 53 | 54 | 55 | timestr = time.strftime("%y%m%d-%H%M") 56 | 57 | filename = os.path.splitext(decoder_name)[0].replace('.','').replace('_config','')+'_'+timestr 58 | pathname = os.path.join('BER_Results', filename.replace('.', '')) 59 | os.makedirs(os.path.dirname(os.path.join(pathname,' ')), exist_ok=True) 60 | 61 | 62 | Trellis_checknodevector_a = generated_decoder['Trellis_checknodevector_a'] 63 | Trellis_varnodevector_a = generated_decoder['Trellis_varnodevector_a'] 64 | matching_vector_checknode =generated_decoder['matching_vector_checknode'] 65 | matching_vector_varnode = generated_decoder['matching_vector_varnode'] 66 | # Encode Codeword 67 | 68 | # Human choice 69 | AD_max_abs = 3 70 | cardinality_Y_channel = 2000 71 | cardinality_T_channel = generated_decoder['cardinality_T_decoder_ops'] 72 | cardinality_T_decoder_ops = generated_decoder['cardinality_T_decoder_ops'] 73 | msg_at_time = 2 74 | min_errors = 5000 75 | 76 | imax = generated_decoder['imax'] 77 | 78 | # sets the start EbN0_dB value 79 | EbN0_dB_max_value = 1.2 80 | 81 | # simulation runs until this BER is achieved 82 | target_error_rate = 1e-6 83 | 
BER_go_on_in_smaller_steps = 1e-5 84 | 85 | # in steps of size.. 86 | EbN0_dB_normal_stepwidth = 0.1 87 | EbN0_dB_small_stepwidth = 0.05 88 | 89 | # start EbN0 simulation 90 | EbN0_dB = 0 91 | EbN0_dB_ind = 0 92 | BER_vector = np.array([0.]) 93 | EbN0_dB_vector = np.array([EbN0_dB]) 94 | ready = False 95 | NR_BLOCKS_PER_CONTROL_MSG = 100 96 | 97 | 98 | transi = transmitter(filepath, msg_at_time) 99 | decodi = LDPC_Decoder(filepath, imax, cardinality_T_channel, cardinality_T_decoder_ops, Trellis_checknodevector_a, 100 | Trellis_varnodevector_a,matching_vector_checknode, matching_vector_varnode, msg_at_time,match=match) 101 | 102 | N_var = transi.codeword_len 103 | 104 | while not ready: 105 | EbN0_dB_ind += EbN0_dB_ind 106 | EbN0_dB = EbN0_dB_vector[-1] 107 | 108 | sigma_n2 = 10**(-EbN0_dB/10) / (2*transi.R_c) 109 | 110 | chani = AWGN_channel(sigma_n2) 111 | 112 | quanti = AWGN_Channel_Quantizer(sigma_n2, AD_max_abs, cardinality_T_channel, cardinality_Y_channel) 113 | quanti.init_OpenCL_quanti(N_var,msg_at_time,return_buffer_only=True) 114 | decodi.init_OpenCL_decoding(msg_at_time,quanti.context) 115 | 116 | 117 | errors = 0 118 | transmitted_blocks = 0 119 | # transmit 120 | start = time.time() 121 | while errors < min_errors: 122 | 123 | send_message = transi.transmit() 124 | 125 | # receive data 126 | rec_data = chani.transmission(send_message) 127 | # quantize received data 128 | rec_data_quantized = quanti.quantize_on_host(rec_data) 129 | 130 | decoded_mat = decodi.decode_OpenCL(rec_data_quantized,buffer_in=False,return_buffer=False) 131 | 132 | errors += ((decoded_mat[:transi.data_len] < cardinality_T_decoder_ops / 2) != transi.last_transmitted_bits).sum() 133 | transmitted_blocks += + msg_at_time 134 | 135 | if np.mod(transmitted_blocks, NR_BLOCKS_PER_CONTROL_MSG) == 0: 136 | time_so_far = time.time()-start 137 | time_per_error = (time_so_far / (errors+1)) #+1 to avoid devide by 0 errors 138 | estim_minutes_left = ((min_errors * time_per_error) - time_so_far) 
/ 60 139 | 140 | print('EbN0_dB=', EbN0_dB, ', ' 141 | 'errors=', errors, 142 | ' elapsed time this run=', time_so_far, 143 | ' BER_estimate=','{:.2e}'.format( (errors / (transi.R_c * transmitted_blocks * N_var))), 144 | ' datarate_Bps =', '{:.2e}'.format( (transi.R_c *transmitted_blocks * N_var) / time_so_far), 145 | ' estim_minutes_left=',estim_minutes_left) 146 | 147 | 148 | 149 | end = time.time() 150 | 151 | BER_vector[-1] = errors / (transi.R_c *transmitted_blocks * N_var) 152 | spent = end-start 153 | datarate_Bps = (transi.R_c * transmitted_blocks * N_var) / spent 154 | print(EbN0_dB_vector[-1], '{:.2e}'.format(BER_vector[-1]), ' Bitrate:','{:.2e}'.format(datarate_Bps) ) 155 | np.savez(os.path.join(pathname, 'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 156 | 157 | plt.semilogy(EbN0_dB_vector, BER_vector) 158 | plt.xlabel('Eb/N0') 159 | plt.ylabel('Bit Error Rate ') 160 | plt.grid(True) 161 | 162 | plt.savefig(os.path.join(pathname, 'BER_figure'+str(EbN0_dB_vector[-1])+'.pdf')) 163 | 164 | 165 | if (BER_vector[-1] > target_error_rate) and (EbN0_dB < EbN0_dB_max_value): 166 | if BER_vector[-1] < BER_go_on_in_smaller_steps: 167 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth) 168 | else: 169 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth) 170 | 171 | BER_vector = np.append(BER_vector, 0) 172 | else: 173 | ready = True 174 | 175 | 176 | 177 | #Plot 178 | plt.figure() 179 | plt.semilogy(EbN0_dB_vector,BER_vector) 180 | plt.xlabel('Eb/N0') 181 | plt.ylabel('Bit Error Rate ') 182 | plt.grid(True) 183 | plt.show() 184 | 185 | #file = open(os.path.join(pathname,'BER_results.dat'),'w') 186 | #file.write(np.array2string(BER_vector)+'\n') 187 | #file.write(np.array2string(EbN0_dB_vector)) 188 | #file.close() 189 | np.savez(os.path.join(pathname,'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 190 | 191 | 
plt.savefig(os.path.join(pathname,'BER_figure.pgf')) 192 | plt.savefig(os.path.join(pathname,'BER_figure.pdf')) 193 | plt.savefig(os.path.join(pathname, 'BER_figure.png')) 194 | 195 | res_dict = {"EbN0_dB_vector" : EbN0_dB_vector, "BER_vector":BER_vector 196 | , "decoder_name":decoder_name} 197 | 198 | sio.savemat(os.path.join(pathname,'BER_results.mat'),res_dict) 199 | -------------------------------------------------------------------------------- /Irregular_LDPC_Decoding/DVB-S2/BER_simulation_OpenCL_min_sum.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.append(os.path.join(os.path.dirname(__file__), '../..')) 5 | import numpy as np 6 | from AWGN_Channel_Transmission.AWGN_Quantizer_BPSK import AWGN_Channel_Quantizer 7 | from AWGN_Channel_Transmission.LDPC_Transmitter import LDPC_BPSK_Transmitter as transmitter 8 | from Continous_LDPC_Decoding.min_sum_decoder_irreg import Min_Sum_Decoder_class_irregular as LDPC_Decoder 9 | import os.path 10 | import matplotlib as mpl 11 | import time 12 | import scipy.io as sio 13 | 14 | mpl.use("pgf") 15 | pgf_with_pdflatex = { 16 | "pgf.texsystem": "pdflatex", 17 | "text.usetex": True, 18 | "font.family": "serif" 19 | } 20 | mpl.rcParams.update(pgf_with_pdflatex) 21 | import matplotlib.pyplot as plt 22 | 23 | 24 | __author__ = "Maximilian Stark" 25 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 26 | __credits__ = ["Maximilian Stark"] 27 | __version__ = "1.0" 28 | __email__ = "maximilian.stark@tuhh.de" 29 | __status__ = "Production" 30 | __name__ = "Simulation Env" 31 | __doc__ = """This script sets up a proper simulation environment to analyse a min-sum decoder. 
32 | The BER performance of the chosen decoder can be stored and compared.""" 33 | 34 | # Choose the correct context 35 | #os.environ['PYOPENCL_CTX'] = '1' # CPU 36 | os.environ['PYOPENCL_CTX'] = '0' # GraKA 37 | os.environ['PYOPENCL_COMPILER_OUTPUT']='1' 38 | np.seterr(all='raise') 39 | 40 | # Load stored data 41 | filepath = os.path.join(os.path.dirname(__file__),'../../LDPC_codes/irregular_codes/DVB_S2_0.5.npz') 42 | 43 | 44 | card_vec = np.array([16,32]) 45 | 46 | # Human choice 47 | AD_max_abs = 3 48 | cardinality_Y_channel = 2000 49 | cardinality_T_channel = card_vec[sys.argv[1]] 50 | cardinality_T_decoder_ops = card_vec[sys.argv[1]] 51 | msg_at_time = 2 52 | min_errors = 7000 53 | 54 | decoder_name = 'minsum_'+str(cardinality_T_decoder_ops) 55 | 56 | print("###############") 57 | print(decoder_name) 58 | print("###############") 59 | 60 | 61 | timestr = time.strftime("%y%m%d-%H%M") 62 | 63 | filename = os.path.splitext(decoder_name)[0].replace('.','').replace('_config','')+'_'+timestr 64 | pathname = os.path.join(os.path.dirname(__file__),'BER_Results', filename.replace('.', '')) 65 | os.makedirs(os.path.dirname(os.path.join(pathname,' ')), exist_ok=True) 66 | 67 | 68 | 69 | 70 | imax = 50 71 | 72 | 73 | #sets the start EbN0_dB value 74 | EbN0_dB_max_value = 2.5 75 | 76 | #sets the start EbN0_dB value 77 | EbN0_dB_max_value = 1.2 78 | 79 | #simulation runs until this BER is achieved 80 | target_error_rate=1e-6 81 | BER_go_on_in_smaller_steps=1e-5 82 | 83 | #in steps of size.. 
84 | EbN0_dB_normal_stepwidth=0.1 85 | EbN0_dB_small_stepwidth=0.1 86 | 87 | # start EbN0 simulation 88 | EbN0_dB = 0 89 | EbN0_dB_ind = 0 90 | BER_vector = np.array([0.]) 91 | EbN0_dB_vector = np.array([EbN0_dB]) 92 | ready = False 93 | NR_BLOCKS_PER_CONTROL_MSG = 100 94 | 95 | transi = transmitter(filepath, msg_at_time) 96 | decodi = LDPC_Decoder(filepath, imax, cardinality_T_channel, msg_at_time) 97 | 98 | N_var = transi.codeword_len 99 | while not ready: 100 | EbN0_dB_ind += EbN0_dB_ind 101 | EbN0_dB = EbN0_dB_vector[-1] 102 | 103 | sigma_n2 = 10**(-EbN0_dB/10) / (2*transi.R_c) 104 | 105 | quanti = AWGN_Channel_Quantizer(sigma_n2, AD_max_abs, cardinality_T_channel, cardinality_Y_channel) 106 | quanti.init_OpenCL_quanti(N_var,msg_at_time,return_buffer_only=True) 107 | decodi.init_OpenCL_decoding(msg_at_time,quanti.context) 108 | 109 | 110 | errors = 0 111 | transmitted_blocks = 0 112 | # transmit 113 | start = time.time() 114 | while errors < min_errors: 115 | 116 | 117 | rec_data_quantized = quanti.quantize_direct_OpenCL_LLR(N_var, msg_at_time) 118 | decoded_mat = decodi.decode_OpenCL_min_sum(rec_data_quantized,buffer_in=True,return_buffer=True) 119 | 120 | errors += decodi.return_errors_all_zero(decoded_mat) 121 | transmitted_blocks += + msg_at_time 122 | 123 | 124 | 125 | if np.mod(transmitted_blocks, NR_BLOCKS_PER_CONTROL_MSG) == 0: 126 | time_so_far = time.time()-start 127 | time_per_error = (time_so_far / (errors+1)) #+1 to avoid devide by 0 errors 128 | estim_minutes_left = ((min_errors * time_per_error) - time_so_far) / 60 129 | 130 | print('EbN0_dB=', EbN0_dB, ', ' 131 | 'errors=', errors, 132 | ' elapsed time this run=', time_so_far, 133 | ' BER_estimate=','{:.2e}'.format( (errors / (transi.R_c*transmitted_blocks * N_var))), 134 | ' datarate_Bps =', '{:.2e}'.format( (transi.R_c*transmitted_blocks * N_var) / time_so_far), 135 | ' estim_minutes_left=',estim_minutes_left) 136 | 137 | 138 | end = time.time() 139 | 140 | BER_vector[-1] = errors / 
(transi.R_c*transmitted_blocks * N_var) 141 | spent = end-start 142 | datarate_Bps = (transi.R_c*transmitted_blocks * N_var) / spent 143 | print(EbN0_dB_vector[-1], '{:.2e}'.format(BER_vector[-1]), ' Bitrate:','{:.2e}'.format(datarate_Bps) ) 144 | np.savez(os.path.join(pathname, 'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 145 | 146 | 147 | if (BER_vector[-1] > target_error_rate) and (EbN0_dB < EbN0_dB_max_value): 148 | if BER_vector[-1] < BER_go_on_in_smaller_steps: 149 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth) 150 | else: 151 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth) 152 | 153 | BER_vector = np.append(BER_vector, 0) 154 | else: 155 | ready = True 156 | 157 | 158 | 159 | #Plot 160 | plt.figure() 161 | plt.semilogy(EbN0_dB_vector,BER_vector) 162 | plt.xlabel('Eb/N0') 163 | plt.ylabel('Bit Error Rate ') 164 | plt.grid(True) 165 | plt.show() 166 | 167 | 168 | np.savez(os.path.join(pathname,'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 169 | 170 | plt.savefig(os.path.join(pathname,'BER_figure.pgf')) 171 | plt.savefig(os.path.join(pathname,'BER_figure.pdf')) 172 | 173 | res_dict = {"EbN0_dB_vector" : EbN0_dB_vector, "BER_vector":BER_vector 174 | , "decoder_name":decoder_name} 175 | 176 | sio.savemat(os.path.join(pathname,'BER_results.mat'),res_dict) 177 | -------------------------------------------------------------------------------- /Irregular_LDPC_Decoding/DVB-S2/decoder_config_generation.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | from AWGN_Channel_Transmission.AWGN_Discrete_Density_Evolution import \ 4 | AWGN_Discrete_Density_Evolution_class_irregular as DDE_irregular 5 | from Discrete_LDPC_decoding.Information_Matching import * 6 | 7 | __author__ = "Maximilian Stark" 8 | __copyright__ = "2016, Institute of Communications, University 
of Technology Hamburg" 9 | __credits__ = ["Maximilian Stark"] 10 | __version__ = "1.0" 11 | __email__ = "maximilian.stark@tuhh.de" 12 | __status__ = "Production" 13 | __name__ = "Decoder Generation" 14 | __doc__ = """This script generates a discrete decoder for the desired design-Eb/N0.""" 15 | 16 | 17 | 18 | # set noise level for DE 19 | EbN0_dB_mapping_gen = 0.7 20 | for EbN0_dB_mapping_gen in np.array([0.6,0.7,0.8,0.9,1.0]): 21 | # set quantizer limits 22 | AD_Max_abs = 3 23 | plt.figure() 24 | 25 | cardinality_Y_channel = 2000 26 | cardinality_T_channel = 16 27 | cardinality_T_decoder_ops = 16 28 | i_max = 50 29 | nror = 10 30 | 31 | # 1 2 3 4 5 6 7 32 | d_c_dist = np.array([0,0,0,0,0,1,32399]) / 32400 33 | # 1 2 3 4 5 6 7 8 34 | d_v_dist = np.array([1,32399,19440,0,0,0,0,12960])/64800 35 | 36 | 37 | lambda_vec = convert_node_to_edge_degree(d_v_dist) 38 | rho_vec = convert_node_to_edge_degree(d_c_dist) 39 | 40 | #R_c = 1-d_v/d_c # code rate 41 | R_c = 1 - (d_v_dist*(np.arange(d_v_dist.shape[0])+1)).sum() / (d_c_dist*(np.arange(d_c_dist.shape[0])+1)).sum() # code rate 42 | 43 | sigma_n2 = 10**(-EbN0_dB_mapping_gen/10) / (2*R_c) 44 | steps = 5 45 | 46 | config = 'cas' 47 | # generate decoder config 48 | DDE_inst = DDE_irregular(sigma_n2, AD_Max_abs, cardinality_Y_channel, cardinality_T_channel, 49 | cardinality_T_decoder_ops, lambda_vec, rho_vec, i_max, nror , match = True) 50 | 51 | DDE_inst.run_discrete_density_evolution() 52 | DDE_inst.save_config(config) 53 | plt.plot(DDE_inst.DDE_inst_data['MI_T_dvm1_v_X_dvm1_v'],label='match') 54 | 55 | 56 | # DDE_inst = DDE_irregular(sigma_n2, AD_Max_abs, cardinality_Y_channel, cardinality_T_channel, 57 | # cardinality_T_decoder_ops, lambda_vec, rho_vec, i_max, nror , match = False) 58 | # 59 | # DDE_inst.run_discrete_density_evolution() 60 | # DDE_inst.save_config('adapt_no_match') 61 | # plt.plot(DDE_inst.DDE_inst_data['MI_T_dvm1_v_X_dvm1_v'],label='no match') 62 | # plt.legend(loc=4) 63 | 
-------------------------------------------------------------------------------- /Irregular_LDPC_Decoding/WLAN/BER_simulation_OpenCL.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import pickle 3 | import time 4 | 5 | import matplotlib as mpl 6 | import numpy as np 7 | import scipy.io as sio 8 | 9 | from AWGN_Channel_Transmission.AWGN_Quantizer_BPSK import AWGN_Channel_Quantizer 10 | from AWGN_Channel_Transmission.LDPC_Transmitter import LDPC_BPSK_Transmitter as transmitter 11 | from Discrete_LDPC_decoding.discrete_LDPC_decoder_irreg import Discrete_LDPC_Decoder_class_irregular as LDPC_Decoder 12 | 13 | mpl.use("pgf") 14 | pgf_with_pdflatex = { 15 | "pgf.texsystem": "pdflatex", 16 | "text.usetex": True, 17 | "font.family": "serif" 18 | } 19 | mpl.rcParams.update(pgf_with_pdflatex) 20 | import matplotlib.pyplot as plt 21 | 22 | __author__ = "Maximilian Stark" 23 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 24 | __credits__ = ["Maximilian Stark"] 25 | __version__ = "1.0" 26 | __email__ = "maximilian.stark@tuhh.de" 27 | __status__ = "Production" 28 | __name__ = "Simulation Env" 29 | __doc__ = """This script sets up a proper simulation environment to analyse a purely discrete decoder that works only 30 | on integers. 
The BER performance of the chosen decoder can be stored and compared.""" 31 | 32 | 33 | # Chose the right context 34 | #os.environ['PYOPENCL_CTX'] = '1' # CPU 35 | #os.environ['PYOPENCL_CTX'] = '0' # GraKA 36 | #os.environ['PYOPENCL_COMPILER_OUTPUT']='1' 37 | np.seterr(all='raise') 38 | 39 | # Load stored data 40 | filepath ="../../LDPC_codes/irregular_codes/WLAN_H.npy" 41 | 42 | #decoder_name = 'decoder_config_EbN0_gen_0.7_16no_match.pkl' 43 | match='true' #'############## ACHTUNG ######################' 44 | 45 | # adapt1 - no negatives 46 | # adapt2 - match against pdf with highest mena(abs(LLR)) 47 | # adapt2a - match against pdf with highest max(abs(LLR)) 48 | # adapt3 - p(z1|t0) 49 | # adapt4 - p(z1) 50 | # adapt5 - allow negatives and p(z1) 51 | 52 | for val in ['0.9_16adapt71']: 53 | #decoder_name = 'decoder_config_EbN0_gen_'+str(val)+'_16.pkl' 54 | decoder_name = 'decoder_config_EbN0_gen_'+val+'.pkl' 55 | pkl_file = open(decoder_name, 'rb') 56 | generated_decoder = pickle.load(pkl_file) 57 | 58 | 59 | timestr = time.strftime("%y%m%d-%H%M") 60 | 61 | filename = os.path.splitext(decoder_name)[0].replace('.','').replace('_config','')+'_'+timestr 62 | pathname = os.path.join('BER_Results', filename.replace('.', '')) 63 | os.makedirs(os.path.dirname(os.path.join(pathname,' ')), exist_ok=True) 64 | 65 | 66 | Trellis_checknodevector_a = generated_decoder['Trellis_checknodevector_a'] 67 | Trellis_varnodevector_a = generated_decoder['Trellis_varnodevector_a'] 68 | matching_vector_checknode =generated_decoder['matching_vector_checknode'] 69 | matching_vector_varnode = generated_decoder['matching_vector_varnode'] 70 | # Encode Codeword 71 | 72 | # Human choice 73 | AD_max_abs = 3 74 | cardinality_Y_channel = 2000 75 | cardinality_T_channel = generated_decoder['cardinality_T_decoder_ops'] 76 | cardinality_T_decoder_ops = generated_decoder['cardinality_T_decoder_ops'] 77 | msg_at_time = 2000 78 | min_errors = 7000 79 | 80 | imax = generated_decoder['imax'] 81 | N_var = 
1296 82 | 83 | #sets the start EbN0_dB value 84 | EbN0_dB_max_value = 2.5 85 | 86 | #simulation runs until this BER is achieved 87 | target_error_rate=1e-6 88 | BER_go_on_in_smaller_steps=1e-6 89 | 90 | #in steps of size.. 91 | EbN0_dB_normal_stepwidth=0.1 92 | EbN0_dB_small_stepwidth=0.1 93 | 94 | # start EbN0 simulation 95 | EbN0_dB = 0 96 | EbN0_dB_ind = 0 97 | BER_vector = np.array([0.]) 98 | EbN0_dB_vector = np.array([EbN0_dB]) 99 | ready = False 100 | NR_BLOCKS_PER_CONTROL_MSG = 100 101 | 102 | transi = transmitter(filepath, msg_at_time) 103 | decodi = LDPC_Decoder(filepath, imax, cardinality_T_channel, cardinality_T_decoder_ops, Trellis_checknodevector_a, 104 | Trellis_varnodevector_a,matching_vector_checknode, matching_vector_varnode, msg_at_time,match=match) 105 | 106 | 107 | while not ready: 108 | EbN0_dB_ind += EbN0_dB_ind 109 | EbN0_dB = EbN0_dB_vector[-1] 110 | 111 | sigma_n2 = 10**(-EbN0_dB/10) / (2*transi.R_c) 112 | 113 | #chani = AWGN_channel(sigma_n2) 114 | 115 | quanti = AWGN_Channel_Quantizer(sigma_n2, AD_max_abs, cardinality_T_channel, cardinality_Y_channel) 116 | quanti.init_OpenCL_quanti(N_var,msg_at_time,return_buffer_only=True) 117 | decodi.init_OpenCL_decoding(msg_at_time,quanti.context) 118 | 119 | 120 | errors = 0 121 | transmitted_blocks = 0 122 | # transmit 123 | start = time.time() 124 | while errors < min_errors: 125 | 126 | 127 | rec_data_quantized = quanti.quantize_direct_OpenCL(N_var, msg_at_time) 128 | decoded_mat = decodi.decode_OpenCL(rec_data_quantized,buffer_in=True,return_buffer=True) 129 | 130 | errors += decodi.return_errors_all_zero(decoded_mat) 131 | transmitted_blocks += + msg_at_time 132 | 133 | 134 | # last_transmitted_bits = np.zeros((N_var, msg_at_time)) 135 | # rec_data_quantized = quanti.quantize_direct_OpenCL(last_transmitted_bits,msg_at_time) 136 | 137 | # decoded_mat = decodi.decode_OpenCL(rec_data_quantized, buffer_in=True, return_buffer=True) 138 | # errors += decodi.return_errors_all_zero(decoded_mat) 139 | 
140 | # transmitted_blocks += + msg_at_time 141 | 142 | if np.mod(transmitted_blocks, NR_BLOCKS_PER_CONTROL_MSG) == 0: 143 | time_so_far = time.time() - start 144 | time_per_error = (time_so_far / (errors + 1)) # +1 to avoid devide by 0 errors 145 | estim_minutes_left = ((min_errors * time_per_error) - time_so_far) / 60 146 | 147 | print('EbN0_dB=', EbN0_dB, ', ' 148 | 'errors=', errors, 149 | ' elapsed time this run=', time_so_far, 150 | ' BER_estimate=', '{:.2e}'.format((errors / (transi.R_c * transmitted_blocks * N_var))), 151 | ' datarate_Bps =', '{:.2e}'.format((transi.R_c * transmitted_blocks * N_var) / time_so_far), 152 | ' estim_minutes_left=', estim_minutes_left) 153 | 154 | 155 | 156 | end = time.time() 157 | 158 | BER_vector[-1] = errors / (transi.R_c * transmitted_blocks * N_var) 159 | spent = end - start 160 | datarate_Bps = (transi.R_c * transmitted_blocks * N_var) / spent 161 | print(EbN0_dB_vector[-1], '{:.2e}'.format(BER_vector[-1]), ' Bitrate:','{:.2e}'.format(datarate_Bps) ) 162 | np.savez(os.path.join(pathname, 'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 163 | if (BER_vector[-1] > target_error_rate) and (EbN0_dB < EbN0_dB_max_value): 164 | if BER_vector[-1] < BER_go_on_in_smaller_steps: 165 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth) 166 | else: 167 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth) 168 | 169 | BER_vector = np.append(BER_vector, 0) 170 | else: 171 | ready = True 172 | 173 | 174 | 175 | #Plot 176 | plt.figure() 177 | plt.semilogy(EbN0_dB_vector,BER_vector) 178 | plt.xlabel('Eb/N0') 179 | plt.ylabel('Bit Error Rate ') 180 | plt.grid(True) 181 | plt.show() 182 | 183 | #file = open(os.path.join(pathname,'BER_results.dat'),'w') 184 | #file.write(np.array2string(BER_vector)+'\n') 185 | #file.write(np.array2string(EbN0_dB_vector)) 186 | #file.close() 187 | np.savez(os.path.join(pathname,'BER_results'), 
EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 188 | 189 | plt.savefig(os.path.join(pathname,'BER_figure.pgf')) 190 | plt.savefig(os.path.join(pathname,'BER_figure.pdf')) 191 | 192 | res_dict = {"EbN0_dB_vector" : EbN0_dB_vector, "BER_vector":BER_vector 193 | , "decoder_name":decoder_name} 194 | 195 | sio.savemat(os.path.join(pathname,'BER_results.mat'),res_dict) 196 | push = pb.push_note("Simulation done!", filename) -------------------------------------------------------------------------------- /Irregular_LDPC_Decoding/WLAN/BER_simulation_OpenCL_enc.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import pickle 3 | import time 4 | 5 | import matplotlib as mpl 6 | import numpy as np 7 | import scipy.io as sio 8 | 9 | from AWGN_Channel_Transmission.AWGN_Quantizer_BPSK import AWGN_Channel_Quantizer 10 | from AWGN_Channel_Transmission.AWGN_channel import AWGN_channel 11 | from AWGN_Channel_Transmission.LDPC_Transmitter import LDPC_BPSK_Transmitter as transmitter 12 | from Discrete_LDPC_decoding.discrete_LDPC_decoder_irreg import Discrete_LDPC_Decoder_class_irregular as LDPC_Decoder 13 | 14 | mpl.use("pgf") 15 | pgf_with_pdflatex = { 16 | "pgf.texsystem": "pdflatex", 17 | "text.usetex": True, 18 | "font.family": "serif" 19 | } 20 | mpl.rcParams.update(pgf_with_pdflatex) 21 | import matplotlib.pyplot as plt 22 | 23 | __author__ = "Maximilian Stark" 24 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 25 | __credits__ = ["Maximilian Stark"] 26 | __version__ = "1.0" 27 | __email__ = "maximilian.stark@tuhh.de" 28 | __status__ = "Production" 29 | __name__ = "Simulation Env" 30 | __doc__ = """This script sets up a proper simulation environment to analyse a purely discrete decoder that works only 31 | on integers. 
The BER performance of the chosen decoder can be stored and compared.""" 32 | 33 | 34 | # Choose the correct context 35 | #os.environ['PYOPENCL_CTX'] = '1' # CPU 36 | os.environ['PYOPENCL_CTX'] = '0' # GraKA 37 | #os.environ['PYOPENCL_COMPILER_OUTPUT']='1' 38 | np.seterr(all='raise') 39 | 40 | # Load stored data 41 | filepath ="../../LDPC_codes/irregular_codes/WLAN_H.npy" 42 | 43 | #decoder_name = 'decoder_config_EbN0_gen_0.7_16no_match.pkl' 44 | match='true' #'############## ACHTUNG ######################' 45 | 46 | 47 | for val in ['0.7_16cas','0.8_16cas']: 48 | #decoder_name = 'decoder_config_EbN0_gen_'+str(val)+'_16.pkl' 49 | decoder_name = 'decoder_config_EbN0_gen_'+val+'.pkl' 50 | pkl_file = open(decoder_name, 'rb') 51 | generated_decoder = pickle.load(pkl_file) 52 | 53 | 54 | timestr = time.strftime("%y%m%d-%H%M") 55 | 56 | filename = os.path.splitext(decoder_name)[0].replace('.','').replace('_config','')+'_'+timestr 57 | pathname = os.path.join('BER_Results', filename.replace('.', '')) 58 | os.makedirs(os.path.dirname(os.path.join(pathname,' ')), exist_ok=True) 59 | 60 | 61 | Trellis_checknodevector_a = generated_decoder['Trellis_checknodevector_a'] 62 | Trellis_varnodevector_a = generated_decoder['Trellis_varnodevector_a'] 63 | matching_vector_checknode =generated_decoder['matching_vector_checknode'] 64 | matching_vector_varnode = generated_decoder['matching_vector_varnode'] 65 | # Encode Codeword 66 | 67 | # Human choice 68 | AD_max_abs = 3 69 | cardinality_Y_channel = 2000 70 | cardinality_T_channel = generated_decoder['cardinality_T_decoder_ops'] 71 | cardinality_T_decoder_ops = generated_decoder['cardinality_T_decoder_ops'] 72 | msg_at_time = 200 73 | min_errors = 7000 74 | 75 | imax = generated_decoder['imax'] 76 | N_var = 1296 77 | 78 | #sets the start EbN0_dB value 79 | EbN0_dB_max_value = 2.5 80 | 81 | #simulation runs until this BER is achieved 82 | target_error_rate=1e-6 83 | BER_go_on_in_smaller_steps=1e-6 84 | 85 | #in steps of size.. 
86 | EbN0_dB_normal_stepwidth=0.1 87 | EbN0_dB_small_stepwidth=0.1 88 | 89 | # start EbN0 simulation 90 | EbN0_dB = 0 91 | EbN0_dB_ind = 0 92 | BER_vector = np.array([0.]) 93 | EbN0_dB_vector = np.array([EbN0_dB]) 94 | ready = False 95 | NR_BLOCKS_PER_CONTROL_MSG = 100 96 | 97 | 98 | transi = transmitter(filepath, msg_at_time) 99 | decodi = LDPC_Decoder(filepath, imax, cardinality_T_channel, cardinality_T_decoder_ops, Trellis_checknodevector_a, 100 | Trellis_varnodevector_a,matching_vector_checknode, matching_vector_varnode, msg_at_time,match=match) 101 | 102 | 103 | while not ready: 104 | EbN0_dB_ind += EbN0_dB_ind 105 | EbN0_dB = EbN0_dB_vector[-1] 106 | 107 | sigma_n2 = 10**(-EbN0_dB/10) / (2*transi.R_c) 108 | 109 | chani = AWGN_channel(sigma_n2) 110 | 111 | quanti = AWGN_Channel_Quantizer(sigma_n2, AD_max_abs, cardinality_T_channel, cardinality_Y_channel) 112 | quanti.init_OpenCL_quanti(N_var,msg_at_time,return_buffer_only=True) 113 | decodi.init_OpenCL_decoding(msg_at_time,quanti.context) 114 | 115 | 116 | errors = 0 117 | transmitted_blocks = 0 118 | # transmit 119 | start = time.time() 120 | while errors < min_errors: 121 | 122 | send_message = transi.transmit() 123 | 124 | # receive data 125 | rec_data = chani.transmission(send_message) 126 | # quantize received data 127 | rec_data_quantized = quanti.quantize_on_host(rec_data) 128 | 129 | 130 | # rec_data_quantized = quanti.quantize_direct_OpenCL(N_var, msg_at_time) 131 | 132 | decoded_mat = decodi.decode_OpenCL(rec_data_quantized,buffer_in=False,return_buffer=False) 133 | 134 | errors += ((decoded_mat[:transi.data_len] < cardinality_T_decoder_ops / 2) != transi.last_transmitted_bits).sum() 135 | transmitted_blocks += + msg_at_time 136 | 137 | # errors += decodi.return_errors_all_zero(decoded_mat) 138 | # transmitted_blocks += + msg_at_time 139 | 140 | 141 | if np.mod(transmitted_blocks, NR_BLOCKS_PER_CONTROL_MSG) == 0: 142 | time_so_far = time.time()-start 143 | time_per_error = (time_so_far / (errors+1)) 
#+1 to avoid devide by 0 errors 144 | estim_minutes_left = ((min_errors * time_per_error) - time_so_far) / 60 145 | 146 | print('EbN0_dB=', EbN0_dB, ', ' 147 | 'errors=', errors, 148 | ' elapsed time this run=', time_so_far, 149 | ' BER_estimate=','{:.2e}'.format( (errors / (transi.R_c * transmitted_blocks * N_var))), 150 | ' datarate_Bps =', '{:.2e}'.format( (transi.R_c *transmitted_blocks * N_var) / time_so_far), 151 | ' estim_minutes_left=',estim_minutes_left) 152 | 153 | 154 | 155 | end = time.time() 156 | 157 | BER_vector[-1] = errors / (transi.R_c *transmitted_blocks * N_var) 158 | spent = end-start 159 | datarate_Bps = (transi.R_c * transmitted_blocks * N_var) / spent 160 | print(EbN0_dB_vector[-1], '{:.2e}'.format(BER_vector[-1]), ' Bitrate:','{:.2e}'.format(datarate_Bps) ) 161 | np.savez(os.path.join(pathname, 'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 162 | 163 | plt.semilogy(EbN0_dB_vector, BER_vector) 164 | plt.xlabel('Eb/N0') 165 | plt.ylabel('Bit Error Rate ') 166 | plt.grid(True) 167 | 168 | plt.savefig(os.path.join(pathname, 'BER_figure'+str(EbN0_dB_vector[-1])+'.pdf')) 169 | 170 | 171 | if (BER_vector[-1] > target_error_rate) and (EbN0_dB < EbN0_dB_max_value): 172 | if BER_vector[-1] < BER_go_on_in_smaller_steps: 173 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth) 174 | else: 175 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth) 176 | 177 | BER_vector = np.append(BER_vector, 0) 178 | else: 179 | ready = True 180 | 181 | 182 | 183 | #Plot 184 | plt.figure() 185 | plt.semilogy(EbN0_dB_vector,BER_vector) 186 | plt.xlabel('Eb/N0') 187 | plt.ylabel('Bit Error Rate ') 188 | plt.grid(True) 189 | plt.show() 190 | 191 | #file = open(os.path.join(pathname,'BER_results.dat'),'w') 192 | #file.write(np.array2string(BER_vector)+'\n') 193 | #file.write(np.array2string(EbN0_dB_vector)) 194 | #file.close() 195 | 
np.savez(os.path.join(pathname,'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 196 | 197 | plt.savefig(os.path.join(pathname,'BER_figure.pgf')) 198 | plt.savefig(os.path.join(pathname,'BER_figure.pdf')) 199 | plt.savefig(os.path.join(pathname, 'BER_figure.png')) 200 | 201 | res_dict = {"EbN0_dB_vector" : EbN0_dB_vector, "BER_vector":BER_vector 202 | , "decoder_name":decoder_name} 203 | 204 | sio.savemat(os.path.join(pathname,'BER_results.mat'),res_dict) 205 | -------------------------------------------------------------------------------- /Irregular_LDPC_Decoding/WLAN/BER_simulation_OpenCL_min_sum.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import time 3 | 4 | import matplotlib as mpl 5 | import numpy as np 6 | import scipy.io as sio 7 | 8 | from AWGN_Channel_Transmission.AWGN_Quantizer_BPSK import AWGN_Channel_Quantizer 9 | from AWGN_Channel_Transmission.LDPC_Transmitter import LDPC_BPSK_Transmitter as transmitter 10 | from Continous_LDPC_Decoding.min_sum_decoder_irreg import Min_Sum_Decoder_class_irregular as LDPC_Decoder 11 | 12 | mpl.use("pgf") 13 | pgf_with_pdflatex = { 14 | "pgf.texsystem": "pdflatex", 15 | "text.usetex": True, 16 | "font.family": "serif" 17 | } 18 | mpl.rcParams.update(pgf_with_pdflatex) 19 | import matplotlib.pyplot as plt 20 | 21 | __author__ = "Maximilian Stark" 22 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 23 | __credits__ = ["Maximilian Stark"] 24 | __version__ = "1.0" 25 | __email__ = "maximilian.stark@tuhh.de" 26 | __status__ = "Production" 27 | __name__ = "Simulation Env" 28 | __doc__ = """This script sets up a proper simulation environment to analyse a min-sum decoder. 
29 | The BER performance of the chosen decoder can be stored and compared.""" 30 | 31 | # Choose the correct context 32 | #os.environ['PYOPENCL_CTX'] = '1' # CPU 33 | os.environ['PYOPENCL_CTX'] = '0' # GraKA 34 | os.environ['PYOPENCL_COMPILER_OUTPUT']='1' 35 | np.seterr(all='raise') 36 | 37 | # Load stored data 38 | filepath ="../../LDPC_codes/irregular_codes/WLAN_H.npy" 39 | 40 | 41 | # Human choice 42 | AD_max_abs = 3 43 | cardinality_Y_channel = 2000 44 | cardinality_T_channel = 16 45 | cardinality_T_decoder_ops = 16 46 | msg_at_time = 2 47 | min_errors = 7000 48 | 49 | decoder_name = 'minsum_'+str(cardinality_T_decoder_ops) 50 | 51 | timestr = time.strftime("%y%m%d-%H%M") 52 | 53 | filename = os.path.splitext(decoder_name)[0].replace('.','').replace('_config','')+'_'+timestr 54 | pathname = os.path.join('BER_Results', filename.replace('.', '')) 55 | os.makedirs(os.path.dirname(os.path.join(pathname,' ')), exist_ok=True) 56 | 57 | 58 | 59 | 60 | imax = 50 61 | N_var = 1296 62 | 63 | #sets the start EbN0_dB value 64 | EbN0_dB_max_value = 2.5 65 | 66 | #simulation runs until this BER is achieved 67 | target_error_rate=1e-6 68 | BER_go_on_in_smaller_steps=1e-6 69 | 70 | #in steps of size.. 
71 | EbN0_dB_normal_stepwidth=0.1 72 | EbN0_dB_small_stepwidth=0.1 73 | 74 | # start EbN0 simulation 75 | EbN0_dB = 0 76 | EbN0_dB_ind = 0 77 | BER_vector = np.array([0.]) 78 | EbN0_dB_vector = np.array([EbN0_dB]) 79 | ready = False 80 | NR_BLOCKS_PER_CONTROL_MSG = 100 81 | 82 | transi = transmitter(filepath, msg_at_time) 83 | decodi = LDPC_Decoder(filepath, imax, cardinality_T_channel, msg_at_time) 84 | 85 | 86 | while not ready: 87 | EbN0_dB_ind += EbN0_dB_ind 88 | EbN0_dB = EbN0_dB_vector[-1] 89 | 90 | sigma_n2 = 10**(-EbN0_dB/10) / (2*transi.R_c) 91 | 92 | quanti = AWGN_Channel_Quantizer(sigma_n2, AD_max_abs, cardinality_T_channel, cardinality_Y_channel) 93 | quanti.init_OpenCL_quanti(N_var,msg_at_time,return_buffer_only=True) 94 | decodi.init_OpenCL_decoding(msg_at_time,quanti.context) 95 | 96 | 97 | errors = 0 98 | errors1 = 0 99 | 100 | transmitted_blocks = 0 101 | # transmit 102 | start = time.time() 103 | while errors < min_errors: 104 | 105 | 106 | rec_data_quantized = quanti.quantize_direct_OpenCL_LLR(N_var, msg_at_time) 107 | decoded_mat = decodi.decode_OpenCL_min_sum(rec_data_quantized,buffer_in=True,return_buffer=True) 108 | errors += decodi.return_errors_all_zero(decoded_mat) 109 | transmitted_blocks += + msg_at_time 110 | 111 | # decoded_mat = decodi.decode_on_host(rec_data_quantized.get()) 112 | # errors += np.sum(decoded_mat < 0) 113 | # transmitted_blocks += + msg_at_time 114 | 115 | 116 | # last_transmitted_bits = np.zeros((N_var, msg_at_time)) 117 | # rec_data_quantized = quanti.quantize_direct_OpenCL(last_transmitted_bits,msg_at_time) 118 | 119 | # decoded_mat = decodi.decode_OpenCL(rec_data_quantized, buffer_in=True, return_buffer=True) 120 | # errors += decodi.return_errors_all_zero(decoded_mat) 121 | 122 | # transmitted_blocks += + msg_at_time 123 | 124 | if np.mod(transmitted_blocks, NR_BLOCKS_PER_CONTROL_MSG) == 0: 125 | time_so_far = time.time()-start 126 | time_per_error = (time_so_far / (errors+1)) #+1 to avoid devide by 0 errors 127 | 
estim_minutes_left = ((min_errors * time_per_error) - time_so_far) / 60 128 | 129 | print('EbN0_dB=', EbN0_dB, ', ' 130 | 'errors=', errors, 131 | ' elapsed time this run=', time_so_far, 132 | ' BER_estimate=','{:.2e}'.format( (errors / (transi.R_c*transmitted_blocks * N_var))), 133 | ' datarate_Bps =', '{:.2e}'.format( (transi.R_c*transmitted_blocks * N_var) / time_so_far), 134 | ' estim_minutes_left=',estim_minutes_left) 135 | 136 | 137 | 138 | end = time.time() 139 | 140 | BER_vector[-1] = errors / (transi.R_c*transmitted_blocks * N_var) 141 | spent = end-start 142 | datarate_Bps = (transi.R_c*transmitted_blocks * N_var) / spent 143 | print(EbN0_dB_vector[-1], '{:.2e}'.format(BER_vector[-1]), ' Bitrate:','{:.2e}'.format(datarate_Bps) ) 144 | np.savez(os.path.join(pathname, 'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 145 | 146 | # ax = plt.gca() 147 | # ax.clear() 148 | # plt.semilogy(EbN0_dB_vector, BER_vector) 149 | # plt.xlabel('Eb/N0') 150 | # plt.ylabel('Bit Error Rate ') 151 | # plt.grid(True) 152 | # plt.pause(0.0001) 153 | 154 | if (BER_vector[-1] > target_error_rate) and (EbN0_dB < EbN0_dB_max_value): 155 | if BER_vector[-1] < BER_go_on_in_smaller_steps: 156 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth) 157 | else: 158 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth) 159 | 160 | BER_vector = np.append(BER_vector, 0) 161 | else: 162 | ready = True 163 | 164 | 165 | 166 | #Plot 167 | plt.figure() 168 | plt.semilogy(EbN0_dB_vector,BER_vector) 169 | plt.xlabel('Eb/N0') 170 | plt.ylabel('Bit Error Rate ') 171 | plt.grid(True) 172 | plt.show() 173 | 174 | #file = open(os.path.join(pathname,'BER_results.dat'),'w') 175 | #file.write(np.array2string(BER_vector)+'\n') 176 | #file.write(np.array2string(EbN0_dB_vector)) 177 | #file.close() 178 | np.savez(os.path.join(pathname,'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 179 | 180 
| plt.savefig(os.path.join(pathname,'BER_figure.pgf')) 181 | plt.savefig(os.path.join(pathname,'BER_figure.pdf')) 182 | 183 | res_dict = {"EbN0_dB_vector" : EbN0_dB_vector, "BER_vector":BER_vector 184 | , "decoder_name":decoder_name} 185 | 186 | sio.savemat(os.path.join(pathname,'BER_results.mat'),res_dict) 187 | -------------------------------------------------------------------------------- /Irregular_LDPC_Decoding/WLAN/BER_simulation_OpenCL_quant_BP.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import time 3 | 4 | import matplotlib as mpl 5 | import numpy as np 6 | import scipy.io as sio 7 | 8 | from AWGN_Channel_Transmission.AWGN_Quantizer_BPSK import AWGN_Channel_Quantizer 9 | from AWGN_Channel_Transmission.LDPC_Transmitter import LDPC_BPSK_Transmitter as transmitter 10 | from Continous_LDPC_Decoding.bp_decoder_irreg import BeliefPropagationDecoderClassIrregular as LDPC_Decoder 11 | 12 | mpl.use("pgf") 13 | pgf_with_pdflatex = { 14 | "pgf.texsystem": "pdflatex", 15 | "text.usetex": True, 16 | "font.family": "serif" 17 | } 18 | mpl.rcParams.update(pgf_with_pdflatex) 19 | import matplotlib.pyplot as plt 20 | 21 | __author__ = "Maximilian Stark" 22 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 23 | __credits__ = ["Maximilian Stark"] 24 | __version__ = "1.0" 25 | __email__ = "maximilian.stark@tuhh.de" 26 | __status__ = "Production" 27 | __name__ = "Simulation Env" 28 | __doc__ = """This script sets up a proper simulation environment to analyse a belief propagation decoder that receives 29 | only a limited number of distinct, quantized LLRs. 
The BER performance of the chosen decoder can be stored 30 | and compared.""" 31 | 32 | 33 | # Choose the correct context 34 | #os.environ['PYOPENCL_CTX'] = '1' # CPU 35 | os.environ['PYOPENCL_CTX'] = '0' # GraKA 36 | os.environ['PYOPENCL_COMPILER_OUTPUT']='1' 37 | np.seterr(all='raise') 38 | 39 | # Load stored data 40 | filepath ="../../LDPC_codes/irregular_codes/WLAN_H.npy" 41 | 42 | 43 | # Human choice 44 | AD_max_abs = 3 45 | cardinality_Y_channel = 2000 46 | cardinality_T_channel = 16 47 | cardinality_T_decoder_ops = 16 48 | msg_at_time = 100 49 | min_errors = 7000 50 | 51 | decoder_name = 'quant_bp'+str(cardinality_T_decoder_ops) 52 | 53 | timestr = time.strftime("%y%m%d-%H%M") 54 | 55 | filename = os.path.splitext(decoder_name)[0].replace('.','').replace('_config','')+'_'+timestr 56 | pathname = os.path.join('BER_Results', filename.replace('.', '')) 57 | os.makedirs(os.path.dirname(os.path.join(pathname,' ')), exist_ok=True) 58 | 59 | 60 | 61 | 62 | imax = 50 63 | N_var = 1296 64 | 65 | #sets the start EbN0_dB value 66 | EbN0_dB_max_value = 2.5 67 | 68 | #simulation runs until this BER is achieved 69 | target_error_rate=1e-6 70 | BER_go_on_in_smaller_steps=1e-6 71 | 72 | #in steps of size.. 
73 | EbN0_dB_normal_stepwidth=0.1 74 | EbN0_dB_small_stepwidth=0.1 75 | 76 | # start EbN0 simulation 77 | EbN0_dB = 0 78 | EbN0_dB_ind = 0 79 | BER_vector = np.array([0.]) 80 | EbN0_dB_vector = np.array([EbN0_dB]) 81 | ready = False 82 | NR_BLOCKS_PER_CONTROL_MSG = 100 83 | 84 | transi = transmitter(filepath, msg_at_time) 85 | decodi = LDPC_Decoder(filepath, imax, cardinality_T_channel, msg_at_time) 86 | 87 | 88 | while not ready: 89 | EbN0_dB_ind += EbN0_dB_ind 90 | EbN0_dB = EbN0_dB_vector[-1] 91 | 92 | sigma_n2 = 10**(-EbN0_dB/10) / (2*transi.R_c) 93 | 94 | quanti = AWGN_Channel_Quantizer(sigma_n2, AD_max_abs, cardinality_T_channel, cardinality_Y_channel) 95 | quanti.init_OpenCL_quanti(N_var,msg_at_time,return_buffer_only=True) 96 | decodi.init_OpenCL_decoding(msg_at_time,quanti.context) 97 | 98 | 99 | errors = 0 100 | errors1 = 0 101 | 102 | transmitted_blocks = 0 103 | # transmit 104 | start = time.time() 105 | while errors < min_errors: 106 | 107 | 108 | rec_data_quantized = quanti.quantize_direct_OpenCL_LLR(N_var, msg_at_time) 109 | decoded_mat = decodi.decode_OpenCL_belief_propagation(rec_data_quantized,buffer_in=True,return_buffer=True) 110 | errors += decodi.return_errors_all_zero(decoded_mat) 111 | transmitted_blocks += + msg_at_time 112 | 113 | # decoded_mat = decodi.decode_on_host(rec_data_quantized.get()) 114 | # errors += np.sum(decoded_mat < 0) 115 | # transmitted_blocks += + msg_at_time 116 | 117 | 118 | # last_transmitted_bits = np.zeros((N_var, msg_at_time)) 119 | # rec_data_quantized = quanti.quantize_direct_OpenCL(last_transmitted_bits,msg_at_time) 120 | 121 | # decoded_mat = decodi.decode_OpenCL(rec_data_quantized, buffer_in=True, return_buffer=True) 122 | # errors += decodi.return_errors_all_zero(decoded_mat) 123 | 124 | # transmitted_blocks += + msg_at_time 125 | 126 | if np.mod(transmitted_blocks, NR_BLOCKS_PER_CONTROL_MSG) == 0: 127 | time_so_far = time.time()-start 128 | time_per_error = (time_so_far / (errors+1)) #+1 to avoid devide by 0 
errors 129 | estim_minutes_left = ((min_errors * time_per_error) - time_so_far) / 60 130 | 131 | print('EbN0_dB=', EbN0_dB, ', ' 132 | 'errors=', errors, 133 | ' elapsed time this run=', time_so_far, 134 | ' BER_estimate=','{:.2e}'.format( (errors / (transi.R_c*transmitted_blocks * N_var))), 135 | ' datarate_Bps =', '{:.2e}'.format( (transi.R_c*transmitted_blocks * N_var) / time_so_far), 136 | ' estim_minutes_left=',estim_minutes_left) 137 | 138 | 139 | 140 | end = time.time() 141 | 142 | BER_vector[-1] = errors / (transi.R_c*transmitted_blocks * N_var) 143 | spent = end-start 144 | datarate_Bps = (transi.R_c*transmitted_blocks * N_var) / spent 145 | print(EbN0_dB_vector[-1], '{:.2e}'.format(BER_vector[-1]), ' Bitrate:','{:.2e}'.format(datarate_Bps) ) 146 | np.savez(os.path.join(pathname, 'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 147 | 148 | # ax = plt.gca() 149 | # ax.clear() 150 | # plt.semilogy(EbN0_dB_vector, BER_vector) 151 | # plt.xlabel('Eb/N0') 152 | # plt.ylabel('Bit Error Rate ') 153 | # plt.grid(True) 154 | # plt.pause(0.0001) 155 | 156 | if (BER_vector[-1] > target_error_rate) and (EbN0_dB < EbN0_dB_max_value): 157 | if BER_vector[-1] < BER_go_on_in_smaller_steps: 158 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth) 159 | else: 160 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth) 161 | 162 | BER_vector = np.append(BER_vector, 0) 163 | else: 164 | ready = True 165 | 166 | 167 | 168 | #Plot 169 | plt.figure() 170 | plt.semilogy(EbN0_dB_vector,BER_vector) 171 | plt.xlabel('Eb/N0') 172 | plt.ylabel('Bit Error Rate ') 173 | plt.grid(True) 174 | plt.show() 175 | 176 | #file = open(os.path.join(pathname,'BER_results.dat'),'w') 177 | #file.write(np.array2string(BER_vector)+'\n') 178 | #file.write(np.array2string(EbN0_dB_vector)) 179 | #file.close() 180 | np.savez(os.path.join(pathname,'BER_results'), EbN0_dB_vector=EbN0_dB_vector, 
BER_vector=BER_vector) 181 | 182 | plt.savefig(os.path.join(pathname,'BER_figure.pgf')) 183 | plt.savefig(os.path.join(pathname,'BER_figure.pdf')) 184 | 185 | res_dict = {"EbN0_dB_vector" : EbN0_dB_vector, "BER_vector":BER_vector 186 | , "decoder_name":decoder_name} 187 | 188 | sio.savemat(os.path.join(pathname,'BER_results.mat'),res_dict) 189 | #push = pb.push_note("Simulation done!", filename) -------------------------------------------------------------------------------- /Irregular_LDPC_Decoding/WLAN/decoder_config_generation.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | from AWGN_Channel_Transmission.AWGN_Discrete_Density_Evolution import \ 4 | AWGN_Discrete_Density_Evolution_class_irregular as DDE_irregular 5 | from Discrete_LDPC_decoding.Information_Matching import * 6 | 7 | __author__ = "Maximilian Stark" 8 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 9 | __credits__ = ["Maximilian Stark"] 10 | __version__ = "1.0" 11 | __email__ = "maximilian.stark@tuhh.de" 12 | __status__ = "Production" 13 | __name__ = "Decoder Generation" 14 | __doc__ = """This script generates a discrete decoder for the desired design-Eb/N0.""" 15 | 16 | # set noise level for DE 17 | EbN0_dB_mapping_gen = 0.7 18 | for EbN0_dB_mapping_gen in np.array([0.6]): 19 | 20 | # set quantizer limits 21 | AD_Max_abs = 3 22 | plt.figure() 23 | 24 | cardinality_Y_channel = 2000 25 | cardinality_T_channel = 32 26 | cardinality_T_decoder_ops = 32 27 | i_max = 50 28 | nror = 10 29 | 30 | # 1 2 3 4 5 6 7 8 31 | d_c_dist = np.array([0,0,0,0,0,0,10/12,2/12]) 32 | # 1 2 3 4 5 6 7 8 9 10 11 33 | d_v_dist = np.array([0,11/24,9/24,1/24,0,0,0,0,0 ,0, 3/24]) 34 | 35 | lambda_vec = convert_node_to_edge_degree(d_v_dist) 36 | rho_vec = convert_node_to_edge_degree(d_c_dist) 37 | R_c = 1 - (d_v_dist*(np.arange(d_v_dist.shape[0])+1)).sum() / (d_c_dist*(np.arange(d_c_dist.shape[0])+1)).sum() # 
code rate 38 | 39 | sigma_n2 = 10**(-EbN0_dB_mapping_gen/10) / (2*R_c) 40 | steps = 5 41 | 42 | config = 'cas' 43 | # generate decoder config 44 | DDE_inst = DDE_irregular(sigma_n2, AD_Max_abs, cardinality_Y_channel, cardinality_T_channel, 45 | cardinality_T_decoder_ops, lambda_vec, rho_vec, i_max, nror , match = True) 46 | 47 | DDE_inst.run_discrete_density_evolution() 48 | DDE_inst.save_config(config) 49 | plt.plot(DDE_inst.DDE_inst_data['MI_T_dvm1_v_X_dvm1_v'],label='match') 50 | 51 | 52 | plt.savefig(str(EbN0_dB_mapping_gen)+config+'.png') 53 | -------------------------------------------------------------------------------- /Irregular_LDPC_Decoding/WLAN/generate_802.11_matrix.py: -------------------------------------------------------------------------------- 1 | """This script generates a parity check matrix for irregular LDPC codes defined in the WLAN 802.11 2012 standart.""" 2 | 3 | import numpy as np 4 | import scipy.io as sio 5 | 6 | 7 | ind_matrix =np.array([[40, -1, -1, -1, 22, -1, 49, 23, 43, -1, -1, -1, 1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], 8 | [50, 1, -1, -1, 48, 35, -1, -1, 13, -1, 30, -1, -1, 0 , 0, -1, -1, -1, -1, -1, -1, -1, -1, -1], 9 | [39, 50, -1, -1, 4, -1, 2, -1, -1, -1, -1, 49, -1, -1, 0 , 0, -1, -1, -1, -1, -1, -1, -1, -1], 10 | [33, -1, -1, 38, 37, -1, -1, 4, 1, -1, -1, -1, -1, -1, -1, 0 , 0, -1, -1, -1, -1, -1, -1, -1], 11 | [45, -1, -1, -1, 0, 22, -1, -1, 20, 42, -1, -1,-1, -1, -1, -1, 0 , 0, -1, -1, -1, -1, -1, -1], 12 | [51, -1, -1, 48, 35, -1, -1, -1, 44, -1, 18, -1,-1, -1, -1, -1, -1, 0 , 0, -1, -1, -1, -1, -1], 13 | [47, 11, -1, -1, -1, 17, -1, -1, 51, -1, -1, -1, 0, -1, -1, -1, -1, -1, 0 , 0, -1, -1, -1, -1], 14 | [5, -1, 25, -1, 6, -1, 45, -1, 13, 40, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0 , 0, -1, -1, -1], 15 | [33, -1, -1, 34, 24, -1, -1, -1, 23, -1, -1, 46, -1, -1, -1, -1, -1, -1, -1, -1, 0 , 0, -1, -1], 16 | [1, -1, 27, -1, 1, -1, -1, -1, 38, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0 , 0, -1], 17 | [-1, 18, -1, 
-1, 23, -1, -1, 8, 0, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0 , 0], 18 | [49, -1, 17, -1, 30, -1, -1, -1, 34, -1, -1, 19, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0], 19 | ]) 20 | 21 | Z=54 22 | H= np.empty(Z*24)[np.newaxis,:] 23 | ys = np.array([], dtype=np.int64).reshape(0, 5) 24 | for i in range(ind_matrix.shape[0]): 25 | H_row = np.empty((Z, Z)) 26 | for j in range(ind_matrix.shape[1]): 27 | if ind_matrix[i,j]>=0: 28 | a= np.roll(np.eye(Z),ind_matrix[i,j],axis=1) 29 | else: 30 | a=np.zeros((Z,Z)) 31 | H_row=np.concatenate((H_row,a),axis=1) 32 | H=np.concatenate((H,H_row[:,-1296:]),axis=0) 33 | 34 | H= H[1:,:] 35 | print(np.unique(H.sum(1))) 36 | print((H.sum(1)==8).sum()) 37 | print(np.unique(H.sum(0))) 38 | 39 | dictionary={} 40 | dictionary['H_mat']=H 41 | sio.savemat('WLAN_H.mat',dictionary) 42 | 43 | np.save('WLAN_H', H) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Maximilian Stark, Institute of Communications, 4 | Hamburg University of Technology 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Decoding of LDPC Codes Using the Information Bottleneck Method in Python 2 | 3 | ### Introduction 4 | 5 | This source code is intended to reproduce the results published in [LB18,LBT+18] and [SLB18, Sta18]. In these works, it is explained how the information bottleneck method can be used to design decoders for regular and irregular LDPC codes. 6 | 7 | 8 | ### Requirements 9 | 10 | 1. Download and install Python 3.6 (we recommend Anaconda) 11 | 2. Download and install the [ib_base package](https://collaborating.tuhh.de/cip3725/ib_base) 12 | 3. Clone the git repository 13 | 4. Installation requires the following packages: 14 | * numpy 15 | * [IB_Base](https://collaborating.tuhh.de/cip3725/ib_base) 16 | * [PyOpenCl.2018](https://documen.tician.de/pyopencl/misc.html) 17 | * mako 18 | * [progressbar](https://pypi.org/project/progressbar2/) 19 | 20 | 21 | ### Documentation 22 | 23 | #### Generate a Decoder 24 | To construct a decoder, make use of the scripts `decoder_config_generation.py` in the different folders. Here, you can enter: 25 | - the maximum number of decoding iterations $i_{max}$ 26 | - the cardinality of the exchanged messages $|\mathcal{T}|$ 27 | - the degree distributions $\mathbf{d}_c$ and $\mathbf{d}_v$ 28 | - the design-$E_b/N_0$ for which you want to construct the decoder 29 | 30 | #### Running a BER Simulation 31 | You can run either benchmark simulations for the belief-propagation decoding, belief-propagation decoding with a channel output quantizer or min-sum decoding. 
Please make sure that you have OpenCL set up correctly. 32 | 33 | ###### Benchmark simulations 34 | To run the benchmark simulations just use `BER_simulation_OpenCL_min_sum.py` or `BER_simulation_OpenCL_quant_BP.py` in the respective folders. 35 | 36 | ###### Information bottleneck decoding 37 | To run BER simulations just use `BER_simulation_OpenCL_enc.py` in the respective folders. The ending "enc" indicates that the transmission chain uses the actual LDPC encoder, instead of transmitting the all-zeros codeword. 38 | 39 | **Note:** Make sure that you have an IB decoder generated before running a simulation. 40 | 41 | 42 | A detailed documentation of all provided functions and a more complete test suite will be available soon. 43 | 44 | ### Citation 45 | 46 | The code is distributed under the MIT license. When using the code for your research please cite our work. 47 | 48 | ### References 49 | [LBT+18] J. Lewandowsky, G. Bauch, M. Tschauner, and P. Oppermann, “Design and Evaluation of Information Bottleneck LDPC Decoders for Software Defined Radios,” in Proc. 12th International Conference on Signal Processing and Communication Systems (ICSPCS), Australia, 2018. 50 | 51 | [SLB18] M. Stark, J. Lewandowsky, G. Bauch. “Information-Bottleneck Decoding of High-Rate Irregular LDPC Codes for Optical Communication using Message Alignment”. Applied Sciences. 2018; 8(10):1884. 52 | 53 | [SLB18a] M. Stark, J. Lewandowsky, and G. Bauch, “Information-Optimum LDPC Decoders with Message Alignment for Irregular Codes,” in 2018 IEEE Global Communications Conference (Globecom2018), Abu Dhabi, United Arab Emirates, 2018. 54 | 55 | [LB18] J. Lewandowsky and G. Bauch, “Information-Optimum LDPC Decoders Based on the Information Bottleneck Method,” IEEE Access, vol. 6, pp. 4054–4071, 2018. 
https://ieeexplore.ieee.org/document/8268118 56 | 57 | 58 | -------------------------------------------------------------------------------- /Regular_LDPC_Decoding/BPSK/BER_simulation_OpenCL.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import pickle 3 | import time 4 | 5 | import matplotlib as mpl 6 | import numpy as np 7 | 8 | from AWGN_Channel_Transmission.AWGN_Quantizer_BPSK import AWGN_Channel_Quantizer 9 | from AWGN_Channel_Transmission.LDPC_Transmitter import LDPC_BPSK_Transmitter as transmitter 10 | from Discrete_LDPC_decoding.discrete_LDPC_decoder import Discrete_LDPC_Decoder_class as LDPC_Decoder 11 | 12 | mpl.use("pgf") 13 | pgf_with_pdflatex = { 14 | "pgf.texsystem": "pdflatex", 15 | "text.usetex": True, 16 | "font.family": "serif" 17 | } 18 | mpl.rcParams.update(pgf_with_pdflatex) 19 | import matplotlib.pyplot as plt 20 | __author__ = "Maximilian Stark" 21 | __copyright__ = "2016, Institute of Communications, Hamburg University of Technology " 22 | __credits__ = ["Maximilian Stark"] 23 | __version__ = "1.0" 24 | __email__ = "maximilian.stark@tuhh.de" 25 | __status__ = "Production" 26 | __name__ = "Simulation Env" 27 | __doc__ = """This script sets up a proper simulation environment to analyse a purely discrete decoder that works only 28 | on integers. 
The BER performance of the chosen decoder can be stored and compared.""" 29 | 30 | 31 | #os.environ['PYOPENCL_CTX'] = '1' 32 | np.seterr(all='raise') 33 | 34 | # Load stored data 35 | filepath ="../../LDPC_codes/8000.4000.3.483.matlab" 36 | 37 | decoder_name = 'decoder_config_EbN0_gen_1.05_16.pkl' 38 | pkl_file = open(decoder_name, 'rb') 39 | generated_decoder = pickle.load(pkl_file) 40 | 41 | Trellis_checknodevector_a = generated_decoder['Trellis_checknodevector_a'] 42 | Trellis_varnodevector_a = generated_decoder['Trellis_varnodevector_a'] 43 | 44 | # Encode Codeword 45 | 46 | # Human choice 47 | AD_max_abs = 3 48 | cardinality_Y_channel = 2000 49 | cardinality_T_channel = 16 50 | cardinality_T_decoder_ops = generated_decoder['cardinality_T_decoder_ops'] 51 | msg_at_time = 100 52 | min_errors = 7000 53 | 54 | imax = generated_decoder['imax'] 55 | N_var = 8000 56 | 57 | #sets the start EbN0_dB value 58 | EbN0_dB_max_value = 2 59 | 60 | #simulation runs until this BER is achieved 61 | target_error_rate=1e-6 62 | BER_go_on_in_smaller_steps=1e-6 63 | 64 | #in steps of size.. 
65 | EbN0_dB_normal_stepwidth=0.1 66 | EbN0_dB_small_stepwidth=0.1 67 | 68 | # start EbN0 simulation 69 | EbN0_dB = 0 70 | EbN0_dB_ind = 0 71 | BER_vector = np.array([0.]) 72 | EbN0_dB_vector = np.array([EbN0_dB]) 73 | ready = False 74 | NR_BLOCKS_PER_CONTROL_MSG = 100 75 | 76 | transi = transmitter(filepath, msg_at_time) 77 | decodi = LDPC_Decoder(filepath, imax, cardinality_T_channel, cardinality_T_decoder_ops, Trellis_checknodevector_a, 78 | Trellis_varnodevector_a,msg_at_time) 79 | 80 | 81 | while not ready: 82 | EbN0_dB_ind += EbN0_dB_ind 83 | EbN0_dB = EbN0_dB_vector[-1] 84 | 85 | sigma_n2 = 10**(-EbN0_dB/10) / (2*transi.R_c) 86 | 87 | #chani = AWGN_channel(sigma_n2) 88 | 89 | quanti = AWGN_Channel_Quantizer(sigma_n2, AD_max_abs, cardinality_T_channel, cardinality_Y_channel) 90 | quanti.init_OpenCL_quanti(N_var,msg_at_time,return_buffer_only=True) 91 | decodi.init_OpenCL_decoding(msg_at_time,quanti.context) 92 | 93 | 94 | errors = 0 95 | transmitted_blocks = 0 96 | # transmit 97 | start = time.time() 98 | while errors < min_errors: 99 | 100 | rec_data_quantized = quanti.quantize_direct_OpenCL(N_var, msg_at_time) 101 | decoded_mat = decodi.decode_OpenCL(rec_data_quantized,buffer_in=True,return_buffer=True) 102 | 103 | errors += decodi.return_errors_all_zero(decoded_mat) 104 | transmitted_blocks += + msg_at_time 105 | 106 | 107 | if np.mod(transmitted_blocks, NR_BLOCKS_PER_CONTROL_MSG) == 0: 108 | time_so_far = time.time()-start 109 | time_per_error = (time_so_far / (errors+1)) #+1 to avoid devide by 0 errors 110 | estim_minutes_left = ((min_errors * time_per_error) - time_so_far) / 60 111 | 112 | print('EbN0_dB=', EbN0_dB, ', ' 113 | 'errors=', errors, 114 | ' elapsed time this run=', time_so_far, 115 | ' BER_estimate=','{:.2e}'.format( (errors / (transmitted_blocks * N_var))), 116 | ' datarate_Bps =', '{:.2e}'.format( (transmitted_blocks * N_var) / time_so_far), 117 | ' estim_minutes_left=',estim_minutes_left) 118 | 119 | 120 | 121 | end = time.time() 122 | 
123 | BER_vector[-1] = errors / (transmitted_blocks * N_var) 124 | spent = end-start 125 | datarate_Bps = (transmitted_blocks * N_var) / spent 126 | print(EbN0_dB_vector[-1], '{:.2e}'.format(BER_vector[-1]), ' Bitrate:','{:.2e}'.format(datarate_Bps) ) 127 | 128 | if (BER_vector[-1] > target_error_rate) and (EbN0_dB < EbN0_dB_max_value): 129 | if BER_vector[-1] < BER_go_on_in_smaller_steps: 130 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth) 131 | else: 132 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth) 133 | 134 | BER_vector = np.append(BER_vector, 0) 135 | else: 136 | ready = True 137 | 138 | 139 | 140 | import time 141 | import scipy.io as sio 142 | 143 | timestr = time.strftime("%y%m%d-%H%M") 144 | 145 | filename = os.path.splitext(decoder_name)[0].replace('.','').replace('_config','')+'_'+timestr 146 | pathname = os.path.join('BER_Results', filename.replace('.', '')) 147 | os.makedirs(os.path.dirname(os.path.join(pathname,' ')), exist_ok=True) 148 | 149 | 150 | #Plot 151 | plt.semilogy(EbN0_dB_vector,BER_vector) 152 | plt.xlabel('Eb/N0') 153 | plt.ylabel('Bit Error Rate ') 154 | plt.grid(True) 155 | plt.show() 156 | 157 | np.savez(os.path.join(pathname,'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 158 | 159 | plt.savefig(os.path.join(pathname,'BER_figure.pgf')) 160 | plt.savefig(os.path.join(pathname,'BER_figure.pdf')) 161 | 162 | res_dict = {"EbN0_dB_vector" : EbN0_dB_vector, "BER_vector":BER_vector 163 | , "decoder_name":decoder_name} 164 | 165 | sio.savemat(os.path.join(pathname,'BER_results.mat'),res_dict) -------------------------------------------------------------------------------- /Regular_LDPC_Decoding/BPSK/BER_simulation_OpenCL_min_sum.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import time 3 | 4 | import matplotlib as mpl 5 | import numpy as np 6 | import scipy.io as sio 7 | 8 
| from AWGN_Channel_Transmission.AWGN_Quantizer_BPSK import AWGN_Channel_Quantizer 9 | from AWGN_Channel_Transmission.LDPC_Transmitter import LDPC_BPSK_Transmitter as transmitter 10 | from Continous_LDPC_Decoding.min_sum_decoder_irreg import Min_Sum_Decoder_class_irregular as LDPC_Decoder 11 | 12 | mpl.use("pgf") 13 | pgf_with_pdflatex = { 14 | "pgf.texsystem": "pdflatex", 15 | "text.usetex": True, 16 | "font.family": "serif" 17 | } 18 | mpl.rcParams.update(pgf_with_pdflatex) 19 | import matplotlib.pyplot as plt 20 | 21 | __author__ = "Maximilian Stark" 22 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 23 | __credits__ = ["Maximilian Stark"] 24 | __version__ = "1.0" 25 | __email__ = "maximilian.stark@tuhh.de" 26 | __status__ = "Production" 27 | __name__ = "Simulation Env" 28 | __doc__ = """This script sets up a proper simulation environment to analyse a min-sum decoder. 29 | The BER performance of the chosen decoder can be stored and compared.""" 30 | 31 | #os.environ['PYOPENCL_CTX'] = '1' # CPU 32 | os.environ['PYOPENCL_CTX'] = '0' # GraKA 33 | os.environ['PYOPENCL_COMPILER_OUTPUT']='1' 34 | np.seterr(all='raise') 35 | 36 | filepath ="../LDPC_codes/8000.4000.3.483.matlab" 37 | 38 | # Encode Codeword 39 | 40 | # Human choice 41 | AD_max_abs = 3 42 | cardinality_Y_channel = 2000 43 | cardinality_T_channel = 16 44 | cardinality_T_decoder_ops = 16 45 | msg_at_time = 400 46 | min_errors = 7000 47 | 48 | imax =50 49 | N_var = 8000 50 | 51 | decoder_name = 'minsum_'+str(cardinality_T_decoder_ops) 52 | 53 | timestr = time.strftime("%y%m%d-%H%M") 54 | 55 | filename = os.path.splitext(decoder_name)[0].replace('.','').replace('_config','')+'_'+timestr 56 | pathname = os.path.join('BER_Results', filename.replace('.', '')) 57 | os.makedirs(os.path.dirname(os.path.join(pathname,' ')), exist_ok=True) 58 | 59 | 60 | #sets the start EbN0_dB value 61 | EbN0_dB_max_value = 2.5 62 | 63 | #simulation runs until this BER is achieved 64 | 
target_error_rate=1e-6 65 | BER_go_on_in_smaller_steps=1e-6 66 | 67 | #in steps of size.. 68 | EbN0_dB_normal_stepwidth=0.1 69 | EbN0_dB_small_stepwidth=0.1 70 | 71 | # start EbN0 simulation 72 | EbN0_dB = 0 73 | EbN0_dB_ind = 0 74 | BER_vector = np.array([0.]) 75 | EbN0_dB_vector = np.array([EbN0_dB]) 76 | ready = False 77 | NR_BLOCKS_PER_CONTROL_MSG = 100 78 | 79 | transi = transmitter(filepath, msg_at_time) 80 | decodi = LDPC_Decoder(filepath, imax, cardinality_T_channel, msg_at_time) 81 | 82 | 83 | while not ready: 84 | EbN0_dB_ind += EbN0_dB_ind 85 | EbN0_dB = EbN0_dB_vector[-1] 86 | 87 | sigma_n2 = 10**(-EbN0_dB/10) / (2*transi.R_c) 88 | 89 | quanti = AWGN_Channel_Quantizer(sigma_n2, AD_max_abs, cardinality_T_channel, cardinality_Y_channel) 90 | quanti.init_OpenCL_quanti(N_var,msg_at_time,return_buffer_only=True) 91 | decodi.init_OpenCL_decoding(msg_at_time,quanti.context) 92 | 93 | 94 | errors = 0 95 | errors1 = 0 96 | 97 | transmitted_blocks = 0 98 | # transmit 99 | start = time.time() 100 | while errors < min_errors: 101 | 102 | 103 | rec_data_quantized = quanti.quantize_direct_OpenCL_LLR(N_var, msg_at_time) 104 | decoded_mat = decodi.decode_OpenCL_min_sum(rec_data_quantized,buffer_in=True,return_buffer=True) 105 | errors += decodi.return_errors_all_zero(decoded_mat) 106 | transmitted_blocks += + msg_at_time 107 | 108 | if np.mod(transmitted_blocks, NR_BLOCKS_PER_CONTROL_MSG) == 0: 109 | time_so_far = time.time()-start 110 | time_per_error = (time_so_far / (errors+1)) #+1 to avoid devide by 0 errors 111 | estim_minutes_left = ((min_errors * time_per_error) - time_so_far) / 60 112 | 113 | print('EbN0_dB=', EbN0_dB, ', ' 114 | 'errors=', errors, 115 | ' elapsed time this run=', time_so_far, 116 | ' BER_estimate=','{:.2e}'.format( (errors / (transi.R_c*transmitted_blocks * N_var))), 117 | ' datarate_Bps =', '{:.2e}'.format( (transi.R_c*transmitted_blocks * N_var) / time_so_far), 118 | ' estim_minutes_left=',estim_minutes_left) 119 | 120 | 121 | 122 | end = 
time.time() 123 | 124 | BER_vector[-1] = errors / (transi.R_c*transmitted_blocks * N_var) 125 | spent = end-start 126 | datarate_Bps = (transi.R_c*transmitted_blocks * N_var) / spent 127 | print(EbN0_dB_vector[-1], '{:.2e}'.format(BER_vector[-1]), ' Bitrate:','{:.2e}'.format(datarate_Bps) ) 128 | np.savez(os.path.join(pathname, 'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 129 | 130 | if (BER_vector[-1] > target_error_rate) and (EbN0_dB < EbN0_dB_max_value): 131 | if BER_vector[-1] < BER_go_on_in_smaller_steps: 132 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_small_stepwidth) 133 | else: 134 | EbN0_dB_vector = np.append(EbN0_dB_vector, EbN0_dB_vector[-1] + EbN0_dB_normal_stepwidth) 135 | 136 | BER_vector = np.append(BER_vector, 0) 137 | else: 138 | ready = True 139 | 140 | 141 | 142 | #Plot 143 | plt.figure() 144 | plt.semilogy(EbN0_dB_vector,BER_vector) 145 | plt.xlabel('Eb/N0') 146 | plt.ylabel('Bit Error Rate ') 147 | plt.grid(True) 148 | plt.show() 149 | 150 | np.savez(os.path.join(pathname,'BER_results'), EbN0_dB_vector=EbN0_dB_vector, BER_vector=BER_vector) 151 | 152 | plt.savefig(os.path.join(pathname,'BER_figure.pgf')) 153 | plt.savefig(os.path.join(pathname,'BER_figure.pdf')) 154 | 155 | res_dict = {"EbN0_dB_vector" : EbN0_dB_vector, "BER_vector":BER_vector 156 | , "decoder_name":decoder_name} 157 | 158 | sio.savemat(os.path.join(pathname,'BER_results.mat'),res_dict) 159 | -------------------------------------------------------------------------------- /Regular_LDPC_Decoding/BPSK/decoder_config_generation.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | from AWGN_Channel_Transmission.AWGN_Discrete_Density_Evolution import AWGN_Discrete_Density_Evolution_class as DDE 5 | 6 | __author__ = "Maximilian Stark" 7 | __copyright__ = "2016, Institute of Communications, University of Technology Hamburg" 8 | 
__credits__ = ["Maximilian Stark"] 9 | __version__ = "1.0" 10 | __email__ = "maximilian.stark@tuhh.de" 11 | __status__ = "Production" 12 | __name__ = "Decoder Generation" 13 | __doc__ = """This script generates a discrete decoder for the desired design-Eb/N0.""" 14 | # set noise level for DE 15 | 16 | EbN0_dB_mapping_gen = 1.25 17 | 18 | 19 | # set quantizer limits 20 | AD_Max_abs = 3 21 | 22 | 23 | cardinality_Y_channel = 2000 24 | cardinality_T_channel = 16 25 | cardinality_T_decoder_ops = 16 26 | i_max = 250 27 | nror = 5 28 | 29 | # set code related parameters 30 | d_v = 3 31 | d_c = 6 32 | 33 | R_c = 1-d_v/d_c # code rate 34 | 35 | sigma_n2 = 10**(-EbN0_dB_mapping_gen/10) / (2*R_c) 36 | 37 | # generate decoder config 38 | DDE_inst = DDE(sigma_n2, AD_Max_abs, cardinality_Y_channel, cardinality_T_channel, 39 | cardinality_T_decoder_ops, d_v, d_c, i_max, nror ) 40 | DDE_inst.run_discrete_density_evolution() 41 | #DDE_inst.save_config() 42 | 43 | # generate trajectory 44 | 45 | x_vec = np.zeros(2*i_max-1) 46 | y_vec = np.zeros(2*i_max-1) 47 | 48 | x_vec[0] = 0 49 | y_vec[0] = DDE_inst.DDE_inst_data['ext_mi_varnode_in_iter'][0] 50 | 51 | 52 | for i in range(1,i_max): 53 | x_vec[2*i-1] = DDE_inst.DDE_inst_data['ext_mi_checknode_in_iter'][i-1] 54 | y_vec[2*i-1] = y_vec[2*i-2] 55 | 56 | x_vec[2 * i] = x_vec[2*i-1] 57 | y_vec[2 * i] = DDE_inst.DDE_inst_data['ext_mi_varnode_in_iter'][i] 58 | 59 | plt.plot(x_vec,y_vec) 60 | 61 | plt.show() 62 | -------------------------------------------------------------------------------- /setup_mult_c.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | 3 | import numpy 4 | from Cython.Build import cythonize 5 | 6 | setup( 7 | ext_modules=cythonize("Discrete_LDPC_decoding/GF2MatrixMul_c.pyx"), 8 | include_dirs=[numpy.get_include()] 9 | ) --------------------------------------------------------------------------------