├── LICENSE ├── README.md ├── examples ├── example_channel │ ├── 1_channel.py │ ├── 2_ctle.py │ ├── 3_ffe_dfe.py │ ├── 4_xtalk.py │ ├── 5_BER_test.py │ └── 6_FEC.py └── problem_set │ ├── HW1.py │ ├── HW2.py │ ├── HW3.py │ ├── HW4.py │ ├── HW6.py │ ├── HW7.py │ └── problem_set.pdf ├── serdespy ├── __init__.py ├── chmodel.py ├── eye_diagram.py ├── four_port_to_diff.py ├── prs.py ├── receiver.py ├── reedsolo.py ├── rs_code.py ├── signal.py └── transmitter.py └── setup.py /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2022 Richard Barrie 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # serdespy 2 | A python library for system-level SerDes modelling and simulation 3 | 4 | ## Table of Contents 5 | * [General Info](#general-information) 6 | * [Features](#features) 7 | * [Setup](#setup) 8 | * [Examples](#examples) 9 | * [Project Status](#project-status) 10 | 11 | ## General Information 12 | 13 | Richard Barrie's undergraduate thesis project. University of Toronto, Engineering Science. ESC499 2021/2022. 14 | Supervisors: Tony Chan Causone, Ming Yang 15 | Authors: Richard Barrie, Katherine Liang 16 | 17 | ## Features 18 | Includes functions and classes for time-domain model of serdes system 19 | - Channel Modelling 20 | - TX FIR Filter 21 | - TX Jitter 22 | - Continuous-Time Linear Equalizer (CTLE) 23 | - Feed-Forward Equalizer (FFE) 24 | - Decision Feedback Equalizer (DFE) 25 | - Maximum-Likelihood Sequence Estimation (MLSE) 26 | - PRBS/PRQS generation 27 | - Eye Diagram Plotter 28 | - Bit Error Rate Checker 29 | - Forward Error Correction with Reed-Solomon Codes 30 | 31 | ## Setup 32 | Python 3.7+ required 33 | `pip install serdespy` 34 | 35 | ## Examples 36 | The 'examples' directory contains some scripts and documentation on how to use serdespy. It contains 2 subdirectories: 37 | 38 | - /problem_set 39 | - contains a problem set on wireline links "problem_set.pdf", and scripts that complete the problems using the serdespy package 40 | - files should be run in numerical order 41 | 42 | - /example_channel 43 | - this directory contains a series of scripts that perform modelling and simulation on a model of a 100G PAM-4 copper channel from 4-port s-paramaters 44 | - instructions for downloading the s-paramater files and running the scripts are in the header of "1_channel.py" 45 | - files should be run in numerical order 46 | -------------------------------------------------------------------------------- /examples/example_channel/1_channel.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file shows example of loading in touchstone file for differential channel and generating bode plot and impulse response 3 | 4 | To run this file: 5 | - download zipped touchstone files from : https://www.ieee802.org/3/ck/public/tools/cucable/mellitz_3ck_04_1119_CACR.zip 6 | - Place the file Tp0_Tp5_28p5db_FQSFP_thru.s4p in the working directory, with this file. It contains s-paramater measurements for 2m copper cable connector with 28dB insertion loss at 26.56Ghz from IEEE 802.df public channels 7 | - create a subdirectory called 'data' in the working directory. this is where data generated from this script will be saved for use in other examples 8 | """ 9 | 10 | #import packages 11 | import serdespy as sdp 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | import skrf as rf 15 | import scipy as sp 16 | 17 | #load in touchstone file containing s-params of 2m copper cable connector 18 | thru_file = "./Tp0_Tp5_28p5db_FQSFP_thru.s4p" 19 | thru_network = rf.Network(thru_file) 20 | 21 | #port definition, is defined in the header of the touchstone file 22 | port_def = np.array([[0, 1],[2, 3]]) 23 | 24 | #for pam-4 signalling at 106.24 Gb/s 25 | nyquist_f = 26.56e9 26 | 27 | #time per pam-4 symbol 28 | symbol_t = 1/(2*nyquist_f) 29 | 30 | #oversampling ratio is 64 samples per 4-pam symbol, to get smooth eye diagrams 31 | samples_per_symbol = 64 32 | 33 | #compute desired timestep for impulse response 34 | t_d = symbol_t/samples_per_symbol 35 | 36 | #load and source impedance are matched 50 ohms, because charictaristic empedance of the the channel is 50 ohms 37 | Zs = 50 38 | Zl = 50 39 | 40 | #compute differential transfer function and impulse response from s-params 41 | H_thru, f, h_thru, t = sdp.four_port_to_diff(thru_network, port_def, Zs, Zl, option = 1, t_d = t_d) 42 | 43 | #Plot transfer function of Channel 44 | plt.figure(dpi = 1200) 45 | plt.plot(1e-9*f,20*np.log10(abs(H_thru)), color = "blue", label = "THRU channel", linewidth = 0.8) 46 | plt.ylabel('Mag. Response [dB]') 47 | plt.xlabel('Frequency [GHz]') 48 | plt.axvline(x=26.56,color = 'grey', label = "Nyquist Frequency") 49 | plt.title("Bode Plot") 50 | plt.grid() 51 | plt.legend() 52 | 53 | #visualize pulse response 54 | pulse_response = sp.signal.fftconvolve(h_thru, np.ones(samples_per_symbol), mode = "same") 55 | sdp.channel_coefficients(pulse_response, t, samples_per_symbol, 3, 20) 56 | 57 | #crop impulse response and save 58 | plt.figure(dpi = 1200) 59 | h_thru_crop = h_thru[44500:47500] 60 | plt.plot(h_thru_crop) 61 | 62 | #save pulse response, transfer function, and frequency vector, used in other example files 63 | 64 | #save data 65 | np.save("./data/h_thru.npy",h_thru_crop) 66 | np.save("./data/f.npy",f) 67 | np.save("./data/TF_thru.npy",H_thru) -------------------------------------------------------------------------------- /examples/example_channel/2_ctle.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file shows example of CTLE modelling, and generates the impulse response of the channel and CTLE 3 | 4 | Requires running 1_channel.py first 5 | """ 6 | 7 | #import useful packages 8 | import serdespy as sdp 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | import skrf as rf 12 | import scipy as sp 13 | 14 | #load in data generated by 1_channel.py 15 | f = np.load("./data/f.npy") 16 | h = np.load("./data/h_thru.npy") 17 | H = np.load("./data/TF_thru.npy") 18 | 19 | #frequency vector in rad/s 20 | w = f*(2*np.pi) 21 | 22 | #oversampling ratio used in 1_channel.py 23 | samples_per_symbol = 64 24 | 25 | #set poles and zeroes for peaking at nyquist freq 26 | #high peaking because channel is high insertion loss 27 | z = 2e10 28 | p = 1.7e11 29 | k = p**2/z 30 | 31 | #calculate Frequency response of CTLE at given frequencies 32 | w, H_ctle = sp.signal.freqs([k/p**2, k*z/p**2], [1/p**2, 2/p, 1], w) 33 | 34 | #bode plot of CTLE transfer function 35 | plt.figure(dpi=600) 36 | plt.semilogx(1e-9*f,20*np.log10(abs(H_ctle)), color = "red", label = 'CTLE') 37 | plt.title("CTLE Frequency Response") 38 | plt.grid() 39 | plt.axvline(x=25,color = 'grey', label = "Nyquist Frequency") 40 | plt.axvline(x=z/(2*np.pi)*1e-9,color = 'green', label = "Zero Location") 41 | plt.axvline(x=p/(2*np.pi)*1e-9,color = 'blue', label = "Pole Location") 42 | plt.legend() 43 | 44 | 45 | #%% compute and save impulse response of CTLE transfer function 46 | h_ctle, t_ctle = sdp.freq2impulse(H_ctle,f) 47 | h_ctle = h_ctle[0:200] 48 | plt.figure(dpi=600) 49 | plt.plot(h_ctle) 50 | 51 | np.save("./data/h_ctle.npy", h_ctle) 52 | 53 | #%% plot eye diagram with and without CTLE 54 | voltage_levels = np.array([-3,-1,1,3]) 55 | 56 | nyquist_f = 26.56e9 57 | 58 | data = sdp.prqs10(1) 59 | 60 | TX = sdp.Transmitter(data[:10000], voltage_levels, nyquist_f) 61 | 62 | TX.oversample(samples_per_symbol) 63 | 64 | signal_out = sp.signal.fftconvolve(TX.signal_ideal, h, mode = "same")[:64*1000*5] 65 | 66 | sdp.simple_eye(signal_out[1000:], samples_per_symbol*3, 1000, TX.UI/TX.samples_per_symbol, "Eye Diagram") 67 | 68 | signal_out_ctle = sp.signal.fftconvolve(signal_out, h_ctle, mode = "same") 69 | 70 | sdp.simple_eye(signal_out_ctle[1000:], samples_per_symbol*3, 1000, TX.UI/TX.samples_per_symbol, "Eye Diagram with CTLE") 71 | -------------------------------------------------------------------------------- /examples/example_channel/3_ffe_dfe.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file shows example of FFE and DFE. Equalization of the signal with CTLE generated in 2_ctle.py 3 | """ 4 | 5 | #import useful packages 6 | import serdespy as sdp 7 | import numpy as np 8 | import matplotlib.pyplot as plt 9 | import skrf as rf 10 | import scipy as sp 11 | 12 | #load in data, and set up paramaters 13 | h = np.load("./data/h_thru.npy") 14 | h_ctle = np.load("./data/h_ctle.npy") 15 | samples_per_symbol = 64 16 | nyquist_f = 26.56e9 17 | 18 | #pulse response of channel is convolution of impulse response and 1 UI of ones 19 | pulse_response = sp.signal.fftconvolve(h, np.ones(samples_per_symbol), mode = "full") 20 | 21 | #plot channel coefficients for pulse response 22 | sdp.channel_coefficients(pulse_response, np.linspace(1,pulse_response.size,pulse_response.size), samples_per_symbol, 3, 20, title = "Pulse Response") 23 | 24 | #pulse response with CTLE 25 | pulse_response_ctle = sp.signal.fftconvolve(pulse_response, h_ctle, mode = "full") 26 | 27 | #plot channel coefficients. observer lower post-cursor ISI 28 | sdp.channel_coefficients(pulse_response_ctle, np.linspace(1,pulse_response_ctle.size,pulse_response_ctle.size), samples_per_symbol, 3, 20, title = "Pulse Response with CTLE") 29 | 30 | #%% pick 1 tap TX FIR FILTER 31 | 32 | #arbitrarily take -0.05 tap weight to reduce precursor ISI 33 | tx_fir_tap_weights = np.array([-0.05, 1]) 34 | 35 | #oversample 36 | pulse_response_fir = sp.signal.fftconvolve(h, np.repeat(tx_fir_tap_weights,samples_per_symbol), mode = "full") 37 | 38 | #convolution with pulse response 39 | pulse_response_fir_ctle = sp.signal.fftconvolve(pulse_response_fir, h_ctle, mode = "full") 40 | 41 | #plot channel coefficients 42 | channel_coefficients = sdp.channel_coefficients(pulse_response_fir_ctle, np.linspace(1,pulse_response_fir_ctle.size,pulse_response_fir_ctle.size), samples_per_symbol, 3, 20, title = "Pulse Response with FIR and CTLE")[:4] 43 | 44 | 45 | #%% Pick 3 precursor tap FFE 46 | 47 | n_taps_pre = 3 48 | 49 | #calculate RX FFE coeffiecients to force precursor ISI to 0 50 | ffe_tap_weights = sdp.forcing_ffe(n_taps_pre, channel_coefficients) 51 | 52 | #oversample ffe tap weights so we can perform convolution on 64X oversampled sign 53 | rx_ffe_conv = np.zeros(64*ffe_tap_weights.size) 54 | 55 | for i in range(ffe_tap_weights.size): 56 | rx_ffe_conv[i*64] = ffe_tap_weights[i] 57 | 58 | #apply FFE to pulse response 59 | pulse_response_fir_ctle_ffe = sp.signal.fftconvolve(pulse_response_fir_ctle, rx_ffe_conv, mode = "full") 60 | 61 | #plot channel coefficients. observe lower pre-cursor ISI 62 | channel_coefficients = sdp.channel_coefficients(pulse_response_fir_ctle_ffe, np.linspace(1,pulse_response_fir_ctle_ffe.size,pulse_response_fir_ctle_ffe.size), samples_per_symbol, 3, 20) 63 | 64 | #amplitude of main cursor 65 | main_cursor = channel_coefficients[3] 66 | 67 | #zero-forcing DFE weights 68 | dfe_tap_weights = channel_coefficients[4:] 69 | 70 | #%% do time-domain simulation and plot eye diagrams 71 | 72 | voltage_levels = np.array([-3,-1,1,3]) 73 | 74 | data = sdp.prqs10(1) 75 | 76 | TX = sdp.Transmitter(data[:10000], voltage_levels, nyquist_f) 77 | 78 | #apply FIR filter to transmitter waveform 79 | TX.FIR(tx_fir_tap_weights) 80 | 81 | #oversample data to 64 samples/symbols 82 | TX.oversample(samples_per_symbol) 83 | 84 | #eye diagram of transmitter waveform 85 | sdp.simple_eye(TX.signal_ideal[64*3:], samples_per_symbol*3, 500, TX.UI/TX.samples_per_symbol, "TX Ideal Eye Diagram with FIR filter") 86 | 87 | #signal at output of channel 88 | signal_out = sp.signal.fftconvolve(TX.signal_ideal, h, mode = "same")[:64*500*12] 89 | 90 | #signal at output of CTLE 91 | signal_out_ctle = sp.signal.fftconvolve(signal_out, h_ctle, mode = "same") 92 | 93 | #plot eye diagram 94 | sdp.simple_eye(signal_out_ctle, samples_per_symbol*3, 1000, TX.UI/TX.samples_per_symbol, "Eye Diagram with CTLE") 95 | 96 | #%% 97 | RX = sdp.Receiver(signal_out_ctle, samples_per_symbol, nyquist_f, voltage_levels, shift = True, main_cursor = main_cursor) 98 | 99 | #sdp.simple_eye(RX.signal, samples_per_symbol*3, 800, TX.UI/TX.samples_per_symbol, "Eye Diagram with CTLE") 100 | 101 | #signal after FFE 102 | RX.FFE(ffe_tap_weights, n_taps_pre) 103 | 104 | #sdp.simple_eye(RX.signal, samples_per_symbol*3, 800, TX.UI/TX.samples_per_symbol, "Eye Diagram with CTLE and FFE") 105 | #signal after DFE 106 | RX.pam4_DFE(dfe_tap_weights) 107 | 108 | #plot eye diagram 109 | sdp.simple_eye(RX.signal[64*300:], samples_per_symbol*3, 1000, TX.UI/TX.samples_per_symbol, f"Eye Diagram with CTLE, FFE, and DFE") 110 | 111 | #%%save the tap weights chosen 112 | np.save("./data/rx_ffe_tap_weights.npy",ffe_tap_weights) 113 | np.save("./data/rx_dfe_tap_weights.npy",dfe_tap_weights) 114 | np.save("./data/tx_fir_tap_weights.npy",tx_fir_tap_weights) 115 | -------------------------------------------------------------------------------- /examples/example_channel/4_xtalk.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file shows example of generating response of crosstalk on the channel 3 | 4 | Running this file is optional, you can skip to 5_BER_test.py, but if you skip this file make sure to comment lines 50 and 51 of 5_BER_test.py 5 | 6 | to run this file, it is necessary to have downloaded the entire folder of s-params given in the header of 1_channel.py 7 | 8 | write the path to the unzipped directory below, on line 18 9 | """ 10 | 11 | import serdespy as sdp 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | import skrf as rf 15 | import scipy as sp 16 | 17 | #path to touchstone files here: 18 | s4p_dir = "C:/Users/richa/Downloads/mellitz_3ck_04_1119_CACR/" 19 | 20 | #far-end crosstalk 21 | fext_files = ['Tp0_Tp5_28p5db_FQSFP_fext' + f'{i}' +'.s4p' for i in range(1,8)] 22 | fext_networks = [rf.Network(s4p_dir + fext_file ) for fext_file in fext_files] 23 | 24 | #near-end crosstalk 25 | next_files = ['Tp0_Tp5_28p5db_FQSFP_next' + f'{i}' +'.s4p' for i in range(1,9)] 26 | next_networks = [rf.Network(s4p_dir + next_file ) for next_file in next_files] 27 | 28 | #set-up params 29 | port_def = np.array([[0, 1],[2, 3]]) 30 | 31 | nyquist_f = 26.56e9 32 | symbol_t = 1/(2*nyquist_f) 33 | samples_per_symbol = 64 34 | t_d = symbol_t/samples_per_symbol 35 | 36 | #%%Compute response for each of the crosstalk agressors at the near and far ends 37 | 38 | H_fext = np.zeros((len(fext_files), 169985), dtype = complex) 39 | h_fext = np.zeros((len(fext_files), 339968)) 40 | 41 | for i in range(len(fext_files)): 42 | H, f, h, t = sdp.four_port_to_diff(fext_networks[i], port_def, 50, np.inf, option = 0, t_d = t_d) 43 | H_fext[i] = H 44 | h_fext[i] = h 45 | 46 | H_next = np.zeros((len(next_files), 169985), dtype = complex) 47 | h_next = np.zeros((len(next_files), 339968)) 48 | 49 | for i in range(len(next_files)): 50 | H, f, h, t = sdp.four_port_to_diff(next_networks[i], port_def, 50, np.inf, option = 0, t_d = t_d) 51 | H_next[i] = H 52 | h_next[i] = h 53 | 54 | #%%% Plot magnitude response of crosstalk and thru channel 55 | import matplotlib.patches as mpatches 56 | 57 | plt.figure(dpi = 1200) 58 | 59 | next_patch = mpatches.Patch(color='red', label='FEXT') 60 | fext_patch = mpatches.Patch(color='orange', label='NEXT') 61 | thru_patch = mpatches.Patch(color='blue', label='THRU') 62 | nyquist_patch = mpatches.Patch(color='grey', label='Nyquist Frequency') 63 | 64 | for i in range(len(fext_files)): 65 | plt.plot(1e-9*f,20*np.log10(abs(H_fext[i,:])), color = "red", linewidth = 0.2) 66 | 67 | for i in range(len(next_files)): 68 | plt.plot(1e-9*f,20*np.log10(abs(H_next[i,:])), color = "orange", linewidth = 0.2) 69 | 70 | H_thru = np.load("./data/TF_thru.npy") 71 | plt.plot(1e-9*f,20*np.log10(abs(H_thru)), color = "blue", label = "THRU channel", linewidth = 0.8) 72 | 73 | plt.xlim([0,60]) 74 | plt.ylim([-125,5]) 75 | plt.ylabel('Mag. Response [dB]') 76 | plt.xlabel('Frequency [GHz]') 77 | plt.axvline(x=26.56,color = 'grey', label = "Nyquist Frequency") 78 | plt.title("Channel and Crosstalk Bode Plot") 79 | plt.grid() 80 | plt.legend(loc = "upper right", handles = [next_patch, fext_patch, thru_patch, nyquist_patch]) 81 | #plt.savefig(fig_dir + "12dB_bode", transparent = True) 82 | 83 | #%%Generate crosstalk response to random data 84 | 85 | h_xtalk = np.vstack((h_next, h_fext)) 86 | 87 | voltage_levels = np.array([-3,-1,1,3]) 88 | 89 | data = sdp.prqs10(1) 90 | 91 | TX = sdp.Transmitter(data[:10000], voltage_levels, 26.56e9) 92 | 93 | TX.oversample(samples_per_symbol) 94 | 95 | xt_response = np.zeros([data.size*samples_per_symbol,]) 96 | 97 | for i in range(h_xtalk.shape[0]): 98 | print(i) 99 | 100 | #generate data for each xtalk channel with new random seed 101 | data = sdp.prqs10(int(i+1)) 102 | 103 | #find xtalk response and sum 104 | TX = sdp.Transmitter(data, voltage_levels, 26.56e9) 105 | TX.oversample(samples_per_symbol) 106 | xt_response = xt_response + sp.signal.fftconvolve(TX.signal_ideal, h_xtalk[i][:], mode = "same") 107 | 108 | #plot eye diagram of sum of all crosstalk 109 | sdp.simple_eye(xt_response, samples_per_symbol*3, 500, TX.UI/TX.samples_per_symbol, "XT") 110 | 111 | #save data 112 | np.save("./data/xt_response.npy",xt_response) 113 | 114 | 115 | -------------------------------------------------------------------------------- /examples/example_channel/5_BER_test.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file shows example of simulation to test BER of link with equalization modelled in previous files 3 | 4 | This file also includes gaussian jitter, and a bandwidth-limiting filter applied to the transmitter waveform 5 | AWGN is added to the signal after the channel + CTLE 6 | """ 7 | 8 | import serdespy as sdp 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | import skrf as rf 12 | import scipy as sp 13 | 14 | #load data from previous files 15 | h = np.load("./data/h_thru.npy") 16 | h_ctle = np.load("./data/h_ctle.npy") 17 | ffe_tap_weights = np.load("./data/rx_ffe_tap_weights.npy") 18 | dfe_tap_weights = np.load("./data/rx_dfe_tap_weights.npy") 19 | tx_fir_tap_weights = np.load("./data/tx_fir_tap_weights.npy") 20 | 21 | #set up paramaters 22 | nyquist_f = 26.56e9 23 | samples_per_symbol = 64 24 | voltage_levels = np.array([-3,-1,1,3]) 25 | 26 | #pseudo-random data for simulation 27 | data = sdp.prqs10(1) 28 | 29 | #set up transmitter waveform 30 | TX = sdp.Transmitter(data, voltage_levels, nyquist_f) 31 | 32 | #apply FIR filter 33 | TX.FIR(tx_fir_tap_weights) 34 | 35 | #oversample transmitter waveform 36 | TX.oversample(samples_per_symbol) 37 | 38 | #add 0-mean jitter with standard deviation that is 2.5 % of UI time 39 | TX.gaussian_jitter(stdev_div_UI=0.025) 40 | 41 | #low-pass filter with cutoff frequency 120G 42 | TX.tx_bandwidth(freq_bw=120e9) 43 | 44 | #plot eye diagram of bandwidth-limitied transmitter waveform 45 | sdp.simple_eye(TX.signal, samples_per_symbol*3, 1000, TX.UI/TX.samples_per_symbol, "TX Bandwidth-Limited Eye Diagram (-3dB frequency at 100GHz)") 46 | #%% signal after channel 47 | signal_out = sp.signal.fftconvolve(TX.signal, h, mode = "same") 48 | 49 | #optional: add xtalk interference 50 | #xtalk = np.load("./data/xt_response.npy") 51 | #signal_out = signal_out[:xtalk.size]+xtalk 52 | 53 | signal_out_ctle = sp.signal.fftconvolve(signal_out, h_ctle, mode = "same") 54 | #%%plot eye diagram with ctle 55 | sdp.simple_eye(signal_out_ctle, samples_per_symbol*3, 1000, TX.UI/TX.samples_per_symbol, "Eye Diagram with CTLE") 56 | 57 | main_cursor = np.max(signal_out_ctle)/np.max(voltage_levels) 58 | 59 | RX = sdp.Receiver(signal_out_ctle, samples_per_symbol, nyquist_f, voltage_levels, shift = True, main_cursor = 0.5) 60 | 61 | #slice signal at centre of eye 62 | RX.slice_signal() 63 | 64 | #add noise with standard deviation 0.1 65 | RX.signal_BR = RX.signal_BR + np.random.normal(scale=0.05, size = RX.signal_BR.size) 66 | 67 | #plot baud-rate eye diagram with ctle and noise 68 | sdp.simple_eye(RX.signal_BR, 3, 1000, TX.UI, "Eye Diagram with CTLE and noise BR") 69 | 70 | #apply feed-forward equalization to baud-rate-sampled signal 71 | RX.FFE_BR(ffe_tap_weights, 3) 72 | 73 | #sdp.simple_eye(RX.signal_BR, 3, 800, TX.UI/TX.samples_per_symbol, "Eye Diagram after FFE") 74 | 75 | # apply decision-feedback equalization 76 | RX.pam4_DFE_BR(dfe_tap_weights) 77 | 78 | #plot baud-rate eye diagram after DFE 79 | sdp.simple_eye(RX.signal_BR, 3, 800, TX.UI/TX.samples_per_symbol, "Eye Diagram after DFE") 80 | 81 | #%%check the number of errors and calculate BER 82 | err = sdp.prqs_checker(10, data, RX.symbols_out[10:-10]) 83 | 84 | print("Bits Transmitted =", RX.symbols_out[10:-10].size*2, 'Bit Errors =', err[0]) 85 | 86 | print("Bit Error Ratio = ", err[0]/(RX.symbols_out[10:-10].size*2)) -------------------------------------------------------------------------------- /examples/example_channel/6_FEC.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file contains an example of encoding 1M bits with with the RS KP4 or RS KR4 error correction code 3 | ''' 4 | 5 | import serdespy as sdp 6 | import time 7 | 8 | data = sdp.prbs20(1)[:1000000] 9 | 10 | #encoder = sdp.RS_KR4() 11 | encoder = sdp.RS_KP4() 12 | 13 | t1 = time.time() 14 | data_encoded = sdp.rs_encode(data, encoder, pam4=False) 15 | t2 = time.time() 16 | 17 | print(f'time to encode: {t2-t1}') 18 | 19 | 20 | t1 = time.time() 21 | data_decoded = sdp.rs_decode(data_encoded, encoder, pam4=False) 22 | t2 = time.time() 23 | 24 | print(f'time to decode: {t2-t1}') -------------------------------------------------------------------------------- /examples/problem_set/HW1.py: -------------------------------------------------------------------------------- 1 | import serdespy as sdp 2 | import numpy as np 3 | import scipy as sp 4 | import matplotlib.pyplot as plt 5 | 6 | 7 | #%% 2-PAM 8 | 9 | #generate binary data 10 | data = sdp.prbs13(1) 11 | 12 | #generate Baud-Rate sampled signal from data 13 | signal_BR = sdp.nrz_input_BR(data) 14 | 15 | #data rate in Gbps 16 | for data_rate in [50e9,100e9]: 17 | 18 | #time per bit 19 | UI = 1/data_rate 20 | 21 | #define oversample ratio 22 | samples_per_symbol = 64 23 | 24 | #timestep 25 | dt = UI/samples_per_symbol 26 | 27 | #oversampled signal 28 | signal_ideal = np.repeat(signal_BR, samples_per_symbol) 29 | 30 | #eye diagram of ideal signal 31 | sdp.simple_eye(signal_ideal, samples_per_symbol*3, 100, dt, "{}Gbps 2-PAM Signal".format(data_rate/1e9),linewidth=1.5) 32 | 33 | #cutoff frequency 34 | for freq_bw in [20e9, 30e9, 40e9, 50e9]: 35 | 36 | #max frequency for constructing discrete transfer function 37 | max_f = 1/dt 38 | 39 | #max_f in rad/s 40 | max_w = max_f*2*np.pi 41 | 42 | #heuristic to get a reasonable impulse response length 43 | ir_length = int(4/(freq_bw*dt)) 44 | 45 | #calculate discrete transfer function of low-pass filter with pole at freq_bw 46 | w, H = sp.signal.freqs([freq_bw*(2*np.pi)], [1,freq_bw*(2*np.pi)], np.linspace(0,0.5*max_w,ir_length*4)) 47 | 48 | #frequency vector for discrete transfer function in hz 49 | f = w/(2*np.pi) 50 | 51 | #plot frequency response of the low-pass filter 52 | plt.figure(dpi=800) 53 | plt.semilogx(1e-9*f,20*np.log10(abs(H))) 54 | plt.ylabel('Mag. Response [dB]') 55 | plt.xlabel('Frequency [GHz]') 56 | plt.title("Low Pass Filter with {}GHz Cutoff Magnitude Bode Plot".format(round(freq_bw*1e-9))) 57 | plt.grid() 58 | plt.axvline(x=1e-9*freq_bw,color = 'grey') 59 | plt.show() 60 | 61 | #find impluse response of low-pass filter 62 | h, t = sdp.freq2impulse(H,f) 63 | 64 | #plot impulse response of the low-pass filter 65 | # plt.figure(dpi=800) 66 | # plt.plot(t[:ir_length]*1e12,h[:ir_length]) 67 | # plt.title("Low Pass Filter with {}GHz Cutoff Impulse Response".format(round(freq_bw*1e-9))) 68 | # plt.xlabel('Time [ps]') 69 | # plt.ylabel('[V]') 70 | # plt.show() 71 | 72 | signal_filtered = sp.signal.fftconvolve(signal_ideal, h[:ir_length], mode="full") 73 | 74 | sdp.simple_eye(signal_filtered[samples_per_symbol*100:], samples_per_symbol*3, 100, UI/samples_per_symbol, "{}Gbps 2-PAM Signal with {}GHz Cutoff Filter".format(round(data_rate/1e9),round(freq_bw*1e-9))) 75 | 76 | #%% 4-PAM 77 | 78 | #generate binary data 79 | data = sdp.prqs10(1)[:10000] 80 | 81 | #generate Baud-Rate sampled signal from data 82 | signal_BR = sdp.pam4_input_BR(data) 83 | 84 | #data rate in Gbps 85 | for data_rate in [50e9,100e9]: 86 | 87 | #time per 4-PAM symbol 88 | UI = 2/data_rate 89 | 90 | #define oversample ratio 91 | samples_per_symbol = 64 92 | 93 | #timestep 94 | dt = UI/samples_per_symbol 95 | 96 | #oversampled signal 97 | signal_ideal = np.repeat(signal_BR, samples_per_symbol) 98 | 99 | #eye diagram of ideal signal 100 | sdp.simple_eye(signal_ideal, samples_per_symbol*3, 100, dt, "{}Gbps 4-PAM Signal".format(data_rate/1e9),linewidth=1.5) 101 | 102 | #cutoff frequency 103 | for freq_bw in [20e9, 30e9, 40e9, 50e9]: 104 | 105 | #max frequency for constructing discrete transfer function 106 | max_f = 1/dt 107 | 108 | #max_f in rad/s 109 | max_w = max_f*2*np.pi 110 | 111 | #heuristic to get a reasonable impulse response length 112 | ir_length = int(4/(freq_bw*dt)) 113 | 114 | #calculate discrete transfer function of low-pass filter with pole at freq_bw 115 | w, H = sp.signal.freqs([freq_bw*(2*np.pi)], [1,freq_bw*(2*np.pi)], np.linspace(0,0.5*max_w,ir_length*4)) 116 | 117 | #frequency in hz 118 | f = w/(2*np.pi) 119 | 120 | #plot frequency response of TF 121 | plt.figure(dpi=800) 122 | plt.semilogx(1e-9*f,20*np.log10(abs(H))) 123 | plt.ylabel('Mag. Response [dB]') 124 | plt.xlabel('Frequency [GHz]') 125 | plt.title("Low Pass Filter with {}GHz Cutoff Magnitude Bode Plot".format(round(freq_bw*1e-9))) 126 | plt.grid() 127 | plt.axvline(x=1e-9*freq_bw,color = 'grey') 128 | plt.show() 129 | 130 | #find impluse response of low-pass filter 131 | h, t = sdp.freq2impulse(H,f) 132 | 133 | #plot impulse response of the low-pass filter 134 | # plt.figure(dpi=800) 135 | # plt.plot(t[:ir_length]*1e12,h[:ir_length]) 136 | # plt.title("Low Pass Filter with {}GHz Cutoff Impulse Response".format(round(freq_bw*1e-9))) 137 | # plt.xlabel('Time [ps]') 138 | # plt.ylabel('[V]') 139 | # plt.show() 140 | 141 | signal_filtered = sp.signal.fftconvolve(signal_ideal, h[:ir_length], mode="full") 142 | 143 | sdp.simple_eye(signal_filtered[samples_per_symbol*100:], samples_per_symbol*3, 100, UI/samples_per_symbol, "{}Gbps 4-PAM Signal with {}GHz Cutoff Filter".format(round(data_rate/1e9),round(freq_bw*1e-9))) 144 | -------------------------------------------------------------------------------- /examples/problem_set/HW2.py: -------------------------------------------------------------------------------- 1 | 2 | import serdespy as sdp 3 | import numpy as np 4 | import scipy as sp 5 | import matplotlib.pyplot as plt 6 | import time 7 | 8 | data = sdp.prbs24(1) 9 | 10 | #%% KR4 FEC 11 | KR4_encoder = sdp.RS_KR4() 12 | 13 | N = 528 14 | K = 514 15 | T = 7 16 | 17 | n_bits_FEC_symbol = 10 18 | 19 | n_bits_payload = K*n_bits_FEC_symbol 20 | 21 | prob_err = 0.0005 22 | 23 | i = 0 24 | 25 | total_bit_error_pre_FEC= 0 26 | total_bit_pre_FEC = 0 27 | 28 | total_bit_error_post_FEC= 0 29 | total_bit_post_FEC = 0 30 | 31 | total_frame_error= 0 32 | total_frame = 0 33 | 34 | while i<10000: 35 | 36 | payload = data[(i%1000)*n_bits_payload:((i%1000)+1)*n_bits_payload] 37 | codeword = sdp.rs_encode(payload, KR4_encoder,pam4=False) 38 | codeword_rx = np.copy(codeword) 39 | 40 | #add errors 41 | for bit in range(codeword_rx.size): 42 | if np.random.rand() < prob_err: 43 | len_burst = int(np.ceil(np.random.rand()*20)) 44 | 45 | for err in range(len_burst): 46 | if bit+err time step = 500fs 25 | fmax=1e12 26 | 27 | #frequency vector (Hz) 28 | k = 14 29 | f = np.linspace(0,fmax,2**k+1) 30 | 31 | #frequency vector (rad/s) 32 | w = f*2*np.pi 33 | 34 | # Constants 35 | 36 | #speed of light [m/s] 37 | c = 2.998e8 38 | 39 | #Vacuum permittivity [F/m] 40 | eps_0 = 8.85*1e-12 41 | 42 | # Transmission line parameters 43 | 44 | #Effective relative dielectric constant 45 | eps_r = 4.9 46 | 47 | #Propagation velocity of the transmission line [m/s] 48 | v0 = np.sqrt(1/eps_r)*c 49 | 50 | #Characteristic impedance [ohm] 51 | Z0 = 50 52 | 53 | #Inductance [H/m] 54 | L0 = Z0/v0 55 | 56 | #Capacitance [F/m] 57 | C0 = 1/(Z0*v0) 58 | 59 | #Conductance [S/m] 60 | G0 = 1e-12 61 | 62 | #Resistance 63 | RAC = (k_r*(1+1j)*np.sqrt(w/w_0)) 64 | 65 | #%% Generate frequency-dependent RLGC for the lossy transmission line 66 | R=np.sqrt(RDC**2 + RAC**2) 67 | L=L0*np.ones(np.size(f)) 68 | G=G0*np.ones(np.size(f)) 69 | C= C0 * (1j*w/w_0)**(-2*theta_0/np.pi) 70 | 71 | if (f[0]==0): 72 | C[0] = C[1] 73 | 74 | # transmission line length [m] 75 | d = 0.1 76 | 77 | #create transmission ABCD paramaters 78 | tline = sdp.rlgc(R,L,G,C,d,f); 79 | 80 | #%% source impedance 81 | r_source = 50 82 | source = sdp.impedance(r_source*np.ones(np.size(f))) 83 | 84 | #termination admittance 85 | r_term = 50 86 | termination = sdp.admittance(np.ones(np.size(f))/r_term) 87 | 88 | #channel is the series connection of the source, transmission line, and termination 89 | channel = sdp.series(np.array([source,tline,termination])) 90 | 91 | #%% 92 | # frequency domain response 93 | Hchannel = 1/channel[:,0,0] 94 | 95 | #Hchannel = Hchannel/abs(Hchannel[0]) 96 | 97 | np.save("./data/Hchannel",Hchannel) 98 | 99 | # impulse response 100 | h,t = sdp.freq2impulse(Hchannel,f); 101 | np.save("./data/h.npy",h) 102 | np.save("./data/t.npy",t) 103 | 104 | #step response 105 | hstep = sp.signal.convolve(h,np.ones(np.shape(h)))[:np.size(h)] 106 | 107 | #100Gbps 108 | data_rate = 100e9 109 | 110 | #Pam-4 Signalling 111 | t_symbol = 2/data_rate 112 | 113 | #time step between samples of impulse response 114 | t_sample = 1/(2*fmax) 115 | 116 | #number of time samples in one PAM-4 symbol 117 | samples_per_symbol = int(t_symbol/t_sample) 118 | 119 | #response of transmission line to one UI pulse 120 | hpulse = sp.signal.convolve(h,np.ones(np.array([samples_per_symbol,])))[:np.size(h)] 121 | np.save("./data/hpulse.npy",hpulse) 122 | 123 | #%% Plots 124 | 125 | plt.figure(dpi=600) 126 | plt.title('Transmission Line Frequency Response') 127 | plt.semilogx(1e-9*f,20*np.log10(np.abs(Hchannel))) 128 | plt.xlim([0.1, 100]) 129 | plt.ylim([-40, 2]) 130 | plt.xlabel('Frequency [GHz]') 131 | plt.ylabel('Mag Response [dB]') 132 | plt.grid() 133 | #nyquist f for 100G 4-PAM is 25G 134 | plt.axvline(x=25,color = 'grey', label = "Nyquist Frequency") 135 | plt.show() 136 | 137 | plt.figure(dpi=600) 138 | plt.plot((t*1e9),h) 139 | plt.title('Transmission Line Impulse Response') 140 | plt.ylabel('Impulse Response') 141 | plt.xlabel('Time (ns)') 142 | plt.xlim([0, 5]) 143 | #plt.ylim([-0.01, 0.08]) 144 | plt.show() 145 | 146 | plt.figure(dpi=600) 147 | plt.plot((t*1e9),hpulse) 148 | plt.title('Transmission Line Pulse Response') 149 | plt.ylabel('Pulse Response') 150 | plt.xlabel('Time (ns)') 151 | plt.xlim([0, 5]) 152 | #plt.ylim([-0.01, 0.08]) 153 | plt.show() 154 | 155 | plt.figure(dpi=600) 156 | plt.plot(t*1e9,hstep) 157 | plt.title('Transmission Line Step Response') 158 | plt.ylabel('Step Response [V]') 159 | plt.xlabel('Time (ns)') 160 | plt.xlim([0, 5]) 161 | plt.show() 162 | 163 | 164 | #%% Eye Diagram 165 | 166 | #generate binary data 167 | data = sdp.prqs10(1)[:10000] 168 | 169 | data_rate = 100e9 170 | 171 | #generate Baud-Rate sampled signal from data 172 | signal_BR = sdp.pam4_input_BR(data) 173 | 174 | #oversampled signal 175 | signal_ideal = np.repeat(signal_BR, samples_per_symbol) 176 | 177 | #eye diagram of ideal signal 178 | 179 | signal_out = sp.signal.convolve(h,signal_ideal) 180 | 181 | sdp.simple_eye(signal_out[100*samples_per_symbol:], samples_per_symbol*3, 500, t_sample, "{}Gbps 4-PAM Signal".format(data_rate/1e9)) 182 | 183 | #%% Add parallel shunt capacitance to source and termination 184 | 185 | #200 fF capacitance 186 | C = 200*1e-15 187 | 188 | #ABCD paramaters of shunt cap network 189 | cap_network = sdp.shunt_cap(C,w*1j) 190 | 191 | #channel is the series connection of the networks 192 | channel = sdp.series(np.array([source,cap_network,tline,cap_network,termination])) 193 | 194 | #%% 195 | # frequency domain response 196 | Hchannel = 1/channel[:,0,0] 197 | 198 | #Hchannel = Hchannel/abs(Hchannel[0]) 199 | 200 | np.save("./data/Hchannel_cap",Hchannel) 201 | 202 | # impulse response 203 | h,t = sdp.freq2impulse(Hchannel,f); 204 | 205 | #step response 206 | hstep = sp.signal.convolve(h,np.ones(np.shape(h)))[:np.size(h)] 207 | 208 | #100Gbps 209 | data_rate = 100e9 210 | 211 | #Pam-4 Signalling 212 | t_symbol = 2/data_rate 213 | 214 | #time step between samples of impulse response 215 | t_sample = 1/(2*fmax) 216 | 217 | #number of time samples in one PAM-4 symbol 218 | samples_per_symbol = int(t_symbol/t_sample) 219 | 220 | #response of transmission line to one UI pulse 221 | hpulse = sp.signal.convolve(h,np.ones(np.array([samples_per_symbol,])))[:np.size(h)] 222 | np.save("./data/hpulse_cap.npy",hpulse) 223 | 224 | #%% Plots 225 | 226 | plt.figure(dpi=600) 227 | plt.title('Transmission Line (with cap) Frequency Response Magnitude') 228 | plt.semilogx(1e-9*f,20*np.log10(np.abs(Hchannel))) 229 | plt.xlim([0.1, 100]) 230 | plt.ylim([-40, 2]) 231 | plt.xlabel('Frequency [GHz]') 232 | plt.ylabel('Mag Response [dB]') 233 | plt.grid() 234 | #nyquist f for 100G 4-PAM is 25G 235 | plt.axvline(x=25,color = 'grey', label = "Nyquist Frequency") 236 | plt.show() 237 | 238 | 239 | plt.figure(dpi=600) 240 | plt.plot((t*1e9),h) 241 | plt.title('Transmission Line (with cap) Impulse Response') 242 | plt.ylabel('Impulse Response') 243 | plt.xlabel('Time (ns)') 244 | plt.xlim([0, 5]) 245 | #plt.ylim([-0.01, 0.08]) 246 | plt.show() 247 | 248 | plt.figure(dpi=600) 249 | plt.plot((t*1e9),hpulse) 250 | plt.title('Transmission Line (with cap) Pulse Response') 251 | plt.ylabel('Pulse Response') 252 | plt.xlabel('Time (ns)') 253 | plt.xlim([0, 5]) 254 | #plt.ylim([-0.01, 0.08]) 255 | plt.show() 256 | 257 | plt.figure(dpi=600) 258 | plt.plot(t*1e9,hstep) 259 | plt.title('Transmission Line (with cap) Step Response') 260 | plt.ylabel('Step Response [V]') 261 | plt.xlabel('Time (ns)') 262 | plt.xlim([0, 5]) 263 | plt.show() 264 | 265 | 266 | #%% Eye Diagram 267 | #eye diagram of ideal signal 268 | 269 | signal_out_cap = sp.signal.convolve(h,signal_ideal) 270 | 271 | sdp.simple_eye(signal_out_cap[100*samples_per_symbol:], samples_per_symbol*3, 500, t_sample, "{}Gbps 4-PAM Signal".format(data_rate/1e9)) 272 | 273 | #%% save data for next homework assignment 274 | np.save("./data/signal.npy",signal_out) 275 | np.save("./data/signal_cap.npy",signal_out_cap) 276 | np.save("./data/f.npy",f) 277 | np.save("./data/w.npy",w) 278 | -------------------------------------------------------------------------------- /examples/problem_set/HW4.py: -------------------------------------------------------------------------------- 1 | import serdespy as sdp 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | import skrf as rf 5 | import scipy as sp 6 | 7 | f = np.load("./data/f.npy") 8 | w = np.load("./data/w.npy") 9 | 10 | h_pulse = np.load("./data/hpulse.npy") 11 | t = np.load("./data/t.npy") 12 | 13 | signal = np.load("./data/signal.npy") 14 | #Hchannel = np.load("./data/Hchannel.npy") 15 | 16 | samples_per_symbol = 40 17 | data_rate = 100e9 18 | 19 | #set poles and zeroes for peaking at nyquist freq 20 | #high peaking because channel is high insertion loss 21 | z = 5e10 22 | p = 1.7e11 23 | k = p**2/z 24 | 25 | #calculate Frequency response of CTLE at given frequencies 26 | w, H_ctle = sp.signal.freqs([k/p**2, k*z/p**2], [1/p**2, 2/p, 1], w) 27 | 28 | #bode plot of CTLE transfer function 29 | plt.figure(dpi=600) 30 | plt.semilogx(1e-9*f,20*np.log10(abs(H_ctle)), color = "red", label = 'CTLE') 31 | plt.title("CTLE Frequency Response") 32 | plt.grid() 33 | plt.axvline(x=25,color = 'grey', label = "Nyquist Frequency") 34 | plt.axvline(x=z/(2*np.pi)*1e-9,color = 'green', label = "Zero Location") 35 | plt.axvline(x=p/(2*np.pi)*1e-9,color = 'blue', label = "Pole Location") 36 | plt.legend() 37 | 38 | #%% 39 | h_ctle, t_ctle = sdp.freq2impulse(H_ctle,f) 40 | h_ctle = h_ctle[0:200] 41 | plt.figure(dpi=600) 42 | plt.plot(h_ctle) 43 | 44 | #%% Eye diagram of signal with and without CTLE 45 | 46 | sdp.simple_eye(signal[100*samples_per_symbol:], samples_per_symbol*3, 500, 500*1e-15, "{}Gbps 4-PAM Signal".format(data_rate/1e9)) 47 | 48 | signal_ctle = sp.signal.convolve(signal,h_ctle) 49 | 50 | sdp.simple_eye(signal_ctle[100*samples_per_symbol:], samples_per_symbol*3, 500, 500*1e-15, "{}Gbps 4-PAM Signal with CTLE".format(data_rate/1e9)) 51 | 52 | #%% 53 | h_pulse_ctle = sp.signal.convolve(h_pulse,h_ctle) 54 | 55 | FFE_pre = 2 56 | FFE_taps = 7 57 | FFE_post = FFE_taps - FFE_pre - 1 58 | DFE_taps = 2 59 | 60 | sdp.channel_coefficients(h_pulse[:t.size],t,40,2,4) 61 | 62 | h = sdp.channel_coefficients(h_pulse_ctle[:t.size],t,40,2,4) 63 | #%% 64 | #h /= h.max() 65 | #print('h: ',h) 66 | 67 | channel_main = h.argmax() 68 | 69 | #main_cursor = h[channel_main] 70 | main_cursor = 1 71 | 72 | #generate binary data 73 | data = sdp.prqs10(1)[:10000] 74 | 75 | voltage_levels = np.array([-3, -1, 1, 3]) 76 | 77 | #generate Baud-Rate sampled signal from data 78 | signal_BR = sdp.pam4_input_BR(data) 79 | 80 | signal_rx = sp.signal.fftconvolve(h, signal_BR)[:len(signal_BR)] 81 | 82 | signal_rx_cropped = signal_rx[channel_main:] 83 | 84 | reference_signal = signal_BR[:1000] 85 | 86 | w_ffe_init = np.zeros([7,]) 87 | w_dfe_init = np.zeros([2,]) 88 | 89 | w_ffe, w_dfe, v_combined_ffe, v_combined_dfe, z_combined, e_combined = \ 90 | sdp.lms_equalizer(signal_rx_cropped, 0.001, len(signal_rx_cropped), w_ffe_init, FFE_pre, w_dfe_init, voltage_levels, reference=reference_signal[:1000]) 91 | 92 | #%% 93 | 94 | #voltage_levels = np.array([-3,-1,1,3]) 95 | 96 | nyquist_f = 25e9 97 | 98 | RX = sdp.Receiver(signal_ctle, samples_per_symbol, nyquist_f, voltage_levels,main_cursor=main_cursor) 99 | 100 | #sdp.simple_eye(RX.signal, samples_per_symbol*3, 800, TX.UI/TX.samples_per_symbol, "Eye Diagram with CTLE") 101 | 102 | RX.FFE(w_ffe, FFE_pre) 103 | 104 | sdp.simple_eye(RX.signal[int(100.5*samples_per_symbol):], samples_per_symbol*3, 800, 500*1e-15, "Eye Diagram with CTLE and FFE") 105 | 106 | RX.pam4_DFE(w_dfe) 107 | 108 | sdp.simple_eye(RX.signal[int(100.5*samples_per_symbol):], samples_per_symbol*3, 800, 500*1e-15, "Eye Diagram with CTLE, FFE, and DFE") -------------------------------------------------------------------------------- /examples/problem_set/HW6.py: -------------------------------------------------------------------------------- 1 | import serdespy as sdp 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | import skrf as rf 5 | import scipy as sp 6 | 7 | #load data from homework 4 8 | h = np.load("./data/h.npy") 9 | t = np.load("./data/t.npy") 10 | 11 | #generate binary data 12 | data = sdp.prqs10(1)[:10000] 13 | 14 | data_rate = 100e9 15 | samples_per_symbol = 40 16 | 17 | #generate Baud-Rate sampled signal from data 18 | signal_BR = sdp.pam4_input_BR(data) 19 | 20 | #oversampled signal 21 | signal_ideal = np.repeat(signal_BR, samples_per_symbol) 22 | 23 | sdp.simple_eye(signal_ideal, samples_per_symbol*3, 500, 500*1e-15, "{}Gbps Ideal 4-PAM Signal".format(data_rate/1e9)) 24 | 25 | #%% 26 | 27 | #linewidth for seeing eye diagrams 28 | lw = 0.05 29 | 30 | #TX signal with jitter 31 | signal_jitter = sdp.gaussian_jitter(signal_ideal, 20e-12, 10000, samples_per_symbol, stdev=1000e-15) 32 | 33 | #eye diagram of TX with jitter 34 | sdp.simple_eye(signal_jitter, samples_per_symbol*3, 500, 500*1e-15, "{}Gbps 4-PAM Signal with jitter".format(data_rate/1e9),linewidth=lw) 35 | 36 | #signal at receiver with no jitter 37 | signal_out_ideal = sp.signal.convolve(h,signal_ideal) 38 | sdp.simple_eye(signal_out_ideal[100*samples_per_symbol:], samples_per_symbol, 5000, 500*1e-15, "rx eye diagram no jitter",linewidth=lw) 39 | 40 | #signal at reciever with tx jitter 41 | signal_out_jitter_tx = sp.signal.convolve(h,signal_jitter) 42 | sdp.simple_eye(signal_out_jitter_tx[100*samples_per_symbol:], samples_per_symbol, 5000, 500*1e-15, "rx eye diagram with tx jitter",linewidth=lw) 43 | 44 | #signal at receiver with rx jitter 45 | sdp.rx_jitter_eye(signal_out_ideal[100*samples_per_symbol:],samples_per_symbol,5000,5000,500*1e-15,"rx eye diagram with rx_jitter",stdev=1000e-15,linewidth=lw) 46 | -------------------------------------------------------------------------------- /examples/problem_set/HW7.py: -------------------------------------------------------------------------------- 1 | import serdespy as sdp 2 | import numpy as np 3 | import scipy as sp 4 | import matplotlib.pyplot as plt 5 | 6 | 7 | #generate binary data 8 | data = sdp.prqs10(1)[:10000] 9 | 10 | #generate Baud-Rate sampled signal from data 11 | voltage_levels=np.array([-1,-1/3,1/3,1]) 12 | #voltage_levels=np.array([-3/2,-1/2,1/2,3/2]) 13 | #voltage_levels=np.array([-2,-2/3,2/3,2]) 14 | 15 | signal_BR = sdp.pam4_input_BR(data,voltage_levels=voltage_levels) 16 | 17 | #data rate in Gbps 18 | data_rate = 100e9 19 | 20 | #time per 4-PAM symbol 21 | UI = 2/data_rate 22 | 23 | #define oversample ratio 24 | samples_per_symbol = 64 25 | 26 | #timestep 27 | dt = UI/samples_per_symbol 28 | 29 | #oversampled signal 30 | signal_ideal = np.repeat(signal_BR, samples_per_symbol) 31 | 32 | #eye diagram of ideal signal 33 | sdp.simple_eye(signal_ideal, samples_per_symbol*3, 100, dt, "{}Gbps 4-PAM Signal".format(data_rate/1e9),linewidth=1.5) 34 | 35 | #cutoff frequency 36 | freq_bw = 50e9 37 | 38 | #max frequency for constructing discrete transfer function 39 | max_f = 1/dt 40 | 41 | #max_f in rad/s 42 | max_w = max_f*2*np.pi 43 | 44 | #heuristic to get a reasonable impulse response length 45 | ir_length = int(4/(freq_bw*dt)) 46 | 47 | #calculate discrete transfer function of low-pass filter with pole at freq_bw 48 | w, H = sp.signal.freqs([freq_bw*(2*np.pi)], [1,freq_bw*(2*np.pi)], np.linspace(0,0.5*max_w,ir_length*4)) 49 | 50 | #frequency in hz 51 | f = w/(2*np.pi) 52 | 53 | #plot frequency response of TF 54 | plt.figure(dpi=800) 55 | plt.semilogx(1e-9*f,20*np.log10(abs(H))) 56 | plt.ylabel('Mag. Response [dB]') 57 | plt.xlabel('Frequency [GHz]') 58 | plt.title("Low Pass Filter with {}GHz Cutoff Magnitude Bode Plot".format(round(freq_bw*1e-9))) 59 | plt.grid() 60 | plt.axvline(x=1e-9*freq_bw,color = 'grey') 61 | plt.show() 62 | 63 | #find impluse response of low-pass filter 64 | h, t = sdp.freq2impulse(H,f) 65 | 66 | #convolution of impulse response with ideal signal 67 | signal_filtered = sp.signal.fftconvolve(signal_ideal, h[:ir_length]) 68 | 69 | #plot eye diagram of filtered signal 70 | sdp.simple_eye(signal_filtered[samples_per_symbol*100:], samples_per_symbol*3, 100, UI/samples_per_symbol, "{}Gbps 4-PAM Signal with {}GHz Cutoff Filter".format(round(data_rate/1e9),round(freq_bw*1e-9))) 71 | 72 | #optical modulator nonlinearity 73 | def optical_nonlinearity(signal): 74 | return np.sin(np.pi*signal/5) 75 | 76 | signal_optical = optical_nonlinearity(signal_filtered) 77 | 78 | #eye diagram of optical signal 79 | sdp.simple_eye(signal_optical[samples_per_symbol*100:], samples_per_symbol*3, 100, UI/samples_per_symbol, "{}Gbps Optical 4-PAM Signal".format(round(data_rate/1e9),round(freq_bw*1e-9))) 80 | 81 | #calculate RLM 82 | levels_optical = optical_nonlinearity(voltage_levels) 83 | 84 | Vmin = (levels_optical[0]+levels_optical[3])/2 85 | 86 | ES1 = (levels_optical[1]-Vmin)/(levels_optical[0]-Vmin) 87 | 88 | ES2 = (levels_optical[2]-Vmin)/(levels_optical[3]-Vmin) 89 | 90 | RLM = min((3*ES1),(3*ES2),(2-3*ES1),(2-3*ES2)) 91 | 92 | print("RLM = ",RLM) -------------------------------------------------------------------------------- /examples/problem_set/problem_set.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/richard259/serdespy/5f7a5c81a624427aabad2f2bb56f8726bfaf7c28/examples/problem_set/problem_set.pdf -------------------------------------------------------------------------------- /serdespy/__init__.py: -------------------------------------------------------------------------------- 1 | from .prs import * 2 | from .chmodel import * 3 | from .four_port_to_diff import * 4 | from .eye_diagram import * 5 | from .signal import * 6 | from .transmitter import * 7 | from .receiver import * 8 | from .rs_code import * 9 | -------------------------------------------------------------------------------- /serdespy/chmodel.py: -------------------------------------------------------------------------------- 1 | """Functions for Channel Modelling 2 | 3 | Notes 4 | ----- 5 | Based off of https://github.com/tchancarusone/Wireline-ChModel-Matlab 6 | """ 7 | 8 | #TODO: change comments to docstring 9 | 10 | import numpy as np 11 | 12 | def rlgc (r,l,g,c,d,f): 13 | #r, l, g, and c are transmission line paramaters: 14 | #r = resistance along the line per unit length [Ω/m] 15 | #l = inductance along the line per unit length [H/m] 16 | #g = conductance shunting the line per unit length [S/m] 17 | #c = capacitance shunting the line per unit length [F/m] 18 | #can be scalars or arrays if frequency dependent 19 | 20 | #l is the length of the frequency array 21 | #returns array s of size l*2*2 where s[n] = is the 2*2 paramater matrix 22 | #for the transmission line at frequency f[n] 23 | 24 | w = 2*np.pi*f 25 | gammad = d*np.sqrt( np.multiply( (r+1j * np.multiply(w,l)),(g+ 1j * np.multiply(w,c)) ) ); 26 | z0 = np.sqrt( np.divide((r + 1j*np.multiply(w,l) ),(g+1j*np.multiply(w,c))) ); 27 | A = np.cosh(gammad) 28 | B = np.multiply(z0,np.sinh(gammad)) 29 | C = np.divide(np.sinh(gammad),z0) 30 | D = A 31 | 32 | s = np.zeros((f.size,2,2),dtype=np.complex_) 33 | s[:,0,0] = A 34 | s[:,0,1] = B 35 | s[:,1,0] = C 36 | s[:,1,1] = D 37 | 38 | return s 39 | 40 | def impedance (z): 41 | 42 | #for 2-port network with impedance z (z is array if frequency dependent) 43 | #returns array s of size l*2*2 where s[n] = is the 2*2 paramater matrix 44 | #for the network at frequency f[n] 45 | 46 | l = z.size 47 | s = np.zeros((l,2,2),dtype=np.complex_) 48 | s[:,0,0] = np.ones(l) 49 | s[:,0,1] = z 50 | s[:,1,0] = np.zeros(l) 51 | s[:,1,1] = np.ones(l) 52 | return s 53 | 54 | def admittance (y): 55 | 56 | #for 2-port network with shunt admittance y (y is array if frequency dependent) 57 | #returns array s of size l*2*2 where s[n] = is the 2*2 paramater matrix 58 | #for the network at frequency f[n] 59 | 60 | l = y.size 61 | s = np.zeros((l,2,2),dtype=np.complex_) 62 | s[:,0,0] = np.ones(l) 63 | s[:,0,1] = np.zeros(l) 64 | s[:,1,0] = y 65 | s[:,1,1] = np.ones(l) 66 | return s 67 | 68 | def shunt_cap(c,w): 69 | 70 | #for 2-port network with shunt capacitance c specified at complex frequencies w [rad/s] 71 | #returns array s of size l*2*2 where s[n] = is the 2*2 paramater matrix 72 | #for the network at frequency f[n] 73 | 74 | l = w.size 75 | s = np.zeros((l,2,2),dtype=np.complex_) 76 | s[:,0,0] = np.ones(l) 77 | s[:,0,1] = np.zeros(l) 78 | s[:,1,0] = (c*w) 79 | s[:,1,1] = np.ones(l) 80 | return s 81 | 82 | 83 | def series_cap(c,w): 84 | 85 | #for 2-port network with series capacitance c specified at complex frequencies w [rad/s] 86 | #returns array s of size l*2*2 where s[n] = is the 2*2 paramater matrix 87 | #for the network at frequency f[n] 88 | 89 | l = c.size 90 | s = np.zeros((l,2,2),dtype=np.complex_) 91 | s[:,0,0] = np.ones(l) 92 | s[:,0,1] = 1/(c*w) 93 | s[:,1,0] = np.zeros(l) 94 | s[:,1,1] = np.ones(l) 95 | return s 96 | 97 | def series(networks): 98 | #Series combination of 2-port networks. Networks is a numpy array 99 | # containing the A,B,C,D matrix parameters of networks to be combined 100 | # in series 101 | 102 | out = np.matmul(networks[0],networks[1]) 103 | 104 | for i in range (networks.shape[0]-2): 105 | out = np.matmul(out,networks[i+2]) 106 | 107 | return out 108 | 109 | 110 | def freq2impulse(H, f): 111 | #Returns the impulse response, h, and (optionally) the step response, 112 | #hstep, for a system with complex frequency response stored in the array H 113 | #and corresponding frequency vector f. The time array is 114 | #returned in t. The frequency array must be linearly spaced. 115 | 116 | Hd = np.concatenate((H,np.conj(np.flip(H[1:H.size-1])))) 117 | h = np.real(np.fft.ifft(Hd)) 118 | #hstep = sp.convolve(h,np.ones(h.size)) 119 | #hstep = hstep[0:h.size] 120 | t= np.linspace(0,1/f[1],h.size+1) 121 | t = t[0:-1] 122 | 123 | return h,t 124 | 125 | def sparam(s11,s12,s21,s22,z0,f): 126 | 127 | #ABCD matrix description of a 2-port network with S-parameters 128 | #specified at the frequencies f in row vectors s11,s12,s21,s22 129 | 130 | #f should be a row vector 131 | 132 | #z0 is the characteristic impedance used for the S-parameter 133 | #measurements 134 | 135 | #Returns a structure containing the 2-port A,B,C,D matrix entries 136 | #at the frequencies in f: s.A, s.B, s.C, s.D 137 | 138 | s = np.zeros((f.size,2,2),dtype=np.complex_) 139 | 140 | s[:,0,0] = (1 + s11 - s22 - (s11*s22 - s12*s21))/(2*s21); 141 | s[:,0,1] = (z0*(1+s11 + s22 + (s11*s22 - s12*s21)))/(2*s21); 142 | s[:,1,0] = (1 - s11 - s22 + (s11*s22 - s12*s21))/(2*z0*s21); 143 | s[:,1,1] = (1 - s11 + s22 - (s11*s22 - s12*s21))/(2*s21); 144 | 145 | return s 146 | 147 | 148 | def zero_pad(H, f, t_d): 149 | '''Pads discrete time transfer function with zeros in to meet desired timestep in time domain 150 | 151 | Parameters 152 | ---------- 153 | H: array 154 | Discrete time transfer function 155 | 156 | f: array 157 | frequency vector 158 | 159 | t_d : float 160 | desired timestep 161 | 162 | 163 | Returns 164 | ------- 165 | H_zero_pad: array 166 | zero-padded transfer function 167 | 168 | f_zero_pad: array 169 | extended frequency vector to match H_zero_pad 170 | 171 | h_0: array 172 | impulse response of zero-padded TF 173 | 174 | t_0: array 175 | time vector corresponding to h_0 176 | ''' 177 | 178 | #frequency step 179 | f_step = f[1] 180 | 181 | #max frequency 182 | f_max = f[-1] 183 | 184 | #Desired max frequency to get t_d after IDFT 185 | f_max_d = int(1/(2*t_d)) 186 | 187 | #extend frequency vector to f_max_d 188 | f_zero_pad = np.hstack( (f,np.linspace(f_max,f_max_d,int((f_max_d-f_max)/f_step)))) 189 | 190 | #pad TF with zeros 191 | H_zero_pad = np.hstack((H, np.zeros((f_zero_pad.size-H.size)))) 192 | 193 | #Calculate impulse response of zero-padded TF 194 | h_0,t_0 = freq2impulse(H_zero_pad,f_zero_pad) 195 | 196 | return H_zero_pad, f_zero_pad, h_0, t_0 -------------------------------------------------------------------------------- /serdespy/eye_diagram.py: -------------------------------------------------------------------------------- 1 | """Functions for plotting eye diagrams 2 | 3 | """ 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | from .signal import * 7 | 8 | def simple_eye(signal, window_len, ntraces, tstep, title, res=600, linewidth=0.15): 9 | """Genterates simple eye diagram 10 | 11 | Parameters 12 | ---------- 13 | signal: array 14 | signal to be plotted 15 | 16 | window_len: int 17 | number of time steps in eye diagram x axis 18 | 19 | ntraces: int 20 | number of traces to be plotted 21 | 22 | tstep: float 23 | timestep of time domain signal 24 | 25 | title: 26 | title of the plot 27 | 28 | res: int, optional 29 | DPI resolution of the figure 30 | 31 | linewidth: float, optional 32 | width of lines in figure 33 | """ 34 | 35 | signal_crop = signal[0:ntraces*window_len] 36 | traces = np.split(signal_crop,ntraces) 37 | 38 | t = np.linspace(-(tstep*(window_len-1))/2,(tstep*(window_len-1))/2, window_len) 39 | 40 | plt.figure(dpi=res) 41 | for i in range(ntraces): 42 | plt.plot(t*1e12,np.reshape((traces[i][:]),window_len), color = 'blue', linewidth = linewidth) 43 | plt.title(title) 44 | plt.xlabel('[ps]') 45 | plt.ylabel('[V]') 46 | 47 | return True 48 | 49 | 50 | def rx_jitter_eye(signal, window_len, ntraces, n_symbols, tstep, title, stdev, res=600, linewidth=0.15,): 51 | """Genterates eye diagram with jitter introduved by splitting traces and applying 52 | horizontal shift with gaussian distribution 53 | 54 | Parameters 55 | ---------- 56 | signal: array 57 | signal to be plotted 58 | 59 | window_len: int 60 | number of time steps in eye diagram x axis 61 | 62 | ntraces: int 63 | number of traces to be plotted 64 | 65 | tstep: float 66 | timestep of time domain signal 67 | 68 | title: 69 | title of the plot 70 | 71 | stdev: float 72 | RMS value of gaussian jitter 73 | 74 | res: int, optional 75 | DPI resolution of the figure 76 | 77 | linewidth: float, optional 78 | width of lines in figure 79 | """ 80 | epsilon = np.random.normal(0,stdev,n_symbols) 81 | 82 | epsilon.clip(window_len*tstep) 83 | epsilon[0]=0 84 | 85 | signal_crop = signal[0:ntraces*window_len] 86 | traces = np.split(signal_crop,ntraces) 87 | 88 | plt.figure(dpi=res) 89 | for symbol_index,symbol_epsilon in enumerate(epsilon): 90 | epsilon_duration = int(round(symbol_epsilon/tstep)) 91 | t = np.linspace( -tstep * (((window_len-1))/2 + epsilon_duration ) ,tstep * (((window_len-1))/2 + epsilon_duration ), window_len) 92 | plt.plot(t*1e12,np.reshape((traces[symbol_index][:]),window_len), color = 'blue', linewidth = linewidth) 93 | 94 | 95 | plt.title(title) 96 | plt.xlabel('[ps]') 97 | plt.ylabel('[V]') 98 | plt.xlim([-tstep*1e12*(window_len)/2, tstep*1e12* (window_len)/2]) 99 | 100 | return True -------------------------------------------------------------------------------- /serdespy/four_port_to_diff.py: -------------------------------------------------------------------------------- 1 | """Differential transfer function from 4-port network touchstone file 2 | 3 | """ 4 | 5 | from serdespy.chmodel import * 6 | import numpy as np 7 | import skrf as rf 8 | 9 | 10 | def four_port_to_diff(network, port_def, source, load, option = 0, t_d = None): 11 | """ 12 | 13 | Parameters 14 | ---------- 15 | network : skrf Network 16 | 4 port network object 17 | example: 18 | s4p_file = 'path/to/touchstonefile.s4p' 19 | network = rf.Network(s4p_file) 20 | 21 | port_def: 2*2 array 22 | defines TX and RX side ports of network 23 | example: 24 | port_def = np.array([[TX1_index, RX1_index],[TX2_index, RX2_index]]) 25 | 26 | PORT DEFINITIONS: 27 | Port 1 -----> TX Side G11 RX Side <----- Port 2 28 | Port 3 -----> TX Side G12 RX Side <----- Port 4 29 | 30 | port_def = np.array([[0, 1],[2, 3]]) 31 | 32 | Returns 33 | ------- 34 | H : array 35 | tranfer function of differential channel 36 | 37 | f : array 38 | frequency vector 39 | 40 | h : array 41 | impulse response 42 | 43 | t = array 44 | time vector 45 | 46 | """ 47 | s_params = network.s 48 | f = network.f 49 | pts = f.size 50 | 51 | 52 | #change port def 53 | #ports = np.array([1,3,2,4]) 54 | s_params_new = np.copy(s_params) 55 | 56 | s_params_new[:,1,:] = np.copy(s_params[:,2,:]) 57 | s_params_new[:,2,:] = np.copy(s_params[:,1,:]) 58 | 59 | s_params_new[:,:,1] = np.copy(s_params[:,:,2]) 60 | s_params_new[:,:,2] = np.copy(s_params[:,:,1]) 61 | 62 | s_params_new[:,1,2] = np.copy(s_params[:,1,2]) 63 | s_params_new[:,2,1] = np.copy(s_params[:,2,1]) 64 | 65 | s_params_new[:,1,1] = np.copy(s_params[:,2,2]) 66 | s_params_new[:,2,2] = np.copy(s_params[:,1,1]) 67 | 68 | 69 | # 70 | M = np.array([[1,-1,0,0],[0,0,1,-1],[1,1,0,0],[0,0,1,1]]) 71 | invM = np.transpose(M) 72 | 73 | smm_params = np.zeros((4,4,pts), dtype = complex) 74 | 75 | for i in range(pts): 76 | smm_params[:,:,i] = (M@s_params_new[i,:,:]@invM)/2 77 | 78 | diff_s_params = smm_params[0:2,0:2,:] 79 | 80 | zl = load*np.ones((1,1,pts)) 81 | zs = source*np.ones((1,1,pts)) 82 | z0 = network.z0[0,0]*np.ones((1,1,pts)) 83 | 84 | #reflection coefficients 85 | gammaL = (zl - z0) / (zl + z0) 86 | gammaL[zl == np.inf] = 1 87 | 88 | gammaS = (zs - z0) / (zs + z0) 89 | gammaS[zs == np.inf] = 1 90 | 91 | gammaIn = (diff_s_params[0,0,:] + diff_s_params[0,1,:] * diff_s_params[1,0,:] * gammaL) / (1 - diff_s_params[1,1,:] * gammaL) 92 | 93 | H = diff_s_params[1,0,:] * (1 + gammaL) * (1 - gammaS) / (1 - diff_s_params[1,1,:] * gammaL) / (1 - gammaIn * gammaS) / 2 94 | 95 | H = H.reshape(pts,) 96 | 97 | if option == 1: 98 | H = H/H[0] 99 | 100 | if t_d != None: 101 | H, f, h, t = zero_pad(H,f,t_d) 102 | else: 103 | h, t = freq2impulse(H,f) 104 | 105 | return H, f, h, t -------------------------------------------------------------------------------- /serdespy/prs.py: -------------------------------------------------------------------------------- 1 | """Functions for Pseudo-Random Sequences 2 | 3 | """ 4 | 5 | import numpy as np 6 | from .signal import * 7 | 8 | def prbs31(seed): 9 | """Genterates PRBS31 sequence 10 | 11 | Parameters 12 | ---------- 13 | seed : int 14 | seed used to generate sequence 15 | should be greater than 0 and less than 2^31 16 | 17 | Returns 18 | ------- 19 | array: 20 | PRBS31 sequence 21 | """ 22 | if (type(seed)!= int) or (seed>0x7fffffff) or (seed < 1): 23 | print("seed must be positive int less than 2^31") 24 | return False 25 | 26 | code = seed 27 | seq = np.zeros(2**31-1, dtype=np.uint8) 28 | i = 0 29 | sequence_complete = False 30 | 31 | while(i<2**31): 32 | next_bit = ((code>>30) ^ (code>>27)) & 0x00000001 33 | code = ((code<<1) | next_bit) & 0x7fffffff 34 | seq[i] = next_bit 35 | i = i+1 36 | if (i%1e6 ==0): 37 | print("i =", i) 38 | if (code==seed): 39 | sequence_complete = True 40 | break 41 | 42 | if sequence_complete: 43 | return seq 44 | else: 45 | print ("error, PRBS sequence did not complete") 46 | return False 47 | 48 | def prbs24(seed): 49 | 50 | if (type(seed)!= int) or (seed>2**22) or (seed < 1): 51 | print("seed must be positive int less than 2^26") 52 | return False 53 | 54 | code = seed 55 | seq = np.zeros(2**24-1, dtype=np.uint8) 56 | i = 0 57 | sequence_complete = False 58 | 59 | while(i<2**24): 60 | next_bit = ((code>>23) ^ (code>>22)^(code>>21)^(code>>16)) & 0x000001 61 | code = ((code<<1) | next_bit) & 0xffffff 62 | seq[i] = next_bit 63 | i = i+1 64 | #if (i%1e6 ==0): 65 | # print("i =", i) 66 | if (code==seed): 67 | sequence_complete = True 68 | break 69 | 70 | if sequence_complete: 71 | return seq 72 | else: 73 | print ("error, PRBS sequence did not complete") 74 | return False 75 | 76 | def prbs22(seed): 77 | 78 | if (type(seed)!= int) or (seed>2**22) or (seed < 1): 79 | print("seed must be positive int less than 2^26") 80 | return False 81 | 82 | code = seed 83 | seq = np.zeros(2**22-1, dtype=np.uint8) 84 | i = 0 85 | sequence_complete = False 86 | 87 | while(i<2**22): 88 | next_bit = ((code>>21) ^ (code>>20)) & 0x000001 89 | code = ((code<<1) | next_bit) & 0x3fffff 90 | seq[i] = next_bit 91 | i = i+1 92 | #if (i%1e6 ==0): 93 | # print("i =", i) 94 | if (code==seed): 95 | sequence_complete = True 96 | break 97 | 98 | if sequence_complete: 99 | return seq 100 | else: 101 | print ("error, PRBS sequence did not complete") 102 | return False 103 | 104 | def prbs20(seed): 105 | """Genterates PRBS20 sequence 106 | 107 | Parameters 108 | ---------- 109 | seed : int 110 | seed used to generate sequence 111 | should be greater than 0 and less than 2^20 112 | 113 | Returns 114 | ------- 115 | array: 116 | PRBS20 sequence 117 | """ 118 | if (type(seed)!= int) or (seed>0xfffff) or (seed < 1): 119 | print("seed must be positive int less than 2^20") 120 | return False 121 | 122 | code = seed 123 | seq = np.zeros(2**20-1, dtype=np.uint8) 124 | i = 0 125 | sequence_complete = False 126 | 127 | while(i<2**20): 128 | next_bit = ((code>>19) ^ (code>>2)) & 0x00001 129 | code = ((code<<1) | next_bit) & 0xfffff 130 | seq[i] = next_bit 131 | i = i+1 132 | if (code==seed): 133 | sequence_complete = True 134 | break 135 | 136 | if sequence_complete: 137 | return seq 138 | else: 139 | print ("error, PRBS sequence did not complete") 140 | return False 141 | 142 | def prbs13(seed): 143 | """Genterates PRBS13 sequence 144 | 145 | Parameters 146 | ---------- 147 | seed : int 148 | seed used to generate sequence 149 | should be greater than 0 and less than 2^13 150 | 151 | Returns 152 | ------- 153 | array: 154 | PRBS13 sequence 155 | """ 156 | if (type(seed)!= int) or (seed>0x1fff) or (seed < 1): 157 | print("seed must be positive int less than 2^13") 158 | return False 159 | 160 | code = seed 161 | seq = np.zeros(2**13-1, dtype=np.uint8) 162 | i = 0 163 | sequence_complete = False 164 | 165 | while(i<2**13): 166 | next_bit = ((code>>12) ^ (code>>11) ^ (code>>1) ^ (code) ) & 0x00001 167 | code = ((code<<1) | next_bit) & 0x1fff 168 | seq[i] = next_bit 169 | i = i+1 170 | if (code==seed): 171 | sequence_complete = True 172 | break 173 | 174 | if sequence_complete: 175 | return seq 176 | else: 177 | print ("error, PRBS sequence did not complete") 178 | return False 179 | 180 | def prbs7(seed): 181 | """Genterates PRBS7 sequence 182 | 183 | Parameters 184 | ---------- 185 | seed : int 186 | seed used to generate sequence 187 | should be greater than 0 and less than 2^7 188 | 189 | Returns 190 | ------- 191 | array: 192 | PRBS7 sequence 193 | """ 194 | if (type(seed)!= int) or (seed>0x7f) or (seed < 1): 195 | print("seed must be positive int less than 2^7") 196 | return False 197 | 198 | code = seed 199 | seq = np.zeros(2**7-1, dtype=np.uint8) 200 | i = 0 201 | sequence_complete = False 202 | 203 | while(i<2**7): 204 | next_bit = ((code>>6) ^ (code>>5))&0x01 205 | code = ((code<<1) | next_bit) & 0x7f 206 | seq[i] = next_bit 207 | i = i+1 208 | if (code==seed): 209 | sequence_complete = True 210 | break 211 | 212 | if sequence_complete: 213 | return seq 214 | else: 215 | print ("error, PRBS sequence did not complete") 216 | return False 217 | 218 | def prqs10(seed): 219 | """Genterates PRQS10 sequence 220 | 221 | Parameters 222 | ---------- 223 | seed : int 224 | seed used to generate sequence 225 | should be greater than 0 and less than 2^20 226 | 227 | Returns 228 | ------- 229 | array: 230 | PRQS10 sequence 231 | """ 232 | 233 | a = prbs20(seed) 234 | shift = int((2**20-1)/3) 235 | b = np.hstack((a[shift:],a[:shift])) 236 | 237 | c = np.vstack((a,b)) 238 | 239 | pqrs = np.zeros(a.size,dtype = np.uint8) 240 | 241 | for i in range(a.size): 242 | pqrs[i] = grey_encode(c[:,i]) 243 | 244 | return pqrs 245 | 246 | 247 | def prqs12(seed): 248 | 249 | a = prbs24(seed) 250 | shift = int((2**24-1)/3) 251 | b = np.hstack((a[shift:],a[:shift])) 252 | 253 | c = np.vstack((a,b)) 254 | 255 | pqrs = np.zeros(a.size,dtype = np.uint8) 256 | 257 | for i in range(a.size): 258 | if (i%1e5 == 0): 259 | print("i =", i) 260 | pqrs[i] = grey_encode(c[:,i]) 261 | 262 | return pqrs 263 | 264 | 265 | 266 | def prbs_checker(n, prbs, data): 267 | """Compares array with PRBS array to check bit errors 268 | 269 | Parameters 270 | ---------- 271 | n : int 272 | prbs_n number 273 | 274 | prbs : array 275 | prbs_n sequence 276 | 277 | data: array 278 | seqence to be checked 279 | 280 | Returns 281 | ------- 282 | error_count : int 283 | number of errors in data 284 | 285 | error_idx : list 286 | indexes of error bits in data 287 | """ 288 | #TODO: add condition when there is an error in the first n bits 289 | 290 | test = np.copy(data[:n]) 291 | 292 | start_idx = -1 293 | 294 | for i in range (prbs.size): 295 | if np.array_equal(prbs[i:i+n], test): 296 | start_idx = i 297 | break 298 | 299 | if start_idx == -1: 300 | print ("invalid prbs_seq or incorrect n") 301 | return False 302 | 303 | #print(start_idx) 304 | 305 | error_count = 0 306 | 307 | error_idx = [] 308 | 309 | for i in range(data.size): 310 | if (data[i] != prbs[(start_idx+i)%(2**n-1)]): 311 | error_count = error_count+1 312 | error_idx = error_idx + [i] 313 | if (i==1000) and (error_count>333): 314 | print ("Either error in first n bits of data, or too many errors in data (more than 1/3 or bits are errors)") 315 | return False 316 | 317 | return [error_count, error_idx] 318 | 319 | 320 | def prqs_checker(n, prqs, data): 321 | """Compares array with PRQS array to check bit errors 322 | 323 | Parameters 324 | ---------- 325 | n : int 326 | prqs_n number 327 | 328 | prqs : array 329 | prqs_n sequence 330 | 331 | data: array 332 | seqence to be checked 333 | 334 | Returns 335 | ------- 336 | error_count : int 337 | number of errors in data 338 | 339 | error_idx : list 340 | indexes of error bits in data 341 | """ 342 | #TODO: add condition when there is an error in the first n bits 343 | 344 | test = np.copy(data[:n]) 345 | 346 | start_idx = -1 347 | 348 | for i in range (prqs.size): 349 | if np.array_equal(prqs[i:i+n], test): 350 | start_idx = i 351 | break 352 | 353 | if start_idx == -1: 354 | print ("invalid prqs or incorrect n") 355 | return False 356 | 357 | #print(start_idx) 358 | 359 | error_count = 0 360 | 361 | error_idx = [] 362 | 363 | for i in range(data.size): 364 | if (data[i] != prqs[(start_idx+i)%(2**(n*2)-1)]): 365 | if (abs(data[i]-prqs[(start_idx+i)%(2**(n*2)-1)]) == 2): 366 | print('2 errors') 367 | error_count = error_count+2 368 | else: 369 | error_count = error_count+1 370 | error_idx = error_idx + [i] 371 | 372 | return [error_count, error_idx] -------------------------------------------------------------------------------- /serdespy/receiver.py: -------------------------------------------------------------------------------- 1 | from .signal import * 2 | from .chmodel import * 3 | import numpy as np 4 | import skrf as rf 5 | import scipy as sp 6 | import matplotlib.pyplot as plt 7 | 8 | 9 | class Receiver: 10 | """Class to build model of time domain signal at receiver 11 | 12 | """ 13 | 14 | def __init__(self, signal, samples_per_symbol, f_nyquist , voltage_levels, shift = True, main_cursor = 1): 15 | """Initialize Receiver 16 | 17 | Parameters 18 | ---------- 19 | signal : array 20 | voltage waveform at input to reciever (after CTLE) 21 | 22 | samples_per_symbol: 23 | number of samples per UI 24 | 25 | t_step: 26 | timestep of signal 27 | 28 | voltage_levels: 29 | 30 | shift : bool 31 | if True, shifts signal so centre of eye opening is at beginning of signal 32 | 33 | main_cursor : float 34 | peak of pulse response, used for determining voltage level corresponding to pam4 symbols 35 | """ 36 | 37 | self.samples_per_symbol = samples_per_symbol 38 | 39 | self.voltage_levels = voltage_levels 40 | 41 | 42 | self.f_nyquist = f_nyquist 43 | 44 | self.main_cursor = main_cursor 45 | 46 | #signal_org maintains a copy of the original signal 47 | if (shift): 48 | #shift signal so that every index i*samples_per_symbol is the index at wich to slice the signal 49 | self.signal_org = shift_signal(np.copy(signal), samples_per_symbol) 50 | 51 | else: 52 | self.signal_org = np.copy(signal) 53 | 54 | 55 | self.signal = np.copy(self.signal_org) 56 | 57 | def reset(self): 58 | """Resets Signal to original (unequalized, no noise) signal""" 59 | 60 | self.signal = np.copy(self.signal_org) 61 | 62 | def noise(self, stdev): 63 | """Adds 0-mean gaussian noise to signal 64 | 65 | Parameters 66 | ---------- 67 | stdev : float 68 | standard deviation of noise 69 | """ 70 | 71 | self.signal = np.copy(self.signal_org) + np.random.normal(scale=stdev, size = self.signal_org.size) 72 | 73 | 74 | 75 | def slice_signal(self): 76 | 77 | signal_BR = np.zeros(int(np.floor((self.signal.size/self.samples_per_symbol)))) 78 | 79 | for i in range(signal_BR.size): 80 | signal_BR[i] = self.signal[i*self.samples_per_symbol] 81 | 82 | self.signal_BR = signal_BR 83 | 84 | def FFE(self,tap_weights, n_taps_pre): 85 | """Behavioural model of FFE. Input signal is self.signal, this method modifies self.signal 86 | 87 | Parameters 88 | ---------- 89 | tap_weights: array 90 | DFE tap weights 91 | 92 | n_taps_pre: int 93 | number of precursor taps 94 | """ 95 | 96 | n_taps = tap_weights.size 97 | 98 | tap_filter = np.zeros((n_taps-1)*self.samples_per_symbol+1) 99 | 100 | for i in range(n_taps): 101 | tap_filter[i*self.samples_per_symbol] = tap_weights[i] 102 | 103 | length = self.signal.size 104 | self.signal = np.convolve(self.signal,tap_filter) 105 | #shift = round((n_taps_pre-n_taps)*self.samples_per_symbol) 106 | self.signal = self.signal[n_taps_pre*self.samples_per_symbol:n_taps_pre*self.samples_per_symbol+length] 107 | 108 | def FFE_BR(self, tap_weights, n_taps_pre): 109 | """Behavioural model of FFE. Input signal is self.signal, this method modifies self.signal 110 | 111 | Parameters 112 | ---------- 113 | tap_weights: array 114 | DFE tap weights 115 | 116 | n_taps_pre: int 117 | number of precursor taps 118 | """ 119 | 120 | self.signal_BR = sp.signal.fftconvolve(self.signal_BR,tap_weights, mode="same") 121 | 122 | def nrz_DFE(self, tap_weights): 123 | """Behavioural model of DFE for NRZ signal. Input signal is self.signal, this method modifies self.signal 124 | 125 | Parameters 126 | ---------- 127 | tap_weights: array 128 | DFE tap weights 129 | """ 130 | 131 | signal_out = np.copy(self.signal) 132 | n_taps = tap_weights.size 133 | n_symbols = int(round(self.signal.size/self.samples_per_symbol)) 134 | half_symbol = int(round(self.samples_per_symbol/2)) 135 | taps = np.zeros(n_taps) 136 | 137 | t = self.main_cursor*((self.voltage_levels[0]+self.voltage_levels[1])/2) 138 | 139 | for symbol_idx in range(n_symbols-1): 140 | 141 | idx = symbol_idx*self.samples_per_symbol 142 | 143 | #decide on value of current bit 144 | symbol = nrz_decision(signal_out[idx],t) 145 | 146 | #update_taps 147 | taps = np.hstack((self.voltage_levels[symbol], taps[:-1])) 148 | 149 | #apply feedback to signal 150 | feedback = np.sum(taps*tap_weights) 151 | 152 | signal_out[idx+half_symbol:idx+self.samples_per_symbol+half_symbol] -= feedback 153 | 154 | 155 | self.signal = signal_out 156 | 157 | def nrz_DFE_BR(self, tap_weights): 158 | """Behavioural model of DFE for NRZ signal. Input signal is self.signal, this method modifies self.signal 159 | 160 | Parameters 161 | ---------- 162 | tap_weights: array 163 | DFE tap weights 164 | """ 165 | 166 | signal_out = np.copy(self.signal_BR) 167 | symbols_out = np.zeros(self.signal_BR.size, dtype = np.uint8) 168 | n_taps = tap_weights.size 169 | taps = np.zeros(n_taps) 170 | 171 | t = self.main_cursor*((self.voltage_levels[0]+self.voltage_levels[1])/2) 172 | 173 | for symbol_idx in range(len(self.signal_BR)-1): 174 | 175 | #decide on value of current bit 176 | symbols_out[symbol_idx] = nrz_decision(signal_out[symbol_idx],t) 177 | 178 | #update taps 179 | taps[1:] = taps[:-1] 180 | taps[0] = self.voltage_levels[symbols_out[symbol_idx]] 181 | 182 | #apply decision feedback to next bit 183 | for i in range(n_taps): 184 | signal_out[symbol_idx+1] -= taps[i]*tap_weights[i] 185 | 186 | 187 | self.signal_BR = signal_out 188 | 189 | def pam4_DFE(self, tap_weights): 190 | """Behavioural model of DFE for PAM-4 signal. Input signal is self.signal, this method modifies self.signal. 191 | 192 | Parameters 193 | ---------- 194 | tap_weights: array 195 | DFE tap weights 196 | """ 197 | 198 | signal_out = np.copy(self.signal) 199 | n_taps = tap_weights.size 200 | n_symbols = int(round(self.signal.size/self.samples_per_symbol)) 201 | half_symbol = int(round(self.samples_per_symbol/2)) 202 | taps = np.zeros(n_taps) 203 | 204 | l = self.main_cursor*((self.voltage_levels[0]+self.voltage_levels[1])/2) 205 | m = self.main_cursor*((self.voltage_levels[1]+self.voltage_levels[2])/2) 206 | h = self.main_cursor*((self.voltage_levels[2]+self.voltage_levels[3])/2) 207 | 208 | for symbol_idx in range(n_symbols-1): 209 | 210 | idx = symbol_idx*self.samples_per_symbol 211 | 212 | #decide on value of current bit 213 | symbol = pam4_decision(signal_out[idx],l,m,h) 214 | 215 | #update taps 216 | taps[1:] = taps[:-1] 217 | taps[0] = self.voltage_levels[symbol] 218 | 219 | #apply feedback to signal 220 | feedback = np.sum(taps*tap_weights) 221 | 222 | signal_out[idx+half_symbol:idx+self.samples_per_symbol+half_symbol] -= feedback 223 | 224 | self.signal = signal_out 225 | 226 | 227 | def pam4_DFE_BR(self, tap_weights): 228 | 229 | signal_out = np.copy(self.signal_BR) 230 | 231 | symbols_out = np.zeros(self.signal_BR.size, dtype = np.uint8) 232 | 233 | n_taps = tap_weights.size 234 | 235 | taps = np.zeros(n_taps) 236 | 237 | l = self.main_cursor*((self.voltage_levels[0]+self.voltage_levels[1])/2) 238 | m = self.main_cursor*((self.voltage_levels[1]+self.voltage_levels[2])/2) 239 | h = self.main_cursor*((self.voltage_levels[2]+self.voltage_levels[3])/2) 240 | 241 | for symbol_idx in range(len(self.signal_BR)-1): 242 | 243 | #decide on value of current bit 244 | symbols_out[symbol_idx] = pam4_decision(signal_out[symbol_idx],l,m,h) 245 | 246 | #update taps 247 | taps[1:] = taps[:-1] 248 | taps[0] = self.voltage_levels[symbols_out[symbol_idx]] 249 | 250 | #apply decision feedback to next bit 251 | for i in range(n_taps): 252 | signal_out[symbol_idx+1] -= taps[i]*tap_weights[i] 253 | 254 | self.signal_BR = signal_out 255 | self.symbols_out = symbols_out 256 | 257 | 258 | 259 | -------------------------------------------------------------------------------- /serdespy/reedsolo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Copyright (c) 2012-2015 Tomer Filiba 5 | # Copyright (c) 2015 rotorgit 6 | # Copyright (c) 2015-2020 Stephen Larroque 7 | 8 | ''' 9 | Reed Solomon 10 | ============ 11 | 12 | A pure-python `universal errors-and-erasures Reed-Solomon Codec `_ 13 | , based on the wonderful tutorial at 14 | `wikiversity `_, 15 | written by "Bobmath" and "LRQ3000". 16 | 17 | The code of wikiversity is here consolidated into a nice API with exceptions handling. 18 | The algorithm can correct up to 2*e+v <= nsym, where e is the number of errors, 19 | v the number of erasures and nsym = n-k = the number of ECC (error correction code) symbols. 20 | This means that you can either correct exactly floor(nsym/2) errors, or nsym erasures 21 | (errors where you know the position), and a combination of both errors and erasures. 22 | The code should work on pretty much any reasonable version of python (2.4-3.5), 23 | but I'm only testing on 2.7 - 3.4. 24 | 25 | .. note:: 26 | The codec is universal, meaning that it can decode any message encoded by another RS encoder 27 | as long as you provide the correct parameters. 28 | Note however that if you use higher fields (ie, bigger c_exp), the algorithms will be slower, first because 29 | we cannot then use the optimized bytearray() structure but only array.array('i', ...), and also because 30 | Reed-Solomon's complexity is quadratic (both in encoding and decoding), so this means that the longer 31 | your messages, the longer it will take to encode/decode (quadratically!). 32 | 33 | The algorithm itself can handle messages up to (2^c_exp)-1 symbols, including the ECC symbols, 34 | and each symbol can have a value of up to (2^c_exp)-1 (indeed, both the message length and the maximum 35 | value for one character is constrained by the same mathematical reason). By default, we use the field GF(2^8), 36 | which means that you are limited to values between 0 and 255 (perfect to represent a single hexadecimal 37 | symbol on computers, so you can encode any binary stream) and limited to messages+ecc of maximum 38 | length 255. However, you can "chunk" longer messages to fit them into the message length limit. 39 | The ``RSCodec`` class will automatically apply chunking, by splitting longer messages into chunks and 40 | encode/decode them separately; it shouldn't make a difference from an API perspective (ie, from your POV). 41 | 42 | :: 43 | 44 | # Initialization 45 | >>> from reedsolo import RSCodec 46 | >>> rsc = RSCodec(10) # 10 ecc symbols 47 | 48 | # Encoding 49 | >>> rsc.encode([1,2,3,4]) 50 | b'\x01\x02\x03\x04,\x9d\x1c+=\xf8h\xfa\x98M' 51 | >>> rsc.encode(bytearray([1,2,3,4])) 52 | bytearray(b'\x01\x02\x03\x04,\x9d\x1c+=\xf8h\xfa\x98M') 53 | >>> rsc.encode(b'hello world') 54 | b'hello world\xed%T\xc4\xfd\xfd\x89\xf3\xa8\xaa' 55 | # Note that chunking is supported transparently to encode any string length. 56 | 57 | # Decoding (repairing) 58 | >>> rsc.decode(b'hello world\xed%T\xc4\xfd\xfd\x89\xf3\xa8\xaa')[0] 59 | b'hello world' 60 | >>> rsc.decode(b'heXlo worXd\xed%T\xc4\xfdX\x89\xf3\xa8\xaa')[0] # 3 errors 61 | b'hello world' 62 | >>> rsc.decode(b'hXXXo worXd\xed%T\xc4\xfdX\x89\xf3\xa8\xaa')[0] # 5 errors 63 | b'hello world' 64 | >>> rsc.decode(b'hXXXo worXd\xed%T\xc4\xfdXX\xf3\xa8\xaa')[0] # 6 errors - fail 65 | Traceback (most recent call last): 66 | ... 67 | ReedSolomonError: Could not locate error 68 | 69 | >>> rsc = RSCodec(12) # using 2 more ecc symbols (to correct max 6 errors or 12 erasures) 70 | >>> rsc.encode(b'hello world') 71 | b'hello world?Ay\xb2\xbc\xdc\x01q\xb9\xe3\xe2=' 72 | >>> rsc.decode(b'hello worXXXXy\xb2XX\x01q\xb9\xe3\xe2=')[0] # 6 errors - ok 73 | b'hello world' 74 | >>> rsc.decode(b'helXXXXXXXXXXy\xb2XX\x01q\xb9\xe3\xe2=', erase_pos=[3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 16])[0] # 12 erasures - OK 75 | b'hello world' 76 | 77 | # Checking 78 | >> rsc.check(b'hello worXXXXy\xb2XX\x01q\xb9\xe3\xe2=') 79 | [False] 80 | >> rmes, rmesecc = rsc.decode(b'hello worXXXXy\xb2XX\x01q\xb9\xe3\xe2=') 81 | >> rsc.check(rmesecc) 82 | [True] 83 | 84 | # To use longer chunks or bigger values than 255 (may be very slow) 85 | >> rsc = RSCodec(12, nsize=4095) # always use a power of 2 minus 1 86 | >> rsc = RSCodec(12, c_exp=12) # alternative way to set nsize=4095 87 | >> mes = 'a' * (4095-12) 88 | >> mesecc = rsc.encode(mes) 89 | >> mesecc[2] = 1 90 | >> mesecc[-1] = 1 91 | >> rmes, rmesecc = rsc.decode(mesecc) 92 | >> rsc.check(mesecc) 93 | [False] 94 | >> rsc.check(rmesecc) 95 | [True] 96 | 97 | If you want full control, you can skip the API and directly use the library as-is. Here's how: 98 | 99 | First you need to init the precomputed tables: 100 | >> import reedsolo as rs 101 | >> rs.init_tables(0x11d) 102 | Pro tip: if you get the error: ValueError: byte must be in range(0, 256), please check that your prime polynomial is correct for your field. 103 | Pro tip2: by default, you can only encode messages of max length and max symbol value = 256. If you want to encode bigger messages, 104 | please use the following (where c_exp is the exponent of your Galois Field, eg, 12 = max length 2^12 = 4096): 105 | >> prim = rs.find_prime_polys(c_exp=12, fast_primes=True, single=True) 106 | >> rs.init_tables(c_exp=12, prim=prim) 107 | 108 | Let's define our RS message and ecc size: 109 | >> n = 255 # length of total message+ecc 110 | >> nsym = 12 # length of ecc 111 | >> mes = "a" * (n-nsym) # generate a sample message 112 | 113 | To optimize, you can precompute the generator polynomial: 114 | >> gen = rs.rs_generator_poly_all(n) 115 | 116 | Then to encode: 117 | >> mesecc = rs.rs_encode_msg(mes, nsym, gen=gen[nsym]) 118 | 119 | Let's tamper our message: 120 | >> mesecc[1] = 0 121 | 122 | To decode: 123 | >> rmes, recc, errata_pos = rs.rs_correct_msg(mesecc, nsym, erase_pos=erase_pos) 124 | Note that both the message and the ecc are corrected (if possible of course). 125 | Pro tip: if you know a few erasures positions, you can specify them in a list `erase_pos` to double the repair power. But you can also just specify an empty list. 126 | 127 | If the decoding fails, it will normally automatically check and raise a ReedSolomonError exception that you can handle. 128 | However if you want to manually check if the repaired message is correct, you can do so: 129 | >> rs.rs_check(rmes + recc, nsym) 130 | 131 | Note: if you want to use multiple reedsolomon with different parameters, you need to backup the globals and restore them before calling reedsolo functions: 132 | >> rs.init_tables() 133 | >> global gf_log, gf_exp, field_charac 134 | >> bak_gf_log, bak_gf_exp, bak_field_charac = gf_log, gf_exp, field_charac 135 | Then at anytime, you can do: 136 | >> global gf_log, gf_exp, field_charac 137 | >> gf_log, gf_exp, field_charac = bak_gf_log, bak_gf_exp, bak_field_charac 138 | >> mesecc = rs.rs_encode_msg(mes, nsym) 139 | >> rmes, recc, errata_pos = rs.rs_correct_msg(mesecc, nsym) 140 | The globals backup is not necessary if you use RSCodec, it will be automatically managed. 141 | 142 | Read the sourcecode's comments for more info about how it works, and for the various parameters you can setup if 143 | you need to interface with other RS codecs. 144 | 145 | ''' 146 | 147 | # TODO IMPORTANT: try to keep the same convention for the ordering of polynomials inside lists throughout the code and functions (because for now there are a lot of list reversing in order to make it work, you never know the order of a polynomial, ie, if the first coefficient is the major degree or the constant term...). 148 | 149 | import itertools 150 | import math 151 | 152 | 153 | ################### INIT and stuff ################### 154 | 155 | try: # pragma: no cover 156 | bytearray 157 | _bytearray = bytearray 158 | except NameError: # pragma: no cover 159 | from array import array 160 | def _bytearray(obj = 0, encoding = "latin-1"): # pragma: no cover 161 | '''Simple bytearray replacement''' 162 | # always use Latin-1 and not UTF8 because Latin-1 maps the first 256 characters to their bytevalue equivalents. UTF8 may mangle your data (particularly at vale 128) 163 | if isinstance(obj, str): 164 | obj = [ord(ch) for ch in obj.encode(encoding)] 165 | elif isinstance(obj, int): 166 | obj = [0] * obj 167 | return array("B", obj) 168 | 169 | try: # pragma: no cover 170 | xrange 171 | except NameError: # pragma: no cover 172 | # compatibility with Python 3+ 173 | xrange = range 174 | 175 | class ReedSolomonError(Exception): 176 | pass 177 | 178 | gf_exp = _bytearray([1] * 512) # For efficiency, gf_exp[] has size 2*GF_SIZE, so that a simple multiplication of two numbers can be resolved without calling % 255. For more infos on how to generate this extended exponentiation table, see paper: "Fast software implementation of finite field operations", Cheng Huang and Lihao Xu, Washington University in St. Louis, Tech. Rep (2003). 179 | gf_log = _bytearray(256) 180 | field_charac = int(2**8 - 1) 181 | 182 | ################### GALOIS FIELD ELEMENTS MATHS ################### 183 | 184 | def rwh_primes1(n): 185 | # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188 186 | ''' Returns a list of primes < n ''' 187 | sieve = [True] * int(n/2) 188 | for i in xrange(3,int(n**0.5)+1,2): 189 | if sieve[int(i/2)]: 190 | sieve[int((i*i)/2)::i] = [False] * int((n-i*i-1)/(2*i)+1) 191 | return [2] + [2*i+1 for i in xrange(1,int(n/2)) if sieve[i]] 192 | 193 | def find_prime_polys(generator=2, c_exp=8, fast_primes=False, single=False): 194 | '''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.''' 195 | # fast_primes will output less results but will be significantly faster. 196 | # single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that. 197 | 198 | # A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows. 199 | # Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique. 200 | # For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^1, α^2, ..., α^(p-1)) will be galois field it becomes 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we modulo 255, this will generate 256 == 1. Then 2, 4, 8, 16, etc. giving us a repeating pattern of numbers. This is very bad, as it's then not anymore a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just modulo 255, but we need another number above 255, which is called the prime polynomial. 201 | # Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial. 202 | 203 | # Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow). 204 | # Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no determistic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf 205 | # Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986. 206 | 207 | # Prepare the finite field characteristic (2^p - 1), this also represent the maximum possible value in this field 208 | root_charac = 2 # we're in GF(2) 209 | field_charac = int(root_charac**c_exp - 1) 210 | field_charac_next = int(root_charac**(c_exp+1) - 1) 211 | 212 | prim_candidates = [] 213 | if fast_primes: 214 | prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible 215 | prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes 216 | else: 217 | prim_candidates = xrange(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible) 218 | 219 | # Start of the main loop 220 | correct_primes = [] 221 | for prim in prim_candidates: # try potential candidates primitive irreducible polys 222 | seen = _bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default) 223 | conflict = False # flag to know if there was at least one conflict 224 | 225 | # Second loop, build the whole Galois Field 226 | x = 1 227 | for i in xrange(field_charac): 228 | # Compute the next value in the field (ie, the next power of alpha/generator) 229 | x = gf_mult_noLUT(x, generator, prim, field_charac+1) 230 | 231 | # Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime) 232 | if x > field_charac or seen[x] == 1: 233 | conflict = True 234 | break 235 | # Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha 236 | else: 237 | seen[x] = 1 238 | 239 | # End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial! 240 | if not conflict: 241 | correct_primes.append(prim) 242 | if single: return prim 243 | 244 | # Return the list of all prime polynomials 245 | return correct_primes # you can use the following to print the hexadecimal representation of each prime polynomial: print [hex(i) for i in correct_primes] 246 | 247 | def init_tables(prim=0x11d, generator=2, c_exp=8): 248 | '''Precompute the logarithm and anti-log tables for faster computation later, using the provided primitive polynomial. 249 | These tables are used for multiplication/division since addition/substraction are simple XOR operations inside GF of characteristic 2. 250 | The basic idea is quite simple: since b**(log_b(x), log_b(y)) == x * y given any number b (the base or generator of the logarithm), then we can use any number b to precompute logarithm and anti-log (exponentiation) tables to use for multiplying two numbers x and y. 251 | That's why when we use a different base/generator number, the log and anti-log tables are drastically different, but the resulting computations are the same given any such tables. 252 | For more infos, see https://en.wikipedia.org/wiki/Finite_field_arithmetic#Implementation_tricks 253 | ''' 254 | # generator is the generator number (the "increment" that will be used to walk through the field by multiplication, this must be a prime number). This is basically the base of the logarithm/anti-log tables. Also often noted "alpha" in academic books. 255 | # prim is the primitive/prime (binary) polynomial and must be irreducible (ie, it can't represented as the product of two smaller polynomials). It's a polynomial in the binary sense: each bit is a coefficient, but in fact it's an integer between field_charac+1 and field_charac*2, and not a list of gf values. The prime polynomial will be used to reduce the overflows back into the range of the Galois Field without duplicating values (all values should be unique). See the function find_prime_polys() and: http://research.swtch.com/field and http://www.pclviewer.com/rs2/galois.html 256 | # note that the choice of generator or prime polynomial doesn't matter very much: any two finite fields of size p^n have identical structure, even if they give the individual elements different names (ie, the coefficients of the codeword will be different, but the final result will be the same: you can always correct as many errors/erasures with any choice for those parameters). That's why it makes sense to refer to all the finite fields, and all decoders based on Reed-Solomon, of size p^n as one concept: GF(p^n). It can however impact sensibly the speed (because some parameters will generate sparser tables). 257 | # c_exp is the exponent for the field's characteristic GF(2^c_exp) 258 | 259 | # Redefine _bytearray() in case we need to support integers or messages of length > 256 260 | global _bytearray 261 | if c_exp <= 8: 262 | _bytearray = bytearray 263 | else: 264 | from array import array 265 | def _bytearray(obj = 0, encoding = "latin-1"): 266 | '''Fake bytearray replacement, supporting int values above 255''' 267 | # always use Latin-1 and not UTF8 because Latin-1 maps the first 256 characters to their bytevalue equivalents. UTF8 may mangle your data (particularly at vale 128) 268 | if isinstance(obj, str): # obj is a string, convert to list of ints 269 | obj = obj.encode(encoding) 270 | if isinstance(obj, str): # Py2 str: convert to list of ascii ints 271 | obj = [ord(chr) for chr in obj] 272 | elif isinstance(obj, bytes): # Py3 bytes: characters are bytes, need to convert to int for array.array('i', obj) 273 | obj = [int(chr) for chr in obj] 274 | else: 275 | raise(ValueError, "Type of object not recognized!") 276 | elif isinstance(obj, int): # compatibility with list preallocation bytearray(int) 277 | obj = [0] * obj 278 | # Else obj is a list of int, it's ok 279 | return array("i", obj) 280 | 281 | # Init global tables 282 | global gf_exp, gf_log, field_charac 283 | field_charac = int(2**c_exp - 1) 284 | gf_exp = _bytearray(field_charac * 2) # anti-log (exponential) table. The first two elements will always be [GF256int(1), generator] 285 | gf_log = _bytearray(field_charac+1) # log table, log[0] is impossible and thus unused 286 | 287 | # For each possible value in the galois field 2^8, we will pre-compute the logarithm and anti-logarithm (exponential) of this value 288 | # To do that, we generate the Galois Field F(2^p) by building a list starting with the element 0 followed by the (p-1) successive powers of the generator α : 1, α, α^1, α^2, ..., α^(p-1). 289 | x = 1 290 | for i in xrange(field_charac): # we could skip index 255 which is equal to index 0 because of modulo: g^255==g^0 but either way, this does not change the later outputs (ie, the ecc symbols will be the same either way) 291 | gf_exp[i] = x # compute anti-log for this value and store it in a table 292 | gf_log[x] = i # compute log at the same time 293 | x = gf_mult_noLUT(x, generator, prim, field_charac+1) 294 | 295 | # If you use only generator==2 or a power of 2, you can use the following which is faster than gf_mult_noLUT(): 296 | #x <<= 1 # multiply by 2 (change 1 by another number y to multiply by a power of 2^y) 297 | #if x & 0x100: # similar to x >= 256, but a lot faster (because 0x100 == 256) 298 | #x ^= prim # substract the primary polynomial to the current value (instead of 255, so that we get a unique set made of coprime numbers), this is the core of the tables generation 299 | 300 | # Optimization: double the size of the anti-log table so that we don't need to mod 255 to stay inside the bounds (because we will mainly use this table for the multiplication of two GF numbers, no more). 301 | for i in xrange(field_charac, field_charac * 2): 302 | gf_exp[i] = gf_exp[i - field_charac] 303 | 304 | return [gf_log, gf_exp, field_charac] 305 | 306 | def gf_add(x, y): 307 | return x ^ y 308 | 309 | def gf_sub(x, y): 310 | return x ^ y # in binary galois field, substraction is just the same as addition (since we mod 2) 311 | 312 | def gf_neg(x): 313 | return x 314 | 315 | def gf_inverse(x): 316 | return gf_exp[field_charac - gf_log[x]] # gf_inverse(x) == gf_div(1, x) 317 | 318 | def gf_mul(x, y): 319 | if x == 0 or y == 0: 320 | return 0 321 | return gf_exp[(gf_log[x] + gf_log[y]) % field_charac] 322 | 323 | def gf_div(x, y): 324 | if y == 0: 325 | raise ZeroDivisionError() 326 | if x == 0: 327 | return 0 328 | return gf_exp[(gf_log[x] + field_charac - gf_log[y]) % field_charac] 329 | 330 | def gf_pow(x, power): 331 | return gf_exp[(gf_log[x] * power) % field_charac] 332 | 333 | def gf_mult_noLUT_slow(x, y, prim=0): 334 | '''Multiplication in Galois Fields without using a precomputed look-up table (and thus it's slower) by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.''' 335 | 336 | ### Define bitwise carry-less operations as inner functions ### 337 | def cl_mult(x,y): 338 | '''Bitwise carry-less multiplication on integers''' 339 | z = 0 340 | i = 0 341 | while (y>>i) > 0: 342 | if y & (1<> bits: bits += 1 351 | return bits 352 | 353 | def cl_div(dividend, divisor=None): 354 | '''Bitwise carry-less long division on integers and returns the remainder''' 355 | # Compute the position of the most significant bit for each integers 356 | dl1 = bit_length(dividend) 357 | dl2 = bit_length(divisor) 358 | # If the dividend is smaller than the divisor, just exit 359 | if dl1 < dl2: 360 | return dividend 361 | # Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor) 362 | for i in xrange(dl1-dl2,-1,-1): 363 | # Check that the dividend is divisible (useless for the first iteration but important for the next ones) 364 | if dividend & (1 << i+dl2-1): 365 | # If divisible, then shift the divisor to align the most significant bits and XOR (carry-less substraction) 366 | dividend ^= divisor << i 367 | return dividend 368 | 369 | ### Main GF multiplication routine ### 370 | 371 | # Multiply the gf numbers 372 | result = cl_mult(x,y) 373 | # Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds 374 | if prim > 0: 375 | result = cl_div(result, prim) 376 | 377 | return result 378 | 379 | def gf_mult_noLUT(x, y, prim=0, field_charac_full=256, carryless=True): 380 | '''Galois Field integer multiplication using Russian Peasant Multiplication algorithm (faster than the standard multiplication + modular reduction). 381 | If prim is 0 and carryless=False, then the function produces the result for a standard integers multiplication (no carry-less arithmetics nor modular reduction).''' 382 | r = 0 383 | while y: # while y is above 0 384 | if y & 1: r = r ^ x if carryless else r + x # y is odd, then add the corresponding x to r (the sum of all x's corresponding to odd y's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!). 385 | y = y >> 1 # equivalent to y // 2 386 | x = x << 1 # equivalent to x*2 387 | if prim > 0 and x & field_charac_full: x = x ^ prim # GF modulo: if x >= 256 then apply modular reduction using the primitive polynomial (we just substract, but since the primitive number can be above 256 then we directly XOR). 388 | 389 | return r 390 | 391 | 392 | ################### GALOIS FIELD POLYNOMIALS MATHS ################### 393 | 394 | def gf_poly_scale(p, x): 395 | return _bytearray([gf_mul(p[i], x) for i in xrange(len(p))]) 396 | 397 | def gf_poly_add(p, q): 398 | r = _bytearray( max(len(p), len(q)) ) 399 | r[len(r)-len(p):len(r)] = p 400 | #for i in xrange(len(p)): 401 | #r[i + len(r) - len(p)] = p[i] 402 | for i in xrange(len(q)): 403 | r[i + len(r) - len(q)] ^= q[i] 404 | return r 405 | 406 | def gf_poly_mul(p, q): 407 | '''Multiply two polynomials, inside Galois Field (but the procedure is generic). Optimized function by precomputation of log.''' 408 | # Pre-allocate the result array 409 | r = _bytearray(len(p) + len(q) - 1) 410 | # Precompute the logarithm of p 411 | lp = [gf_log[p[i]] for i in xrange(len(p))] 412 | # Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficients of p with all coefficients of q) 413 | for j in xrange(len(q)): 414 | qj = q[j] # optimization: load the coefficient once 415 | if qj != 0: # log(0) is undefined, we need to check that 416 | lq = gf_log[qj] # Optimization: precache the logarithm of the current coefficient of q 417 | for i in xrange(len(p)): 418 | if p[i] != 0: # log(0) is undefined, need to check that... 419 | r[i + j] ^= gf_exp[lp[i] + lq] # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j])) 420 | return r 421 | 422 | def gf_poly_mul_simple(p, q): # simple equivalent way of multiplying two polynomials without precomputation, but thus it's slower 423 | '''Multiply two polynomials, inside Galois Field''' 424 | # Pre-allocate the result array 425 | r = _bytearray(len(p) + len(q) - 1) 426 | # Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficients of p with all coefficients of q) 427 | for j in xrange(len(q)): 428 | for i in xrange(len(p)): 429 | r[i + j] ^= gf_mul(p[i], q[j]) # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j])) -- you can see it's your usual polynomial multiplication 430 | return r 431 | 432 | def gf_poly_neg(poly): 433 | '''Returns the polynomial with all coefficients negated. In GF(2^p), negation does not change the coefficient, so we return the polynomial as-is.''' 434 | return poly 435 | 436 | def gf_poly_div(dividend, divisor): 437 | '''Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (doesn't work with standard polynomials outside of this galois field).''' 438 | # CAUTION: this function expects polynomials to follow the opposite convention at decoding: the terms must go from the biggest to lowest degree (while most other functions here expect a list from lowest to biggest degree). eg: 1 + 2x + 5x^2 = [5, 2, 1], NOT [1, 2, 5] 439 | 440 | msg_out = _bytearray(dividend) # Copy the dividend list and pad with 0 where the ecc bytes will be computed 441 | #normalizer = divisor[0] # precomputing for performance 442 | for i in xrange(len(dividend) - (len(divisor)-1)): 443 | #msg_out[i] /= normalizer # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0]. For more infos, see http://en.wikipedia.org/wiki/Synthetic_division 444 | coef = msg_out[i] # precaching 445 | if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization). In fact if you remove it, it should still work because gf_mul() will take care of the condition. But it's still a good practice to put the condition here. 446 | for j in xrange(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient 447 | if divisor[j] != 0: # log(0) is undefined 448 | msg_out[i + j] ^= gf_mul(divisor[j], coef) # equivalent to the more mathematically correct (but xoring directly is faster): msg_out[i + j] += -divisor[j] * coef 449 | 450 | # The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder. 451 | separator = -(len(divisor)-1) 452 | return msg_out[:separator], msg_out[separator:] # return quotient, remainder. 453 | 454 | def gf_poly_square(poly): # pragma: no cover 455 | '''Linear time implementation of polynomial squaring. For details, see paper: "A fast software implementation for arithmetic operations in GF (2n)". De Win, E., Bosselaers, A., Vandenberghe, S., De Gersem, P., & Vandewalle, J. (1996, January). In Advances in Cryptology - Asiacrypt'96 (pp. 65-76). Springer Berlin Heidelberg.''' 456 | length = len(poly) 457 | out = _bytearray(2*length - 1) 458 | for i in xrange(length-1): 459 | p = poly[i] 460 | k = 2*i 461 | if p != 0: 462 | #out[k] = gf_exp[(2*gf_log[p]) % field_charac] # not necessary to modulo (2^r)-1 since gf_exp is duplicated up to 510. 463 | out[k] = gf_exp[2*gf_log[p]] 464 | #else: # not necessary since the output is already initialized to an array of 0 465 | #out[k] = 0 466 | out[2*length-2] = gf_exp[2*gf_log[poly[length-1]]] 467 | if out[0] == 0: out[0] = 2*poly[1] - 1 468 | return out 469 | 470 | def gf_poly_eval(poly, x): 471 | '''Evaluates a polynomial in GF(2^p) given the value for x. This is based on Horner's scheme for maximum efficiency.''' 472 | y = poly[0] 473 | for i in xrange(1, len(poly)): 474 | y = gf_mul(y, x) ^ poly[i] 475 | return y 476 | 477 | 478 | ################### REED-SOLOMON ENCODING ################### 479 | 480 | def rs_generator_poly(nsym, fcr=0, generator=2): 481 | '''Generate an irreducible generator polynomial (necessary to encode a message into Reed-Solomon)''' 482 | g = _bytearray([1]) 483 | for i in xrange(nsym): 484 | g = gf_poly_mul(g, [1, gf_pow(generator, i+fcr)]) 485 | return g 486 | 487 | def rs_generator_poly_all(max_nsym, fcr=0, generator=2): 488 | '''Generate all irreducible generator polynomials up to max_nsym (usually you can use n, the length of the message+ecc). Very useful to reduce processing time if you want to encode using variable schemes and nsym rates.''' 489 | g_all = {} 490 | g_all[0] = g_all[1] = _bytearray([1]) 491 | for nsym in xrange(max_nsym): 492 | g_all[nsym] = rs_generator_poly(nsym, fcr, generator) 493 | return g_all 494 | 495 | def rs_simple_encode_msg(msg_in, nsym, fcr=0, generator=2, gen=None): 496 | '''Simple Reed-Solomon encoding (mainly an example for you to understand how it works, because it's slower than the inlined function below)''' 497 | global field_charac 498 | if (len(msg_in) + nsym) > field_charac: raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in)+nsym, field_charac)) 499 | if gen is None: gen = rs_generator_poly(nsym, fcr, generator) 500 | 501 | # Pad the message, then divide it by the irreducible generator polynomial 502 | _, remainder = gf_poly_div(msg_in + _bytearray(len(gen)-1), gen) 503 | # The remainder is our RS code! Just append it to our original message to get our full codeword (this represents a polynomial of max 256 terms) 504 | msg_out = msg_in + remainder 505 | # Return the codeword 506 | return msg_out 507 | 508 | def rs_encode_msg(msg_in, nsym, fcr=0, generator=2, gen=None): 509 | '''Reed-Solomon main encoding function, using polynomial division (Extended Synthetic Division, the fastest algorithm available to my knowledge), better explained at http://research.swtch.com/field''' 510 | global field_charac 511 | if (len(msg_in) + nsym) > field_charac: raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in)+nsym, field_charac)) 512 | if gen is None: gen = rs_generator_poly(nsym, fcr, generator) 513 | 514 | msg_in = _bytearray(msg_in) 515 | msg_out = _bytearray(msg_in) + _bytearray(len(gen)-1) # init msg_out with the values inside msg_in and pad with len(gen)-1 bytes (which is the number of ecc symbols). 516 | 517 | # Precompute the logarithm of every items in the generator 518 | lgen = _bytearray([gf_log[gen[j]] for j in xrange(len(gen))]) 519 | 520 | # Extended synthetic division main loop 521 | # Fastest implementation with PyPy (but the Cython version in creedsolo.pyx is about 2x faster) 522 | for i in xrange(len(msg_in)): 523 | coef = msg_out[i] # Note that it's msg_out here, not msg_in. Thus, we reuse the updated value at each iteration (this is how Synthetic Division works: instead of storing in a temporary register the intermediate values, we directly commit them to the output). 524 | # coef = gf_mul(msg_out[i], gf_inverse(gen[0])) # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0] 525 | if coef != 0: # log(0) is undefined, so we need to manually check for this case. There's no need to check the divisor here because we know it can't be 0 since we generated it. 526 | lcoef = gf_log[coef] # precaching 527 | 528 | for j in xrange(1, len(gen)): # in synthetic division, we always skip the first coefficient of the divisior, because it's only used to normalize the dividend coefficient (which is here useless since the divisor, the generator polynomial, is always monic) 529 | #if gen[j] != 0: # log(0) is undefined so we need to check that, but it slow things down in fact and it's useless in our case (reed-solomon encoding) since we know that all coefficients in the generator are not 0 530 | msg_out[i + j] ^= gf_exp[lcoef + lgen[j]] # optimization, equivalent to gf_mul(gen[j], msg_out[i]) and we just substract it to msg_out[i+j] (but since we are in GF256, it's equivalent to an addition and to an XOR). In other words, this is simply a "multiply-accumulate operation" 531 | 532 | # Recopy the original message bytes (overwrites the part where the quotient was computed) 533 | msg_out[:len(msg_in)] = msg_in # equivalent to c = mprime - b, where mprime is msg_in padded with [0]*nsym 534 | return msg_out 535 | 536 | 537 | ################### REED-SOLOMON DECODING ################### 538 | 539 | def rs_calc_syndromes(msg, nsym, fcr=0, generator=2): 540 | '''Given the received codeword msg and the number of error correcting symbols (nsym), computes the syndromes polynomial. 541 | Mathematically, it's essentially equivalent to a Fourrier Transform (Chien search being the inverse). 542 | ''' 543 | # Note the "[0] +" : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computations depending on the syndromes (such as the errors locator polynomial, errors evaluator polynomial, etc. but not the errors positions). 544 | # This is not necessary as anyway syndromes are defined such as there are only non-zero coefficients (the only 0 is the shift of the constant here) and subsequent computations will/must account for the shift by skipping the first iteration (eg, the often seen range(1, n-k+1)), but you can also avoid prepending the 0 coeff and adapt every subsequent computations to start from 0 instead of 1. 545 | return [0] + [gf_poly_eval(msg, gf_pow(generator, i+fcr)) for i in xrange(nsym)] 546 | 547 | def rs_correct_errata(msg_in, synd, err_pos, fcr=0, generator=2): # err_pos is a list of the positions of the errors/erasures/errata 548 | '''Forney algorithm, computes the values (error magnitude) to correct the input message.''' 549 | global field_charac 550 | msg = _bytearray(msg_in) 551 | # calculate errata locator polynomial to correct both errors and erasures (by combining the errors positions given by the error locator polynomial found by BM with the erasures positions given by caller) 552 | coef_pos = [len(msg) - 1 - p for p in err_pos] # need to convert the positions to coefficients degrees for the errata locator algo to work (eg: instead of [0, 1, 2] it will become [len(msg)-1, len(msg)-2, len(msg) -3]) 553 | err_loc = rs_find_errata_locator(coef_pos, generator) 554 | # calculate errata evaluator polynomial (often called Omega or Gamma in academic papers) 555 | err_eval = rs_find_error_evaluator(synd[::-1], err_loc, len(err_loc)-1)[::-1] 556 | 557 | # Second part of Chien search to get the error location polynomial X from the error positions in err_pos (the roots of the error locator polynomial, ie, where it evaluates to 0) 558 | X = [] # will store the position of the errors 559 | for i in xrange(len(coef_pos)): 560 | l = field_charac - coef_pos[i] 561 | X.append( gf_pow(generator, -l) ) 562 | 563 | # Forney algorithm: compute the magnitudes 564 | E = _bytearray(len(msg)) # will store the values that need to be corrected (substracted) to the message containing errors. This is sometimes called the error magnitude polynomial. 565 | Xlength = len(X) 566 | for i, Xi in enumerate(X): 567 | 568 | Xi_inv = gf_inverse(Xi) 569 | 570 | # Compute the formal derivative of the error locator polynomial (see Blahut, Algebraic codes for data transmission, pp 196-197). 571 | # the formal derivative of the errata locator is used as the denominator of the Forney Algorithm, which simply says that the ith error value is given by error_evaluator(gf_inverse(Xi)) / error_locator_derivative(gf_inverse(Xi)). See Blahut, Algebraic codes for data transmission, pp 196-197. 572 | err_loc_prime_tmp = [] 573 | for j in xrange(Xlength): 574 | if j != i: 575 | err_loc_prime_tmp.append( gf_sub(1, gf_mul(Xi_inv, X[j])) ) 576 | # compute the product, which is the denominator of the Forney algorithm (errata locator derivative) 577 | err_loc_prime = 1 578 | for coef in err_loc_prime_tmp: 579 | err_loc_prime = gf_mul(err_loc_prime, coef) 580 | # equivalent to: err_loc_prime = functools.reduce(gf_mul, err_loc_prime_tmp, 1) 581 | 582 | # Test if we could find the errata locator, else we raise an Exception (because else since we divide y by err_loc_prime to compute the magnitude, we will get a ZeroDivisionError exception otherwise) 583 | if err_loc_prime == 0: 584 | raise ReedSolomonError("Decoding failed: Forney algorithm could not properly detect where the errors are located (errata locator prime is 0).") 585 | 586 | # Compute y (evaluation of the errata evaluator polynomial) 587 | # This is a more faithful translation of the theoretical equation contrary to the old forney method. Here it is exactly copy/pasted from the included presentation decoding_rs.pdf: Yl = omega(Xl.inverse()) / prod(1 - Xj*Xl.inverse()) for j in len(X) (in the paper it's for j in s, but it's useless when len(X) < s because we compute neutral terms 1 for nothing, and wrong when correcting more than s erasures or erasures+errors since it prevents computing all required terms). 588 | # Thus here this method works with erasures too because firstly we fixed the equation to be like the theoretical one (don't know why it was modified in _old_forney(), if it's an optimization, it doesn't enhance anything), and secondly because we removed the product bound on s, which prevented computing errors and erasures above the s=(n-k)//2 bound. 589 | y = gf_poly_eval(err_eval[::-1], Xi_inv) # numerator of the Forney algorithm (errata evaluator evaluated) 590 | y = gf_mul(gf_pow(Xi, 1-fcr), y) # adjust to fcr parameter 591 | 592 | # Compute the magnitude 593 | magnitude = gf_div(y, err_loc_prime) # magnitude value of the error, calculated by the Forney algorithm (an equation in fact): dividing the errata evaluator with the errata locator derivative gives us the errata magnitude (ie, value to repair) the ith symbol 594 | E[err_pos[i]] = magnitude # store the magnitude for this error into the magnitude polynomial 595 | 596 | # Apply the correction of values to get our message corrected! (note that the ecc bytes also gets corrected!) 597 | # (this isn't the Forney algorithm, we just apply the result of decoding here) 598 | msg = gf_poly_add(msg, E) # equivalent to Ci = Ri - Ei where Ci is the correct message, Ri the received (senseword) message, and Ei the errata magnitudes (minus is replaced by XOR since it's equivalent in GF(2^p)). So in fact here we substract from the received message the errors magnitude, which logically corrects the value to what it should be. 599 | return msg 600 | 601 | def rs_find_error_locator(synd, nsym, erase_loc=None, erase_count=0): 602 | '''Find error/errata locator and evaluator polynomials with Berlekamp-Massey algorithm''' 603 | # The idea is that BM will iteratively estimate the error locator polynomial. 604 | # To do this, it will compute a Discrepancy term called Delta, which will tell us if the error locator polynomial needs an update or not 605 | # (hence why it's called discrepancy: it tells us when we are getting off board from the correct value). 606 | 607 | # Init the polynomials 608 | if erase_loc: # if the erasure locator polynomial is supplied, we init with its value, so that we include erasures in the final locator polynomial 609 | err_loc = _bytearray(erase_loc) 610 | old_loc = _bytearray(erase_loc) 611 | else: 612 | err_loc = _bytearray([1]) # This is the main variable we want to fill, also called Sigma in other notations or more formally the errors/errata locator polynomial. 613 | old_loc = _bytearray([1]) # BM is an iterative algorithm, and we need the errata locator polynomial of the previous iteration in order to update other necessary variables. 614 | #L = 0 # update flag variable, not needed here because we use an alternative equivalent way of checking if update is needed (but using the flag could potentially be faster depending on if using length(list) is taking linear time in your language, here in Python it's constant so it's as fast. 615 | 616 | # Fix the syndrome shifting: when computing the syndrome, some implementations may prepend a 0 coefficient for the lowest degree term (the constant). This is a case of syndrome shifting, thus the syndrome will be bigger than the number of ecc symbols (I don't know what purpose serves this shifting). If that's the case, then we need to account for the syndrome shifting when we use the syndrome such as inside BM, by skipping those prepended coefficients. 617 | # Another way to detect the shifting is to detect the 0 coefficients: by definition, a syndrome does not contain any 0 coefficient (except if there are no errors/erasures, in this case they are all 0). This however doesn't work with the modified Forney syndrome, which set to 0 the coefficients corresponding to erasures, leaving only the coefficients corresponding to errors. 618 | synd_shift = 0 619 | if len(synd) > nsym: synd_shift = len(synd) - nsym 620 | 621 | for i in xrange(nsym-erase_count): # generally: nsym-erase_count == len(synd), except when you input a partial erase_loc and using the full syndrome instead of the Forney syndrome, in which case nsym-erase_count is more correct (len(synd) will fail badly with IndexError). 622 | if erase_loc: # if an erasures locator polynomial was provided to init the errors locator polynomial, then we must skip the FIRST erase_count iterations (not the last iterations, this is very important!) 623 | K = erase_count+i+synd_shift 624 | else: # if erasures locator is not provided, then either there's no erasures to account or we use the Forney syndromes, so we don't need to use erase_count nor erase_loc (the erasures have been trimmed out of the Forney syndromes). 625 | K = i+synd_shift 626 | 627 | # Compute the discrepancy Delta 628 | # Here is the close-to-the-books operation to compute the discrepancy Delta: it's a simple polynomial multiplication of error locator with the syndromes, and then we get the Kth element. 629 | #delta = gf_poly_mul(err_loc[::-1], synd)[K] # theoretically it should be gf_poly_add(synd[::-1], [1])[::-1] instead of just synd, but it seems it's not absolutely necessary to correctly decode. 630 | # But this can be optimized: since we only need the Kth element, we don't need to compute the polynomial multiplication for any other element but the Kth. Thus to optimize, we compute the polymul only at the item we need, skipping the rest (avoiding a nested loop, thus we are linear time instead of quadratic). 631 | # This optimization is actually described in several figures of the book "Algebraic codes for data transmission", Blahut, Richard E., 2003, Cambridge university press. 632 | delta = synd[K] 633 | for j in xrange(1, len(err_loc)): 634 | delta ^= gf_mul(err_loc[-(j+1)], synd[K - j]) # delta is also called discrepancy. Here we do a partial polynomial multiplication (ie, we compute the polynomial multiplication only for the term of degree K). Should be equivalent to brownanrs.polynomial.mul_at(). 635 | #print "delta", K, delta, list(gf_poly_mul(err_loc[::-1], synd)) # debugline 636 | 637 | # Shift polynomials to compute the next degree 638 | old_loc = old_loc + _bytearray([0]) 639 | 640 | # Iteratively estimate the errata locator and evaluator polynomials 641 | if delta != 0: # Update only if there's a discrepancy 642 | if len(old_loc) > len(err_loc): # Rule B (rule A is implicitly defined because rule A just says that we skip any modification for this iteration) 643 | #if 2*L <= K+erase_count: # equivalent to len(old_loc) > len(err_loc), as long as L is correctly computed 644 | # Computing errata locator polynomial Sigma 645 | new_loc = gf_poly_scale(old_loc, delta) 646 | old_loc = gf_poly_scale(err_loc, gf_inverse(delta)) # effectively we are doing err_loc * 1/delta = err_loc // delta 647 | err_loc = new_loc 648 | # Update the update flag 649 | #L = K - L # the update flag L is tricky: in Blahut's schema, it's mandatory to use `L = K - L - erase_count` (and indeed in a previous draft of this function, if you forgot to do `- erase_count` it would lead to correcting only 2*(errors+erasures) <= (n-k) instead of 2*errors+erasures <= (n-k)), but in this latest draft, this will lead to a wrong decoding in some cases where it should correctly decode! Thus you should try with and without `- erase_count` to update L on your own implementation and see which one works OK without producing wrong decoding failures. 650 | 651 | # Update with the discrepancy 652 | err_loc = gf_poly_add(err_loc, gf_poly_scale(old_loc, delta)) 653 | 654 | # Check if the result is correct, that there's not too many errors to correct 655 | err_loc = list(itertools.dropwhile(lambda x: x == 0, err_loc)) # drop leading 0s, else errs will not be of the correct size 656 | errs = len(err_loc) - 1 657 | if (errs-erase_count) * 2 + erase_count > nsym: 658 | 659 | #RSB 660 | return None 661 | #raise ReedSolomonError("Too many errors to correct") 662 | 663 | return err_loc 664 | 665 | def rs_find_errata_locator(e_pos, generator=2): 666 | '''Compute the erasures/errors/errata locator polynomial from the erasures/errors/errata positions (the positions must be relative to the x coefficient, eg: "hello worldxxxxxxxxx" is tampered to "h_ll_ worldxxxxxxxxx" with xxxxxxxxx being the ecc of length n-k=9, here the string positions are [1, 4], but the coefficients are reversed since the ecc characters are placed as the first coefficients of the polynomial, thus the coefficients of the erased characters are n-1 - [1, 4] = [18, 15] = erasures_loc to be specified as an argument.''' 667 | # See: http://ocw.usu.edu/Electrical_and_Computer_Engineering/Error_Control_Coding/lecture7.pdf and Blahut, Richard E. "Transform techniques for error control codes." IBM Journal of Research and development 23.3 (1979): 299-315. http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.92.600&rep=rep1&type=pdf and also a MatLab implementation here: http://www.mathworks.com/matlabcentral/fileexchange/23567-reed-solomon-errors-and-erasures-decoder/content//RS_E_E_DEC.m 668 | e_loc = [1] # just to init because we will multiply, so it must be 1 so that the multiplication starts correctly without nulling any term 669 | # erasures_loc is very simple to compute: erasures_loc = prod(1 - x*alpha**i) for i in erasures_pos and where alpha is the alpha chosen to evaluate polynomials (here in this library it's gf(3)). To generate c*x where c is a constant, we simply generate a Polynomial([c, 0]) where 0 is the constant and c is positionned to be the coefficient for x^1. 670 | for i in e_pos: 671 | e_loc = gf_poly_mul( e_loc, gf_poly_add(_bytearray([1]), [gf_pow(generator, i), 0]) ) 672 | return e_loc 673 | 674 | def rs_find_error_evaluator(synd, err_loc, nsym): 675 | '''Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma.''' 676 | # Omega(x) = [ Synd(x) * Error_loc(x) ] mod x^(n-k+1) 677 | _, remainder = gf_poly_div( gf_poly_mul(synd, err_loc), ([1] + [0]*(nsym+1)) ) # first multiply syndromes * errata_locator, then do a polynomial division to truncate the polynomial to the required length 678 | 679 | # Faster way that is equivalent 680 | #remainder = gf_poly_mul(synd, err_loc) # first multiply the syndromes with the errata locator polynomial 681 | #remainder = remainder[len(remainder)-(nsym+1):] # then divide by a polynomial of the length we want, which is equivalent to slicing the list (which represents the polynomial) 682 | 683 | return remainder 684 | 685 | def rs_find_errors(err_loc, nmess, generator=2): 686 | '''Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).''' 687 | # nmess = length of whole codeword (message + ecc symbols) 688 | errs = len(err_loc) - 1 689 | err_pos = [] 690 | for i in xrange(nmess): # normally we should try all 2^8 possible values, but here we optimize to just check the interesting symbols 691 | if gf_poly_eval(err_loc, gf_pow(generator, i)) == 0: # It's a 0? Bingo, it's a root of the error locator polynomial, in other terms this is the location of an error 692 | err_pos.append(nmess - 1 - i) 693 | # Sanity check: the number of errors/errata positions found should be exactly the same as the length of the errata locator polynomial 694 | if len(err_pos) != errs: 695 | # TODO: to decode messages+ecc with length n > 255, we may try to use a bruteforce approach: the correct positions ARE in the final array j, but the problem is because we are above the Galois Field's range, there is a wraparound so that for example if j should be [0, 1, 2, 3], we will also get [255, 256, 257, 258] (because 258 % 255 == 3, same for the other values), so we can't discriminate. The issue is that fixing any errs_nb errors among those will always give a correct output message (in the sense that the syndrome will be all 0), so we may not even be able to check if that's correct or not, so I'm not sure the bruteforce approach may even be possible. 696 | #raise ReedSolomonError("Too many (or few) errors found by Chien Search for the errata locator polynomial!") 697 | #RSB 698 | return None 699 | return err_pos 700 | 701 | def rs_forney_syndromes(synd, pos, nmess, generator=2): 702 | # Compute Forney syndromes, which computes a modified syndromes to compute only errors (erasures are trimmed out). Do not confuse this with Forney algorithm, which allows to correct the message based on the location of errors. 703 | erase_pos_reversed = [nmess-1-p for p in pos] # prepare the coefficient degree positions (instead of the erasures positions) 704 | 705 | # Optimized method, all operations are inlined 706 | fsynd = list(synd[1:]) # make a copy and trim the first coefficient which is always 0 by definition 707 | for i in xrange(len(pos)): 708 | x = gf_pow(generator, erase_pos_reversed[i]) 709 | for j in xrange(len(fsynd) - 1): 710 | fsynd[j] = gf_mul(fsynd[j], x) ^ fsynd[j + 1] 711 | #fsynd.pop() # useless? it doesn't change the results of computations to leave it there 712 | 713 | # Theoretical way of computing the modified Forney syndromes: fsynd = (erase_loc * synd) % x^(n-k) -- although the trimming by using x^(n-k) is maybe not necessary as many books do not even mention it (and it works without trimming) 714 | # See Shao, H. M., Truong, T. K., Deutsch, L. J., & Reed, I. S. (1986, April). A single chip VLSI Reed-Solomon decoder. In Acoustics, Speech, and Signal Processing, IEEE International Conference on ICASSP'86. (Vol. 11, pp. 2151-2154). IEEE.ISO 690 715 | #erase_loc = rs_find_errata_locator(erase_pos_reversed, generator=generator) # computing the erasures locator polynomial 716 | #fsynd = gf_poly_mul(erase_loc[::-1], synd[1:]) # then multiply with the syndrome to get the untrimmed forney syndrome 717 | #fsynd = fsynd[len(pos):] # then trim the first erase_pos coefficients which are useless. Seems to be not necessary, but this reduces the computation time later in BM (thus it's an optimization). 718 | 719 | return fsynd 720 | 721 | def rs_correct_msg(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False): 722 | '''Reed-Solomon main decoding function''' 723 | global field_charac 724 | if len(msg_in) > field_charac: 725 | # Note that it is in fact possible to encode/decode messages that are longer than field_charac, but because this will be above the field, this will generate more error positions during Chien Search than it should, because this will generate duplicate values, which should normally be prevented thank's to the prime polynomial reduction (eg, because it can't discriminate between error at position 1 or 256, both being exactly equal under galois field 2^8). So it's really not advised to do it, but it's possible (but then you're not guaranted to be able to correct any error/erasure on symbols with a position above the length of field_charac -- if you really need a bigger message without chunking, then you should better enlarge c_exp so that you get a bigger field). 726 | raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac)) 727 | 728 | msg_out = _bytearray(msg_in) # copy of message 729 | # erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values) 730 | if erase_pos is None: 731 | erase_pos = [] 732 | else: 733 | for e_pos in erase_pos: 734 | msg_out[e_pos] = 0 735 | # check if there are too many erasures to correct (beyond the Singleton bound) 736 | if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct") 737 | # prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions) 738 | synd = rs_calc_syndromes(msg_out, nsym, fcr, generator) 739 | # check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is. 740 | if max(synd) == 0: 741 | return msg_out[:-nsym], msg_out[-nsym:], [] # no errors 742 | 743 | # Find errors locations 744 | if only_erasures: 745 | err_pos = [] 746 | else: 747 | # compute the Forney syndromes, which hide the erasures from the original syndrome (so that BM will just have to deal with errors, not erasures) 748 | fsynd = rs_forney_syndromes(synd, erase_pos, len(msg_out), generator) 749 | # compute the error locator polynomial using Berlekamp-Massey 750 | err_loc = rs_find_error_locator(fsynd, nsym, erase_count=len(erase_pos)) 751 | 752 | #RSB 753 | if err_loc == None: 754 | return msg_out[:-nsym], msg_out[-nsym:], [] 755 | 756 | # locate the message errors using Chien search (or bruteforce search) 757 | err_pos = rs_find_errors(err_loc[::-1], len(msg_out), generator) 758 | if err_pos is None: 759 | #raise ReedSolomonError("Could not locate error") 760 | 761 | #RSB 762 | return msg_out[:-nsym], msg_out[-nsym:], [] 763 | 764 | # Find errors values and apply them to correct the message 765 | # compute errata evaluator and errata magnitude polynomials, then correct errors and erasures 766 | msg_out = rs_correct_errata(msg_out, synd, erase_pos + err_pos, fcr, generator) # note that we here use the original syndrome, not the forney syndrome (because we will correct both errors and erasures, so we need the full syndrome) 767 | # check if the final message is fully repaired 768 | synd = rs_calc_syndromes(msg_out, nsym, fcr, generator) 769 | if max(synd) > 0: 770 | raise ReedSolomonError("Could not correct message") 771 | # return the successfully decoded message 772 | return msg_out[:-nsym], msg_out[-nsym:], erase_pos + err_pos # also return the corrected ecc block so that the user can check(), and the position of errors to allow for adaptive bitrate algorithm to check how the number of errors vary 773 | 774 | def rs_correct_msg_nofsynd(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False): 775 | '''Reed-Solomon main decoding function, without using the modified Forney syndromes''' 776 | global field_charac 777 | if len(msg_in) > field_charac: 778 | raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac)) 779 | 780 | msg_out = _bytearray(msg_in) # copy of message 781 | # erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values) 782 | if erase_pos is None: 783 | erase_pos = [] 784 | else: 785 | for e_pos in erase_pos: 786 | msg_out[e_pos] = 0 787 | # check if there are too many erasures 788 | if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct") 789 | # prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions) 790 | synd = rs_calc_syndromes(msg_out, nsym, fcr, generator) 791 | # check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is. 792 | if max(synd) == 0: 793 | return msg_out[:-nsym], msg_out[-nsym:], [] # no errors 794 | 795 | # prepare erasures locator and evaluator polynomials 796 | erase_loc = None 797 | #erase_eval = None 798 | erase_count = 0 799 | if erase_pos: 800 | erase_count = len(erase_pos) 801 | erase_pos_reversed = [len(msg_out)-1-eras for eras in erase_pos] 802 | erase_loc = rs_find_errata_locator(erase_pos_reversed, generator=generator) 803 | #erase_eval = rs_find_error_evaluator(synd[::-1], erase_loc, len(erase_loc)-1) 804 | 805 | # prepare errors/errata locator polynomial 806 | if only_erasures: 807 | err_loc = erase_loc[::-1] 808 | #err_eval = erase_eval[::-1] 809 | else: 810 | err_loc = rs_find_error_locator(synd, nsym, erase_loc=erase_loc, erase_count=erase_count) 811 | err_loc = err_loc[::-1] 812 | #err_eval = rs_find_error_evaluator(synd[::-1], err_loc[::-1], len(err_loc)-1)[::-1] # find error/errata evaluator polynomial (not really necessary since we already compute it at the same time as the error locator poly in BM) 813 | 814 | # locate the message errors 815 | err_pos = rs_find_errors(err_loc, len(msg_out), generator) # find the roots of the errata locator polynomial (ie: the positions of the errors/errata) 816 | if err_pos is None: 817 | raise ReedSolomonError("Could not locate error") 818 | 819 | # compute errata evaluator and errata magnitude polynomials, then correct errors and erasures 820 | msg_out = rs_correct_errata(msg_out, synd, err_pos, fcr=fcr, generator=generator) 821 | # check if the final message is fully repaired 822 | synd = rs_calc_syndromes(msg_out, nsym, fcr, generator) 823 | if max(synd) > 0: 824 | raise ReedSolomonError("Could not correct message") 825 | # return the successfully decoded message 826 | return msg_out[:-nsym], msg_out[-nsym:], erase_pos + err_pos # also return the corrected ecc block so that the user can check(), and the position of errors to allow for adaptive bitrate algorithm to check how the number of errors vary 827 | 828 | def rs_check(msg, nsym, fcr=0, generator=2): 829 | '''Returns true if the message + ecc has no error of false otherwise (may not always catch a wrong decoding or a wrong message, particularly if there are too many errors -- above the Singleton bound --, but it usually does)''' 830 | return ( max(rs_calc_syndromes(msg, nsym, fcr, generator)) == 0 ) 831 | 832 | 833 | #=================================================================================================== 834 | # API 835 | #=================================================================================================== 836 | class RSCodec(object): 837 | ''' 838 | A Reed Solomon encoder/decoder. After initializing the object, use ``encode`` to encode a 839 | (byte)string to include the RS correction code, and pass such an encoded (byte)string to 840 | ``decode`` to extract the original message (if the number of errors allows for correct decoding). 841 | The ``nsym`` argument is the length of the correction code, and it determines the number of 842 | error bytes (if I understand this correctly, half of ``nsym`` is correctable) 843 | ''' 844 | ''' 845 | Modifications by rotorgit 2/3/2015: 846 | Added support for US FAA ADSB UAT RS FEC, by allowing user to specify 847 | different primitive polynomial and non-zero first consecutive root (fcr). 848 | For UAT/ADSB use, set fcr=120 and prim=0x187 when instantiating 849 | the class; leaving them out will default for previous values (0 and 850 | 0x11d) 851 | ''' 852 | 853 | def __init__(self, nsym=10, nsize=255, fcr=0, prim=0x11d, generator=2, c_exp=8, single_gen=True): 854 | '''Initialize the Reed-Solomon codec. Note that different parameters change the internal values (the ecc symbols, look-up table values, etc) but not the output result (whether your message can be repaired or not, there is no influence of the parameters). 855 | nsym : number of ecc symbols (you can repair nsym/2 errors and nsym erasures. 856 | nsize : maximum length of each chunk. If higher than 255, will use a higher Galois Field, but the algorithm's complexity and computational cost will raise quadratically... 857 | single_gen : if you want to use the same RSCodec for different nsym parameters (but nsize the same), then set single_gen = False. 858 | ''' 859 | 860 | # Auto-setup if galois field or message length is different than default (exponent 8) 861 | if nsize > 255 and c_exp <= 8: # nsize (chunksize) is larger than the galois field, we resize the galois field 862 | # Get the next closest power of two 863 | c_exp = int(math.log(2 ** (math.floor(math.log(nsize) / math.log(2)) + 1), 2)) 864 | if c_exp != 8 and prim == 0x11d: # prim was not correctly defined, find one 865 | prim = find_prime_polys(generator=generator, c_exp=c_exp, fast_primes=True, single=True) 866 | if nsize == 255: # resize chunk size if not set 867 | nsize = int(2**c_exp - 1) 868 | 869 | # Memorize variables 870 | self.nsym = nsym # number of ecc symbols (ie, the repairing rate will be r=(nsym/2)/nsize, so for example if you have nsym=5 and nsize=10, you have a rate r=0.25, so you can correct up to 0.25% errors (or exactly 2 symbols out of 10), and 0.5% erasures (5 symbols out of 10). 871 | self.nsize = nsize # maximum length of one chunk (ie, message + ecc symbols after encoding, for the message alone it's nsize-nsym) 872 | self.fcr = fcr # first consecutive root, can be any value between 0 and (2**c_exp)-1 873 | self.prim = prim # prime irreducible polynomial, use find_prime_polys() to find a prime poly 874 | self.generator = generator # generator integer, must be prime 875 | self.c_exp = c_exp # exponent of the field's characteristic. This both defines the maximum value per symbol and the maximum length of one chunk. By default it's GF(2^8), do not change if you're not sure what it means. 876 | 877 | # Initialize the look-up tables for easy and quick multiplication/division 878 | self.gf_log, self.gf_exp, self.field_charac = init_tables(prim, generator, c_exp) 879 | # Precompute the generator polynomials 880 | if single_gen: 881 | self.gen = {} 882 | self.gen[nsym] = rs_generator_poly(nsym, fcr=fcr, generator=generator) 883 | else: 884 | self.gen = rs_generator_poly_all(nsize, fcr=fcr, generator=generator) 885 | 886 | def chunk(self, data, chunksize): 887 | '''Split a long message into chunks''' 888 | for i in xrange(0, len(data), chunksize): 889 | # Split the long message in a chunk 890 | chunk = data[i:i+chunksize] 891 | yield chunk 892 | 893 | def encode(self, data, nsym=None): 894 | '''Encode a message (ie, add the ecc symbols) using Reed-Solomon, whatever the length of the message because we use chunking''' 895 | # Restore precomputed tables (allow to use multiple RSCodec in one script) 896 | global gf_log, gf_exp, field_charac 897 | gf_log, gf_exp, field_charac = self.gf_log, self.gf_exp, self.field_charac 898 | 899 | if not nsym: 900 | nsym = self.nsym 901 | 902 | if isinstance(data, str): 903 | data = _bytearray(data) 904 | enc = _bytearray() 905 | for chunk in self.chunk(data, self.nsize - self.nsym): 906 | enc.extend(rs_encode_msg(chunk, self.nsym, fcr=self.fcr, generator=self.generator, gen=self.gen[nsym])) 907 | return enc 908 | 909 | def decode(self, data, nsym=None, erase_pos=None, only_erasures=False): 910 | '''Repair a message, whatever its size is, by using chunking. May return a wrong result if number of errors > nsym. 911 | Note that it returns a couple of vars: the repaired messages, and the repaired messages+ecc (useful for checking). 912 | Usage: rmes, rmesecc = RSCodec.decode(data). 913 | ''' 914 | # erase_pos is a list of positions where you know (or greatly suspect at least) there is an erasure (ie, wrong character but you know it's at this position). Just input the list of all positions you know there are errors, and this method will automatically split the erasures positions to attach to the corresponding data chunk. 915 | 916 | # Restore precomputed tables (allow to use multiple RSCodec in one script) 917 | global gf_log, gf_exp, field_charac 918 | gf_log, gf_exp, field_charac = self.gf_log, self.gf_exp, self.field_charac 919 | 920 | if not nsym: 921 | nsym = self.nsym 922 | 923 | if isinstance(data, str): 924 | data = _bytearray(data) 925 | dec = _bytearray() 926 | dec_full = _bytearray() 927 | errata_pos_all = _bytearray() 928 | for chunk in self.chunk(data, self.nsize): 929 | # Extract the erasures for this chunk 930 | e_pos = [] 931 | if erase_pos: 932 | # First extract the erasures for this chunk (all erasures below the maximum chunk length) 933 | e_pos = [x for x in erase_pos if x <= self.nsize] 934 | # Then remove the extract erasures from the big list and also decrement all subsequent positions values by nsize (the current chunk's size) so as to prepare the correct alignment for the next iteration 935 | erase_pos = [x - (self.nsize+1) for x in erase_pos if x > self.nsize] 936 | # Decode/repair this chunk! 937 | rmes, recc, errata_pos = rs_correct_msg(chunk, nsym, fcr=self.fcr, generator=self.generator, erase_pos=e_pos, only_erasures=only_erasures) 938 | dec.extend(rmes) 939 | dec_full.extend(rmes+recc) 940 | errata_pos_all.extend(errata_pos) 941 | return dec, dec_full, errata_pos_all 942 | 943 | def check(self, data, nsym=None): 944 | '''Check if a message+ecc stream is not corrupted (or fully repaired). Note: may return a wrong result if number of errors > nsym.''' 945 | if not nsym: 946 | nsym = self.nsym 947 | if isinstance(data, str): 948 | data = _bytearray(data) 949 | check = [] 950 | for chunk in self.chunk(data, self.nsize): 951 | check.append(rs_check(chunk, nsym, fcr=self.fcr, generator=self.generator)) 952 | return check 953 | 954 | def maxerrata(self, errors=None, erasures=None, verbose=False): 955 | '''Return the Singleton Bound for the current codec, which is the max number of errata (errors and erasures) that the codec can decode/correct. 956 | Beyond the Singleton Bound (too many errors/erasures), the algorithm will try to raise an exception, but it may also not detect any problem with the message and return 0 errors. 957 | Hence why you should use checksums if your goal is to detect errors (as opposed to correcting them), as checksums have no bounds on the number of errors, the only limitation being the probability of collisions. 958 | By default, return a tuple wth the maximum number of errors (2nd output) OR erasures (2nd output) that can be corrected. 959 | If errors or erasures (not both) is specified as argument, computes the remaining **simultaneous** correction capacity (eg, if errors specified, compute the number of erasures that can be simultaneously corrected). 960 | Set verbose to True to get print a report.''' 961 | nsym = self.nsym 962 | # Compute the maximum number of errors OR erasures 963 | maxerrors = int(nsym/2) # always floor the number, we can't correct half a symbol, it's all or nothing 964 | maxerasures = nsym 965 | # Compute the maximum of simultaneous errors AND erasures 966 | if erasures is not None and erasures >= 0: 967 | # We know the erasures count, we want to know how many errors we can correct simultaneously 968 | if erasures > maxerasures: 969 | raise ReedSolomonError("Specified number of errors or erasures exceeding the Singleton Bound!") 970 | maxerrors = int((nsym-erasures)/2) 971 | if verbose: 972 | print('This codec can correct up to %i errors and %i erasures simultaneously' % (maxerrors, erasures)) 973 | # Return a tuple with the maximum number of simultaneously corrected errors and erasures 974 | return maxerrors, erasures 975 | if errors is not None and errors >= 0: 976 | # We know the errors count, we want to know how many erasures we can correct simultaneously 977 | if errors > maxerrors: 978 | raise ReedSolomonError("Specified number of errors or erasures exceeding the Singleton Bound!") 979 | maxerasures = int(nsym-(errors*2)) 980 | if verbose: 981 | print('This codec can correct up to %i errors and %i erasures simultaneously' % (errors, maxerasures)) 982 | # Return a tuple with the maximum number of simultaneously corrected errors and erasures 983 | return errors, maxerasures 984 | # Return a tuple with the maximum number of errors and erasures (independently corrected) 985 | if verbose: 986 | print('This codec can correct up to %i errors and %i erasures independently' % (maxerrors, maxerasures)) 987 | return maxerrors, maxerasures 988 | -------------------------------------------------------------------------------- /serdespy/rs_code.py: -------------------------------------------------------------------------------- 1 | from .reedsolo import RSCodec, ReedSolomonError 2 | import numpy as np 3 | from .signal import * 4 | 5 | #KP4 = RS (544, 514, 15) 6 | def RS_KP4(): 7 | return RSCodec(nsym = 30, nsize = 544, c_exp = 10 ) 8 | 9 | def RS_KR4(): 10 | #KR4 = RS(528,514) 11 | return RSCodec(nsym = 14, nsize = 528, c_exp = 10 ) 12 | 13 | def bin_seq2int_seq(bin_seq): 14 | 15 | if bin_seq.size % 10 != 0: 16 | print('Error: bin_seq must have length divisible by 10') 17 | return False 18 | 19 | n_words = int(bin_seq.size/10) 20 | 21 | d_len = n_words * 10 22 | 23 | words = bin_seq.reshape((n_words,10)) 24 | 25 | #return words 26 | 27 | int_seq = np.zeros(n_words).astype(int) 28 | 29 | for i in range(n_words): 30 | int_seq[i] = bin2int(words[i]) 31 | 32 | return int_seq 33 | 34 | def int_seq2bin_seq(int_seq): 35 | 36 | n_words = int_seq.size 37 | 38 | words_out = np.zeros((n_words,10),dtype = np.uint8) 39 | 40 | for i in range(n_words): 41 | words_out[i] = int2bin(int_seq[i]) 42 | 43 | bin_seq = np.ndarray.flatten(words_out) 44 | 45 | return bin_seq 46 | 47 | def bin2int(x): 48 | y = 0 49 | y += x[0]*512 50 | y += x[1]*256 51 | y += x[2]*128 52 | y += x[3]*64 53 | y += x[4]*32 54 | y += x[5]*16 55 | y += x[6]*8 56 | y += x[7]*4 57 | y += x[8]*2 58 | y += x[9]*1 59 | return y 60 | 61 | def int2bin(x): 62 | y = np.zeros(10,dtype = np.uint8) 63 | y[0] = x // 512 64 | r = x % 512 65 | y[1] = r // 256 66 | r = r % 256 67 | y[2] = r // 128 68 | r = r % 128 69 | y[3] = r // 64 70 | r = r % 64 71 | y[4] = r // 32 72 | r = r % 32 73 | y[5] = r // 16 74 | r = r % 16 75 | y[6] = r // 8 76 | r = r % 8 77 | y[7] = r // 4 78 | r = r % 4 79 | y[8] = r // 2 80 | r = r % 2 81 | y[9] = r // 1 82 | return y 83 | 84 | def rs_encode(bin_seq, encoder, pam4 = True): 85 | 86 | #format data into 10-bit words 87 | data_in_int = bin_seq2int_seq(bin_seq) 88 | 89 | #encode_data 90 | data_in_enc_int = np.array(encoder.encode(data_in_int)) 91 | 92 | #convert back to binary sequence 93 | data_in_enc = int_seq2bin_seq(data_in_enc_int) 94 | 95 | if pam4: 96 | #convert into pam4 symbols 97 | len_symbols_enc = int(data_in_enc.size/2) 98 | 99 | data_in_enc_reshape = np.reshape(data_in_enc,(2,len_symbols_enc) , order = 'F') 100 | 101 | symbols_in_enc = np.zeros(len_symbols_enc,dtype = np.uint8) 102 | 103 | for i in range(len_symbols_enc): 104 | symbols_in_enc[i] = grey_encode(data_in_enc_reshape[:,i]) 105 | 106 | return symbols_in_enc 107 | 108 | return data_in_enc 109 | 110 | def rs_decode(symbols_out_enc, encoder, pam4 = True): 111 | 112 | if pam4: 113 | data_out_enc = np.zeros(symbols_out_enc.size*2,dtype = np.uint8) 114 | 115 | for i in range(symbols_out_enc.size): 116 | data_out_enc[i*2:i*2+2] = grey_decode(symbols_out_enc[i]) 117 | else: 118 | data_out_enc = symbols_out_enc 119 | 120 | data_out_enc_int = bin_seq2int_seq(data_out_enc) 121 | 122 | data_out_int = np.array(encoder.decode(data_out_enc_int)[0]) 123 | 124 | data_out = int_seq2bin_seq(data_out_int) 125 | 126 | return data_out -------------------------------------------------------------------------------- /serdespy/signal.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file contains miscellaneous functions for digital signal processing 3 | """ 4 | 5 | from .chmodel import * 6 | import numpy as np 7 | import skrf as rf 8 | import scipy as sp 9 | import matplotlib.pyplot as plt 10 | 11 | def grey_encode(x): 12 | """encodes 2 bits into one pam4 symbol with grey encoding 13 | 14 | Parameters 15 | ---------- 16 | x : array 17 | contains the two bits to be encoded 18 | 19 | Returns 20 | ------- 21 | int 22 | the pam4 symbol that represents x 23 | 24 | """ 25 | if (x[0] == 0): 26 | if (x[1] == 0): 27 | return 0 28 | else: 29 | return 1 30 | else: 31 | if (x[1] == 0): 32 | return 3 33 | else: 34 | return 2 35 | 36 | def grey_decode(symbol): 37 | """decodes one pam4 symbol into two bits with grey decoding 38 | 39 | Parameters 40 | ---------- 41 | symbol : int 42 | the pam4 symbol 43 | 44 | Returns 45 | ------- 46 | array 47 | the two bits that represent symbol 48 | """ 49 | if symbol == 0 : 50 | return np.array([0,0],dtype = np.uint8) 51 | if symbol == 1 : 52 | return np.array([0,1],dtype = np.uint8) 53 | if symbol == 2 : 54 | return np.array([1,1],dtype = np.uint8) 55 | if symbol == 3 : 56 | return np.array([1,0],dtype = np.uint8) 57 | 58 | def natural_encode(x): 59 | """encodes 2 bits into one pam4 symbol with natural encoding 60 | 61 | Parameters 62 | ---------- 63 | x : array 64 | contains the two bits to be encoded 65 | 66 | Returns 67 | ------- 68 | int 69 | the pam4 symbol that represents x 70 | 71 | """ 72 | 73 | if (x[0] == 0): 74 | if (x[1] == 0): 75 | return 0 76 | else: 77 | return 1 78 | else: 79 | if (x[1] == 0): 80 | return 2 81 | else: 82 | return 3 83 | 84 | def natural_decode(symbol): 85 | """decodes one pam4 symbol into two bits with natural decoding 86 | 87 | Parameters 88 | ---------- 89 | symbol : int 90 | the pam4 symbol 91 | 92 | Returns 93 | ------- 94 | array 95 | the two bits that represent symbol 96 | """ 97 | if symbol == 0 : 98 | return np.array([0,0],dtype = np.uint8) 99 | if symbol == 1 : 100 | return np.array([0,1],dtype = np.uint8) 101 | if symbol == 2 : 102 | return np.array([1,0],dtype = np.uint8) 103 | if symbol == 3 : 104 | return np.array([1,1],dtype = np.uint8) 105 | 106 | def pam4_decision(x,l,m,h): 107 | """produces pam4 symbol from analog voltage 108 | 109 | Parameters 110 | ---------- 111 | x : float 112 | the analog voltage 113 | 114 | l: float 115 | voltage threshold between 0 and 1 symbol 116 | 117 | m: float 118 | voltage threshold between 1 and 2 symbol 119 | 120 | h: float 121 | voltage threshold between 2 and 3 symbol 122 | 123 | Returns 124 | ------- 125 | int 126 | the pam4 symbol that represents x 127 | """ 128 | if x= len(reference): 559 | y_ref = z[k] 560 | else: 561 | y_ref = reference[k] 562 | 563 | e[k] = v_dfe[k] - y_ref 564 | if (k - min_delay) % update_rate == 0: 565 | #print(k) 566 | if w_ffe is not None: 567 | w_ffe -= mu * e[k] * y_k 568 | if w_dfe is not None: 569 | w_dfe += mu * e[k] * z_k 570 | 571 | if is_cooptimizing and alpha is not None: 572 | w_dfe[:len(alpha)] = alpha 573 | 574 | end = N - FFE_pre 575 | return w_ffe, w_dfe,\ 576 | v_ffe[min_delay:end],\ 577 | v_dfe[min_delay:end],\ 578 | z[min_delay:end],\ 579 | e[min_delay:end] 580 | 581 | def _quantize(signal, voltage_levels): 582 | idx = np.abs(voltage_levels - signal).argmin() 583 | return voltage_levels[idx] 584 | 585 | -------------------------------------------------------------------------------- /serdespy/transmitter.py: -------------------------------------------------------------------------------- 1 | from .signal import * 2 | from .chmodel import * 3 | import numpy as np 4 | import skrf as rf 5 | import scipy as sp 6 | import matplotlib.pyplot as plt 7 | import samplerate 8 | 9 | class Transmitter: 10 | """class to build model of time domain signal at transmitter 11 | 12 | """ 13 | 14 | def __init__(self, data, voltage_levels, frequency): 15 | """ 16 | Initialize transmitter, stores data and converts to baud-rate-sampled voltage waveform 17 | 18 | Parameters 19 | ---------- 20 | data : array 21 | Binary sequence containing {0,1} if NRZ 22 | Quaternary sequence containing {0,1,2,3} symbols if PAM-4 23 | 24 | voltage levels: array 25 | definition of voltages corresponding to symbols. 26 | 27 | frequency: float 28 | 2* symbol rate 29 | 30 | """ 31 | 32 | #frequency and period 33 | self.f = frequency 34 | self.T = 1/self.f 35 | self.UI = self.T/2 36 | 37 | 38 | self.voltage_levels = voltage_levels 39 | self.data = data 40 | self.n_symbols = data.size 41 | 42 | 43 | #self.signal_FIR_BR = None 44 | self.FIR_enable = False 45 | 46 | #create ideal, baud-rate-sampled transmitter waveform 47 | if voltage_levels.size == 2: 48 | self.signal_BR = nrz_input_BR(data,voltage_levels) 49 | 50 | elif voltage_levels.size == 4: 51 | self.signal_BR = pam4_input_BR(data,voltage_levels) 52 | 53 | else: 54 | print ("Error: Voltage levels must have either size = 2 for NRZ signal or size = 4 for PAM4") 55 | 56 | def FIR(self, tap_weights): 57 | 58 | """Implements TX - FIR and creates self.signal_FIR_BR = filtered, baud-rate sampled signal 59 | 60 | Parameters 61 | ---------- 62 | 63 | tap_weights: array 64 | tap weights for tx fir 65 | last element should be 1, eg. for a 2-tap TX-FIR, with -0.1 and -0.2 coefficients, tap_weights = np.array([-0.1, -0.2, 1]) 66 | 67 | """ 68 | self.FIR_enable = True 69 | 70 | #do convolution to implement FIR 71 | self.signal_FIR_BR = sp.signal.fftconvolve(self.signal_BR,tap_weights, mode="same") 72 | 73 | def oversample(self, samples_per_symbol): 74 | """oversample baud-rate signal to create ideal, square transmitter waveform 75 | 76 | Parameters 77 | ---------- 78 | 79 | samples_per_symbol: 80 | samples per UI of tx signal 81 | 82 | """ 83 | 84 | 85 | self.samples_per_symbol = samples_per_symbol 86 | 87 | 88 | 89 | #TODO: use np.repeat 90 | 91 | 92 | #if we have FIR filtered data 93 | if self.FIR_enable: 94 | #oversampled = samplerate.resample(self.signal_FIR_BR,samples_per_symbol,converter_type='zero_order_hold') 95 | self.signal_ideal = np.repeat(self.signal_FIR_BR, samples_per_symbol) 96 | #if we are not using FIR 97 | else: 98 | #oversampled = samplerate.resample(self.signal_BR,samples_per_symbol,converter_type='zero_order_hold') 99 | self.signal_ideal = np.repeat(self.signal_BR, samples_per_symbol) 100 | 101 | def gaussian_jitter(self, stdev_div_UI = 0.025): 102 | """Generates the TX waveform from ideal, square, self.signal_ideal with jitter 103 | 104 | Parameters 105 | ---------- 106 | stdev_div_UI : float 107 | standard deviation of jitter distribution as a pct of UI 108 | """ 109 | 110 | #generate random Gaussian distributed TX jitter values 111 | epsilon = np.random.normal(0,stdev_div_UI*self.UI,self.n_symbols) 112 | 113 | epsilon.clip(self.UI) 114 | epsilon[0]=0 115 | 116 | #calculate time duration of each sample 117 | sample_time = self.UI/self.samples_per_symbol 118 | 119 | #initializes non_ideal (jitter) array 120 | non_ideal = np.zeros_like(self.signal_ideal) 121 | 122 | #populates non_ideal array to create TX jitter waveform 123 | for symbol_index,symbol_epsilon in enumerate(epsilon): 124 | epsilon_duration = int(round(symbol_epsilon/sample_time)) 125 | start = int(symbol_index*self.samples_per_symbol) 126 | end = int(start+epsilon_duration) 127 | flip=1 128 | if symbol_index==0: 129 | continue 130 | if symbol_epsilon<0: 131 | start,end=end,start 132 | flip=-1 133 | non_ideal[start:end]=flip*(self.signal_ideal[symbol_index*self.samples_per_symbol-self.samples_per_symbol]-self.signal_ideal[symbol_index*self.samples_per_symbol]) 134 | 135 | #calculate TX output waveform 136 | self.signal = np.copy(self.signal_ideal+non_ideal) 137 | 138 | def tx_bandwidth(self, freq_bw = None, TF = None): 139 | """Passes TX signal through an LTI system to model non-ideal TX driver 140 | option to use custom transfer function, or use single-pole system with specified -3dB frequency 141 | 142 | Parameters 143 | ---------- 144 | freq_bw: float 145 | bandwidth frequency 146 | 147 | TF: list, optional 148 | TF[0] : numerator coefficients for tranfer function 149 | TF[1] : denominator coefficients for Transfer function 150 | 151 | """ 152 | 153 | #Timestep 154 | dt = self.UI/self.samples_per_symbol 155 | 156 | #max frequency for constructing discrete transfer function 157 | max_f = 1/dt 158 | 159 | #max_f in rad/s 160 | max_w = max_f*2*np.pi 161 | 162 | #heuristic to get a reasonable impulse response length 163 | ir_length = int(4/(freq_bw*dt)) 164 | 165 | #Calculate discrete transfer function 166 | if TF != None: 167 | w, H = sp.signal.freqs(TF[0], TF[1], np.linspace(0,0.5*max_w,ir_length*4)) 168 | else: 169 | #calculate discrete transfer function of low-pass filter with pole at freq_bw 170 | w, H = sp.signal.freqs([freq_bw*(2*np.pi)], [1,freq_bw*(2*np.pi)], np.linspace(0,0.5*max_w,ir_length*4)) 171 | 172 | #frequency vector for discrete transfer function in hz 173 | f = w/(2*np.pi) 174 | 175 | #plot frequency response of the low-pass filter 176 | plt.figure(dpi=800) 177 | plt.semilogx(1e-9*f,20*np.log10(abs(H))) 178 | plt.ylabel('Mag. Response [dB]') 179 | plt.xlabel('Frequency [GHz]') 180 | plt.title("Low Pass Filter with {}MHz Cutoff Magnitude Bode Plot".format(round(freq_bw*1e-6))) 181 | plt.grid() 182 | plt.axvline(x=1e-9*freq_bw,color = 'grey') 183 | plt.show() 184 | 185 | #find impluse response of low-pass filter 186 | h, t = sdp.freq2impulse(H,f) 187 | 188 | #plot impulse response of the low-pass filter 189 | # plt.figure(dpi=800) 190 | # plt.plot(t[:ir_length]*1e12,h[:ir_length]) 191 | # plt.title("Low Pass Filter with {}MHz Cutoff Impulse Response".format(round(freq_bw*1e-6))) 192 | # plt.xlabel('Time [ps]') 193 | # plt.ylabel('[V]') 194 | # plt.show() 195 | 196 | self.signal = sp.signal.fftconvolve(h[:ir_length], self.signal) 197 | 198 | def resample(self,samples_per_symbol): 199 | """ resamples signal to new oversampling ratio 200 | 201 | Parameters 202 | ---------- 203 | samples_per_symbol: int 204 | new number of samples per UI 205 | """ 206 | #TODO: check this 207 | 208 | q = samples_per_symbol/self.samples_per_symbol 209 | 210 | #if (self.samples_per_symbol % q != 0): 211 | # print(r'Must downsample UI with a divisor of {self.samples_per_symbol}') 212 | # return False 213 | 214 | self.samples_per_symbol = samples_per_symbol 215 | 216 | self.signal = samplerate.resample(self.signal, q, 'zero_order_hold') 217 | 218 | 219 | def gaussian_jitter(signal_ideal, UI,n_symbols,samples_per_symbol,stdev): 220 | """Generates the TX waveform from ideal, square, self.signal_ideal with jitter 221 | 222 | Parameters 223 | ---------- 224 | signal_ideal: array 225 | ideal,square transmitter voltage waveform 226 | 227 | UI: float 228 | length of one unit interval in seconds 229 | 230 | n_symbols: int 231 | number of symbols in signal_ideal 232 | 233 | samples_per_symbol: int 234 | number of samples in signal_ideal corresponding to one UI 235 | 236 | stdev: 237 | standard deviation of gaussian jitter in seconds 238 | 239 | stdev_div_UI : float 240 | standard deviation of jitter distribution as a pct of UI 241 | """ 242 | 243 | #generate random Gaussian distributed TX jitter values 244 | epsilon = np.random.normal(0,stdev,n_symbols) 245 | 246 | epsilon.clip(UI) 247 | epsilon[0]=0 248 | 249 | #calculate time duration of each sample 250 | sample_time = UI/samples_per_symbol 251 | 252 | #initializes non_ideal (jitter) array 253 | non_ideal = np.zeros_like(signal_ideal) 254 | 255 | #populates non_ideal array to create TX jitter waveform 256 | for symbol_index,symbol_epsilon in enumerate(epsilon): 257 | epsilon_duration = int(round(symbol_epsilon/sample_time)) 258 | start = int(symbol_index*samples_per_symbol) 259 | end = int(start+epsilon_duration) 260 | flip=1 261 | if symbol_index==0: 262 | continue 263 | if symbol_epsilon<0: 264 | start,end=end,start 265 | flip=-1 266 | non_ideal[start:end]=flip*(signal_ideal[symbol_index*samples_per_symbol-samples_per_symbol]-signal_ideal[symbol_index*samples_per_symbol]) 267 | 268 | #calculate TX output waveform 269 | signal = np.copy(signal_ideal+non_ideal) 270 | return signal 271 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """A setuptools based setup module. 2 | See: 3 | https://packaging.python.org/guides/distributing-packages-using-setuptools/ 4 | https://github.com/pypa/sampleproject 5 | """ 6 | 7 | # Always prefer setuptools over distutils 8 | from setuptools import setup, find_packages 9 | 10 | import pathlib 11 | here = pathlib.Path(__file__).parent.resolve() 12 | 13 | # Get the long description from the README file 14 | long_description = (here / 'README.md').read_text(encoding='utf-8') 15 | 16 | # Arguments marked as "Required" below must be included for upload to PyPI. 17 | # Fields marked as "Optional" may be commented out. 18 | 19 | setup( 20 | # This is the name of your project. The first time you publish this 21 | # package, this name will be registered for you. It will determine how 22 | # users can install this project, e.g.: 23 | # 24 | # $ pip install sampleproject 25 | # 26 | # And where it will live on PyPI: https://pypi.org/project/sampleproject/ 27 | # 28 | # There are some restrictions on what makes a valid project name 29 | # specification here: 30 | # https://packaging.python.org/specifications/core-metadata/#name 31 | name='serdespy', # Required 32 | 33 | # Versions should comply with PEP 440: 34 | # https://www.python.org/dev/peps/pep-0440/ 35 | # 36 | # For a discussion on single-sourcing the version across setup.py and the 37 | # project code, see 38 | # https://packaging.python.org/guides/single-sourcing-package-version/ 39 | version='1.0', # Required 40 | 41 | # This is a one-line description or tagline of what your project does. This 42 | # corresponds to the "Summary" metadata field: 43 | # https://packaging.python.org/specifications/core-metadata/#summary 44 | description='Library for system-level SerDes modelling and Simulation', # Optional 45 | 46 | # This is an optional longer description of your project that represents 47 | # the body of text which users will see when they visit PyPI. 48 | # 49 | # Often, this is the same as your README, so you can just read it in from 50 | # that file directly (as we have already done above) 51 | # 52 | # This field corresponds to the "Description" metadata field: 53 | # https://packaging.python.org/specifications/core-metadata/#description-optional 54 | long_description=long_description, # Optional 55 | 56 | # Denotes that our long_description is in Markdown; valid values are 57 | # text/plain, text/x-rst, and text/markdown 58 | # 59 | # Optional if long_description is written in reStructuredText (rst) but 60 | # required for plain-text or Markdown; if unspecified, "applications should 61 | # attempt to render [the long_description] as text/x-rst; charset=UTF-8 and 62 | # fall back to text/plain if it is not valid rst" (see link below) 63 | # 64 | # This field corresponds to the "Description-Content-Type" metadata field: 65 | # https://packaging.python.org/specifications/core-metadata/#description-content-type-optional 66 | long_description_content_type='text/markdown', # Optional (see note above) 67 | 68 | # This should be a valid link to your project's main homepage. 69 | # 70 | # This field corresponds to the "Home-Page" metadata field: 71 | # https://packaging.python.org/specifications/core-metadata/#home-page-optional 72 | #url='https://github.com/pypa/sampleproject', # Optional 73 | 74 | # This should be your name or the name of the organization which owns the 75 | # project. 76 | author='Richard Barrie', # Optional 77 | 78 | # This should be a valid email address corresponding to the author listed 79 | # above. 80 | author_email='richard.barrie@mail.utoronto.ca', # Optional 81 | 82 | # Classifiers help users find your project by categorizing it. 83 | # 84 | # For a list of valid classifiers, see https://pypi.org/classifiers/ 85 | classifiers=[ # Optional 86 | # How mature is this project? Common values are 87 | # 3 - Alpha 88 | # 4 - Beta 89 | # 5 - Production/Stable 90 | 'Development Status :: 3 - Alpha', 91 | 92 | # Indicate who your project is intended for 93 | #'Intended Audience :: Students', 94 | #'Topic :: Software Development :: Build Tools', 95 | 96 | # Pick your license as you wish 97 | 'License :: OSI Approved :: MIT License', 98 | 99 | # Specify the Python versions you support here. In particular, ensure 100 | # that you indicate you support Python 3. These classifiers are *not* 101 | # checked by 'pip install'. See instead 'python_requires' below. 102 | #'Programming Language :: Python :: 3', 103 | #'Programming Language :: Python :: 3.6', 104 | 'Programming Language :: Python :: 3.7', 105 | 'Programming Language :: Python :: 3.8', 106 | 'Programming Language :: Python :: 3.9', 107 | #"Programming Language :: Python :: 3.10", 108 | #'Programming Language :: Python :: 3 :: Only', 109 | ], 110 | 111 | # This field adds keywords for your project which will appear on the 112 | # project page. What does your project relate to? 113 | # 114 | # Note that this is a list of additional keywords, separated 115 | # by commas, to be used to assist searching for the distribution in a 116 | # larger catalog. 117 | #keywords='sample, setuptools, development', # Optional 118 | 119 | # When your source code is in a subdirectory under the project root, e.g. 120 | # `src/`, it is necessary to specify the `package_dir` argument. 121 | #package_dir={'': 'src'}, # Optional 122 | 123 | # You can just specify package directories manually here if your project is 124 | # simple. Or you can use find_packages(). 125 | # 126 | # Alternatively, if you just want to distribute a single Python file, use 127 | # the `py_modules` argument instead as follows, which will expect a file 128 | # called `my_module.py` to exist: 129 | # 130 | # py_modules=["my_module"], 131 | # 132 | # packages=find_packages(where='src'), # Required 133 | packages = 'serdespy'.split(), 134 | 135 | # Specify which Python versions you support. In contrast to the 136 | # 'Programming Language' classifiers above, 'pip install' will check this 137 | # and refuse to install the project if the version does not match. See 138 | # https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires 139 | #python_requires='3.7', 140 | 141 | # This field lists other packages that your project depends on to run. 142 | # Any package you put here will be installed by pip when your project is 143 | # installed, so they must be valid existing projects. 144 | # 145 | # For an analysis of "install_requires" vs pip's requirements files see: 146 | # https://packaging.python.org/discussions/install-requires-vs-requirements/ 147 | install_requires = [ 148 | 'numpy', 149 | 'scipy', 150 | 'matplotlib', 151 | 'scikit-rf', 152 | 'samplerate', 153 | ] 154 | 155 | 156 | # List additional groups of dependencies here (e.g. development 157 | # dependencies). Users will be able to install these using the "extras" 158 | # syntax, for example: 159 | # 160 | # $ pip install sampleproject[dev] 161 | # 162 | # Similar to `install_requires` above, these must be valid existing 163 | # projects. 164 | #extras_require={ # Optional 165 | # 'dev': ['check-manifest'], 166 | # 'test': ['coverage'], 167 | #}, 168 | 169 | # If there are data files included in your packages that need to be 170 | # installed, specify them here. 171 | #package_data={ # Optional 172 | # 'sample': ['package_data.dat'], 173 | #}, 174 | 175 | # Although 'package_data' is the preferred approach, in some case you may 176 | # need to place data files outside of your packages. See: 177 | # http://docs.python.org/distutils/setupscript.html#installing-additional-files 178 | # 179 | # In this case, 'data_file' will be installed into '/my_data' 180 | #data_files=[('my_data', ['data/data_file'])], # Optional 181 | 182 | # To provide executable scripts, use entry points in preference to the 183 | # "scripts" keyword. Entry points provide cross-platform support and allow 184 | # `pip` to create the appropriate form of executable for the target 185 | # platform. 186 | # 187 | # For example, the following would provide a command called `sample` which 188 | # executes the function `main` from this package when invoked: 189 | #entry_points={ # Optional 190 | # 'console_scripts': [ 191 | # 'sample=sample:main', 192 | # ], 193 | #}, 194 | 195 | # List additional URLs that are relevant to your project as a dict. 196 | # 197 | # This field corresponds to the "Project-URL" metadata fields: 198 | # https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use 199 | # 200 | # Examples listed include a pattern for specifying where the package tracks 201 | # issues, where the source is hosted, where to say thanks to the package 202 | # maintainers, and where to support the project financially. The key is 203 | # what's used to render the link text on PyPI. 204 | #project_urls={ # Optional 205 | # 'Bug Reports': 'https://github.com/pypa/sampleproject/issues', 206 | # 'Funding': 'https://donate.pypi.org', 207 | # 'Say Thanks!': 'http://saythanks.io/to/example', 208 | # 'Source': 'https://github.com/pypa/sampleproject/', 209 | #}, 210 | ) 211 | --------------------------------------------------------------------------------