├── DetectNet.py
├── README.md
├── SoftCombinationNet.py
├── analyze_stats.py
├── generate_dataset.py
├── source_alphabet.py
├── source_material
│   └── gutenberg_shakespeare.txt
├── timeseries_slicer.py
├── transmitters.py
└── utils.py

--------------------------------------------------------------------------------
/DetectNet.py:
--------------------------------------------------------------------------------
import os

import utils

from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import LambdaCallback, EarlyStopping, ModelCheckpoint, TensorBoard

# GPU usage setup
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

#%%
# hyperparameters
lr = 0.0003
filter_num = 60
kernel_size = 10
lstm_units = 128
drop_ratio = 0.2
lstm_drop_ratio = 0.2
dense_units = 128
sample_length = 128
max_epoch = 100
batch_size = 200
patience = 6

# load data
filename = 'pkl_data/' + str(sample_length) + '.pkl'
x_train, y_train, x_val, y_val, x_test, y_test, val_SNRs, test_SNRs = utils.radioml_IQ_data(filename)

# callbacks
early_stopping = EarlyStopping(monitor='val_loss', patience=patience)
best_model_path = 'result/models/DetectNet/' + str(sample_length) + '/best.h5'
checkpointer = ModelCheckpoint(best_model_path, verbose=1, save_best_only=True)
TB_dir = 'result/TB'
tensorboard = TensorBoard(TB_dir)

# first stage: train until the validation loss stops improving
model = utils.DetectNet(lr, (2, sample_length), filter_num, lstm_units, kernel_size,
                        drop_ratio, lstm_drop_ratio, dense_units)
history = model.fit(x_train, y_train, epochs=max_epoch, batch_size=batch_size, verbose=1,
                    shuffle=True, validation_data=(x_val, y_val),
                    callbacks=[early_stopping, checkpointer, tensorboard])
print('First stage finished, loss is stable')

# second stage: keep training the best model until the validation false-alarm
# probability Pf falls inside the target interval (see utils.get_pf)
pf_min = 6.5
pf_max = 7.5
pf_test = LambdaCallback(
    on_epoch_end=lambda epoch, logs: utils.get_pf(x_val, y_val, val_SNRs, model, epoch, pf_min, pf_max))
print('Start second stage, trading off the metrics')
model = load_model(best_model_path)
model.fit(x_train, y_train, epochs=max_epoch, batch_size=batch_size, verbose=1, shuffle=True,
          callbacks=[pf_test])
if model.stop_training:
    # save results
    model.save('result/models/DetectNet/' + str(sample_length) + '/final.h5')
    print('Second stage finished, got the final model')
    save_path = 'result/xls/DetectNet/' + str(sample_length) + '/Pds.xls'
    utils.performance_evaluation(save_path, x_test, y_test, test_SNRs, model)
else:
    print("Can't meet the Pf lower bound")
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# DL-based-signal-detection
Source code for the paper "Deep Learning for Spectrum Sensing".
## Dataset
See the [RadioML dataset generation code](https://github.com/radioML/dataset), from which `generate_dataset.py` in this repository is adapted.
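## Usage
The run order below is inferred from the file dependencies: `generate_dataset.py` writes `pkl_data/<length>.pkl`, `DetectNet.py` trains on it and saves `result/models/DetectNet/<length>/final.h5`, and `SoftCombinationNet.py` loads that model for cooperative sensing.
1. `python generate_dataset.py`
2. `python DetectNet.py`
3. `python SoftCombinationNet.py`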
--------------------------------------------------------------------------------
/SoftCombinationNet.py:
--------------------------------------------------------------------------------
import csv

import numpy as np

import utils

from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import LambdaCallback, EarlyStopping, ModelCheckpoint

#%% single-node sensing
# hyperparameters
lr = 0.0003
drop_ratio = 0.2
sample_length = 128
max_epoch = 100
batch_size = 200
patience = 8

# load data
dataset, labelset, SNR = utils.radioml_IQ_CO_data('pkl_data/' + str(sample_length) + '_co.pkl')
total_group = dataset.shape[0]
nodes = dataset.shape[1]
total_num = total_group * nodes

snrs = np.linspace(-20, 19, 40)
snrs = np.array(snrs, dtype='int16')
snr_type = len(snrs)

# load the single-node model trained by DetectNet.py
model_single = load_model('result/models/DetectNet/' + str(sample_length) + '/final.h5')
flatten_dataset = np.reshape(dataset, (total_num, 2, sample_length))

predictions = model_single.predict(flatten_dataset, verbose=1)
decisions = np.argmax(predictions, axis=1)

# the second half of the dataset is noise only, so its decisions give Pf
noise_decisions = decisions[total_num // 2:]
pf = 1 - np.mean(noise_decisions)

# group the signal decisions by average SNR
signal_decisions = np.reshape(decisions[:total_num // 2], (snr_type, total_num // 2 // snr_type))
pd_list = np.zeros((snr_type, 1))
for i in range(snr_type):
    pd_list[i] = 1 - np.mean(signal_decisions[i])

pd_list = np.append(pd_list, pf)
with open('result/xls/SoftCombinationNet/Pds.xls', 'w') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(pd_list)

#%% cooperative sensing
noise_decisions_groups = np.reshape(noise_decisions, (noise_decisions.shape[0] // nodes, nodes))
# 1000 is the number of samples per modulation scheme and SNR
signal_decisions_groups = np.reshape(signal_decisions, (snr_type, 1000, nodes))

# logical-OR fusion rule: a group declares "signal present" (class 0) if any node does
error = 0
for group in noise_decisions_groups:
    error += int(np.sum(group) < nodes)
pf_hard = error / (total_group // 2)

pd_hard_list = np.zeros((snr_type, 1))
for i in range(snr_type):
    snr_decisions_groups = signal_decisions_groups[i]
    correct = 0
    for group in snr_decisions_groups:
        correct += int(np.sum(group) < nodes)
    pd_hard_list[i] = correct / len(snr_decisions_groups)

pd_hard_list = np.append(pd_hard_list, pf_hard)
with open('result/xls/SoftCombinationNet/Pds_hard.xls', 'w') as f:
    f_csv = csv.writer(f)
    f_csv.writerow(pd_hard_list)

# SoftCombinationNet
softmax_dataset = np.reshape(predictions, (total_group, nodes, 2))
shuffle_idx = np.random.choice(range(0, total_group), size=total_group, replace=False)
softmax_dataset = softmax_dataset[shuffle_idx]
SNR = SNR[shuffle_idx]
softmax_labelset = labelset[shuffle_idx]

# 60/20/20 train/validation/test split
co_x_train = softmax_dataset[:int(total_group * 0.6)]
co_y_train = softmax_labelset[:int(total_group * 0.6)]
co_x_val = softmax_dataset[int(total_group * 0.6):int(total_group * 0.8)]
co_y_val = softmax_labelset[int(total_group * 0.6):int(total_group * 0.8)]
co_x_test = softmax_dataset[int(total_group * 0.8):]
co_y_test = softmax_labelset[int(total_group * 0.8):]
val_SNRs = SNR[int(total_group * 0.6):int(total_group * 0.8)]
test_SNRs = SNR[int(total_group * 0.8):]

input_shape = (nodes, 2)
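# Each cooperative-sensing sample is the (nodes, 2) matrix of per-node softmax
# outputs, so the fusion network learns a soft-combination rule over the nodes'
# confidences instead of over their hard 0/1 decisions.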
model_co = utils.SoftCombinationNet(lr, input_shape, drop_ratio)

early_stopping = EarlyStopping(monitor='val_loss', patience=patience)
best_model_path = 'result/models/SoftCombinationNet/best.h5'
checkpointer = ModelCheckpoint(best_model_path, verbose=1, save_best_only=True)
model_co.fit(co_x_train, co_y_train, epochs=max_epoch, batch_size=batch_size, verbose=1, shuffle=True,
             validation_data=(co_x_val, co_y_val),
             callbacks=[early_stopping, checkpointer])

# second stage: keep training until Pf falls inside the target interval
model_co = load_model(best_model_path)
pf_min = 1.5
pf_max = 2.5
pf_test = LambdaCallback(
    on_epoch_end=lambda epoch, logs: utils.get_pf(co_x_val, co_y_val, val_SNRs, model_co, epoch, pf_min, pf_max))
model_co.fit(co_x_train, co_y_train, epochs=max_epoch, batch_size=batch_size, verbose=1, shuffle=True,
             callbacks=[pf_test])

utils.performance_evaluation('result/xls/SoftCombinationNet/Pds_soft.xls', co_x_test, co_y_test, test_SNRs, model_co)
--------------------------------------------------------------------------------
/analyze_stats.py:
--------------------------------------------------------------------------------
import pickle

import matplotlib.pyplot as plt
import numpy as np

def calc_vec_energy(vec):
    # sum of instantaneous magnitudes of a 2xN I/Q vector
    isquared = np.power(vec[0], 2.0)
    qsquared = np.power(vec[1], 2.0)
    inst_energy = np.sqrt(isquared + qsquared)
    return sum(inst_energy)

def calc_mod_energies(ds):
    for modulation, snr in ds:
        avg_energy = 0
        nvectors = ds[(modulation, snr)].shape[0]
        for vec in ds[(modulation, snr)]:
            avg_energy += calc_vec_energy(vec)
        avg_energy /= nvectors
        print("%s at %i has %i vectors avg energy of %2.1f" % (modulation, snr, nvectors, avg_energy))

def calc_mod_bias(ds):
    for modulation, snr in ds:
        avg_bias_re = 0
        avg_bias_im = 0
        nvectors = ds[(modulation, snr)].shape[0]
        for vec in ds[(modulation, snr)]:
            avg_bias_re += np.mean(vec[0])
            avg_bias_im += np.mean(vec[1])
        #avg_bias_re /= nvectors
        #avg_bias_im /= nvectors
        print("%s at %i has %i vectors avg bias of %2.1f + %2.1f j" % (modulation, snr, nvectors, avg_bias_re, avg_bias_im))

def calc_mod_stddev(ds):
    for modulation, snr in ds:
        avg_stddev = 0
        nvectors = ds[(modulation, snr)].shape[0]
        for vec in ds[(modulation, snr)]:
            avg_stddev += np.abs(np.std(vec[0] + 1j * vec[1]))
        #avg_stddev /= nvectors
        print("%s at %i has %i vectors avg stddev of %2.1f" % (modulation, snr, nvectors, avg_stddev))

def open_ds(location="X_4_dict.dat"):
    with open(location, "rb") as f:
        ds = pickle.load(f)
    return ds

def main():
    ds = open_ds()
    #plt.plot(ds[('BPSK', 12)][25][0][:])
    #plt.plot(ds[('BPSK', 12)][25][1][:])
    #plt.show()
    #calc_mod_energies(ds)
    #calc_mod_stddev(ds)
    calc_mod_bias(ds)

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/generate_dataset.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
import pickle
import random

import numpy as np

from gnuradio import channels, gr, blocks
from source_alphabet import source_alphabet
from transmitters import transmitters

dataset = {}
# The output format looks like this:
# {('mod type', SNR): np.array(nvecs_per_key, 2, vec_length), ...}
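# e.g. for vec_length = 128, dataset[('BPSK', -10)] is a float32 array of
# shape (1000, 2, 128): 1000 vectors, an I and a Q row, 128 samples each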
nvecs_per_key = 1000
vec_length_list = [64, 128, 256, 512, 1024]
snr_vals = range(-20, 20, 1)
for vec_length in vec_length_list:
    for snr in snr_vals:
        print("snr is ", snr)
        for alphabet_type in transmitters.keys():
            for i, mod_type in enumerate(transmitters[alphabet_type]):
                dataset[(mod_type.modname, snr)] = np.zeros([nvecs_per_key, 2, vec_length], dtype=np.float32)
                # more vectors!
                insufficient_modsnr_vectors = True
                modvec_indx = 0
                while insufficient_modsnr_vectors:
                    tx_len = int(10e3)
                    if mod_type.modname == "QAM16":
                        tx_len = int(20e3)
                    if mod_type.modname == "QAM64":
                        tx_len = int(30e3)
                    src = source_alphabet(alphabet_type, tx_len, True)
                    mod = mod_type()
                    snk = blocks.vector_sink_c()
                    tb = gr.top_block()
                    # connect blocks
                    tb.connect(src, mod, snk)
                    tb.run()
                    raw_output_vector = np.array(snk.data(), dtype=np.complex64)
                    # start the sampler some random time after channel model transients (arbitrary values here)
                    sampler_indx = random.randint(50, 500)
                    while sampler_indx + vec_length < len(raw_output_vector) and modvec_indx < nvecs_per_key:
                        sampled_vector = raw_output_vector[sampler_indx:sampler_indx + vec_length]
                        # random single-tap complex channel gain
                        H = np.random.randn(1) + 1j * np.random.randn(1)
                        sampled_vector = sampled_vector * H
                        # scale the signal so its energy relative to the noise matches the target SNR
                        random_noise = np.random.randn(vec_length) + 1j * np.random.randn(vec_length)
                        random_noise_energy = np.sum(np.abs(random_noise) ** 2)
                        signal_energy_expected = random_noise_energy * (10 ** (snr / 10.0))
                        signal_energy = np.sum(np.abs(sampled_vector) ** 2)
                        sampled_vector = sampled_vector * (signal_energy_expected ** 0.5) / (signal_energy ** 0.5)
                        total_vector = sampled_vector + random_noise  # AWGN
                        total_energy = np.sum(np.abs(total_vector) ** 2)
                        # energy normalization
                        total_vector = total_vector / (total_energy ** 0.5)
                        # store the normalized signal-plus-noise vector
                        dataset[(mod_type.modname, snr)][modvec_indx, 0, :] = np.real(total_vector)
                        dataset[(mod_type.modname, snr)][modvec_indx, 1, :] = np.imag(total_vector)
                        # bound the upper end very high so it's likely we get multiple passes
                        # through independent channels
                        sampler_indx += random.randint(vec_length, round(len(raw_output_vector) * .05))
                        modvec_indx += 1

                    if modvec_indx == nvecs_per_key:
                        # we're all done
                        insufficient_modsnr_vectors = False

    # write one pickle per vector length
    print("all done. writing to disk")
    with open("./pkl_data/%d.pkl" % vec_length, "wb") as f:
        pickle.dump(dataset, f)
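# quick sanity check on the last-written pickle (illustrative; the key follows
# the format described above)
with open("./pkl_data/%d.pkl" % vec_length, "rb") as f:
    check = pickle.load(f)
vec = check[("BPSK", -10)]
print(vec.shape)             # (nvecs_per_key, 2, vec_length)
print(np.sum(vec[0] ** 2))   # stored vectors are energy-normalized, so ~1.0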
writing to disk") 67 | cPickle.dump(dataset, file("./pkl_data/%d.pkl"%vec_length, "wb" ) ) 68 | -------------------------------------------------------------------------------- /source_alphabet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from gnuradio import gr, blocks 3 | import mediatools 4 | import numpy as np 5 | 6 | class source_alphabet(gr.hier_block2): 7 | def __init__(self, dtype="discrete", limit=10000, randomize=False): 8 | if(dtype == "discrete"): 9 | gr.hier_block2.__init__(self, "source_alphabet", 10 | gr.io_signature(0,0,0), 11 | gr.io_signature(1,1,gr.sizeof_char)) 12 | 13 | self.src = blocks.file_source(gr.sizeof_char, "source_material/gutenberg_shakespeare.txt") 14 | self.convert = blocks.packed_to_unpacked_bb(1, gr.GR_LSB_FIRST); 15 | #self.convert = blocks.packed_to_unpacked_bb(8, gr.GR_LSB_FIRST); 16 | self.limit = blocks.head(gr.sizeof_char, limit) 17 | self.connect(self.src,self.convert) 18 | last = self.convert 19 | 20 | # whiten our sequence with a random block scrambler (optionally) 21 | if(randomize): 22 | rand_len = 256 23 | rand_bits = np.random.randint(2, size=rand_len) 24 | self.randsrc = blocks.vector_source_b(rand_bits, True) 25 | self.xor = blocks.xor_bb() 26 | self.connect(self.randsrc,(self.xor,1)) 27 | self.connect(last, self.xor) 28 | last = self.xor 29 | 30 | else: # "type_continuous" 31 | gr.hier_block2.__init__(self, "source_alphabet", 32 | gr.io_signature(0,0,0), 33 | gr.io_signature(1,1,gr.sizeof_float)) 34 | 35 | self.src = mediatools.audiosource_s(["source_material/serial-s01-e01.mp3"]) 36 | self.convert2 = blocks.interleaved_short_to_complex() 37 | self.convert3 = blocks.multiply_const_cc(1.0/65535) 38 | self.convert = blocks.complex_to_float() 39 | self.limit = blocks.head(gr.sizeof_float, limit) 40 | self.connect(self.src,self.convert2,self.convert3, self.convert) 41 | last = self.convert 42 | 43 | # connect head or not, and connect to output 44 | if(limit==None): 45 | self.connect(last, self) 46 | else: 47 | self.connect(last, self.limit, self) 48 | 49 | 50 | if __name__ == "__main__": 51 | print "QA..." 
    # Test discrete source
    tb = gr.top_block()
    src = source_alphabet("discrete", 1000)
    snk = blocks.vector_sink_b()
    tb.connect(src, snk)
    tb.run()

    # Test continuous source
    tb = gr.top_block()
    src = source_alphabet("continuous", 1000)
    snk = blocks.vector_sink_f()
    tb.connect(src, snk)
    tb.run()
--------------------------------------------------------------------------------
/timeseries_slicer.py:
--------------------------------------------------------------------------------
import numpy as np

import analyze_stats

def slice_timeseries(x, l=128, d=64, max_k=None):
    # slice a complex time series into k overlapping 2-by-l I/Q windows at stride d
    k = (len(x) - l + 1) // d
    if max_k is not None:
        k = min(k, max_k)
    X = np.zeros([k, 2, l], dtype=np.float32)
    for i in range(k):
        # Rect window
        w = np.ones([l])
        # Sin window
        #w = np.sin(np.arange(0, np.pi, np.pi / l))
        x_i = x[i * d:i * d + l] * w
        X[i, 0, :] = np.real(x_i)
        X[i, 1, :] = np.imag(x_i)
        energy = analyze_stats.calc_vec_energy(X[i])
        X[i, 0, :] /= energy
        X[i, 1, :] /= energy
    return X

def slice_timeseries_dict(td, l=128, d=64, max_k=None):
    nd = {}
    for key, v in td.items():
        nd[key] = slice_timeseries(v, l, d, max_k)
    return nd

def slice_timeseries_real(x, l=128, d=64, max_k=None):
    k = (len(x) - l + 1) // d
    if max_k is not None:
        k = min(k, max_k)  # cap the window count, as in slice_timeseries
    X = np.zeros([k, 1, l], dtype=np.float32)
    for i in range(k):
        X[i, 0, :] = x[i * d:i * d + l]
    return X

def slice_timeseries_real_dict(td, l=128, d=64, max_k=None):
    nd = {}
    for key, v in td.items():
        nd[key] = slice_timeseries_real(v, l, d, max_k)
    return nd
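if __name__ == "__main__":
    # minimal usage sketch with stand-in data (illustrative values only)
    x = np.random.randn(4096) + 1j * np.random.randn(4096)
    X = slice_timeseries(x, l=128, d=64)
    print(X.shape)  # (62, 2, 128): (4096 - 128 + 1) // 64 overlapping windows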
--------------------------------------------------------------------------------
/transmitters.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
from gnuradio import gr, blocks, digital, analog, filter
from gnuradio.filter import firdes
import mapper

sps = 8
ebw = 0.35

class transmitter_mapper(gr.hier_block2):
    def __init__(self, modtype, symvals, txname, samples_per_symbol=2, excess_bw=0.35):
        gr.hier_block2.__init__(self, txname,
                                gr.io_signature(1, 1, gr.sizeof_char),
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))
        self.mod = mapper.mapper(modtype, symvals)
        # pulse shaping filter
        nfilts = 32
        ntaps = nfilts * 11 * int(samples_per_symbol)  # make nfilts filters of ntaps each
        rrc_taps = filter.firdes.root_raised_cosine(
            nfilts,      # gain
            nfilts,      # sampling rate based on 32 filters in resampler
            1.0,         # symbol rate
            excess_bw,   # excess bandwidth (roll-off factor)
            ntaps)
        self.rrc_filter = filter.pfb_arb_resampler_ccf(samples_per_symbol, rrc_taps)
        self.connect(self, self.mod, self.rrc_filter, self)
        #self.rate = const.bits_per_symbol()

class transmitter_bpsk(transmitter_mapper):
    modname = "BPSK"
    def __init__(self):
        transmitter_mapper.__init__(self, mapper.BPSK,
            [0, 1], "transmitter_bpsk", sps, ebw)

class transmitter_qpsk(transmitter_mapper):
    modname = "QPSK"
    def __init__(self):
        transmitter_mapper.__init__(self, mapper.QPSK,
            [0, 1, 3, 2], "transmitter_qpsk", sps, ebw)

class transmitter_8psk(transmitter_mapper):
    modname = "8PSK"
    def __init__(self):
        transmitter_mapper.__init__(self, mapper.PSK8,
            [0, 1, 3, 2, 7, 6, 4, 5], "transmitter_8psk", sps, ebw)

class transmitter_pam4(transmitter_mapper):
    modname = "PAM4"
    def __init__(self):
        transmitter_mapper.__init__(self, mapper.PAM4,
            [0, 1, 3, 2], "transmitter_pam4", sps, ebw)

class transmitter_qam16(transmitter_mapper):
    modname = "QAM16"
    def __init__(self):
        transmitter_mapper.__init__(self, mapper.QAM16,
            [2, 6, 14, 10, 3, 7, 15, 11, 1, 5, 13, 9, 0, 4, 12, 8], "transmitter_qam16", sps, ebw)

class transmitter_qam64(transmitter_mapper):
    modname = "QAM64"
    def __init__(self):
        transmitter_mapper.__init__(self, mapper.QAM64,
            [0, 32, 8, 40, 3, 35, 11, 43,
             48, 16, 56, 24, 51, 19, 59, 27,
             12, 44, 4, 36, 15, 47, 7, 39,
             60, 28, 52, 20, 63, 31, 55, 23,
             2, 34, 10, 42, 1, 33, 9, 41,
             50, 18, 58, 26, 49, 17, 57, 25,
             14, 46, 6, 38, 13, 45, 5, 37,
             62, 30, 54, 22, 61, 29, 53, 21], "transmitter_qam64", sps, ebw)

class transmitter_gfsk(gr.hier_block2):
    modname = "GFSK"
    def __init__(self):
        gr.hier_block2.__init__(self, "transmitter_gfsk",
                                gr.io_signature(1, 1, gr.sizeof_char),
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))
        self.repack = blocks.unpacked_to_packed_bb(1, gr.GR_MSB_FIRST)
        self.mod = digital.gfsk_mod(sps, sensitivity=0.1, bt=ebw)
        self.connect(self, self.repack, self.mod, self)

class transmitter_cpfsk(gr.hier_block2):
    modname = "CPFSK"
    def __init__(self):
        gr.hier_block2.__init__(self, "transmitter_cpfsk",
                                gr.io_signature(1, 1, gr.sizeof_char),
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))
        self.mod = analog.cpfsk_bc(0.5, 1.0, sps)
        self.connect(self, self.mod, self)

class transmitter_fm(gr.hier_block2):
    modname = "WBFM"
    def __init__(self):
        gr.hier_block2.__init__(self, "transmitter_fm",
                                gr.io_signature(1, 1, gr.sizeof_float),
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))
        self.mod = analog.wfm_tx(audio_rate=44100.0, quad_rate=220.5e3)
        self.connect(self, self.mod, self)
        self.rate = 200e3 / 44.1e3

class transmitter_am(gr.hier_block2):
    modname = "AM-DSB"
    def __init__(self):
        gr.hier_block2.__init__(self, "transmitter_am",
                                gr.io_signature(1, 1, gr.sizeof_float),
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))
        self.rate = 44.1e3 / 200e3
        #self.rate = 200e3/44.1e3
        self.interp = filter.fractional_interpolator_ff(0.0, self.rate)
        self.cnv = blocks.float_to_complex()
        self.mul = blocks.multiply_const_cc(1.0)
        self.add = blocks.add_const_cc(1.0)
        self.src = analog.sig_source_c(200e3, analog.GR_SIN_WAVE, 0e3, 1.0)
        #self.src = analog.sig_source_c(200e3, analog.GR_SIN_WAVE, 50e3, 1.0)
        self.mod = blocks.multiply_cc()
        self.connect(self, self.interp, self.cnv, self.mul, self.add, self.mod, self)
        self.connect(self.src, (self.mod, 1))

class transmitter_amssb(gr.hier_block2):
    modname = "AM-SSB"
    def __init__(self):
        gr.hier_block2.__init__(self, "transmitter_amssb",
                                gr.io_signature(1, 1, gr.sizeof_float),
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))
        self.rate = 44.1e3 / 200e3
        #self.rate = 200e3/44.1e3
        self.interp = filter.fractional_interpolator_ff(0.0, self.rate)
        #self.cnv = blocks.float_to_complex()
        self.mul = blocks.multiply_const_ff(1.0)
        self.add = blocks.add_const_ff(1.0)
        self.src = analog.sig_source_f(200e3, analog.GR_SIN_WAVE, 0e3, 1.0)
        #self.src = analog.sig_source_c(200e3, analog.GR_SIN_WAVE, 50e3, 1.0)
        self.mod = blocks.multiply_ff()
        #self.filt = filter.fir_filter_ccf(1, firdes.band_pass(1.0, 200e3, 10e3, 60e3, 0.25e3, firdes.WIN_HAMMING, 6.76))
        self.filt = filter.hilbert_fc(401)
        self.connect(self, self.interp, self.mul, self.add, self.mod, self.filt, self)
        self.connect(self.src, (self.mod, 1))


transmitters = {
    "discrete": [transmitter_bpsk, transmitter_qpsk, transmitter_8psk, transmitter_pam4,
                 transmitter_qam16, transmitter_qam64, transmitter_gfsk, transmitter_cpfsk]}
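if __name__ == "__main__":
    # quick check of the registered discrete modulators (assumes GNU Radio and
    # the gr-mapper OOT module are installed)
    print([tx.modname for tx in transmitters["discrete"]])
    # ['BPSK', 'QPSK', '8PSK', 'PAM4', 'QAM16', 'QAM64', 'GFSK', 'CPFSK']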
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
# necessary python libraries
import pickle

import numpy as np
from sklearn.metrics import confusion_matrix
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Convolution1D, Dropout, Flatten, Reshape
from tensorflow.keras.models import Model


#%%
def get_pf(x_val, y_val, val_SNRs, model, epoch, pf_min, pf_max):
    '''
    callback for Pf evaluation at every epoch end
    '''
    y_val_hat = model.predict(x_val, verbose=0)
    cm = confusion_matrix(np.argmax(y_val, 1), np.argmax(y_val_hat, 1))
    cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    cm_norm = np.nan_to_num(cm_norm)
    pf = 100 * cm_norm[1][0]
    print("False Alarm: %.3f%%" % pf)
    # stop training once Pf lies inside the target interval, giving a CFAR-style detector
    if (pf > pf_min) & (pf < pf_max):
        model.stop_training = True
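if __name__ == "__main__":
    # worked example of the Pf computation above; assumes one-hot labels with
    # class 0 = signal present and class 1 = noise only, as implied by the
    # Pd/Pf code in SoftCombinationNet.py
    y_true = np.array([1, 1, 1, 1, 0, 0])  # four noise-only samples, two signal samples
    y_pred = np.array([1, 1, 1, 0, 0, 0])  # one noise sample misclassified as signal
    cm = confusion_matrix(y_true, y_pred)
    cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    print(100 * cm_norm[1][0])  # Pf = 25.0 (%)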