├── newfeatures3.mat
├── Data Preprcoessing
│   ├── normalize.m
│   ├── labelPreProcess.m
│   ├── tabledata.m
│   └── mySMOTE.m
├── evaluate.m
├── main.m
├── README.md
└── NN classification
    └── NNscript_new.m

/newfeatures3.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/earthat/Automatic-Digital-Modulation-Detection-/HEAD/newfeatures3.mat
--------------------------------------------------------------------------------
/Data Preprcoessing/normalize.m:
--------------------------------------------------------------------------------
function output = normalize(input)
% Min-max scale every column of INPUT to the range [0,1].
% Note: this shadows MATLAB's built-in normalize (R2018a+).
output = zeros(size(input));   % preallocate
for ii = 1:size(input,2)
    output(:,ii) = (input(:,ii)-min(input(:,ii)))/(max(input(:,ii))-min(input(:,ii)));
end
--------------------------------------------------------------------------------
/Data Preprcoessing/labelPreProcess.m:
--------------------------------------------------------------------------------
function multiclass = labelPreProcess(newlabels)
% Convert a label vector into a one-hot (multiclass) indicator matrix.
classes = unique(newlabels);   % avoid shadowing the built-in class()
multiclass = zeros(size(newlabels,1), numel(classes));
for ii = 1:numel(classes)
    multiclass(newlabels==classes(ii), ii) = 1;
end
--------------------------------------------------------------------------------
/evaluate.m:
--------------------------------------------------------------------------------
function acc = evaluate(index, trainingdata, traininglabels, testingdata, testinglabels)
% Score a feature subset: INDEX is a 0/1 vector selecting feature columns.
newtrainingdata = trainingdata(:, find(index));
newtestingdata  = testingdata(:, find(index));
% create a neural network
n = 10;   % number of hidden neurons
net = feedforwardnet(n);
% configure and train the network on this dataset
net = configure(net, newtrainingdata', traininglabels');
net = train(net, newtrainingdata', traininglabels');   % without train(), sim() would run on random initial weights
y = round(sim(net, newtestingdata'));
cp = classperf(testinglabels, y);
acc = cp.CorrectRate;
--------------------------------------------------------------------------------
/Data Preprcoessing/tabledata.m:
--------------------------------------------------------------------------------
%% This function loads the features and normalises them.
% The output matrix will be in the format
% _Image ID...... features (2-13)....... Feature label_
%%
function [Table] = tabledata(Table)

% normalise the feature values, i.e. every column except the trailing label
features = Table(:,1:end-1);
cnt = 1;
for ii = 1:size(features,2)   % remove feature columns dominated by zeros
    index(ii) = numel(find(features(:,ii)));             % non-zero count
    index_perc(ii) = (index(ii)/size(features,1))*100;   % non-zero percentage
    if index_perc(ii) > 50    % use only features with more than 50% non-zero values
        Table_final(:,cnt) = features(:,ii);
        cnt = cnt + 1;
    end
end
Table = [normalize(Table_final) Table(:,end)];   % reattach the label column
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
The complete description can be checked at https://free-thesis.com/product/automatic-digital-modulation-detection-by-neural-network/
--------------------------------------------------------------------------------
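The repository's main.m presumably wires these preprocessing steps together, but its body is not reproduced above. A minimal sketch of the likely chain, in which every variable and field name is an assumption rather than something taken from main.m:

% Hypothetical preprocessing pipeline (names assumed; main.m not shown above)
S = load('newfeatures3.mat');            % assumed to contain a numeric feature matrix
Table = tabledata(S.newfeatures3);       % field name is a guess based on the file name
Table = mySMOTE(Table, [], 5, sort(Table(:,end)));   % balance classes; k = 5 assumed
multiclass = labelPreProcess(Table(:,end));          % one-hot targets for the network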
/Data Preprcoessing/mySMOTE.m:
--------------------------------------------------------------------------------
function allData_smote = mySMOTE(allData, N, k, sortedIDX)
% mySMOTE  Synthetic Minority Oversampling Technique (SMOTE).
% Generates synthetic samples as described in:
% https://www.jair.org/media/953/live-953-2037-jair.pdf
%
% Usage:
%   allData_smote = mySMOTE(allData, N, k, sortedIDX)
%
% Inputs:
%   allData:   original dataset; the last column holds the class labels
%   N:         ignored on input; recomputed per class below
%   k:         number of nearest neighbours to consider while performing
%              augmentation
%   sortedIDX: sorted class labels
%
% Outputs:
%   allData_smote: augmented dataset containing the original data as well
%
% See also datasample, randsample

%% number of samples in each class
labels = allData(:,end);
class = unique(sortedIDX);
for ii = 1:numel(class)
    classNo(ii) = numel(find(labels==class(ii)));
end

%% required add-on samples in each minority class
% The add-on count for each class is the difference between its sample
% count and the sample count of the largest class.
[maximumSamples, ~] = max(classNo);
for ii = 1:numel(class)
    samplediff(ii) = maximumSamples - classNo(ii);
    N(ii) = ceil(samplediff(ii)/100);   % synthetic samples per original sample
end

%% oversample the minority classes
allData_smote = [];
for ii = 1:numel(class)
    X = allData(labels==class(ii), :);
    T = size(X, 1);
    X_smote = X;
    for i = 1:T
        y = X(i,:);
        % find the k nearest samples
        [idx, ~] = knnsearch(X, y, 'k', k);
        % retain only N(ii) of the k nearest samples
        idx = datasample(idx, N(ii));
        x_nearest = X(idx,:);
        % interpolate between y and its neighbours: y + rand*(x_nearest - y)
        x_syn = bsxfun(@plus, bsxfun(@times, bsxfun(@minus, x_nearest, y), rand(N(ii),1)), y);
        X_smote = cat(1, X_smote, x_syn);
    end
    allData_smote = cat(1, allData_smote, X_smote);
end

%% optional: bar plots of the class distribution before/after balancing
% figure; barh(sortedIDX); title('Original imbalanced data distribution')
% figure; barh(allData_smote(:,end)); title('Balanced data distribution')

%% randomise the balanced data
shuffleindex = randperm(size(allData_smote,1));
allData_smote = allData_smote(shuffleindex,:);
end
--------------------------------------------------------------------------------
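The heart of mySMOTE is the interpolation x_syn = y + rand .* (x_nearest - y): each synthetic point lies on the line segment between a minority sample y and one of its k nearest neighbours. A self-contained toy illustration (all values invented):

% Toy SMOTE interpolation on invented data
X = [1 2; 2 3; 4 5];                        % three minority-class samples
y = X(1,:);                                 % sample being oversampled
idx = knnsearch(X, y, 'k', 2);              % 2 nearest neighbours (includes y itself)
x_nearest = X(datasample(idx, 1), :);       % keep one neighbour at random
x_syn = y + rand(1,1).*(x_nearest - y);     % synthetic point between y and the neighbour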
/NN classification/NNscript_new.m:
--------------------------------------------------------------------------------
function [trainPerformance, y] = NNscript_new(features, labels)
% Train a pattern-recognition network and report its accuracy.
%
%   features - input data (rows = samples)
%   labels   - target data (one-hot rows)

x = features';
t = labels';

% Choose a training function.
% For a list of all training functions type: help nntrain
%   'trainlm'  is usually fastest.
%   'trainbr'  takes longer but may be better for challenging problems.
%   'trainscg' uses less memory; suitable in low-memory situations.
trainFcn = 'trainscg';   % scaled conjugate gradient backpropagation

% Create a pattern recognition network
hiddenLayerSize = 20;
net = patternnet(hiddenLayerSize, trainFcn);

% Choose input and output pre/post-processing functions
% For a list of all processing functions type: help nnprocess
% net.input.processFcns = {'removeconstantrows','mapminmax'};
% net.output.processFcns = {'removeconstantrows','mapminmax'};

% Set up division of data for training, validation, testing
% For a list of all data division functions type: help nndivide
net.divideFcn = 'dividerand';         % divide data randomly
net.divideParam.trainRatio = 80/100;
net.divideParam.valRatio   = 0/100;   % dividerand otherwise keeps its 15% default
net.divideParam.testRatio  = 20/100;

% Choose a performance function
% For a list of all performance functions type: help nnperformance
net.performFcn = 'mse';               % mean squared error

% Choose plot functions
% For a list of all plot functions type: help nnplot
net.plotFcns = {'plotperform','plottrainstate','ploterrhist', ...
    'plotconfusion','plotroc'};

% Train the network
[net, tr] = train(net, x, t);

% Evaluate the trained network on the full dataset
y = net(x);
y(y<0) = 0;
y = round(y);
cp = classperf(t, y);
trainPerformance = cp.CorrectRate;
e = gsubtract(t, round(y));   % errors, used by ploterrhist below

% View the network
view(net)

% Plots (uncomment to enable)
% figure, plotperform(tr)
% figure, plottrainstate(tr)
% figure, ploterrhist(e)
% figure, plotconfusion(t,y)
% figure, plotroc(t,y)
--------------------------------------------------------------------------------
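Taken together, a hedged end-to-end call might look like this, assuming features is a samples-by-features matrix and labels is a column vector of class IDs (both names hypothetical, not taken from the repository):

% Hypothetical usage of the pieces above
multiclass = labelPreProcess(labels);               % one-hot targets
[trainAcc, y] = NNscript_new(features, multiclass); % train and score the patternnet
fprintf('Classification accuracy: %.2f%%\n', 100*trainAcc);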