├── Data Preprcoessing ├── labelPreProcess.m ├── mySMOTE.m ├── normalize.m └── tabledata.m ├── NN classification └── NNscript_new.m ├── README.md ├── evaluate.m ├── main.m └── newfeatures3.mat /Data Preprcoessing/labelPreProcess.m: -------------------------------------------------------------------------------- 1 | function multiclass=labelPreProcess(newlabels) 2 | % convert labels into a one-hot multiclass matrix: one column per unique label, 1 where the sample belongs to that class 3 | [class]=unique(newlabels); 4 | multiclass=zeros([size(newlabels,1),numel(class)]); 5 | for ii=1:numel(class) 6 | multiclass(newlabels==class(ii),ii)=1; 7 | end -------------------------------------------------------------------------------- /Data Preprcoessing/mySMOTE.m: -------------------------------------------------------------------------------- 1 | function allData_smote = mySMOTE(allData,N, k,sortedIDX) 2 | % mySMOTE Synthetic Minority Oversampling Technique. A technique to 3 | % generate synthetic samples as given in: https://www.jair.org/media/953/live-953-2037-jair.pdf 4 | % Usage: 5 | % allData_smote = mySMOTE(allData, N, k, sortedIDX) 6 | % 7 | % Inputs: 8 | % allData: Original dataset (last column holds the class label) 9 | % k: number of nearest neighbors to consider while performing 10 | % augmentation 11 | % sortedIDX: sorted labels; NOTE(review): the N input is recomputed below from the class imbalance, so the passed-in value is unused 12 | % 13 | % Outputs: 14 | % allData_smote: augmented dataset containing original data as well.
15 | % 16 | % See also datasample, randsample 17 | % %% plot the bar plot for number of classes 18 | % figure 19 | % barh(sortedIDX) 20 | % ylabel('number of classes-->') 21 | % xlabel('Samples in each class-->') 22 | % title('Original imbalance data distribution') 23 | %% number of each classes 24 | labels=allData(:,end); 25 | class=unique(sortedIDX); 26 | for ii=1:numel(class) 27 | classNo(ii)=numel(find(labels==class(ii))); 28 | end 29 | 30 | %% required addon samples in each minority class 31 | %add on samples will be calculated by taking the difference of each 32 | %classSamples with highest number of class samples 33 | 34 | [maximumSamples,sampleClass]=max(classNo); % number of maximum samples 35 | for ii=1:numel(class) 36 | samplediff(ii)=maximumSamples-classNo(ii); 37 | N (ii) = ceil(samplediff(ii)/ 100); % NOTE(review): overwrites the N argument; the oversampling amount derives purely from the imbalance (0 for the majority class) 38 | 39 | end 40 | %% oversample the minority classes 41 | allData_smote=[]; 42 | for ii=1:numel(class) 43 | X=allData(labels==class(ii),:); 44 | T = size(X, 1); 45 | X_smote = X; 46 | for i = 1:T 47 | y = X(i,:); 48 | % find k-nearest samples (y itself is a row of X, so it is among them) 49 | [idx, ~] = knnsearch(X,y,'k',k); 50 | % retain only N out of k nearest samples 51 | idx = datasample(idx, N(ii)); 52 | x_nearest = X(idx,:); 53 | x_syn = bsxfun(@plus, bsxfun(@times, bsxfun(@minus,x_nearest,y), rand(N(ii),1)), y); 54 | X_smote = cat(1, X_smote, x_syn); 55 | end 56 | allData_smote=cat(1,allData_smote,X_smote); 57 | end 58 | %% 59 | balanced_sortedIDX=allData_smote(:,end); 60 | % figure 61 | % barh(balanced_sortedIDX) 62 | % ylabel('number of classes-->') 63 | % xlabel('Samples in each class-->') 64 | % title('Balanced data distribution') 65 | %% randomize the data 66 | shuffleindex=randperm(size(allData_smote,1)); 67 | allData_smote=allData_smote(shuffleindex,:); 68 | end -------------------------------------------------------------------------------- /Data Preprcoessing/normalize.m: -------------------------------------------------------------------------------- 1 | function output=
normalize(input)
2 | % column-wise min-max scaling of input to [0,1]
3 | % NOTE(review): shadows MATLAB's built-in normalize — consider renaming
4 | for ii=1:size(input,2)
5 | output(:,ii)=(input(:,ii)-min(input(:,ii)))/max(max(input(:,ii))-min(input(:,ii)),eps); % BUGFIX: a constant column gave 0/0 = NaN; the eps floor yields 0s instead
6 | end
--------------------------------------------------------------------------------
/Data Preprcoessing/tabledata.m:
--------------------------------------------------------------------------------
1 | %% This function load features and normalise them.
2 | % The op matrix will be in format of
3 | % _Image ID...... features (2-13)....... Feature label_
4 | %% NOTE(review): this file appears truncated in this dump right after "if ii" below — restore the missing tail from the upstream repository
5 | function [Table]=tabledata(Table)
6 |
7 | % normalise the features values which are from column 2 to 14 in the Table
8 | % variable
9 | features=Table(:,1:end-1);
10 | cnt=1;
11 | for ii=1:size(features,2) % remove those feature columns which are having more zeros
12 | index(ii)=numel(find(features(:,ii)));
13 | index_perc(ii)=(index(ii)/size(features,1))*100;
14 | if index_perc(ii)>50 % use only those features which have non zero values greater than 50%
15 | Table_final(:,cnt)=features(:,ii);
16 | if iiThe complete description can be checked at https://free-thesis.com/product/automatic-digital-modulation-detection-by-neural-network/
13 | --------------------------------------------------------------------------------
/evaluate.m:
--------------------------------------------------------------------------------
1 | function acc=evaluate(index,trainigdata ,trainiglabels,testingdata,testinglabels)
2 | % Evaluate a feature subset: keep only columns selected by the 0/1 vector
3 | % index, fit a feedforward net on the training set, and return the
4 | % classification accuracy on the test set.
5 | newtrainigdata=trainigdata(:,find(index));
6 | newtestingdata=testingdata(:,find(index));
7 | % create a neural network
8 | n=10; % no. of hidden neurons
9 | net = feedforwardnet(n);
10 |
11 | % configure the neural network for this dataset
12 | net = configure(net, newtrainigdata', trainiglabels');
13 | net = train(net, newtrainigdata', trainiglabels'); % BUGFIX: configure only sizes the net; without train the weights stay at their random initialisation
14 | y=sim(net,newtestingdata');
15 | y=round(y);
16 | cp=classperf(testinglabels,y);
17 | acc=cp.CorrectRate;
--------------------------------------------------------------------------------
/main.m:
--------------------------------------------------------------------------------
1 | close all
2 | clear
3 | clc
4 | addpath(genpath(cd))
5 | %% load the data
6 | % load newfeatures_all.mat
7 | load newfeatures3.mat
8 |
9 | data= newfeatures;
10 | % data=data(randperm(size(data,1)),:);
11 | % load features and normalise them
12 | [Table]=tabledata((data));
13 | %% divide the features into testing and training
14 | features=Table(:,1:size(Table,2)-1);
15 | labels=Table(:,end);
16 | % features=features(:,randperm(size(features,2)));
17 |
18 | newfeatures=features;
19 | newlabels=labels;
20 | % BUGFIX: drop NaN rows in a single pass. The old per-column loop deleted
21 | % rows from newfeatures/newlabels while indexing them with masks sized for
22 | % the original features matrix, which errors (or removes wrong rows) as
23 | % soon as more than one column contains NaNs.
24 | index=any(isnan(features),2);
25 | newfeatures(index,:)=[];
26 | newlabels(index)=[];
27 | % convert the labels into multiclass
28 | multiclass=labelPreProcess(newlabels);
29 | [trainInd,~,testInd]=dividerand(size(newfeatures,1),0.8,0,0.2); %randomly divide the data
30 | traindata=newfeatures(trainInd,:); %training data
31 | trainlabels=multiclass(trainInd,:); %training labels
32 | testdata=newfeatures(testInd,:); %testing data
33 | testlabels=multiclass(testInd,:); %testing label
34 | %%
35 | % BUGFIX: oversample the NaN-cleaned newfeatures, not the raw features,
36 | % whose row count no longer matches newlabels once NaN rows are removed
37 | features_new=mySMOTE([newfeatures,newlabels],100,10,newlabels);
38 | multiclass_new=labelPreProcess(features_new(:,end));
39 | [NNacc,predictedLables_raw]=NNscript_new(features_new,multiclass_new);
40 | predictedLables=vec2ind(predictedLables_raw);
41 | disp(['Detection Accuracy = ',num2str(NNacc*100),'%'])
42 |
--------------------------------------------------------------------------------
/newfeatures3.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/earthat/Automatic-Digital-Modulation-Detection-/b1b7b8125e9d4daa022f0459013d51ab92b79906/newfeatures3.mat
--------------------------------------------------------------------------------