├── .gitignore
├── .vscode
│   └── settings.json
├── CICIDS_model.py
├── CICIDS_model_dos.py
├── Restrictive_FGSM.py
├── __pycache__
│   ├── CICIDS_model_dos.cpython-37.pyc
│   ├── meta_data.cpython-37.pyc
│   └── preprocess.cpython-37.pyc
├── dos.csv
├── meta_data.py
├── models
│   ├── IDS_small_dos_alpha.h5
│   ├── IDS_small_dos_v1.h5
│   ├── normalized.png
│   └── small_dos.png
└── preprocess.py

/.gitignore:
--------------------------------------------------------------------------------
1 | /CICIDS2018_dataset
2 | /IDS_dos_v1
3 | /IDSv1
4 | .vscode
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 |     "python.pythonPath": "/home/jglee/anaconda3/envs/tensorflow2.1/bin/python"
3 | }
--------------------------------------------------------------------------------
/CICIDS_model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import tensorflow as tf
4 | import pandas as pd
5 | import matplotlib.pyplot as plt
6 | from pandas import DataFrame
7 | from tensorflow import keras
8 | from sklearn.preprocessing import LabelEncoder
9 | from sklearn.model_selection import train_test_split
10 | from sklearn.utils import class_weight
11 |
12 |
13 | os.environ["CUDA_VISIBLE_DEVICES"] = '1'
14 | BASE_PATH = "CICIDS2018_dataset/"
15 | MODEL_NAME = "models/IDS_small_v1"
16 | le = LabelEncoder()  # fitted on the label column inside load_dataset()
17 |
18 |
19 | def norm(train_dataset, train_stats):
20 |     return (train_dataset - train_stats['mean']) / train_stats['std']
21 |
22 | def load_dataset(filepath):
23 |     df = pd.read_csv(filepath)
24 |     #df['Label'] = pd.Categorical(df['Label'])
25 |     #df['Label'] = df.Label.cat.codes
26 |     target = df.pop('Label')  # string labels -> integer classes 0..14
27 |     le.fit(target)
28 |     target = le.transform(target)
29 |     df = df.astype('float32')
30 |     #dataset = norm(df, df.describe().transpose())
31 |     dataset = tf.data.Dataset.from_tensor_slices((df.values, target))
32 |     dataset = dataset.shuffle(len(df)).batch(50)
33 |
34 |     return dataset
35 |
36 | def show_result(hist):
37 |     fig, loss_ax = plt.subplots()
38 |
39 |     acc_ax = loss_ax.twinx()
40 |     loss_ax.plot(hist.history['loss'], 'r', label='train loss')
41 |     acc_ax.plot(hist.history['accuracy'], 'b', label='train acc')
42 |     #loss_ax.plot(hist.history['val_loss'], 'y', label='val loss')
43 |     #acc_ax.plot(hist.history['val_accuracy'], 'g', label='val acc')
44 |     loss_ax.set_xlabel('epoch')
45 |     loss_ax.set_ylabel('loss')
46 |     acc_ax.set_ylabel('accuracy')
47 |     loss_ax.legend(loc='upper left')
48 |     acc_ax.legend(loc='lower left')
49 |     plt.show()
50 |
51 | def train_model():
52 |     train_dataset = load_dataset(BASE_PATH+"CICIDS2018_test_small.csv")
53 |     #test_dataset = load_dataset(BASE_PATH+"CICIDS2018_test.csv")
54 |
55 |     # METRICS = [
56 |     #     keras.metrics.TruePositives(name='tp'),
57 |     #     keras.metrics.FalsePositives(name='fp'),
58 |     #     keras.metrics.TrueNegatives(name='tn'),
59 |     #     keras.metrics.FalseNegatives(name='fn'),
60 |     #     keras.metrics.BinaryAccuracy(name='accuracy'),
61 |     #     keras.metrics.Precision(name='precision'),
62 |     #     keras.metrics.Recall(name='recall'),
63 |     #     keras.metrics.AUC(name='auc'),
64 |     # ]
65 |
66 |     model = tf.keras.models.Sequential([
67 |         keras.layers.Dense(64, activation='relu'),
68 |         #keras.layers.Dropout(0.5),
69 |         keras.layers.Dense(64, activation='relu'),
70 |         #keras.layers.Dropout(0.5),
71 |         keras.layers.Dense(15, activation='softmax')  # one unit per traffic class in CICIDS2018
72 |     ])
73 |
74 |     model.compile(
75 |         optimizer='adam',
76 |         loss='sparse_categorical_crossentropy',
77 |         metrics=['accuracy'],
78 |         #metrics=METRICS,
79 |     )
80 |
81 |     hist = model.fit(train_dataset,
82 |                      #validation_split = 0.2,
83 |                      epochs=5,
84 |                      )
85 |
86 |     show_result(hist)
87 |
88 |     model.save(MODEL_NAME)
89 |
90 |     #model.evaluate(test_dataset, verbose=2)
91 |
92 |
93 | if __name__ == "__main__":
94 |     train_model()
--------------------------------------------------------------------------------
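A quick sanity check that is not part of the repository: once CICIDS_model.py has been run (so models/IDS_small_v1 exists on disk), the integer outputs of the 15-way softmax can be mapped back to attack names through the same module-level LabelEncoder. A minimal sketch, using only names defined in CICIDS_model.py; note that load_dataset() fits `le` as a side effect.

    import numpy as np
    from tensorflow import keras
    from CICIDS_model import BASE_PATH, MODEL_NAME, le, load_dataset

    dataset = load_dataset(BASE_PATH + "CICIDS2018_test_small.csv")  # also fits `le`
    model = keras.models.load_model(MODEL_NAME)
    features, _ = next(iter(dataset))                 # one shuffled batch of 50 flows
    class_ids = np.argmax(model.predict(features), axis=1)
    print(le.inverse_transform(class_ids))            # strings such as 'Benign' or 'DoS attacks-Hulk'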
/CICIDS_model_dos.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import tensorflow as tf
4 | import pandas as pd
5 | import matplotlib.pyplot as plt
6 | import seaborn as sns
7 | from tensorflow import keras
8 | from sklearn.model_selection import train_test_split
9 | from sklearn.utils import class_weight
10 |
11 | from meta_data import *
12 |
13 |
14 | os.environ["CUDA_VISIBLE_DEVICES"] = '1'
15 |
16 |
17 | def save_feature_dist(data):
18 |     feature_names = [f for f in FEATURE_LIST if f != 'Label']  # copy, so the list imported from meta_data is not mutated
19 |     for feature in feature_names:
20 |     #for feature in DOS_FEATURE_LIST:
21 |         fig, dist = plt.subplots()
22 |         plt.title(feature)
23 |         dist.hist([100*data[feature]], bins=range(-100, 100))
24 |         dist.set_xlabel('normalized value')
25 |         dist.set_ylabel('# of flows')
26 |         plt.savefig(BASE_PATH+'feature_dist_test/'+feature.replace('/', '-'))
27 |         plt.close()
28 |
29 | def norm(data, stats):
30 |     return ((data - stats['mean']) / (stats['std']+0.00001))
31 |     #return (data-stats['min']) / (stats['max']-stats['min']+0.00001)
32 |
33 | def load_dataset(filepath):
34 |     df = pd.read_csv(filepath)[DOS_FEATURE_LIST].dropna().astype('float32')  # 25 features + Label, matching input_shape=(25,) below
35 |     #df = pd.read_csv(filepath).dropna().astype('float32')
36 |     desc = df.describe().drop(['Label'], axis=1).transpose()
37 |     df_train, df_test = train_test_split(df, test_size=0.2)
38 |     train_labels = df_train.pop('Label')
39 |     test_labels = df_test.pop('Label')
40 |     train_data = norm(df_train, desc)
41 |     test_data = norm(df_test, desc)
42 |
43 |     return (train_data, train_labels), (test_data, test_labels)
44 |
45 | def show_result(hist):
46 |     fig, loss_ax = plt.subplots()
47 |     acc_ax = loss_ax.twinx()
48 |     loss_ax.plot(hist.history['loss'], 'r', label='train loss')
49 |     acc_ax.plot(hist.history['accuracy'], 'b', label='train acc')
50 |     loss_ax.plot(hist.history['val_loss'], 'y', label='val loss')
51 |     acc_ax.plot(hist.history['val_accuracy'], 'g', label='val acc')
52 |     loss_ax.set_xlabel('epoch')
53 |     loss_ax.set_ylabel('loss')
54 |     acc_ax.set_ylabel('accuracy')
55 |     loss_ax.legend(loc='upper left')
56 |     acc_ax.legend(loc='lower left')
57 |     plt.show()
58 |
59 | def train_model():
60 |     (train_data, train_labels), (test_data, test_labels) = load_dataset(BASE_PATH+"CICIDS2018_small_dos.csv")
61 |     #test_dataset = load_dataset(BASE_PATH+"CICIDS2018_test_dos.csv")
62 |
63 |     save_feature_dist(train_data)
64 |
65 |     METRICS = [
66 |         #keras.metrics.TruePositives(name='tp'),
67 |         #keras.metrics.FalsePositives(name='fp'),
68 |         #keras.metrics.TrueNegatives(name='tn'),
69 |         #keras.metrics.FalseNegatives(name='fn'),
70 |         keras.metrics.BinaryAccuracy(name='accuracy'),
71 |         keras.metrics.Precision(name='precision'),
72 |         keras.metrics.Recall(name='recall'),
73 |         #keras.metrics.AUC(name='auc'),
74 |     ]
75 |
76 |     model = tf.keras.models.Sequential([
77 |         keras.layers.Dense(64, activation='relu', input_shape=(25,)),
78 |         keras.layers.BatchNormalization(),
79 |         keras.layers.Dropout(0.5),
80 |         keras.layers.Dense(128, activation='relu'),
81 |         keras.layers.BatchNormalization(),
82 |         keras.layers.Dropout(0.5),
83 |         keras.layers.Dense(64, activation='relu'),
84 |         keras.layers.BatchNormalization(),
85 |         keras.layers.Dropout(0.5),
86 |         keras.layers.Dense(1, activation='sigmoid')
87 |     ])
88 |
89 |     sgd = keras.optimizers.SGD(learning_rate=0.1, decay=1e-6, momentum=0.9, nesterov=True)
90 |
91 |     model.compile(
92 |         #optimizer='adam',
93 |         optimizer=sgd,
94 |         loss='binary_crossentropy',
95 |         #metrics=['accuracy'],
96 |         metrics=METRICS,
97 |     )
98 |
99 |     #print(model.summary())
100 |
101 |     hist = model.fit(train_data, train_labels,
102 |                      batch_size=200,
103 |                      validation_split=0.2,
104 |                      epochs=50,
105 |                      #class_weight = {0: 0.5, 1: 0.5},
106 |                      )
107 |
108 |
109 |     show_result(hist)
110 |
111 |     model.save("models/IDS_small_dos_alpha.h5")
112 |
113 |     loss, acc, precision, recall = model.evaluate(test_data, test_labels, verbose=2)
114 |     print("accuracy: {:5.2f}%".format(100*acc))
115 |     print("precision: " + str(precision) + " recall: " + str(recall))
116 |
117 |
118 | if __name__ == "__main__":
119 |     train_model()
--------------------------------------------------------------------------------
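CICIDS_model_dos.py imports sklearn's class_weight helper and carries a commented-out class_weight hint in its fit call, but never wires the two together. A minimal sketch of how balanced weights could be passed in, reusing the train_data/train_labels/model variables from train_model() above (this is an illustration, not code from the repository):

    import numpy as np
    from sklearn.utils import class_weight

    # weights inversely proportional to class frequency (0 = Benign, 1 = DoS)
    weights = class_weight.compute_class_weight('balanced',
                                                classes=np.unique(train_labels),
                                                y=train_labels)
    hist = model.fit(train_data, train_labels,
                     batch_size=200,
                     validation_split=0.2,
                     epochs=50,
                     class_weight=dict(enumerate(weights)))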
/Restrictive_FGSM.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tensorflow as tf
3 | import numpy as np
4 | import pandas as pd
5 | from tensorflow import keras
6 |
7 | from meta_data import *
8 |
9 |
10 | os.environ["CUDA_VISIBLE_DEVICES"] = '1'
11 |
12 |
13 | def test_attack():
14 |     x = pd.read_csv("dos.csv")
15 |     x = x[DOS_FEATURE_LIST]
16 |     x = x.dropna()
17 |     y = x.pop('Label')
18 |     x = x.astype('float32')
19 |
20 |     df = pd.read_csv(BASE_PATH+"CICIDS2018_small_dos.csv")
21 |     df = df[DOS_FEATURE_LIST]
22 |     df = df.dropna()
23 |     df.pop('Label')
24 |     df = df.astype('float32')
25 |     norm_params = df.describe().transpose()
26 |
27 |     x = (x-norm_params['min']) / (norm_params['max']-norm_params['min']+0.00001)  # NOTE: min-max scaling here differs from the mean/std norm() used at training time
28 |     x = tf.convert_to_tensor(x.iloc[0])[None, ...]
29 |     model = keras.models.load_model("models/IDS_small_dos_alpha.h5")
30 |
31 |     loss_object = tf.keras.losses.BinaryCrossentropy()
32 |
33 |     with tf.GradientTape() as tape:
34 |         tape.watch(x)
35 |         pred = model(x)
36 |         loss = loss_object(tf.constant([[float(y.iloc[0])]]), pred)  # FGSM needs the true label here, not the input x
37 |     gradient = tape.gradient(loss, x)
38 |     signed_grad = tf.sign(gradient)
39 |
40 |     for i in range(10):
41 |         print("\n5 features changed. test: "+str(i+1))
42 |         chosen_features = np.zeros(25)
43 |         nums = np.random.choice(25, 5, replace=False)
44 |         for num in nums:
45 |             chosen_features[num] = 1
46 |         epsilons = [0, 0.01, 0.05, 0.1, 0.5, 1]
47 |         for eps in epsilons:
48 |             adv_x = x + eps*signed_grad[0]*chosen_features
49 |             print("{0:3.2f} changed. prediction: {1:5.4f}".format(eps, model.predict(adv_x)[0][0]))
50 |     print("x: ")
51 |     print(x)
52 |     print("adv_x: ")
53 |     print(adv_x)
54 |
55 |
56 |
57 | if __name__ == "__main__":
58 |     test_attack()
--------------------------------------------------------------------------------
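One detail test_attack() leaves out: adding eps * sign(gradient) can push features outside the [0, 1] interval produced by the min-max scaling, so the adversarial flow may no longer correspond to a realizable input. A one-line guard with TensorFlow's standard clip op (a sketch, using the variable names from the loop above):

    adv_x = x + eps*signed_grad[0]*chosen_features
    adv_x = tf.clip_by_value(adv_x, 0.0, 1.0)   # stay inside the normalized feature range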
"Flow IAT Std","Flow IAT Max","Flow IAT Min", 13 | "Fwd IAT Tot","Fwd IAT Mean","Fwd IAT Std", 14 | "Fwd IAT Max","Fwd IAT Min","Bwd IAT Tot", 15 | "Bwd IAT Mean","Bwd IAT Std","Bwd IAT Max", 16 | "Bwd IAT Min","Fwd PSH Flags","Bwd PSH Flags", 17 | "Fwd URG Flags","Bwd URG Flags","Fwd Header Len", 18 | "Bwd Header Len","Fwd Pkts/s","Bwd Pkts/s", 19 | "Pkt Len Min","Pkt Len Max","Pkt Len Mean", 20 | "Pkt Len Std","Pkt Len Var","FIN Flag Cnt", 21 | "SYN Flag Cnt","RST Flag Cnt","PSH Flag Cnt", 22 | "ACK Flag Cnt","URG Flag Cnt","CWE Flag Count", 23 | "ECE Flag Cnt","Down/Up Ratio","Pkt Size Avg", 24 | "Fwd Seg Size Avg","Bwd Seg Size Avg", 25 | "Fwd Byts/b Avg","Fwd Pkts/b Avg", 26 | "Fwd Blk Rate Avg","Bwd Byts/b Avg","Bwd Pkts/b Avg", 27 | "Bwd Blk Rate Avg","Subflow Fwd Pkts", 28 | "Subflow Fwd Byts","Subflow Bwd Pkts", 29 | "Subflow Bwd Byts","Init Fwd Win Byts", 30 | "Init Bwd Win Byts","Fwd Act Data Pkts", 31 | "Fwd Seg Size Min","Active Mean","Active Std", 32 | "Active Max","Active Min","Idle Mean","Idle Std", 33 | "Idle Max","Idle Min", "Label"] 34 | 35 | DOS_FEATURE_LIST = ["Dst Port","Protocol","Flow Duration", 36 | "Tot Bwd Pkts","Bwd Pkt Len Max","Bwd Pkt Len Min", 37 | "Bwd Pkt Len Mean","Bwd Pkt Len Std", 38 | "Bwd Header Len", 39 | "Pkt Len Min","Pkt Len Max","Pkt Len Mean", 40 | "Pkt Len Std","Pkt Len Var","Down/Up Ratio", 41 | "Pkt Size Avg","Fwd Seg Size Avg","Bwd Seg Size Avg", 42 | "Init Fwd Win Byts","Init Bwd Win Byts", 43 | "Fwd Seg Size Min","Idle Mean","Idle Std", 44 | "Idle Max","Idle Min", "Label", 45 | # "Flow IAT Mean","Flow IAT Std","Flow IAT Max", 46 | # "Flow IAT Min","Fwd IAT Tot","Fwd IAT Mean", 47 | # "Fwd IAT Std","Fwd IAT Max","Fwd IAT Min", 48 | # "Bwd IAT Tot","Bwd IAT Mean","Bwd IAT Std", 49 | # "Bwd IAT Max","Bwd IAT Min", 50 | ] 51 | 52 | LABELS = ['Benign', 'Bot', 'Brute Force -Web', 'Brute Force -XSS', 53 | 'DDOS attack-HOIC', 'DDOS attack-LOIC-UDP', 'DDoS attacks-LOIC-HTTP', 54 | 'DoS attacks-GoldenEye', 'DoS attacks-Hulk', 55 | 'DoS attacks-SlowHTTPTest','DoS attacks-Slowloris','FTP-BruteForce', 56 | 'Infilteration', 'SQL Injection', 'SSH-Bruteforce'] -------------------------------------------------------------------------------- /models/IDS_small_dos_alpha.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/irijije/DeepLearningIDS/edc22879b1ccf1e2e2e50b76b0c0972691ceba67/models/IDS_small_dos_alpha.h5 -------------------------------------------------------------------------------- /models/IDS_small_dos_v1.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/irijije/DeepLearningIDS/edc22879b1ccf1e2e2e50b76b0c0972691ceba67/models/IDS_small_dos_v1.h5 -------------------------------------------------------------------------------- /models/normalized.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/irijije/DeepLearningIDS/edc22879b1ccf1e2e2e50b76b0c0972691ceba67/models/normalized.png -------------------------------------------------------------------------------- /models/small_dos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/irijije/DeepLearningIDS/edc22879b1ccf1e2e2e50b76b0c0972691ceba67/models/small_dos.png -------------------------------------------------------------------------------- /preprocess.py: 
--------------------------------------------------------------------------------
1 | import glob
2 | import pandas as pd
3 | from sklearn.model_selection import train_test_split
4 | from sklearn.utils import shuffle
5 |
6 | from meta_data import *
7 |
8 |
9 | def drop_columns():
10 |     chunks = pd.read_csv(BASE_PATH+FILE_PATH+"Thuesday-20-02-2018_TrafficForML_CICFlowMeter.csv", chunksize=1000000)
11 |     df = []
12 |     for chunk in chunks:
13 |         print("chunk processing")
14 |         chunk = chunk.drop(['Flow ID', 'Src IP', 'Src Port', 'Dst IP'], axis=1)
15 |         df.append(chunk)
16 |     pd.concat(df).to_csv(BASE_PATH+FILE_PATH+"Thuesday-20-02-2018_TrafficForML_CICFlowMeter.csv", index=False)  # overwrites the source file in place
17 |
18 | def make_dataset():
19 |     all_data = []
20 |     for f in glob.glob(BASE_PATH+FILE_PATH+"*.csv"):
21 |         data = pd.read_csv(f, index_col=None, header=0)
22 |         all_data.append(data)
23 |     data = pd.concat(all_data, axis=0, ignore_index=True)
24 |     data = data.drop(['Timestamp'], axis=1)
25 |     data = data[~data['Dst Port'].str.contains("Dst", na=False)]  # drop header rows repeated inside the raw CSVs
26 |     data_train, data_test = train_test_split(data, test_size=0.2)
27 |     data.to_csv(BASE_PATH+"CICIDS2018_all.csv", index=False)
28 |     pd.DataFrame(data_train).to_csv(BASE_PATH+"CICIDS2018_train.csv", index=None)
29 |     pd.DataFrame(data_test).to_csv(BASE_PATH+"CICIDS2018_test.csv", index=None)
30 |
31 | def make_DoS_dataset():
32 |     all_data = []
33 |     for f in glob.glob(BASE_PATH+FILE_PATH+"*.csv"):
34 |         data = pd.read_csv(f, index_col=None, header=0)
35 |         all_data.append(data)
36 |     data = pd.concat(all_data, axis=0, ignore_index=True)
37 |     data = data.drop(['Timestamp'], axis=1)
38 |     data = data[~data['Dst Port'].str.contains('Dst', na=False)]
39 |     data = data[(data['Label'].str.contains('Benign')) |
40 |                 (data['Label'].str.contains('DoS')) |
41 |                 (data['Label'].str.contains('DOS'))]
42 |     data['Label'].replace(['Benign', 'DDOS attack-HOIC',
43 |         'DDOS attack-LOIC-UDP', 'DDoS attacks-LOIC-HTTP', 'DoS attacks-GoldenEye',
44 |         'DoS attacks-Hulk', 'DoS attacks-SlowHTTPTest', 'DoS attacks-Slowloris'],
45 |         [0, 1, 1, 1, 1, 1, 1, 1], inplace=True)
46 |     data_train, data_test = train_test_split(data, test_size=0.2)
47 |     data_train, data_val = train_test_split(data_train, test_size=0.2)
48 |     data.to_csv(BASE_PATH+"CICIDS2018_all_dos.csv", index=False)
49 |     pd.DataFrame(data_test).to_csv(BASE_PATH+"CICIDS2018_test_dos.csv", index=None)
50 |     pd.DataFrame(data_train).to_csv(BASE_PATH+"CICIDS2018_train_dos.csv", index=None)
51 |     pd.DataFrame(data_val).to_csv(BASE_PATH+"CICIDS2018_val_dos.csv", index=None)
52 |
53 | def make_small_dataset():
54 |     all_data = []
55 |     for f in glob.glob(BASE_PATH+FILE_PATH+"*.csv"):
56 |         data = pd.read_csv(f, index_col=None, header=0)
57 |         all_data.append(data)
58 |     data = pd.concat(all_data, axis=0, ignore_index=True)
59 |     data = data.drop(['Timestamp'], axis=1)
60 |     data = data[~data['Dst Port'].str.contains("Dst", na=False)]
61 |     _, data_small = train_test_split(data, test_size=0.1)  # keep a random 10% sample
62 |     pd.DataFrame(data_small).to_csv(BASE_PATH+"CICIDS2018_small.csv", index=None)
63 |
64 | def make_small_DoS_dataset():
65 |     data = pd.read_csv(BASE_PATH+"CICIDS2018_small.csv")
66 |     data = data[(data['Label'].str.contains('Benign')) |
67 |                 (data['Label'].str.contains('DoS')) |
68 |                 (data['Label'].str.contains('DOS'))]
69 |     data['Label'].replace(['Benign', 'DDOS attack-HOIC',
70 |         'DDOS attack-LOIC-UDP', 'DDoS attacks-LOIC-HTTP', 'DoS attacks-GoldenEye',
71 |         'DoS attacks-Hulk', 'DoS attacks-SlowHTTPTest', 'DoS attacks-Slowloris'],
72 |         [0, 1, 1, 1, 1, 1, 1, 1], inplace=True)
73 |     data_train, data_test = train_test_split(data, test_size=0.2)  # NB: CICIDS_model_dos.py loads "CICIDS2018_small_dos.csv", which this function does not write
74 |     data_train.to_csv(BASE_PATH+"CICIDS2018_small_dos_train.csv", index=False)
75 |     data_test.to_csv(BASE_PATH+"CICIDS2018_small_dos_test.csv", index=False)
76 |
77 |
78 | if __name__ == "__main__":
79 |     #drop_columns()
80 |     #make_dataset()
81 |     #make_DoS_dataset()
82 |     #make_small_dataset()
83 |     make_small_DoS_dataset()
--------------------------------------------------------------------------------
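Putting the pieces together, the intended workflow appears to be: preprocess the raw CICIDS2018 CSVs, train the binary DoS detector, then probe it with the restricted FGSM attack. A hypothetical driver (not a file in this repository) would look roughly as follows; note that train_model() reads BASE_PATH+"CICIDS2018_small_dos.csv" while make_small_DoS_dataset() writes ..._train.csv/..._test.csv, so one of the small DoS splits would first need to be renamed to match.

    # hypothetical driver; every call below is defined in this repository
    import preprocess
    import CICIDS_model_dos
    import Restrictive_FGSM

    preprocess.make_small_dataset()      # sample 10% of the raw CSVs -> CICIDS2018_small.csv
    preprocess.make_small_DoS_dataset()  # keep Benign vs (D)DoS rows, binarize labels
    CICIDS_model_dos.train_model()       # train and save models/IDS_small_dos_alpha.h5
    Restrictive_FGSM.test_attack()       # perturb 5 random features of the dos.csv flow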