import numpy as np
import pandas as pd
import torch
import model
from torch.utils import data
from torch import optim, nn
from torch.nn import functional as F


def load_data(train_data, train_label, test_data, test_label):
    """Build train/test DataLoaders from raw numpy arrays.

    Data arrays of shape (N, 14) gain a channel axis -> (N, 1, 14) so they
    fit both the Conv1d and (batch_first) LSTM models; label arrays of
    shape (N, 1) are squeezed to (N,) class-index vectors, as required by
    nn.CrossEntropyLoss.  Returns (train_loader, test_loader).
    """
    def make_loader(features, labels, shuffle):
        # (N, 14) -> (N, 1, 14): add the channel / sequence axis.
        features = torch.tensor(features[:, np.newaxis], dtype=torch.float32)
        # Force long dtype: CrossEntropyLoss needs integer class indices.
        labels = torch.tensor(np.squeeze(labels, axis=1), dtype=torch.long)
        dataset = data.TensorDataset(features, labels)
        return data.DataLoader(dataset, batch_size=batchsize, shuffle=shuffle)

    return (make_loader(train_data, train_label, shuffle=True),
            make_loader(test_data, test_label, shuffle=False))


def train(epoch, train_loader):
    """Run one training epoch over `train_loader`, logging every 20 batches."""
    net.train()  # ensure BatchNorm/Dropout layers are in training mode
    for batch_idx, (inputs, target) in enumerate(train_loader):
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()
        output = net(inputs)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 20 == 0:
            print('Epoch:[{}/{}] [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, epochs, batch_idx * len(inputs), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))


def evaluate(test_loader):
    """Print average per-sample loss and accuracy over the test set."""
    net.eval()  # eval mode: frozen BatchNorm statistics, no dropout
    test_loss = 0.0
    correct = 0
    with torch.no_grad():  # no autograd bookkeeping needed for evaluation
        for inputs, target in test_loader:
            inputs, target = inputs.to(device), target.to(device)
            output = net(inputs)
            # criterion returns the batch *mean*; weight by batch size so
            # the final division gives a true per-sample average (the old
            # code summed means and divided by the dataset size).
            test_loss += criterion(output, target).item() * target.size(0)
            # index of the max logit == predicted class
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


def run():
    """Full training loop: build loaders, then alternate train/evaluate."""
    train_loader, test_loader = load_data(train_data, train_label, test_data, test_label)
    # range(1, epochs) would run only epochs - 1 epochs; include the last one.
    for epoch in range(1, epochs + 1):
        train(epoch, train_loader)
        evaluate(test_loader)


if __name__ == '__main__':

    batchsize = 128
    epochs = 10000

    train_data = np.array(pd.read_csv('data/train.csv', header=None))
    train_label = np.array(pd.read_csv('data/trainlabel.csv', header=None))
    test_data = np.array(pd.read_csv('data/test.csv', header=None))
    test_label = np.array(pd.read_csv('data/testlabel.csv', header=None))

    # Fall back to CPU so the script still runs on machines without CUDA
    # (the old hardcoded "cuda" device crashed there).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = model.LSTM().to(device)
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    criterion = nn.CrossEntropyLoss()

    run()
import torch.nn as nn
from torch.nn import functional as F


class LSTM(nn.Module):
    """2-layer LSTM over (batch, seq, 14) input with a linear head -> 2 logits."""

    def __init__(self):
        super(LSTM, self).__init__()
        self.rnn = nn.LSTM(
            input_size=14,
            hidden_size=32,
            num_layers=2,
            batch_first=True
        )
        self.fc = nn.Linear(32, 2)

    def forward(self, x):
        out, _ = self.rnn(x)
        # Classify from the last timestep's hidden state.  For the
        # seq_len == 1 input produced by main.py this is numerically
        # identical to the old flatten-everything view, but it also works
        # for longer sequences, where flattening would feed seq_len * 32
        # features into the (32 -> 2) linear layer and crash.
        return self.fc(out[:, -1, :])


class CNN(nn.Module):
    """Three Conv1d blocks (kernel 5) on (batch, 1, 14) input -> 2 logits.

    Lengths shrink 14 -> 10 -> 6 -> 2, so the flattened feature size is
    128 * 2 = 256, matching the linear head.
    NOTE(review): only the first conv block has a ReLU; presumably the
    later two were meant to have one as well -- confirm before changing,
    since adding them alters the trained model.
    """

    def __init__(self):
        super(CNN, self).__init__()
        self.cnn = nn.Sequential(
            nn.Conv1d(1, 32, 5),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.Conv1d(32, 64, 5),
            nn.BatchNorm1d(64),
            nn.Conv1d(64, 128, 5),
            nn.BatchNorm1d(128),
        )
        self.fc = nn.Linear(256, 2)

    def forward(self, x):
        out = self.cnn(x)
        # flatten (batch, 128, 2) -> (batch, 256) for the linear head
        out = out.view(out.size(0), -1)
        return self.fc(out)


class FC(nn.Module):
    """Fully-connected stack 14 -> 128 -> 256 -> 128 -> 2.

    NOTE(review): there are no activations between the linear layers, so
    this composes to a single affine map -- confirm whether that is
    intended before inserting nonlinearities.
    """

    def __init__(self):
        super(FC, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(14, 128),
            nn.Linear(128, 256),
            nn.Linear(256, 128),
            nn.Linear(128, 2)
        )

    def forward(self, x):
        # flatten any trailing axes so (batch, 1, 14) input also works
        out = x.view(x.size(0), -1)
        return self.fc(out)