├── .gitignore
├── dataset
│   └── filelocation
├── requirements.txt
├── requirements.jl
├── julia_setting.py
├── data.bat
├── data.sh
├── train.sh
├── Dockerfile
├── utils
│   ├── preprocessing.jl
│   └── util.py
├── README.md
├── models
│   └── model.py
├── test.py
└── train.py
/.gitignore:
--------------------------------------------------------------------------------
.idea/
dataset/
--------------------------------------------------------------------------------
/dataset/filelocation:
--------------------------------------------------------------------------------
Downloaded dataset files are placed in this directory.
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
scipy
numpy
pandas
scikit-learn
matplotlib
julia
--------------------------------------------------------------------------------
/requirements.jl:
--------------------------------------------------------------------------------
using Pkg

Pkg.add(url="https://github.com/JuliaPy/PyCall.jl")
Pkg.add(url="https://github.com/hzgzh/VMD.jl")
--------------------------------------------------------------------------------
/julia_setting.py:
--------------------------------------------------------------------------------
import subprocess

import julia

# Set up the Python-Julia bridge (installs PyCall on the Julia side).
julia.install()

# Run the VMD preprocessing script; fail loudly if it errors.
subprocess.run("julia ./utils/preprocessing.jl", shell=True, check=True)
--------------------------------------------------------------------------------
/data.bat:
--------------------------------------------------------------------------------
curl -L http://timeseriesclassification.com/Downloads/ECG5000.zip -o ./dataset/ECG5000.zip

cd ./dataset
tar -xf ECG5000.zip
cd ..
echo download and unzip complete.
--------------------------------------------------------------------------------
/data.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

curl -L http://timeseriesclassification.com/Downloads/ECG5000.zip -o ./dataset/ECG5000.zip

cd dataset/
unzip ECG5000.zip
cd ..
echo "download and unzip complete."
--------------------------------------------------------------------------------
/train.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

declare MODEL=${1:-mtl} # cnn / vae / mtl
declare EPOCH=${2:-200}
declare SEED=${3:-123456}

python train.py --model_name=${MODEL} --epoch=${EPOCH} --seed=${SEED}
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM pytorch/pytorch:1.12.1-cuda11.3-cudnn8-devel

# Single layer, so the apt package index from `update` can never go stale.
RUN apt-get update && \
    apt-get dist-upgrade --yes && \
    apt-get install --yes git wget curl unzip && \
    apt-get clean

ENV JULIA_VERSION=1.8.5

RUN mkdir /opt/julia-${JULIA_VERSION} && \
    cd /tmp && \
    wget -q https://julialang-s3.julialang.org/bin/linux/x64/`echo ${JULIA_VERSION} | cut -d. -f 1,2`/julia-${JULIA_VERSION}-linux-x86_64.tar.gz && \
    tar xzf julia-${JULIA_VERSION}-linux-x86_64.tar.gz -C /opt/julia-${JULIA_VERSION} --strip-components=1 && \
    rm /tmp/julia-${JULIA_VERSION}-linux-x86_64.tar.gz

RUN ln -fs /opt/julia-*/bin/julia /usr/local/bin/julia

# The base image already uses /workspace; make it explicit before copying.
WORKDIR /workspace

COPY ./ ./
RUN bash data.sh
RUN pip install -r requirements.txt
RUN julia requirements.jl
RUN python julia_setting.py

RUN echo "##########################" && \
    echo "data processing finished. enjoy." && \
    echo "please EXECUTE 'docker run -it --gpus all --name nn-vmd nn-vmd:latest bash train.sh'" && \
    echo "you can modify train.sh" && \
    echo "##########################"
--------------------------------------------------------------------------------
/utils/preprocessing.jl:
--------------------------------------------------------------------------------
using Base.Threads

using PyCall
using VMD
io = pyimport("scipy.io")
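
# Note: julia_setting.py launches this script from the repository root;
# the cd("utils") near the bottom makes the ../dataset/ paths below
# resolve relative to utils/.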
train_path = "../dataset/ECG5000_TRAIN.arff"
test_path = "../dataset/ECG5000_TEST.arff"

py"""
import numpy as np
from scipy.io import arff
import pandas as pd

def data_load(path):
    data = arff.loadarff(path)
    output = pd.DataFrame(data[0], dtype=np.float32)

    return output.values
"""

function vmd_calculate(path, output_name)
    data = py"""data_load"""(path)
    α = 2000;               # moderate bandwidth constraint
    τ = 0;                  # noise-tolerance (no strict fidelity enforcement)
    K = 3;                  # 3 modes
    tol = 1e-7;
    sample_frequency = 140;

    n = size(data, 1)

    # Preallocate one slot per row so the threaded loop is free of data
    # races (push! from multiple threads is unsafe) and row order is kept.
    output_ch1 = Vector{Vector{Float64}}(undef, n)
    output_ch2 = Vector{Vector{Float64}}(undef, n)
    output_ch3 = Vector{Vector{Float64}}(undef, n)

    @threads for i ∈ 1:n
        v = vmd(data[i, 1:140] ;
                alpha = α,
                tau = τ,
                K = K,
                DC = false,
                init = 1,
                tol = tol,
                sample_frequency = sample_frequency);

        output_ch1[i] = v.signal_d[:, 1];
        output_ch2[i] = v.signal_d[:, 2];
        output_ch3[i] = v.signal_d[:, 3];
    end

    # Stack to (n, 140) per mode, then to (n, 140, 3).
    ch1 = reduce(vcat, transpose.(output_ch1));
    ch2 = reduce(vcat, transpose.(output_ch2));
    ch3 = reduce(vcat, transpose.(output_ch3));
    output = cat(ch1, ch2, ch3, dims=3);

    io.savemat("../dataset/$output_name.mat", Dict("data" => output));
end

cd("utils")

vmd_calculate(train_path, "processed_train")
vmd_calculate(test_path, "processed_test")

cd("../")
--------------------------------------------------------------------------------
/utils/util.py:
--------------------------------------------------------------------------------
import random

import numpy as np
import torch
from torch.utils.data import Dataset

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def weight_init_xavier_uniform(submodule):
    if isinstance(submodule, (torch.nn.Conv1d, torch.nn.Linear)):
        torch.nn.init.xavier_uniform_(submodule.weight)
        submodule.bias.data.fill_(0)

def seed_everything_th(num):
    torch.manual_seed(num)
    torch.cuda.manual_seed(num)
    torch.cuda.manual_seed_all(num)  # if multi-GPU is used
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(num)
    random.seed(num)

class TrainDataset(Dataset):
    """Signals plus up to two optional targets: VMD modes (float) and class labels (long)."""

    def __init__(self, x_data, *args):
        self.x_data = torch.FloatTensor(x_data)
        self.y_data1 = None
        self.y_data2 = None
        assert len(args) < 3
        if len(args) >= 1:
            self.y_data1 = torch.FloatTensor(args[0])
        if len(args) == 2:
            self.y_data2 = torch.LongTensor(args[1])

    def __len__(self):
        return len(self.x_data)

    def __getitem__(self, idx):
        x = self.x_data[idx]
        if self.y_data1 is None:
            return x.to(device)
        if self.y_data2 is None:
            return x.to(device), self.y_data1[idx].to(device)
        return (x.to(device),
                self.y_data1[idx].to(device),
                self.y_data2[idx].to(device))

class TestDataset(TrainDataset):
    """Identical layout to TrainDataset; kept as a separate name for readability."""
    pass
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# NN-VMD

## Prerequisites

- Python 3.7+
- Julia 1.7+
- To use Docker with GPUs, install `nvidia-cuda-toolkit` and `nvidia-container-toolkit`:

```
sudo apt install -y nvidia-cuda-toolkit # nvidia-cuda-toolkit installation
```

```
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list

sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit # nvidia-container-toolkit installation

sudo /etc/init.d/docker restart
```

## How to use (Windows)

1. Download the ECG5000 data from `timeseriesclassification.com` by executing `.\data.bat`
2. Install the Python libraries: `pip install -r requirements.txt`
3. Install the Julia libraries: `julia requirements.jl`
4. Execute `python julia_setting.py`
5. Train a model with `python train.py`, passing the flags shown below
6. You can modify the VMD settings or the AI models via `./utils/util.py`, `./utils/preprocessing.jl`, and `./models/model.py`
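
`train.py` requires `--model_name` (`cnn` / `vae` / `mtl`); `--epoch` and `--seed` are optional (defaults 200 and 123456), matching its argparse definitions. For example:

```
python train.py --model_name=mtl --epoch=200 --seed=123456
```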

## How to use (Linux)

1. Download the ECG5000 data from `timeseriesclassification.com` by executing `sh data.sh`
2. Install the Python libraries: `pip install -r requirements.txt`
3. Install the Julia libraries: `julia requirements.jl`
4. Execute `python julia_setting.py`
5. Train a model: `bash train.sh` (see the examples below)
6. You can modify the VMD settings or the AI models via `./utils/util.py`, `./utils/preprocessing.jl`, and `./models/model.py`
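
`train.sh` forwards its three positional arguments (model, epoch, seed) to `train.py`, defaulting to `mtl`, `200`, and `123456`:

```
bash train.sh               # defaults: mtl, 200 epochs, seed 123456
bash train.sh cnn 100 42    # model_name, epoch, seed
```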

## How to use (Docker)

1. Download the ECG5000 data from `timeseriesclassification.com` by executing `sh data.sh`
2. If the Docker daemon is not running, execute `sudo service docker start`
3. Execute `docker build -t nn-vmd .`
4. Execute the **GPU version** (default model: MTL)
   `docker run -it --gpus all --name nn-vmd nn-vmd:latest bash train.sh`

   or the **CPU version**
   `docker run -it --name nn-vmd nn-vmd:latest bash train.sh`

5. Optional runs against the existing container:

```
docker start nn-vmd                       # required first
docker exec -it nn-vmd bash train.sh cnn
docker exec -it nn-vmd bash train.sh vae
```
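
To evaluate a trained model, run `test.py` (it loads the fixed checkpoint names `./models/cnn_eph100.pt` and `./models/mtl_eph10.pt`, so train with a matching `--epoch` first), for example inside the container:

```
docker exec -it nn-vmd python test.py --model_name=mtl --seed=123456
```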

## Plan
- [x] VAE (Variational Auto Encoder)
- [ ] Graph neural nets + Shallow neural nets
- [x] Multi-task learning (e.g. decomposition and classification tasks)

## Citation

```
@inproceedings{han2022ai,
  title={AI model based variational mode decomposition using signal data},
  author={Han, Seungwoo},
  booktitle={Proceedings of Symposium of the Korean Institute of Communications and Information Sciences},
  pages={1362--1363},
  year={2022}
}
```
--------------------------------------------------------------------------------
/models/model.py:
--------------------------------------------------------------------------------
import torch
import torch.nn as nn

class VMDNet(nn.Module):
    """CNN that maps a raw signal (B, 1, 140) to 3 decomposed modes (B, 3, 140)."""

    def __init__(self):
        super(VMDNet, self).__init__()

        self.conv1 = nn.Conv1d(1, 32, 3, 1, padding="same")
        self.conv2 = nn.Conv1d(32, 64, 3, 1, padding="same")
        self.flatten = nn.Flatten()
        self.dense = nn.Linear(8960, 420)  # 64 channels * 140 -> 3 * 140

    def forward(self, x):
        x = self.conv1(x)
        x = torch.sigmoid(x)
        x = self.conv2(x)
        x = torch.sigmoid(x)
        x = self.flatten(x)
        x = self.dense(x)
        x = x.reshape(-1, 3, 140)
        return x


class VMD_VAE_DNN(nn.Module):
    def __init__(self):
        super(VMD_VAE_DNN, self).__init__()

        self.encoder = nn.Sequential(
            nn.Linear(140, 20),
            nn.Sigmoid()
        )

        self.mu = nn.Linear(20, 5)
        self.log_var = nn.Linear(20, 5)

        self.decoder = nn.Sequential(
            nn.Linear(5, 20),
            nn.Sigmoid(),
            nn.Linear(20, 140)
        )

    def sampling(self, mu, log_var):
        # Reparameterization trick: z = mu + sigma * epsilon
        std = torch.exp(0.5 * log_var)
        epsilon = torch.randn_like(std)

        return epsilon.mul(std).add_(mu)

    def forward(self, x):
        x = self.encoder(x)
        mu, log_var = self.mu(x), self.log_var(x)
        z = self.sampling(mu, log_var)
        out = self.decoder(z)
        return out, mu, log_var

class Classifier(nn.Module):
    def __init__(self) -> None:
        super(Classifier, self).__init__()

        self.layer1 = nn.Linear(140, 5)
        self.flatten = nn.Flatten()

    def forward(self, x):
        x = self.flatten(x)
        x = self.layer1(x)

        return x

class TaskLayer(nn.Module):
    """Shared CNN trunk with two heads: signal decomposition and classification."""

    def __init__(self):
        super(TaskLayer, self).__init__()

        self.conv1 = nn.Conv1d(1, 32, 3, 1, padding=1)
        self.conv2 = nn.Conv1d(32, 64, 3, 1, padding=1)
        self.conv3 = nn.Conv1d(64, 128, 3, 1, padding=1)
        self.flatten = nn.Flatten()
        self.maxpool = nn.MaxPool1d(2)
        self.decomp1 = nn.Linear(2176, 1000)  # 128 channels * 17 after three poolings
        self.decomp2 = nn.Linear(1000, 420)
        self.classify1 = nn.Linear(2176, 300)
        self.classify2 = nn.Linear(300, 5)

    def forward(self, x):
        x = self.maxpool(self.conv1(x))
        x = torch.relu(x)
        x = self.maxpool(self.conv2(x))
        x = torch.relu(x)
        x = self.maxpool(self.conv3(x))
        x = torch.relu(x)
        x = self.flatten(x)
        class_x = self.classify1(x)
        class_x = self.classify2(class_x)
        decomp_x = self.decomp1(x)
        decomp_x = self.decomp2(decomp_x)
        decomp_x = decomp_x.reshape(-1, 3, 140)

        return decomp_x, class_x


if __name__ == "__main__":
    dummy = torch.rand((1, 1, 140))
    model = TaskLayer()

    print(model(dummy))
    torch.onnx.export(model, dummy, "./models/model.onnx", input_names=["signals"], output_names=["reconst_signals", "classification"])
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
import argparse

import numpy as np
import pandas as pd
import torch
from scipy.io import arff, loadmat

from models.model import VMDNet, TaskLayer, VMD_VAE_DNN
from utils.util import seed_everything_th, TrainDataset, TestDataset

parser = argparse.ArgumentParser(description="Arguments classifiers.")
parser.add_argument("--model_name", type=str, required=True,
                    help="model name.",
                    dest="model_name")
parser.add_argument("--seed", type=str, required=False,
                    default="123456",
                    help="seed number.",
                    dest="seed")

args = parser.parse_args()

seed = int(args.seed)
seed_everything_th(seed)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

print(f"current model name: {args.model_name.upper()}")
print(f"current device : {device}")
print(f"seed : {seed}")

# ECG5000: columns 0..139 are the signal, column 140 is the class label (1..5).
train_arr = pd.DataFrame(arff.loadarff("./dataset/ECG5000_TRAIN.arff")[0],
                         dtype=np.float32).values
input_s_tr = train_arr[:, :140].reshape(500, 1, 140)
input_l_tr = train_arr[:, 140] - 1  # shift labels to 0..4

test_arr = pd.DataFrame(arff.loadarff("./dataset/ECG5000_TEST.arff")[0],
                        dtype=np.float32).values
input_s_ts = test_arr[:, :140].reshape(4500, 1, 140)
input_l_ts = test_arr[:, 140] - 1

# VMD targets produced by utils/preprocessing.jl
target_s_tr = loadmat("./dataset/processed_train.mat")["data"].reshape(500, 3, 140)
target_s_ts = loadmat("./dataset/processed_test.mat")["data"].reshape(4500, 3, 140)


def data_loader(model_name):
    if model_name == "cnn":
        train_loader = torch.utils.data.DataLoader(dataset=TrainDataset(
            input_s_tr, target_s_tr), batch_size=32, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=TestDataset(
            input_s_ts, target_s_ts), batch_size=32, shuffle=True)
        model = VMDNet().to(device)
    elif model_name == "vae":
        train_loader = torch.utils.data.DataLoader(
            dataset=TrainDataset(input_s_tr), batch_size=32, shuffle=True)
        test_loader = torch.utils.data.DataLoader(
            dataset=TestDataset(input_s_ts), batch_size=32, shuffle=True)
        model = VMD_VAE_DNN().to(device)
    elif model_name == "mtl":
        train_loader = torch.utils.data.DataLoader(dataset=TrainDataset(
            input_s_tr, target_s_tr, input_l_tr), batch_size=32, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=TestDataset(
            input_s_ts, target_s_ts, input_l_ts), batch_size=32, shuffle=True)
        model = TaskLayer().to(device)
    else:
        raise ValueError(f"unknown model_name: {model_name}")

    return train_loader, test_loader, model

train_loader, test_loader, model = data_loader(args.model_name)
imf1_s = []
imf2_s = []
imf3_s = []
if args.model_name == "cnn":
    model.load_state_dict(torch.load("./models/cnn_eph100.pt"))
if args.model_name == "mtl":
    model.load_state_dict(torch.load("./models/mtl_eph10.pt"))
model.eval()
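
# The (-1, 140, 3) reshape below undoes the row-major (140, 3) -> (3, 140)
# scrambling introduced when the .mat targets were reshaped above, so
# a[0, :, k] is mode k exactly as written by preprocessing.jl.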

with torch.no_grad():
    if args.model_name == "cnn":
        for _, data in enumerate(test_loader):
            output = model(data[0])

            for i in range(data[0].shape[0]):
                a = data[1][i].cpu().numpy().reshape(-1, 140, 3)
                b = output[i].cpu().numpy().reshape(-1, 140, 3)
                imf1 = np.corrcoef(a[0, :, 0], b[0, :, 0])[0, 1]
                imf2 = np.corrcoef(a[0, :, 1], b[0, :, 1])[0, 1]
                imf3 = np.corrcoef(a[0, :, 2], b[0, :, 2])[0, 1]

                imf1_s.append(imf1)
                imf2_s.append(imf2)
                imf3_s.append(imf3)

    if args.model_name == "mtl":
        for _, data in enumerate(test_loader):
            output1, output2 = model(data[0])

            for i in range(data[1].shape[0]):
                a = data[1][i].cpu().numpy().reshape(-1, 140, 3)
                b = output1[i].cpu().numpy().reshape(-1, 140, 3)
                imf1 = np.corrcoef(a[0, :, 0], b[0, :, 0])[0, 1]
                imf2 = np.corrcoef(a[0, :, 1], b[0, :, 1])[0, 1]
                imf3 = np.corrcoef(a[0, :, 2], b[0, :, 2])[0, 1]

                imf1_s.append(imf1)
                imf2_s.append(imf2)
                imf3_s.append(imf3)

print(f"n = {len(imf1_s)}")
print(f"imf1 corr mean: {np.mean(imf1_s)}, std: {np.std(imf1_s)}")
print(f"imf2 corr mean: {np.mean(imf2_s)}, std: {np.std(imf2_s)}")
print(f"imf3 corr mean: {np.mean(imf3_s)}, std: {np.std(imf3_s)}")
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
import argparse

import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from scipy.io import arff, loadmat

from models.model import VMDNet, TaskLayer, VMD_VAE_DNN
from utils.util import seed_everything_th, TrainDataset, TestDataset

parser = argparse.ArgumentParser(description="Arguments classifiers.")
parser.add_argument("--model_name", type=str, required=True,
                    help="model name.",
                    dest="model_name")
parser.add_argument("--epoch", type=str, required=False,
                    default="200",
                    help="epoch number.",
                    dest="epoch")
parser.add_argument("--seed", type=str, required=False,
                    default="123456",
                    help="seed number.",
                    dest="seed")

args = parser.parse_args()

EPOCH = int(args.epoch)
seed = int(args.seed)
seed_everything_th(seed)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

print(f"current model name: {args.model_name.upper()}")
print(f"current device : {device}")
print(f"EPOCH : {EPOCH}")
print(f"seed : {seed}")

# ECG5000: columns 0..139 are the signal, column 140 is the class label (1..5).
train_arr = pd.DataFrame(arff.loadarff("./dataset/ECG5000_TRAIN.arff")[0],
                         dtype=np.float32).values
input_s_tr = train_arr[:, :140].reshape(500, 1, 140)
input_l_tr = train_arr[:, 140] - 1  # shift labels to 0..4

test_arr = pd.DataFrame(arff.loadarff("./dataset/ECG5000_TEST.arff")[0],
                        dtype=np.float32).values
input_s_ts = test_arr[:, :140].reshape(4500, 1, 140)
input_l_ts = test_arr[:, 140] - 1

# VMD targets produced by utils/preprocessing.jl
target_s_tr = loadmat("./dataset/processed_train.mat")["data"].reshape(500, 3, 140)
target_s_ts = loadmat("./dataset/processed_test.mat")["data"].reshape(4500, 3, 140)


def data_loader(model_name):
    if model_name == "cnn":
        train_loader = torch.utils.data.DataLoader(dataset=TrainDataset(
            input_s_tr, target_s_tr), batch_size=32, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=TestDataset(
            input_s_ts, target_s_ts), batch_size=32, shuffle=True)
        model = VMDNet().to(device)
    elif model_name == "vae":
        train_loader = torch.utils.data.DataLoader(
            dataset=TrainDataset(input_s_tr), batch_size=32, shuffle=True)
        test_loader = torch.utils.data.DataLoader(
            dataset=TestDataset(input_s_ts), batch_size=32, shuffle=True)
        model = VMD_VAE_DNN().to(device)
    elif model_name == "mtl":
        train_loader = torch.utils.data.DataLoader(dataset=TrainDataset(
            input_s_tr, target_s_tr, input_l_tr), batch_size=32, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=TestDataset(
            input_s_ts, target_s_ts, input_l_ts), batch_size=32, shuffle=True)
        model = TaskLayer().to(device)
    else:
        raise ValueError(f"unknown model_name: {model_name}")

    return train_loader, test_loader, model


train_loader, test_loader, model = data_loader(args.model_name)

loss_1 = torch.nn.MSELoss()
loss_2 = torch.nn.CrossEntropyLoss()


def custom_fn(outputs, inputs, mu, log_var):
    # VAE loss: summed MSE reconstruction term plus KL divergence to N(0, I).
    recon = F.mse_loss(outputs, inputs, reduction="sum")
    KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
    return recon + KLD


def train(model, model_name):
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    print("-" * 100)
    alpha = 0.9  # weighting between the classification and decomposition losses
    for i in range(EPOCH):
        avg_cost = 0
        if model_name == "cnn":
            for _, data in enumerate(train_loader):
                optimizer.zero_grad()
                output = model(data[0])

                cost = loss_1(output, data[1])
                cost.backward()
                optimizer.step()
                avg_cost += cost / len(train_loader)

            if i % 10 == 0:
                print("epoch : {0}, loss : {1}".format(i, avg_cost.item()))

        if model_name == "vae":
            for _, data in enumerate(train_loader):
                optimizer.zero_grad()
                out, mu, log_var = model(data[0])

                cost = custom_fn(out, data[0], mu, log_var)
                cost.backward()
                optimizer.step()
                avg_cost += cost / len(train_loader)

            if i % 10 == 0:
                print("epoch : {0}, loss : {1}".format(i, avg_cost.item()))

        if model_name == "mtl":
            for _, data in enumerate(train_loader):
                optimizer.zero_grad()

                output1, output2 = model(data[0])
                loss1 = loss_1(output1, data[1]) * (1 - alpha)
                loss2 = loss_2(output2, data[2]) * alpha

                total_loss = loss1 + loss2

                total_loss.backward()

                optimizer.step()
                avg_cost += total_loss / len(train_loader)

            if i % 10 == 0:
                print("epoch : {0}, loss : {1}".format(i, avg_cost.item()))

    print("-" * 100)

    return model


trained_model = train(model, args.model_name)

torch.save(trained_model.state_dict(), f"./models/{args.model_name}_eph{args.epoch}.pt")
--------------------------------------------------------------------------------