├── .gitignore
├── README.md
├── data
│   ├── KTHDataset
│   │   └── KTHDateset
│   └── mnist
│       └── mnist
├── dataloader
│   ├── KTHDataset
│   │   ├── KTHDataset.py
│   │   └── MakeDataset.py
│   ├── MovingMNIST
│   │   ├── MovingMNIST.py
│   │   └── demo.py
│   └── README.md
├── experiments
│   ├── PredRNN
│   │   └── custom.py
│   ├── README.md
│   └── video_prediction_demo
│       └── custom.py
├── logs
│   ├── PredRNN.log
│   └── video_prediction_demo.log
├── model
│   ├── README.md
│   ├── STconvLSTM.py
│   ├── convRNN.py
│   └── loss
│       ├── L1_L2_Loss.py
│       └── SSIM_Loss.py
├── requirements.txt
├── tools
│   ├── README.md
│   ├── train_config.json
│   ├── train_config_PredRNN.json
│   ├── train_video_prediction_KTHDataset.py
│   └── train_video_prediction_MNIST.py
└── utils
    ├── README.md
    ├── __init__.py
    ├── average_meter_helper.py
    ├── config_helper.py
    ├── log_helper.py
    └── memory
        ├── README.md
        ├── gpu_mem_track.py
        └── modelsize_estimate.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | local_settings.py
56 | db.sqlite3
57 |
58 | # Flask stuff:
59 | instance/
60 | .webassets-cache
61 |
62 | # Scrapy stuff:
63 | .scrapy
64 |
65 | # Sphinx documentation
66 | docs/_build/
67 |
68 | # PyBuilder
69 | target/
70 |
71 | # Jupyter Notebook
72 | .ipynb_checkpoints
73 |
74 | # pyenv
75 | .python-version
76 |
77 | # celery beat schedule file
78 | celerybeat-schedule
79 |
80 | # SageMath parsed files
81 | *.sage.py
82 |
83 | # Environments
84 | .env
85 | .venv
86 | env/
87 | venv/
88 | ENV/
89 | env.bak/
90 | venv.bak/
91 |
92 | # Spyder project settings
93 | .spyderproject
94 | .spyproject
95 |
96 | # Rope project settings
97 | .ropeproject
98 |
99 | # mkdocs documentation
100 | /site
101 |
102 | # mypy
103 | .mypy_cache/
104 |
105 | .DS_Store
106 | data/mnist/raw
107 | data/mnist/processed
108 | data/KTHDataset/raw
109 | data/KTHDataset/processed
110 | .vscode/settings.json
111 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Frame_Video_Prediction_Pytorch
 2 | A frame/video prediction algorithm framework built with PyTorch.
 3 | This project is meant to be a deep learning example to help my friends finish their SURF project.
 4 | Hope this can help you too~~
5 |
6 | # How to start
 7 | Walk through each folder's README.md file for a quick look.
 8 |
 9 | For the first time, you may want to set up a conda environment:
10 |
11 | `conda create -n torchenv python=3.6`
12 |
13 | `source activate torchenv`
14 |
15 | `pip install -r requirements.txt`
16 |
17 | Then run our demo directly:
18 |
19 | `python3 ./tools/train_video_prediction_MNIST.py`
20 |
21 | The data will be downloaded to ./data, and the results will be stored in ./board (via tensorboardX) and in ./logs.
22 |
23 | You can use:
24 |
25 | `tensorboard --logdir ./board/video_prediction_demo/`
26 |
27 | to see the results.
28 |
29 | PS: we assume the repository root is the working directory when running commands.
30 |
--------------------------------------------------------------------------------
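As a pointer for the README above: a minimal sketch of how results could be written under ./board with tensorboardX. The tag name and the loop here are illustrative assumptions, not the repo's actual training code:

```python
# Hypothetical sketch: writing scalars that
# `tensorboard --logdir ./board/video_prediction_demo/` can display.
from tensorboardX import SummaryWriter

writer = SummaryWriter('./board/video_prediction_demo')
for step in range(100):
    loss = 1.0 / (step + 1)  # placeholder value
    writer.add_scalar('train_loss', loss, step)
writer.close()
```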
/data/KTHDataset/KTHDateset:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JaMesLiMers/Frame_Video_Prediction_Pytorch/fec5870a2d2ce8f91085c38c46f86e3b58ee0385/data/KTHDataset/KTHDateset
--------------------------------------------------------------------------------
/data/mnist/mnist:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JaMesLiMers/Frame_Video_Prediction_Pytorch/fec5870a2d2ce8f91085c38c46f86e3b58ee0385/data/mnist/mnist
--------------------------------------------------------------------------------
/dataloader/KTHDataset/KTHDataset.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import torch.utils.data as data
3 | from PIL import Image
4 | import sys
5 | import os
6 | import os.path
7 | import errno
8 | import numpy as np
9 | import torch
10 | import codecs
11 | import joblib
12 | from torchvision import transforms
13 |
14 |
15 | parent_path = os.path.dirname(os.path.dirname(os.getcwd()))
16 | if 'KTHDataset' in os.getcwd():
17 | os.chdir(parent_path)
18 | sys.path.insert(0, os.getcwd())
19 |
20 |
21 | from dataloader.KTHDataset.MakeDataset import make_data
22 |
23 |
24 | class KTHDataset(data.Dataset):
25 | """`KTHDataset `_ Dataset.
26 |
27 | Args:
28 | root (string): Root directory of dataset where ``processed/training.pt``
29 | ``processed/validate.pt`` and ``processed/test.pt`` exist.
30 | train (bool or string, optional): If True, creates dataset from ``training.pt``,
31 | otherwise from ``test.pt``, If string in 'test/train/validate' then create
32 | according dataset.
33 | data_length (int or None): number of data per epoch
34 | download (bool, optional): If true, downloads the dataset from the internet and
35 | puts it in root directory. If dataset is already downloaded, it is not
36 | downloaded again.
37 | transform (callable, optional): A function/transform that takes in an PIL image
38 | and returns a transformed version. E.g, ``transforms.RandomCrop``
39 | target_transform (callable, optional): A function/transform that takes in an PIL
40 | image and returns a transformed version. E.g, ``transforms.RandomCrop``
41 | """
42 | urls = [
43 | 'http://www.nada.kth.se/cvap/actions/boxing.zip',
44 | 'http://www.nada.kth.se/cvap/actions/handclapping.zip',
45 | 'http://www.nada.kth.se/cvap/actions/handwaving.zip',
46 | 'http://www.nada.kth.se/cvap/actions/jogging.zip',
47 | 'http://www.nada.kth.se/cvap/actions/running.zip',
48 | 'http://www.nada.kth.se/cvap/actions/walking.zip'
49 | ]
50 | sequence_url = 'http://www.nada.kth.se/cvap/actions/00sequences.txt'
51 | raw_folder = 'raw'
52 | processed_folder = 'processed'
53 | training_file = 'train.pkl'
54 | test_file = 'test.pkl'
55 | validate_file = 'validate.pkl'
56 | sequence_name = '00sequences.txt'
57 |
58 | def __init__(self, root, train=True, transform=None, target_transform=None, download=False, data_length=None):
59 | self.root = os.path.expanduser(root)
60 | self.transform = transform
61 | self.target_transform = target_transform
62 | self.train = train # training set or test set
63 | self.data_length = data_length
64 |
65 | if download:
66 | self.download()
67 |
68 | if not self._check_exists():
69 | raise RuntimeError('Dataset not found.' +
70 | ' You can use download=True to download it')
71 |
 72 |         if self.train is True:
 73 |             self.data = joblib.load(
 74 |                 os.path.join(self.root, self.processed_folder, self.training_file))
 75 |             self.data += joblib.load(
 76 |                 os.path.join(self.root, self.processed_folder, self.validate_file))
 77 |         elif self.train is False:
 78 |             self.data = joblib.load(
 79 |                 os.path.join(self.root, self.processed_folder, self.test_file))
 80 |         elif self.train == 'train':
 81 |             self.data = joblib.load(
 82 |                 os.path.join(self.root, self.processed_folder, self.training_file))
 83 |         elif self.train == 'test':
 84 |             self.data = joblib.load(
 85 |                 os.path.join(self.root, self.processed_folder, self.test_file))
 86 |         elif self.train == 'validate':
 87 |             self.data = joblib.load(
 88 |                 os.path.join(self.root, self.processed_folder, self.validate_file))
 89 |         else:
 90 |             raise NotImplementedError("invalid input for train, need True/False or 'train'/'test'/'validate'")
91 |
92 | # self.test_data()
93 |
94 | # init index list
95 | # index_list = []
96 | # while len(index_list) <= self.__len__():
97 | # index_list += list(range(len(self.data)))
98 | # self.index_list = index_list[0:self.__len__()]
99 | # or random
100 | self.index_list = np.random.randint(low=0, high=len(self.data), size=self.__len__())
101 |
102 | def test_data(self):
103 | for i in self.data:
104 | sequence = i['sequence']
105 | for j in range(len(sequence)):
106 | if j%2 == 0:
107 | if sequence[j+1] - sequence[j] <= 20:
108 | print(i["filename"] + 'error')
109 |
110 | def __len__(self):
111 |         if not isinstance(self.data_length, int):
112 | length = 0
113 | for i in self.data:
114 | length += i['sequence'][-1]
115 | self.data_length = length
116 | return self.data_length
117 | else:
118 | return self.data_length
119 |
120 | def __getitem__(self, index):
121 | """
122 | Args:
123 | index (int): Index
124 |
125 | Returns:
126 | tuple: (seq, target) where sampled sequences are splitted into a seq
127 | and target part
128 | """
129 |
130 | sequence = self.data[self.index_list[index]]["sequence"]
131 | choice = np.random.randint(low=0, high=len(sequence)//2)*2
132 | frames = np.random.randint(low=sequence[choice]-1, high=sequence[choice+1] - 20 -1)
133 | train_frames = self.data[self.index_list[index]]["frames"][frames:frames+10]
134 | gt_frames = self.data[self.index_list[index]]["frames"][frames+10:frames+20]
135 |
136 |
137 | train_frames = [Image.fromarray(train_frames[i], mode='L') for i in range(10)]
138 | gt_frames = [Image.fromarray(gt_frames[i], mode='L') for i in range(10)]
139 |
140 | if self.transform is not None:
141 | train_frames = torch.stack([self.transform(train_frames[i]) for i in range(10)])
142 |
143 | if self.target_transform is not None:
144 | gt_frames = torch.stack([self.target_transform(gt_frames[i]) for i in range(10)])
145 |
146 | return train_frames, gt_frames
147 |
148 | def _check_exists(self):
149 | return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \
150 | os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file)) and \
151 | os.path.exists(os.path.join(self.root, self.processed_folder, self.validate_file))
152 |
153 |
154 | def download(self):
155 | """Download the KTH data if it doesn't exist in processed_folder already."""
156 | from six.moves import urllib
157 | import zipfile
158 |
159 | if self._check_exists():
160 | return
161 |
162 | # download files
163 | try:
164 | os.makedirs(os.path.join(self.root, self.raw_folder))
165 | os.makedirs(os.path.join(self.root, self.processed_folder))
166 | except OSError as e:
167 | if e.errno == errno.EEXIST:
168 | pass
169 | else:
170 | raise
171 |
172 | for url in self.urls:
173 | print('Downloading ' + url)
174 | data = urllib.request.urlopen(url)
175 | filename = url.rpartition('/')[2]
176 | file_dir = os.path.join(self.root, self.raw_folder)
177 | file_path = os.path.join(self.root, self.raw_folder, filename)
178 | with open(file_path, 'wb') as f:
179 | f.write(data.read())
180 |
181 | try:
182 | os.makedirs(file_path.replace('.zip', ''))
183 | except OSError as e:
184 | if e.errno == errno.EEXIST:
185 | pass
186 | else:
187 | raise
188 |
189 | with zipfile.ZipFile(file_path) as zip_f:
190 | for fileM in zip_f.namelist():
191 | zip_f.extract(fileM, file_path.replace('.zip', ''))
192 | os.unlink(file_path)
193 |
194 | print('downloading sequence file...')
195 | data = urllib.request.urlopen(self.sequence_url)
196 | sequence_name = self.sequence_url.rpartition('/')[2]
197 | file_dir = os.path.join(self.root, self.raw_folder)
198 | file_path = os.path.join(self.root, self.raw_folder, sequence_name)
199 | with open(file_path, 'wb') as f:
200 | f.write(data.read())
201 |
202 | # process and save as torch files
203 | print('Processing...')
204 | train_data, test_data, validate_data = make_data(self.root, self.raw_folder, self.processed_folder, self.sequence_name)
205 | print(len(train_data))
206 | print(len(test_data))
207 |         print(len(validate_data))
208 |
209 | # Dump data to file.
210 | print('Dumping train data to file...')
211 | with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:
212 | joblib.dump(train_data, f)
213 | print('Dumping test data to file...')
214 | with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:
215 | joblib.dump(test_data, f)
216 | print('Dumping validate data to file...')
217 | with open(os.path.join(self.root, self.processed_folder, self.validate_file), 'wb') as f:
218 | joblib.dump(validate_data, f)
219 |
220 | print('Done!')
221 |
222 | def __repr__(self):
223 | fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
224 | fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
225 | fmt_str += ' Number of data: {}\n'.format(len(self.data))
226 | tmp = 'train' if self.train is True else 'test'
227 | fmt_str += ' Train/test: {}\n'.format(tmp)
228 | fmt_str += ' Root Location: {}\n'.format(self.root)
229 | tmp = ' Transforms (if any): '
230 | fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
231 | tmp = ' Target Transforms (if any): '
232 | fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
233 | return fmt_str
234 |
235 | if __name__ == "__main__":
236 | a = KTHDataset('./data/KTHDataset', download=True)
237 |
--------------------------------------------------------------------------------
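For reference, a minimal usage sketch of the class above; this is an assumption based on its signature and the MovingMNIST demo, not code from the repo. Raw KTH frames are 120x160, so we resize to the 64x64 input size recorded in logs/PredRNN.log:

```python
# Minimal usage sketch for KTHDataset, run from the repository root.
import torch
import torchvision.transforms as transforms
from dataloader.KTHDataset.KTHDataset import KTHDataset

transform = transforms.Compose([transforms.Resize((64, 64)), transforms.ToTensor()])
train_set = KTHDataset(root='./data/KTHDataset', train=True, download=True,
                       transform=transform, target_transform=transform)

train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=1, shuffle=True)
seq, target = next(iter(train_loader))
print(seq.shape, target.shape)  # expected: torch.Size([1, 10, 1, 64, 64]) each
```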
/dataloader/KTHDataset/MakeDataset.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import joblib
4 | import re
5 | import cv2
6 |
7 | CATEGORIES = ["boxing", "handclapping", "handwaving", "jogging", "running",
8 | "walking"]
9 |
10 | # Dataset are divided according to the instruction at:
11 | # http://www.nada.kth.se/cvap/actions/00sequences.txt
12 | TRAIN_PEOPLE_ID = [11, 12, 13, 14, 15, 16, 17, 18]
13 | DEV_PEOPLE_ID = [19, 20, 21, 23, 24, 25, 1, 4]
14 | TEST_PEOPLE_ID = [22, 2, 3, 5, 6, 7, 8, 9, 10]
15 |
16 | def make_data(data_path, raw_folder, processed_folder, sequence_name='00sequences.txt'):
17 |
18 | train = []
19 | dev = []
20 | test = []
21 | sequence = match_sequence(os.path.join(data_path, raw_folder, sequence_name))
22 |
23 | frames = []
24 |
25 | n_processed_files = 0
26 |
27 | for category in CATEGORIES:
28 | print("Processing category %s" % category)
29 |
30 | # Get all files in current category's folder.
31 | folder_path = os.path.join(data_path, raw_folder, category)
32 | filenames = os.listdir(folder_path)
33 |
34 | for filename in filenames:
35 | filepath = os.path.join(folder_path, filename)
36 | vid = cv2.VideoCapture(filepath)
37 |
38 | # Store features in current file.
39 | frames_current_file = []
40 |
41 | while vid.isOpened():
42 | ret, frame = vid.read()
43 |
44 | if not ret:
45 | break
46 |
47 | frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
48 |
49 | frames_current_file.append(frame)
50 |
 51 |             if 'person13_handclapping_d3' in filename:  # corrupted clip, absent from 00sequences.txt
52 | continue
53 |
54 | if len(frames_current_file) < sequence[filename[:-11]][-1]:
55 | print('problem: {}, seq {}/c {}, fixed'.format(filename[:-11], sequence[filename[:-11]][-1], len(frames_current_file)))
56 | sequence[filename[:-11]][-1] = len(frames_current_file)
57 |
58 |
59 |
60 | frames.append({
61 | 'filename': filename,
62 | 'category': category,
63 | 'frames': frames_current_file,
64 | 'sequence': sequence[filename[:-11]],
65 | })
66 |
67 | n_processed_files += 1
68 | if n_processed_files % 100 == 0:
69 | print("Done %d files. (total: 600)" % n_processed_files)
70 |
71 | return split_data(data_path, processed_folder, frames)
72 |
73 | def split_data(data_path, processed_folder, all_data):
74 |
75 | train_id = list(map(lambda x: str(x).zfill(2), TRAIN_PEOPLE_ID))
76 | test_id = list(map(lambda x: str(x).zfill(2), TEST_PEOPLE_ID))
77 | validate_id = list(map(lambda x: str(x).zfill(2), DEV_PEOPLE_ID))
78 |
79 | train_data = []
80 | test_data = []
81 | validate_data = []
82 |
83 | def find_id(file_name):
84 | return re.findall(r"([0-9]{2})", file_name)[0]
85 |
 86 |     # Split the data into train/test/validate sets by person id
87 | for notes in all_data:
88 | notes_id = find_id(notes['filename'])
89 |
90 | if notes_id in train_id:
91 | train_data.append(notes)
92 | if notes_id in test_id:
93 | test_data.append(notes)
94 | if notes_id in validate_id:
95 | validate_data.append(notes)
96 |
97 | # Dump data to file.
98 | # print('Dumping train data to file...')
99 | # joblib.dump(all_data, open(os.path.join(data_path, processed_folder, 'train.pkl'), "wb"))
100 | # print('Dumping test data to file...')
101 | # joblib.dump(all_data, open(os.path.join(data_path, processed_folder, 'test.pkl'), "wb"))
102 | # print('Dumping validate data to file...')
103 | # joblib.dump(all_data, open(os.path.join(data_path, processed_folder, 'validate.pkl'), "wb"))
104 | return train_data, test_data, validate_data
105 |
106 | def match_sequence(path):
107 | with open(path, 'r') as f:
108 | data = f.read()
109 | h = re.findall(r"((?:person\d\d_\w{1,}_\w{1,})|(?:\d{1,4}))", data)
110 |
111 | start = False
112 | list_ = []
113 | all_list = []
114 | for note in h:
115 | if 'person' in note and start == False:
116 | start = True
117 | list_.append(note)
118 | elif 'person' not in note and start == True:
119 | list_.append(note)
120 | elif 'person' in note and start == True:
121 | all_list.append(list_)
122 | list_ = []
123 | list_.append(note)
124 | all_list.append(list_)
125 |
126 | dic = {}
127 |
128 | for note in all_list:
129 | if 'person13_handclapping_d3' in note[0]:
130 | continue
131 | to_add = [1,]
132 | for i in range(2,len(note)-1):
133 | if int(note[i])+1 == int(note[i+1]) or int(note[i])-1 == int(note[i-1]):
134 | continue
135 | else:
136 | to_add.append(int(note[i]))
137 | to_add.append(int(note[-1]))
138 | dic[note[0]] = to_add
139 |
140 | return dic
141 |
142 | if __name__ == "__main__":
143 | # print(os.getcwd())
144 | # match_sequence('./data/KTHDataset/raw/00sequences.txt')
145 | make_data(data_path='./data/KTHDataset', raw_folder='raw', processed_folder='processed', sequence_name='00sequences.txt')
146 | pass
147 |
--------------------------------------------------------------------------------
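A note on what `match_sequence` returns, since the boundary-collapsing loop is easy to misread. Each entry maps a clip name to a flat list of [start, end, start, end, ...] frame boundaries, and sub-ranges that touch are merged. The values below are illustrative, not taken from the real 00sequences.txt:

```python
# Illustration of the boundary-collapsing logic in match_sequence.
#
# A sequence line like   "person01_boxing_d1  frames  1-95, 96-185, 186-360"
# has touching sub-ranges (95 -> 96, 185 -> 186), so the interior boundaries
# are dropped and the whole clip collapses to a single usable segment:
#     dic['person01_boxing_d1'] == [1, 360]
#
# A line with a gap, say   "1-95, 100-200", keeps separate segments:
#     dic['person01_boxing_d1'] == [1, 95, 100, 200]
#
# KTHDataset.__getitem__ then reads the list as (start, end) pairs when it
# samples a 20-frame window (10 input + 10 target frames).
```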
/dataloader/MovingMNIST/MovingMNIST.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import torch.utils.data as data
3 | from PIL import Image
4 | import os
5 | import os.path
6 | import errno
7 | import numpy as np
8 | import torch
9 | import codecs
10 | from torchvision import transforms
11 |
12 |
13 | class MovingMNIST(data.Dataset):
14 | """`MovingMNIST `_ Dataset.
15 |
16 | Args:
17 | root (string): Root directory of dataset where ``processed/training.pt``
18 | and ``processed/test.pt`` exist.
19 | train (bool, optional): If True, creates dataset from ``training.pt``,
20 | otherwise from ``test.pt``.
21 | split (int, optional): Train/test split size. Number defines how many samples
22 | belong to test set.
23 | download (bool, optional): If true, downloads the dataset from the internet and
24 | puts it in root directory. If dataset is already downloaded, it is not
25 | downloaded again.
26 | transform (callable, optional): A function/transform that takes in an PIL image
27 | and returns a transformed version. E.g, ``transforms.RandomCrop``
28 | target_transform (callable, optional): A function/transform that takes in an PIL
29 | image and returns a transformed version. E.g, ``transforms.RandomCrop``
30 | """
31 | urls = [
32 | 'https://github.com/JaMesLiMers/MovingMNIST/raw/master/mnist_test_seq.npy.gz'
33 | ]
34 | raw_folder = 'raw'
35 | processed_folder = 'processed'
36 | training_file = 'moving_mnist_train.pt'
37 | test_file = 'moving_mnist_test.pt'
38 |
39 | def __init__(self, root, train=True, split=1000, transform=None, target_transform=None, download=False):
40 | self.root = os.path.expanduser(root)
41 | self.transform = transform
42 | self.target_transform = target_transform
43 | self.split = split
44 | self.train = train # training set or test set
45 |
46 |
47 | if download:
48 | self.download()
49 |
50 | if not self._check_exists():
51 | raise RuntimeError('Dataset not found.' +
52 | ' You can use download=True to download it')
53 |
54 | if self.train:
55 | self.train_data = torch.load(
56 | os.path.join(self.root, self.processed_folder, self.training_file))
57 | else:
58 | self.test_data = torch.load(
59 | os.path.join(self.root, self.processed_folder, self.test_file))
60 |
61 | def __getitem__(self, index):
62 | """
63 | Args:
64 | index (int): Index
65 |
66 | Returns:
67 | tuple: (seq, target) where sampled sequences are splitted into a seq
68 | and target part
69 | """
70 | if self.train:
71 | seq, target = self.train_data[index, :10], self.train_data[index, 10:]
72 | else:
73 | seq, target = self.test_data[index, :10], self.test_data[index, 10:]
74 |
75 | # doing this so that it is consistent with all other datasets
76 | # to return a PIL Image
77 | seq = [Image.fromarray(seq.numpy()[i, :, :], mode='L') for i in range(10)]
78 | target = [Image.fromarray(target.numpy()[i, :, :], mode='L') for i in range(10)]
79 |
80 | if self.transform is not None:
81 | seq = torch.stack([self.transform(seq[i]) for i in range(10)])
82 |
83 | if self.target_transform is not None:
84 | target = torch.stack([self.target_transform(target[i]) for i in range(10)])
85 |
86 | return seq, target
87 |
88 | def __len__(self):
89 | if self.train:
90 | return len(self.train_data)
91 | else:
92 | return len(self.test_data)
93 |
94 | def _check_exists(self):
95 | return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \
96 | os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))
97 |
98 | def download(self):
99 | """Download the Moving MNIST data if it doesn't exist in processed_folder already."""
100 | from six.moves import urllib
101 | import gzip
102 |
103 | if self._check_exists():
104 | return
105 |
106 | # download files
107 | try:
108 | os.makedirs(os.path.join(self.root, self.raw_folder))
109 | os.makedirs(os.path.join(self.root, self.processed_folder))
110 | except OSError as e:
111 | if e.errno == errno.EEXIST:
112 | pass
113 | else:
114 | raise
115 |
116 | for url in self.urls:
117 | print('Downloading ' + url)
118 | data = urllib.request.urlopen(url)
119 | filename = url.rpartition('/')[2]
120 | file_path = os.path.join(self.root, self.raw_folder, filename)
121 | with open(file_path, 'wb') as f:
122 | f.write(data.read())
123 | with open(file_path.replace('.gz', ''), 'wb') as out_f, \
124 | gzip.GzipFile(file_path) as zip_f:
125 | out_f.write(zip_f.read())
126 | os.unlink(file_path)
127 |
128 | # process and save as torch files
129 | print('Processing...')
130 |
131 | training_set = torch.from_numpy(
132 | np.load(os.path.join(self.root, self.raw_folder, 'mnist_test_seq.npy')).swapaxes(0, 1)[:-self.split]
133 | )
134 | test_set = torch.from_numpy(
135 | np.load(os.path.join(self.root, self.raw_folder, 'mnist_test_seq.npy')).swapaxes(0, 1)[-self.split:]
136 | )
137 |
138 | with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:
139 | torch.save(training_set, f)
140 | with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:
141 | torch.save(test_set, f)
142 |
143 | print('Done!')
144 |
145 | def __repr__(self):
146 | fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
147 | fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
148 | tmp = 'train' if self.train is True else 'test'
149 | fmt_str += ' Train/test: {}\n'.format(tmp)
150 | fmt_str += ' Root Location: {}\n'.format(self.root)
151 | tmp = ' Transforms (if any): '
152 | fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
153 | tmp = ' Target Transforms (if any): '
154 | fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
155 | return fmt_str
156 |
157 |
158 |
159 |
--------------------------------------------------------------------------------
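A shape walkthrough for `download()` above, assuming the standard mnist_test_seq.npy release:

```python
# Shapes involved in MovingMNIST.download():
#   raw array:       (20, 10000, 64, 64)   # (time, sequence, H, W)
#   after swapaxes:  (10000, 20, 64, 64)   # (sequence, time, H, W)
# The first 10000 - split sequences become the training set and the last
# `split` sequences (default 1000) become the test set.
```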
/dataloader/MovingMNIST/demo.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 | parent_path = os.path.dirname(os.path.dirname(os.getcwd()))
7 | print(os.getcwd())
8 | if 'MovingMNIST' in os.getcwd():
9 | os.chdir(parent_path)
10 | sys.path.insert(0, os.getcwd())
11 |
12 | import torch
13 | import torch.nn as nn
14 | from torch.autograd import Variable
15 | import torchvision.datasets as datasets
16 | import torchvision.transforms as transforms
17 | import torch.nn.functional as F
18 | import torch.optim as optim
19 |
20 | from dataloader.MovingMNIST.MovingMNIST import MovingMNIST
21 |
22 | root = './data'
23 | if not os.path.exists(root):
24 | os.mkdir(root)
25 |
26 |
27 | train_set = MovingMNIST(root=os.path.join('.', 'data','mnist'), train=True, download=True,
28 | transform=transforms.Compose([transforms.ToTensor(),]),
29 | target_transform=transforms.Compose([transforms.ToTensor(),]))
30 | test_set = MovingMNIST(root=os.path.join('.', 'data','mnist'), train=False, download=True,
31 | transform=transforms.Compose([transforms.ToTensor(),]),
32 | target_transform=transforms.Compose([transforms.ToTensor(),]))
33 |
34 | batch_size = 100
35 |
36 | train_loader = torch.utils.data.DataLoader(
37 | dataset=train_set,
38 | batch_size=batch_size,
39 | shuffle=True)
40 | test_loader = torch.utils.data.DataLoader(
41 | dataset=test_set,
42 | batch_size=batch_size,
43 | shuffle=False)
44 |
45 | print('==>>> total training batch number: {}'.format(len(train_loader)))
46 | print('==>>> total testing batch number: {}'.format(len(test_loader)))
47 |
48 |
49 | for seq, seq_target in train_loader:
50 | print('--- Sample')
51 | print('Input: ', seq.shape)
52 | print('Target: ', seq_target.shape)
53 | break
--------------------------------------------------------------------------------
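demo.py imports matplotlib but never uses it; a short optional continuation (not in the repo) could visualize the sampled batch:

```python
# Optional continuation of demo.py: show the first input frame of the batch.
import matplotlib.pyplot as plt

frame = seq[0, 0, 0].numpy()  # (batch, time, channel, H, W) -> (64, 64)
plt.imshow(frame, cmap='gray')
plt.title('MovingMNIST: first input frame')
plt.show()
```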
/dataloader/README.md:
--------------------------------------------------------------------------------
1 | # Dataloader
2 | This folder stores the dataloaders.
3 |
4 | Every dataset has one dataloader and a demo; you can run demo.py to test the dataset.
--------------------------------------------------------------------------------
/experiments/PredRNN/custom.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from model.STconvLSTM import STConvLSTMCell
4 |
5 | class Custom(nn.Module):
6 | def __init__(self, cfg):
7 | """
  8 |         Everything we need is in cfg,
  9 |         accessed as cfg["par_name"]
10 | """
11 | super(Custom, self).__init__()
12 | self.input_size=cfg['input_size']
13 | self.hidden_dim=cfg['hidden_dim']
14 | self.input_dim=cfg['input_dim']
15 | self.kernel_size=tuple(cfg['kernel_size'])
16 |
17 | self.stlstm_1 = STConvLSTMCell(input_size=self.input_size,
18 | input_dim=self.input_dim,
19 | hidden_dim=self.hidden_dim,
20 | kernel_size=self.kernel_size,
21 | bias=True)
22 |
23 | self.stlstm_2 = STConvLSTMCell(input_size=self.input_size,
24 | input_dim=self.hidden_dim,
25 | hidden_dim=self.hidden_dim,
26 | kernel_size=self.kernel_size,
27 | bias=True)
28 |
29 | self.stlstm_3 = STConvLSTMCell(input_size=self.input_size,
30 | input_dim=self.hidden_dim,
31 | hidden_dim=self.hidden_dim,
32 | kernel_size=self.kernel_size,
33 | bias=True)
34 |
35 | self.stlstm_4 = STConvLSTMCell(input_size=self.input_size,
36 | input_dim=self.hidden_dim,
37 | hidden_dim=self.hidden_dim,
38 | kernel_size=self.kernel_size,
39 | bias=True)
40 |
41 | self.head = nn.Conv2d(in_channels=self.hidden_dim,
42 | out_channels=self.input_dim,
43 | kernel_size=(1,1))
44 |
45 |
46 | def forward(self, input, hidden=None, future=10):
47 | """
48 | input: (b,t,c,h,w)
49 | hidden: hidden of last time (b, c_hidden, h, w)
50 | future: number of future frame to predict
51 | """
52 | # Init hidden
53 | if hidden is None:
54 | h_t1, c_t1, m_t1 = self.stlstm_1.init_hidden(input.size(0))
55 | h_t2, c_t2, _ = self.stlstm_2.init_hidden(input.size(0))
56 | h_t3, c_t3, _ = self.stlstm_3.init_hidden(input.size(0))
57 | h_t4, c_t4, _ = self.stlstm_4.init_hidden(input.size(0))
58 | else:
59 | # TODO: build a stateful model
60 | raise NotImplementedError
61 |
62 | outputs = []
63 |
64 | seq_len = input.size(1)
65 |
66 | for t in range(seq_len):
 67 |             if t != 0:
68 | m_t1 = m_t4
69 |
70 | h_t1, c_t1, m_t1 = self.stlstm_1(input_tensor=input[:,t,:,:,:],
71 | cur_state=[h_t1, c_t1, m_t1])
72 |
73 | h_t2, c_t2, m_t2 = self.stlstm_2(input_tensor=h_t1,
74 | cur_state=[h_t2, c_t2, m_t1])
75 |
76 | h_t3, c_t3, m_t3 = self.stlstm_3(input_tensor=h_t2,
77 | cur_state=[h_t3, c_t3, m_t2])
78 |
79 | h_t4, c_t4, m_t4 = self.stlstm_4(input_tensor=h_t3,
80 | cur_state=[h_t4, c_t4, m_t3])
81 |
82 | output = self.head(h_t4)
83 | output = torch.sigmoid(output)
84 | outputs += [output]
85 |
86 |
87 | for t in range(future):
88 | m_t1 = m_t4
89 |
90 | h_t1, c_t1, m_t1 = self.stlstm_1(input_tensor=outputs[-1],
91 | cur_state=[h_t1, c_t1, m_t1])
92 |
93 | h_t2, c_t2, m_t2 = self.stlstm_2(input_tensor=h_t1,
94 | cur_state=[h_t2, c_t2, m_t1])
95 |
96 | h_t3, c_t3, m_t3 = self.stlstm_3(input_tensor=h_t2,
97 | cur_state=[h_t3, c_t3, m_t2])
98 |
99 | h_t4, c_t4, m_t4 = self.stlstm_4(input_tensor=h_t3,
100 | cur_state=[h_t4, c_t4, m_t3])
101 |
102 | output = self.head(h_t4)
103 | output = torch.sigmoid(output)
104 | outputs += [output]
105 |
106 |
107 | outputs = torch.stack(outputs, 1)
108 |
109 | return outputs
110 |
111 | def test():
112 | pass
113 |
114 | if __name__ == "__main__":
115 | test()
116 |
117 |
118 |
--------------------------------------------------------------------------------
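A shape-check sketch for the model above (not part of the repo); the cfg values mirror the 'model' section recorded in logs/PredRNN.log:

```python
# Dummy forward pass through the PredRNN-style Custom model.
import torch
from experiments.PredRNN.custom import Custom

cfg = {'input_size': [64, 64], 'input_dim': 1,
       'hidden_dim': 64, 'kernel_size': [3, 3], 'input_num': 10}
model = Custom(cfg)

x = torch.rand(1, 10, 1, 64, 64)  # (b, t, c, h, w)
y = model(x, future=10)
print(y.shape)  # expected: torch.Size([1, 20, 1, 64, 64]): 10 reconstructed + 10 predicted
```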
/experiments/README.md:
--------------------------------------------------------------------------------
1 | # Experiments
2 | This folder stores the experiment models.
3 |
4 | When training with your own model, set the 'experiment_name' tag in the config file to the name of your folder here.
5 |
6 | If you don't specify the 'arch' tag in the config file, the training program will automatically load the Custom class from $ROOT/experiments/'experiment_name'/custom.py as the model implementation (see the sketch after this file).
7 |
8 | If you want to use another arch, you may need to change the training code for your own arch.
9 |
--------------------------------------------------------------------------------
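A minimal sketch of how a training script could resolve the Custom class from the config; this is an assumption about the mechanism described above, not the repo's actual code:

```python
# Hypothetical dynamic-import lookup matching the convention described above.
import importlib

def load_model_class(experiment_name, arch='Custom'):
    # 'PredRNN' -> experiments.PredRNN.custom, then grab the class named `arch`
    module = importlib.import_module('experiments.{}.custom'.format(experiment_name))
    return getattr(module, arch)

ModelClass = load_model_class('video_prediction_demo')  # -> the Custom class
```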
/experiments/video_prediction_demo/custom.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from model.convRNN import ConvGRUCell, ConvLSTMCell
4 |
5 | class Custom(nn.Module):
6 | def __init__(self, cfg):
7 | """
  8 |         Everything we need is in cfg,
  9 |         accessed as cfg["par_name"]
10 | """
11 | super(Custom, self).__init__()
12 | self.input_size=cfg['input_size']
13 | self.hidden_dim=cfg['hidden_dim']
14 | self.input_dim=cfg['input_dim']
15 |
16 | self.convlstm_1 = ConvLSTMCell(input_size=self.input_size,
17 | input_dim=self.input_dim,
18 | hidden_dim=self.hidden_dim,
19 | kernel_size=(3, 3),
20 | bias=True)
21 |
22 | self.convlstm_2 = ConvLSTMCell(input_size=self.input_size,
23 | input_dim=self.hidden_dim,
24 | hidden_dim=self.hidden_dim,
25 | kernel_size=(3, 3),
26 | bias=True)
27 |
28 | self.conv2d = nn.Conv2d(in_channels=self.hidden_dim,
29 | out_channels=self.input_dim,
30 | bias=False,
31 | kernel_size=(3, 3),
32 | padding=1)
33 |
34 | self.conv3d = nn.Conv3d(in_channels=self.hidden_dim,
35 | out_channels=self.input_dim,
36 | bias=False,
37 | kernel_size=(3, 3, 3),
38 | padding=1)
39 |
40 |
41 | def forward(self, input, hidden=None, future=10):
42 | """
43 | input: (b,t,c,h,w)
44 | hidden: hidden of last time (b, c_hidden, h, w)
45 | future: number of future frame to predict
46 | """
47 | # Init hidden
48 | if hidden is None:
49 | h_t, c_t = self.convlstm_1.init_hidden(input.size(0))
50 |             h_t2, c_t2 = self.convlstm_2.init_hidden(input.size(0))
51 | else:
52 | # TODO: build a stateful model
53 | raise NotImplementedError
54 |
55 | outputs = []
56 |
57 | seq_len = input.size(1)
58 |
59 | for t in range(seq_len):
60 |
61 | h_t, c_t = self.convlstm_1(input_tensor=input[:,t,:,:,:],
62 | cur_state=[h_t, c_t])
63 |
64 | h_t2, c_t2 = self.convlstm_2(input_tensor=h_t,
65 | cur_state=[h_t2, c_t2])
66 |
67 | output = self.conv2d(h_t2)
68 |             output = torch.sigmoid(output)
69 | outputs += [output]
70 |
71 | for i in range(future):
72 |
73 | h_t, c_t = self.convlstm_1(input_tensor=output,
74 | cur_state=[h_t, c_t])
75 |
76 | h_t2, c_t2 = self.convlstm_2(input_tensor=h_t,
77 | cur_state=[h_t2, c_t2])
78 |
79 | output = self.conv2d(h_t2)
80 |             output = torch.sigmoid(output)
81 | outputs += [output]
82 |
83 | outputs = torch.stack(outputs, 1)
84 |
85 | return outputs
86 |
87 |
--------------------------------------------------------------------------------
/logs/PredRNN.log:
--------------------------------------------------------------------------------
1 | [2019-09-18 14:53:23,691-config_helper.py# 61] [DEBUG] Used config:
2 | {'meta': {'experiment_path': 'experiments', 'arch': 'Custom', 'board_path': 'board', 'experiment_name': 'PredRNN'}, 'train': {'epoches': 50, 'batch_size': 1, 'lr': 0.001, 'print_freq': 20}, 'model': {'input_size': [64, 64], 'input_dim': 1, 'hidden_dim': 64, 'kernel_size': [3, 3], 'input_num': 10}}
3 | [2019-09-18 14:54:32,258-train_video_prediction_KTHDataset.py# 91] [DEBUG] ==>>> total trainning batch number: 186884
4 | [2019-09-18 14:54:32,258-train_video_prediction_KTHDataset.py# 92] [DEBUG] ==>>> total testing batch number: 102124
5 | [2019-09-18 14:54:34,147-train_video_prediction_KTHDataset.py#127] [DEBUG] Input: torch.Size([1, 10, 1, 64, 64])
6 | [2019-09-18 14:54:34,147-train_video_prediction_KTHDataset.py#128] [DEBUG] --- Sample
7 | [2019-09-18 14:54:34,147-train_video_prediction_KTHDataset.py#129] [DEBUG] Target: torch.Size([1, 10, 1, 64, 64])
8 | [2019-09-18 14:54:43,285-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][20/186884] step_time: 0.348360 (0.453101) train_loss: 0.517230 (0.639866) test_loss: 0.726505 (0.766993) train_metric: 0.634010 (0.601406)
9 | [2019-09-18 14:54:43,285-log_helper.py# 77] [INFO] Progress: 20 / 9344200 [0%], Speed: 0.453 s/iter, ETA 49:00:04 (D:H:M)
10 |
11 | [2019-09-18 14:54:49,939-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][40/186884] step_time: 0.325024 (0.390925) train_loss: 0.705032 (0.650462) test_loss: 0.703754 (0.754136) train_metric: 0.632030 (0.652420)
12 | [2019-09-18 14:54:49,939-log_helper.py# 77] [INFO] Progress: 40 / 9344200 [0%], Speed: 0.391 s/iter, ETA 42:06:41 (D:H:M)
13 |
14 | [2019-09-18 14:54:56,857-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][60/186884] step_time: 0.365928 (0.374611) train_loss: 0.749587 (0.650766) test_loss: 0.757015 (0.746311) train_metric: 0.698102 (0.654956)
15 | [2019-09-18 14:54:56,857-log_helper.py# 77] [INFO] Progress: 60 / 9344200 [0%], Speed: 0.375 s/iter, ETA 40:12:20 (D:H:M)
16 |
17 | [2019-09-18 14:55:03,531-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][80/186884] step_time: 0.342730 (0.363407) train_loss: 0.698763 (0.652654) test_loss: 0.704605 (0.737109) train_metric: 0.719438 (0.666852)
18 | [2019-09-18 14:55:03,531-log_helper.py# 77] [INFO] Progress: 80 / 9344200 [0%], Speed: 0.363 s/iter, ETA 39:07:15 (D:H:M)
19 |
20 | [2019-09-18 14:55:10,395-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][100/186884] step_time: 0.322581 (0.358561) train_loss: 0.609662 (0.657957) test_loss: 0.693214 (0.729694) train_metric: 0.631378 (0.665684)
21 | [2019-09-18 14:55:10,395-log_helper.py# 77] [INFO] Progress: 100 / 9344200 [0%], Speed: 0.359 s/iter, ETA 38:18:40 (D:H:M)
22 |
23 | [2019-09-18 14:55:17,484-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][120/186884] step_time: 0.320231 (0.357220) train_loss: 0.711436 (0.655228) test_loss: 0.736374 (0.727588) train_metric: 0.701539 (0.668283)
24 | [2019-09-18 14:55:17,484-log_helper.py# 77] [INFO] Progress: 120 / 9344200 [0%], Speed: 0.357 s/iter, ETA 38:15:11 (D:H:M)
25 |
26 | [2019-09-18 14:55:24,316-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][140/186884] step_time: 0.326212 (0.354441) train_loss: 0.702185 (0.656728) test_loss: 0.676167 (0.725431) train_metric: 0.767893 (0.676842)
27 | [2019-09-18 14:55:24,316-log_helper.py# 77] [INFO] Progress: 140 / 9344200 [0%], Speed: 0.354 s/iter, ETA 38:07:58 (D:H:M)
28 |
29 | [2019-09-18 14:55:31,197-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][160/186884] step_time: 0.338116 (0.352668) train_loss: 0.689022 (0.659467) test_loss: 0.701217 (0.721752) train_metric: 0.686003 (0.674363)
30 | [2019-09-18 14:55:31,197-log_helper.py# 77] [INFO] Progress: 160 / 9344200 [0%], Speed: 0.353 s/iter, ETA 38:03:22 (D:H:M)
31 |
32 | [2019-09-18 14:55:37,944-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][180/186884] step_time: 0.328868 (0.350562) train_loss: 0.693971 (0.659177) test_loss: 0.709595 (0.719115) train_metric: 0.755272 (0.679103)
33 | [2019-09-18 14:55:37,944-log_helper.py# 77] [INFO] Progress: 180 / 9344200 [0%], Speed: 0.351 s/iter, ETA 37:21:54 (D:H:M)
34 |
35 | [2019-09-18 14:55:46,409-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][200/186884] step_time: 0.362261 (0.349478) train_loss: 0.724213 (0.658869) test_loss: 0.729188 (0.718298) train_metric: 0.801228 (0.681950)
36 | [2019-09-18 14:55:46,420-log_helper.py# 77] [INFO] Progress: 200 / 9344200 [0%], Speed: 0.349 s/iter, ETA 37:19:05 (D:H:M)
37 |
38 | [2019-09-18 14:55:53,454-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][220/186884] step_time: 0.360313 (0.349261) train_loss: 0.642676 (0.659909) test_loss: 0.688910 (0.716734) train_metric: 0.673523 (0.684866)
39 | [2019-09-18 14:55:53,454-log_helper.py# 77] [INFO] Progress: 220 / 9344200 [0%], Speed: 0.349 s/iter, ETA 37:18:31 (D:H:M)
40 |
41 | [2019-09-18 14:56:00,186-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][240/186884] step_time: 0.328118 (0.347891) train_loss: 0.704590 (0.660106) test_loss: 0.658484 (0.715130) train_metric: 0.650349 (0.688695)
42 | [2019-09-18 14:56:00,186-log_helper.py# 77] [INFO] Progress: 240 / 9344200 [0%], Speed: 0.348 s/iter, ETA 37:14:57 (D:H:M)
43 |
44 | [2019-09-18 14:56:06,844-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][260/186884] step_time: 0.327543 (0.346452) train_loss: 0.829270 (0.651603) test_loss: 0.804557 (0.718573) train_metric: 0.526103 (0.689696)
45 | [2019-09-18 14:56:06,845-log_helper.py# 77] [INFO] Progress: 260 / 9344200 [0%], Speed: 0.346 s/iter, ETA 37:11:13 (D:H:M)
46 |
47 | [2019-09-18 14:56:13,492-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][280/186884] step_time: 0.329523 (0.345174) train_loss: 0.735278 (0.651227) test_loss: 0.701229 (0.722291) train_metric: 0.648790 (0.687422)
48 | [2019-09-18 14:56:13,492-log_helper.py# 77] [INFO] Progress: 280 / 9344200 [0%], Speed: 0.345 s/iter, ETA 37:07:54 (D:H:M)
49 |
50 | [2019-09-18 14:56:20,162-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][300/186884] step_time: 0.327708 (0.344143) train_loss: 0.696382 (0.651353) test_loss: 0.785641 (0.722203) train_metric: 0.621929 (0.687887)
51 | [2019-09-18 14:56:20,163-log_helper.py# 77] [INFO] Progress: 300 / 9344200 [0%], Speed: 0.344 s/iter, ETA 37:05:14 (D:H:M)
52 |
53 | [2019-09-18 14:56:26,823-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][320/186884] step_time: 0.326851 (0.343209) train_loss: 0.694889 (0.647206) test_loss: 0.781595 (0.722631) train_metric: 0.757142 (0.691552)
54 | [2019-09-18 14:56:26,824-log_helper.py# 77] [INFO] Progress: 320 / 9344200 [0%], Speed: 0.343 s/iter, ETA 37:02:48 (D:H:M)
55 |
56 | [2019-09-18 14:56:33,527-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][340/186884] step_time: 0.331775 (0.342504) train_loss: 0.758644 (0.649206) test_loss: 0.691193 (0.721021) train_metric: 0.619610 (0.689754)
57 | [2019-09-18 14:56:33,528-log_helper.py# 77] [INFO] Progress: 340 / 9344200 [0%], Speed: 0.343 s/iter, ETA 37:00:58 (D:H:M)
58 |
59 | [2019-09-18 14:56:40,402-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][360/186884] step_time: 0.341495 (0.342348) train_loss: 0.695488 (0.650099) test_loss: 0.701947 (0.720252) train_metric: 0.708636 (0.689841)
60 | [2019-09-18 14:56:40,402-log_helper.py# 77] [INFO] Progress: 360 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:34 (D:H:M)
61 |
62 | [2019-09-18 14:56:47,256-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][380/186884] step_time: 0.342752 (0.342159) train_loss: 0.459075 (0.649123) test_loss: 0.681666 (0.719431) train_metric: 0.741584 (0.691283)
63 | [2019-09-18 14:56:47,257-log_helper.py# 77] [INFO] Progress: 380 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:04 (D:H:M)
64 |
65 | [2019-09-18 14:56:54,312-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][400/186884] step_time: 0.331604 (0.342029) train_loss: 0.554359 (0.648302) test_loss: 0.660817 (0.718903) train_metric: 0.545888 (0.691420)
66 | [2019-09-18 14:56:54,312-log_helper.py# 77] [INFO] Progress: 400 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:23:44 (D:H:M)
67 |
68 | [2019-09-18 14:57:01,194-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][420/186884] step_time: 0.335888 (0.341942) train_loss: 0.544910 (0.649137) test_loss: 0.691596 (0.718279) train_metric: 0.750356 (0.693156)
69 | [2019-09-18 14:57:01,194-log_helper.py# 77] [INFO] Progress: 420 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:23:30 (D:H:M)
70 |
71 | [2019-09-18 14:57:08,113-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][440/186884] step_time: 0.357398 (0.341952) train_loss: 0.623607 (0.648168) test_loss: 0.781761 (0.718552) train_metric: 0.722056 (0.694793)
72 | [2019-09-18 14:57:08,114-log_helper.py# 77] [INFO] Progress: 440 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:23:31 (D:H:M)
73 |
74 | [2019-09-18 14:57:15,241-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][460/186884] step_time: 0.326790 (0.342401) train_loss: 0.604105 (0.648317) test_loss: 0.752652 (0.718479) train_metric: 0.590202 (0.696774)
75 | [2019-09-18 14:57:15,241-log_helper.py# 77] [INFO] Progress: 460 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:41 (D:H:M)
76 |
77 | [2019-09-18 14:57:22,112-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][480/186884] step_time: 0.344495 (0.342278) train_loss: 0.425265 (0.648289) test_loss: 0.756242 (0.718228) train_metric: 0.761326 (0.697923)
78 | [2019-09-18 14:57:22,112-log_helper.py# 77] [INFO] Progress: 480 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:22 (D:H:M)
79 |
80 | [2019-09-18 14:57:28,910-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][500/186884] step_time: 0.330012 (0.342030) train_loss: 0.376890 (0.645566) test_loss: 0.669748 (0.718448) train_metric: 0.746557 (0.700582)
81 | [2019-09-18 14:57:28,911-log_helper.py# 77] [INFO] Progress: 500 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:23:43 (D:H:M)
82 |
83 | [2019-09-18 14:57:35,588-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][520/186884] step_time: 0.332534 (0.341573) train_loss: 0.489950 (0.643250) test_loss: 0.758088 (0.719873) train_metric: 0.697546 (0.702059)
84 | [2019-09-18 14:57:35,588-log_helper.py# 77] [INFO] Progress: 520 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:22:32 (D:H:M)
85 |
86 | [2019-09-18 14:57:42,264-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][540/186884] step_time: 0.332781 (0.341147) train_loss: 0.733589 (0.644587) test_loss: 0.689811 (0.721462) train_metric: 0.687096 (0.701822)
87 | [2019-09-18 14:57:42,264-log_helper.py# 77] [INFO] Progress: 540 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:25 (D:H:M)
88 |
89 | [2019-09-18 14:57:49,106-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][560/186884] step_time: 0.325249 (0.341042) train_loss: 0.701638 (0.644935) test_loss: 0.744144 (0.720296) train_metric: 0.613428 (0.703259)
90 | [2019-09-18 14:57:49,106-log_helper.py# 77] [INFO] Progress: 560 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:09 (D:H:M)
91 |
92 | [2019-09-18 14:57:56,151-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][580/186884] step_time: 0.351255 (0.341286) train_loss: 0.498550 (0.644607) test_loss: 0.710355 (0.719491) train_metric: 0.714513 (0.704282)
93 | [2019-09-18 14:57:56,151-log_helper.py# 77] [INFO] Progress: 580 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:47 (D:H:M)
94 |
95 | [2019-09-18 14:58:03,279-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][600/186884] step_time: 0.330402 (0.341356) train_loss: 0.642661 (0.644646) test_loss: 0.682568 (0.718929) train_metric: 0.423996 (0.704366)
96 | [2019-09-18 14:58:03,279-log_helper.py# 77] [INFO] Progress: 600 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:58 (D:H:M)
97 |
98 | [2019-09-18 14:58:10,190-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][620/186884] step_time: 0.350517 (0.341366) train_loss: 0.732954 (0.644604) test_loss: 0.710484 (0.718348) train_metric: 0.764885 (0.704025)
99 | [2019-09-18 14:58:10,190-log_helper.py# 77] [INFO] Progress: 620 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:59 (D:H:M)
100 |
101 | [2019-09-18 14:58:17,084-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][640/186884] step_time: 0.327558 (0.341352) train_loss: 0.672510 (0.643831) test_loss: 0.740274 (0.718899) train_metric: 0.816277 (0.705024)
102 | [2019-09-18 14:58:17,084-log_helper.py# 77] [INFO] Progress: 640 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:57 (D:H:M)
103 |
104 | [2019-09-18 14:58:23,815-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][660/186884] step_time: 0.334197 (0.341089) train_loss: 0.698336 (0.644496) test_loss: 0.692642 (0.718993) train_metric: 0.766523 (0.705627)
105 | [2019-09-18 14:58:23,815-log_helper.py# 77] [INFO] Progress: 660 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:16 (D:H:M)
106 |
107 | [2019-09-18 14:58:30,654-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][680/186884] step_time: 0.341605 (0.341003) train_loss: 0.714170 (0.645157) test_loss: 0.686007 (0.718547) train_metric: 0.733276 (0.706032)
108 | [2019-09-18 14:58:30,655-log_helper.py# 77] [INFO] Progress: 680 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:02 (D:H:M)
109 |
110 | [2019-09-18 14:58:37,676-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][700/186884] step_time: 0.342468 (0.341181) train_loss: 0.526295 (0.645492) test_loss: 0.714966 (0.717509) train_metric: 0.641062 (0.705742)
111 | [2019-09-18 14:58:37,676-log_helper.py# 77] [INFO] Progress: 700 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:30 (D:H:M)
112 |
113 | [2019-09-18 14:58:44,363-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][720/186884] step_time: 0.351441 (0.340888) train_loss: 0.737629 (0.646042) test_loss: 0.697693 (0.717112) train_metric: 0.673805 (0.705640)
114 | [2019-09-18 14:58:44,363-log_helper.py# 77] [INFO] Progress: 720 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:20:44 (D:H:M)
115 |
116 | [2019-09-18 14:58:51,013-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][740/186884] step_time: 0.320224 (0.340550) train_loss: 0.527757 (0.646331) test_loss: 0.693957 (0.716380) train_metric: 0.702426 (0.706620)
117 | [2019-09-18 14:58:51,013-log_helper.py# 77] [INFO] Progress: 740 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:19:51 (D:H:M)
118 |
119 | [2019-09-18 14:58:57,664-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][760/186884] step_time: 0.325731 (0.340239) train_loss: 0.720300 (0.646139) test_loss: 0.693440 (0.716716) train_metric: 0.679519 (0.707454)
120 | [2019-09-18 14:58:57,664-log_helper.py# 77] [INFO] Progress: 760 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:19:03 (D:H:M)
121 |
122 | [2019-09-18 14:59:04,280-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][780/186884] step_time: 0.338799 (0.339903) train_loss: 0.752751 (0.644377) test_loss: 0.825015 (0.717005) train_metric: 0.753655 (0.708552)
123 | [2019-09-18 14:59:04,281-log_helper.py# 77] [INFO] Progress: 780 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:10 (D:H:M)
124 |
125 | [2019-09-18 14:59:11,211-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][800/186884] step_time: 0.332133 (0.339719) train_loss: 0.560865 (0.642237) test_loss: 0.770177 (0.719286) train_metric: 0.739026 (0.709522)
126 | [2019-09-18 14:59:11,211-log_helper.py# 77] [INFO] Progress: 800 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:42 (D:H:M)
127 |
128 | [2019-09-18 14:59:17,909-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][820/186884] step_time: 0.321504 (0.339511) train_loss: 0.704266 (0.641766) test_loss: 0.709862 (0.719648) train_metric: 0.643010 (0.709938)
129 | [2019-09-18 14:59:17,909-log_helper.py# 77] [INFO] Progress: 820 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:09 (D:H:M)
130 |
131 | [2019-09-18 14:59:24,527-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][840/186884] step_time: 0.320519 (0.339218) train_loss: 0.689155 (0.642408) test_loss: 0.675148 (0.719547) train_metric: 0.645189 (0.710892)
132 | [2019-09-18 14:59:24,528-log_helper.py# 77] [INFO] Progress: 840 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:23 (D:H:M)
133 |
134 | [2019-09-18 14:59:31,113-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][860/186884] step_time: 0.323160 (0.338903) train_loss: 0.726251 (0.641878) test_loss: 0.751375 (0.719551) train_metric: 0.667097 (0.711387)
135 | [2019-09-18 14:59:31,114-log_helper.py# 77] [INFO] Progress: 860 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:15:34 (D:H:M)
136 |
137 | [2019-09-18 14:59:37,721-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][880/186884] step_time: 0.322946 (0.338622) train_loss: 0.806910 (0.640650) test_loss: 0.837398 (0.720516) train_metric: 0.612515 (0.711691)
138 | [2019-09-18 14:59:37,721-log_helper.py# 77] [INFO] Progress: 880 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:14:50 (D:H:M)
139 |
140 | [2019-09-18 14:59:44,405-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][900/186884] step_time: 0.361146 (0.338439) train_loss: 0.600179 (0.639960) test_loss: 0.623951 (0.720080) train_metric: 0.688135 (0.711709)
141 | [2019-09-18 14:59:44,629-log_helper.py# 77] [INFO] Progress: 900 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:14:22 (D:H:M)
142 |
143 | [2019-09-18 14:59:51,389-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][920/186884] step_time: 0.326705 (0.338348) train_loss: 0.367926 (0.638655) test_loss: 0.696554 (0.719794) train_metric: 0.794559 (0.712247)
144 | [2019-09-18 14:59:51,389-log_helper.py# 77] [INFO] Progress: 920 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:14:07 (D:H:M)
145 |
146 | [2019-09-18 14:59:58,221-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][940/186884] step_time: 0.328056 (0.338338) train_loss: 0.460637 (0.637110) test_loss: 0.706048 (0.719162) train_metric: 0.783932 (0.713101)
147 | [2019-09-18 14:59:58,221-log_helper.py# 77] [INFO] Progress: 940 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:14:06 (D:H:M)
148 |
149 | [2019-09-18 15:00:05,211-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][960/186884] step_time: 0.333569 (0.338488) train_loss: 0.544254 (0.636465) test_loss: 0.684148 (0.718836) train_metric: 0.602092 (0.712778)
150 | [2019-09-18 15:00:05,211-log_helper.py# 77] [INFO] Progress: 960 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:14:29 (D:H:M)
151 |
152 | [2019-09-18 15:00:11,890-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][980/186884] step_time: 0.334288 (0.338314) train_loss: 0.724455 (0.636037) test_loss: 0.693153 (0.718335) train_metric: 0.692899 (0.713573)
153 | [2019-09-18 15:00:11,890-log_helper.py# 77] [INFO] Progress: 980 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:14:02 (D:H:M)
154 |
155 | [2019-09-18 15:00:18,839-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1000/186884] step_time: 0.336447 (0.338246) train_loss: 0.779174 (0.635582) test_loss: 0.946605 (0.718537) train_metric: 0.587165 (0.714475)
156 | [2019-09-18 15:00:18,839-log_helper.py# 77] [INFO] Progress: 1000 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:13:51 (D:H:M)
157 |
158 | [2019-09-18 15:00:25,975-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1020/186884] step_time: 0.360772 (0.338534) train_loss: 0.694328 (0.634908) test_loss: 0.656792 (0.718328) train_metric: 0.680472 (0.714927)
159 | [2019-09-18 15:00:25,976-log_helper.py# 77] [INFO] Progress: 1020 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:14:36 (D:H:M)
160 |
161 | [2019-09-18 15:00:32,795-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1040/186884] step_time: 0.330965 (0.338509) train_loss: 0.695144 (0.633879) test_loss: 0.683907 (0.717665) train_metric: 0.728950 (0.716356)
162 | [2019-09-18 15:00:32,796-log_helper.py# 77] [INFO] Progress: 1040 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:14:32 (D:H:M)
163 |
164 | [2019-09-18 15:00:39,550-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1060/186884] step_time: 0.336396 (0.338425) train_loss: 0.674261 (0.633220) test_loss: 0.691664 (0.717206) train_metric: 0.872526 (0.717470)
165 | [2019-09-18 15:00:39,550-log_helper.py# 77] [INFO] Progress: 1060 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:14:19 (D:H:M)
166 |
167 | [2019-09-18 15:00:46,327-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1080/186884] step_time: 0.344196 (0.338364) train_loss: 0.663008 (0.632017) test_loss: 0.695228 (0.716542) train_metric: 0.850566 (0.718616)
168 | [2019-09-18 15:00:46,327-log_helper.py# 77] [INFO] Progress: 1080 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:14:09 (D:H:M)
169 |
170 | [2019-09-18 15:00:53,110-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1100/186884] step_time: 0.340802 (0.338309) train_loss: 0.658749 (0.631997) test_loss: 0.664278 (0.717305) train_metric: 0.775733 (0.719373)
171 | [2019-09-18 15:00:53,110-log_helper.py# 77] [INFO] Progress: 1100 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:14:00 (D:H:M)
172 |
173 | [2019-09-18 15:00:59,860-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1120/186884] step_time: 0.337368 (0.338229) train_loss: 0.695592 (0.632056) test_loss: 0.716024 (0.716920) train_metric: 0.663405 (0.719508)
174 | [2019-09-18 15:00:59,860-log_helper.py# 77] [INFO] Progress: 1120 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:13:48 (D:H:M)
175 |
176 | [2019-09-18 15:01:06,615-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1140/186884] step_time: 0.336736 (0.338152) train_loss: 0.678107 (0.632063) test_loss: 0.694422 (0.716378) train_metric: 0.731002 (0.719923)
177 | [2019-09-18 15:01:06,615-log_helper.py# 77] [INFO] Progress: 1140 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:13:36 (D:H:M)
178 |
179 | [2019-09-18 15:01:13,631-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1160/186884] step_time: 0.325226 (0.338303) train_loss: 0.439417 (0.630789) test_loss: 0.908915 (0.716867) train_metric: 0.696818 (0.720913)
180 | [2019-09-18 15:01:13,631-log_helper.py# 77] [INFO] Progress: 1160 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:13:59 (D:H:M)
181 |
182 | [2019-09-18 15:01:20,667-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1180/186884] step_time: 0.349917 (0.338465) train_loss: 0.670865 (0.630503) test_loss: 0.686611 (0.716239) train_metric: 0.783299 (0.721519)
183 | [2019-09-18 15:01:20,667-log_helper.py# 77] [INFO] Progress: 1180 / 9344200 [0%], Speed: 0.338 s/iter, ETA 36:14:24 (D:H:M)
184 |
185 | [2019-09-18 15:01:27,970-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1200/186884] step_time: 0.362407 (0.338706) train_loss: 0.676892 (0.629714) test_loss: 0.686181 (0.715627) train_metric: 0.840240 (0.722423)
186 | [2019-09-18 15:01:27,981-log_helper.py# 77] [INFO] Progress: 1200 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:15:02 (D:H:M)
187 |
188 | [2019-09-18 15:01:35,004-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1220/186884] step_time: 0.345468 (0.338837) train_loss: 0.390380 (0.629406) test_loss: 0.688224 (0.715045) train_metric: 0.767983 (0.723250)
189 | [2019-09-18 15:01:35,004-log_helper.py# 77] [INFO] Progress: 1220 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:15:22 (D:H:M)
190 |
191 | [2019-09-18 15:01:42,263-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1240/186884] step_time: 0.382801 (0.339156) train_loss: 0.679508 (0.628850) test_loss: 0.687383 (0.714595) train_metric: 0.758646 (0.723941)
192 | [2019-09-18 15:01:42,263-log_helper.py# 77] [INFO] Progress: 1240 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:12 (D:H:M)
193 |
194 | [2019-09-18 15:01:49,432-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1260/186884] step_time: 0.348366 (0.339395) train_loss: 0.667521 (0.628094) test_loss: 0.690587 (0.714084) train_metric: 0.875762 (0.725135)
195 | [2019-09-18 15:01:49,432-log_helper.py# 77] [INFO] Progress: 1260 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:49 (D:H:M)
196 |
197 | [2019-09-18 15:01:56,514-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1280/186884] step_time: 0.363514 (0.339564) train_loss: 0.677752 (0.628134) test_loss: 0.682729 (0.713557) train_metric: 0.821317 (0.726152)
198 | [2019-09-18 15:01:56,515-log_helper.py# 77] [INFO] Progress: 1280 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:15 (D:H:M)
199 |
200 | [2019-09-18 15:02:03,596-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1300/186884] step_time: 0.358074 (0.339724) train_loss: 0.429435 (0.627243) test_loss: 0.689146 (0.713027) train_metric: 0.839271 (0.727396)
201 | [2019-09-18 15:02:03,597-log_helper.py# 77] [INFO] Progress: 1300 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:40 (D:H:M)
202 |
203 | [2019-09-18 15:02:10,607-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1320/186884] step_time: 0.325676 (0.339829) train_loss: 0.663904 (0.627961) test_loss: 0.660934 (0.712561) train_metric: 0.833197 (0.728431)
204 | [2019-09-18 15:02:10,607-log_helper.py# 77] [INFO] Progress: 1320 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:56 (D:H:M)
205 |
206 | [2019-09-18 15:02:17,279-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1340/186884] step_time: 0.326099 (0.339680) train_loss: 0.672247 (0.626640) test_loss: 0.690298 (0.712100) train_metric: 0.900709 (0.729749)
207 | [2019-09-18 15:02:17,279-log_helper.py# 77] [INFO] Progress: 1340 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:33 (D:H:M)
208 |
209 | [2019-09-18 15:02:23,990-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1360/186884] step_time: 0.331624 (0.339563) train_loss: 0.675758 (0.625801) test_loss: 0.697068 (0.711678) train_metric: 0.858995 (0.730934)
210 | [2019-09-18 15:02:23,990-log_helper.py# 77] [INFO] Progress: 1360 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:14 (D:H:M)
211 |
212 | [2019-09-18 15:02:30,869-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1380/186884] step_time: 0.326340 (0.339570) train_loss: 0.680005 (0.625467) test_loss: 0.688249 (0.711285) train_metric: 0.876767 (0.731401)
213 | [2019-09-18 15:02:30,869-log_helper.py# 77] [INFO] Progress: 1380 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:15 (D:H:M)
214 |
215 | [2019-09-18 15:02:38,108-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1400/186884] step_time: 0.326065 (0.339700) train_loss: 0.681127 (0.625200) test_loss: 0.656816 (0.710938) train_metric: 0.657701 (0.732169)
216 | [2019-09-18 15:02:38,109-log_helper.py# 77] [INFO] Progress: 1400 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:35 (D:H:M)
217 |
218 | [2019-09-18 15:02:45,164-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1420/186884] step_time: 0.334243 (0.339827) train_loss: 0.682113 (0.624529) test_loss: 0.689582 (0.710516) train_metric: 0.752131 (0.733044)
219 | [2019-09-18 15:02:45,165-log_helper.py# 77] [INFO] Progress: 1420 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:55 (D:H:M)
220 |
221 | [2019-09-18 15:02:52,303-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1440/186884] step_time: 0.340621 (0.340008) train_loss: 0.687102 (0.624059) test_loss: 0.710522 (0.710150) train_metric: 0.753485 (0.733835)
222 | [2019-09-18 15:02:52,304-log_helper.py# 77] [INFO] Progress: 1440 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:23 (D:H:M)
223 |
224 | [2019-09-18 15:02:59,411-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1460/186884] step_time: 0.372243 (0.340167) train_loss: 0.332800 (0.622785) test_loss: 0.664642 (0.709726) train_metric: 0.887718 (0.734974)
225 | [2019-09-18 15:02:59,412-log_helper.py# 77] [INFO] Progress: 1460 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:48 (D:H:M)
226 |
227 | [2019-09-18 15:03:06,598-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1480/186884] step_time: 0.357926 (0.340370) train_loss: 0.689430 (0.622402) test_loss: 0.680774 (0.709453) train_metric: 0.778945 (0.735702)
228 | [2019-09-18 15:03:06,598-log_helper.py# 77] [INFO] Progress: 1480 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:19:19 (D:H:M)
229 |
230 | [2019-09-18 15:03:13,909-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1500/186884] step_time: 0.339111 (0.340653) train_loss: 0.675773 (0.621979) test_loss: 0.667480 (0.709063) train_metric: 0.880763 (0.736269)
231 | [2019-09-18 15:03:13,909-log_helper.py# 77] [INFO] Progress: 1500 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:20:03 (D:H:M)
232 |
233 | [2019-09-18 15:03:21,057-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1520/186884] step_time: 0.364453 (0.340820) train_loss: 0.672631 (0.622101) test_loss: 0.674089 (0.708602) train_metric: 0.831880 (0.736987)
234 | [2019-09-18 15:03:21,057-log_helper.py# 77] [INFO] Progress: 1520 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:20:29 (D:H:M)
235 |
236 | [2019-09-18 15:03:28,037-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1540/186884] step_time: 0.352865 (0.340879) train_loss: 0.689946 (0.621799) test_loss: 0.699086 (0.708267) train_metric: 0.653978 (0.737975)
237 | [2019-09-18 15:03:28,037-log_helper.py# 77] [INFO] Progress: 1540 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:20:38 (D:H:M)
238 |
239 | [2019-09-18 15:03:35,172-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1560/186884] step_time: 0.331213 (0.341029) train_loss: 0.421284 (0.620587) test_loss: 0.663021 (0.707866) train_metric: 0.763609 (0.738657)
240 | [2019-09-18 15:03:35,173-log_helper.py# 77] [INFO] Progress: 1560 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:01 (D:H:M)
241 |
242 | [2019-09-18 15:03:41,732-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1580/186884] step_time: 0.354648 (0.340815) train_loss: 0.668576 (0.619944) test_loss: 0.687252 (0.707630) train_metric: 0.873309 (0.739442)
243 | [2019-09-18 15:03:41,732-log_helper.py# 77] [INFO] Progress: 1580 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:20:28 (D:H:M)
244 |
245 | [2019-09-18 15:03:48,438-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1600/186884] step_time: 0.320349 (0.340596) train_loss: 0.297780 (0.619581) test_loss: 0.655914 (0.707292) train_metric: 0.912116 (0.740484)
246 | [2019-09-18 15:03:48,438-log_helper.py# 77] [INFO] Progress: 1600 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:19:54 (D:H:M)
247 |
248 | [2019-09-18 15:03:55,007-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1620/186884] step_time: 0.320436 (0.340397) train_loss: 0.323908 (0.619266) test_loss: 0.692299 (0.706987) train_metric: 0.837286 (0.741165)
249 | [2019-09-18 15:03:55,008-log_helper.py# 77] [INFO] Progress: 1620 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:19:23 (D:H:M)
250 |
251 | [2019-09-18 15:04:01,695-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1640/186884] step_time: 0.346338 (0.340280) train_loss: 0.308891 (0.619155) test_loss: 0.683858 (0.706657) train_metric: 0.870512 (0.741726)
252 | [2019-09-18 15:04:01,695-log_helper.py# 77] [INFO] Progress: 1640 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:19:04 (D:H:M)
253 |
254 | [2019-09-18 15:04:08,740-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1660/186884] step_time: 0.330658 (0.340381) train_loss: 0.421766 (0.618985) test_loss: 0.689872 (0.706273) train_metric: 0.856727 (0.742567)
255 | [2019-09-18 15:04:08,741-log_helper.py# 77] [INFO] Progress: 1660 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:19:20 (D:H:M)
256 |
257 | [2019-09-18 15:04:15,354-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1680/186884] step_time: 0.321047 (0.340220) train_loss: 0.653995 (0.618826) test_loss: 0.682240 (0.705931) train_metric: 0.851771 (0.743323)
258 | [2019-09-18 15:04:15,354-log_helper.py# 77] [INFO] Progress: 1680 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:55 (D:H:M)
259 |
260 | [2019-09-18 15:04:21,918-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1700/186884] step_time: 0.320628 (0.340035) train_loss: 0.642566 (0.617922) test_loss: 0.700911 (0.705664) train_metric: 0.871956 (0.744413)
261 | [2019-09-18 15:04:21,918-log_helper.py# 77] [INFO] Progress: 1700 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:26 (D:H:M)
262 |
263 | [2019-09-18 15:04:28,429-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1720/186884] step_time: 0.337839 (0.339823) train_loss: 0.660722 (0.617243) test_loss: 0.662977 (0.705349) train_metric: 0.861413 (0.745253)
264 | [2019-09-18 15:04:28,429-log_helper.py# 77] [INFO] Progress: 1720 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:53 (D:H:M)
265 |
266 | [2019-09-18 15:04:35,478-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1740/186884] step_time: 0.360383 (0.339923) train_loss: 0.691947 (0.616715) test_loss: 0.666526 (0.705011) train_metric: 0.654306 (0.745684)
267 | [2019-09-18 15:04:35,478-log_helper.py# 77] [INFO] Progress: 1740 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:08 (D:H:M)
268 |
269 | [2019-09-18 15:04:42,389-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1760/186884] step_time: 0.337389 (0.339943) train_loss: 0.684209 (0.616016) test_loss: 0.667292 (0.704714) train_metric: 0.769740 (0.746238)
270 | [2019-09-18 15:04:42,389-log_helper.py# 77] [INFO] Progress: 1760 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:11 (D:H:M)
271 |
272 | [2019-09-18 15:04:49,033-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1780/186884] step_time: 0.323096 (0.339814) train_loss: 0.428084 (0.615242) test_loss: 0.686182 (0.704434) train_metric: 0.861977 (0.747294)
273 | [2019-09-18 15:04:49,033-log_helper.py# 77] [INFO] Progress: 1780 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:51 (D:H:M)
274 |
275 | [2019-09-18 15:04:56,130-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1800/186884] step_time: 0.399143 (0.339826) train_loss: 0.433885 (0.614590) test_loss: 0.699783 (0.704149) train_metric: 0.835313 (0.747975)
276 | [2019-09-18 15:04:56,131-log_helper.py# 77] [INFO] Progress: 1800 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:53 (D:H:M)
277 |
278 | [2019-09-18 15:05:03,284-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1820/186884] step_time: 0.352691 (0.339977) train_loss: 0.686760 (0.614240) test_loss: 0.693115 (0.703939) train_metric: 0.753715 (0.748748)
279 | [2019-09-18 15:05:03,284-log_helper.py# 77] [INFO] Progress: 1820 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:16 (D:H:M)
280 |
281 | [2019-09-18 15:05:09,916-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1840/186884] step_time: 0.320302 (0.339843) train_loss: 0.320792 (0.613419) test_loss: 0.664572 (0.703682) train_metric: 0.801132 (0.749438)
282 | [2019-09-18 15:05:09,916-log_helper.py# 77] [INFO] Progress: 1840 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:55 (D:H:M)
283 |
284 | [2019-09-18 15:05:16,468-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1860/186884] step_time: 0.324326 (0.339670) train_loss: 0.691821 (0.612435) test_loss: 0.693085 (0.703513) train_metric: 0.733716 (0.750076)
285 | [2019-09-18 15:05:16,468-log_helper.py# 77] [INFO] Progress: 1860 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:28 (D:H:M)
286 |
287 | [2019-09-18 15:05:23,029-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1880/186884] step_time: 0.320946 (0.339505) train_loss: 0.663650 (0.612264) test_loss: 0.654045 (0.703229) train_metric: 0.852761 (0.750837)
288 | [2019-09-18 15:05:23,029-log_helper.py# 77] [INFO] Progress: 1880 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:02 (D:H:M)
289 |
290 | [2019-09-18 15:05:29,588-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1900/186884] step_time: 0.320462 (0.339342) train_loss: 0.658305 (0.611881) test_loss: 0.702369 (0.702931) train_metric: 0.797013 (0.751270)
291 | [2019-09-18 15:05:29,589-log_helper.py# 77] [INFO] Progress: 1900 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:37 (D:H:M)
292 |
293 | [2019-09-18 15:05:36,487-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1920/186884] step_time: 0.337426 (0.339359) train_loss: 0.710683 (0.611742) test_loss: 0.692885 (0.703190) train_metric: 0.743645 (0.751824)
294 | [2019-09-18 15:05:36,487-log_helper.py# 77] [INFO] Progress: 1920 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:39 (D:H:M)
295 |
296 | [2019-09-18 15:05:43,239-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1940/186884] step_time: 0.334291 (0.339302) train_loss: 0.398263 (0.612076) test_loss: 0.708589 (0.702965) train_metric: 0.812421 (0.752197)
297 | [2019-09-18 15:05:43,240-log_helper.py# 77] [INFO] Progress: 1940 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:30 (D:H:M)
298 |
299 | [2019-09-18 15:05:50,051-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1960/186884] step_time: 0.330553 (0.339276) train_loss: 0.676461 (0.612478) test_loss: 0.666192 (0.702931) train_metric: 0.723193 (0.751513)
300 | [2019-09-18 15:05:50,051-log_helper.py# 77] [INFO] Progress: 1960 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:26 (D:H:M)
301 |
302 | [2019-09-18 15:05:56,853-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][1980/186884] step_time: 0.321968 (0.339248) train_loss: 0.681834 (0.612052) test_loss: 1.068004 (0.703063) train_metric: 0.803224 (0.751407)
303 | [2019-09-18 15:05:56,854-log_helper.py# 77] [INFO] Progress: 1980 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:22 (D:H:M)
304 |
305 | [2019-09-18 15:06:04,026-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2000/186884] step_time: 0.382112 (0.339322) train_loss: 0.473564 (0.612679) test_loss: 0.763582 (0.703435) train_metric: 0.779059 (0.751138)
306 | [2019-09-18 15:06:04,026-log_helper.py# 77] [INFO] Progress: 2000 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:33 (D:H:M)
307 |
308 | [2019-09-18 15:06:10,906-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2020/186884] step_time: 0.330272 (0.339332) train_loss: 0.724166 (0.613190) test_loss: 0.745659 (0.703486) train_metric: 0.752029 (0.750356)
309 | [2019-09-18 15:06:10,907-log_helper.py# 77] [INFO] Progress: 2020 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:35 (D:H:M)
310 |
311 | [2019-09-18 15:06:17,700-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2040/186884] step_time: 0.328058 (0.339296) train_loss: 0.733606 (0.613360) test_loss: 0.718025 (0.703615) train_metric: 0.641657 (0.749719)
312 | [2019-09-18 15:06:17,700-log_helper.py# 77] [INFO] Progress: 2040 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:29 (D:H:M)
313 |
314 | [2019-09-18 15:06:24,404-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2060/186884] step_time: 0.327595 (0.339220) train_loss: 0.670299 (0.613747) test_loss: 0.679352 (0.703701) train_metric: 0.797192 (0.749082)
315 | [2019-09-18 15:06:24,404-log_helper.py# 77] [INFO] Progress: 2060 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:17 (D:H:M)
316 |
317 | [2019-09-18 15:06:31,134-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2080/186884] step_time: 0.330226 (0.339156) train_loss: 0.331636 (0.613466) test_loss: 0.696010 (0.703749) train_metric: 0.756392 (0.748847)
318 | [2019-09-18 15:06:31,135-log_helper.py# 77] [INFO] Progress: 2080 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:07 (D:H:M)
319 |
320 | [2019-09-18 15:06:37,851-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2100/186884] step_time: 0.333279 (0.339089) train_loss: 0.679610 (0.613522) test_loss: 0.671792 (0.703519) train_metric: 0.714956 (0.748960)
321 | [2019-09-18 15:06:37,851-log_helper.py# 77] [INFO] Progress: 2100 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:15:56 (D:H:M)
322 |
323 | [2019-09-18 15:06:44,587-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2120/186884] step_time: 0.331695 (0.339032) train_loss: 0.637338 (0.613062) test_loss: 0.691025 (0.703553) train_metric: 0.728809 (0.749325)
324 | [2019-09-18 15:06:44,587-log_helper.py# 77] [INFO] Progress: 2120 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:15:47 (D:H:M)
325 |
326 | [2019-09-18 15:06:51,503-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2140/186884] step_time: 0.358788 (0.339058) train_loss: 0.667919 (0.612593) test_loss: 0.700078 (0.703625) train_metric: 0.773856 (0.749447)
327 | [2019-09-18 15:06:51,504-log_helper.py# 77] [INFO] Progress: 2140 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:15:51 (D:H:M)
328 |
329 | [2019-09-18 15:06:58,600-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2160/186884] step_time: 0.349786 (0.339165) train_loss: 0.696982 (0.613101) test_loss: 0.693890 (0.703417) train_metric: 0.766265 (0.749118)
330 | [2019-09-18 15:06:58,600-log_helper.py# 77] [INFO] Progress: 2160 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:08 (D:H:M)
331 |
332 | [2019-09-18 15:07:05,616-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2180/186884] step_time: 0.331141 (0.339235) train_loss: 0.677303 (0.613621) test_loss: 0.685683 (0.703517) train_metric: 0.818615 (0.748990)
333 | [2019-09-18 15:07:05,617-log_helper.py# 77] [INFO] Progress: 2180 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:18 (D:H:M)
334 |
335 | [2019-09-18 15:07:12,825-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2200/186884] step_time: 0.356738 (0.339341) train_loss: 0.711069 (0.613922) test_loss: 0.700337 (0.703496) train_metric: 0.720510 (0.749076)
336 | [2019-09-18 15:07:13,117-log_helper.py# 77] [INFO] Progress: 2200 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:35 (D:H:M)
337 |
338 | [2019-09-18 15:07:20,139-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2220/186884] step_time: 0.368811 (0.339377) train_loss: 0.709340 (0.614340) test_loss: 0.703405 (0.703536) train_metric: 0.599064 (0.749145)
339 | [2019-09-18 15:07:20,140-log_helper.py# 77] [INFO] Progress: 2220 / 9344200 [0%], Speed: 0.339 s/iter, ETA 36:16:40 (D:H:M)
340 |
341 | [2019-09-18 15:07:27,441-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2240/186884] step_time: 0.367907 (0.339569) train_loss: 0.677962 (0.614681) test_loss: 0.668346 (0.703465) train_metric: 0.734473 (0.748942)
342 | [2019-09-18 15:07:27,441-log_helper.py# 77] [INFO] Progress: 2240 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:10 (D:H:M)
343 |
344 | [2019-09-18 15:07:34,756-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2260/186884] step_time: 0.365262 (0.339764) train_loss: 0.491446 (0.614614) test_loss: 0.759972 (0.703481) train_metric: 0.642830 (0.748549)
345 | [2019-09-18 15:07:34,756-log_helper.py# 77] [INFO] Progress: 2260 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:17:40 (D:H:M)
346 |
347 | [2019-09-18 15:07:41,996-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2280/186884] step_time: 0.346244 (0.339922) train_loss: 0.546154 (0.614915) test_loss: 0.710608 (0.703990) train_metric: 0.577262 (0.747637)
348 | [2019-09-18 15:07:41,996-log_helper.py# 77] [INFO] Progress: 2280 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:05 (D:H:M)
349 |
350 | [2019-09-18 15:07:48,968-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2300/186884] step_time: 0.347959 (0.339963) train_loss: 0.366056 (0.614655) test_loss: 0.774280 (0.704509) train_metric: 0.771952 (0.747273)
351 | [2019-09-18 15:07:48,969-log_helper.py# 77] [INFO] Progress: 2300 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:11 (D:H:M)
352 |
353 | [2019-09-18 15:07:55,979-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2320/186884] step_time: 0.349692 (0.340017) train_loss: 0.691247 (0.615118) test_loss: 0.689029 (0.704589) train_metric: 0.766853 (0.746675)
354 | [2019-09-18 15:07:55,979-log_helper.py# 77] [INFO] Progress: 2320 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:19 (D:H:M)
355 |
356 | [2019-09-18 15:08:03,107-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2340/186884] step_time: 0.354638 (0.340124) train_loss: 0.683198 (0.615275) test_loss: 0.669649 (0.704653) train_metric: 0.774161 (0.746473)
357 | [2019-09-18 15:08:03,107-log_helper.py# 77] [INFO] Progress: 2340 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:18:36 (D:H:M)
358 |
359 | [2019-09-18 15:08:10,421-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2360/186884] step_time: 0.350204 (0.340305) train_loss: 0.678708 (0.615543) test_loss: 0.690328 (0.705077) train_metric: 0.756995 (0.746159)
360 | [2019-09-18 15:08:10,421-log_helper.py# 77] [INFO] Progress: 2360 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:19:04 (D:H:M)
361 |
362 | [2019-09-18 15:08:17,782-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2380/186884] step_time: 0.357844 (0.340499) train_loss: 0.697740 (0.615646) test_loss: 0.702461 (0.705154) train_metric: 0.680055 (0.745879)
363 | [2019-09-18 15:08:17,782-log_helper.py# 77] [INFO] Progress: 2380 / 9344200 [0%], Speed: 0.340 s/iter, ETA 36:19:34 (D:H:M)
364 |
365 | [2019-09-18 15:08:25,197-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2400/186884] step_time: 0.346995 (0.340639) train_loss: 0.672206 (0.615764) test_loss: 0.669343 (0.705044) train_metric: 0.814767 (0.745863)
366 | [2019-09-18 15:08:25,198-log_helper.py# 77] [INFO] Progress: 2400 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:19:56 (D:H:M)
367 |
368 | [2019-09-18 15:08:32,176-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2420/186884] step_time: 0.349961 (0.340675) train_loss: 0.554206 (0.615945) test_loss: 0.691742 (0.705315) train_metric: 0.636597 (0.745688)
369 | [2019-09-18 15:08:32,176-log_helper.py# 77] [INFO] Progress: 2420 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:20:01 (D:H:M)
370 |
371 | [2019-09-18 15:08:39,431-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2440/186884] step_time: 0.367389 (0.340824) train_loss: 0.659442 (0.615977) test_loss: 0.665414 (0.705454) train_metric: 0.765432 (0.745627)
372 | [2019-09-18 15:08:39,431-log_helper.py# 77] [INFO] Progress: 2440 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:20:24 (D:H:M)
373 |
374 | [2019-09-18 15:08:46,602-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2460/186884] step_time: 0.351975 (0.340936) train_loss: 0.695660 (0.616146) test_loss: 0.708749 (0.705628) train_metric: 0.702448 (0.745503)
375 | [2019-09-18 15:08:46,602-log_helper.py# 77] [INFO] Progress: 2460 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:20:42 (D:H:M)
376 |
377 | [2019-09-18 15:08:53,788-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2480/186884] step_time: 0.353857 (0.341053) train_loss: 0.678941 (0.616026) test_loss: 0.738833 (0.705920) train_metric: 0.688962 (0.745352)
378 | [2019-09-18 15:08:53,788-log_helper.py# 77] [INFO] Progress: 2480 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:00 (D:H:M)
379 |
380 | [2019-09-18 15:09:00,935-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2500/186884] step_time: 0.349749 (0.341154) train_loss: 0.698166 (0.615331) test_loss: 0.745251 (0.706057) train_metric: 0.807063 (0.745577)
381 | [2019-09-18 15:09:00,935-log_helper.py# 77] [INFO] Progress: 2500 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:15 (D:H:M)
382 |
383 | [2019-09-18 15:09:08,045-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2520/186884] step_time: 0.350798 (0.341237) train_loss: 0.652172 (0.615233) test_loss: 0.655154 (0.706085) train_metric: 0.809484 (0.745856)
384 | [2019-09-18 15:09:08,045-log_helper.py# 77] [INFO] Progress: 2520 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:28 (D:H:M)
385 |
386 | [2019-09-18 15:09:15,190-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2540/186884] step_time: 0.349326 (0.341333) train_loss: 0.652715 (0.615375) test_loss: 0.739884 (0.705992) train_metric: 0.794064 (0.745953)
387 | [2019-09-18 15:09:15,190-log_helper.py# 77] [INFO] Progress: 2540 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:43 (D:H:M)
388 |
389 | [2019-09-18 15:09:22,290-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2560/186884] step_time: 0.350563 (0.341409) train_loss: 0.691042 (0.615441) test_loss: 0.689507 (0.705977) train_metric: 0.713689 (0.745936)
390 | [2019-09-18 15:09:22,290-log_helper.py# 77] [INFO] Progress: 2560 / 9344200 [0%], Speed: 0.341 s/iter, ETA 36:21:55 (D:H:M)
391 |
392 | [2019-09-18 15:09:29,441-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2580/186884] step_time: 0.354080 (0.341505) train_loss: 0.510097 (0.615288) test_loss: 0.674782 (0.705948) train_metric: 0.678373 (0.745923)
393 | [2019-09-18 15:09:29,442-log_helper.py# 77] [INFO] Progress: 2580 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:22:10 (D:H:M)
394 |
395 | [2019-09-18 15:09:36,771-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2600/186884] step_time: 0.352740 (0.341617) train_loss: 0.650520 (0.615459) test_loss: 0.685542 (0.705881) train_metric: 0.789629 (0.745972)
396 | [2019-09-18 15:09:37,023-log_helper.py# 77] [INFO] Progress: 2600 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:22:27 (D:H:M)
397 |
398 | [2019-09-18 15:09:44,190-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2620/186884] step_time: 0.349205 (0.341681) train_loss: 0.286523 (0.615136) test_loss: 0.672264 (0.705753) train_metric: 0.789198 (0.746104)
399 | [2019-09-18 15:09:44,190-log_helper.py# 77] [INFO] Progress: 2620 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:22:37 (D:H:M)
400 |
401 | [2019-09-18 15:09:51,279-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2640/186884] step_time: 0.339761 (0.341748) train_loss: 0.662067 (0.615319) test_loss: 0.688089 (0.705590) train_metric: 0.869910 (0.746229)
402 | [2019-09-18 15:09:51,279-log_helper.py# 77] [INFO] Progress: 2640 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:22:47 (D:H:M)
403 |
404 | [2019-09-18 15:09:58,353-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2660/186884] step_time: 0.348954 (0.341809) train_loss: 0.693683 (0.614916) test_loss: 0.664491 (0.705599) train_metric: 0.740675 (0.746525)
405 | [2019-09-18 15:09:58,353-log_helper.py# 77] [INFO] Progress: 2660 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:22:57 (D:H:M)
406 |
407 | [2019-09-18 15:10:05,556-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2680/186884] step_time: 0.340809 (0.341915) train_loss: 0.688855 (0.614764) test_loss: 0.654023 (0.705928) train_metric: 0.741052 (0.746741)
408 | [2019-09-18 15:10:05,557-log_helper.py# 77] [INFO] Progress: 2680 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:23:13 (D:H:M)
409 |
410 | [2019-09-18 15:10:12,493-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2700/186884] step_time: 0.336707 (0.341923) train_loss: 0.676316 (0.614594) test_loss: 0.660201 (0.706302) train_metric: 0.717188 (0.746982)
411 | [2019-09-18 15:10:12,494-log_helper.py# 77] [INFO] Progress: 2700 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:23:14 (D:H:M)
412 |
413 | [2019-09-18 15:10:19,717-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2720/186884] step_time: 0.353641 (0.342035) train_loss: 0.669305 (0.614595) test_loss: 0.660251 (0.706158) train_metric: 0.866923 (0.747219)
414 | [2019-09-18 15:10:19,717-log_helper.py# 77] [INFO] Progress: 2720 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:23:31 (D:H:M)
415 |
416 | [2019-09-18 15:10:26,876-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2740/186884] step_time: 0.352218 (0.342123) train_loss: 0.669822 (0.614284) test_loss: 0.694881 (0.705930) train_metric: 0.853308 (0.747622)
417 | [2019-09-18 15:10:26,876-log_helper.py# 77] [INFO] Progress: 2740 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:23:45 (D:H:M)
418 |
419 | [2019-09-18 15:10:33,931-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2760/186884] step_time: 0.349059 (0.342172) train_loss: 0.693378 (0.614601) test_loss: 0.689324 (0.705747) train_metric: 0.729847 (0.747723)
420 | [2019-09-18 15:10:33,931-log_helper.py# 77] [INFO] Progress: 2760 / 9344200 [0%], Speed: 0.342 s/iter, ETA 36:23:52 (D:H:M)
421 |
422 | [2019-09-18 15:10:40,994-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2780/186884] step_time: 0.359815 (0.342223) train_loss: 0.458687 (0.614783) test_loss: 0.685552 (0.705643) train_metric: 0.700083 (0.747831)
423 | [2019-09-18 15:10:40,995-log_helper.py# 77] [INFO] Progress: 2780 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:00 (D:H:M)
424 |
425 | [2019-09-18 15:10:48,134-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2800/186884] step_time: 0.339121 (0.342246) train_loss: 0.700776 (0.614791) test_loss: 0.668141 (0.705569) train_metric: 0.679171 (0.748081)
426 | [2019-09-18 15:10:48,145-log_helper.py# 77] [INFO] Progress: 2800 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:04 (D:H:M)
427 |
428 | [2019-09-18 15:10:55,116-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2820/186884] step_time: 0.346200 (0.342260) train_loss: 0.413118 (0.614871) test_loss: 0.699926 (0.705413) train_metric: 0.766840 (0.748272)
429 | [2019-09-18 15:10:55,116-log_helper.py# 77] [INFO] Progress: 2820 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:06 (D:H:M)
430 |
431 | [2019-09-18 15:11:02,385-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2840/186884] step_time: 0.341805 (0.342382) train_loss: 0.696310 (0.614687) test_loss: 0.711335 (0.705352) train_metric: 0.682346 (0.748679)
432 | [2019-09-18 15:11:02,385-log_helper.py# 77] [INFO] Progress: 2840 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:25 (D:H:M)
433 |
434 | [2019-09-18 15:11:09,309-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2860/186884] step_time: 0.340345 (0.342380) train_loss: 0.437327 (0.614568) test_loss: 0.688568 (0.705255) train_metric: 0.783514 (0.748995)
435 | [2019-09-18 15:11:09,309-log_helper.py# 77] [INFO] Progress: 2860 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:24 (D:H:M)
436 |
437 | [2019-09-18 15:11:16,226-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2880/186884] step_time: 0.339849 (0.342378) train_loss: 0.663737 (0.614427) test_loss: 0.711036 (0.705127) train_metric: 0.877715 (0.749470)
438 | [2019-09-18 15:11:16,226-log_helper.py# 77] [INFO] Progress: 2880 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:24 (D:H:M)
439 |
440 | [2019-09-18 15:11:23,179-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2900/186884] step_time: 0.341311 (0.342386) train_loss: 0.661277 (0.614378) test_loss: 0.669528 (0.704976) train_metric: 0.802307 (0.749824)
441 | [2019-09-18 15:11:23,180-log_helper.py# 77] [INFO] Progress: 2900 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:25 (D:H:M)
442 |
443 | [2019-09-18 15:11:30,154-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2920/186884] step_time: 0.344871 (0.342400) train_loss: 0.689093 (0.614099) test_loss: 0.646674 (0.704772) train_metric: 0.792014 (0.750070)
444 | [2019-09-18 15:11:30,155-log_helper.py# 77] [INFO] Progress: 2920 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:27 (D:H:M)
445 |
446 | [2019-09-18 15:11:37,073-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2940/186884] step_time: 0.342510 (0.342397) train_loss: 0.680970 (0.613873) test_loss: 0.692124 (0.704620) train_metric: 0.780262 (0.750484)
447 | [2019-09-18 15:11:37,073-log_helper.py# 77] [INFO] Progress: 2940 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:26 (D:H:M)
448 |
449 | [2019-09-18 15:11:44,069-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2960/186884] step_time: 0.344117 (0.342418) train_loss: 0.669855 (0.613792) test_loss: 0.688311 (0.704455) train_metric: 0.882797 (0.750667)
450 | [2019-09-18 15:11:44,070-log_helper.py# 77] [INFO] Progress: 2960 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:30 (D:H:M)
451 |
452 | [2019-09-18 15:11:51,008-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][2980/186884] step_time: 0.342722 (0.342421) train_loss: 0.682214 (0.613727) test_loss: 0.700667 (0.704310) train_metric: 0.840276 (0.751151)
453 | [2019-09-18 15:11:51,008-log_helper.py# 77] [INFO] Progress: 2980 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:30 (D:H:M)
454 |
455 | [2019-09-18 15:11:59,134-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][3000/186884] step_time: 0.356227 (0.342476) train_loss: 0.636109 (0.613467) test_loss: 0.691391 (0.704145) train_metric: 0.794771 (0.751446)
456 | [2019-09-18 15:11:59,134-log_helper.py# 77] [INFO] Progress: 3000 / 9344200 [0%], Speed: 0.342 s/iter, ETA 37:00:38 (D:H:M)
457 |
458 | [2019-09-18 15:12:06,461-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][3020/186884] step_time: 0.347317 (0.342602) train_loss: 0.705074 (0.612945) test_loss: 0.686337 (0.704042) train_metric: 0.559816 (0.751756)
459 | [2019-09-18 15:12:06,461-log_helper.py# 77] [INFO] Progress: 3020 / 9344200 [0%], Speed: 0.343 s/iter, ETA 37:00:58 (D:H:M)
460 |
461 | [2019-09-18 15:12:13,634-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][3040/186884] step_time: 0.356799 (0.342682) train_loss: 0.661920 (0.613174) test_loss: 0.691357 (0.703928) train_metric: 0.911168 (0.752024)
462 | [2019-09-18 15:12:13,634-log_helper.py# 77] [INFO] Progress: 3040 / 9344200 [0%], Speed: 0.343 s/iter, ETA 37:01:10 (D:H:M)
463 |
464 | [2019-09-18 15:12:20,811-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][3060/186884] step_time: 0.352016 (0.342759) train_loss: 0.686521 (0.612864) test_loss: 0.707303 (0.703814) train_metric: 0.866792 (0.752292)
465 | [2019-09-18 15:12:20,811-log_helper.py# 77] [INFO] Progress: 3060 / 9344200 [0%], Speed: 0.343 s/iter, ETA 37:01:22 (D:H:M)
466 |
467 | [2019-09-18 15:12:27,998-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][3080/186884] step_time: 0.360009 (0.342837) train_loss: 0.283406 (0.612936) test_loss: 0.692716 (0.703722) train_metric: 0.867087 (0.752478)
468 | [2019-09-18 15:12:27,998-log_helper.py# 77] [INFO] Progress: 3080 / 9344200 [0%], Speed: 0.343 s/iter, ETA 37:01:34 (D:H:M)
469 |
470 | [2019-09-18 15:12:35,198-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][3100/186884] step_time: 0.371403 (0.342920) train_loss: 0.683332 (0.612833) test_loss: 0.692144 (0.703560) train_metric: 0.790639 (0.752734)
471 | [2019-09-18 15:12:35,198-log_helper.py# 77] [INFO] Progress: 3100 / 9344200 [0%], Speed: 0.343 s/iter, ETA 37:01:47 (D:H:M)
472 |
473 | [2019-09-18 15:12:42,497-train_video_prediction_KTHDataset.py#186] [INFO] Epoch: [1][3120/186884] step_time: 0.352279 (0.343028) train_loss: 0.669642 (0.612649) test_loss: 0.682211 (0.703420) train_metric: 0.828880 (0.752999)
474 | [2019-09-18 15:12:42,498-log_helper.py# 77] [INFO] Progress: 3120 / 9344200 [0%], Speed: 0.343 s/iter, ETA 37:02:04 (D:H:M)
475 |
476 |
--------------------------------------------------------------------------------
/logs/video_prediction_demo.log:
--------------------------------------------------------------------------------
1 | [2019-09-16 22:51:45,904-config_helper.py# 61] [DEBUG] Used config:
2 | {'meta': {'experiment_path': 'experiments', 'arch': 'Custom', 'board_path': 'board', 'experiment_name': 'video_prediction_demo'}, 'train': {'epoches': 50, 'batch_size': 2, 'lr': 1e-05, 'print_freq': 20}, 'model': {'input_size': [64, 64], 'input_dim': 1, 'hidden_dim': 64, 'kernel_size': [3, 3], 'input_num': 10}}
3 | [2019-09-16 22:51:46,405-train_video_prediction_MNIST.py# 86] [DEBUG] ==>>> total trainning batch number: 4500
4 | [2019-09-16 22:51:46,405-train_video_prediction_MNIST.py# 87] [DEBUG] ==>>> total testing batch number: 500
5 | [2019-09-16 22:51:47,746-train_video_prediction_MNIST.py#122] [DEBUG] Input: torch.Size([2, 10, 1, 64, 64])
6 | [2019-09-16 22:51:47,746-train_video_prediction_MNIST.py#123] [DEBUG] --- Sample
7 | [2019-09-16 22:51:47,746-train_video_prediction_MNIST.py#124] [DEBUG] Target: torch.Size([2, 10, 1, 64, 64])
8 | [2019-09-16 22:51:53,019-train_video_prediction_MNIST.py#175] [INFO] Epoch: [1][20/4500] step_time: 0.085659 (0.176182) train_loss: 0.001326 (0.001210) test_loss: 0.722058 (0.660619)
9 | [2019-09-16 22:51:53,019-log_helper.py# 77] [INFO] Progress: 20 / 225000 [0%], Speed: 0.176 s/iter, ETA 0:11:00 (D:H:M)
10 |
11 | [2019-09-16 22:51:55,188-train_video_prediction_MNIST.py#175] [INFO] Epoch: [1][40/4500] step_time: 0.083421 (0.132536) train_loss: 0.000929 (0.001134) test_loss: 0.651724 (0.660121)
12 | [2019-09-16 22:51:55,198-log_helper.py# 77] [INFO] Progress: 40 / 225000 [0%], Speed: 0.133 s/iter, ETA 0:08:16 (D:H:M)
13 |
14 | [2019-09-16 22:51:57,431-train_video_prediction_MNIST.py#175] [INFO] Epoch: [1][60/4500] step_time: 0.091918 (0.118670) train_loss: 0.000937 (0.001083) test_loss: 0.643997 (0.663288)
15 | [2019-09-16 22:51:57,441-log_helper.py# 77] [INFO] Progress: 60 / 225000 [0%], Speed: 0.119 s/iter, ETA 0:07:24 (D:H:M)
16 |
17 | [2019-09-16 22:51:59,710-train_video_prediction_MNIST.py#175] [INFO] Epoch: [1][80/4500] step_time: 0.092299 (0.111957) train_loss: 0.001022 (0.001052) test_loss: 0.651355 (0.661283)
18 | [2019-09-16 22:51:59,720-log_helper.py# 77] [INFO] Progress: 80 / 225000 [0%], Speed: 0.112 s/iter, ETA 0:06:59 (D:H:M)
19 |
20 | [2019-09-16 22:52:01,975-train_video_prediction_MNIST.py#175] [INFO] Epoch: [1][100/4500] step_time: 0.091466 (0.107946) train_loss: 0.001237 (0.001037) test_loss: 0.650562 (0.663106)
21 | [2019-09-16 22:52:01,986-log_helper.py# 77] [INFO] Progress: 100 / 225000 [0%], Speed: 0.108 s/iter, ETA 0:06:44 (D:H:M)
22 |
23 | [2019-09-16 22:52:04,247-train_video_prediction_MNIST.py#175] [INFO] Epoch: [1][120/4500] step_time: 0.090912 (0.105267) train_loss: 0.000851 (0.001020) test_loss: 0.644908 (0.663379)
24 | [2019-09-16 22:52:04,247-log_helper.py# 77] [INFO] Progress: 120 / 225000 [0%], Speed: 0.105 s/iter, ETA 0:06:34 (D:H:M)
25 |
26 | [2019-09-16 22:52:06,537-train_video_prediction_MNIST.py#175] [INFO] Epoch: [1][140/4500] step_time: 0.093379 (0.103381) train_loss: 0.000965 (0.001004) test_loss: 0.654391 (0.661926)
27 | [2019-09-16 22:52:06,537-log_helper.py# 77] [INFO] Progress: 140 / 225000 [0%], Speed: 0.103 s/iter, ETA 0:06:27 (D:H:M)
28 |
29 | [2019-09-16 22:52:08,817-train_video_prediction_MNIST.py#175] [INFO] Epoch: [1][160/4500] step_time: 0.090414 (0.102013) train_loss: 0.000988 (0.000988) test_loss: 0.690861 (0.662392)
30 | [2019-09-16 22:52:08,817-log_helper.py# 77] [INFO] Progress: 160 / 225000 [0%], Speed: 0.102 s/iter, ETA 0:06:22 (D:H:M)
31 |
32 |
--------------------------------------------------------------------------------
/model/README.md:
--------------------------------------------------------------------------------
1 | # Model
2 | This folder stores the layers and abstract classes for your models.
3 | 
4 | You can import the layers or models defined here into your experiments; the default ConvLSTM and ConvGRU cells are implemented in convRNN.py.
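5 | 
6 | For example, a minimal single-step usage sketch for the default `ConvLSTMCell` (the shapes here are illustrative only):
7 | 
8 | ```python
9 | import torch
10 | from model.convRNN import ConvLSTMCell
11 | 
12 | # one cell over 64x64 single-channel frames with a 64-channel hidden state
13 | cell = ConvLSTMCell(input_size=(64, 64), input_dim=1, hidden_dim=64,
14 |                     kernel_size=(3, 3), bias=True)
15 | x = torch.rand(2, 1, 64, 64)            # (batch, channel, height, width)
16 | h, c = cell.init_hidden(batch_size=2)   # zero-initialized hidden and cell states
17 | h_next, c_next = cell(x, (h, c))        # one recurrent step
18 | ```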
--------------------------------------------------------------------------------
/model/STconvLSTM.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | import torch
3 | import torch.nn.functional as F
4 |
5 | use_cuda = torch.cuda.is_available()
6 | device = torch.device("cuda" if use_cuda else "cpu")
7 |
8 | class STConvLSTMCell(nn.Module):
9 | def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias, forget_bias=1.0, layer_norm=True):
10 |         """
11 |         Initialize the spatio-temporal ConvLSTM (ST-ConvLSTM) cell.
12 | 
13 |         Parameters
14 |         ----------
15 |         input_size: (int, int)
16 |             Height and width of input tensor as (height, width).
17 |         input_dim: int
18 |             Number of channels of input tensor.
19 |         hidden_dim: int
20 |             Number of channels of the hidden state (the M state uses the same width).
21 |         kernel_size: (int, int)
22 |             Size of the convolutional kernel.
23 |         bias: bool
24 |             Whether or not to add the bias.
25 |         forget_bias: float
26 |             Initial value of the learnable forget-gate biases; layer_norm toggles gate normalization.
27 |         """
28 |
29 | super(STConvLSTMCell, self).__init__()
30 |
31 | # init parameters
32 |         self.height, self.width = input_size # init height and width
33 |         self.input_dim = input_dim # init input dim
34 |         self.hidden_dim = hidden_dim # init hidden (output) dim
35 | 
36 |         self.kernel_size = kernel_size # init kernel size
37 |         self.padding = kernel_size[0] // 2, kernel_size[1] // 2 # calculate padding size automatically
38 | self.bias = bias
39 | self.forget_bias = forget_bias
40 | self.layer_norm = layer_norm
41 |
42 |
43 |
44 | # split the conv gate layers
45 | # for W * X_t
46 | self.conv_wx = nn.Conv2d(in_channels=self.input_dim,
47 | out_channels=7 * self.hidden_dim,
48 | kernel_size=self.kernel_size,
49 | padding=self.padding,
50 | bias=self.bias)
51 |
52 | # for W * H^t_1
53 | self.conv_wht_1 = nn.Conv2d(in_channels=self.hidden_dim,
54 | out_channels=4 * self.hidden_dim,
55 | kernel_size=self.kernel_size,
56 | padding=self.padding,
57 | bias=self.bias)
58 |
59 | # for W * M^l_1
60 | self.conv_wml_1 = nn.Conv2d(in_channels=self.hidden_dim,
61 | out_channels=3 * self.hidden_dim,
62 | kernel_size=self.kernel_size,
63 | padding=self.padding,
64 | bias=self.bias)
65 |
66 | # for W * M^l
67 | self.conv_wml = nn.Conv2d(in_channels=self.hidden_dim,
68 | out_channels= self.hidden_dim,
69 | kernel_size=self.kernel_size,
70 | padding=self.padding,
71 | bias=self.bias)
72 |
73 | # for W * C^l
74 | self.conv_wcl = nn.Conv2d(in_channels=self.hidden_dim,
75 | out_channels=self.hidden_dim,
76 | kernel_size=self.kernel_size,
77 | padding=self.padding,
78 | bias=self.bias)
79 |
80 |
81 | # for generate H^l
82 | self.conv_h = nn.Conv2d(in_channels=self.hidden_dim + self.hidden_dim,
83 | out_channels=self.hidden_dim,
84 | kernel_size=[1,1],
85 | padding=0,
86 | bias=self.bias)
87 |
88 | # init parameters
89 |         nn.init.orthogonal_(self.conv_wx.weight)
90 |         nn.init.orthogonal_(self.conv_wht_1.weight)
91 |         nn.init.orthogonal_(self.conv_wml_1.weight)
92 |         nn.init.orthogonal_(self.conv_wml.weight)
93 |         nn.init.orthogonal_(self.conv_wcl.weight)
94 |         nn.init.orthogonal_(self.conv_h.weight)
95 |
96 |         # gate normalization (note: the layer_norm flag actually enables BatchNorm2d here)
97 | if self.layer_norm:
98 | self.conv_wx_norm = nn.BatchNorm2d(7 * self.hidden_dim)
99 | self.conv_wht_1_norm = nn.BatchNorm2d(4 * self.hidden_dim)
100 | self.conv_wml_1_norm = nn.BatchNorm2d(3 * self.hidden_dim)
101 | self.conv_wml_norm = nn.BatchNorm2d(self.hidden_dim)
102 | self.conv_wcl_norm = nn.BatchNorm2d(self.hidden_dim)
103 | self.conv_h_norm = nn.BatchNorm2d(self.hidden_dim)
104 |
105 |
106 |         # learnable forget-gate biases
107 | self.forget_bias_h = torch.nn.Parameter(torch.tensor(self.forget_bias))
108 | self.forget_bias_m = torch.nn.Parameter(torch.tensor(self.forget_bias))
109 |
110 | def forward(self, input_tensor, cur_state):
111 | """
112 | Forward of Conv LSTM Cell
113 | Inputs:
114 | ---------------------------------------
115 | input_tensor: (b, c, h, w)
116 | cur_state: [ H, C, M ]
117 | cur_state: [(b, c_hidden, h, w), (b, c_hidden, h, w), (b, c_hidden, h, w)]
118 | ---------------------------------------
119 | Returns:
120 | ---------------------------------------
121 | h_next, c_next, m_next : ((b, c_hidden, h, w), (b, c_hidden, h, w), (b, c_hidden, h, w))
122 | next hidden state
123 | """
124 | # state input
125 | h_cur, c_cur, m_cur = cur_state
126 | # conv gate result
127 | conved_wx = self.conv_wx(input_tensor)
128 | conved_wht_1 = self.conv_wht_1(h_cur)
129 | conved_wml_1 = self.conv_wml_1(m_cur)
130 | # for bn
131 | if self.layer_norm:
132 | conved_wx = self.conv_wx_norm(conved_wx)
133 | conved_wht_1 = self.conv_wht_1_norm(conved_wht_1)
134 | conved_wml_1 = self.conv_wml_1_norm(conved_wml_1)
135 | # split gate result
136 | wxg, wxi, wxf, wxg_, wxi_, wxf_, wxo = torch.split(conved_wx, self.hidden_dim, dim=1)
137 | whg, whi, whf, who = torch.split(conved_wht_1, self.hidden_dim, dim=1)
138 | wmg, wmi, wmf = torch.split(conved_wml_1, self.hidden_dim, dim=1)
139 | # for c_next
140 | g_t = torch.tanh(wxg + whg)
141 | i_t = torch.sigmoid(wxi + whi)
142 | f_t = torch.sigmoid(wxf + whf + self.forget_bias_h)
143 | c_next = f_t * c_cur + i_t * g_t
144 | # for m_next
145 | g_t_ = torch.tanh(wxg_ + wmg)
146 | i_t_ = torch.sigmoid(wxi_ + wmi)
147 | f_t_ = torch.sigmoid(wxf_ + wmf + self.forget_bias_m)
148 | m_next = f_t_ * m_cur + i_t_ * g_t_
149 | # for wco, wmo
150 | wco = self.conv_wcl(c_next)
151 | wmo = self.conv_wml(m_next)
152 | # for bn
153 | if self.layer_norm:
154 | wco = self.conv_wcl_norm(wco)
155 | wmo = self.conv_wml_norm(wmo)
156 | # for output gate
157 | o_t = torch.sigmoid(wxo + who + wco + wmo)
158 | # for h_next
159 | combined_cmn = torch.cat([c_next, m_next], dim=1)
160 | h_next = o_t * torch.tanh(self.conv_h(combined_cmn))
161 |
162 | return h_next, c_next, m_next
163 |
164 |
165 | def init_hidden(self, batch_size):
166 | return (torch.zeros(batch_size, self.hidden_dim, self.height, self.width).to(device),
167 | torch.zeros(batch_size, self.hidden_dim, self.height, self.width).to(device),
168 | torch.zeros(batch_size, self.hidden_dim, self.height, self.width).to(device))
169 |
170 |
171 |
172 | def test():
173 | pass
174 |
175 |
176 |
177 | if __name__ == "__main__":
178 | test()
179 |
180 |
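181 | # A minimal smoke test sketch (hypothetical sizes, added for illustration):
182 | # build one cell, zero-init the H/C/M states, and run a single forward step.
183 | if __name__ == "__main__":
184 |     cell = STConvLSTMCell(input_size=(16, 16), input_dim=1, hidden_dim=8,
185 |                           kernel_size=(3, 3), bias=True).to(device)
186 |     x = torch.rand(2, 1, 16, 16).to(device)
187 |     h, c, m = cell.init_hidden(batch_size=2)
188 |     h_next, c_next, m_next = cell(x, (h, c, m))
189 |     print(h_next.shape)  # expected: torch.Size([2, 8, 16, 16])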
--------------------------------------------------------------------------------
/model/convRNN.py:
--------------------------------------------------------------------------------
1 | import torch.nn as nn
2 | from torch.autograd import Variable
3 | import torch
4 | import torch.nn.functional as F
5 |
6 |
7 | use_cuda = torch.cuda.is_available()
8 | device = torch.device("cuda" if use_cuda else "cpu")
9 |
10 | class ConvLSTMCell(nn.Module):
11 |
12 | def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):
13 | """
14 | Initialize ConvLSTM cell.
15 |
16 | Parameters
17 | ----------
18 | input_size: (int, int)
19 | Height and width of input tensor as (height, width).
20 | input_dim: int
21 | Number of channels of input tensor.
22 | hidden_dim: int
23 | Number of channels of hidden state.
24 | kernel_size: (int, int)
25 | Size of the convolutional kernel.
26 | bias: bool
27 | Whether or not to add the bias.
28 | """
29 |
30 | super(ConvLSTMCell, self).__init__()
31 |
32 | # init parameters
33 |         self.height, self.width = input_size # init height and width
34 |         self.input_dim = input_dim # init input dim
35 |         self.hidden_dim = hidden_dim # init output dim
36 | 
37 |         self.kernel_size = kernel_size # init kernel size
38 |         self.padding = kernel_size[0] // 2, kernel_size[1] // 2 # calculate padding size automatically
39 |         self.bias = bias # init bias
40 | 
41 |         # all gate convolutions can be combined into one big conv layer
42 | self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
43 | out_channels=4 * self.hidden_dim,
44 | kernel_size=self.kernel_size,
45 | padding=self.padding,
46 | bias=self.bias)
47 |
48 |         # init parameters
49 |         nn.init.orthogonal_(self.conv.weight)
50 |         if self.bias:
51 |             nn.init.ones_(self.conv.bias)
52 |
53 | def forward(self, input_tensor, cur_state):
54 | """
55 | Forward of Conv LSTM Cell
56 | Inputs:
57 | ---------------------------------------
58 | input_tensor: (b, c, h, w)
59 | cur_state: [(b, c_hidden, h, w), (b, c_hidden, h, w)]
60 | ---------------------------------------
61 | Returns:
62 | ---------------------------------------
63 | h_next, c_next : ((b, c_hidden, h, w), (b, c_hidden, h, w))
64 | next hidden state
65 | """
66 |
67 | h_cur, c_cur = cur_state
68 |
69 | combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
70 |
71 | combined_conv = self.conv(combined)
72 | cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
73 | i = torch.sigmoid(cc_i)
74 | f = torch.sigmoid(cc_f)
75 | o = torch.sigmoid(cc_o)
76 | g = F.relu(cc_g)
77 |
78 | c_next = f * c_cur + i * g
79 | h_next = o * torch.tanh(c_next)
80 |
81 | return h_next, c_next
82 |
83 | def init_hidden(self, batch_size):
84 | return (torch.zeros(batch_size, self.hidden_dim, self.height, self.width).to(device),
85 | torch.zeros(batch_size, self.hidden_dim, self.height, self.width).to(device))
86 |
87 |
88 | class ConvGRUCell(nn.Module):
89 | def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):
90 | """
91 |         Initialize the ConvGRU cell
92 | :param input_size: (int, int)
93 | Height and width of input tensor as (height, width).
94 | :param input_dim: int
95 | Number of channels of input tensor.
96 | :param hidden_dim: int
97 | Number of channels of hidden state.
98 | :param kernel_size: (int, int)
99 | Size of the convolutional kernel.
100 | :param bias: bool
101 | Whether or not to add the bias.
102 | """
103 | super(ConvGRUCell, self).__init__()
104 |
105 | # init all parameter
106 |         self.height, self.width = input_size # init height and width
107 |         self.input_dim = input_dim # init input dim
108 |         self.hidden_dim = hidden_dim # init output dim
109 | 
110 |         self.kernel_size = kernel_size # init kernel size
111 |         self.padding = kernel_size[0] // 2, kernel_size[1] // 2 # calculate padding size automatically
112 |         self.bias = bias # init bias
113 | 
114 |         # all gate convolutions can be combined into one big conv layer
115 | self.conv_gates = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
116 | out_channels=2*self.hidden_dim, # for update_gate,reset_gate respectively
117 | kernel_size=self.kernel_size,
118 | padding=self.padding,
119 | bias=self.bias)
120 |
121 | self.conv_can = nn.Conv2d(in_channels=self.input_dim+self.hidden_dim,
122 | out_channels=self.hidden_dim, # for candidate neural memory
123 | kernel_size=self.kernel_size,
124 | padding=self.padding,
125 | bias=self.bias)
126 |
127 |         # init conv and bias parameters
128 |         nn.init.orthogonal_(self.conv_can.weight)
129 |         nn.init.orthogonal_(self.conv_gates.weight)
130 |         if self.bias:
131 |             nn.init.ones_(self.conv_can.bias)
132 |             nn.init.ones_(self.conv_gates.bias)
133 |
134 | def init_hidden(self, batch_size):
135 | return torch.zeros(batch_size, self.hidden_dim, self.height, self.width).to(device)
136 |
137 | def forward(self, input_tensor, cur_state):
138 | """
139 | Forward of Conv GRU Cell
140 | Inputs:
141 | ---------------------------------------
142 | input_tensor: (b, c, h, w)
143 | cur_state: (b, c_hidden, h, w)
144 | ---------------------------------------
145 | Returns:
146 | ---------------------------------------
147 | h_next : ((b, c_hidden, h, w))
148 | next hidden state
149 | """
150 | combined = torch.cat([input_tensor, cur_state], dim=1)
151 | combined_conv = self.conv_gates(combined)
152 |
153 | gamma, beta = torch.split(combined_conv, self.hidden_dim, dim=1)
154 | reset_gate = torch.sigmoid(gamma)
155 | update_gate = torch.sigmoid(beta)
156 |
157 | combined = torch.cat([input_tensor, reset_gate*cur_state], dim=1)
158 | cc_cnm = self.conv_can(combined)
159 |         cnm = torch.tanh(cc_cnm)  # F.tanh is deprecated
160 |
161 | h_next = (1 - update_gate) * cur_state + update_gate * cnm
162 | return h_next
163 |
164 |
165 | def test():
166 | pass
167 |
168 |
169 |
170 | if __name__ == "__main__":
171 | test()
172 |
173 |
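174 | # A minimal smoke test sketch (hypothetical sizes, added for illustration):
175 | # one forward step through each cell type.
176 | if __name__ == "__main__":
177 |     lstm_cell = ConvLSTMCell(input_size=(16, 16), input_dim=1, hidden_dim=8,
178 |                              kernel_size=(3, 3), bias=True).to(device)
179 |     gru_cell = ConvGRUCell(input_size=(16, 16), input_dim=1, hidden_dim=8,
180 |                            kernel_size=(3, 3), bias=True).to(device)
181 |     x = torch.rand(2, 1, 16, 16).to(device)
182 |     h, c = lstm_cell.init_hidden(batch_size=2)
183 |     h_next, c_next = lstm_cell(x, (h, c))
184 |     h_gru = gru_cell(x, gru_cell.init_hidden(batch_size=2))
185 |     print(h_next.shape, h_gru.shape)  # expected: two (2, 8, 16, 16) tensors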
--------------------------------------------------------------------------------
/model/loss/L1_L2_Loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | class L1_L2_Loss(torch.nn.Module):
5 | def __init__(self):
6 | super(L1_L2_Loss, self).__init__()
7 |
8 | def forward(self, target, pred):
9 | diff = target - pred
10 | loss_ = torch.pow(diff, 2) + torch.abs(diff) # L2 + L1
11 | return loss_.mean()
12 |
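13 | # Hypothetical usage sketch (shapes chosen for illustration only):
14 | if __name__ == "__main__":
15 |     loss_fn = L1_L2_Loss()
16 |     target = torch.rand(2, 10, 1, 64, 64)
17 |     pred = torch.rand(2, 10, 1, 64, 64)
18 |     print(loss_fn(target, pred))  # scalar: mean of elementwise L2 + L1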
--------------------------------------------------------------------------------
/model/loss/SSIM_Loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn.functional as F
3 | from math import exp
4 | import numpy as np
5 |
6 |
7 | # build a 1-D Gaussian kernel vector
8 | def gaussian(window_size, sigma):
9 |     gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
10 |     return gauss/gauss.sum()
11 | 
12 | # build the 2-D Gaussian window as the outer product of two 1-D Gaussian vectors;
13 | # the channel argument expands it to multi-channel (e.g. 3-channel) inputs
14 | def create_window(window_size, channel):
15 |     _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
16 |     _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) # outer product via matrix multiplication
17 |     window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
18 |     return window
19 | 
20 | # compute SSIM
21 | # applies the SSIM formula directly, except that local means are taken with a normalized Gaussian window instead of a plain pixel average.
22 | # variances and the covariance use Var(X)=E[X^2]-E[X]^2 and cov(X,Y)=E[XY]-E[X]E[Y],
23 | # with the expectations again computed as Gaussian-window convolutions.
24 | def _ssim(img1, img2, window, window_size, channel, size_average = True):
25 |     # convolve both images with the window to get local means
26 | mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
27 | mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
28 |
29 |     # squared means and their cross term
30 | mu1_sq = mu1.pow(2)
31 | mu2_sq = mu2.pow(2)
32 | mu1_mu2 = mu1*mu2
33 |
34 |     # compute local (co)variances with the window
35 | sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
36 | sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
37 | sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
38 |
39 |     # stability constants C1, C2 for the SSIM map
40 | C1 = 0.01**2
41 | C2 = 0.03**2
42 |
43 | ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
44 |
45 |     # average over the whole map if size_average, otherwise return a per-image mean
46 | if size_average:
47 | return ssim_map.mean()
48 | else:
49 | return ssim_map.mean(1).mean(1).mean(1)
50 |
51 | # SSIM as an nn.Module loss
52 | class SSIM(torch.nn.Module):
53 | def __init__(self, window_size = 11, size_average = True):
54 | super(SSIM, self).__init__()
55 | self.window_size = window_size
56 | self.size_average = size_average
57 |         # initialize with a default single-channel window
58 | self.channel = 1
59 | self.window = create_window(window_size, self.channel)
60 |
61 | def forward(self, img1, img2):
62 |         if len(img1.size()) == 5:
63 | (batch, frames, channel, _, _) = img1.size()
64 | else:
65 | (_, channel, _, _) = img1.size()
66 |
67 | if channel == self.channel and self.window.data.type() == img1.data.type():
68 | window = self.window
69 | else:
70 | window = create_window(self.window_size, channel)
71 |
72 | if img1.is_cuda:
73 | window = window.cuda(img1.get_device())
74 | window = window.type_as(img1)
75 |
76 | self.window = window
77 | self.channel = channel
78 |
79 |         if len(img1.size()) == 5:
80 | mean = []
81 | for i in range(frames):
82 | mean.append(_ssim(img1[:,i,:,:,:], img2[:,i,:,:,:], window, self.window_size, channel, self.size_average))
83 | mean = torch.stack(mean, dim=0)
84 | return mean.mean()
85 | else:
86 | return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
87 |
88 |
89 | # one-shot functional SSIM helper
90 | def ssim(img1, img2, window_size = 11, size_average = True):
91 |     if len(img1.size()) == 5:
92 | (batch, frames, channel, _, _) = img1.size()
93 | else:
94 | (_, channel, _, _) = img1.size()
95 |
96 | window = create_window(window_size, channel)
97 |
98 | if img1.is_cuda:
99 | window = window.cuda(img1.get_device())
100 | window = window.type_as(img1)
101 |
102 |     if len(img1.size()) == 5:
103 | mean = []
104 | for i in range(frames):
105 | mean.append(_ssim(img1[:,i,:,:,:], img2[:,i,:,:,:], window, window_size, channel, size_average))
106 | mean = torch.stack(mean, dim=0)
107 | return mean.mean()
108 | else:
109 | return _ssim(img1, img2, window, window_size, channel, size_average)
110 |
111 |
112 |
113 | if __name__ == "__main__":
114 | # how to use
115 | img1 = torch.rand(10, 10, 3, 256, 256)
116 | img2 = torch.rand(10, 10, 3, 256, 256)
117 |
118 | if torch.cuda.is_available():
119 | img1 = img1.cuda()
120 | img2 = img2.cuda()
121 |
122 | print(ssim(img1, img2))
123 |
124 | ssim_loss = SSIM(window_size = 11)
125 |
126 | print(ssim_loss(img1, img2))
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | Cython==0.29.4
2 | colorama==0.3.9
3 | numpy==1.15.4
4 | requests==2.21.0
5 | fire==0.1.3
6 | matplotlib==2.2.3
7 | numba==0.39.0
8 | scipy==1.1.0
9 | h5py==2.8.0
10 | pandas==0.23.4
11 | tqdm==4.29.1
12 | tensorboardX==1.6
13 | opencv_python==3.4.3.18
14 | torch==1.1.0
15 | torchvision==0.2.1
16 | joblib==0.13.0
17 | moviepy==1.0.0
18 | nvidia-ml-py3==7.352.0
--------------------------------------------------------------------------------
/tools/README.md:
--------------------------------------------------------------------------------
1 | # Tools
2 | This folder includes the training/testing/demo scripts; use them together with a config file when training or testing your models.
3 |
4 | # Train
5 |
6 | First, build the layers and abstract classes in the model folder, then implement your model in the experiments/'experiment_name'/custom.py file; the model class name should be 'Custom'.
7 |
8 | After building your own Custom class, specify your experiment folder name ('experiment_name') in the config file, then run the corresponding train_video_prediction_*.py file.
9 |
10 | If you want to customize the class name, change the training script code to match your model architecture.
11 |
12 | A config file template is also included in this folder.
13 |
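14 | Below is a minimal sketch of a Custom class (a toy placeholder, not the real PredRNN): the cfg keys follow train_config.json and the forward signature follows the training scripts.
15 |
16 | ```python
17 | import torch
18 | import torch.nn as nn
19 |
20 | class Custom(nn.Module):
21 |     def __init__(self, cfg):
22 |         super(Custom, self).__init__()
23 |         self.input_dim = cfg["input_dim"]    # e.g. 1
24 |         self.hidden_dim = cfg["hidden_dim"]  # e.g. 64
25 |         k = cfg["kernel_size"]               # e.g. [3, 3]
26 |         self.encode = nn.Conv2d(self.input_dim, self.hidden_dim, k[0], padding=k[0] // 2)
27 |         self.decode = nn.Conv2d(self.hidden_dim, self.input_dim, k[1], padding=k[1] // 2)
28 |
29 |     def forward(self, seq, future=10):
30 |         # seq: (batch, time, channel, height, width); predict `future` frames by
31 |         # re-encoding the last frame (a stand-in for a real recurrent cell)
32 |         frames = []
33 |         last = seq[:, -1]
34 |         for _ in range(future):
35 |             last = torch.sigmoid(self.decode(torch.relu(self.encode(last))))
36 |             frames.append(last)
37 |         return torch.cat([seq, torch.stack(frames, dim=1)], dim=1)
38 | ```
39 |
40 | The training scripts slice the output with `output[:, -num_frame:]`, so the returned sequence must end with the predicted frames (the sigmoid keeps values in [0, 1] for the BCE loss).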
--------------------------------------------------------------------------------
/tools/train_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "meta":{
3 | "experiment_path": "experiments",
4 | "arch": "Custom",
5 | "board_path": "board",
6 | "experiment_name":"video_prediction_demo"
7 | },
8 | "train":{
9 | "epoches": 50,
10 | "batch_size": 2,
11 | "lr": 1e-5,
12 | "print_freq": 20
13 | },
14 | "model":{
15 | "input_size":[64,64],
16 | "input_dim":1,
17 | "hidden_dim":64,
18 | "kernel_size": [3,3],
19 | "input_num": 10
20 | }
21 | }
--------------------------------------------------------------------------------
/tools/train_config_PredRNN.json:
--------------------------------------------------------------------------------
1 | {
2 | "meta":{
3 | "experiment_path": "experiments",
4 | "arch": "Custom",
5 | "board_path": "board",
6 | "experiment_name":"PredRNN"
7 | },
8 | "train":{
9 | "epoches": 50,
10 | "batch_size": 1,
11 | "lr": 1e-3,
12 | "print_freq": 20
13 | },
14 | "model":{
15 | "input_size":[64,64],
16 | "input_dim":1,
17 | "hidden_dim":64,
18 | "kernel_size": [3,3],
19 | "input_num": 10
20 | }
21 | }
--------------------------------------------------------------------------------
/tools/train_video_prediction_KTHDataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | parent_path = os.path.dirname(os.getcwd())
5 | if 'tools' in os.getcwd():
6 | os.chdir(parent_path)
7 | sys.path.insert(0, os.getcwd())
8 |
9 | import json
10 | import torch
11 | import torch.nn as nn
12 | from torchvision import transforms
13 | from dataloader.KTHDataset.KTHDataset import KTHDataset
14 |
15 | import argparse
16 |
17 | import logging
18 | from utils.log_helper import init_log, add_file_handler, print_speed
19 | from utils.config_helper import Configs
20 | from utils.average_meter_helper import AverageMeter
21 | from model.loss.SSIM_Loss import SSIM
22 | from model.loss.L1_L2_Loss import L1_L2_Loss
23 |
24 | import inspect
25 | from utils.memory.gpu_mem_track import MemTracker
26 | frame = inspect.currentframe()
27 | gpu_tracker = MemTracker(frame)
28 |
29 | # build the command-line argument parser
30 | parser = argparse.ArgumentParser(description='Train KTH video prediction algorithm')
31 | parser.add_argument('-c', '--cfg', default=os.path.join(os.getcwd(), "tools", "train_config_PredRNN.json"), type=str, required=False, help='training config file path')
32 |
33 | args = parser.parse_args()
34 |
35 | # initialize configuration variables
36 | cfg = Configs(args.cfg)
37 | # tensorboard output path
38 | board_path = cfg.meta["board_path"]
39 | experiment_path = cfg.meta["experiment_path"]
40 | experiment_name = cfg.meta["experiment_name"]
41 | arch = cfg.meta["arch"]
42 | # training hyperparameters
43 | batch_size = cfg.train['batch_size']
44 | epoches = cfg.train['epoches']
45 | lr = cfg.train['lr']
46 | # number of future frames to predict
47 | num_frame = cfg.model['input_num']
48 | # print freq
49 | print_freq = cfg.train['print_freq']
50 |
51 | # initialize the logger
52 | global_logger = init_log('global', level=logging.INFO)
53 | add_file_handler("global", os.path.join(os.getcwd(), 'logs', '{}.log'.format(experiment_name)), level=logging.DEBUG)
54 |
55 | # log the config
56 | cfg.log_dict()
57 |
58 | # initialize the averager
59 | avg = AverageMeter()
60 |
61 | # cuda
62 | use_cuda = torch.cuda.is_available()
63 | device = torch.device("cuda" if use_cuda else "cpu")
64 | torch.backends.cudnn.benchmark = True
65 |
66 | # prepare the datasets
67 | train_set = KTHDataset(root='./data/KTHDataset', train=True, download=True,
68 | transform=transforms.Compose([transforms.Resize(cfg.model["input_size"]), transforms.ToTensor(),]),
69 | target_transform=transforms.Compose([transforms.Resize(cfg.model["input_size"]), transforms.ToTensor(),]))
70 | test_set = KTHDataset(root='./data/KTHDataset', train=False, download=True,
71 | transform=transforms.Compose([transforms.Resize(cfg.model["input_size"]), transforms.ToTensor(),]),
72 | target_transform=transforms.Compose([transforms.Resize(cfg.model["input_size"]), transforms.ToTensor(),]))
73 |
74 | # build the dataloaders
75 | train_loader = torch.utils.data.DataLoader(
76 | dataset=train_set,
77 | batch_size=batch_size,
78 | num_workers=16,
79 | shuffle=True)
80 | test_loader = torch.utils.data.DataLoader(
81 | dataset=test_set,
82 | batch_size=batch_size,
83 | num_workers=16,
84 | shuffle=True)
85 |
86 | # build an iterator over the test loader
87 | test_iter = iter(test_loader)
88 |
89 | # number of batches per split
90 | train_lenth = len(train_loader)
91 | test_lenth = len(test_loader)
92 |
93 | global_logger.debug('==>>> total training batch number: {}'.format(train_lenth))
94 | global_logger.debug('==>>> total testing batch number: {}'.format(test_lenth))
95 |
96 | # load the model
97 | sys.path.append(os.path.join(".", experiment_path, experiment_name))
98 | if arch == "Custom":
99 | from custom import Custom
100 | model = Custom(cfg=cfg.model)
101 | model = model.to(device)
102 | model = torch.nn.DataParallel(model, list(range(torch.cuda.device_count()))).to(device)
103 | else:
104 | raise NotImplementedError
105 |
106 | # create the tensorboard writer
107 | from tensorboardX import SummaryWriter
108 | writer = SummaryWriter(os.path.join(".", board_path, experiment_name))
109 |
110 | # build the optimizer and lr scheduler
111 | optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9,0.999))
112 | scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=[10,20,30,40], gamma=0.5)
113 |
114 | # build the losses (L1 = mean absolute error, L2 = mean squared error)
115 | loss_L1 = nn.L1Loss().to(device)
116 | loss_L2 = nn.MSELoss().to(device)
117 | # binary cross entropy
118 | loss_BCE = nn.BCELoss().to(device)
119 | # SSIM
120 | loss_SSIM = SSIM(window_size=11, size_average=True)
121 |
122 | loss_L1_L2 = L1_L2_Loss().to(device)
123 |
124 | # training loop
125 | for epoch in range(epoches):
126 | for step, [seq, seq_target] in enumerate(train_loader):
127 | step_time = time.time()
128 | # log the tensor shapes on the very first step
129 | if epoch == 0 and step == 0:
130 | global_logger.debug('Input: {}'.format(seq.shape))
131 | global_logger.debug('--- Sample')
132 | global_logger.debug('Target: {}'.format(seq_target.shape))
133 |
134 | # move tensors to the device
135 | seq, seq_target = seq.to(device), seq_target.to(device)
136 |
137 | # zero the gradients
138 | optimizer.zero_grad()
139 |
140 | # forward pass
141 | layer_output = model(seq, future=num_frame)
142 |
143 | # compute the training loss and metric
144 | train_loss = loss_BCE(layer_output[:, -num_frame:, :, :, :], seq_target[:, -num_frame:, :, :, :])
145 | with torch.no_grad():
146 | train_metric = loss_SSIM(layer_output[:, -num_frame:, :, :, :], seq_target[:, -num_frame:, :, :, :])
147 | train_loss.backward()
148 |
149 | # optimizer step
150 | optimizer.step()
151 |
152 | # validate
153 |
154 | with torch.no_grad():
155 | # load random test set
156 | try:
157 | seq_test, gt_seq_test = next(test_iter)
158 | except StopIteration:
159 | test_iter = iter(test_loader)
160 | seq_test, gt_seq_test = next(test_iter)
161 |
162 | seq_test, gt_seq_test = seq_test.to(device), gt_seq_test.to(device)
163 |
164 | # forward pass on the test batch
165 | test_output = model(seq_test, future=num_frame)
166 |
167 | # compute the test loss and metric
168 | test_loss = loss_BCE(test_output[:, -num_frame:, :, :, :], gt_seq_test[:, -num_frame:, :, :, :])
169 | test_metric = loss_SSIM(test_output[:, -num_frame:, :, :, :], gt_seq_test[:, -num_frame:, :, :, :])
170 |
171 | step_time = time.time() - step_time
172 |
173 | # write useful information to tensorboard
174 | if (step+1) % 200 == 0:
175 | writer.add_video('train_seq/feed_seq', seq, epoch*train_lenth + step + 1)
176 | writer.add_video('train_seq/gt_seq', seq_target, epoch*train_lenth + step + 1)
177 | writer.add_video('train_seq/pred_seq', layer_output, epoch*train_lenth + step + 1)
178 | writer.add_video('test_seq/feed_seq', seq_test, epoch*train_lenth + step + 1)
179 | writer.add_video('test_seq/gt_seq', gt_seq_test, epoch*train_lenth + step + 1)
180 | writer.add_video('test_seq/pred_seq', test_output, epoch*train_lenth + step + 1)
181 | writer.add_scalars('loss/merge', {"train_loss": train_loss,"test_loss":test_loss, "train_metric":train_metric, "test_metric":test_metric}, epoch*train_lenth + step + 1)
182 |
183 | # update the averager
184 | avg.update(step_time=step_time, train_loss=train_loss, test_loss=test_loss, train_metric=train_metric) # running averages
185 |
186 | # log the results
187 | if (step+1) % print_freq == 0:
188 | global_logger.info('Epoch: [{0}][{1}/{2}] {step_time:s}\t{train_loss:s}\t{test_loss:s}\t{train_metric:s}'.format(
189 | epoch+1, step + 1, train_lenth, step_time=avg.step_time, train_loss=avg.train_loss, test_loss=avg.test_loss, train_metric=avg.train_metric))
190 | print_speed(epoch*train_lenth + step + 1, avg.step_time.avg, epoches * train_lenth)
191 |
192 | # step the lr scheduler
193 | scheduler.step()
194 |
195 |
196 |
197 |
--------------------------------------------------------------------------------
/tools/train_video_prediction_MNIST.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | parent_path = os.path.dirname(os.getcwd())
5 | print(os.getcwd())
6 | if 'tools' in os.getcwd():
7 | os.chdir(parent_path)
8 | sys.path.insert(0, os.getcwd())
9 |
10 | import json
11 | import torch
12 | import torch.nn as nn
13 | from torchvision import transforms
14 | from dataloader.MovingMNIST.MovingMNIST import MovingMNIST
15 |
16 | import argparse
17 |
18 | import logging
19 | from utils.log_helper import init_log, add_file_handler, print_speed
20 | from utils.config_helper import Configs
21 | from utils.average_meter_helper import AverageMeter
22 | from model.loss.SSIM_Loss import SSIM
23 | from model.loss.L1_L2_Loss import L1_L2_Loss
24 |
25 | import inspect
26 | from utils.memory.gpu_mem_track import MemTracker
27 | frame = inspect.currentframe()
28 | gpu_tracker = MemTracker(frame)
29 |
30 | # build the command-line argument parser
31 | parser = argparse.ArgumentParser(description='Train moving mnist video prediction algorithm')
32 | parser.add_argument('-c', '--cfg', default=os.path.join(os.getcwd(), "tools", "train_config_PredRNN.json"), type=str, required=False, help='training config file path')
33 |
34 | args = parser.parse_args()
35 |
36 | # initialize configuration variables
37 | cfg = Configs(args.cfg)
38 | # tensorboard output path
39 | board_path = cfg.meta["board_path"]
40 | experiment_path = cfg.meta["experiment_path"]
41 | experiment_name = cfg.meta["experiment_name"]
42 | arch = cfg.meta["arch"]
43 | # training hyperparameters
44 | batch_size = cfg.train['batch_size']
45 | epoches = cfg.train['epoches']
46 | lr = cfg.train['lr']
47 | # number of future frames to predict
48 | num_frame = cfg.model['input_num']
49 | # print freq
50 | print_freq = cfg.train['print_freq']
51 |
52 | # initialize the logger
53 | global_logger = init_log('global', level=logging.INFO)
54 | add_file_handler("global", os.path.join(os.getcwd(), 'logs', '{}.log'.format(experiment_name)), level=logging.DEBUG)
55 |
56 | # log the config
57 | cfg.log_dict()
58 |
59 | # initialize the averager
60 | avg = AverageMeter()
61 |
62 | # cuda
63 | use_cuda = torch.cuda.is_available()
64 | device = torch.device("cuda" if use_cuda else "cpu")
65 | torch.backends.cudnn.benchmark = True
66 |
67 | # prepare the datasets
68 | train_set = MovingMNIST(root='./data/mnist', train=True, download=True,
69 | transform=transforms.Compose([transforms.ToTensor(),]),
70 | target_transform=transforms.Compose([transforms.ToTensor(),]))
71 | test_set = MovingMNIST(root='./data/mnist', train=False, download=True,
72 | transform=transforms.Compose([transforms.ToTensor(),]),
73 | target_transform=transforms.Compose([transforms.ToTensor(),]))
74 |
75 | # build the dataloaders
76 | train_loader = torch.utils.data.DataLoader(
77 | dataset=train_set,
78 | batch_size=batch_size,
79 | num_workers=16,
80 | shuffle=True)
81 | test_loader = torch.utils.data.DataLoader(
82 | dataset=test_set,
83 | batch_size=batch_size,
84 | num_workers=16,
85 | shuffle=False)
86 |
87 | # build an iterator over the test loader
88 | test_iter = iter(test_loader)
89 |
90 | # number of batches per split
91 | train_lenth = len(train_loader)
92 | test_lenth = len(test_loader)
93 |
94 | global_logger.debug('==>>> total training batch number: {}'.format(train_lenth))
95 | global_logger.debug('==>>> total testing batch number: {}'.format(test_lenth))
96 |
97 | # load the model
98 | sys.path.append(os.path.join(".", experiment_path, experiment_name))
99 | if arch == "Custom":
100 | from custom import Custom
101 | model = Custom(cfg=cfg.model)
102 | model = torch.nn.DataParallel(model, list(range(torch.cuda.device_count()))).to(device)
103 | else:
104 | raise NotImplementedError
105 |
106 | # create the tensorboard writer
107 | from tensorboardX import SummaryWriter
108 | writer = SummaryWriter(os.path.join(".", board_path, experiment_name))
109 |
110 | # build the optimizer and lr scheduler
111 | optimizer = torch.optim.Adam(model.parameters(), lr=lr)
112 | scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=[10,20,30,40], gamma=0.5)
113 |
114 | # build the losses (L1 = mean absolute error, L2 = mean squared error)
115 | loss_L1 = nn.L1Loss().to(device)
116 | loss_L2 = nn.MSELoss().to(device)
117 | # binary cross entropy
118 | loss_BCE = nn.BCELoss().to(device)
119 | # SSIM
120 | loss_SSIM = SSIM(window_size=11, size_average=True)
121 | # L1 + L2 mean
122 | loss_L1_L2 = L1_L2_Loss().to(device)
123 |
124 | # training loop
125 | for epoch in range(epoches):
126 | for step, [seq, seq_target] in enumerate(train_loader):
127 | step_time = time.time()
128 | # log the tensor shapes on the very first step
129 | if epoch == 0 and step == 0:
130 | global_logger.debug('Input: {}'.format(seq.shape))
131 | global_logger.debug('--- Sample')
132 | global_logger.debug('Target: {}'.format(seq_target.shape))
133 |
134 | # move tensors to the device
135 | seq, seq_target = seq.to(device), seq_target.to(device)
136 |
137 | # zero the gradients
138 | optimizer.zero_grad()
139 |
140 | # forward pass
141 | layer_output = model(seq, future=num_frame)
142 |
143 | # compute the training loss and metric
144 | train_loss = loss_L1_L2(layer_output[:, -num_frame:, :, :, :], seq_target[:, -num_frame:, :, :, :])
145 | with torch.no_grad():
146 | train_metric = loss_SSIM(layer_output[:, -num_frame:, :, :, :], seq_target[:, -num_frame:, :, :, :])
147 | train_loss.backward()
148 |
149 | # optimizer step
150 | optimizer.step()
151 |
152 | # validate
153 |
154 | with torch.no_grad():
155 | # load random test set
156 | try:
157 | seq_test, gt_seq_test = next(test_iter)
158 | except StopIteration:
159 | test_iter = iter(test_loader)
160 | seq_test, gt_seq_test = next(test_iter)
161 |
162 | seq_test, gt_seq_test = seq_test.to(device), gt_seq_test.to(device)
163 |
164 | # forward pass on the test batch
165 | test_output = model(seq_test, future=num_frame)
166 |
167 | # compute the test loss and metric
168 | test_loss = loss_L1_L2(test_output[:, -num_frame:, :, :, :], gt_seq_test[:, -num_frame:, :, :, :])
169 | test_metric = loss_SSIM(test_output[:, -num_frame:, :, :, :], gt_seq_test[:, -num_frame:, :, :, :])
170 |
171 | step_time = time.time() - step_time
172 |
173 | # write useful information to tensorboard
174 | if (step+1) % print_freq == 0:
175 | writer.add_video('train_seq/feed_seq', seq, epoch*train_lenth + step + 1)
176 | writer.add_video('train_seq/gt_seq', seq_target, epoch*train_lenth + step + 1)
177 | writer.add_video('train_seq/pred_seq', layer_output, epoch*train_lenth + step + 1)
178 | writer.add_video('test_seq/feed_seq', seq_test, epoch*train_lenth + step + 1)
179 | writer.add_video('test_seq/gt_seq', gt_seq_test, epoch*train_lenth + step + 1)
180 | writer.add_video('test_seq/pred_seq', test_output, epoch*train_lenth + step + 1)
181 | writer.add_scalars('loss/merge', {"train_loss": train_loss,"test_loss":test_loss, "train_metric":train_metric, "test_metric":test_metric}, epoch*train_lenth + step + 1)
182 |
183 | # update the averager
184 | avg.update(step_time=step_time, train_loss=train_loss, test_loss=test_loss, train_metric=train_metric) # running averages
185 |
186 | # log the results
187 | if (step+1) % print_freq == 0:
188 | global_logger.info('Epoch: [{0}][{1}/{2}] {step_time:s}\t{train_loss:s}\t{test_loss:s}\t{train_metric:s}'.format(
189 | epoch+1, step + 1, train_lenth, step_time=avg.step_time, train_loss=avg.train_loss, test_loss=avg.test_loss, train_metric=avg.train_metric))
190 | print_speed(epoch*train_lenth + step + 1, avg.step_time.avg, epoches * train_lenth)
191 |
192 | # step the lr scheduler
193 | scheduler.step()
194 |
195 |
196 |
197 |
--------------------------------------------------------------------------------
/utils/README.md:
--------------------------------------------------------------------------------
1 | # Utils
2 | This folder contains utilities for training/testing your model; usage examples are self-contained in each file.
3 |
4 | # average_meter_helper
5 | Contains a class that computes running averages of tracked values.
6 |
7 | # config_helper
8 | Loads and validates config files.
9 |
10 | # log_helper
11 | Generates the logger handle, which can emit log messages to multiple destinations (currently a file handler and a stream handler).
12 |
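13 | Below is a minimal sketch of the three helpers used together, following the pattern in the training scripts (the paths are illustrative):
14 |
15 | ```python
16 | import logging
17 | from utils.log_helper import init_log, add_file_handler
18 | from utils.config_helper import Configs
19 | from utils.average_meter_helper import AverageMeter
20 |
21 | # console + file logging under one logger name
22 | logger = init_log('global', level=logging.INFO)
23 | add_file_handler('global', 'logs/demo.log', level=logging.DEBUG)
24 |
25 | # load a config (json path or plain dict) and log it
26 | cfg = Configs('tools/train_config.json')
27 | cfg.log_dict()
28 |
29 | # accumulate running averages across steps
30 | avg = AverageMeter()
31 | avg.update(step_time=0.5, train_loss=1.2)
32 | logger.info('{}'.format(avg.step_time))
33 | ```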
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JaMesLiMers/Frame_Video_Prediction_Pytorch/fec5870a2d2ce8f91085c38c46f86e3b58ee0385/utils/__init__.py
--------------------------------------------------------------------------------
/utils/average_meter_helper.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # SiamMask
3 | # Licensed under The MIT License
4 | # Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
5 | # --------------------------------------------------------
6 | import numpy as np
7 |
8 |
9 | class Meter(object): # a single tracked metric
10 | def __init__(self, name, val, avg):
11 | self.name = name
12 | self.val = val
13 | self.avg = avg
14 |
15 | def __repr__(self):
16 | return "{name}: {val:.6f} ({avg:.6f})".format(
17 | name=self.name, val=self.val, avg=self.avg
18 | )
19 |
20 | def __format__(self, *tuples, **kwargs):
21 | return self.__repr__()
22 |
23 |
24 | class AverageMeter(object): # computes and stores averages
25 | """Computes and stores the average and current value"""
26 | def __init__(self):
27 | self.reset() # reset on construction
28 |
29 | def reset(self): # clear all tracked values
30 | self.val = {}
31 | self.sum = {} # running totals
32 | self.count = {}
33 |
34 | def update(self, batch=1, **kwargs): # update tracked values, e.g. update(key=1)
35 | val = {} # latest values
36 | for k in kwargs: # for each passed-in value
37 | val[k] = kwargs[k] / float(batch) # average at the batch level first, e.g. val['key'] = 1/1 = 1
38 | self.val.update(val) # merge into self.val
39 | for k in kwargs:
40 | if k not in self.sum: # new key
41 | self.sum[k] = 0 # initialize its total
42 | self.count[k] = 0
43 | self.sum[k] += kwargs[k] # accumulate
44 | self.count[k] += batch
45 |
46 | def __repr__(self): # developer-facing repr; printing an AverageMeter shows all tracked metrics
47 | s = ''
48 | for k in self.sum:
49 | s += self.format_str(k)
50 | return s
51 |
52 | def format_str(self, attr):
53 | return "{name}: {val:.6f} ({avg:.6f}) ".format(
54 | name=attr,
55 | val=float(self.val[attr]),
56 | avg=float(self.sum[attr]) / self.count[attr])
57 |
58 | def __getattr__(self, attr): # called when normal attribute lookup fails
59 | if attr in self.__dict__: # attr exists as a real attribute
60 | return super(AverageMeter, self).__getattr__(attr) # defer to the parent class
61 | if attr not in self.sum: # not a tracked key
62 | # logger.warn("invalid key '{}'".format(attr))
63 | # print("invalid key '{}'".format(attr)) # warn about the invalid key
64 | return Meter(attr, 0, 0) # default to zeros
65 | return Meter(attr, self.val[attr], self.avg(attr)) # return the populated meter
66 |
67 | def avg(self, attr): # compute the average
68 | return float(self.sum[attr]) / self.count[attr]
69 |
70 |
71 | if __name__ == '__main__':
72 | # Usage
73 | avg = AverageMeter() # construct
74 | avg.update(time=1.1, accuracy=.99) # pass the values to average
75 | avg.update(time=1.0, accuracy=.90) # call repeatedly to accumulate the average
76 |
77 | print(avg) # print all averages
78 |
79 | print(avg.time) # print a specific meter (str)
80 | print(avg.time.avg) # its average (float)
81 | print(avg.time.val) # its latest value (float)
82 | print(avg.SS) # unknown keys default to 0
83 |
84 |
85 |
86 |
87 |
--------------------------------------------------------------------------------
/utils/config_helper.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | from os.path import exists
4 |
5 |
6 | class Configs(object):
7 | def __init__(self, file_path_or_dict, logger_name="global"):
8 | super(Configs, self).__init__()
9 | # get the logger
10 | self.logger = logging.getLogger(logger_name)
11 | # load the cfg file and validate the basic fields
12 | self.check_meta(self.load_config(file_path_or_dict))
13 |
14 | def load_config(self, file_path_or_dict):
15 | if type(file_path_or_dict) is str:
16 | assert exists(file_path_or_dict), '"{}" does not exist'.format(file_path_or_dict) # make sure the file exists
17 | config = dict(json.load(open(file_path_or_dict))) # open and parse the json file
18 | elif type(file_path_or_dict) is dict:
19 | config = file_path_or_dict
20 | else:
21 | raise Exception("The input must be a string path or a dict")
22 | return config
23 |
24 | def check_meta(self, cfg_init):
25 | """
26 | Check whether the basic required fields are present in the config; fall back to defaults if not.
27 | Specifically checked:
28 | whether the meta tag exists, along with the important experiment_path and arch fields.
29 | If everything is fine, the values are assigned to the Configs instance.
30 | """
31 | self.cfg_init = cfg_init
32 |
33 | # check the meta section
34 | if 'meta' not in self.cfg_init:
35 | self.logger.warning("The cfg not include meta tag, will generate default")
36 | self.logger.warning("Used the default meta configs.")
37 | self.cfg_init['meta'] = {"experiment_path": "experiments",
38 | "arch": "Custom",
39 | "board_path": "board",
40 | "experiment_name":"video_prediction_demo"},
41 | else:
42 | cfg_meta = self.cfg_init['meta']
43 | if 'experiment_path' not in cfg_meta:
44 | self.logger.warning("Not specified experiment_path, used default. ('experiments')")
45 | self.cfg_init['meta']['experiment_path'] = "experiments"
46 | if 'arch' not in cfg_meta:
47 | self.logger.warning("Not specified arch, used default. (Custom)")
48 | self.cfg_init['meta']['arch'] = "Custom"
49 | if 'board_path' not in cfg_meta:
50 | self.logger.warning("Not specified board_path, used default. (board)")
51 | self.cfg_init['meta']['board_path'] = "board"
52 | if 'experiment_name' not in cfg_meta:
53 | self.logger.warning("Not specified experiment_name, used default. (video_prediction_demo)")
54 | self.cfg_init['meta']['experiment_name'] = "video_prediction_demo"
55 |
56 | # assign the sections onto the instance
57 | self.__dict__.update(self.cfg_init)
58 |
59 | def log_dict(self, logger_name="global"):
60 | # log the loaded config
61 | self.logger.debug("Used config: \n {}".format(self.cfg_init))
62 |
63 |
64 |
65 | if __name__ == "__main__":
66 | """
67 | Loader class for cfg files.
68 | A cfg is either a json file or a dict().
69 |
70 | The meta section holds the experiment metadata (required; it is validated),
71 | the train section holds the training parameters,
72 | and the model section holds the model parameters.
73 |
74 | Below is an example and how to use it:
75 | """
76 |
77 | test_cfg = {
78 | "meta":{
79 | "experiment_path": "./experiments/video_prediction/",
80 | "arch": "Custom"
81 | },
82 | "train":{
83 | "epoches": 50,
84 | "batch_size": 16,
85 | "lr": 1e-3
86 | },
87 | "model":{
88 | "input_size":[64,64],
89 | "input_dim":1,
90 | "hidden_dim":64,
91 | "kernel_size":[3,3],
92 | "num_layers":3,
93 | "bias":True,
94 | "return_all_layers": False,
95 | "predict_num": 10
96 | }
97 | }
98 | test_Configs = Configs(test_cfg)
99 |
100 | # access each section by name; a dict is returned
101 | print(test_Configs.meta)
102 | print(type(test_Configs.meta))
103 |
104 | print(test_Configs.train)
105 | print(type(test_Configs.train))
106 |
107 | print(test_Configs.model)
108 | print(type(test_Configs.model))
--------------------------------------------------------------------------------
/utils/log_helper.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # SiamMask
3 | # Licensed under The MIT License
4 | # Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
5 | # --------------------------------------------------------
6 | from __future__ import division
7 |
8 | import os
9 | import logging
10 | import sys
11 | import math
12 |
13 | # set used to remember already-initialized loggers
14 | logs = set()
15 |
16 | def get_format():
17 | """
18 | Build a formatter class that defines
19 | the format of the output log messages.
20 | """
21 | format_str = '[%(asctime)s-%(filename)s#%(lineno)3d] [%(levelname)s] %(message)s'
22 | formatter = logging.Formatter(format_str)
23 | return formatter
24 |
25 |
26 | def get_format_custom():
27 | format_str = '[%(asctime)s] %(message)s'
28 | formatter = logging.Formatter(format_str)
29 | return formatter
30 |
31 |
32 | def init_log(name, level = logging.INFO, format_func=get_format):
33 | """
34 | Initialize a logger and return it.
35 | Each call adds a (name, level) pair to the logs set
36 | to prevent duplicate initialization.
37 |
38 | """
39 | # avoid duplicate initialization
40 | if (name, level) in logs:
41 | return logging.getLogger(name)
42 | # otherwise record the (name, level) pair
43 | logs.add((name, level))
44 | logger = logging.getLogger(name) # creates the logger if it does not exist yet
45 | logger.setLevel(logging.DEBUG) # messages at or above this level are processed
46 | ch = logging.StreamHandler() # handler that prints to the console
47 | ch.setLevel(level) # level for the console handler
48 | formatter = format_func() # build the formatter
49 | ch.setFormatter(formatter) # attach the formatter
50 | logger.addHandler(ch) # register the handler on the logger
51 | return logger
52 |
53 |
54 | def add_file_handler(name, log_file, level=logging.DEBUG):
55 | """
56 | Add a file handler to the named logger.
57 | """
58 | logger = logging.getLogger(name) # get (or create) the target logger
59 | fh = logging.FileHandler(log_file, 'w+') # file handler writing to log_file
60 | fh.setFormatter(get_format()) # set the format
61 | fh.setLevel(level)
62 | logger.addHandler(fh) # register the handler
63 |
64 |
65 | def print_speed(i, i_time, n, logger_name='global'):
66 | """
67 | Log the current progress and estimated remaining time.
68 | The per-iteration time is given in seconds.
69 | print_speed(index, index_time, total_iteration, logger_name)
70 | """
71 | logger = logging.getLogger(logger_name)
72 | average_time = i_time
73 | remaining_time = (n - i) * average_time
74 | remaining_day = math.floor(remaining_time / 86400)
75 | remaining_hour = math.floor(remaining_time / 3600 - remaining_day * 24)
76 | remaining_min = math.floor(remaining_time / 60 - remaining_day * 1440 - remaining_hour * 60)
77 | logger.info('Progress: %d / %d [%d%%], Speed: %.3f s/iter, ETA %d:%02d:%02d (D:H:M)\n' % (i, n, i/n*100, average_time, remaining_day, remaining_hour, remaining_min))
78 |
79 |
80 | # optionally initialize the global logger at import time
81 | # init_log('global')
82 |
83 |
84 | if __name__ == "__main__":
85 | """
86 | Usage
87 | """
88 | # create a logger; an already-initialized logger is returned as-is,
89 | # otherwise a new one is created under the given name
90 | logger_test= init_log('global', level=logging.INFO)
91 | # attach a file handler to the logger
92 | add_file_handler("global", os.path.join('.', 'test.log'), level=logging.INFO)
93 |
94 | # logging methods
95 | logger_test.debug('this is a debug log')
96 | logger_test.info('hello info')
97 | logger_test.warning('this is a warning log')
98 | logger_test.error('this is an error message')
99 | logger_test.critical('this is critical')
100 |
101 | # extra helper (logs via the 'global' logger by default)
102 | print_speed(1, 1, 10)
--------------------------------------------------------------------------------
/utils/memory/README.md:
--------------------------------------------------------------------------------
1 | # Pytorch-Memory-Utils
2 |
3 | These utilities help you track GPU memory usage during training with PyTorch.
4 |
5 | A blog post explaining this tool in detail: https://oldpan.me/archives/pytorch-gpu-memory-usage-track
6 |
7 | # Requirement:
8 |
9 | ```
10 | pynvml(pip install nvidia-ml-py3)
11 | ```
12 |
13 | ## The following is the printed output.
14 |
15 | - Calculate the memory usage of a single model
16 | ```
17 | Model Sequential : params: 0.450304M
18 | Model Sequential : intermediate variables: 336.089600 M (without backward)
19 | Model Sequential : intermediate variables: 672.179200 M (with backward)
20 | ```
21 | - Track the amount of GPU memory usage
22 | ```markdown
23 | # 12-Sep-18-21:48:45-gpu_mem_track.txt
24 |
25 | GPU Memory Track | 12-Sep-18-21:48:45 | Total Used Memory:696.5 Mb
26 |
27 | At __main__ : line 13 Total Used Memory:696.5 Mb
28 |
29 | + | 7 * Size:(512, 512, 3, 3) | Memory: 66.060 M |
30 | + | 1 * Size:(512, 256, 3, 3) | Memory: 4.7185 M |
31 | + | 1 * Size:(64, 64, 3, 3) | Memory: 0.1474 M |
32 | + | 1 * Size:(128, 64, 3, 3) | Memory: 0.2949 M |
33 | + | 1 * Size:(128, 128, 3, 3) | Memory: 0.5898 M |
34 | + | 8 * Size:(512,) | Memory: 0.0163 M |
35 | + | 3 * Size:(256, 256, 3, 3) | Memory: 7.0778 M |
36 | + | 1 * Size:(256, 128, 3, 3) | Memory: 1.1796 M |
37 | + | 2 * Size:(64,) | Memory: 0.0005 M |
38 | + | 4 * Size:(256,) | Memory: 0.0040 M |
39 | + | 2 * Size:(128,) | Memory: 0.0010 M |
40 | + | 1 * Size:(64, 3, 3, 3) | Memory: 0.0069 M |
41 |
42 | At __main__ : line 15 Total Used Memory:1142.0 Mb
43 |
44 | + | 1 * Size:(60, 3, 512, 512) | Memory: 188.74 M |
45 | + | 1 * Size:(30, 3, 512, 512) | Memory: 94.371 M |
46 | + | 1 * Size:(40, 3, 512, 512) | Memory: 125.82 M |
47 |
48 | At __main__ : line 21 Total Used Memory:1550.9 Mb
49 |
50 | + | 1 * Size:(120, 3, 512, 512) | Memory: 377.48 M |
51 | + | 1 * Size:(80, 3, 512, 512) | Memory: 251.65 M |
52 |
53 | At __main__ : line 26 Total Used Memory:2180.1 Mb
54 |
55 | - | 1 * Size:(120, 3, 512, 512) | Memory: 377.48 M |
56 | - | 1 * Size:(40, 3, 512, 512) | Memory: 125.82 M |
57 |
58 | At __main__ : line 32 Total Used Memory:1676.8 Mb
59 | ```
60 |
61 | ## How to use
62 |
63 | ### Track the amount of GPU memory usage
64 | A simple example:
65 |
66 | ```python
67 | import torch
68 | import inspect
69 |
70 | from torchvision import models
71 | from gpu_mem_track import MemTracker
72 |
73 | device = torch.device('cuda:0')
74 |
75 | frame = inspect.currentframe() # define a frame to track
76 | gpu_tracker = MemTracker(frame) # define a GPU tracker
77 |
78 | gpu_tracker.track() # call track() before the lines that allocate GPU memory
79 | cnn = models.vgg19(pretrained=True).features.to(device).eval()
80 | gpu_tracker.track() # and call it again afterwards to log the delta
81 |
82 | dummy_tensor_1 = torch.randn(30, 3, 512, 512).float().to(device) # 30*3*512*512*4/1000/1000 = 94.37M
83 | dummy_tensor_2 = torch.randn(40, 3, 512, 512).float().to(device) # 40*3*512*512*4/1000/1000 = 125.82M
84 | dummy_tensor_3 = torch.randn(60, 3, 512, 512).float().to(device) # 60*3*512*512*4/1000/1000 = 188.74M
85 |
86 | gpu_tracker.track()
87 |
88 | dummy_tensor_4 = torch.randn(120, 3, 512, 512).float().to(device) # 120*3*512*512*4/1000/1000 = 377.48M
89 | dummy_tensor_5 = torch.randn(80, 3, 512, 512).float().to(device) # 80*3*512*512*4/1000/1000 = 251.64M
90 |
91 | gpu_tracker.track()
92 |
93 | dummy_tensor_4 = dummy_tensor_4.cpu()
94 | dummy_tensor_2 = dummy_tensor_2.cpu()
95 | torch.cuda.empty_cache()
96 |
97 | gpu_tracker.track()
98 | ```
99 | This will write a .txt file to the current directory; its content looks like the sample output shown above.
100 |
101 | # REFERENCE
102 | Part of the code is referenced from:
103 |
104 | http://jacobkimmel.github.io/pytorch_estimating_model_size/
105 | https://gist.github.com/MInner/8968b3b120c95d3f50b8a22a74bf66bc
106 |
107 |
--------------------------------------------------------------------------------
/utils/memory/gpu_mem_track.py:
--------------------------------------------------------------------------------
1 | import gc
2 | import datetime
3 | import pynvml
4 |
5 | import torch
6 | import numpy as np
7 |
8 |
9 | class MemTracker(object):
10 | """
11 | Class used to track pytorch memory usage
12 | Arguments:
13 | frame: a frame to detect current py-file runtime
14 | detail(bool, default True): whether to show detailed gpu memory usage
15 | path(str): where to save the log file
16 | verbose(bool, default False): whether to show trivial exceptions
17 | device(int): GPU number, default is 0
18 | """
19 | def __init__(self, frame, detail=True, path='', verbose=False, device=0):
20 | self.frame = frame
21 | self.print_detail = detail
22 | self.last_tensor_sizes = set()
23 | self.gpu_profile_fn = path + f'{datetime.datetime.now():%d-%b-%y-%H:%M:%S}-gpu_mem_track.txt'
24 | self.verbose = verbose
25 | self.begin = True
26 | self.device = device
27 |
28 | self.func_name = frame.f_code.co_name
29 | self.filename = frame.f_globals["__file__"]
30 | if (self.filename.endswith(".pyc") or
31 | self.filename.endswith(".pyo")):
32 | self.filename = self.filename[:-1]
33 | self.module_name = self.frame.f_globals["__name__"]
34 | self.curr_line = self.frame.f_lineno
35 |
36 | def get_tensors(self):
37 | for obj in gc.get_objects():
38 | try:
39 | if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
40 | tensor = obj
41 | else:
42 | continue
43 | if tensor.is_cuda:
44 | yield tensor
45 | except Exception as e:
46 | if self.verbose:
47 | print('A trivial exception occurred: {}'.format(e))
48 |
49 | def track(self):
50 | """
51 | Track the GPU memory usage
52 | """
53 | pynvml.nvmlInit()
54 | handle = pynvml.nvmlDeviceGetHandleByIndex(self.device)
55 | meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
56 | self.curr_line = self.frame.f_lineno
57 | where_str = self.module_name + ' ' + self.func_name + ':' + ' line ' + str(self.curr_line)
58 |
59 | with open(self.gpu_profile_fn, 'a+') as f:
60 |
61 | if self.begin:
62 | f.write(f"GPU Memory Track | {datetime.datetime.now():%d-%b-%y-%H:%M:%S} |"
63 | f" Total Used Memory:{meminfo.used/1000**2:<7.1f}Mb\n\n")
64 | self.begin = False
65 |
66 | if self.print_detail:
67 | ts_list = [tensor.size() for tensor in self.get_tensors()]
68 | new_tensor_sizes = {(type(x), tuple(x.size()), ts_list.count(x.size()), np.prod(np.array(x.size()))*4/1000**2)
69 | for x in self.get_tensors()}
70 | for t, s, n, m in new_tensor_sizes - self.last_tensor_sizes:
71 | f.write(f'+ | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20}\n')
72 | for t, s, n, m in self.last_tensor_sizes - new_tensor_sizes:
73 | f.write(f'- | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} \n')
74 | self.last_tensor_sizes = new_tensor_sizes
75 |
76 | f.write(f"\nAt {where_str:<50}"
77 | f"Total Used Memory:{meminfo.used/1000**2:<7.1f}Mb\n\n")
78 |
79 | pynvml.nvmlShutdown()
80 |
81 |
--------------------------------------------------------------------------------
/utils/memory/modelsize_estimate.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import numpy as np
4 |
5 |
6 | def modelsize(model, input, type_size=4):
7 | para = sum([np.prod(list(p.size())) for p in model.parameters()])
8 | print('Model {} : Number of params: {}'.format(model._get_name(), para))
9 | print('Model {} : params: {:4f}M'.format(model._get_name(), para * type_size / 1000 / 1000))
10 |
11 | input_ = input.clone()
12 | input_.requires_grad_(requires_grad=False)
13 |
14 | mods = list(model.modules())
15 | out_sizes = []
16 |
17 | for i in range(1, len(mods)):
18 | m = mods[i]
19 | if isinstance(m, nn.ReLU):
20 | if m.inplace:
21 | continue
22 | out = m(input_)
23 | out_sizes.append(np.array(out.size()))
24 | input_ = out
25 |
26 | total_nums = 0
27 | for i in range(len(out_sizes)):
28 | s = out_sizes[i]
29 | nums = np.prod(np.array(s))
30 | total_nums += nums
31 |
32 | # print('Model {} : Number of intermediate variables without backward: {}'.format(model._get_name(), total_nums))
33 | # print('Model {} : Number of intermediate variables with backward: {}'.format(model._get_name(), total_nums*2))
34 | print('Model {} : intermediate variables: {:3f} M (without backward)'
35 | .format(model._get_name(), total_nums * type_size / 1000 / 1000))
36 | print('Model {} : intermediate variables: {:3f} M (with backward)'
37 | .format(model._get_name(), total_nums * type_size*2 / 1000 / 1000))
38 |
39 |
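40 | if __name__ == "__main__":
41 |     # Usage sketch with an assumed toy model (not part of the original file).
42 |     # modelsize feeds the input through each submodule in order, so it only
43 |     # suits simple sequential architectures like this one.
44 |     net = nn.Sequential(
45 |         nn.Conv2d(3, 64, 3, padding=1),
46 |         nn.ReLU(),
47 |         nn.Conv2d(64, 64, 3, padding=1))
48 |     dummy = torch.randn(1, 3, 64, 64)
49 |     modelsize(net, dummy)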
--------------------------------------------------------------------------------