├── .gitignore
├── DataLoaders
│   ├── AFEW.py
│   ├── DFEW.py
│   ├── FERV39K.py
│   ├── MAFW.py
│   ├── __init__.py
│   ├── class_descriptions.yml
│   ├── get_loader.py
│   └── utils.py
├── LICENSE
├── README.md
├── architecture
│   ├── __init__.py
│   ├── downstream.py
│   ├── transformer.py
│   └── video_clip.py
├── config.py
├── eval.py
├── experiments
│   ├── AFEW_loo_hpc.sh
│   ├── AFEW_trainsupervised_hpc.sh
│   ├── AFEW_zeroshot_hpc.sh
│   ├── DFEW_loo_hpc.sh
│   ├── DFEW_trainsupervised_hpc.sh
│   ├── DFEW_zeroshot_hpc.sh
│   ├── FERV39K_finetune_hpc.sh
│   ├── FERV39K_loo_hpc.sh
│   ├── FERV39K_train_hpc.sh
│   ├── FERV39K_trainsupervised_hpc.sh
│   ├── FERV39K_zeroshot_hpc.sh
│   ├── MAFW_finetune_hpc.sh
│   ├── MAFW_loo_hpc.sh
│   ├── MAFW_supervised_hpc.sh
│   └── MAFW_train_hpc.sh
├── figs
│   └── method_overview.svg
├── requirements.txt
├── train.py
├── train_loo.py
├── train_supervised.py
└── utils.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/*
2 | FaRL-Base-Patch16-LAIONFace20M-ep64.pth
3 | *.pyc
4 | /*.mp4
5 | wandb/*
6 | api_key.txt
7 | models/*
8 | *.yml
9 | *.json
10 |
11 | wandb/
12 | *.sh.o*
13 | !DataLoaders/*.yml
14 | venv/*
--------------------------------------------------------------------------------
/DataLoaders/AFEW.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 |
4 | import torch
5 | import torch.utils.data as data
6 | from imblearn import over_sampling
7 |
8 | from . import utils
9 |
10 | CLASSES = [
11 | "happiness",
12 | "sadness",
13 | "neutral",
14 | "anger",
15 | "surprise",
16 | "disgust",
17 | "fear"
18 | ]
19 | script_dir = os.path.dirname(__file__)
20 |
21 |
22 | class AFEW(data.Dataset):
23 | def __init__(
24 | self,
25 | root_path: str,
26 | transforms: callable = None,
27 | target_transform: callable = None,
28 | load_transform: callable = None,
29 | split: str = None
30 | ):
31 | if split not in ['train', 'test']:
32 | raise ValueError
33 | self.root_path = root_path
34 | self.split = 'validation' if split == 'test' else split
35 | self.transforms = transforms
36 | self.target_transform = target_transform
37 | self.load_transform = load_transform
38 | self.data = self._make_dataset()
39 |
40 | @property
41 | def data(self):
42 | return self._data
43 |
44 | @data.setter
45 | def data(self, data):
46 | self._data = data
47 | self.labels = [x['label'] for x in self.data]
48 | self.indices = list(range(0, len(self.data)))
49 |
50 | def __len__(self):
51 | return len(self.data)
52 |
53 | def __getitem__(self, index):
54 | """
55 | Args:
56 | index (int): Index
57 | Returns:
58 | tuple: (video, label, description)
59 | """
60 | sample = self.data[index]
61 | video_name = sample['video']
62 | label = sample['label']
63 | description = sample['descr']
64 | emotion = sample['emotion']
65 |
66 | video_path = os.path.join(*[self.root_path, 'AFEW_Face', self.split, emotion, video_name])
67 |
68 | video = utils.load_frames(video_path, time_transform=self.load_transform)
69 | if self.transforms is not None:
70 | video = self.transforms(video)
71 | if self.target_transform is not None:
72 | label = self.target_transform(label)
73 | return video, label, description
74 |
75 | def _make_dataset(self) -> list:
76 | # eg AFEW/AFEW_Face/train/Anger/0_2_001343200/frame.png
77 | videos = list(glob.glob(os.path.join(*[self.root_path, 'AFEW_Face', self.split, '*', '*'])))
78 | dataset = []
79 | for idx, row in enumerate(videos):
80 | row = row.split('/')
81 | video_idx = row[-1]
82 | emotion = row[-2]
83 | label = CLASSES.index(emotion.lower())
84 |
85 | description = ' '
86 | sample = {
87 | 'video': video_idx,
88 | 'descr': description,
89 | 'emotion': emotion,
90 | 'label': label
91 | }
92 | dataset.append(sample)
93 | return dataset
94 |
95 | def resample(self):
96 | sampler = over_sampling.RandomOverSampler()
97 | idx = torch.arange(len(self.data)).reshape(-1, 1)
98 | y = torch.tensor([sample['label'] for sample in self.data]).reshape(-1, 1)
99 | idx, _ = sampler.fit_resample(idx, y)
100 | idx = idx.reshape(-1)
101 | data = [self.data[i] for i in idx]
102 | self.data = data
103 |
--------------------------------------------------------------------------------
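The `resample` method above (repeated in all four dataset classes) balances the class distribution by oversampling *indices* rather than decoded videos: imblearn's `RandomOverSampler` duplicates the indices of minority-class samples until every class matches the majority count, and the sample list is rebuilt from those indices. A self-contained toy sketch of the same idea (the labels are illustrative):

```python
import torch
from imblearn import over_sampling

labels = [0, 0, 0, 0, 1, 2]  # toy imbalanced labels: four of class 0, one each of 1 and 2
idx = torch.arange(len(labels)).reshape(-1, 1)  # resample indices, not the data itself
y = torch.tensor(labels).reshape(-1, 1)

idx, _ = over_sampling.RandomOverSampler().fit_resample(idx, y)
print(idx.reshape(-1))  # indices 4 and 5 now each appear four times
```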
/DataLoaders/DFEW.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import json
4 |
5 | import torch
6 | import torch.utils.data as data
7 | from imblearn import over_sampling
8 |
9 | from . import utils
10 |
11 | CLASSES = [
12 | "happiness",
13 | "sadness",
14 | "neutral",
15 | "anger",
16 | "surprise",
17 | "disgust",
18 | "fear"
19 | ]
20 | script_dir = os.path.dirname(__file__)
21 |
22 |
23 | class DFEW(data.Dataset):
24 | def __init__(
25 | self,
26 | root_path: str,
27 | transforms: callable = None,
28 | target_transform: callable = None,
29 | load_transform: callable = None,
30 | split: str = None,
31 | fold: int = None
32 | ):
33 | if split not in ['train', 'test']:
34 | raise ValueError
35 | if fold not in list(range(1, 6)):
36 | raise ValueError
37 | self.root_path = root_path
38 | self.transforms = transforms
39 | self.annotation_path = os.path.join(*[
40 | self.root_path,
41 | 'DFEW_set_{}_{}.txt'.format(fold, split)
42 | ])
43 | self.target_transform = target_transform
44 | self.load_transform = load_transform
45 | self.data = self._make_dataset(
46 | self.annotation_path
47 | )
48 |
49 | @property
50 | def data(self):
51 | return self._data
52 |
53 | @data.setter
54 | def data(self, data):
55 | self._data = data
56 | self.labels = [x['label'] for x in self.data]
57 | self.indices = list(range(0, len(self.data)))
58 |
59 | def __len__(self):
60 | return len(self.data)
61 |
62 | def __getitem__(self, index):
63 | """
64 | Args:
65 | index (int): Index
66 | Returns:
67 | tuple: (video, label, description)
68 | """
69 | sample = self.data[index]
70 | video_name = sample['video']
71 | label = sample['label']
72 | description = sample['descr']
73 |
74 | video_path = os.path.join(*[self.root_path, 'DFEW_Frame_Face', video_name])
75 |
76 | video = utils.load_frames(video_path, time_transform=self.load_transform)
77 | if self.transforms is not None:
78 | video = self.transforms(video)
79 | if self.target_transform is not None:
80 | label = self.target_transform(label)
81 | return video, label, description
82 |
83 | def _make_dataset(self, annotation_path: str) -> list:
84 | annotations = utils.load_annotation(annotation_path, encoding='UTF-8', separator=' ')
85 | dataset = []
86 | for idx, row in enumerate(annotations):
87 | row = [el.replace('\n', '') for el in row]
88 | video_path = row[0]
89 | num_frames = row[1]
90 | label = int(row[2])
91 |
92 | video_info = video_path.split('/')
93 | video_idx = video_info[-1]
94 |
95 | description = ' '
96 | sample = {
97 | 'video': video_idx,
98 | 'descr': description,
99 | 'label': label
100 | }
101 | dataset.append(sample)
102 | return dataset
103 |
104 | def resample(self):
105 | sampler = over_sampling.RandomOverSampler()
106 | idx = torch.arange(len(self.data)).reshape(-1, 1)
107 | y = torch.tensor([sample['label'] for sample in self.data]).reshape(-1, 1)
108 | idx, _ = sampler.fit_resample(idx, y)
109 | idx = idx.reshape(-1)
110 | data = [self.data[i] for i in idx]
111 | self.data = data
112 |
--------------------------------------------------------------------------------
/DataLoaders/FERV39K.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import json
4 |
5 | import torch
6 | import torch.utils.data as data
7 | from imblearn import over_sampling
8 |
9 | from . import utils
10 |
11 | CLASSES = [
12 | "happiness",
13 | "sadness",
14 | "neutral",
15 | "anger",
16 | "surprise",
17 | "disgust",
18 | "fear"
19 | ]
20 |
21 |
22 | script_dir = os.path.dirname(__file__)
23 |
24 | class FERV39K(data.Dataset):
25 | def __init__(
26 | self,
27 | root_path: str,
28 | transforms: callable = None,
29 | target_transform: callable = None,
30 | load_transform: callable = None,
31 | split: str = None
32 | ):
33 | if split not in ['train', 'test']:
34 | raise ValueError
35 | self.root_path = root_path
36 | self.transforms = transforms
37 | self.annotation_path = os.path.join(self.root_path, '{}_image.txt'.format(split))
38 | self.descriptions = json.load(open(os.path.join(script_dir, '{}_descriptions.json'.format(split))))
39 | self.target_transform = target_transform
40 | self.load_transform = load_transform
41 | self.data = self._make_dataset(
42 | self.annotation_path
43 | )
44 |
45 | @property
46 | def data(self):
47 | return self._data
48 |
49 | @data.setter
50 | def data(self, data):
51 | self._data = data
52 | self.labels = [x['label'] for x in self.data]
53 | self.indices = list(range(0, len(self.data)))
54 |
55 | def __len__(self):
56 | return len(self.data)
57 |
58 | def __getitem__(self, index):
59 | """
60 | Args:
61 | index (int): Index
62 | Returns:
63 | tuple: (video, label, description)
64 | """
65 | sample = self.data[index]
66 | video_name = sample['video']
67 | label = sample['label']
68 | description = sample['descr']
69 | video_type = sample['video_type']
70 | emotion = sample['emotion']
71 |
72 | video_path = os.path.join(*[self.root_path, '2_ClipsforFaceCrop', video_type, emotion, video_name])
73 |
74 | video = utils.load_frames(video_path, time_transform=self.load_transform)
75 | if self.transforms is not None:
76 | video = self.transforms(video)
77 | if self.target_transform is not None:
78 | label = self.target_transform(label)
79 | return video, label, description
80 |
81 |
82 |
83 | def _make_dataset(self, annotation_path: str) -> list:
84 | annotations = utils.load_annotation(annotation_path, encoding='UTF-8', separator=' ')
85 | dataset = []
86 | for idx, row in enumerate(annotations):
87 | row = [el.replace('\n', '') for el in row]
88 | video_path = row[0]
89 | num_frames = row[1]
90 | label = int(row[2])
91 |
92 | video_info = video_path.split('/')
93 | video_idx = video_info[-1]
94 | video_type = video_info[-3]
95 | emotion = video_info[-2]
96 |
97 | key = '_'.join([video_type, emotion, video_idx])
98 | description = self.descriptions[key]
99 | sample = {
100 | 'video': video_idx,
101 | 'descr': description,
102 | 'label': label,
103 | 'video_type': video_type,
104 | 'emotion': emotion
105 | }
106 | dataset.append(sample)
107 | del self.descriptions
108 | return dataset
109 |
110 | def resample(self):
111 | sampler = over_sampling.RandomOverSampler()
112 | idx = torch.arange(len(self.data)).reshape(-1, 1)
113 | y = torch.tensor([sample['label'] for sample in self.data]).reshape(-1, 1)
114 | idx, _ = sampler.fit_resample(idx, y)
115 | idx = idx.reshape(-1)
116 | data = [self.data[i] for i in idx]
117 | self.data = data
118 |
--------------------------------------------------------------------------------
/DataLoaders/MAFW.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import torch
4 | import torch.utils.data as data
5 | from imblearn import over_sampling
6 |
7 | from . import utils
8 |
9 |
10 | CLASSES = [
11 | "anger",
12 | "disgust",
13 | "fear",
14 | "happiness",
15 | "neutral",
16 | "sadness",
17 | "surprise",
18 | "contempt",
19 | "anxiety",
20 | "helplessness",
21 | "disappointment"
22 | ]
23 |
24 | """
25 | NEUTRAL_DESCR = [
26 | "Neutral facial expressions are characterized by a lack of emotional expression, as if the person's face is in a resting state.",
27 | "The facial muscles are generally relaxed, creating a smooth and even appearance.",
28 | "The mouth is typically closed or slightly open, with the lips not turned up or down.",
29 | "The eyebrows are in a neutral position, not furrowed or raised, and the eyes are generally looking straight ahead or slightly down.",
30 | "While the face may not show any specific emotions, the expression can still convey a sense of attentiveness or alertness."
31 | ]
32 | """
33 |
34 |
35 | class MAFW(data.Dataset):
36 | def __init__(
37 | self,
38 | root_path: str,
39 | transforms: callable = None,
40 | target_transform: callable = None,
41 | load_transform: callable = None,
42 | fold: int = None,
43 | split: str = None,
44 | label_type: str = None,
45 | caption: bool = True
46 | ):
47 | super().__init__()
48 | if fold not in list(range(1, 6)):
49 | raise ValueError
50 | if split not in ['train', 'test']:
51 | raise ValueError
52 | if label_type not in ['compound', 'single']:
53 | raise ValueError
54 | self.root_path = root_path
55 | self.annotation_path = os.path.join(*[
56 | self.root_path,
57 | 'Train_Test_Set',
58 | label_type,
59 | 'with_caption_new' if caption else 'no_caption_new',
60 | 'set_{}'.format(fold),
61 | '{}.txt'.format(split)
62 | ])
63 | self.transforms = transforms
64 | self.target_transform = target_transform
65 | self.load_transform = load_transform
66 | self.data = self._make_dataset(
67 | self.annotation_path
68 | )
69 |
70 | @property
71 | def data(self):
72 | return self._data
73 |
74 | @data.setter
75 | def data(self, data):
76 | self._data = data
77 | self.labels = [x['label'] for x in self.data]
78 | self.indices = list(range(0, len(self.data)))
79 |
80 | def __len__(self):
81 | return len(self.data)
82 |
83 | def __getitem__(self, index):
84 | """
85 | Args:
86 | index (int): Index
87 | Returns:
88 | tuple: (video, label, description)
89 | """
90 | sample = self.data[index]
91 | video_name = sample['video']
92 | label = sample['label']
93 | description = sample['descr']
94 |
95 | video_path = os.path.join(*[self.root_path, 'data', 'faces', video_name])
96 | video = utils.load_frames(video_path, time_transform=self.load_transform)
97 | if self.transforms is not None:
98 | video = self.transforms(video)
99 | if self.target_transform is not None:
100 | label = self.target_transform(label)
101 | return video, label, description
102 |
103 | @staticmethod
104 | def _make_dataset(
105 | annotation_path: str
106 | ) -> list:
107 | annotations = utils.load_annotation(annotation_path)
108 | dataset = []
109 | for idx, row in enumerate(annotations):
110 | row = [el.replace('\n', '') for el in row]
111 | if len(row) == 4:
112 | filename = row[0].split('.')[0]
113 | emotion = row[1].split('_')
114 | description = row[3]
115 | else:
116 | filename = row[0].split('.')[0]
117 | emotion = row[1].split('_')
118 | description = "-"
119 | label = [CLASSES.index(emo) for emo in emotion]
120 | sample = {
121 | 'video': filename,
122 | 'descr': description,
123 | 'label': label
124 | }
125 | dataset.append(sample)
126 | return dataset
127 |
128 | def resample(self):
129 | sampler = over_sampling.RandomOverSampler()
130 | idx = torch.arange(len(self.data)).reshape(-1, 1)
131 | y = torch.tensor([sample['label'] for sample in self.data]).reshape(-1, 1)
132 | idx, _ = sampler.fit_resample(idx, y)
133 | idx = idx.reshape(-1)
134 | data = [self.data[i] for i in idx]
135 | self.data = data
136 |
--------------------------------------------------------------------------------
/DataLoaders/__init__.py:
--------------------------------------------------------------------------------
1 | from .get_loader import get_loaders
2 | from .get_loader import CLASSES
3 | from .get_loader import CLASS_DESCRIPTION
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/DataLoaders/class_descriptions.yml:
--------------------------------------------------------------------------------
1 | anger: "A facial expression showing irritation and unrest, with a wrinkled forehead, narrowed eyes, and tight lips or a frown"
2 | disgust: "An expression of repulsion and displeasure, with a raised upper lip, a scrunched nose, and a downturned mouth"
3 | fear: "An expression of tension and withdrawal, with wide-open eyes, raised eyebrows, and a slightly open mouth. The face may appear physically tense or frozen in fear"
4 | happiness: "An expression of contentment and pleasure, with a smile and the corners of the mouth turned up, often accompanied by crinkling around the eyes. The face may appear relaxed and at ease"
5 | neutral: "An expression of calm and neutrality, with a neutral mouth and no particular indication of emotion. The eyebrows are usually not raised or furrowed"
6 | sadness: "An expression of sadness and sorrow, with a downturned mouth or frown, and sometimes tears or a tightness around the eyes. The face may appear physically withdrawn or resigned"
7 | surprise: "An expression of shock and astonishment, with wide-open eyes and raised eyebrows, sometimes accompanied by a gasp or an open mouth"
8 | contempt: "An expression of disdain and superiority, with a slight smirk or sneer, often accompanied by a raised eyebrow or a lopsided smile"
9 | anxiety: "An expression of worry and apprehension, with furrowed eyebrows and a tight mouth. The eyes may appear wide and darting, and the face may appear physically tense or worried"
10 | helplessness: "An expression of defeat and resignation, with the eyes looking down and the mouth turned down. The eyebrows may be furrowed, and the face may appear defeated or resigned"
11 | disappointment: "An expression of frustration and disillusionment, with a slight frown or drooping of the mouth. The eyebrows may be lowered or furrowed, and the face may appear physically drawn or tired"
12 |
--------------------------------------------------------------------------------
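These class-level descriptions are what the zero-shot path compares each clip against: every description is tokenised and embedded with the CLIP text encoder, and a video is assigned the class whose description embedding has the highest cosine similarity. A hedged sketch of that lookup, using a random stand-in for the video feature that `VClip.encode_video` would normally produce (run from the repo root so the YAML path resolves):

```python
from pathlib import Path

import clip
import torch
import yaml

device = "cuda" if torch.cuda.is_available() else "cpu"
model, _ = clip.load("ViT-B/32", device=device, jit=False)

descriptions = yaml.safe_load(Path("DataLoaders/class_descriptions.yml").read_text())
classes = list(descriptions)
tokens = clip.tokenize(list(descriptions.values()), truncate=True).to(device)

with torch.no_grad():
    text_feat = model.encode_text(tokens)
    text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True)

# random stand-in for a clip-level video embedding, just to exercise the lookup
video_feat = torch.randn(1, text_feat.shape[-1], device=device, dtype=text_feat.dtype)
video_feat = video_feat / video_feat.norm(dim=-1, keepdim=True)

pred = (video_feat @ text_feat.t()).argmax(dim=-1)
print(classes[pred.item()])
```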
/DataLoaders/get_loader.py:
--------------------------------------------------------------------------------
1 | import json
2 | import yaml
3 | from pathlib import Path
4 | import torch
5 | from torch.utils import data
6 | from torch.utils.data.distributed import DistributedSampler
7 | import torchvision
8 |
9 | from .MAFW import MAFW
10 | from .FERV39K import FERV39K
11 | from .DFEW import DFEW
12 | from .AFEW import AFEW
13 | from .utils import video_collate
14 | from .utils import TemporalDownSample
15 | from .utils import RandomSequence
16 |
17 | from .MAFW import CLASSES as MAFW_CLASSES
18 | from .FERV39K import CLASSES as FERV_CLASSES
19 | from .DFEW import CLASSES as DFEW_CLASSES
20 | from .AFEW import CLASSES as AFEW_CLASSES
21 |
22 | CLASSES = list()
23 | CLASS_DESCRIPTION = list()
24 |
25 |
26 | def set_classinfo(cnf):
27 | class_descr = yaml.safe_load(Path('DataLoaders/class_descriptions.yml').read_text())
28 | if cnf.dataset_name == 'MAFW':
29 | CLASSES.extend(MAFW_CLASSES)
30 | elif cnf.dataset_name == 'FERV39K':
31 | CLASSES.extend(FERV_CLASSES)
32 | elif cnf.dataset_name == 'DFEW':
33 | CLASSES.extend(DFEW_CLASSES)
34 | elif cnf.dataset_name == 'AFEW':
35 | CLASSES.extend(AFEW_CLASSES)
36 | for cls in CLASSES:
37 | CLASS_DESCRIPTION.append(class_descr[cls])
38 |
39 |
40 | def get_loaders(cnf, **kwargs):
41 | set_classinfo(cnf)
42 | if cnf.dataset_name == 'MAFW':
43 | return get_mafw_loaders(cnf)
44 | elif cnf.dataset_name == 'FERV39K':
45 | return get_ferv39k_loaders(cnf)
46 | elif cnf.dataset_name == 'DFEW':
47 | return get_dfew_loaders(cnf)
48 | elif cnf.dataset_name == 'AFEW':
49 | return get_afew_loaders(cnf)
50 | else:
51 | raise NotImplementedError
52 |
53 |
54 | def get_dfew_loaders(cnf):
55 | transforms = torchvision.transforms.Compose(
56 | [
57 | torchvision.transforms.Normalize(
58 | mean=(0.48145466, 0.4578275, 0.40821073),
59 | std=(0.26862954, 0.26130258, 0.27577711)
60 | ),
61 | torchvision.transforms.Resize(
62 | size=224,
63 | interpolation=torchvision.transforms.InterpolationMode.BICUBIC,
64 | max_size=None,
65 | antialias=None
66 | ),
67 | torchvision.transforms.CenterCrop(size=(224, 224)),
68 | # torchvision.transforms.ColorJitter(brightness=0.5, hue=.2),
69 | torchvision.transforms.RandomRotation(6),
70 | torchvision.transforms.RandomHorizontalFlip()
71 | ]
72 | )
73 |
74 | load_transform = torchvision.transforms.Compose(
75 | [
76 | #RandomRoll(),
77 | TemporalDownSample(cnf.downsample),
78 | RandomSequence(cnf.clip_len, on_load=True)
79 | ]
80 | )
81 | dfew_train = DFEW(
82 | root_path=cnf.dataset_root,
83 | transforms=transforms,
84 | target_transform=None,
85 | load_transform=load_transform,
86 | split='train',
87 | fold=cnf.fold
88 | )
89 | if cnf.resample:
90 | dfew_train.resample()
91 | transforms = torchvision.transforms.Compose(
92 | [
93 | torchvision.transforms.Normalize(
94 | mean=(0.48145466, 0.4578275, 0.40821073),
95 | std=(0.26862954, 0.26130258, 0.27577711)
96 | ),
97 | torchvision.transforms.Resize(
98 | size=224,
99 | interpolation=torchvision.transforms.InterpolationMode.BICUBIC,
100 | max_size=None,
101 | antialias=None
102 | ),
103 | torchvision.transforms.CenterCrop(size=(224, 224))
104 | ]
105 | )
106 | dfew_test = DFEW(
107 | root_path=cnf.dataset_root,
108 | transforms=transforms,
109 | target_transform=None,
110 | load_transform=load_transform,
111 | split='test',
112 | fold=cnf.fold
113 | )
114 | if cnf.DDP:
115 | train_sampler = DistributedSampler(dfew_train)
116 | test_sampler = DistributedSampler(dfew_test)
117 | else:
118 | train_sampler = None
119 | test_sampler = None
120 |
121 | # print(len(train_sampler))
122 | trainloader = data.DataLoader(
123 | dfew_train,
124 | batch_size=cnf.batch_size,
125 | collate_fn=video_collate,
126 | num_workers=2,
127 | drop_last=True,
128 | sampler=train_sampler
129 | )
130 | testloader = data.DataLoader(
131 | dfew_test,
132 | batch_size=cnf.batch_size,
133 | collate_fn=video_collate,
134 | num_workers=2,
135 | sampler=test_sampler
136 | )
137 |
138 | print('Train Samples: {}, Test samples: {}'.format(len(dfew_train), len(dfew_test)))
139 | return trainloader, testloader
140 |
141 |
142 | def get_afew_loaders(cnf):
143 | transforms = torchvision.transforms.Compose(
144 | [
145 | torchvision.transforms.Normalize(
146 | mean=(0.48145466, 0.4578275, 0.40821073),
147 | std=(0.26862954, 0.26130258, 0.27577711)
148 | ),
149 | torchvision.transforms.Resize(
150 | size=224,
151 | interpolation=torchvision.transforms.InterpolationMode.BICUBIC,
152 | max_size=None,
153 | antialias=None
154 | ),
155 | torchvision.transforms.CenterCrop(size=(224, 224)),
156 | # torchvision.transforms.ColorJitter(brightness=0.5, hue=.2),
157 | torchvision.transforms.RandomRotation(6),
158 | torchvision.transforms.RandomHorizontalFlip()
159 | ]
160 | )
161 |
162 | load_transform = torchvision.transforms.Compose(
163 | [
164 | #RandomRoll(),
165 | TemporalDownSample(cnf.downsample),
166 | RandomSequence(cnf.clip_len, on_load=True)
167 | ]
168 | )
169 | afew_train = AFEW(
170 | root_path=cnf.dataset_root,
171 | transforms=transforms,
172 | target_transform=None,
173 | load_transform=load_transform,
174 | split='train'
175 | )
176 | if cnf.resample:
177 | afew_train.resample()
178 | transforms = torchvision.transforms.Compose(
179 | [
180 | torchvision.transforms.Normalize(
181 | mean=(0.48145466, 0.4578275, 0.40821073),
182 | std=(0.26862954, 0.26130258, 0.27577711)
183 | ),
184 | torchvision.transforms.Resize(
185 | size=224,
186 | interpolation=torchvision.transforms.InterpolationMode.BICUBIC,
187 | max_size=None,
188 | antialias=None
189 | ),
190 | torchvision.transforms.CenterCrop(size=(224, 224))
191 | ]
192 | )
193 | afew_test = AFEW(
194 | root_path=cnf.dataset_root,
195 | transforms=transforms,
196 | target_transform=None,
197 | load_transform=load_transform,
198 | split='test'
199 | )
200 | if cnf.DDP:
201 | train_sampler = DistributedSampler(afew_train)
202 | test_sampler = DistributedSampler(afew_test)
203 | else:
204 | train_sampler = None
205 | test_sampler = None
206 |
207 | # print(len(train_sampler))
208 | trainloader = data.DataLoader(
209 | afew_train,
210 | batch_size=cnf.batch_size,
211 | collate_fn=video_collate,
212 | num_workers=2,
213 | drop_last=True,
214 | sampler=train_sampler
215 | )
216 | testloader = data.DataLoader(
217 | afew_test,
218 | batch_size=cnf.batch_size,
219 | collate_fn=video_collate,
220 | num_workers=2,
221 | sampler=test_sampler
222 | )
223 |
224 | print('Train Samples: {}, Test samples: {}'.format(len(afew_train), len(afew_test)))
225 | return trainloader, testloader
226 |
227 |
228 | def get_ferv39k_loaders(cnf):
229 | transforms = torchvision.transforms.Compose(
230 | [
231 | torchvision.transforms.Normalize(
232 | mean=(0.48145466, 0.4578275, 0.40821073),
233 | std=(0.26862954, 0.26130258, 0.27577711)
234 | ),
235 | torchvision.transforms.Resize(
236 | size=224,
237 | interpolation=torchvision.transforms.InterpolationMode.BICUBIC,
238 | max_size=None,
239 | antialias=None
240 | ),
241 | torchvision.transforms.CenterCrop(size=(224, 224)),
242 | # torchvision.transforms.ColorJitter(brightness=0.5, hue=.2),
243 | torchvision.transforms.RandomRotation(6),
244 | torchvision.transforms.RandomHorizontalFlip()
245 | ]
246 | )
247 |
248 | load_transform = torchvision.transforms.Compose(
249 | [
250 | #RandomRoll(),
251 | TemporalDownSample(cnf.downsample),
252 | RandomSequence(cnf.clip_len, on_load=True)
253 | ]
254 | )
255 | ferv_train = FERV39K(
256 | root_path=cnf.dataset_root,
257 | transforms=transforms,
258 | target_transform=None,
259 | load_transform=load_transform,
260 | split='train'
261 | )
262 | if cnf.resample:
263 | ferv_train.resample()
264 | transforms = torchvision.transforms.Compose(
265 | [
266 | torchvision.transforms.Normalize(
267 | mean=(0.48145466, 0.4578275, 0.40821073),
268 | std=(0.26862954, 0.26130258, 0.27577711)
269 | ),
270 | torchvision.transforms.Resize(
271 | size=224,
272 | interpolation=torchvision.transforms.InterpolationMode.BICUBIC,
273 | max_size=None,
274 | antialias=None
275 | ),
276 | torchvision.transforms.CenterCrop(size=(224, 224))
277 | ]
278 | )
279 | ferv_test = FERV39K(
280 | root_path=cnf.dataset_root,
281 | transforms=transforms,
282 | target_transform=None,
283 | load_transform=load_transform,
284 | split='test'
285 | )
286 | if cnf.DDP:
287 | train_sampler = DistributedSampler(ferv_train)
288 | test_sampler = DistributedSampler(ferv_test)
289 | else:
290 | train_sampler = None
291 | test_sampler = None
292 |
293 | # print(len(train_sampler))
294 | trainloader = data.DataLoader(
295 | ferv_train,
296 | batch_size=cnf.batch_size,
297 | collate_fn=video_collate,
298 | num_workers=2,
299 | drop_last=True,
300 | sampler=train_sampler
301 | )
302 | testloader = data.DataLoader(
303 | ferv_test,
304 | batch_size=cnf.batch_size,
305 | collate_fn=video_collate,
306 | num_workers=2,
307 | sampler=test_sampler
308 | )
309 |
310 | print('Train Samples: {}, Test samples: {}'.format(len(ferv_train), len(ferv_test)))
311 | return trainloader, testloader
312 |
313 |
314 | def get_mafw_loaders(cnf):
315 | transforms = torchvision.transforms.Compose(
316 | [
317 | torchvision.transforms.Normalize(
318 | mean=(0.48145466, 0.4578275, 0.40821073),
319 | std=(0.26862954, 0.26130258, 0.27577711)
320 | ),
321 | torchvision.transforms.Resize(
322 | size=224,
323 | interpolation=torchvision.transforms.InterpolationMode.BICUBIC,
324 | max_size=None,
325 | antialias=None
326 | ),
327 | torchvision.transforms.CenterCrop(size=(224, 224)),
328 | # torchvision.transforms.ColorJitter(0.2, 0.2, 0.2),
329 | # torchvision.transforms.ColorJitter(brightness=0.5),
330 | torchvision.transforms.RandomRotation(6),
331 | torchvision.transforms.RandomHorizontalFlip()
332 | ]
333 | )
334 |
335 | load_transform = torchvision.transforms.Compose(
336 | [
337 | #RandomRoll(),
338 | TemporalDownSample(cnf.downsample),
339 | RandomSequence(cnf.clip_len, on_load=True)
340 | ]
341 | )
342 | mafw_train = MAFW(
343 | root_path=cnf.dataset_root,
344 | transforms=transforms,
345 | target_transform=None,
346 | load_transform=load_transform,
347 | fold=cnf.fold,
348 | split='train',
349 | label_type='single'
350 | )
351 | if cnf.resample:
352 | mafw_train.resample()
353 | transforms = torchvision.transforms.Compose(
354 | [
355 | torchvision.transforms.Normalize(
356 | mean=(0.48145466, 0.4578275, 0.40821073),
357 | std=(0.26862954, 0.26130258, 0.27577711)
358 | ),
359 | torchvision.transforms.Resize(
360 | size=224,
361 | interpolation=torchvision.transforms.InterpolationMode.BICUBIC,
362 | max_size=None,
363 | antialias=None
364 | ),
365 | torchvision.transforms.CenterCrop(size=(224, 224))
366 | ]
367 | )
368 | mafw_test = MAFW(
369 | root_path=cnf.dataset_root,
370 | transforms=transforms,
371 | target_transform=None,
372 | load_transform=load_transform,
373 | fold=cnf.fold,
374 | split='test',
375 | label_type='single',
376 | caption=False
377 | )
378 | if cnf.DDP:
379 | train_sampler = DistributedSampler(mafw_train)
380 | test_sampler = DistributedSampler(mafw_test)
381 | else:
382 | train_sampler = None
383 | test_sampler = None
384 |
385 | # print(len(train_sampler))
386 | trainloader = data.DataLoader(
387 | mafw_train,
388 | batch_size=cnf.batch_size,
389 | collate_fn=video_collate,
390 | # shuffle=True,
391 | num_workers=2,
392 | drop_last=True,
393 | sampler=train_sampler
394 | )
395 | testloader = data.DataLoader(
396 | mafw_test,
397 | batch_size=cnf.batch_size,
398 | collate_fn=video_collate,
399 | # shuffle=True,
400 | num_workers=2,
401 | sampler=test_sampler
402 | )
403 |
404 | print('Train Samples: {}, Test samples: {}'.format(len(mafw_train), len(mafw_test)))
405 | return trainloader, testloader
406 |
407 |
--------------------------------------------------------------------------------
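For orientation, a minimal sketch of driving these loader factories outside the training scripts. Normally `cnf` comes from `config.get_config`; here it is mocked with an `argparse.Namespace` carrying just the fields that `get_loaders` and the dataset classes read (the values are illustrative, and the script must run from the repo root so `DataLoaders/class_descriptions.yml` resolves):

```python
from argparse import Namespace

from DataLoaders import get_loaders, CLASSES, CLASS_DESCRIPTION

cnf = Namespace(
    dataset_name='DFEW',        # one of: MAFW, FERV39K, DFEW, AFEW
    dataset_root='/data/DFEW',  # hypothetical dataset location
    fold=1,                     # used by the 5-fold DFEW/MAFW protocols
    downsample=2,               # TemporalDownSample: keep every 2nd frame
    clip_len=32,                # RandomSequence: frames per sampled clip
    resample=False,             # True -> oversample minority classes
    DDP=False,                  # no DistributedSampler
    batch_size=16,
)

trainloader, testloader = get_loaders(cnf)
videos, labels, descriptions = next(iter(trainloader))
print(videos.shape, labels.shape, len(descriptions))  # (B, T, 3, 224, 224), (B,), B
```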
/DataLoaders/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import json
4 | from PIL import Image
5 | import torch
6 | from torchvision import io
7 | from torchvision import transforms
8 | from torch.nn.utils import rnn
9 |
10 |
11 | def load_annotation(file_path: str, encoding="GBK", separator='\t'):
12 | annotations = list()
13 | with open(file_path, 'r', encoding=encoding) as f:  # 'rU' mode was removed in Python 3.11
14 | for ele in f:
15 | line = ele.split(separator)
16 | annotations.append(line)
17 | return annotations
18 |
19 |
20 | def load_video(file_path: str):
21 | video, _, _ = io.read_video(file_path, pts_unit='sec', output_format='TCHW')
22 | video = video.float()
23 | video /= 255
24 | return video
25 |
26 |
27 | def load_frames(folder_path: str, time_transform: callable = None):
28 | frames = os.listdir(folder_path)
29 | frames.sort()
30 | video = list()
31 | if time_transform is not None:
32 | frames = time_transform(frames)
33 | for frame_file in frames:
34 | frame_pth = os.path.join(folder_path, frame_file)
35 | toTensor = transforms.Compose([
36 | transforms.ToTensor()
37 | ])
38 | frame = toTensor(pil_loader(frame_pth))
39 | video.append(frame)
40 | video = torch.stack(video)
41 | return video
42 |
43 |
44 | def pil_loader(path: str):
45 | with open(path, 'rb') as f:
46 | with Image.open(f) as img:
47 | return img.convert('RGB')
48 |
49 |
50 | def load_annotation_data(data_file_path: str) -> dict:
51 | with open(data_file_path, 'r') as data_file:
52 | return json.load(data_file)
53 |
54 |
55 | def get_video_names_and_annotations(data_dict: dict, subset: str = None) -> tuple:
56 | video_names = []
57 | annotations = []
58 | for key, value in data_dict['database'].items():
59 | if subset:
60 | if not data_dict['database'][key]['subset'] == subset:
61 | continue
62 | video_names.append('{0}'.format(key))
63 | annotations.append(value['annotations'])
64 | return video_names, annotations
65 |
66 |
67 | def get_file_names(path: str, file_extension: str) -> list:
68 | filenames = list()
69 | for root, dirs, files in os.walk(path):
70 | for filename in files:
71 | if filename.endswith(file_extension):
72 | filenames.append(os.path.join(root, filename))
73 | return filenames
74 |
75 |
76 | def video_loader(video_dir_path: str, image_loader: callable, **kwargs) -> list:
77 | video = []
78 | frames = os.listdir(video_dir_path)
79 | frames.sort()
80 | idx = np.zeros(len(frames))
81 | for i, frame in enumerate(frames):
82 | if frame.endswith('.jpg'):
83 | idx[i] = 1
84 | idx = (idx == 1)
85 | frames = [b for a, b in zip(idx, frames) if a]
86 | if 'time_transform' in kwargs:
87 | frames = kwargs['time_transform'](frames)
88 | for frame in frames:
89 | image_path = os.path.join(video_dir_path, frame)
90 | video.append(image_loader(image_path))
91 | return video
92 |
93 |
94 | def video_collate(batch):
95 | x_data = [item[0] for item in batch]
96 | target = [item[1] for item in batch]
97 | descr = [item[2] for item in batch]
98 | x_data = rnn.pad_sequence(x_data, batch_first=True)
99 | target = torch.tensor(target)
100 | return x_data, target, descr
101 |
102 |
103 | def series_collate(batch):
104 | x_data = [item[0] for item in batch]
105 | target = [item[1] for item in batch]
106 | idx = [item[2] for item in batch]
107 | target = torch.stack(target, 0)
108 | x_data = torch.stack(x_data)
109 | return x_data, target, idx
110 |
111 |
112 | class TemporalDownSample(object):
113 | def __init__(self, factor: int):
114 | self.factor = factor
115 |
116 | def __call__(self, clip: torch.tensor):
117 | if isinstance(clip, list):
118 | clip = np.asarray(clip)
119 | idx = range(clip.shape[0])
120 | idx = [(idi % self.factor) == 0 for idi in idx]
121 | return clip[idx]
122 |
123 |
124 | class RandomRoll(object):
125 | def __init__(self, seed=0):
126 | self.seed = seed
127 |
128 | def __call__(self, seq: torch.tensor):
129 | if isinstance(seq, list):
130 | seq = np.asarray(seq)
131 | start_idx = np.random.randint(0, seq.shape[0], dtype=int)
132 | return np.concatenate([seq[start_idx:], seq[:start_idx]])
133 |
134 |
135 | class RandomSequence(object):
136 | def __init__(self, seq_size, on_load=False):
137 | self.seq_size = seq_size
138 | self.on_load = on_load
139 |
140 | def __call__(self, clip: torch.tensor):
141 | if isinstance(clip, list):
142 | clip = np.asarray(clip)
143 | if self.on_load:
144 | return self.call_on_load(clip)
145 | else:
146 | return self.call_on_video(clip)
147 |
148 | def call_on_video(self, clip: torch.tensor):
149 | rnd_start = torch.randint(len(clip), (1,))
150 | end_idx = rnd_start+self.seq_size
151 | if end_idx < len(clip):
152 | new_clip = clip[rnd_start:end_idx]
153 | else:
154 | end_idx -= len(clip)
155 | new_clip = torch.cat((clip[rnd_start:], clip[:end_idx]))
156 | if len(new_clip) < self.seq_size:
157 | pad = self.seq_size - len(new_clip)
158 | new_clip = torch.cat((new_clip, new_clip[:pad]))
159 | return new_clip
160 |
161 | def call_on_load(self, clip: torch.tensor):
162 | rnd_start = torch.randint(len(clip), (1,))
163 | end_idx = rnd_start+self.seq_size
164 | if end_idx < len(clip):
165 | new_clip = clip[rnd_start:end_idx]
166 | else:
167 | end_idx -= len(clip)
168 | new_clip = np.concatenate((clip[rnd_start:], clip[:end_idx]))
169 | if len(new_clip) < self.seq_size:
170 | pad = self.seq_size - len(new_clip)
171 | new_clip = np.pad(new_clip, (0, pad), 'reflect')
172 | return new_clip
173 |
174 |
175 | class IgnoreFiles(object):
176 | def __init__(self, pattern):
177 | self.pattern = pattern
178 |
179 | def __call__(self, clip):
180 | if not isinstance(clip, np.ndarray):
181 | clip = np.array(clip, dtype=object)
182 | idx = [self.pattern not in frame for frame in clip]
183 | return clip[idx]
184 |
--------------------------------------------------------------------------------
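Note that `TemporalDownSample` and `RandomSequence(..., on_load=True)` are applied to the *list of frame filenames* before any image is decoded (see `load_frames` above), so the temporal sampling costs no extra I/O. A quick sketch of the composed pipeline that `get_loader.py` builds, run on hypothetical filenames:

```python
import torchvision

from DataLoaders.utils import TemporalDownSample, RandomSequence

load_transform = torchvision.transforms.Compose([
    TemporalDownSample(2),            # keep every 2nd frame name
    RandomSequence(32, on_load=True)  # then cut a random 32-frame window
])

frames = [f"{i:05d}.png" for i in range(300)]  # hypothetical frame files
sampled = load_transform(frames)
print(len(sampled))  # 32 -- only these filenames are then decoded
```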
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2023 Niki Maria Foteinopoulou
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [Papers with Code: zero-shot facial expression recognition leaderboard](https://paperswithcode.com/sota/zero-shot-facial-expression-recognition-on?p=emoclip-a-vision-language-method-for-zero)
2 | # EmoCLIP: A Vision-Language Method for Zero-Shot Video Facial Expression Recognition
3 |
4 | Authors' official PyTorch implementation of **[EmoCLIP: A Vision-Language Method for Zero-Shot Video Facial Expression Recognition](https://arxiv.org/abs/2310.16640)**. If you use this code for your research, please [**cite**](#citation) our paper.
5 |
6 | > **EmoCLIP: A Vision-Language Method for Zero-Shot Video Facial Expression Recognition**
7 | > Niki Maria Foteinopoulou and Ioannis Patras
8 | >
9 | > ![method overview](figs/method_overview.svg)
10 | >
11 | > **Abstract**: Facial Expression Recognition (FER) is a crucial task in affective computing, but its conventional focus on the seven basic emotions limits its applicability to the complex and expanding emotional spectrum. To address the issue of new and unseen emotions present in dynamic in-the-wild FER, we propose a novel vision-language model that utilises sample-level text descriptions (i.e. captions of the context, expressions or emotional cues), as natural language supervision, aiming to enhance the learning of rich latent representations, for zero-shot classification. To test this, we evaluate using zero-shot classification of the model trained on sample-level descriptions, on four popular dynamic FER datasets. Our findings show that this approach yields significant improvements when compared to baseline methods. Specifically, for zero-shot video FER, we outperform CLIP by over 10\% in terms of Weighted Average Recall and 5\% in terms of Unweighted Average Recall on several datasets. Furthermore, we evaluate the representations obtained from the network trained using sample-level descriptions on the downstream task of mental health symptom estimation, achieving performance comparable or superior to state-of-the-art methods and strong agreement with human experts. Namely, we achieve a Pearson's Correlation Coefficient of up to 0.85, which is comparable to human experts' agreement.
12 |
13 |
14 | ## Overview
15 |
16 |
17 | In a nutshell, we follow the CLIP contrastive training paradigm to jointly optimise a video and a
18 | text encoder. The video and text encoders of the network are jointly trained using a contrastive loss over
19 | the cosine similarities of the video-text pairings in the mini-batch.
20 | More specifically, the video encoder ($E_V$) is composed of the CLIP image encoder ($E_I$) and a Transformer Encoder,
21 | to learn the temporal relationships of the frame spatial representations. The text encoder ($E_T$) used in our approach
22 | is the CLIP text encoder. The weights of the image and text encoders in our model are initialised using the
23 | large pre-trained weights of CLIP, as FER datasets are not large enough to train a VLM from scratch with adequate
24 | generalisation. Contrary to previous video VLM works in both action recognition and FER, we propose using
25 | sample-level descriptions for better representation learning, rather than embeddings of class prototypes. This leads to more
26 | semantically rich representations, which in turn allow for better generalisation.
27 |
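To make the objective concrete, here is a minimal sketch of the symmetric contrastive loss that CLIP-style training uses, where the matched video-description pairs sit on the diagonal of the similarity matrix. This is an illustrative reimplementation, not an excerpt from the training scripts:

```python
import torch
import torch.nn.functional as F

def clip_contrastive_loss(logits_per_video: torch.Tensor) -> torch.Tensor:
    # logits_per_video[i, j]: scaled cosine similarity between video i and description j;
    # the i-th description is the positive for the i-th video.
    targets = torch.arange(logits_per_video.size(0), device=logits_per_video.device)
    loss_v = F.cross_entropy(logits_per_video, targets)      # video -> text direction
    loss_t = F.cross_entropy(logits_per_video.t(), targets)  # text -> video direction
    return (loss_v + loss_t) / 2
```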
28 | 29 | 30 | ## Installation 31 | 32 | We recommend installing the required packages using python's native virtual environment as follows: 33 | 34 | ```bash 35 | $ python -m venv venv 36 | $ source venv/bin/activate 37 | (venv) $ pip install --upgrade pip 38 | (venv) $ pip install -r requirements.txt 39 | ``` 40 | 41 | For using the aforementioned virtual environment in a Jupyter Notebook, you need to manually add the kernel as follows: 42 | 43 | ```bash 44 | (venv) $ python -m ipykernel install --user --name=venv 45 | ``` 46 | 47 | ## Downstream task weights: 48 | The weights used for the downstream task (without the FC layer) can be found [here](https://drive.google.com/file/d/1cPgE0FlBCw5cvXq4-YEM6tUhE1-j8MS3/view?usp=drive_link) 49 | 50 | ## Acknowledgements 51 | 52 | This work is supported by EPSRC DTP studentship (No. EP/R513106/1) and EU H2020 AI4Media (No. 951911). 53 | This research utilised Queen Mary's Apocrita HPC facility, supported by QMUL Research-IT. http://doi.org/10.5281/zenodo.438045 54 | 55 | ## Citation 56 | 57 | ```bibtex 58 | @inproceedings{foteinopoulou_emoclip_2024, 59 | title = {{EmoCLIP}: {A} {Vision}-{Language} {Method} for {Zero}-{Shot} {Video} {Facial} {Expression} {Recognition}}, 60 | author = {Foteinopoulou, Niki Maria and Patras, Ioannis}, 61 | year = {2024}, 62 | booktitle={2024 IEEE 18th International Conference on Automatic Face and Gesture Recognition (FG)} 63 | } 64 | 65 | ``` 66 | 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /architecture/__init__.py: -------------------------------------------------------------------------------- 1 | from .video_clip import VClip 2 | from .downstream import DownstreamTask 3 | -------------------------------------------------------------------------------- /architecture/downstream.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | device = "cuda" if torch.cuda.is_available() else "cpu" 5 | 6 | class DownstreamTask(nn.Module): 7 | def __init__(self, clip_model, d_model: int = 512, n_classes: int = 7): 8 | super().__init__() 9 | self.backbone = clip_model.backbone.visual 10 | self.temporal = clip_model.temporal 11 | for name, param in self.backbone.named_parameters(): 12 | param.requires_grad = False 13 | 14 | self.mlp = nn.Sequential( 15 | nn.Linear(d_model, d_model // 2), 16 | nn.ReLU(), 17 | nn.Linear(d_model // 2, n_classes) 18 | ) 19 | 20 | def encode_video(self, x): 21 | B, T, C, H, W = x.shape 22 | x = x.reshape(B*T, C, H, W) 23 | v = self.backbone(x).reshape(B, T, -1) 24 | v = self.temporal(v) 25 | v = v[:, 0] 26 | return v 27 | 28 | def forward(self, x): 29 | v = self.encode_video(x) 30 | out = self.mlp(v) 31 | return out 32 | -------------------------------------------------------------------------------- /architecture/transformer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn, einsum 3 | from einops import rearrange, repeat 4 | import math 5 | 6 | 7 | class TemporalTransformer(nn.Module): 8 | def __init__(self, input_dim, depth, heads, mlp_dim, dim_head): 9 | super().__init__() 10 | dropout = 0.0 11 | self.input_dim = input_dim 12 | self.cls_token = nn.Parameter(torch.randn(1, 1, input_dim)) 13 | self.pos_embedding = nn.Parameter(torch.randn(1, 1024, input_dim)) 14 | self.temporal_transformer = Transformer(input_dim, depth, heads, dim_head, mlp_dim, dropout) 15 | 16 | def forward(self, 
x): 17 | b, n, _ = x.shape 18 | cls_tokens = self.cls_token.expand(b, -1, -1) 19 | x = torch.cat((cls_tokens, x), dim=1) 20 | x = x + self.pos_embedding[:, :(n+1)] 21 | x = self.temporal_transformer(x) 22 | return x 23 | 24 | 25 | class GELU(nn.Module): 26 | def forward(self, x): 27 | return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) 28 | 29 | 30 | class Residual(nn.Module): 31 | def __init__(self, fn): 32 | super().__init__() 33 | self.fn = fn 34 | 35 | def forward(self, x, **kwargs): 36 | return self.fn(x, **kwargs) + x 37 | 38 | 39 | class PreNorm(nn.Module): 40 | def __init__(self, dim, fn): 41 | super().__init__() 42 | self.norm = nn.LayerNorm(dim) 43 | self.fn = fn 44 | 45 | def forward(self, x, **kwargs): 46 | return self.fn(self.norm(x), **kwargs) 47 | 48 | 49 | class FeedForward(nn.Module): 50 | def __init__(self, dim, hidden_dim, dropout=0.): 51 | super().__init__() 52 | self.net = nn.Sequential( 53 | nn.Linear(dim, hidden_dim), 54 | GELU(), 55 | nn.Dropout(dropout), 56 | nn.Linear(hidden_dim, dim), 57 | nn.Dropout(dropout) 58 | ) 59 | 60 | def forward(self, x): 61 | return self.net(x) 62 | 63 | 64 | class Attention(nn.Module): 65 | def __init__(self, dim, heads=8, dim_head=64, dropout=0.): 66 | super().__init__() 67 | inner_dim = dim_head * heads 68 | project_out = not (heads == 1 and dim_head == dim) 69 | self.heads = heads 70 | self.scale = dim_head ** -0.5 71 | self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False) 72 | self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), 73 | nn.Dropout(dropout)) if project_out else nn.Identity() 74 | 75 | def forward(self, x): 76 | b, n, _, h = *x.shape, self.heads 77 | qkv = self.to_qkv(x).chunk(3, dim=-1) 78 | q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), qkv) 79 | dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale 80 | attn = dots.softmax(dim=-1) 81 | out = einsum('b h i j, b h j d -> b h i d', attn, v) 82 | out = rearrange(out, 'b h n d -> b n (h d)') 83 | out = self.to_out(out) 84 | return out 85 | 86 | 87 | class Transformer(nn.Module): 88 | def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout): 89 | super().__init__() 90 | self.layers = nn.ModuleList([]) 91 | for _ in range(depth): 92 | self.layers.append(nn.ModuleList([Residual(PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout))), 93 | Residual(PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout)))])) 94 | 95 | def forward(self, x): 96 | for attn, ff in self.layers: 97 | x = attn(x) 98 | x = ff(x) 99 | return x 100 | -------------------------------------------------------------------------------- /architecture/video_clip.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.cuda.amp import custom_fwd 4 | 5 | from .transformer import TemporalTransformer 6 | 7 | import clip 8 | 9 | device = "cuda" if torch.cuda.is_available() else "cpu" 10 | 11 | 12 | class VClip(nn.Module): 13 | def __init__( 14 | self, 15 | d_model: int = 512, 16 | nhead: int = 8, 17 | num_layers: int = 4, 18 | dim_forward: int = 2048 19 | ): 20 | super().__init__() 21 | self.d_model = d_model 22 | self.nhead = nhead 23 | self.num_layers = num_layers 24 | self.dim_forward = dim_forward 25 | 26 | model, _ = clip.load("ViT-B/32", device=device, jit=False) 27 | for name, param in model.named_parameters(): 28 | param.requires_grad = False 29 | self.backbone = model 30 | 31 | self.temporal = TemporalTransformer( 32 
| input_dim=d_model, 33 | depth=num_layers, 34 | heads=nhead, 35 | mlp_dim=d_model, 36 | dim_head=dim_forward 37 | ) 38 | self.logit_scale = nn.Parameter(self.backbone.logit_scale.clone().detach()) 39 | self.logit_scale.requires_grad = True 40 | 41 | @custom_fwd 42 | def forward(self, x, text): 43 | image_features = self.encode_video(x) 44 | text_features = self.encode_text(text) 45 | 46 | image_features = image_features / image_features.norm(dim=1, keepdim=True) 47 | text_features = text_features / text_features.norm(dim=1, keepdim=True) 48 | 49 | # cosine similarity as logits 50 | logit_scale = self.logit_scale.exp() 51 | logits_per_image = logit_scale * image_features @ text_features.t() 52 | logits_per_text = logits_per_image.t() 53 | 54 | return logits_per_image, logits_per_text 55 | 56 | def encode_video(self, x): 57 | B, T, C, H, W = x.shape 58 | x = x.reshape(B*T, C, H, W) 59 | v = self.backbone.encode_image(x).reshape(B, T, -1) 60 | v = self.temporal(v) 61 | 62 | v = v[:, 0] 63 | return v 64 | 65 | def encode_text(self, text): 66 | encoded_text = self.backbone.encode_text(text) 67 | return encoded_text 68 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from datetime import datetime 3 | import argparse 4 | 5 | import torch 6 | 7 | from DataLoaders.utils import col_index 8 | 9 | def get_config(sysv): 10 | parser = argparse.ArgumentParser(description='Training variables.') 11 | 12 | parser.add_argument('--model_basename', default='baseline') 13 | parser.add_argument('--local_rank', type=int, default=-1, metavar='N', help='Local process rank') 14 | parser.add_argument('--exp_name', default=datetime.now().strftime("%Y_%m_%d-%H%M%S")) 15 | parser.add_argument('--fold', default="1") 16 | parser.add_argument('--emo', type=int, default=0, help='Index of emotion in loo experiments') 17 | 18 | parser.add_argument('--pretrained', default=None, help="Path to pretrained weights") 19 | parser.add_argument('--fromcheckpoint', default=None, help="Path to a training checkpoint to resume from") 20 | parser.add_argument('--finetune', action='store_true', help="finetune CLIP") 21 | parser.set_defaults(finetune=False) 22 | parser.add_argument('--text', action='store_true', help="finetune CLIP text encoder") 23 | parser.set_defaults(text=False) 24 | parser.add_argument('--visual', action='store_true', help="finetune CLIP visual encoder") 25 | parser.set_defaults(visual=False) 26 | 27 | parser.add_argument('--resample', action='store_true', help="Resample minority classes") 28 | parser.set_defaults(resample=False) 29 | 30 | parser.add_argument('--noDDP', dest='DDP', action='store_false', help="Disable Distributed Data Parallel") 31 | parser.set_defaults(DDP=True) 32 | 33 | parser.add_argument('--log_dir', default='logs') 34 | parser.add_argument('--batch_size', type=int, default=torch.cuda.device_count()*16) 35 | parser.add_argument('--num_epochs', type=int, default=50) 36 | parser.add_argument('--optim', type=str, default='SGD') 37 | parser.add_argument('--lr', type=float, default=0.001) 38 | parser.add_argument('--wd', type=float, default=0.0005, help='weight decay') 39 | 40 | parser.add_argument('--dataset_root', help="path to dataset") 41 | parser.add_argument('--dataset_name', default='MAFW') 42 | 43 | # input video options 44 | parser.add_argument('--input_shape', type=int, nargs='+', default=224) 45 | parser.add_argument('--downsample', type=int, default=2) 46 | parser.add_argument('--clip_len', 
type=int, default=32) 47 | 48 | parser.add_argument('--debug', action='store_true') 49 | parser.set_defaults(debug=False) 50 | 51 | args, _ = parser.parse_known_args(sysv) 52 | return args 53 | -------------------------------------------------------------------------------- /eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from sklearn import metrics 5 | 6 | import torch 7 | import torch.distributed as dist 8 | from torch.nn.parallel import DistributedDataParallel as DDP 9 | from torch import nn 10 | from torch.cuda.amp import autocast 11 | 12 | import clip 13 | 14 | from config import get_config 15 | from DataLoaders import * 16 | from architecture import VClip 17 | 18 | torch.backends.cudnn.enabled = False 19 | 20 | device = "cuda" if torch.cuda.is_available() else "cpu" 21 | 22 | cnf = get_config(sys.argv) 23 | 24 | 25 | @torch.no_grad() 26 | def evaluate(loader, model): 27 | model.eval() 28 | class_tokens = clip.tokenize(CLASS_DESCRIPTION, context_length=77, truncate=True) 29 | with autocast(): 30 | war = 0 31 | all_labels = torch.zeros(len(loader.sampler)).to(device) 32 | all_predictions = torch.zeros(len(loader.sampler)).to(device) 33 | all_labels_lst = [all_labels.clone().detach() for i in range(int(os.environ['WORLD_SIZE']))] 34 | all_predictions_lst = [all_predictions.clone().detach() for i in range(int(os.environ['WORLD_SIZE']))] 35 | 36 | for batch_idx, (inputs, labels, _) in enumerate(loader): 37 | inputs, class_tokens, labels = inputs.to(device), class_tokens.to(device), labels.to(device) 38 | logits_per_image, logits_per_text = model(inputs, class_tokens) 39 | 40 | predicted = logits_per_image.softmax(dim=-1).argmax(dim=-1, keepdim=True) 41 | war += predicted.eq(labels.view_as(predicted)).sum().item() 42 | 43 | start_idx = loader.batch_size * batch_idx 44 | end_idx = start_idx + loader.batch_size 45 | end_idx = end_idx if end_idx <= all_labels.shape[0] else all_labels.shape[0] 46 | 47 | all_labels[start_idx:end_idx] = labels.reshape(-1) 48 | all_predictions[start_idx:end_idx] = predicted.reshape(-1) 49 | 50 | dist.all_gather(all_labels_lst, all_labels) 51 | dist.all_gather(all_predictions_lst, all_predictions) 52 | return torch.tensor(war, device=device, dtype=torch.float), (torch.cat(all_labels_lst).cpu().numpy(), torch.cat(all_predictions_lst).cpu().numpy()) 53 | 54 | 55 | if __name__ == "__main__": 56 | # region ddp 57 | torch.cuda.set_device(cnf.local_rank) 58 | cnf.is_master = cnf.local_rank == 0 59 | cnf.device = torch.cuda.device(cnf.local_rank) 60 | cnf.world_size = int(os.environ['WORLD_SIZE']) 61 | dist.init_process_group(backend='nccl') 62 | # endregion 63 | 64 | # region set_up 65 | if cnf.is_master: 66 | cnf_dict = vars(cnf) 67 | # endregion 68 | 69 | model = VClip(num_layers=2) 70 | 71 | if cnf.pretrained: 72 | state_pth = os.path.join(cnf.pretrained) 73 | state_dict = torch.load(state_pth) 74 | new_dict = dict() 75 | for key in state_dict: 76 | new_key = key.replace('module.', '') 77 | new_dict[new_key] = state_dict[key] 78 | keys = model.load_state_dict(new_dict, strict=False) 79 | print(keys) 80 | 81 | model = model.cuda() 82 | model = nn.SyncBatchNorm.convert_sync_batchnorm(model) 83 | model = DDP( 84 | model, 85 | device_ids=[cnf.local_rank], 86 | output_device=cnf.local_rank 87 | ) 88 | _, test_loader = get_loaders(cnf) 89 | war, cm = evaluate(loader=test_loader, model=model) 90 | dist.all_reduce(war) 91 | war /= len(test_loader.dataset) 92 | if cnf.is_master: 93 | gt, pd = 
cm[0], cm[1] 94 | uar = metrics.confusion_matrix(gt, pd, normalize="true").diagonal().mean() 95 | print('WAR: {}'.format(war*100)) 96 | print('UAR: {}'.format(uar*100)) 97 | dist.destroy_process_group() 98 | -------------------------------------------------------------------------------- /experiments/AFEW_loo_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=18:0:0 3 | #$ -l h_vmem=7G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l cluster=andrena 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=loo 16 | log_dir='logs/' 17 | batch_size=64 18 | num_epochs=30 19 | lr=0.001 20 | dataset_root="/datasets/AFEW" 21 | dataset_name=AFEW 22 | downsample=4 23 | clip_len=32 24 | optim="SGD" 25 | 26 | exp_name=$(date "+%Y-%m-%d") 27 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 28 | #torch.distributed parameters 29 | num_gpus=2 30 | 31 | for j in {0..6} 32 | do 33 | WANDB__SERVICE_WAIT=300 python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 train_loo.py \ 34 | --log_dir $log_dir \ 35 | --batch_size $batch_size \ 36 | --dataset_root $dataset_root \ 37 | --dataset_name $dataset_name \ 38 | --model_basename $model_basename\ 39 | --num_epochs $num_epochs \ 40 | --optim $optim \ 41 | --lr $lr \ 42 | --downsample $downsample \ 43 | --exp_name $exp_name \ 44 | --clip_len $clip_len \ 45 | --emo $j 46 | done -------------------------------------------------------------------------------- /experiments/AFEW_trainsupervised_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=24:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -cwd 7 | #$ -j y 8 | #$ -m be 9 | 10 | module load miniconda/4.12.0 11 | conda activate venv 12 | 13 | #model parameters 14 | model_basename=supervised 15 | log_dir='logs/' 16 | batch_size=64 17 | num_epochs=50 18 | lr=0.001 19 | dataset_root="/datasets/AFEW" 20 | dataset_name=AFEW 21 | downsample=4 22 | clip_len=32 23 | optim="SGD" 24 | 25 | exp_name=$(date "+%Y-%m-%d") 26 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 27 | #torch.distributed parameters 28 | num_gpus=2 29 | 30 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 train_supervised.py \ 31 | --log_dir $log_dir \ 32 | --batch_size $batch_size \ 33 | --dataset_root $dataset_root \ 34 | --dataset_name $dataset_name \ 35 | --model_basename $model_basename\ 36 | --num_epochs $num_epochs \ 37 | --optim $optim \ 38 | --lr $lr \ 39 | --downsample $downsample \ 40 | --exp_name $exp_name \ 41 | --clip_len $clip_len -------------------------------------------------------------------------------- /experiments/AFEW_zeroshot_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=1:0:0 3 | #$ -l h_vmem=7G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l cluster=andrena 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=zeroshot 16 | log_dir='logs/' 17 | batch_size=64 18 | dataset_root="/datasets/AFEW" 19 | dataset_name=AFEW 20 | downsample=4 21 | clip_len=32 22 | pretrained='logs/checkpoints/2023-03-08/models/4.pth' 23 | 24 | exp_name=$(date "+%Y-%m-%d") 25 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 26 | #torch.distributed 
parameters 27 | num_gpus=2 28 | 29 | WANDB__SERVICE_WAIT=300 python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 eval.py \ 30 | --log_dir $log_dir \ 31 | --batch_size $batch_size \ 32 | --dataset_root $dataset_root \ 33 | --dataset_name $dataset_name \ 34 | --model_basename $model_basename\ 35 | --downsample $downsample \ 36 | --exp_name $exp_name \ 37 | --clip_len $clip_len \ 38 | --pretrained $pretrained 39 | -------------------------------------------------------------------------------- /experiments/DFEW_loo_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=48:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 24 5 | #$ -l gpu=3 6 | #$ -l gpu_type=ampere 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=loo 16 | log_dir='logs/' 17 | batch_size=128 18 | num_epochs=30 19 | lr=0.001 20 | dataset_root="/datasets/DFEW" 21 | dataset_name=DFEW 22 | downsample=4 23 | clip_len=32 24 | optim="SGD" 25 | 26 | #exp_name=$(date "+%Y-%m-%d") 27 | exp_name="2023-05-05" 28 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 29 | #torch.distributed parameters 30 | num_gpus=3 31 | 32 | for j in {1..6} 33 | do 34 | for i in {1..5} 35 | do 36 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 train_loo.py \ 37 | --fold $i \ 38 | --log_dir $log_dir \ 39 | --batch_size $batch_size \ 40 | --dataset_root $dataset_root \ 41 | --dataset_name $dataset_name \ 42 | --model_basename $model_basename\ 43 | --num_epochs $num_epochs \ 44 | --optim $optim \ 45 | --lr $lr \ 46 | --downsample $downsample \ 47 | --exp_name $exp_name \ 48 | --clip_len $clip_len \ 49 | --emo $j 50 | 51 | done 52 | done 53 | -------------------------------------------------------------------------------- /experiments/DFEW_trainsupervised_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=15:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l gpu_type=ampere 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=supervised 16 | log_dir='logs/' 17 | batch_size=128 18 | num_epochs=50 19 | lr=0.001 20 | dataset_root="/datasets/DFEW" 21 | dataset_name=DFEW 22 | downsample=4 23 | clip_len=32 24 | optim="SGD" 25 | 26 | exp_name=$(date "+%Y-%m-%d") 27 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 28 | #torch.distributed parameters 29 | num_gpus=2 30 | 31 | for i in {1..5} 32 | do 33 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 train_supervised.py \ 34 | --fold $i \ 35 | --log_dir $log_dir \ 36 | --batch_size $batch_size \ 37 | --dataset_root $dataset_root \ 38 | --dataset_name $dataset_name \ 39 | --model_basename $model_basename\ 40 | --num_epochs $num_epochs \ 41 | --optim $optim \ 42 | --lr $lr \ 43 | --downsample $downsample \ 44 | --exp_name $exp_name \ 45 | --clip_len $clip_len 46 | 47 | done 48 | -------------------------------------------------------------------------------- /experiments/DFEW_zeroshot_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=3:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l gpu_type=ampere 7 
| #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=zeroshot 16 | log_dir='logs/' 17 | batch_size=128 18 | dataset_root="/datasets/DFEW" 19 | dataset_name=DFEW 20 | downsample=4 21 | clip_len=32 22 | pretrained='logs/checkpoints/2023-03-08/models/4.pth' 23 | 24 | exp_name=$(date "+%Y-%m-%d") 25 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 26 | #torch.distributed parameters 27 | num_gpus=2 28 | 29 | for i in {1..5} 30 | do 31 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 eval.py \ 32 | --fold $i \ 33 | --log_dir $log_dir \ 34 | --batch_size $batch_size \ 35 | --dataset_root $dataset_root \ 36 | --dataset_name $dataset_name \ 37 | --model_basename $model_basename\ 38 | --downsample $downsample \ 39 | --exp_name $exp_name \ 40 | --clip_len $clip_len \ 41 | --pretrained $pretrained 42 | done 43 | -------------------------------------------------------------------------------- /experiments/FERV39K_finetune_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=30:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l gpu_type=ampere 7 | #$ -l cluster=andrena 8 | #$ -cwd 9 | #$ -j y 10 | #$ -m be 11 | 12 | module load miniconda/4.12.0 13 | conda activate venv 14 | 15 | #model parameters 16 | model_basename=finevisual 17 | log_dir='logs/' 18 | batch_size=32 19 | num_epochs=30 20 | lr=0.001 21 | dataset_root="/datasets/FERV39K" 22 | dataset_name=FERV39K 23 | downsample=4 24 | clip_len=32 25 | optim="SGD" 26 | 27 | exp_name=$(date "+%Y-%m-%d") 28 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 29 | #torch.distributed parameters 30 | num_gpus=2 31 | 32 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 train.py \ 33 | --log_dir $log_dir \ 34 | --batch_size $batch_size \ 35 | --dataset_root $dataset_root \ 36 | --dataset_name $dataset_name \ 37 | --model_basename $model_basename\ 38 | --num_epochs $num_epochs \ 39 | --optim $optim \ 40 | --lr $lr \ 41 | --downsample $downsample \ 42 | --exp_name $exp_name \ 43 | --clip_len $clip_len \ 44 | --finetune \ 45 | --visual 46 | 47 | -------------------------------------------------------------------------------- /experiments/FERV39K_loo_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=48:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l gpu_type=ampere 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=loo 16 | log_dir='logs/' 17 | batch_size=128 18 | num_epochs=15 19 | lr=0.001 20 | dataset_root="datasets/FERV39K" 21 | dataset_name=FERV39K 22 | downsample=4 23 | clip_len=32 24 | optim="SGD" 25 | 26 | exp_name=$(date "+%Y-%m-%d") 27 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 28 | #torch.distributed parameters 29 | num_gpus=2 30 | 31 | for j in {0..6} 32 | do 33 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 \ 34 | train_loo.py \ 35 | --log_dir $log_dir \ 36 | --batch_size $batch_size \ 37 | --dataset_root $dataset_root \ 38 | --dataset_name $dataset_name \ 39 | --model_basename $model_basename\ 40 | --num_epochs $num_epochs \ 41 | --optim $optim \ 42 | --lr $lr \ 43 | 
--downsample $downsample \ 44 | --exp_name $exp_name \ 45 | --clip_len $clip_len\ 46 | --emo $j 47 | done -------------------------------------------------------------------------------- /experiments/FERV39K_train_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=18:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l gpu_type=ampere 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=cb_loss 16 | log_dir='logs/' 17 | batch_size=128 18 | num_epochs=50 19 | lr=0.001 20 | dataset_root="/datasets/FERV39K" 21 | dataset_name=FERV39K 22 | downsample=4 23 | clip_len=32 24 | optim="SGD" 25 | 26 | exp_name=$(date "+%Y-%m-%d") 27 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 28 | #torch.distributed parameters 29 | num_gpus=2 30 | 31 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 train.py \ 32 | --log_dir $log_dir \ 33 | --batch_size $batch_size \ 34 | --dataset_root $dataset_root \ 35 | --dataset_name $dataset_name \ 36 | --model_basename $model_basename\ 37 | --num_epochs $num_epochs \ 38 | --optim $optim \ 39 | --lr $lr \ 40 | --downsample $downsample \ 41 | --exp_name $exp_name \ 42 | --clip_len $clip_len \ 43 | --cb 44 | -------------------------------------------------------------------------------- /experiments/FERV39K_trainsupervised_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=24:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l gpu_type=ampere 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=supervised 16 | log_dir='logs/' 17 | batch_size=128 18 | num_epochs=50 19 | lr=0.001 20 | dataset_root="datasets/FERV39K" 21 | dataset_name=FERV39K 22 | downsample=4 23 | clip_len=32 24 | optim="SGD" 25 | 26 | exp_name=$(date "+%Y-%m-%d") 27 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 28 | #torch.distributed parameters 29 | num_gpus=2 30 | 31 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 train_supervised.py \ 32 | --log_dir $log_dir \ 33 | --batch_size $batch_size \ 34 | --dataset_root $dataset_root \ 35 | --dataset_name $dataset_name \ 36 | --model_basename $model_basename\ 37 | --num_epochs $num_epochs \ 38 | --optim $optim \ 39 | --lr $lr \ 40 | --downsample $downsample \ 41 | --exp_name $exp_name \ 42 | --clip_len $clip_len -------------------------------------------------------------------------------- /experiments/FERV39K_zeroshot_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=4:0:0 3 | #$ -l h_vmem=7G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l cluster=andrena 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=zeroshot 16 | log_dir='logs/' 17 | batch_size=128 18 | dataset_root="/datasets/FERV39K" 19 | dataset_name=FERV39K 20 | downsample=4 21 | clip_len=32 22 | pretrained='logs/checkpoints/2023-03-08/models/4.pth' 23 | 24 | exp_name=$(date "+%Y-%m-%d") 25 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 26 | #torch.distributed parameters 27 | num_gpus=2 28 | 29 | WANDB__SERVICE_WAIT=300 python -m torch.distributed.launch 
--nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 eval.py \ 30 | --log_dir $log_dir \ 31 | --batch_size $batch_size \ 32 | --dataset_root $dataset_root \ 33 | --dataset_name $dataset_name \ 34 | --model_basename $model_basename\ 35 | --downsample $downsample \ 36 | --exp_name $exp_name \ 37 | --clip_len $clip_len \ 38 | --pretrained $pretrained 39 | -------------------------------------------------------------------------------- /experiments/MAFW_finetune_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=24:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l gpu_type=ampere 7 | #$ -l cluster=andrena # use the Andrena nodes 8 | #$ -cwd 9 | #$ -j y 10 | #$ -m be 11 | 12 | module load miniconda/4.12.0 13 | conda activate venv 14 | 15 | #model parameters 16 | model_basename=finetune 17 | log_dir='logs/' 18 | batch_size=128 19 | num_epochs=30 20 | lr=0.001 21 | dataset_root="/datasets/MAFW" 22 | dataset_name=MAFW 23 | downsample=4 24 | clip_len=32 25 | optim="SGD" 26 | 27 | exp_name=$(date "+%Y-%m-%d") 28 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 29 | #torch.distributed parameters 30 | num_gpus=2 31 | 32 | for i in {1..5} 33 | do 34 | #echo $i 35 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 \ 36 | train.py \ 37 | --log_dir $log_dir \ 38 | --batch_size $batch_size \ 39 | --dataset_root $dataset_root \ 40 | --dataset_name $dataset_name \ 41 | --model_basename $model_basename\ 42 | --num_epochs $num_epochs \ 43 | --optim $optim \ 44 | --lr $lr \ 45 | --downsample $downsample \ 46 | --exp_name $exp_name \ 47 | --fold $i \ 48 | --clip_len $clip_len \ 49 | --finetune \ 50 | --text \ 51 | --visual 52 | done 53 | -------------------------------------------------------------------------------- /experiments/MAFW_loo_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=48:0:0 3 | #$ -l h_vmem=7G 4 | #$ -pe smp 24 5 | #$ -l gpu=3 6 | #$ -l gpu_type=ampere 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=loo 16 | log_dir='logs/' 17 | batch_size=32 18 | num_epochs=25 19 | lr=0.001 20 | dataset_root="/datasets/MAFW" 21 | dataset_name=MAFW 22 | downsample=4 23 | clip_len=32 24 | optim="SGD" 25 | 26 | #exp_name=$(date "+%Y-%m-%d") 27 | exp_name="2023-05-06" 28 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 29 | #torch.distributed parameters 30 | num_gpus=3 31 | 32 | for j in {5..10} 33 | do 34 | for i in {1..5} 35 | do 36 | #echo $i 37 | WANDB__SERVICE_WAIT=300 python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 \ 38 | train_loo.py \ 39 | --log_dir $log_dir \ 40 | --batch_size $batch_size \ 41 | --dataset_root $dataset_root \ 42 | --dataset_name $dataset_name \ 43 | --model_basename $model_basename\ 44 | --num_epochs $num_epochs \ 45 | --optim $optim \ 46 | --lr $lr \ 47 | --downsample $downsample \ 48 | --exp_name $exp_name \ 49 | --fold $i \ 50 | --clip_len $clip_len \ 51 | --emo $j 52 | done 53 | done 54 | -------------------------------------------------------------------------------- /experiments/MAFW_supervised_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=30:0:0 3 | #$ -l 
h_vmem=7G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l gpu_type=ampere 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=supervised 16 | log_dir='logs/' 17 | batch_size=32 18 | num_epochs=30 19 | lr=0.001 20 | dataset_root="datasets/MAFW" 21 | dataset_name=MAFW 22 | downsample=4 23 | clip_len=32 24 | optim="SGD" 25 | 26 | exp_name="2023-04-12" 27 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 28 | #torch.distributed parameters 29 | num_gpus=2 30 | 31 | for i in {3..5} 32 | do 33 | #echo $i 34 | WANDB__SERVICE_WAIT=300 python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 \ 35 | train_supervised.py \ 36 | --log_dir $log_dir \ 37 | --batch_size $batch_size \ 38 | --dataset_root $dataset_root \ 39 | --dataset_name $dataset_name \ 40 | --model_basename $model_basename\ 41 | --num_epochs $num_epochs \ 42 | --optim $optim \ 43 | --lr $lr \ 44 | --downsample $downsample \ 45 | --exp_name $exp_name \ 46 | --fold $i \ 47 | --clip_len $clip_len 48 | done 49 | -------------------------------------------------------------------------------- /experiments/MAFW_train_hpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #$ -l h_rt=24:0:0 3 | #$ -l h_vmem=7.5G 4 | #$ -pe smp 16 5 | #$ -l gpu=2 6 | #$ -l gpu_type=ampere 7 | #$ -cwd 8 | #$ -j y 9 | #$ -m be 10 | 11 | module load miniconda/4.12.0 12 | conda activate venv 13 | 14 | #model parameters 15 | model_basename=visualfinetune 16 | log_dir='logs/' 17 | batch_size=64 18 | num_epochs=30 19 | lr=0.001 20 | dataset_root="/datasets/MAFW" 21 | dataset_name=MAFW 22 | downsample=2 23 | clip_len=32 24 | optim="SGD" 25 | 26 | exp_name=$(date "+%Y-%m-%d") 27 | exp_name="${exp_name}_${model_basename}_${dataset_name}" 28 | #torch.distributed parameters 29 | num_gpus=2 30 | 31 | for i in {1..5} 32 | do 33 | #echo $i 34 | python -m torch.distributed.launch --nproc_per_node=$num_gpus --nnodes=1 --node_rank 0 \ 35 | train.py \ 36 | --log_dir $log_dir \ 37 | --batch_size $batch_size \ 38 | --dataset_root $dataset_root \ 39 | --dataset_name $dataset_name \ 40 | --model_basename $model_basename\ 41 | --num_epochs $num_epochs \ 42 | --optim $optim \ 43 | --lr $lr \ 44 | --downsample $downsample \ 45 | --exp_name $exp_name \ 46 | --fold $i \ 47 | --clip_len $clip_len 48 | done 49 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | anyio==3.6.2 2 | appdirs==1.4.4 3 | argon2-cffi==21.3.0 4 | argon2-cffi-bindings==21.2.0 5 | arrow==1.2.3 6 | asttokens==2.2.1 7 | attrs==22.2.0 8 | av==10.0.0 9 | backcall==0.2.0 10 | beautifulsoup4==4.11.1 11 | bleach==5.0.1 12 | certifi==2022.12.7 13 | cffi==1.15.1 14 | charset-normalizer==3.0.1 15 | click==8.1.3 16 | clip==1.0 17 | comm==0.1.2 18 | contourpy==1.0.7 19 | cycler==0.11.0 20 | debugpy==1.6.5 21 | decorator==5.1.1 22 | defusedxml==0.7.1 23 | docker-pycreds==0.4.0 24 | entrypoints==0.4 25 | executing==1.2.0 26 | fastjsonschema==2.16.2 27 | filelock==3.9.0 28 | fonttools==4.38.0 29 | fqdn==1.5.1 30 | ftfy==6.1.1 31 | gitdb==4.0.10 32 | GitPython==3.1.30 33 | huggingface-hub==0.12.0 34 | idna==3.4 35 | imbalanced-learn==0.10.1 36 | imblearn==0.0 37 | importlib-metadata==6.0.0 38 | ipykernel==6.20.2 39 | ipython==8.8.0 40 | ipython-genutils==0.2.0 41 | ipywidgets==8.0.4 42 | 
isoduration==20.11.0 43 | jedi==0.18.2 44 | Jinja2==3.1.2 45 | joblib==1.2.0 46 | jsonpointer==2.3 47 | jsonschema==4.17.3 48 | jupyter==1.0.0 49 | jupyter-console==6.4.4 50 | jupyter-events==0.6.3 51 | jupyter_client==7.4.9 52 | jupyter_core==5.1.3 53 | jupyter_server==2.1.0 54 | jupyter_server_terminals==0.4.4 55 | jupyterlab-pygments==0.2.2 56 | jupyterlab-widgets==3.0.5 57 | kiwisolver==1.4.4 58 | MarkupSafe==2.1.2 59 | matplotlib==3.6.3 60 | matplotlib-inline==0.1.6 61 | mistune==2.0.4 62 | nbclassic==0.4.8 63 | nbclient==0.7.2 64 | nbconvert==7.2.8 65 | nbformat==5.7.3 66 | nest-asyncio==1.5.6 67 | notebook==6.5.2 68 | notebook_shim==0.2.2 69 | numpy==1.24.1 70 | nvidia-cublas-cu11==11.10.3.66 71 | nvidia-cuda-nvrtc-cu11==11.7.99 72 | nvidia-cuda-runtime-cu11==11.7.99 73 | nvidia-cudnn-cu11==8.5.0.96 74 | packaging==23.0 75 | pandas==1.5.3 76 | pandocfilters==1.5.0 77 | parso==0.8.3 78 | pathtools==0.1.2 79 | pexpect==4.8.0 80 | pickleshare==0.7.5 81 | Pillow==9.4.0 82 | platformdirs==2.6.2 83 | prometheus-client==0.15.0 84 | prompt-toolkit==3.0.36 85 | protobuf==4.21.12 86 | psutil==5.9.4 87 | ptyprocess==0.7.0 88 | pure-eval==0.2.2 89 | pycparser==2.21 90 | Pygments==2.14.0 91 | pyparsing==3.0.9 92 | pyrsistent==0.19.3 93 | python-dateutil==2.8.2 94 | python-json-logger==2.0.4 95 | pytz==2022.7.1 96 | PyYAML==6.0 97 | pyzmq==25.0.0 98 | qtconsole==5.4.0 99 | QtPy==2.3.0 100 | regex==2022.10.31 101 | requests==2.28.2 102 | rfc3339-validator==0.1.4 103 | rfc3986-validator==0.1.1 104 | scikit-learn==1.2.0 105 | scipy==1.10.0 106 | seaborn==0.12.2 107 | Send2Trash==1.8.0 108 | sentry-sdk==1.14.0 109 | setproctitle==1.3.2 110 | six==1.16.0 111 | smmap==5.0.0 112 | sniffio==1.3.0 113 | soupsieve==2.3.2.post1 114 | stack-data==0.6.2 115 | terminado==0.17.1 116 | threadpoolctl==3.1.0 117 | tinycss2==1.2.1 118 | tokenizers==0.13.2 119 | torch==1.13.1 120 | torchvision==0.14.1 121 | tornado==6.2 122 | tqdm==4.64.1 123 | traitlets==5.8.1 124 | transformers==4.26.0 125 | typing_extensions==4.4.0 126 | uri-template==1.2.0 127 | urllib3==1.26.14 128 | wandb==0.13.9 129 | wcwidth==0.2.6 130 | webcolors==1.12 131 | webencodings==0.5.1 132 | websocket-client==1.4.2 133 | widgetsnbextension==4.0.5 134 | zipp==3.11.0 135 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from sklearn import metrics 4 | 5 | import torch 6 | import torch.distributed as dist 7 | from torch.nn.parallel import DistributedDataParallel as DDP 8 | from torch import nn 9 | from torch.cuda.amp import autocast 10 | from torch.cuda.amp import GradScaler 11 | from torch import optim 12 | from torch.optim import lr_scheduler 13 | 14 | import clip 15 | 16 | import wandb 17 | 18 | from config import get_config 19 | from DataLoaders import * 20 | from architecture import VClip 21 | 22 | torch.backends.cudnn.enabled = False 23 | 24 | device = "cuda" if torch.cuda.is_available() else "cpu" 25 | 26 | cnf = get_config(sys.argv) 27 | 28 | 29 | @torch.no_grad() 30 | def evaluate(loader, model): 31 | model.eval() 32 | class_tokens = clip.tokenize(CLASS_DESCRIPTION, context_length=77, truncate=True) 33 | with autocast(): 34 | war = 0 35 | all_labels = torch.zeros(len(loader.sampler)).to(device) 36 | all_predictions = torch.zeros(len(loader.sampler)).to(device) 37 | all_labels_lst = [all_labels.clone().detach() for i in range(int(os.environ['WORLD_SIZE']))] 38 | all_predictions_lst = 
[all_predictions.clone().detach() for i in range(int(os.environ['WORLD_SIZE']))] 39 | 40 | for batch_idx, (inputs, labels, _) in enumerate(loader): 41 | inputs, class_tokens, labels = inputs.to(device), class_tokens.to(device), labels.to(device) 42 | logits_per_image, logits_per_text = model(inputs, class_tokens) 43 | 44 | predicted = logits_per_image.softmax(dim=-1).argmax(dim=-1, keepdim=True) 45 | war += predicted.eq(labels.view_as(predicted)).sum().item() 46 | 47 | start_idx = loader.batch_size * batch_idx 48 | end_idx = start_idx + loader.batch_size 49 | end_idx = end_idx if end_idx <= all_labels.shape[0] else all_labels.shape[0] 50 | 51 | all_labels[start_idx:end_idx] = labels.reshape(-1) 52 | all_predictions[start_idx:end_idx] = predicted.reshape(-1) 53 | 54 | dist.all_gather(all_labels_lst, all_labels) 55 | dist.all_gather(all_predictions_lst, all_predictions) 56 | return torch.tensor(war, device=device, dtype=torch.float), \ 57 | (torch.cat(all_labels_lst).cpu().numpy(), torch.cat(all_predictions_lst).cpu().numpy()) 58 | 59 | 60 | def train(loader, model, loss_criterion, optimizer): 61 | model.train() 62 | losses = torch.zeros(len(loader)).to(device) 63 | for batch_idx, (inputs, labels, descriptions) in enumerate(loader): 64 | with autocast(): 65 | optimizer.zero_grad() 66 | class_tokens = clip.tokenize(descriptions, context_length=77, truncate=True) 67 | inputs, class_tokens = inputs.to(device), class_tokens.to(device) 68 | logits_per_image, logits_per_text = model(inputs, class_tokens) 69 | ground_truth = torch.arange(len(inputs), dtype=torch.long, device=device) 70 | 71 | loss_i = loss_criterion(logits_per_image, ground_truth) 72 | loss_t = loss_criterion(logits_per_text, ground_truth) 73 | loss = (loss_i + loss_t) / 2 74 | 75 | losses[batch_idx] = loss.item() 76 | loss.backward() 77 | optimizer.step() 78 | 79 | sys.stdout.write( 80 | '\r Iter[{}/{}]\t loss: {:.2f} '.format( 81 | batch_idx + 1, 82 | len(loader), 83 | loss.item() 84 | ) 85 | ) 86 | sys.stdout.flush() 87 | 88 | return losses.mean() 89 | 90 | 91 | if __name__ == "__main__": 92 | # region ddp 93 | torch.cuda.set_device(cnf.local_rank) 94 | cnf.is_master = cnf.local_rank == 0 95 | cnf.device = torch.cuda.device(cnf.local_rank) 96 | cnf.world_size = int(os.environ['WORLD_SIZE']) 97 | dist.init_process_group(backend='nccl') 98 | # endregion 99 | 100 | # region set_up 101 | 102 | if cnf.is_master: 103 | ROOT_FOLDER = os.path.join(cnf.log_dir, 'checkpoints') 104 | EXP_FOLDER = os.path.join(ROOT_FOLDER, cnf.exp_name) 105 | MODELS_FOLDER = os.path.join(EXP_FOLDER, 'models') 106 | PREDS_FOLDER = os.path.join(EXP_FOLDER, 'preds') 107 | if not os.path.exists(MODELS_FOLDER): 108 | os.makedirs(MODELS_FOLDER, exist_ok=True) 109 | if not os.path.exists(PREDS_FOLDER): 110 | os.makedirs(PREDS_FOLDER, exist_ok=True) 111 | cnf_dict = vars(cnf) 112 | # endregion 113 | 114 | model = VClip(num_layers=2) 115 | 116 | if cnf.pretrained: 117 | state_pth = os.path.join(cnf.pretrained) 118 | state_dict = torch.load(state_pth) 119 | new_dict = dict() 120 | for key in state_dict: 121 | new_key = key.replace('module.', '') 122 | new_dict[new_key] = state_dict[key] 123 | keys = model.load_state_dict(new_dict, strict=False) 124 | print(keys) 125 | 126 | if cnf.finetune: 127 | if cnf.text: 128 | for name, param in model.backbone.transformer.named_parameters(): 129 | param.requires_grad = True 130 | if cnf.visual: 131 | for name, param in model.backbone.visual.named_parameters(): 132 | param.requires_grad = True 133 | backbone_params = 
model.backbone.parameters() 134 | other_params = list() 135 | for name, param in model.named_parameters(): 136 | if 'backbone' not in name: 137 | other_params.append(param) 138 | param_groups = [ 139 | {'params': other_params}, 140 | {'params': backbone_params, 'lr': cnf.lr * 0.001} 141 | ] 142 | else: 143 | param_groups = model.parameters() 144 | 145 | if cnf.optim == 'SGD': 146 | optimizer = optim.SGD( 147 | param_groups, 148 | lr=cnf.lr, 149 | momentum=0.9, 150 | weight_decay=cnf.wd 151 | ) 152 | else: 153 | optimizer = optim.Adam( 154 | param_groups, 155 | lr=cnf.lr, 156 | # betas=(0.9, 0.98), 157 | eps=1e-6, 158 | weight_decay=cnf.wd 159 | ) 160 | scaler = GradScaler() 161 | scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[15, 35, 50], gamma=0.1) 162 | # scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=cnf.num_epochs//2) 163 | loss_c = nn.CrossEntropyLoss() 164 | 165 | if cnf.is_master: 166 | wandb.init(project='ZeroShot', group=cnf.exp_name, notes='', config=cnf_dict, job_type='fold={}'.format(cnf.fold)) 167 | wandb.watch(model, log="all", log_freq=25) 168 | model = model.cuda() 169 | model = nn.SyncBatchNorm.convert_sync_batchnorm(model) 170 | model = DDP( 171 | model, 172 | device_ids=[cnf.local_rank], 173 | output_device=cnf.local_rank 174 | ) 175 | train_loader, test_loader = get_loaders(cnf) 176 | w = 0 177 | last_loss = 100 178 | for e in range(cnf.num_epochs): 179 | train_loader.sampler.set_epoch(e) 180 | test_loader.sampler.set_epoch(e) 181 | train_loss = train(loader=train_loader, model=model, loss_criterion=loss_c, optimizer=optimizer) 182 | dist.all_reduce(train_loss) 183 | train_loss /= cnf.world_size 184 | if cnf.is_master and train_loss < last_loss: # lowest loss 185 | last_loss = train_loss 186 | model_filename = os.path.join(MODELS_FOLDER, 'fold_{}_epoch_{}.pth'.format(cnf.fold, e)) 187 | torch.save(model.state_dict(), model_filename) 188 | war, cm = evaluate(loader=test_loader, model=model) 189 | dist.all_reduce(war) 190 | war /= len(test_loader.dataset) 191 | if cnf.is_master: 192 | gt, pd = cm[0], cm[1] 193 | uar = metrics.confusion_matrix(gt, pd, normalize="true").diagonal().mean() 194 | test_dict = { 195 | 'epoch': e, 196 | 'loss': train_loss, 197 | 'war': war, 198 | 'uar': uar, 199 | 'lr': scheduler.get_last_lr()[0] 200 | } 201 | wandb.log(test_dict) 202 | if war > w: 203 | # preds = get_pred(test_loader, model) 204 | w = war 205 | if cnf.is_master: 206 | model_filename = os.path.join(MODELS_FOLDER, '{}.pth'.format(cnf.fold)) 207 | torch.save(model.state_dict(), model_filename) 208 | best = wandb.Table(columns=["WAR", "UAR"], data=[[war, uar]]) 209 | wandb.log({'best_results': best}) 210 | wandb.log({"conf_mat": wandb.plot.confusion_matrix(preds=pd.reshape(-1), y_true=gt.reshape(-1), class_names=CLASSES)}) 211 | scheduler.step() 212 | dist.destroy_process_group() 213 | -------------------------------------------------------------------------------- /train_loo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from sklearn import metrics 5 | 6 | import torch 7 | import torch.distributed as dist 8 | from torch.nn.parallel import DistributedDataParallel as DDP 9 | from torch import nn 10 | from torch.cuda.amp import autocast 11 | from torch.cuda.amp import GradScaler 12 | from torch import optim 13 | from torch.optim import lr_scheduler 14 | from torch.utils import data 15 | from torch.utils.data.distributed import DistributedSampler 16 | 17 | import clip 18 | 19 | import 
wandb 20 | 21 | from config import get_config 22 | from DataLoaders import * 23 | from DataLoaders.utils import video_collate 24 | from architecture import VClip 25 | 26 | torch.backends.cudnn.enabled = False 27 | 28 | device = "cuda" if torch.cuda.is_available() else "cpu" 29 | 30 | cnf = get_config(sys.argv) 31 | # cnf.local_rank = int(os.environ["LOCAL_RANK"]) 32 | 33 | 34 | @torch.no_grad() 35 | def evaluate(loader, model): 36 | model.eval() 37 | class_tokens = clip.tokenize(CLASS_DESCRIPTION, context_length=77, truncate=True) 38 | with autocast(): 39 | war = 0 40 | all_labels = torch.zeros(len(loader.sampler)).to(device) 41 | all_predictions = torch.zeros(len(loader.sampler)).to(device) 42 | all_labels_lst = [all_labels.clone().detach() for i in range(int(os.environ['WORLD_SIZE']))] 43 | all_predictions_lst = [all_predictions.clone().detach() for i in range(int(os.environ['WORLD_SIZE']))] 44 | 45 | for batch_idx, (inputs, labels, _) in enumerate(loader): 46 | inputs, class_tokens, labels = inputs.to(device), class_tokens.to(device), labels.to(device) 47 | logits_per_image, logits_per_text = model(inputs, class_tokens) 48 | 49 | predicted = logits_per_image.softmax(dim=-1).argmax(dim=-1, keepdim=True) 50 | war += predicted.eq(labels.view_as(predicted)).sum().item() 51 | 52 | start_idx = loader.batch_size * batch_idx 53 | end_idx = start_idx + loader.batch_size 54 | end_idx = end_idx if end_idx <= all_labels.shape[0] else all_labels.shape[0] 55 | 56 | all_labels[start_idx:end_idx] = labels.reshape(-1) 57 | all_predictions[start_idx:end_idx] = predicted.reshape(-1) 58 | 59 | dist.all_gather(all_labels_lst, all_labels) 60 | dist.all_gather(all_predictions_lst, all_predictions) 61 | return torch.tensor(war, device=device, dtype=torch.float), (torch.cat(all_labels_lst).cpu().numpy(), torch.cat(all_predictions_lst).cpu().numpy()) 62 | 63 | 64 | def train(loader, model, loss_criterion, optimizer): 65 | model.train() 66 | losses = torch.zeros(len(loader)).to(device) 67 | for batch_idx, (inputs, labels, _) in enumerate(loader): 68 | labels = labels.reshape(-1).to(device) 69 | with autocast(): 70 | optimizer.zero_grad() 71 | descriptions = [CLASS_DESCRIPTION[i] for i in labels] 72 | class_tokens = clip.tokenize(descriptions, context_length=77, truncate=True) 73 | inputs, class_tokens = inputs.to(device), class_tokens.to(device) 74 | logits_per_image, logits_per_text = model(inputs, class_tokens) 75 | ground_truth = torch.arange(len(inputs), dtype=torch.long, device=device) 76 | 77 | loss_i = loss_criterion(logits_per_image, ground_truth) 78 | loss_t = loss_criterion(logits_per_text, ground_truth) 79 | loss = (loss_i + loss_t) / 2 80 | 81 | losses[batch_idx] = loss.item() 82 | loss.backward() 83 | optimizer.step() 84 | 85 | sys.stdout.write( 86 | '\r Iter[{}/{}]\t loss: {:.2f} '.format( 87 | batch_idx + 1, 88 | len(loader), 89 | loss.item() 90 | ) 91 | ) 92 | sys.stdout.flush() 93 | 94 | return losses.mean() 95 | 96 | 97 | if __name__ == "__main__": 98 | # region ddp 99 | torch.cuda.set_device(cnf.local_rank) 100 | cnf.is_master = cnf.local_rank == 0 101 | cnf.device = torch.cuda.device(cnf.local_rank) 102 | cnf.world_size = int(os.environ['WORLD_SIZE']) 103 | dist.init_process_group(backend='nccl') 104 | # endregion 105 | 106 | # region set_up 107 | 108 | if cnf.is_master: 109 | ROOT_FOLDER = os.path.join(cnf.log_dir, 'checkpoints') 110 | EXP_FOLDER = os.path.join(ROOT_FOLDER, cnf.exp_name) 111 | MODELS_FOLDER = os.path.join(EXP_FOLDER, 'models') 112 | PREDS_FOLDER = os.path.join(EXP_FOLDER, 
'preds') 113 | if not os.path.exists(MODELS_FOLDER): 114 | os.makedirs(MODELS_FOLDER, exist_ok=True) 115 | if not os.path.exists(PREDS_FOLDER): 116 | os.makedirs(PREDS_FOLDER, exist_ok=True) 117 | cnf_dict = vars(cnf) 118 | # endregion 119 | 120 | model = VClip(num_layers=2) 121 | 122 | if cnf.pretrained: 123 | state_pth = os.path.join(cnf.pretrained, '{}.pth'.format(cnf.fold)) 124 | state_dict = torch.load(state_pth) 125 | new_dict = dict() 126 | for key in state_dict: 127 | new_key = key.replace('module.', '') 128 | new_dict[new_key] = state_dict[key] 129 | keys = model.load_state_dict(new_dict, strict=False) 130 | print(keys) 131 | 132 | if cnf.finetune: 133 | if cnf.text: 134 | for name, param in model.backbone.transformer.named_parameters(): 135 | param.requires_grad = True 136 | if cnf.visual: 137 | for name, param in model.backbone.visual.named_parameters(): 138 | param.requires_grad = True 139 | backbone_params = model.backbone.parameters() 140 | other_params = list() 141 | for name, param in model.named_parameters(): 142 | if 'backbone' not in name: 143 | other_params.append(param) 144 | param_groups = [ 145 | {'params': other_params}, 146 | {'params': backbone_params, 'lr': cnf.lr * 0.001} 147 | ] 148 | else: 149 | param_groups = model.parameters() 150 | 151 | 152 | start_state = model.state_dict() 153 | 154 | model = model.cuda() 155 | model = nn.SyncBatchNorm.convert_sync_batchnorm(model) 156 | model = DDP( 157 | model, 158 | device_ids=[cnf.local_rank], 159 | output_device=cnf.local_rank 160 | ) 161 | 162 | train_loader, test_loader = get_loaders(cnf, fold=cnf.fold) 163 | # region loo correction 164 | cls = cnf.emo 165 | cls_name = CLASSES[cls] 166 | train_data = train_loader.dataset 167 | test_data = test_loader.dataset 168 | 169 | n_train_data = list() 170 | for el in train_data.data: 171 | if isinstance(el['label'], list): 172 | if cls not in el['label']: 173 | n_train_data.append(el) 174 | else: 175 | if el['label'] != cls: 176 | n_train_data.append(el) 177 | 178 | train_data.data = n_train_data 179 | n_test_data = list() 180 | for el in test_data.data: 181 | if isinstance(el['label'], list): 182 | if cls in el['label']: 183 | n_test_data.append(el) 184 | else: 185 | if el['label'] == cls: 186 | n_test_data.append(el) 187 | 188 | test_data.data = n_test_data 189 | 190 | if cnf.DDP: 191 | train_sampler = DistributedSampler(train_data) 192 | test_sampler = DistributedSampler(test_data) 193 | else: 194 | train_sampler = None 195 | test_sampler = None 196 | 197 | trainloader = data.DataLoader( 198 | train_data, 199 | batch_size=cnf.batch_size, 200 | collate_fn=video_collate, 201 | num_workers=2, 202 | drop_last=True, 203 | sampler=train_sampler 204 | ) 205 | testloader = data.DataLoader( 206 | test_data, 207 | batch_size=cnf.batch_size, 208 | collate_fn=video_collate, 209 | num_workers=2, 210 | sampler=test_sampler 211 | ) 212 | print('Train Samples: {}, Test samples: {}'.format(len(train_data), len(test_data))) 213 | # endregion 214 | if cnf.is_master: 215 | wandb.init(project='ZeroShot', group=cnf.exp_name, notes='', config=cnf_dict, 216 | job_type='fold={},emo={}'.format(cnf.fold, cls_name)) 217 | wandb.watch(model, log="all", log_freq=25) 218 | if cnf.optim == 'SGD': 219 | optimizer = optim.SGD( 220 | param_groups, 221 | lr=cnf.lr, 222 | momentum=0.9, 223 | weight_decay=cnf.wd 224 | ) 225 | else: 226 | optimizer = optim.Adam( 227 | param_groups, 228 | lr=cnf.lr, 229 | # betas=(0.9, 0.98), 230 | eps=1e-6, 231 | weight_decay=cnf.wd 232 | ) 233 | scaler = GradScaler() 234 | 
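# NOTE: MultiStepLR multiplies the LR by gamma=0.1 at epochs 25 and 50; with the 15-30 epoch budgets used by the LOO scripts, at most the first milestone fires.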
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[25, 50], gamma=0.1) 235 | loss_c = nn.CrossEntropyLoss() 236 | w = 0 237 | for e in range(cnf.num_epochs): 238 | trainloader.sampler.set_epoch(e) 239 | testloader.sampler.set_epoch(e) 240 | train_loss = train(loader=trainloader, model=model, loss_criterion=loss_c, optimizer=optimizer) 241 | dist.all_reduce(train_loss) 242 | train_loss /= cnf.world_size 243 | war, cm = evaluate(loader=testloader, model=model) 244 | dist.all_reduce(war) 245 | war /= len(testloader.dataset) 246 | if cnf.is_master: 247 | gt, pd = cm[0], cm[1] 248 | uar = metrics.confusion_matrix(gt, pd, normalize="true").diagonal().mean() 249 | test_dict = { 250 | 'epoch': e, 251 | 'loss': train_loss, 252 | 'war': war, 253 | 'uar': uar, 254 | 'lr': scheduler.get_last_lr()[0] 255 | } 256 | wandb.log(test_dict) 257 | if war > w: 258 | # preds = get_pred(test_loader, model) 259 | w = war 260 | if cnf.is_master: 261 | model_filename = os.path.join(MODELS_FOLDER, '{}.pth'.format(cnf.fold)) 262 | torch.save(model.state_dict(), model_filename) 263 | best = wandb.Table(columns=["WAR", "UAR"], data=[[war, uar]]) 264 | wandb.log({'best_results': best}) 265 | wandb.log({"conf_mat": wandb.plot.confusion_matrix(preds=pd.reshape(-1), y_true=gt.reshape(-1), 266 | class_names=CLASSES)}) 267 | scheduler.step() 268 | dist.destroy_process_group() 269 | -------------------------------------------------------------------------------- /train_supervised.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from sklearn import metrics 5 | 6 | import torch 7 | import torch.distributed as dist 8 | from torch.nn.parallel import DistributedDataParallel as DDP 9 | from torch import nn 10 | from torch.cuda.amp import autocast 11 | from torch.cuda.amp import GradScaler 12 | from torch import optim 13 | from torch.optim import lr_scheduler 14 | 15 | import clip 16 | 17 | import wandb 18 | 19 | from config import get_config 20 | from DataLoaders import * 21 | from architecture import VClip 22 | 23 | torch.backends.cudnn.enabled = False 24 | 25 | device = "cuda" if torch.cuda.is_available() else "cpu" 26 | 27 | cnf = get_config(sys.argv) 28 | 29 | 30 | @torch.no_grad() 31 | def evaluate(loader, model): 32 | model.eval() 33 | class_tokens = clip.tokenize(CLASS_DESCRIPTION, context_length=77, truncate=True) 34 | with autocast(): 35 | war = 0 36 | all_labels = torch.zeros(len(loader.sampler)).to(device) 37 | all_predictions = torch.zeros(len(loader.sampler)).to(device) 38 | all_labels_lst = [all_labels.clone().detach() for i in range(int(os.environ['WORLD_SIZE']))] 39 | all_predictions_lst = [all_predictions.clone().detach() for i in range(int(os.environ['WORLD_SIZE']))] 40 | 41 | for batch_idx, (inputs, labels, _) in enumerate(loader): 42 | inputs, class_tokens, labels = inputs.to(device), class_tokens.to(device), labels.to(device) 43 | logits_per_image, logits_per_text = model(inputs, class_tokens) 44 | 45 | predicted = logits_per_image.softmax(dim=-1).argmax(dim=-1, keepdim=True) 46 | war += predicted.eq(labels.view_as(predicted)).sum().item() 47 | 48 | start_idx = loader.batch_size * batch_idx 49 | end_idx = start_idx + loader.batch_size 50 | end_idx = end_idx if end_idx <= all_labels.shape[0] else all_labels.shape[0] 51 | 52 | all_labels[start_idx:end_idx] = labels.reshape(-1) 53 | all_predictions[start_idx:end_idx] = predicted.reshape(-1) 54 | 55 | dist.all_gather(all_labels_lst, all_labels) 56 | 
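# all_gather pools each rank's label and prediction buffers so the master process can compute UAR over the whole test set.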
dist.all_gather(all_predictions_lst, all_predictions) 57 | return torch.tensor(war, device=device, dtype=torch.float), (torch.cat(all_labels_lst).cpu().numpy(), torch.cat(all_predictions_lst).cpu().numpy()) 58 | 59 | 60 | def train(loader, model, loss_criterion, optimizer): 61 | model.train() 62 | losses = torch.zeros(len(loader)).to(device) 63 | for batch_idx, (inputs, labels, _) in enumerate(loader): 64 | labels = labels.reshape(-1).to(device) 65 | with autocast(): 66 | optimizer.zero_grad() 67 | descriptions = [CLASS_DESCRIPTION[i] for i in labels] 68 | class_tokens = clip.tokenize(descriptions, context_length=77, truncate=True) 69 | inputs, class_tokens = inputs.to(device), class_tokens.to(device) 70 | logits_per_image, logits_per_text = model(inputs, class_tokens) 71 | ground_truth = torch.arange(len(inputs), dtype=torch.long, device=device) 72 | 73 | loss_i = loss_criterion(logits_per_image, ground_truth) 74 | loss_t = loss_criterion(logits_per_text, ground_truth) 75 | loss = (loss_i + loss_t) / 2 76 | 77 | losses[batch_idx] = loss.item() 78 | loss.backward() 79 | optimizer.step() 80 | 81 | sys.stdout.write( 82 | '\r Iter[{}/{}]\t loss: {:.2f} '.format( 83 | batch_idx + 1, 84 | len(loader), 85 | loss.item() 86 | ) 87 | ) 88 | sys.stdout.flush() 89 | 90 | return losses.mean() 91 | 92 | 93 | if __name__ == "__main__": 94 | # region ddp 95 | torch.cuda.set_device(cnf.local_rank) 96 | cnf.is_master = cnf.local_rank == 0 97 | cnf.device = torch.cuda.device(cnf.local_rank) 98 | cnf.world_size = int(os.environ['WORLD_SIZE']) 99 | dist.init_process_group(backend='nccl') 100 | # endregion 101 | 102 | # region set_up 103 | 104 | if cnf.is_master: 105 | ROOT_FOLDER = os.path.join(cnf.log_dir, 'checkpoints') 106 | EXP_FOLDER = os.path.join(ROOT_FOLDER, cnf.exp_name) 107 | MODELS_FOLDER = os.path.join(EXP_FOLDER, 'models') 108 | PREDS_FOLDER = os.path.join(EXP_FOLDER, 'preds') 109 | if not os.path.exists(MODELS_FOLDER): 110 | os.makedirs(MODELS_FOLDER, exist_ok=True) 111 | if not os.path.exists(PREDS_FOLDER): 112 | os.makedirs(PREDS_FOLDER, exist_ok=True) 113 | cnf_dict = vars(cnf) 114 | # endregion 115 | 116 | model = VClip(num_layers=2) 117 | 118 | if cnf.pretrained: 119 | state_pth = os.path.join(cnf.pretrained, '{}.pth'.format(cnf.fold)) 120 | state_dict = torch.load(state_pth) 121 | new_dict = dict() 122 | for key in state_dict: 123 | new_key = key.replace('module.', '') 124 | new_dict[new_key] = state_dict[key] 125 | keys = model.load_state_dict(new_dict, strict=False) 126 | print(keys) 127 | 128 | if cnf.finetune: 129 | if cnf.text: 130 | for name, param in model.backbone.transformer.named_parameters(): 131 | param.requires_grad = True 132 | if cnf.visual: 133 | for name, param in model.backbone.visual.named_parameters(): 134 | param.requires_grad = True 135 | backbone_params = model.backbone.parameters() 136 | other_params = list() 137 | for name, param in model.named_parameters(): 138 | if 'backbone' not in name: 139 | other_params.append(param) 140 | param_groups = [ 141 | {'params': other_params}, 142 | {'params': backbone_params, 'lr': cnf.lr * 0.001} 143 | ] 144 | else: 145 | param_groups = model.parameters() 146 | 147 | if cnf.optim == 'SGD': 148 | optimizer = optim.SGD( 149 | param_groups, 150 | lr=cnf.lr, 151 | momentum=0.9, 152 | weight_decay=cnf.wd 153 | ) 154 | else: 155 | optimizer = optim.Adam( 156 | param_groups, 157 | lr=cnf.lr, 158 | # betas=(0.9, 0.98), 159 | eps=1e-6, 160 | weight_decay=cnf.wd 161 | ) 162 | scaler = GradScaler() 163 | scheduler = 
lr_scheduler.MultiStepLR(optimizer, milestones=[25, 50], gamma=0.1) 164 | loss_c = nn.CrossEntropyLoss() 165 | 166 | if cnf.is_master: 167 | wandb.init(project='ZeroShot', group=cnf.exp_name, notes='', config=cnf_dict, job_type='fold={}'.format(cnf.fold)) 168 | wandb.watch(model, log="all", log_freq=25) 169 | model = model.cuda() 170 | model = nn.SyncBatchNorm.convert_sync_batchnorm(model) 171 | model = DDP( 172 | model, 173 | device_ids=[cnf.local_rank], 174 | output_device=cnf.local_rank 175 | ) 176 | train_loader, test_loader = get_loaders(cnf, fold=cnf.fold) 177 | w = 0 178 | for e in range(cnf.num_epochs): 179 | train_loader.sampler.set_epoch(e) 180 | test_loader.sampler.set_epoch(e) 181 | train_loss = train(loader=train_loader, model=model, loss_criterion=loss_c, optimizer=optimizer) 182 | dist.all_reduce(train_loss) 183 | train_loss /= cnf.world_size 184 | war, cm = evaluate(loader=test_loader, model=model) 185 | dist.all_reduce(war) 186 | war /= len(test_loader.dataset) 187 | if cnf.is_master: 188 | gt, pd = cm[0], cm[1] 189 | uar = metrics.confusion_matrix(gt, pd, normalize="true").diagonal().mean() 190 | test_dict = { 191 | 'epoch': e, 192 | 'loss': train_loss, 193 | 'war': war, 194 | 'uar': uar, 195 | 'lr': scheduler.get_last_lr()[0] 196 | } 197 | wandb.log(test_dict) 198 | if war > w: 199 | # preds = get_pred(test_loader, model) 200 | w = war 201 | if cnf.is_master: 202 | model_filename = os.path.join(MODELS_FOLDER, '{}.pth'.format(cnf.fold)) 203 | torch.save(model.state_dict(), model_filename) 204 | best = wandb.Table(columns=["WAR", "UAR"], data=[[war, uar]]) 205 | wandb.log({'best_results': best}) 206 | wandb.log({"conf_mat": wandb.plot.confusion_matrix(preds=pd.reshape(-1), y_true=gt.reshape(-1), 207 | class_names=CLASSES)}) 208 | scheduler.step() 209 | dist.destroy_process_group() 210 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import random 2 | import torch 3 | from torch import nn 4 | from torch.nn import functional as F 5 | 6 | from matplotlib import pyplot as plt 7 | from sklearn.manifold import TSNE 8 | 9 | def PCC(a: torch.tensor, b: torch.tensor): 10 | am = torch.mean(a, dim=0) 11 | bm = torch.mean(b, dim=0) 12 | num = torch.sum((a - am) * (b - bm), dim=0) 13 | den = torch.sqrt(sum((a - am) ** 2) * sum((b - bm) ** 2)) + 1e-5 14 | return num/den 15 | 16 | 17 | def CCC(a: torch.tensor, b: torch.tensor): 18 | rho = 2 * PCC(a,b) * a.std(dim=0, unbiased=False) * b.std(dim=0, unbiased=False) 19 | rho /= (a.var(dim=0, unbiased=False) + b.var(dim=0, unbiased=False) + torch.pow(a.mean(dim=0) - b.mean(dim=0), 2) + 1e-5) 20 | return rho 21 | 22 | 23 | def feat_scatter(data, labels, class_names): 24 | color = ['lightcoral', 'darkorange', 'olive', 'teal', 'violet', 'skyblue', 'magenta', 'indigo', 'cyan', 'slategray', 25 | 'lawngreen'] 26 | tsne = TSNE(n_components=2, perplexity=50, n_iter=250) 27 | results = tsne.fit_transform(data) 28 | fig, ax = plt.subplots() 29 | for i in range(len(class_names)): 30 | mask = labels == i 31 | mask = mask.reshape(-1) 32 | ax.scatter(x=results[mask, 0], y=results[mask, 1], color=color[i], label=class_names[i]) 33 | ax.legend(title='class_name') 34 | return plt 35 | 36 | 37 | class RMSE(nn.Module): 38 | def __init__(self): 39 | super(RMSE, self).__init__() 40 | 41 | def forward(self, output, target): 42 | mse = F.mse_loss(output, target, reduction='mean') 43 | return torch.sqrt(mse) 44 | 45 | 46 | def 
eval_metrics(y_hat, y): 47 | mae = torch.abs(y_hat - y) 48 | mse = torch.pow(mae, exponent=2).mean(0) 49 | rmse = torch.sqrt(mse) 50 | return mae.mean(0).cpu().numpy(), mse.cpu().numpy(), rmse.cpu().numpy() 51 | --------------------------------------------------------------------------------
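For reference, below is a minimal single-process sketch of how the pieces above compose at zero-shot inference time. It is an illustration, not part of the repository: the checkpoint path and the two prompt strings are hypothetical placeholders (the real per-class descriptions come from DataLoaders/class_descriptions.yml via the data loaders), a random tensor stands in for a preprocessed face clip, and the DDP machinery that eval.py wraps around this is omitted.

import torch
from torch.cuda.amp import autocast

import clip

from architecture import VClip

device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical prompts; the repository loads per-class descriptions from
# DataLoaders/class_descriptions.yml instead.
CLASS_DESCRIPTION = [
    "a person showing happiness",
    "a person showing sadness",
]

model = VClip(num_layers=2)
# Hypothetical checkpoint path; the training scripts save DDP state dicts,
# hence the 'module.' prefix that must be stripped before loading.
state_dict = torch.load("logs/checkpoints/example/models/1.pth", map_location="cpu")
model.load_state_dict({k.replace("module.", ""): v for k, v in state_dict.items()}, strict=False)
model = model.to(device).eval()

# (B, T, C, H, W) stand-in for a clip produced by the DataLoaders pipeline.
video = torch.randn(1, 32, 3, 224, 224, device=device)
class_tokens = clip.tokenize(CLASS_DESCRIPTION, context_length=77, truncate=True).to(device)

# autocast mirrors eval.py and keeps dtypes compatible with the fp16 CLIP
# backbone on GPU; it is a no-op on CPU.
with torch.no_grad(), autocast(enabled=(device == "cuda")):
    logits_per_image, _ = model(video, class_tokens)
    pred = logits_per_image.softmax(dim=-1).argmax(dim=-1)
print(CLASS_DESCRIPTION[pred.item()])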
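The two numbers logged throughout are WAR (weighted average recall, i.e. overall accuracy) and UAR (unweighted average recall, the mean of per-class recalls). A small self-contained sketch with toy arrays, showing the same computation eval.py performs on the gathered predictions:

import numpy as np
from sklearn import metrics

# Toy ground truth and predictions over three classes (illustrative only).
gt = np.array([0, 0, 0, 1, 1, 2])
pd = np.array([0, 0, 1, 1, 1, 0])

# WAR is plain accuracy; eval.py accumulates it as a correct count reduced across ranks.
war = (gt == pd).mean()  # 4/6 ~ 0.667

# Row-normalising the confusion matrix puts each class's recall on the
# diagonal, so the diagonal mean weights every class equally regardless of size.
uar = metrics.confusion_matrix(gt, pd, normalize="true").diagonal().mean()  # (2/3 + 1 + 0) / 3 ~ 0.556

print("WAR: {:.3f}, UAR: {:.3f}".format(war, uar))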