├── asserts
│   ├── 0.jpg
│   ├── 1.jpg
│   └── logo.jpg
├── models
│   ├── __pycache__
│   │   ├── utils.cpython-310.pyc
│   │   ├── pipeline_consisid.cpython-310.pyc
│   │   ├── pipeline_cogvideox.cpython-310.pyc
│   │   ├── pipeline_ingredients.cpython-310.pyc
│   │   ├── transformer_consisid.cpython-310.pyc
│   │   ├── local_facial_extractor.cpython-310.pyc
│   │   └── transformer_ingredients.cpython-310.pyc
│   ├── eva_clip
│   │   ├── bpe_simple_vocab_16e6.txt.gz
│   │   ├── __pycache__
│   │   │   ├── loss.cpython-310.pyc
│   │   │   ├── model.cpython-310.pyc
│   │   │   ├── openai.cpython-310.pyc
│   │   │   ├── rope.cpython-310.pyc
│   │   │   ├── utils.cpython-310.pyc
│   │   │   ├── __init__.cpython-310.pyc
│   │   │   ├── factory.cpython-310.pyc
│   │   │   ├── hf_model.cpython-310.pyc
│   │   │   ├── constants.cpython-310.pyc
│   │   │   ├── hf_configs.cpython-310.pyc
│   │   │   ├── pretrained.cpython-310.pyc
│   │   │   ├── timm_model.cpython-310.pyc
│   │   │   ├── tokenizer.cpython-310.pyc
│   │   │   ├── transform.cpython-310.pyc
│   │   │   ├── transformer.cpython-310.pyc
│   │   │   ├── eva_vit_model.cpython-310.pyc
│   │   │   ├── utils_qformer.cpython-310.pyc
│   │   │   └── modified_resnet.cpython-310.pyc
│   │   ├── constants.py
│   │   ├── model_configs
│   │   │   ├── EVA01-CLIP-B-16.json
│   │   │   ├── EVA01-CLIP-g-14.json
│   │   │   ├── EVA01-CLIP-g-14-plus.json
│   │   │   ├── EVA02-CLIP-bigE-14.json
│   │   │   ├── EVA02-CLIP-bigE-14-plus.json
│   │   │   ├── EVA02-CLIP-B-16.json
│   │   │   ├── EVA02-CLIP-L-14.json
│   │   │   └── EVA02-CLIP-L-14-336.json
│   │   ├── __init__.py
│   │   ├── hf_configs.py
│   │   ├── transform.py
│   │   ├── timm_model.py
│   │   ├── rope.py
│   │   ├── openai.py
│   │   ├── utils_qformer.py
│   │   ├── loss.py
│   │   ├── tokenizer.py
│   │   ├── modified_resnet.py
│   │   ├── hf_model.py
│   │   ├── pretrained.py
│   │   ├── utils.py
│   │   └── model.py
│   ├── local_facial_extractor.py
│   └── utils.py
├── metric
│   ├── curricularface
│   │   ├── __pycache__
│   │   │   ├── common.cpython-310.pyc
│   │   │   ├── __init__.cpython-310.pyc
│   │   │   ├── model_irse.cpython-310.pyc
│   │   │   └── model_resnet.cpython-310.pyc
│   │   ├── __init__.py
│   │   ├── common.py
│   │   ├── model_resnet.py
│   │   └── model_irse.py
│   ├── clip_score.py
│   └── face_sim_fid.py
├── infer.sh
├── requirements.txt
├── README.md
├── infer.py
├── LICENSE
└── app.py
/asserts/0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/asserts/0.jpg
--------------------------------------------------------------------------------
/asserts/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/asserts/1.jpg
--------------------------------------------------------------------------------
/asserts/logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/asserts/logo.jpg
--------------------------------------------------------------------------------
/models/__pycache__/utils.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/__pycache__/utils.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/bpe_simple_vocab_16e6.txt.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/bpe_simple_vocab_16e6.txt.gz
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/loss.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/loss.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/model.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/model.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/openai.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/openai.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/rope.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/rope.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/utils.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/utils.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/constants.py:
--------------------------------------------------------------------------------
1 | OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
2 | OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
3 |
--------------------------------------------------------------------------------
/models/__pycache__/pipeline_consisid.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/__pycache__/pipeline_consisid.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/__init__.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/__init__.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/factory.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/factory.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/hf_model.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/hf_model.cpython-310.pyc
--------------------------------------------------------------------------------
/models/__pycache__/pipeline_cogvideox.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/__pycache__/pipeline_cogvideox.cpython-310.pyc
--------------------------------------------------------------------------------
/models/__pycache__/pipeline_ingredients.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/__pycache__/pipeline_ingredients.cpython-310.pyc
--------------------------------------------------------------------------------
/models/__pycache__/transformer_consisid.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/__pycache__/transformer_consisid.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/constants.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/constants.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/hf_configs.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/hf_configs.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/pretrained.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/pretrained.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/timm_model.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/timm_model.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/tokenizer.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/tokenizer.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/transform.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/transform.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/transformer.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/transformer.cpython-310.pyc
--------------------------------------------------------------------------------
/metric/curricularface/__pycache__/common.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/metric/curricularface/__pycache__/common.cpython-310.pyc
--------------------------------------------------------------------------------
/models/__pycache__/local_facial_extractor.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/__pycache__/local_facial_extractor.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/eva_vit_model.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/eva_vit_model.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/utils_qformer.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/utils_qformer.cpython-310.pyc
--------------------------------------------------------------------------------
/metric/curricularface/__pycache__/__init__.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/metric/curricularface/__pycache__/__init__.cpython-310.pyc
--------------------------------------------------------------------------------
/metric/curricularface/__pycache__/model_irse.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/metric/curricularface/__pycache__/model_irse.cpython-310.pyc
--------------------------------------------------------------------------------
/models/__pycache__/transformer_ingredients.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/__pycache__/transformer_ingredients.cpython-310.pyc
--------------------------------------------------------------------------------
/models/eva_clip/__pycache__/modified_resnet.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/models/eva_clip/__pycache__/modified_resnet.cpython-310.pyc
--------------------------------------------------------------------------------
/metric/curricularface/__pycache__/model_resnet.cpython-310.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/feizc/Ingredients/HEAD/metric/curricularface/__pycache__/model_resnet.cpython-310.pyc
--------------------------------------------------------------------------------
/infer.sh:
--------------------------------------------------------------------------------
1 | python infer.py \
2 | --prompt "Two men in half bodies, are seated in a dimly lit room, possibly an office or meeting room, with a formal atmosphere." \
3 | --img_file_path 'asserts/0.jpg' 'asserts/1.jpg'
--------------------------------------------------------------------------------
/models/eva_clip/model_configs/EVA01-CLIP-B-16.json:
--------------------------------------------------------------------------------
1 | {
2 | "embed_dim": 512,
3 | "vision_cfg": {
4 | "image_size": 224,
5 | "layers": 12,
6 | "width": 768,
7 | "patch_size": 16,
8 | "eva_model_name": "eva-clip-b-16",
9 | "ls_init_value": 0.1,
10 | "drop_path_rate": 0.0
11 | },
12 | "text_cfg": {
13 | "context_length": 77,
14 | "vocab_size": 49408,
15 | "width": 512,
16 | "heads": 8,
17 | "layers": 12
18 | }
19 | }
--------------------------------------------------------------------------------
/models/eva_clip/model_configs/EVA01-CLIP-g-14.json:
--------------------------------------------------------------------------------
1 | {
2 | "embed_dim": 1024,
3 | "vision_cfg": {
4 | "image_size": 224,
5 | "layers": 40,
6 | "width": 1408,
7 | "head_width": 88,
8 | "mlp_ratio": 4.3637,
9 | "patch_size": 14,
10 | "eva_model_name": "eva-clip-g-14-x",
11 | "drop_path_rate": 0.4,
12 | "xattn": true,
13 | "fusedLN": true
14 | },
15 | "text_cfg": {
16 | "context_length": 77,
17 | "vocab_size": 49408,
18 | "width": 768,
19 | "heads": 12,
20 | "layers": 12,
21 | "xattn": false,
22 | "fusedLN": true
23 | }
24 | }
--------------------------------------------------------------------------------
/models/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json:
--------------------------------------------------------------------------------
1 | {
2 | "embed_dim": 1024,
3 | "vision_cfg": {
4 | "image_size": 224,
5 | "layers": 40,
6 | "width": 1408,
7 | "head_width": 88,
8 | "mlp_ratio": 4.3637,
9 | "patch_size": 14,
10 | "eva_model_name": "eva-clip-g-14-x",
11 | "drop_path_rate": 0,
12 | "xattn": true,
13 | "fusedLN": true
14 | },
15 | "text_cfg": {
16 | "context_length": 77,
17 | "vocab_size": 49408,
18 | "width": 1024,
19 | "heads": 16,
20 | "layers": 24,
21 | "xattn": false,
22 | "fusedLN": true
23 | }
24 | }
--------------------------------------------------------------------------------
/models/eva_clip/model_configs/EVA02-CLIP-bigE-14.json:
--------------------------------------------------------------------------------
1 | {
2 | "embed_dim": 1024,
3 | "vision_cfg": {
4 | "image_size": 224,
5 | "layers": 64,
6 | "width": 1792,
7 | "head_width": 112,
8 | "mlp_ratio": 8.571428571428571,
9 | "patch_size": 14,
10 | "eva_model_name": "eva-clip-4b-14-x",
11 | "drop_path_rate": 0,
12 | "xattn": true,
13 | "postnorm": true,
14 | "fusedLN": true
15 | },
16 | "text_cfg": {
17 | "context_length": 77,
18 | "vocab_size": 49408,
19 | "width": 1024,
20 | "heads": 16,
21 | "layers": 24,
22 | "xattn": false,
23 | "fusedLN": true
24 | }
25 | }
--------------------------------------------------------------------------------
/models/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json:
--------------------------------------------------------------------------------
1 | {
2 | "embed_dim": 1024,
3 | "vision_cfg": {
4 | "image_size": 224,
5 | "layers": 64,
6 | "width": 1792,
7 | "head_width": 112,
8 | "mlp_ratio": 8.571428571428571,
9 | "patch_size": 14,
10 | "eva_model_name": "eva-clip-4b-14-x",
11 | "drop_path_rate": 0,
12 | "xattn": true,
13 | "postnorm": true,
14 | "fusedLN": true
15 | },
16 | "text_cfg": {
17 | "context_length": 77,
18 | "vocab_size": 49408,
19 | "width": 1280,
20 | "heads": 20,
21 | "layers": 32,
22 | "xattn": false,
23 | "fusedLN": true
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch==2.5.1
2 | torchaudio==2.5.1
3 | torchvision==0.20.1
4 | xformers==0.0.28.post3
5 | onnx==1.17.0
6 | onnxruntime-gpu==1.19.2
7 | deepspeed==0.15.2
8 | accelerate==1.1.1
9 | git+https://github.com/huggingface/diffusers.git
10 | transformers==4.46.3
11 | tokenizers==0.20.1
12 | peft==0.12.0
13 | decord==0.6.0
14 | sentencepiece==0.2.0
15 | opencv-python==4.10.0.84
16 | pyfacer==0.0.4
17 | numpy==1.26.4
18 | numba==0.60.0
19 | insightface==0.7.3
20 | huggingface-hub==0.27.1
21 | facexlib==0.3.0
22 | timm==1.0.9
23 | func_timeout==4.3.5
24 | tensorboard==2.17.1
25 | gradio==5.6.0
26 | spaces==0.30.4
27 | pillow==10.4.0
28 | spandrel==0.4.0
29 | scikit-video==1.1.11
30 | moviepy==2.1.1
31 | consisid_eva_clip
32 | wandb
33 | imageio-ffmpeg
34 | ftfy
35 | Jinja2
36 | einops
37 | nvitop
38 |
--------------------------------------------------------------------------------
/models/eva_clip/model_configs/EVA02-CLIP-B-16.json:
--------------------------------------------------------------------------------
1 | {
2 | "embed_dim": 512,
3 | "vision_cfg": {
4 | "image_size": 224,
5 | "layers": 12,
6 | "width": 768,
7 | "head_width": 64,
8 | "patch_size": 16,
9 | "mlp_ratio": 2.6667,
10 | "eva_model_name": "eva-clip-b-16-X",
11 | "drop_path_rate": 0.0,
12 | "xattn": true,
13 | "fusedLN": true,
14 | "rope": true,
15 | "pt_hw_seq_len": 16,
16 | "intp_freq": true,
17 | "naiveswiglu": true,
18 | "subln": true
19 | },
20 | "text_cfg": {
21 | "context_length": 77,
22 | "vocab_size": 49408,
23 | "width": 512,
24 | "heads": 8,
25 | "layers": 12,
26 | "xattn": true,
27 | "fusedLN": true
28 | }
29 | }
--------------------------------------------------------------------------------
/models/eva_clip/model_configs/EVA02-CLIP-L-14.json:
--------------------------------------------------------------------------------
1 | {
2 | "embed_dim": 768,
3 | "vision_cfg": {
4 | "image_size": 224,
5 | "layers": 24,
6 | "width": 1024,
7 | "drop_path_rate": 0,
8 | "head_width": 64,
9 | "mlp_ratio": 2.6667,
10 | "patch_size": 14,
11 | "eva_model_name": "eva-clip-l-14",
12 | "xattn": true,
13 | "fusedLN": true,
14 | "rope": true,
15 | "pt_hw_seq_len": 16,
16 | "intp_freq": true,
17 | "naiveswiglu": true,
18 | "subln": true
19 | },
20 | "text_cfg": {
21 | "context_length": 77,
22 | "vocab_size": 49408,
23 | "width": 768,
24 | "heads": 12,
25 | "layers": 12,
26 | "xattn": false,
27 | "fusedLN": true
28 | }
29 | }
--------------------------------------------------------------------------------
/models/eva_clip/model_configs/EVA02-CLIP-L-14-336.json:
--------------------------------------------------------------------------------
1 | {
2 | "embed_dim": 768,
3 | "vision_cfg": {
4 | "image_size": 336,
5 | "layers": 24,
6 | "width": 1024,
7 | "drop_path_rate": 0,
8 | "head_width": 64,
9 | "mlp_ratio": 2.6667,
10 | "patch_size": 14,
11 | "eva_model_name": "eva-clip-l-14-336",
12 | "xattn": true,
13 | "fusedLN": true,
14 | "rope": true,
15 | "pt_hw_seq_len": 16,
16 | "intp_freq": true,
17 | "naiveswiglu": true,
18 | "subln": true
19 | },
20 | "text_cfg": {
21 | "context_length": 77,
22 | "vocab_size": 49408,
23 | "width": 768,
24 | "heads": 12,
25 | "layers": 12,
26 | "xattn": false,
27 | "fusedLN": true
28 | }
29 | }
--------------------------------------------------------------------------------
/models/eva_clip/__init__.py:
--------------------------------------------------------------------------------
1 | from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
2 | from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer, create_transforms
3 | from .factory import list_models, add_model_config, get_model_config, load_checkpoint
4 | from .loss import ClipLoss
5 | from .model import CLIP, CustomCLIP, CLIPTextCfg, CLIPVisionCfg,\
6 | convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype
7 | from .openai import load_openai_model, list_openai_models
8 | from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model,\
9 | get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained
10 | from .tokenizer import SimpleTokenizer, tokenize
11 | from .transform import image_transform
--------------------------------------------------------------------------------
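The exports above mirror the open_clip-style EVA-CLIP factory API (factory.py itself appears only as a compiled .pyc in this dump). A minimal usage sketch under that assumption; the checkpoint path is a placeholder and the keyword names beyond the model name are not verified against factory.py:

```python
import torch
from models.eva_clip import create_model_and_transforms, get_tokenizer

# "EVA02-CLIP-L-14-336" corresponds to a JSON under models/eva_clip/model_configs.
# The checkpoint path is a placeholder; weights are not bundled with this repo.
model, _, preprocess = create_model_and_transforms(
    "EVA02-CLIP-L-14-336", pretrained="/path/to/EVA02_CLIP_L_336_psz14_s6B.pt"
)
tokenizer = get_tokenizer("EVA02-CLIP-L-14-336")

model.eval()
with torch.no_grad():
    tokens = tokenizer(["a photo of a person"])
    text_features = model.encode_text(tokens)
print(text_features.shape)  # expected (1, 768) for this config's embed_dim
```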
/metric/clip_score.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | from transformers import CLIPModel, CLIPProcessor
4 |
5 | def compute_clip_score(video_path, model, processor, prompt, device, num_frames=24):
6 | cap = cv2.VideoCapture(video_path)
7 | total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
8 |
9 | frames = []
10 |
11 | for i in range(num_frames):
12 | frame_idx = int(i * total_frames / num_frames)
13 | cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
14 | ret, frame = cap.read()
15 | if ret:
16 | frames.append(frame)
17 | else:
18 | print(f"Warning: Frame at index {frame_idx} could not be read.")
19 |
20 | cap.release()
21 |
22 |     if model is None:
23 |         return None
24 |
25 |     inputs = processor(text=prompt, images=frames, return_tensors="pt", padding=True, truncation=True)
26 |     inputs.to(device)
27 |     outputs = model(**inputs)
28 |     logits_per_image = outputs.logits_per_image
29 |     return logits_per_image.mean().item()
--------------------------------------------------------------------------------
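A minimal driver sketch for `compute_clip_score`; the CLIP checkpoint name, video path, and prompt below are illustrative assumptions rather than values fixed by this repo:

```python
import torch
from transformers import CLIPModel, CLIPProcessor
from metric.clip_score import compute_clip_score

device = "cuda" if torch.cuda.is_available() else "cpu"
clip_name = "openai/clip-vit-large-patch14"  # assumed checkpoint, not pinned by the repo
model = CLIPModel.from_pretrained(clip_name).to(device)
processor = CLIPProcessor.from_pretrained(clip_name)

score = compute_clip_score(
    video_path="results/sample.mp4",  # placeholder path to a generated video
    model=model,
    processor=processor,
    prompt="Two men are seated in a dimly lit room.",
    device=device,
    num_frames=24,
)
print(f"CLIP score: {score:.2f}")
```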
/metric/curricularface/__init__.py:
--------------------------------------------------------------------------------
1 | # The implementation is adapted from TFace, made publicly available under the Apache-2.0 license at
2 | # https://github.com/Tencent/TFace/blob/master/recognition/torchkit/backbone
3 | from .model_irse import IR_18, IR_34, IR_50, IR_101, IR_152, IR_200, IR_SE_50, IR_SE_101, IR_SE_152, IR_SE_200
4 | from .model_resnet import ResNet_50, ResNet_101, ResNet_152
5 |
6 |
7 | _model_dict = {
8 | 'ResNet_50': ResNet_50,
9 | 'ResNet_101': ResNet_101,
10 | 'ResNet_152': ResNet_152,
11 | 'IR_18': IR_18,
12 | 'IR_34': IR_34,
13 | 'IR_50': IR_50,
14 | 'IR_101': IR_101,
15 | 'IR_152': IR_152,
16 | 'IR_200': IR_200,
17 | 'IR_SE_50': IR_SE_50,
18 | 'IR_SE_101': IR_SE_101,
19 | 'IR_SE_152': IR_SE_152,
20 | 'IR_SE_200': IR_SE_200
21 | }
22 |
23 |
24 | def get_model(key):
25 | """ Get different backbone network by key,
26 |         support ResNet_50, ResNet_101, ResNet_152,
27 | IR_18, IR_34, IR_50, IR_101, IR_152, IR_200,
28 | IR_SE_50, IR_SE_101, IR_SE_152, IR_SE_200.
29 | """
30 | if key in _model_dict.keys():
31 | return _model_dict[key]
32 | else:
33 |         raise KeyError('unsupported model: {}'.format(key))
34 |
--------------------------------------------------------------------------------
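A usage sketch for the backbone registry, assuming 112x112 aligned face crops, that the IR constructors take the input size the same way `model_resnet.py` does (model_irse.py is not shown above), and an externally downloaded CurricularFace checkpoint (path is a placeholder):

```python
import torch
from metric.curricularface import get_model

# Backbone constructors take the input spatial size; 112x112 is the aligned-face setting.
backbone = get_model("IR_101")(input_size=[112, 112])

# Placeholder checkpoint path; CurricularFace weights are not bundled with this repo.
state_dict = torch.load("checkpoints/curricularface_ir101.pth", map_location="cpu")
backbone.load_state_dict(state_dict, strict=False)
backbone.eval()

with torch.no_grad():
    face = torch.randn(1, 3, 112, 112)  # stand-in for an aligned, normalized face crop
    embedding = backbone(face)          # identity embedding used for face similarity
print(embedding.shape)
```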
/models/eva_clip/hf_configs.py:
--------------------------------------------------------------------------------
1 | # HF architecture dict:
2 | arch_dict = {
3 | # https://huggingface.co/docs/transformers/model_doc/roberta#roberta
4 | "roberta": {
5 | "config_names": {
6 | "context_length": "max_position_embeddings",
7 | "vocab_size": "vocab_size",
8 | "width": "hidden_size",
9 | "heads": "num_attention_heads",
10 | "layers": "num_hidden_layers",
11 | "layer_attr": "layer",
12 | "token_embeddings_attr": "embeddings"
13 | },
14 | "pooler": "mean_pooler",
15 | },
16 | # https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaConfig
17 | "xlm-roberta": {
18 | "config_names": {
19 | "context_length": "max_position_embeddings",
20 | "vocab_size": "vocab_size",
21 | "width": "hidden_size",
22 | "heads": "num_attention_heads",
23 | "layers": "num_hidden_layers",
24 | "layer_attr": "layer",
25 | "token_embeddings_attr": "embeddings"
26 | },
27 | "pooler": "mean_pooler",
28 | },
29 | # https://huggingface.co/docs/transformers/model_doc/mt5#mt5
30 | "mt5": {
31 | "config_names": {
32 | # unlimited seqlen
33 | # https://github.com/google-research/text-to-text-transfer-transformer/issues/273
34 | # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/t5/modeling_t5.py#L374
35 | "context_length": "",
36 | "vocab_size": "vocab_size",
37 | "width": "d_model",
38 | "heads": "num_heads",
39 | "layers": "num_layers",
40 | "layer_attr": "block",
41 | "token_embeddings_attr": "embed_tokens"
42 | },
43 | "pooler": "mean_pooler",
44 | },
45 | "bert": {
46 | "config_names": {
47 | "context_length": "max_position_embeddings",
48 | "vocab_size": "vocab_size",
49 | "width": "hidden_size",
50 | "heads": "num_attention_heads",
51 | "layers": "num_hidden_layers",
52 | "layer_attr": "layer",
53 | "token_embeddings_attr": "embeddings"
54 | },
55 | "pooler": "mean_pooler",
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/metric/curricularface/common.py:
--------------------------------------------------------------------------------
1 | # The implementation is adapted from TFace, made publicly available under the Apache-2.0 license at
2 | # https://github.com/Tencent/TFace/blob/master/recognition/torchkit/backbone/common.py
3 | import torch.nn as nn
4 | from torch.nn import Conv2d, Module, ReLU, Sigmoid
5 |
6 |
7 | def initialize_weights(modules):
8 |     """ Weight initialization: conv2d and linear layers are initialized with kaiming_normal
9 | """
10 | for m in modules:
11 | if isinstance(m, nn.Conv2d):
12 | nn.init.kaiming_normal_(
13 | m.weight, mode='fan_out', nonlinearity='relu')
14 | if m.bias is not None:
15 | m.bias.data.zero_()
16 | elif isinstance(m, nn.BatchNorm2d):
17 | m.weight.data.fill_(1)
18 | m.bias.data.zero_()
19 | elif isinstance(m, nn.Linear):
20 | nn.init.kaiming_normal_(
21 | m.weight, mode='fan_out', nonlinearity='relu')
22 | if m.bias is not None:
23 | m.bias.data.zero_()
24 |
25 |
26 | class Flatten(Module):
27 | """ Flat tensor
28 | """
29 |
30 | def forward(self, input):
31 | return input.view(input.size(0), -1)
32 |
33 |
34 | class SEModule(Module):
35 | """ SE block
36 | """
37 |
38 | def __init__(self, channels, reduction):
39 | super(SEModule, self).__init__()
40 | self.avg_pool = nn.AdaptiveAvgPool2d(1)
41 | self.fc1 = Conv2d(
42 | channels,
43 | channels // reduction,
44 | kernel_size=1,
45 | padding=0,
46 | bias=False)
47 |
48 | nn.init.xavier_uniform_(self.fc1.weight.data)
49 |
50 | self.relu = ReLU(inplace=True)
51 | self.fc2 = Conv2d(
52 | channels // reduction,
53 | channels,
54 | kernel_size=1,
55 | padding=0,
56 | bias=False)
57 |
58 | self.sigmoid = Sigmoid()
59 |
60 | def forward(self, x):
61 | module_input = x
62 | x = self.avg_pool(x)
63 | x = self.fc1(x)
64 | x = self.relu(x)
65 | x = self.fc2(x)
66 | x = self.sigmoid(x)
67 |
68 | return module_input * x
69 |
--------------------------------------------------------------------------------
/models/eva_clip/transform.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, Sequence, Tuple
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torchvision.transforms.functional as F
6 |
7 | from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
8 | CenterCrop
9 |
10 | from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
11 |
12 |
13 | class ResizeMaxSize(nn.Module):
14 |
15 | def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):
16 | super().__init__()
17 | if not isinstance(max_size, int):
18 | raise TypeError(f"Size should be int. Got {type(max_size)}")
19 | self.max_size = max_size
20 | self.interpolation = interpolation
21 |         self.fn = min if fn == 'min' else max
22 | self.fill = fill
23 |
24 | def forward(self, img):
25 | if isinstance(img, torch.Tensor):
26 | height, width = img.shape[:2]
27 | else:
28 | width, height = img.size
29 | scale = self.max_size / float(max(height, width))
30 | if scale != 1.0:
31 | new_size = tuple(round(dim * scale) for dim in (height, width))
32 | img = F.resize(img, new_size, self.interpolation)
33 | pad_h = self.max_size - new_size[0]
34 | pad_w = self.max_size - new_size[1]
35 | img = F.pad(img, padding=[pad_w//2, pad_h//2, pad_w - pad_w//2, pad_h - pad_h//2], fill=self.fill)
36 | return img
37 |
38 |
39 | def _convert_to_rgb(image):
40 | return image.convert('RGB')
41 |
42 |
43 | # class CatGen(nn.Module):
44 | # def __init__(self, num=4):
45 | # self.num = num
46 | # def mixgen_batch(image, text):
47 | # batch_size = image.shape[0]
48 | # index = np.random.permutation(batch_size)
49 |
50 | # cat_images = []
51 | # for i in range(batch_size):
52 | # # image mixup
53 | # image[i,:] = lam * image[i,:] + (1 - lam) * image[index[i],:]
54 | # # text concat
55 | # text[i] = tokenizer((str(text[i]) + " " + str(text[index[i]])))[0]
56 | # text = torch.stack(text)
57 | # return image, text
58 |
59 |
60 | def image_transform(
61 | image_size: int,
62 | is_train: bool,
63 | mean: Optional[Tuple[float, ...]] = None,
64 | std: Optional[Tuple[float, ...]] = None,
65 | resize_longest_max: bool = False,
66 | fill_color: int = 0,
67 | ):
68 | mean = mean or OPENAI_DATASET_MEAN
69 | if not isinstance(mean, (list, tuple)):
70 | mean = (mean,) * 3
71 |
72 | std = std or OPENAI_DATASET_STD
73 | if not isinstance(std, (list, tuple)):
74 | std = (std,) * 3
75 |
76 | if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
77 | # for square size, pass size as int so that Resize() uses aspect preserving shortest edge
78 | image_size = image_size[0]
79 |
80 | normalize = Normalize(mean=mean, std=std)
81 | if is_train:
82 | return Compose([
83 | RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
84 | _convert_to_rgb,
85 | ToTensor(),
86 | normalize,
87 | ])
88 | else:
89 | if resize_longest_max:
90 | transforms = [
91 | ResizeMaxSize(image_size, fill=fill_color)
92 | ]
93 | else:
94 | transforms = [
95 | Resize(image_size, interpolation=InterpolationMode.BICUBIC),
96 | CenterCrop(image_size),
97 | ]
98 | transforms.extend([
99 | _convert_to_rgb,
100 | ToTensor(),
101 | normalize,
102 | ])
103 | return Compose(transforms)
104 |
--------------------------------------------------------------------------------
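`image_transform` above builds the standard CLIP-style preprocessing. A small sketch of the eval-time pipeline it produces, using one of the reference images shipped in `asserts/`:

```python
from PIL import Image
from models.eva_clip.transform import image_transform

# Eval path: Resize -> CenterCrop -> convert to RGB -> ToTensor -> Normalize(OPENAI mean/std)
preprocess = image_transform(image_size=224, is_train=False)

img = Image.open("asserts/0.jpg")   # reference ID photo from this repo
pixel_values = preprocess(img)      # float tensor of shape (3, 224, 224)
print(pixel_values.shape, pixel_values.dtype)
```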
/README.md:
--------------------------------------------------------------------------------
1 |
2 | 
3 |
4 |
5 |
6 |
7 | [](https://arxiv.org/abs/2501.01790)
8 | [](https://huggingface.co/feizhengcong/Ingredients)
9 | [](https://huggingface.co/datasets/feizhengcong/Ingredients)
10 |
11 |
12 |
13 |
14 |
15 | This repository is the official implementation of Ingredients, a powerful way to customize video creation by incorporating multiple specific identity (ID) photos into advanced video diffusion Transformers.
16 | This is a research project; for a more polished experience, we also recommend trying the advanced products linked below:
17 | 
18 |
19 |
20 |
21 | ## 📷 1. Gallery
22 |
23 |
24 |
25 |
26 |
27 | ## ⚙️ 2. Environments
28 |
29 | We recommend the following environment setup.
30 |
31 | ```bash
32 | conda create -n ingredients python=3.11.0
33 | conda activate ingredients
34 | pip install -r requirements.txt
35 | ```
36 |
37 | The model weights are available at 🤗 [HuggingFace](https://huggingface.co/feizhengcong/Ingredients).
38 |
39 | ## 🗝️ 3. Inference
40 | We provide the inference script ```infer.py``` for simple testing. Run the command as in the following example:
41 |
42 | ```bash
43 | python infer.py \
44 | --prompt "Two men in half bodies, are seated in a dimly lit room, possibly an office or meeting room, with a formal atmosphere." \
45 |     --model_path "/path/to/model" \
46 | --seed 2025 \
47 | --img_file_path 'asserts/0.jpg' 'asserts/1.jpg'
48 | ```
49 |
50 | We also include the evaluation metric code in the ```metric``` folder and evaluation data at [](https://huggingface.co/datasets/feizhengcong/Ingredients) for comparing results on multi-ID customization tasks.
51 |
52 | Similar to [ConsisID](https://github.com/PKU-YuanGroup/ConsisID), Ingredients is also sensitive to prompt quality.
53 | We suggest following the prompt format described in this [link](https://github.com/PKU-YuanGroup/ConsisID?tab=readme-ov-file#prompt-refiner).
54 |
55 |
56 | ### Gradio Web UI
57 |
58 | We highly recommend trying out the web demo via the following command, which incorporates all features currently supported by Ingredients.
59 |
60 | ```bash
61 | python app.py
62 | ```
63 |
64 | 
65 |
66 |
67 |
68 | ## ⏰ 4. Training
69 |
70 | Coming soon, including multi-stage training scripts and multi-ID text-video datasets.
71 |
72 | You can prepare video-text pair data in the [format](datasets.py) shown, and our experiments can be reproduced by simply running the training scripts:
73 |
74 | ```bash
75 | # For stage 1
76 | bash train_face.sh
77 | # For stage 2
78 | bash train_router.sh
79 | ```
80 |
81 |
82 |
83 | ## 🚀 5. Cite
84 |
85 | If you find this work useful for your research and applications, please cite us using this BibTeX:
86 |
87 | ```bibtex
88 | @article{fei2025ingredients,
89 | title={Ingredients: Blending Custom Photos with Video Diffusion Transformers},
90 | author={Fei, Zhengcong and Li, Debang and Qiu, Di and Yu, Changqian and Fan, Mingyuan},
91 | journal={arXiv preprint arXiv:2501.01790},
92 | year={2025}
93 | }
94 | ```
95 | For any questions, please feel free to open an issue.
96 |
97 |
98 | ## Acknowledgement
99 |
100 | This project wouldn't be possible without the following open-source repositories: [CogVideoX](https://github.com/THUDM/CogVideo), [ConsisID](https://github.com/PKU-YuanGroup/ConsisID), [UniPortrait](https://github.com/junjiehe96/UniPortrait), and [HunyuanVideo](https://github.com/Tencent/HunyuanVideo).
101 |
102 |
103 |
--------------------------------------------------------------------------------
/models/eva_clip/timm_model.py:
--------------------------------------------------------------------------------
1 | """ timm model adapter
2 |
3 | Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
4 | """
5 | import logging
6 | from collections import OrderedDict
7 |
8 | import torch
9 | import torch.nn as nn
10 |
11 | try:
12 | import timm
13 | from timm.models.layers import Mlp, to_2tuple
14 | try:
15 | # old timm imports < 0.8.1
16 | from timm.models.layers.attention_pool2d import RotAttentionPool2d
17 | from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
18 | except ImportError:
19 | # new timm imports >= 0.8.1
20 | from timm.layers import RotAttentionPool2d
21 | from timm.layers import AttentionPool2d as AbsAttentionPool2d
22 | except ImportError:
23 | timm = None
24 |
25 | from .utils import freeze_batch_norm_2d
26 |
27 |
28 | class TimmModel(nn.Module):
29 | """ timm model adapter
30 | # FIXME this adapter is a work in progress, may change in ways that break weight compat
31 | """
32 |
33 | def __init__(
34 | self,
35 | model_name,
36 | embed_dim,
37 | image_size=224,
38 | pool='avg',
39 | proj='linear',
40 | proj_bias=False,
41 | drop=0.,
42 | pretrained=False):
43 | super().__init__()
44 | if timm is None:
45 | raise RuntimeError("Please `pip install timm` to use timm models.")
46 |
47 | self.image_size = to_2tuple(image_size)
48 | self.trunk = timm.create_model(model_name, pretrained=pretrained)
49 | feat_size = self.trunk.default_cfg.get('pool_size', None)
50 | feature_ndim = 1 if not feat_size else 2
51 | if pool in ('abs_attn', 'rot_attn'):
52 | assert feature_ndim == 2
53 | # if attn pooling used, remove both classifier and default pool
54 | self.trunk.reset_classifier(0, global_pool='')
55 | else:
56 | # reset global pool if pool config set, otherwise leave as network default
57 | reset_kwargs = dict(global_pool=pool) if pool else {}
58 | self.trunk.reset_classifier(0, **reset_kwargs)
59 | prev_chs = self.trunk.num_features
60 |
61 | head_layers = OrderedDict()
62 | if pool == 'abs_attn':
63 | head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
64 | prev_chs = embed_dim
65 | elif pool == 'rot_attn':
66 | head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
67 | prev_chs = embed_dim
68 | else:
69 | assert proj, 'projection layer needed if non-attention pooling is used.'
70 |
71 | # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
72 | if proj == 'linear':
73 | head_layers['drop'] = nn.Dropout(drop)
74 | head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)
75 | elif proj == 'mlp':
76 | head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop, bias=(True, proj_bias))
77 |
78 | self.head = nn.Sequential(head_layers)
79 |
80 | def lock(self, unlocked_groups=0, freeze_bn_stats=False):
81 | """ lock modules
82 | Args:
83 | unlocked_groups (int): leave last n layer groups unlocked (default: 0)
84 | """
85 | if not unlocked_groups:
86 | # lock full model
87 | for param in self.trunk.parameters():
88 | param.requires_grad = False
89 | if freeze_bn_stats:
90 | freeze_batch_norm_2d(self.trunk)
91 | else:
92 | # NOTE: partial freeze requires latest timm (master) branch and is subject to change
93 | try:
94 | # FIXME import here until API stable and in an official release
95 | from timm.models.helpers import group_parameters, group_modules
96 | except ImportError:
97 | raise RuntimeError(
98 | 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')
99 | matcher = self.trunk.group_matcher()
100 | gparams = group_parameters(self.trunk, matcher)
101 | max_layer_id = max(gparams.keys())
102 | max_layer_id = max_layer_id - unlocked_groups
103 | for group_idx in range(max_layer_id + 1):
104 | group = gparams[group_idx]
105 | for param in group:
106 | self.trunk.get_parameter(param).requires_grad = False
107 | if freeze_bn_stats:
108 | gmodules = group_modules(self.trunk, matcher, reverse=True)
109 | gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
110 | freeze_batch_norm_2d(self.trunk, gmodules)
111 |
112 | @torch.jit.ignore
113 | def set_grad_checkpointing(self, enable=True):
114 | try:
115 | self.trunk.set_grad_checkpointing(enable)
116 | except Exception as e:
117 | logging.warning('grad checkpointing not supported for this timm image tower, continuing without...')
118 |
119 | def forward(self, x):
120 | x = self.trunk(x)
121 | x = self.head(x)
122 | return x
123 |
--------------------------------------------------------------------------------
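A sketch of `TimmModel` used as a standalone vision tower; `resnet50` is just an example backbone name and requires `timm` to be installed:

```python
import torch
from models.eva_clip.timm_model import TimmModel

# Wrap a timm backbone so it emits fixed-size embeddings via the projection head.
tower = TimmModel(
    model_name="resnet50",
    embed_dim=512,
    image_size=224,
    pool="avg",
    proj="linear",
    pretrained=False,
)
tower.eval()

with torch.no_grad():
    images = torch.randn(2, 3, 224, 224)
    feats = tower(images)  # (2, 512): pooled trunk features passed through the linear head
print(feats.shape)
```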
/metric/curricularface/model_resnet.py:
--------------------------------------------------------------------------------
1 | # The implementation is adapted from TFace, made publicly available under the Apache-2.0 license at
2 | # https://github.com/Tencent/TFace/blob/master/recognition/torchkit/backbone/model_resnet.py
3 | import torch.nn as nn
4 | from torch.nn import BatchNorm1d, BatchNorm2d, Conv2d, Dropout, Linear, MaxPool2d, Module, ReLU, Sequential
5 |
6 | from .common import initialize_weights
7 |
8 |
9 | def conv3x3(in_planes, out_planes, stride=1):
10 | """ 3x3 convolution with padding
11 | """
12 | return Conv2d(
13 | in_planes,
14 | out_planes,
15 | kernel_size=3,
16 | stride=stride,
17 | padding=1,
18 | bias=False)
19 |
20 |
21 | def conv1x1(in_planes, out_planes, stride=1):
22 | """ 1x1 convolution
23 | """
24 | return Conv2d(
25 | in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
26 |
27 |
28 | class Bottleneck(Module):
29 | expansion = 4
30 |
31 | def __init__(self, inplanes, planes, stride=1, downsample=None):
32 | super(Bottleneck, self).__init__()
33 | self.conv1 = conv1x1(inplanes, planes)
34 | self.bn1 = BatchNorm2d(planes)
35 | self.conv2 = conv3x3(planes, planes, stride)
36 | self.bn2 = BatchNorm2d(planes)
37 | self.conv3 = conv1x1(planes, planes * self.expansion)
38 | self.bn3 = BatchNorm2d(planes * self.expansion)
39 | self.relu = ReLU(inplace=True)
40 | self.downsample = downsample
41 | self.stride = stride
42 |
43 | def forward(self, x):
44 | identity = x
45 |
46 | out = self.conv1(x)
47 | out = self.bn1(out)
48 | out = self.relu(out)
49 |
50 | out = self.conv2(out)
51 | out = self.bn2(out)
52 | out = self.relu(out)
53 |
54 | out = self.conv3(out)
55 | out = self.bn3(out)
56 |
57 | if self.downsample is not None:
58 | identity = self.downsample(x)
59 |
60 | out += identity
61 | out = self.relu(out)
62 |
63 | return out
64 |
65 |
66 | class ResNet(Module):
67 | """ ResNet backbone
68 | """
69 |
70 | def __init__(self, input_size, block, layers, zero_init_residual=True):
71 | """ Args:
72 | input_size: input_size of backbone
73 | block: block function
74 | layers: layers in each block
75 | """
76 | super(ResNet, self).__init__()
77 | assert input_size[0] in [112, 224], \
78 | 'input_size should be [112, 112] or [224, 224]'
79 | self.inplanes = 64
80 | self.conv1 = Conv2d(
81 | 3, 64, kernel_size=7, stride=2, padding=3, bias=False)
82 | self.bn1 = BatchNorm2d(64)
83 | self.relu = ReLU(inplace=True)
84 | self.maxpool = MaxPool2d(kernel_size=3, stride=2, padding=1)
85 | self.layer1 = self._make_layer(block, 64, layers[0])
86 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
87 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
88 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
89 |
90 | self.bn_o1 = BatchNorm2d(2048)
91 | self.dropout = Dropout()
92 | if input_size[0] == 112:
93 | self.fc = Linear(2048 * 4 * 4, 512)
94 | else:
95 | self.fc = Linear(2048 * 7 * 7, 512)
96 | self.bn_o2 = BatchNorm1d(512)
97 |
98 | initialize_weights(self.modules)
99 | if zero_init_residual:
100 | for m in self.modules():
101 | if isinstance(m, Bottleneck):
102 | nn.init.constant_(m.bn3.weight, 0)
103 |
104 | def _make_layer(self, block, planes, blocks, stride=1):
105 | downsample = None
106 | if stride != 1 or self.inplanes != planes * block.expansion:
107 | downsample = Sequential(
108 | conv1x1(self.inplanes, planes * block.expansion, stride),
109 | BatchNorm2d(planes * block.expansion),
110 | )
111 |
112 | layers = []
113 | layers.append(block(self.inplanes, planes, stride, downsample))
114 | self.inplanes = planes * block.expansion
115 | for _ in range(1, blocks):
116 | layers.append(block(self.inplanes, planes))
117 |
118 | return Sequential(*layers)
119 |
120 | def forward(self, x):
121 | x = self.conv1(x)
122 | x = self.bn1(x)
123 | x = self.relu(x)
124 | x = self.maxpool(x)
125 |
126 | x = self.layer1(x)
127 | x = self.layer2(x)
128 | x = self.layer3(x)
129 | x = self.layer4(x)
130 |
131 | x = self.bn_o1(x)
132 | x = self.dropout(x)
133 | x = x.view(x.size(0), -1)
134 | x = self.fc(x)
135 | x = self.bn_o2(x)
136 |
137 | return x
138 |
139 |
140 | def ResNet_50(input_size, **kwargs):
141 | """ Constructs a ResNet-50 model.
142 | """
143 | model = ResNet(input_size, Bottleneck, [3, 4, 6, 3], **kwargs)
144 |
145 | return model
146 |
147 |
148 | def ResNet_101(input_size, **kwargs):
149 | """ Constructs a ResNet-101 model.
150 | """
151 | model = ResNet(input_size, Bottleneck, [3, 4, 23, 3], **kwargs)
152 |
153 | return model
154 |
155 |
156 | def ResNet_152(input_size, **kwargs):
157 | """ Constructs a ResNet-152 model.
158 | """
159 | model = ResNet(input_size, Bottleneck, [3, 8, 36, 3], **kwargs)
160 |
161 | return model
162 |
--------------------------------------------------------------------------------
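A quick shape check for the ResNet face backbone above, using the 112x112 input size its constructor's assert allows:

```python
import torch
from metric.curricularface.model_resnet import ResNet_50

model = ResNet_50(input_size=[112, 112])
model.eval()

with torch.no_grad():
    x = torch.randn(1, 3, 112, 112)  # stand-in for an aligned face crop
    emb = model(x)                   # flattened 2048*4*4 features -> Linear -> (1, 512)
print(emb.shape)  # torch.Size([1, 512])
```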
/models/eva_clip/rope.py:
--------------------------------------------------------------------------------
1 | from math import pi
2 | import torch
3 | from torch import nn
4 | from einops import rearrange, repeat
5 | import logging
6 |
7 | def broadcat(tensors, dim = -1):
8 | num_tensors = len(tensors)
9 | shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
10 | assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
11 | shape_len = list(shape_lens)[0]
12 | dim = (dim + shape_len) if dim < 0 else dim
13 | dims = list(zip(*map(lambda t: list(t.shape), tensors)))
14 | expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
15 |     assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatenation'
16 | max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
17 | expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
18 | expanded_dims.insert(dim, (dim, dims[dim]))
19 | expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
20 | tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
21 | return torch.cat(tensors, dim = dim)
22 |
23 | def rotate_half(x):
24 | x = rearrange(x, '... (d r) -> ... d r', r = 2)
25 | x1, x2 = x.unbind(dim = -1)
26 | x = torch.stack((-x2, x1), dim = -1)
27 | return rearrange(x, '... d r -> ... (d r)')
28 |
29 |
30 | class VisionRotaryEmbedding(nn.Module):
31 | def __init__(
32 | self,
33 | dim,
34 | pt_seq_len,
35 | ft_seq_len=None,
36 | custom_freqs = None,
37 | freqs_for = 'lang',
38 | theta = 10000,
39 | max_freq = 10,
40 | num_freqs = 1,
41 | ):
42 | super().__init__()
43 | if custom_freqs:
44 | freqs = custom_freqs
45 | elif freqs_for == 'lang':
46 | freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
47 | elif freqs_for == 'pixel':
48 | freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
49 | elif freqs_for == 'constant':
50 | freqs = torch.ones(num_freqs).float()
51 | else:
52 | raise ValueError(f'unknown modality {freqs_for}')
53 |
54 | if ft_seq_len is None: ft_seq_len = pt_seq_len
55 | t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len
56 |
57 | freqs_h = torch.einsum('..., f -> ... f', t, freqs)
58 | freqs_h = repeat(freqs_h, '... n -> ... (n r)', r = 2)
59 |
60 | freqs_w = torch.einsum('..., f -> ... f', t, freqs)
61 | freqs_w = repeat(freqs_w, '... n -> ... (n r)', r = 2)
62 |
63 | freqs = broadcat((freqs_h[:, None, :], freqs_w[None, :, :]), dim = -1)
64 |
65 | self.register_buffer("freqs_cos", freqs.cos())
66 | self.register_buffer("freqs_sin", freqs.sin())
67 |
68 | logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')
69 |
70 | def forward(self, t, start_index = 0):
71 | rot_dim = self.freqs_cos.shape[-1]
72 | end_index = start_index + rot_dim
73 | assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
74 | t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
75 | t = (t * self.freqs_cos) + (rotate_half(t) * self.freqs_sin)
76 |
77 | return torch.cat((t_left, t, t_right), dim = -1)
78 |
79 | class VisionRotaryEmbeddingFast(nn.Module):
80 | def __init__(
81 | self,
82 | dim,
83 | pt_seq_len,
84 | ft_seq_len=None,
85 | custom_freqs = None,
86 | freqs_for = 'lang',
87 | theta = 10000,
88 | max_freq = 10,
89 | num_freqs = 1,
90 | patch_dropout = 0.
91 | ):
92 | super().__init__()
93 | if custom_freqs:
94 | freqs = custom_freqs
95 | elif freqs_for == 'lang':
96 | freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
97 | elif freqs_for == 'pixel':
98 | freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
99 | elif freqs_for == 'constant':
100 | freqs = torch.ones(num_freqs).float()
101 | else:
102 | raise ValueError(f'unknown modality {freqs_for}')
103 |
104 | if ft_seq_len is None: ft_seq_len = pt_seq_len
105 | t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len
106 |
107 | freqs = torch.einsum('..., f -> ... f', t, freqs)
108 | freqs = repeat(freqs, '... n -> ... (n r)', r = 2)
109 | freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim = -1)
110 |
111 | freqs_cos = freqs.cos().view(-1, freqs.shape[-1])
112 | freqs_sin = freqs.sin().view(-1, freqs.shape[-1])
113 |
114 | self.patch_dropout = patch_dropout
115 |
116 | self.register_buffer("freqs_cos", freqs_cos)
117 | self.register_buffer("freqs_sin", freqs_sin)
118 |
119 | logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')
120 |
121 | def forward(self, t, patch_indices_keep=None):
122 | if patch_indices_keep is not None:
123 | batch = t.size()[0]
124 | batch_indices = torch.arange(batch)
125 | batch_indices = batch_indices[..., None]
126 |
127 | freqs_cos = repeat(self.freqs_cos, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])
128 | freqs_sin = repeat(self.freqs_sin, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])
129 |
130 | freqs_cos = freqs_cos[batch_indices, patch_indices_keep]
131 | freqs_cos = rearrange(freqs_cos, 'n i m j -> n m i j')
132 | freqs_sin = freqs_sin[batch_indices, patch_indices_keep]
133 | freqs_sin = rearrange(freqs_sin, 'n i m j -> n m i j')
134 |
135 | return t * freqs_cos + rotate_half(t) * freqs_sin
136 |
137 | return t * self.freqs_cos + rotate_half(t) * self.freqs_sin
--------------------------------------------------------------------------------
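A shape-level sketch of `VisionRotaryEmbeddingFast` as it is typically wired into an EVA-style ViT attention block: `dim` is half the per-head width and the table covers a `pt_seq_len x pt_seq_len` patch grid (the concrete sizes below are illustrative assumptions):

```python
import torch
from models.eva_clip.rope import VisionRotaryEmbeddingFast

head_dim = 64              # illustrative per-head width
grid = 16                  # e.g. a 224 / 14 patch grid -> 16 x 16 tokens
num_patches = grid * grid

# dim is half of head_dim; the cos/sin buffers come out with shape (num_patches, head_dim).
rope = VisionRotaryEmbeddingFast(dim=head_dim // 2, pt_seq_len=grid)
print(rope.freqs_cos.shape)  # torch.Size([256, 64])

q = torch.randn(2, 12, num_patches, head_dim)  # (batch, heads, tokens, head_dim)
q_rot = rope(q)                                # same shape, with rotary position applied
print(q_rot.shape)
```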
/models/eva_clip/openai.py:
--------------------------------------------------------------------------------
1 | """ OpenAI pretrained model functions
2 |
3 | Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4 | """
5 |
6 | import os
7 | import warnings
8 | from typing import List, Optional, Union
9 |
10 | import torch
11 |
12 | from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype
13 | from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url
14 |
15 | __all__ = ["list_openai_models", "load_openai_model"]
16 |
17 |
18 | def list_openai_models() -> List[str]:
19 | """Returns the names of available CLIP models"""
20 | return list_pretrained_models_by_tag('openai')
21 |
22 |
23 | def load_openai_model(
24 | name: str,
25 | precision: Optional[str] = None,
26 | device: Optional[Union[str, torch.device]] = None,
27 | jit: bool = True,
28 | cache_dir: Optional[str] = None,
29 | ):
30 | """Load a CLIP model
31 |
32 | Parameters
33 | ----------
34 | name : str
35 | A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
36 | precision: str
37 | Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
38 | device : Union[str, torch.device]
39 | The device to put the loaded model
40 | jit : bool
41 | Whether to load the optimized JIT model (default) or more hackable non-JIT model.
42 | cache_dir : Optional[str]
43 | The directory to cache the downloaded model weights
44 |
45 | Returns
46 | -------
47 | model : torch.nn.Module
48 | The CLIP model
49 | preprocess : Callable[[PIL.Image], torch.Tensor]
50 | A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
51 | """
52 | if device is None:
53 | device = "cuda" if torch.cuda.is_available() else "cpu"
54 | if precision is None:
55 | precision = 'fp32' if device == 'cpu' else 'fp16'
56 |
57 | if get_pretrained_url(name, 'openai'):
58 | model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
59 | elif os.path.isfile(name):
60 | model_path = name
61 | else:
62 | raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
63 |
64 | try:
65 | # loading JIT archive
66 | model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
67 | state_dict = None
68 | except RuntimeError:
69 | # loading saved state dict
70 | if jit:
71 | warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
72 | jit = False
73 | state_dict = torch.load(model_path, map_location="cpu")
74 |
75 | if not jit:
76 | # Build a non-jit model from the OpenAI jitted model state dict
77 | cast_dtype = get_cast_dtype(precision)
78 | try:
79 | model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)
80 | except KeyError:
81 | sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
82 | model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)
83 |
84 | # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
85 | model = model.to(device)
86 | if precision.startswith('amp') or precision == 'fp32':
87 | model.float()
88 | elif precision == 'bf16':
89 | convert_weights_to_lp(model, dtype=torch.bfloat16)
90 |
91 | return model
92 |
93 | # patch the device names
94 | device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
95 | device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
96 |
97 | def patch_device(module):
98 | try:
99 | graphs = [module.graph] if hasattr(module, "graph") else []
100 | except RuntimeError:
101 | graphs = []
102 |
103 | if hasattr(module, "forward1"):
104 | graphs.append(module.forward1.graph)
105 |
106 | for graph in graphs:
107 | for node in graph.findAllNodes("prim::Constant"):
108 | if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
109 | node.copyAttributes(device_node)
110 |
111 | model.apply(patch_device)
112 | patch_device(model.encode_image)
113 | patch_device(model.encode_text)
114 |
115 | # patch dtype to float32 (typically for CPU)
116 | if precision == 'fp32':
117 | float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
118 | float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
119 | float_node = float_input.node()
120 |
121 | def patch_float(module):
122 | try:
123 | graphs = [module.graph] if hasattr(module, "graph") else []
124 | except RuntimeError:
125 | graphs = []
126 |
127 | if hasattr(module, "forward1"):
128 | graphs.append(module.forward1.graph)
129 |
130 | for graph in graphs:
131 | for node in graph.findAllNodes("aten::to"):
132 | inputs = list(node.inputs())
133 | for i in [1, 2]: # dtype can be the second or third argument to aten::to()
134 | if inputs[i].node()["value"] == 5:
135 | inputs[i].node().copyAttributes(float_node)
136 |
137 | model.apply(patch_float)
138 | patch_float(model.encode_image)
139 | patch_float(model.encode_text)
140 | model.float()
141 |
142 | # ensure image_size attr available at consistent location for both jit and non-jit
143 | model.visual.image_size = model.input_resolution.item()
144 | return model
145 |
--------------------------------------------------------------------------------
/models/eva_clip/utils_qformer.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import math
3 | import os
4 | import random
5 |
6 | import cv2
7 | import numpy as np
8 | import torch
9 | import torch.nn.functional as F
10 | from torchvision.utils import make_grid
11 | from transformers import PretrainedConfig
12 |
13 |
14 | def seed_everything(seed):
15 | os.environ["PL_GLOBAL_SEED"] = str(seed)
16 | random.seed(seed)
17 | np.random.seed(seed)
18 | torch.manual_seed(seed)
19 | torch.cuda.manual_seed_all(seed)
20 |
21 |
22 | def is_torch2_available():
23 | return hasattr(F, "scaled_dot_product_attention")
24 |
25 |
26 | def instantiate_from_config(config):
27 | if "target" not in config:
28 | if config == '__is_first_stage__' or config == "__is_unconditional__":
29 | return None
30 | raise KeyError("Expected key `target` to instantiate.")
31 | return get_obj_from_str(config["target"])(**config.get("params", {}))
32 |
33 |
34 | def get_obj_from_str(string, reload=False):
35 | module, cls = string.rsplit(".", 1)
36 | if reload:
37 | module_imp = importlib.import_module(module)
38 | importlib.reload(module_imp)
39 | return getattr(importlib.import_module(module, package=None), cls)
40 |
41 |
42 | def drop_seq_token(seq, drop_rate=0.5):
43 | idx = torch.randperm(seq.size(1))
44 | num_keep_tokens = int(len(idx) * (1 - drop_rate))
45 | idx = idx[:num_keep_tokens]
46 | seq = seq[:, idx]
47 | return seq
48 |
49 |
50 | def import_model_class_from_model_name_or_path(
51 | pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
52 | ):
53 | text_encoder_config = PretrainedConfig.from_pretrained(
54 | pretrained_model_name_or_path, subfolder=subfolder, revision=revision
55 | )
56 | model_class = text_encoder_config.architectures[0]
57 |
58 | if model_class == "CLIPTextModel":
59 | from transformers import CLIPTextModel
60 |
61 | return CLIPTextModel
62 | elif model_class == "CLIPTextModelWithProjection": # noqa RET505
63 | from transformers import CLIPTextModelWithProjection
64 |
65 | return CLIPTextModelWithProjection
66 | else:
67 | raise ValueError(f"{model_class} is not supported.")
68 |
69 |
70 | def resize_numpy_image_long(image, resize_long_edge=768):
71 | h, w = image.shape[:2]
72 | if max(h, w) <= resize_long_edge:
73 | return image
74 | k = resize_long_edge / max(h, w)
75 | h = int(h * k)
76 | w = int(w * k)
77 | image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LANCZOS4)
78 | return image
79 |
80 |
81 | # from basicsr
82 | def img2tensor(imgs, bgr2rgb=True, float32=True):
83 | """Numpy array to tensor.
84 |
85 | Args:
86 | imgs (list[ndarray] | ndarray): Input images.
87 | bgr2rgb (bool): Whether to change bgr to rgb.
88 | float32 (bool): Whether to change to float32.
89 |
90 | Returns:
91 | list[tensor] | tensor: Tensor images. If returned results only have
92 | one element, just return tensor.
93 | """
94 |
95 | def _totensor(img, bgr2rgb, float32):
96 | if img.shape[2] == 3 and bgr2rgb:
97 | if img.dtype == 'float64':
98 | img = img.astype('float32')
99 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
100 | img = torch.from_numpy(img.transpose(2, 0, 1))
101 | if float32:
102 | img = img.float()
103 | return img
104 |
105 | if isinstance(imgs, list):
106 | return [_totensor(img, bgr2rgb, float32) for img in imgs]
107 | return _totensor(imgs, bgr2rgb, float32)
108 |
109 |
110 | def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
111 | """Convert torch Tensors into image numpy arrays.
112 |
113 | After clamping to [min, max], values will be normalized to [0, 1].
114 |
115 | Args:
116 | tensor (Tensor or list[Tensor]): Accept shapes:
117 | 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
118 | 2) 3D Tensor of shape (3/1 x H x W);
119 | 3) 2D Tensor of shape (H x W).
120 | Tensor channel should be in RGB order.
121 | rgb2bgr (bool): Whether to change rgb to bgr.
122 | out_type (numpy type): output types. If ``np.uint8``, transform outputs
123 | to uint8 type with range [0, 255]; otherwise, float type with
124 | range [0, 1]. Default: ``np.uint8``.
125 | min_max (tuple[int]): min and max values for clamp.
126 |
127 | Returns:
128 | (ndarray or list[ndarray]): 3D ndarray of shape (H x W x C) OR 2D ndarray of
129 | shape (H x W). The channel order is BGR.
130 | """
131 | if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
132 | raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')
133 |
134 | if torch.is_tensor(tensor):
135 | tensor = [tensor]
136 | result = []
137 | for _tensor in tensor:
138 | _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
139 | _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
140 |
141 | n_dim = _tensor.dim()
142 | if n_dim == 4:
143 | img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()
144 | img_np = img_np.transpose(1, 2, 0)
145 | if rgb2bgr:
146 | img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
147 | elif n_dim == 3:
148 | img_np = _tensor.numpy()
149 | img_np = img_np.transpose(1, 2, 0)
150 | if img_np.shape[2] == 1: # gray image
151 | img_np = np.squeeze(img_np, axis=2)
152 | else:
153 | if rgb2bgr:
154 | img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
155 | elif n_dim == 2:
156 | img_np = _tensor.numpy()
157 | else:
158 | raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}')
159 | if out_type == np.uint8:
160 | # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
161 | img_np = (img_np * 255.0).round()
162 | img_np = img_np.astype(out_type)
163 | result.append(img_np)
164 | if len(result) == 1:
165 | result = result[0]
166 | return result
167 |
--------------------------------------------------------------------------------
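As a quick illustration of the basicsr-derived helpers above, a hedged round-trip sketch; the import path and the random input image are assumptions for demonstration only:

import numpy as np
from models.eva_clip.utils_qformer import img2tensor, tensor2img  # assumed import path from the repo layout

# A fake 64x64 BGR image with float values in [0, 1], as OpenCV would yield after normalization.
img = np.random.rand(64, 64, 3).astype(np.float32)

t = img2tensor(img, bgr2rgb=True, float32=True)     # torch.FloatTensor of shape (3, 64, 64)
back = tensor2img(t, rgb2bgr=True, min_max=(0, 1))  # uint8 ndarray of shape (64, 64, 3), BGR order
print(t.shape, back.shape, back.dtype)
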
/models/eva_clip/loss.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | import torch.nn as nn
4 | from torch.nn import functional as F
5 |
6 | try:
7 | import torch.distributed.nn
8 | from torch import distributed as dist
9 | has_distributed = True
10 | except ImportError:
11 | has_distributed = False
12 |
13 | try:
14 | import horovod.torch as hvd
15 | except ImportError:
16 | hvd = None
17 |
18 | from timm.loss import LabelSmoothingCrossEntropy
19 |
20 |
21 | def gather_features(
22 | image_features,
23 | text_features,
24 | local_loss=False,
25 | gather_with_grad=False,
26 | rank=0,
27 | world_size=1,
28 | use_horovod=False
29 | ):
30 | assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.'
31 | if use_horovod:
32 | assert hvd is not None, 'Please install horovod'
33 | if gather_with_grad:
34 | all_image_features = hvd.allgather(image_features)
35 | all_text_features = hvd.allgather(text_features)
36 | else:
37 | with torch.no_grad():
38 | all_image_features = hvd.allgather(image_features)
39 | all_text_features = hvd.allgather(text_features)
40 | if not local_loss:
41 | # ensure grads for local rank when all_* features don't have a gradient
42 | gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
43 | gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
44 | gathered_image_features[rank] = image_features
45 | gathered_text_features[rank] = text_features
46 | all_image_features = torch.cat(gathered_image_features, dim=0)
47 | all_text_features = torch.cat(gathered_text_features, dim=0)
48 | else:
49 | # We gather tensors from all gpus
50 | if gather_with_grad:
51 | all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
52 | all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
53 | # all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features, async_op=True), dim=0)
54 | # all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features, async_op=True), dim=0)
55 | else:
56 | gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
57 | gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
58 | dist.all_gather(gathered_image_features, image_features)
59 | dist.all_gather(gathered_text_features, text_features)
60 | if not local_loss:
61 | # ensure grads for local rank when all_* features don't have a gradient
62 | gathered_image_features[rank] = image_features
63 | gathered_text_features[rank] = text_features
64 | all_image_features = torch.cat(gathered_image_features, dim=0)
65 | all_text_features = torch.cat(gathered_text_features, dim=0)
66 |
67 | return all_image_features, all_text_features
68 |
69 |
70 | class ClipLoss(nn.Module):
71 |
72 | def __init__(
73 | self,
74 | local_loss=False,
75 | gather_with_grad=False,
76 | cache_labels=False,
77 | rank=0,
78 | world_size=1,
79 | use_horovod=False,
80 | smoothing=0.,
81 | ):
82 | super().__init__()
83 | self.local_loss = local_loss
84 | self.gather_with_grad = gather_with_grad
85 | self.cache_labels = cache_labels
86 | self.rank = rank
87 | self.world_size = world_size
88 | self.use_horovod = use_horovod
89 | self.label_smoothing_cross_entropy = LabelSmoothingCrossEntropy(smoothing=smoothing) if smoothing > 0 else None
90 |
91 | # cache state
92 | self.prev_num_logits = 0
93 | self.labels = {}
94 |
95 | def forward(self, image_features, text_features, logit_scale=1.):
96 | device = image_features.device
97 | if self.world_size > 1:
98 | all_image_features, all_text_features = gather_features(
99 | image_features, text_features,
100 | self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)
101 |
102 | if self.local_loss:
103 | logits_per_image = logit_scale * image_features @ all_text_features.T
104 | logits_per_text = logit_scale * text_features @ all_image_features.T
105 | else:
106 | logits_per_image = logit_scale * all_image_features @ all_text_features.T
107 | logits_per_text = logits_per_image.T
108 | else:
109 | logits_per_image = logit_scale * image_features @ text_features.T
110 | logits_per_text = logit_scale * text_features @ image_features.T
111 | # calculate ground-truth labels and cache them if enabled
112 | num_logits = logits_per_image.shape[0]
113 | if self.prev_num_logits != num_logits or device not in self.labels:
114 | labels = torch.arange(num_logits, device=device, dtype=torch.long)
115 | if self.world_size > 1 and self.local_loss:
116 | labels = labels + num_logits * self.rank
117 | if self.cache_labels:
118 | self.labels[device] = labels
119 | self.prev_num_logits = num_logits
120 | else:
121 | labels = self.labels[device]
122 |
123 | if self.label_smoothing_cross_entropy:
124 | total_loss = (
125 | self.label_smoothing_cross_entropy(logits_per_image, labels) +
126 | self.label_smoothing_cross_entropy(logits_per_text, labels)
127 | ) / 2
128 | else:
129 | total_loss = (
130 | F.cross_entropy(logits_per_image, labels) +
131 | F.cross_entropy(logits_per_text, labels)
132 | ) / 2
133 |
134 | acc = None
135 | i2t_acc = (logits_per_image.argmax(-1) == labels).sum() / len(logits_per_image)
136 | t2i_acc = (logits_per_text.argmax(-1) == labels).sum() / len(logits_per_text)
137 | acc = {"i2t": i2t_acc, "t2i": t2i_acc}
138 | return total_loss, acc
--------------------------------------------------------------------------------
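For reference, a minimal single-process sketch of how ClipLoss is typically driven (world_size=1, so no gathering); the batch size, embedding width and logit scale are arbitrary, and the import path is assumed from the repo layout:

import torch
import torch.nn.functional as F
from models.eva_clip.loss import ClipLoss  # assumed import path

loss_fn = ClipLoss()  # defaults: no distributed gathering, no label smoothing
image_features = F.normalize(torch.randn(8, 512), dim=-1)
text_features = F.normalize(torch.randn(8, 512), dim=-1)

# Returns the symmetric InfoNCE loss plus image-to-text / text-to-image retrieval accuracy.
total_loss, acc = loss_fn(image_features, text_features, logit_scale=100.0)
print(total_loss.item(), acc["i2t"].item(), acc["t2i"].item())
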
/models/eva_clip/tokenizer.py:
--------------------------------------------------------------------------------
1 | """ CLIP tokenizer
2 |
3 | Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4 | """
5 | import gzip
6 | import html
7 | import os
8 | from functools import lru_cache
9 | from typing import Union, List
10 |
11 | import ftfy
12 | import regex as re
13 | import torch
14 |
15 | # https://stackoverflow.com/q/62691279
16 | import os
17 | os.environ["TOKENIZERS_PARALLELISM"] = "false"
18 |
19 |
20 | @lru_cache()
21 | def default_bpe():
22 | return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
23 |
24 |
25 | @lru_cache()
26 | def bytes_to_unicode():
27 | """
28 | Returns a list of utf-8 bytes and a corresponding list of unicode strings.
29 | The reversible bpe codes work on unicode strings.
30 | This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
31 | When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
32 | This is a significant percentage of your normal, say, 32K bpe vocab.
33 | To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
34 | And avoids mapping to whitespace/control characters the bpe code barfs on.
35 | """
36 | bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
37 | cs = bs[:]
38 | n = 0
39 | for b in range(2**8):
40 | if b not in bs:
41 | bs.append(b)
42 | cs.append(2**8+n)
43 | n += 1
44 | cs = [chr(n) for n in cs]
45 | return dict(zip(bs, cs))
46 |
47 |
48 | def get_pairs(word):
49 | """Return set of symbol pairs in a word.
50 | Word is represented as tuple of symbols (symbols being variable-length strings).
51 | """
52 | pairs = set()
53 | prev_char = word[0]
54 | for char in word[1:]:
55 | pairs.add((prev_char, char))
56 | prev_char = char
57 | return pairs
58 |
59 |
60 | def basic_clean(text):
61 | text = ftfy.fix_text(text)
62 | text = html.unescape(html.unescape(text))
63 | return text.strip()
64 |
65 |
66 | def whitespace_clean(text):
67 | text = re.sub(r'\s+', ' ', text)
68 | text = text.strip()
69 | return text
70 |
71 |
72 | class SimpleTokenizer(object):
73 | def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
74 | self.byte_encoder = bytes_to_unicode()
75 | self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
76 | merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
77 | merges = merges[1:49152-256-2+1]
78 | merges = [tuple(merge.split()) for merge in merges]
79 | vocab = list(bytes_to_unicode().values())
80 | vocab = vocab + [v+'</w>' for v in vocab]
81 | for merge in merges:
82 | vocab.append(''.join(merge))
83 | if not special_tokens:
84 | special_tokens = ['<start_of_text>', '<end_of_text>']
85 | else:
86 | special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
87 | vocab.extend(special_tokens)
88 | self.encoder = dict(zip(vocab, range(len(vocab))))
89 | self.decoder = {v: k for k, v in self.encoder.items()}
90 | self.bpe_ranks = dict(zip(merges, range(len(merges))))
91 | self.cache = {t:t for t in special_tokens}
92 | special = "|".join(special_tokens)
93 | self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
94 |
95 | self.vocab_size = len(self.encoder)
96 | self.all_special_ids = [self.encoder[t] for t in special_tokens]
97 |
98 | def bpe(self, token):
99 | if token in self.cache:
100 | return self.cache[token]
101 | word = tuple(token[:-1]) + ( token[-1] + '</w>',)
102 | pairs = get_pairs(word)
103 |
104 | if not pairs:
105 | return token+'</w>'
106 |
107 | while True:
108 | bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
109 | if bigram not in self.bpe_ranks:
110 | break
111 | first, second = bigram
112 | new_word = []
113 | i = 0
114 | while i < len(word):
115 | try:
116 | j = word.index(first, i)
117 | new_word.extend(word[i:j])
118 | i = j
119 | except ValueError:
120 | new_word.extend(word[i:])
121 | break
122 |
123 | if word[i] == first and i < len(word)-1 and word[i+1] == second:
124 | new_word.append(first+second)
125 | i += 2
126 | else:
127 | new_word.append(word[i])
128 | i += 1
129 | new_word = tuple(new_word)
130 | word = new_word
131 | if len(word) == 1:
132 | break
133 | else:
134 | pairs = get_pairs(word)
135 | word = ' '.join(word)
136 | self.cache[token] = word
137 | return word
138 |
139 | def encode(self, text):
140 | bpe_tokens = []
141 | text = whitespace_clean(basic_clean(text)).lower()
142 | for token in re.findall(self.pat, text):
143 | token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
144 | bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
145 | return bpe_tokens
146 |
147 | def decode(self, tokens):
148 | text = ''.join([self.decoder[token] for token in tokens])
149 | text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
150 | return text
151 |
152 |
153 | _tokenizer = SimpleTokenizer()
154 |
155 |
156 | def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
157 | """
158 | Returns the tokenized representation of given input string(s)
159 |
160 | Parameters
161 | ----------
162 | texts : Union[str, List[str]]
163 | An input string or a list of input strings to tokenize
164 | context_length : int
165 | The context length to use; all CLIP models use 77 as the context length
166 |
167 | Returns
168 | -------
169 | A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
170 | """
171 | if isinstance(texts, str):
172 | texts = [texts]
173 |
174 | sot_token = _tokenizer.encoder["<start_of_text>"]
175 | eot_token = _tokenizer.encoder["<end_of_text>"]
176 | all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
177 | result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
178 |
179 | for i, tokens in enumerate(all_tokens):
180 | if len(tokens) > context_length:
181 | tokens = tokens[:context_length] # Truncate
182 | tokens[-1] = eot_token
183 | result[i, :len(tokens)] = torch.tensor(tokens)
184 |
185 | return result
186 |
187 |
188 | class HFTokenizer:
189 | "HuggingFace tokenizer wrapper"
190 | def __init__(self, tokenizer_name:str):
191 | from transformers import AutoTokenizer
192 | self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
193 |
194 | def __call__(self, texts:Union[str, List[str]], context_length:int=77) -> torch.Tensor:
195 | # same cleaning as for default tokenizer, except lowercasing
196 | # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance
197 | if isinstance(texts, str):
198 | texts = [texts]
199 | texts = [whitespace_clean(basic_clean(text)) for text in texts]
200 | input_ids = self.tokenizer(texts, return_tensors='pt', max_length=context_length, padding='max_length', truncation=True).input_ids
201 | return input_ids
202 |
--------------------------------------------------------------------------------
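A small usage sketch for the tokenizer above; the import path is assumed from the repo layout and the prompts are arbitrary:

from models.eva_clip.tokenizer import tokenize

# Each string becomes <start_of_text> + BPE tokens + <end_of_text>, padded or truncated to 77 ids.
tokens = tokenize(["a photo of a cat", "two people in a meeting room"])
print(tokens.shape)  # torch.Size([2, 77]), dtype torch.int64
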
/models/eva_clip/modified_resnet.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | import torch
5 | from torch import nn
6 | from torch.nn import functional as F
7 | from collections import OrderedDict
8 |
9 | current_file_path = os.path.abspath(__file__)
10 | project_roots = [os.path.dirname(current_file_path)]
11 | for project_root in project_roots:
12 | sys.path.insert(0, project_root) if project_root not in sys.path else None
13 |
14 | from utils import freeze_batch_norm_2d
15 |
16 |
17 | class Bottleneck(nn.Module):
18 | expansion = 4
19 |
20 | def __init__(self, inplanes, planes, stride=1):
21 | super().__init__()
22 |
23 | # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
24 | self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
25 | self.bn1 = nn.BatchNorm2d(planes)
26 | self.act1 = nn.ReLU(inplace=True)
27 |
28 | self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
29 | self.bn2 = nn.BatchNorm2d(planes)
30 | self.act2 = nn.ReLU(inplace=True)
31 |
32 | self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
33 |
34 | self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
35 | self.bn3 = nn.BatchNorm2d(planes * self.expansion)
36 | self.act3 = nn.ReLU(inplace=True)
37 |
38 | self.downsample = None
39 | self.stride = stride
40 |
41 | if stride > 1 or inplanes != planes * Bottleneck.expansion:
42 | # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
43 | self.downsample = nn.Sequential(OrderedDict([
44 | ("-1", nn.AvgPool2d(stride)),
45 | ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
46 | ("1", nn.BatchNorm2d(planes * self.expansion))
47 | ]))
48 |
49 | def forward(self, x: torch.Tensor):
50 | identity = x
51 |
52 | out = self.act1(self.bn1(self.conv1(x)))
53 | out = self.act2(self.bn2(self.conv2(out)))
54 | out = self.avgpool(out)
55 | out = self.bn3(self.conv3(out))
56 |
57 | if self.downsample is not None:
58 | identity = self.downsample(x)
59 |
60 | out += identity
61 | out = self.act3(out)
62 | return out
63 |
64 |
65 | class AttentionPool2d(nn.Module):
66 | def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
67 | super().__init__()
68 | self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
69 | self.k_proj = nn.Linear(embed_dim, embed_dim)
70 | self.q_proj = nn.Linear(embed_dim, embed_dim)
71 | self.v_proj = nn.Linear(embed_dim, embed_dim)
72 | self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
73 | self.num_heads = num_heads
74 |
75 | def forward(self, x):
76 | x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
77 | x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
78 | x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
79 | x, _ = F.multi_head_attention_forward(
80 | query=x, key=x, value=x,
81 | embed_dim_to_check=x.shape[-1],
82 | num_heads=self.num_heads,
83 | q_proj_weight=self.q_proj.weight,
84 | k_proj_weight=self.k_proj.weight,
85 | v_proj_weight=self.v_proj.weight,
86 | in_proj_weight=None,
87 | in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
88 | bias_k=None,
89 | bias_v=None,
90 | add_zero_attn=False,
91 | dropout_p=0.,
92 | out_proj_weight=self.c_proj.weight,
93 | out_proj_bias=self.c_proj.bias,
94 | use_separate_proj_weight=True,
95 | training=self.training,
96 | need_weights=False
97 | )
98 |
99 | return x[0]
100 |
101 |
102 | class ModifiedResNet(nn.Module):
103 | """
104 | A ResNet class that is similar to torchvision's but contains the following changes:
105 | - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
106 | - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
107 | - The final pooling layer is a QKV attention instead of an average pool
108 | """
109 |
110 | def __init__(self, layers, output_dim, heads, image_size=224, width=64):
111 | super().__init__()
112 | self.output_dim = output_dim
113 | self.image_size = image_size
114 |
115 | # the 3-layer stem
116 | self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
117 | self.bn1 = nn.BatchNorm2d(width // 2)
118 | self.act1 = nn.ReLU(inplace=True)
119 | self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
120 | self.bn2 = nn.BatchNorm2d(width // 2)
121 | self.act2 = nn.ReLU(inplace=True)
122 | self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
123 | self.bn3 = nn.BatchNorm2d(width)
124 | self.act3 = nn.ReLU(inplace=True)
125 | self.avgpool = nn.AvgPool2d(2)
126 |
127 | # residual layers
128 | self._inplanes = width # this is a *mutable* variable used during construction
129 | self.layer1 = self._make_layer(width, layers[0])
130 | self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
131 | self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
132 | self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
133 |
134 | embed_dim = width * 32 # the ResNet feature dimension
135 | self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
136 |
137 | self.init_parameters()
138 |
139 | def _make_layer(self, planes, blocks, stride=1):
140 | layers = [Bottleneck(self._inplanes, planes, stride)]
141 |
142 | self._inplanes = planes * Bottleneck.expansion
143 | for _ in range(1, blocks):
144 | layers.append(Bottleneck(self._inplanes, planes))
145 |
146 | return nn.Sequential(*layers)
147 |
148 | def init_parameters(self):
149 | if self.attnpool is not None:
150 | std = self.attnpool.c_proj.in_features ** -0.5
151 | nn.init.normal_(self.attnpool.q_proj.weight, std=std)
152 | nn.init.normal_(self.attnpool.k_proj.weight, std=std)
153 | nn.init.normal_(self.attnpool.v_proj.weight, std=std)
154 | nn.init.normal_(self.attnpool.c_proj.weight, std=std)
155 |
156 | for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
157 | for name, param in resnet_block.named_parameters():
158 | if name.endswith("bn3.weight"):
159 | nn.init.zeros_(param)
160 |
161 | def lock(self, unlocked_groups=0, freeze_bn_stats=False):
162 | assert unlocked_groups == 0, 'partial locking not currently supported for this model'
163 | for param in self.parameters():
164 | param.requires_grad = False
165 | if freeze_bn_stats:
166 | freeze_batch_norm_2d(self)
167 |
168 | @torch.jit.ignore
169 | def set_grad_checkpointing(self, enable=True):
170 | # FIXME support for non-transformer
171 | pass
172 |
173 | def stem(self, x):
174 | x = self.act1(self.bn1(self.conv1(x)))
175 | x = self.act2(self.bn2(self.conv2(x)))
176 | x = self.act3(self.bn3(self.conv3(x)))
177 | x = self.avgpool(x)
178 | return x
179 |
180 | def forward(self, x):
181 | x = self.stem(x)
182 | x = self.layer1(x)
183 | x = self.layer2(x)
184 | x = self.layer3(x)
185 | x = self.layer4(x)
186 | x = self.attnpool(x)
187 |
188 | return x
189 |
--------------------------------------------------------------------------------
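To make the shapes concrete, a hedged sketch of instantiating this backbone with RN50-style hyperparameters; the layer counts, head count and output dimension follow CLIP's RN50 convention and are not read from this repo's configs:

import torch
from models.eva_clip.modified_resnet import ModifiedResNet  # assumed import path

# Four stages of [3, 4, 6, 3] bottlenecks with width 64 give an attnpool embed dim of 2048.
model = ModifiedResNet(layers=[3, 4, 6, 3], output_dim=1024, heads=32, image_size=224, width=64)
model.eval()

with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
print(feats.shape)  # torch.Size([1, 1024])
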
/metric/face_sim_fid.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | import cv2
5 | import numpy
6 | import numpy as np
7 | import torch
8 | from insightface.app import FaceAnalysis
9 | from insightface.utils import face_align
10 | from PIL import Image
11 | from torchvision import models, transforms
12 | from .curricularface import get_model
13 |
14 |
15 | def load_image(image):
16 | img = image.convert('RGB')
17 | img = transforms.Resize((299, 299))(img) # Resize to Inception input size
18 | img = transforms.ToTensor()(img)
19 | return img.unsqueeze(0) # Add batch dimension
20 |
21 | def get_face_keypoints(face_model, image_bgr):
22 | face_info = face_model.get(image_bgr)
23 | if len(face_info) > 0:
24 | return sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[-1]
25 | return None
26 |
27 | def sample_video_frames(video_path, num_frames=24):
28 | cap = cv2.VideoCapture(video_path)
29 | total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
30 | frame_indices = np.linspace(0, total_frames - 1, num_frames, dtype=int)
31 |
32 | frames = []
33 | for idx in frame_indices:
34 | cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
35 | ret, frame = cap.read()
36 | if ret:
37 | frames.append(frame)
38 | cap.release()
39 | return frames
40 |
41 | def process_image(face_model, image_path):
42 | if isinstance(image_path, str):
43 | np_faceid_image = np.array(Image.open(image_path).convert("RGB"))
44 | elif isinstance(image_path, numpy.ndarray):
45 | np_faceid_image = image_path
46 | else:
47 | raise TypeError("image_path should be a string or a numpy.ndarray")
48 |
49 | image_bgr = cv2.cvtColor(np_faceid_image, cv2.COLOR_RGB2BGR)
50 |
51 | face_info = get_face_keypoints(face_model, image_bgr)
52 | if face_info is None:
53 | padded_image, sub_coord = pad_np_bgr_image(image_bgr)
54 | face_info = get_face_keypoints(face_model, padded_image)
55 | if face_info is None:
56 | print("Warning: No face detected in the image. Continuing processing...")
57 | return None, None
58 | face_kps = face_info['kps']
59 | face_kps -= np.array(sub_coord)
60 | else:
61 | face_kps = face_info['kps']
62 | arcface_embedding = face_info['embedding']
63 |
64 | norm_face = face_align.norm_crop(image_bgr, landmark=face_kps, image_size=224)
65 | align_face = cv2.cvtColor(norm_face, cv2.COLOR_BGR2RGB)
66 |
67 | return align_face, arcface_embedding
68 |
69 |
70 | def get_face_keypoints2(face_model, image_bgr):
71 | face_info = face_model.get(image_bgr)
72 | if len(face_info) > 0:
73 | return sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[-2:]
74 | return None
75 |
76 |
77 | def matrix_sqrt(matrix):
78 | eigenvalues, eigenvectors = torch.linalg.eigh(matrix)
79 | sqrt_eigenvalues = torch.sqrt(torch.clamp(eigenvalues, min=0))
80 | sqrt_matrix = (eigenvectors * sqrt_eigenvalues).mm(eigenvectors.T)
81 | return sqrt_matrix
82 |
83 |
84 | def calculate_fid(real_activations, fake_activations, device="cuda"):
85 | real_activations_tensor = torch.tensor(real_activations).to(device)
86 | fake_activations_tensor = torch.tensor(fake_activations).to(device)
87 |
88 | mu1 = real_activations_tensor.mean(dim=0)
89 | sigma1 = torch.cov(real_activations_tensor.T)
90 | mu2 = fake_activations_tensor.mean(dim=0)
91 | sigma2 = torch.cov(fake_activations_tensor.T)
92 |
93 | ssdiff = torch.sum((mu1 - mu2) ** 2)
94 | covmean = matrix_sqrt(sigma1.mm(sigma2))
95 | if torch.is_complex(covmean):
96 | covmean = covmean.real
97 | fid = ssdiff + torch.trace(sigma1 + sigma2 - 2 * covmean)
98 | return fid.item()
99 |
100 |
101 |
102 | def process_image2(face_model, image_path):
103 | if isinstance(image_path, str):
104 | np_faceid_image = np.array(Image.open(image_path).convert("RGB"))
105 | elif isinstance(image_path, numpy.ndarray):
106 | np_faceid_image = image_path
107 | else:
108 | raise TypeError("image_path should be a string or a numpy.ndarray")
109 |
110 | image_bgr = cv2.cvtColor(np_faceid_image, cv2.COLOR_RGB2BGR)
111 |
112 | face_info = get_face_keypoints2(face_model, image_bgr)
113 | print(len(face_info))
114 | align_face_list = []
115 | arcface_embedding_list = []
116 | for f in face_info:
117 | face_kps = f['kps']
118 | arcface_embedding = f['embedding']
119 | norm_face = face_align.norm_crop(image_bgr, landmark=face_kps, image_size=224)
120 | align_face = cv2.cvtColor(norm_face, cv2.COLOR_BGR2RGB)
121 | align_face_list.append(align_face)
122 | arcface_embedding_list.append(arcface_embedding)
123 | return align_face_list, arcface_embedding_list
124 |
125 |
126 | @torch.no_grad()
127 | def inference(face_model, img, device):
128 | img = cv2.resize(img, (112, 112))
129 | img = np.transpose(img, (2, 0, 1))
130 | img = torch.from_numpy(img).unsqueeze(0).float().to(device)
131 | img.div_(255).sub_(0.5).div_(0.5)
132 | embedding = face_model(img).detach().cpu().numpy()[0]
133 | return embedding / np.linalg.norm(embedding)
134 |
135 |
136 | def get_activations(images, model, batch_size=16):
137 | model.eval()
138 | activations = []
139 | with torch.no_grad():
140 | for i in range(0, len(images), batch_size):
141 | batch = images[i:i + batch_size]
142 | pred = model(batch)
143 | activations.append(pred)
144 | activations = torch.cat(activations, dim=0).cpu().numpy()
145 | if activations.shape[0] == 1:
146 | activations = np.repeat(activations, 2, axis=0)
147 | return activations
148 |
149 |
150 | import math
151 |
152 | def cosine_similarity(list_1, list_2):
153 | cos_list = []
154 | for list1 in list_1:
155 | for list2 in list_2:
156 | dot_product = sum(a * b for a, b in zip(list1, list2))
157 | magnitude1 = math.sqrt(sum(a ** 2 for a in list1))
158 | magnitude2 = math.sqrt(sum(b ** 2 for b in list2))
159 | cos_list.append(dot_product / (magnitude1 * magnitude2))
160 | return max(cos_list)
161 |
162 |
163 | def get_face_sim_and_fid(
164 | image_path_list,
165 | video_path,
166 | model_path,
167 | device="cuda",
168 | ):
169 | face_arc_path = os.path.join(model_path, "face_encoder")
170 | face_cur_path = os.path.join(face_arc_path, "glint360k_curricular_face_r101_backbone.bin")
171 |
172 | # Initialize FaceEncoder model for face detection and embedding extraction
173 | face_arc_model = FaceAnalysis(root=face_arc_path, providers=['CUDAExecutionProvider'])
174 | face_arc_model.prepare(ctx_id=0, det_size=(320, 320))
175 |
176 | # Load face recognition model
177 | face_cur_model = get_model('IR_101')([112, 112])
178 | face_cur_model.load_state_dict(torch.load(face_cur_path, map_location="cpu"))
179 | face_cur_model = face_cur_model.to(device)
180 | face_cur_model.eval()
181 |
182 | # Load InceptionV3 model for FID calculation
183 | fid_model = models.inception_v3(weights=models.Inception_V3_Weights.DEFAULT)
184 | fid_model.fc = torch.nn.Identity() # Remove final classification layer
185 | fid_model.eval()
186 | fid_model = fid_model.to(device)
187 |
188 | align_face_image_list = []
189 | arcface_image_embedding_list = []
190 | real_activations_list = []
191 | cur_image_embedding_list = []
192 | for image_path in image_path_list:
193 | align_face_image, arcface_image_embedding = process_image(face_arc_model, image_path)
194 | if align_face_image is None:
195 | print(f"Error processing image at {image_path}")
196 | return
197 | #print(len(arcface_image_embedding)) #512
198 | align_face_image_list.append(align_face_image)
199 | arcface_image_embedding_list.append(arcface_image_embedding)
200 |
201 | cur_image_embedding = inference(face_cur_model, align_face_image, device)
202 | cur_image_embedding_list.append(cur_image_embedding)
203 | align_face_image_pil = Image.fromarray(align_face_image)
204 | real_image = load_image(align_face_image_pil).to(device)
205 | real_activations = get_activations(real_image, fid_model)
206 | #print(len(real_activations[0])) # 2048
207 | real_activations_list.append(real_activations)
208 |
209 | video_frames = sample_video_frames(video_path, num_frames=24)
210 | print("video frames: ", len(video_frames))
211 |
212 | cur_scores = []
213 | arc_scores = []
214 | fid_face = []
215 |
216 | for frame in video_frames:
217 | # Convert to RGB once at the beginning
218 | frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
219 |
220 | # Process the frame for ArcFace embeddings
221 | align_face_frame_list, arcface_frame_embedding_list = process_image2(face_arc_model, frame_rgb)
222 | print(len(arcface_frame_embedding_list))
223 |
224 | cos = cosine_similarity(arcface_image_embedding_list, arcface_frame_embedding_list)
225 | print("cos: ", cos)
226 | arc_scores.append(cos)
227 |
228 | # Process FID score
229 | f_list = []
230 | for align_face_frame in align_face_frame_list:
231 | align_face_frame_pil = Image.fromarray(align_face_frame)
232 | fake_image = load_image(align_face_frame_pil).to(device)
233 | fake_activations = get_activations(fake_image, fid_model)
234 | for real_activations in real_activations_list:
235 | fid_score = calculate_fid(real_activations, fake_activations, device)
236 | f_list.append(fid_score)
237 | print("fid: ", min(f_list))
238 | fid_face.append(min(f_list))
239 |
240 | # Aggregate results with default values for empty lists
241 | avg_arc_score = np.mean(arc_scores) if arc_scores else 0.0
242 | avg_fid_score = np.mean(fid_face) if fid_face else 0.0
243 | return avg_arc_score, avg_fid_score
244 |
245 |
246 |
--------------------------------------------------------------------------------
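A hedged sketch of calling the metric above; the video path and checkpoint root are placeholders, and model_path must contain a face_encoder folder with the glint360k_curricular_face_r101_backbone.bin checkpoint referenced in the function:

from metric.face_sim_fid import get_face_sim_and_fid  # assumed import path from the repo layout

arc_sim, fid = get_face_sim_and_fid(
    image_path_list=["asserts/0.jpg", "asserts/1.jpg"],  # reference identity images bundled with the repo
    video_path="results/sample.mp4",                     # hypothetical output of infer.py
    model_path="/path/to/checkpoints",                   # placeholder checkpoint root
    device="cuda",
)
print(f"avg ArcFace cosine similarity: {arc_sim:.4f}, avg face FID: {fid:.2f}")
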
/metric/curricularface/model_irse.py:
--------------------------------------------------------------------------------
1 | # The implementation is adapted from TFace, made publicly available under the Apache-2.0 license at
2 | # https://github.com/Tencent/TFace/blob/master/recognition/torchkit/backbone/model_irse.py
3 | from collections import namedtuple
4 |
5 | from torch.nn import BatchNorm1d, BatchNorm2d, Conv2d, Dropout, Linear, MaxPool2d, Module, PReLU, Sequential
6 |
7 | from .common import Flatten, SEModule, initialize_weights
8 |
9 |
10 | class BasicBlockIR(Module):
11 | """ BasicBlock for IRNet
12 | """
13 |
14 | def __init__(self, in_channel, depth, stride):
15 | super(BasicBlockIR, self).__init__()
16 | if in_channel == depth:
17 | self.shortcut_layer = MaxPool2d(1, stride)
18 | else:
19 | self.shortcut_layer = Sequential(
20 | Conv2d(in_channel, depth, (1, 1), stride, bias=False),
21 | BatchNorm2d(depth))
22 | self.res_layer = Sequential(
23 | BatchNorm2d(in_channel),
24 | Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
25 | BatchNorm2d(depth), PReLU(depth),
26 | Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
27 | BatchNorm2d(depth))
28 |
29 | def forward(self, x):
30 | shortcut = self.shortcut_layer(x)
31 | res = self.res_layer(x)
32 |
33 | return res + shortcut
34 |
35 |
36 | class BottleneckIR(Module):
37 | """ BasicBlock with bottleneck for IRNet
38 | """
39 |
40 | def __init__(self, in_channel, depth, stride):
41 | super(BottleneckIR, self).__init__()
42 | reduction_channel = depth // 4
43 | if in_channel == depth:
44 | self.shortcut_layer = MaxPool2d(1, stride)
45 | else:
46 | self.shortcut_layer = Sequential(
47 | Conv2d(in_channel, depth, (1, 1), stride, bias=False),
48 | BatchNorm2d(depth))
49 | self.res_layer = Sequential(
50 | BatchNorm2d(in_channel),
51 | Conv2d(
52 | in_channel, reduction_channel, (1, 1), (1, 1), 0, bias=False),
53 | BatchNorm2d(reduction_channel), PReLU(reduction_channel),
54 | Conv2d(
55 | reduction_channel,
56 | reduction_channel, (3, 3), (1, 1),
57 | 1,
58 | bias=False), BatchNorm2d(reduction_channel),
59 | PReLU(reduction_channel),
60 | Conv2d(reduction_channel, depth, (1, 1), stride, 0, bias=False),
61 | BatchNorm2d(depth))
62 |
63 | def forward(self, x):
64 | shortcut = self.shortcut_layer(x)
65 | res = self.res_layer(x)
66 |
67 | return res + shortcut
68 |
69 |
70 | class BasicBlockIRSE(BasicBlockIR):
71 |
72 | def __init__(self, in_channel, depth, stride):
73 | super(BasicBlockIRSE, self).__init__(in_channel, depth, stride)
74 | self.res_layer.add_module('se_block', SEModule(depth, 16))
75 |
76 |
77 | class BottleneckIRSE(BottleneckIR):
78 |
79 | def __init__(self, in_channel, depth, stride):
80 | super(BottleneckIRSE, self).__init__(in_channel, depth, stride)
81 | self.res_layer.add_module('se_block', SEModule(depth, 16))
82 |
83 |
84 | class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
85 | '''A named tuple describing a ResNet block.'''
86 |
87 |
88 | def get_block(in_channel, depth, num_units, stride=2):
89 | return [Bottleneck(in_channel, depth, stride)] + \
90 | [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
91 |
92 |
93 | def get_blocks(num_layers):
94 | if num_layers == 18:
95 | blocks = [
96 | get_block(in_channel=64, depth=64, num_units=2),
97 | get_block(in_channel=64, depth=128, num_units=2),
98 | get_block(in_channel=128, depth=256, num_units=2),
99 | get_block(in_channel=256, depth=512, num_units=2)
100 | ]
101 | elif num_layers == 34:
102 | blocks = [
103 | get_block(in_channel=64, depth=64, num_units=3),
104 | get_block(in_channel=64, depth=128, num_units=4),
105 | get_block(in_channel=128, depth=256, num_units=6),
106 | get_block(in_channel=256, depth=512, num_units=3)
107 | ]
108 | elif num_layers == 50:
109 | blocks = [
110 | get_block(in_channel=64, depth=64, num_units=3),
111 | get_block(in_channel=64, depth=128, num_units=4),
112 | get_block(in_channel=128, depth=256, num_units=14),
113 | get_block(in_channel=256, depth=512, num_units=3)
114 | ]
115 | elif num_layers == 100:
116 | blocks = [
117 | get_block(in_channel=64, depth=64, num_units=3),
118 | get_block(in_channel=64, depth=128, num_units=13),
119 | get_block(in_channel=128, depth=256, num_units=30),
120 | get_block(in_channel=256, depth=512, num_units=3)
121 | ]
122 | elif num_layers == 152:
123 | blocks = [
124 | get_block(in_channel=64, depth=256, num_units=3),
125 | get_block(in_channel=256, depth=512, num_units=8),
126 | get_block(in_channel=512, depth=1024, num_units=36),
127 | get_block(in_channel=1024, depth=2048, num_units=3)
128 | ]
129 | elif num_layers == 200:
130 | blocks = [
131 | get_block(in_channel=64, depth=256, num_units=3),
132 | get_block(in_channel=256, depth=512, num_units=24),
133 | get_block(in_channel=512, depth=1024, num_units=36),
134 | get_block(in_channel=1024, depth=2048, num_units=3)
135 | ]
136 |
137 | return blocks
138 |
139 |
140 | class Backbone(Module):
141 |
142 | def __init__(self, input_size, num_layers, mode='ir'):
143 | """ Args:
144 | input_size: input_size of backbone
145 | num_layers: num_layers of backbone
146 | mode: either 'ir' or 'ir_se'
147 | """
148 | super(Backbone, self).__init__()
149 | assert input_size[0] in [112, 224], \
150 | 'input_size should be [112, 112] or [224, 224]'
151 | assert num_layers in [18, 34, 50, 100, 152, 200], \
152 | 'num_layers should be 18, 34, 50, 100, 152 or 200'
153 | assert mode in ['ir', 'ir_se'], \
154 | 'mode should be ir or ir_se'
155 | self.input_layer = Sequential(
156 | Conv2d(3, 64, (3, 3), 1, 1, bias=False), BatchNorm2d(64),
157 | PReLU(64))
158 | blocks = get_blocks(num_layers)
159 | if num_layers <= 100:
160 | if mode == 'ir':
161 | unit_module = BasicBlockIR
162 | elif mode == 'ir_se':
163 | unit_module = BasicBlockIRSE
164 | output_channel = 512
165 | else:
166 | if mode == 'ir':
167 | unit_module = BottleneckIR
168 | elif mode == 'ir_se':
169 | unit_module = BottleneckIRSE
170 | output_channel = 2048
171 |
172 | if input_size[0] == 112:
173 | self.output_layer = Sequential(
174 | BatchNorm2d(output_channel), Dropout(0.4), Flatten(),
175 | Linear(output_channel * 7 * 7, 512),
176 | BatchNorm1d(512, affine=False))
177 | else:
178 | self.output_layer = Sequential(
179 | BatchNorm2d(output_channel), Dropout(0.4), Flatten(),
180 | Linear(output_channel * 14 * 14, 512),
181 | BatchNorm1d(512, affine=False))
182 |
183 | modules = []
184 | mid_layer_indices = [] # [2, 15, 45, 48], total 49 layers for IR101
185 | for block in blocks:
186 | if len(mid_layer_indices) == 0:
187 | mid_layer_indices.append(len(block) - 1)
188 | else:
189 | mid_layer_indices.append(len(block) + mid_layer_indices[-1])
190 | for bottleneck in block:
191 | modules.append(
192 | unit_module(bottleneck.in_channel, bottleneck.depth,
193 | bottleneck.stride))
194 | self.body = Sequential(*modules)
195 | self.mid_layer_indices = mid_layer_indices[-4:]
196 |
197 | # self.dtype = next(self.parameters()).dtype
198 | initialize_weights(self.modules())
199 |
200 | def device(self):
201 | return next(self.parameters()).device
202 |
203 | def dtype(self):
204 | return next(self.parameters()).dtype
205 |
206 | def forward(self, x, return_mid_feats=False):
207 | x = self.input_layer(x)
208 | if not return_mid_feats:
209 | x = self.body(x)
210 | x = self.output_layer(x)
211 | return x
212 | else:
213 | out_feats = []
214 | for idx, module in enumerate(self.body):
215 | x = module(x)
216 | if idx in self.mid_layer_indices:
217 | out_feats.append(x)
218 | x = self.output_layer(x)
219 | return x, out_feats
220 |
221 |
222 | def IR_18(input_size):
223 | """ Constructs a ir-18 model.
224 | """
225 | model = Backbone(input_size, 18, 'ir')
226 |
227 | return model
228 |
229 |
230 | def IR_34(input_size):
231 | """ Constructs a ir-34 model.
232 | """
233 | model = Backbone(input_size, 34, 'ir')
234 |
235 | return model
236 |
237 |
238 | def IR_50(input_size):
239 | """ Constructs a ir-50 model.
240 | """
241 | model = Backbone(input_size, 50, 'ir')
242 |
243 | return model
244 |
245 |
246 | def IR_101(input_size):
247 | """ Constructs a ir-101 model.
248 | """
249 | model = Backbone(input_size, 100, 'ir')
250 |
251 | return model
252 |
253 |
254 | def IR_152(input_size):
255 | """ Constructs a ir-152 model.
256 | """
257 | model = Backbone(input_size, 152, 'ir')
258 |
259 | return model
260 |
261 |
262 | def IR_200(input_size):
263 | """ Constructs a ir-200 model.
264 | """
265 | model = Backbone(input_size, 200, 'ir')
266 |
267 | return model
268 |
269 |
270 | def IR_SE_50(input_size):
271 | """ Constructs a ir_se-50 model.
272 | """
273 | model = Backbone(input_size, 50, 'ir_se')
274 |
275 | return model
276 |
277 |
278 | def IR_SE_101(input_size):
279 | """ Constructs a ir_se-101 model.
280 | """
281 | model = Backbone(input_size, 100, 'ir_se')
282 |
283 | return model
284 |
285 |
286 | def IR_SE_152(input_size):
287 | """ Constructs a ir_se-152 model.
288 | """
289 | model = Backbone(input_size, 152, 'ir_se')
290 |
291 | return model
292 |
293 |
294 | def IR_SE_200(input_size):
295 | """ Constructs a ir_se-200 model.
296 | """
297 | model = Backbone(input_size, 200, 'ir_se')
298 |
299 | return model
300 |
--------------------------------------------------------------------------------
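For orientation, a hedged sketch of building the IR-101 backbone and embedding one aligned face crop, using the same [-1, 1] normalization as inference() in metric/face_sim_fid.py; the import mirrors that file's relative import and is an assumption:

import torch
from metric.curricularface import get_model  # assumed import path

model = get_model('IR_101')([112, 112])
model.eval()

# A dummy aligned RGB face crop in [0, 255], normalized to [-1, 1] as in inference().
img = torch.randint(0, 256, (1, 3, 112, 112)).float()
img = img.div(255).sub(0.5).div(0.5)

with torch.no_grad():
    emb = model(img)  # 512-d identity embedding
print(emb.shape)      # torch.Size([1, 512])
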
/infer.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import argparse
4 | import numpy as np
5 | from PIL import Image, ImageOps
6 | import random
7 | import json
8 | import torch
9 | from diffusers import CogVideoXDPMScheduler
10 |
11 | import insightface
12 | from insightface.app import FaceAnalysis
13 | from facexlib.parsing import init_parsing_model
14 | from facexlib.utils.face_restoration_helper import FaceRestoreHelper
15 | from diffusers.training_utils import free_memory
16 | from diffusers.utils import export_to_video, load_image, load_video
17 |
18 | from models.utils import process_face_embeddings_split
19 | from models.transformer_ingredients import IngredientsTransformer3DModel
20 | from models.pipeline_ingredients import IngredientsPipeline
21 | from models.eva_clip import create_model_and_transforms
22 | from models.eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
23 | from models.eva_clip.utils_qformer import resize_numpy_image_long
24 |
25 | def get_random_seed():
26 | return random.randint(0, 2**32 - 1)
27 |
28 | def generate_video(
29 | prompt: str,
30 | model_path: str,
31 | output_path: str = "./output",
32 | num_inference_steps: int = 50,
33 | guidance_scale: float = 6.0,
34 | num_videos_per_prompt: int = 1,
35 | dtype: torch.dtype = torch.bfloat16,
36 | seed: int = 42,
37 | img_file_path: str = None,
38 | ):
39 | """
40 | Generates a video based on the given prompt and saves it to the specified path.
41 |
42 | Parameters:
43 | - prompt (str): The description of the video to be generated.
44 | - model_path (str): The path of the pre-trained model to be used.
45 | - img_file_path (list[str]): Paths of the reference face images used for identity conditioning;
46 | exactly two images are expected.
47 | - output_path (str): The path where the generated video will be saved.
48 | - num_inference_steps (int): Number of steps for the inference process. More steps can result in better quality.
49 | - guidance_scale (float): The scale for classifier-free guidance. Higher values can lead to better alignment with the prompt.
50 | - num_videos_per_prompt (int): Number of videos to generate per prompt.
51 | - dtype (torch.dtype): The data type for computation (default is torch.bfloat16).
52 | - seed (int): The seed for reproducibility.
53 | """
54 | device = "cuda"
55 | torch.manual_seed(seed)
56 | torch.cuda.manual_seed_all(seed)
57 | np.random.seed(seed)
58 | random.seed(seed)
59 | torch.backends.cudnn.deterministic = True
60 |
61 | # 0. load main models
62 | if not os.path.exists(output_path):
63 | os.makedirs(output_path, exist_ok=True)
64 |
65 | if os.path.exists(os.path.join(model_path, "transformer_ema")):
66 | subfolder = "transformer_ema"
67 | else:
68 | subfolder = "transformer"
69 |
70 | transformer = IngredientsTransformer3DModel.from_pretrained_cus(model_path, subfolder=subfolder)
71 | transformer.eval()
72 | scheduler = CogVideoXDPMScheduler.from_pretrained(model_path, subfolder="scheduler")
73 |
74 | try:
75 | is_kps = transformer.config.is_kps
76 | except AttributeError:
77 | is_kps = False
78 |
79 | print("is kps", is_kps)
80 | # 1. load face helper models
81 | face_helper = FaceRestoreHelper(
82 | upscale_factor=1,
83 | face_size=512,
84 | crop_ratio=(1, 1),
85 | det_model='retinaface_resnet50',
86 | save_ext='png',
87 | device=device,
88 | model_rootpath=os.path.join(model_path, "face_encoder")
89 | )
90 | face_helper.face_parse = None
91 | face_helper.face_parse = init_parsing_model(model_name='bisenet', device=device, model_rootpath=os.path.join(model_path, "face_encoder"))
92 | face_helper.face_det.eval()
93 | face_helper.face_parse.eval()
94 |
95 | model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', os.path.join(model_path, "face_encoder", "EVA02_CLIP_L_336_psz14_s6B.pt"), force_custom_clip=True)
96 | face_clip_model = model.visual
97 | face_clip_model.eval()
98 |
99 | eva_transform_mean = getattr(face_clip_model, 'image_mean', OPENAI_DATASET_MEAN)
100 | eva_transform_std = getattr(face_clip_model, 'image_std', OPENAI_DATASET_STD)
101 | if not isinstance(eva_transform_mean, (list, tuple)):
102 | eva_transform_mean = (eva_transform_mean,) * 3
103 | if not isinstance(eva_transform_std, (list, tuple)):
104 | eva_transform_std = (eva_transform_std,) * 3
105 | eva_transform_mean = eva_transform_mean
106 | eva_transform_std = eva_transform_std
107 |
108 | face_main_model = FaceAnalysis(name='antelopev2', root=os.path.join(model_path, "face_encoder"), providers=['CUDAExecutionProvider'])
109 | handler_ante = insightface.model_zoo.get_model(f'{model_path}/face_encoder/models/antelopev2/glintr100.onnx', providers=['CUDAExecutionProvider'])
110 | face_main_model.prepare(ctx_id=0, det_size=(640, 640))
111 | handler_ante.prepare(ctx_id=0)
112 |
113 | face_clip_model.to(device, dtype=dtype)
114 | face_helper.face_det.to(device)
115 | face_helper.face_parse.to(device)
116 | transformer.to(device, dtype=dtype)
117 | free_memory()
118 |
119 | pipe = IngredientsPipeline.from_pretrained(model_path, transformer=transformer, scheduler=scheduler, torch_dtype=dtype)
120 |
121 | # 2. Set Scheduler.
122 | scheduler_args = {}
123 | if "variance_type" in pipe.scheduler.config:
124 | variance_type = pipe.scheduler.config.variance_type
125 | if variance_type in ["learned", "learned_range"]:
126 | variance_type = "fixed_small"
127 | scheduler_args["variance_type"] = variance_type
128 |
129 | pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
130 |
131 | # 3. Enable CPU offload for the model.
132 | pipe.to(device)
133 |
134 | # Turn the options below on if you don't have multiple GPUs or enough GPU memory (such as an H100); inference will take longer and quality may drop slightly.
135 | # pipe.enable_model_cpu_offload()
136 | # pipe.enable_sequential_cpu_offload()
137 | # pipe.vae.enable_slicing()
138 | # pipe.vae.enable_tiling()
139 |
140 | # process face data
141 | img_file_path_list = img_file_path
142 |
143 | print(img_file_path_list)
144 | print(prompt)
145 |
146 | id_cond_list = []
147 | id_vit_hidden_list = []
148 |
149 | id_image_list = []
150 | for img_file_path in img_file_path_list:
151 | id_image = np.array(load_image(image=img_file_path).convert("RGB"))
152 | id_image = resize_numpy_image_long(id_image, 1024)
153 | id_image_list.append(id_image)
154 | id_cond_list, id_vit_hidden_list, align_crop_face_image, face_kps, _ = process_face_embeddings_split(face_helper, face_clip_model, handler_ante,
155 | eva_transform_mean, eva_transform_std,
156 | face_main_model, device, dtype, id_image_list,
157 | original_id_images=id_image_list, is_align_face=True,
158 | cal_uncond=False)
159 | if is_kps:
160 | kps_cond = face_kps
161 | else:
162 | kps_cond = None
163 | print("kps_cond: ", kps_cond, "align_face: ", align_crop_face_image.size(), )
164 | print("id_cond: ", len(id_cond_list), )
165 |
166 | tensor = align_crop_face_image.cpu().detach()
167 | tensor = tensor.squeeze()
168 | tensor = tensor.permute(1, 2, 0)
169 | tensor = tensor.numpy() * 255
170 | tensor = tensor.astype(np.uint8)
171 | image = ImageOps.exif_transpose(Image.fromarray(tensor))
172 |
173 | prompt = prompt.strip('"')
174 |
175 | generator = torch.Generator(device).manual_seed(seed) if seed else None
176 |
177 | with torch.no_grad():
178 | video_generate = pipe(
179 | prompt=prompt,
180 | image=image,
181 | num_videos_per_prompt=num_videos_per_prompt,
182 | num_inference_steps=num_inference_steps,
183 | num_frames=49,
184 | use_dynamic_cfg=False,
185 | guidance_scale=guidance_scale,
186 | generator=generator,
187 | id_vit_hidden=id_vit_hidden_list,
188 | id_cond=id_cond_list,
189 | kps_cond=kps_cond,
190 | ).frames[0]
191 |
192 | # 5. Export the generated frames to a video file. fps must be 8 to match the original training videos.
193 | file_count = len([f for f in os.listdir(output_path) if os.path.isfile(os.path.join(output_path, f))])
194 | filename = f"{output_path}/{seed}_{file_count:04d}.mp4"
195 | print(filename)
196 | export_to_video(video_generate, filename, fps=8)
197 |
198 |
199 | if __name__ == "__main__":
200 | parser = argparse.ArgumentParser(description="Generate a video from a text prompt using Ingredients")
201 |
202 | # ckpt arguments
203 | parser.add_argument("--model_path", type=str, default="/maindata/data/shared/public/zhengcong.fei/ckpts/cogvideox1.5/consistent_id_sam", help="The path of the pre-trained model to be used")
204 | # input arguments
205 | parser.add_argument("--img_file_path", nargs='+', default=['asserts/0.jpg', 'asserts/1.jpg'])
206 | parser.add_argument("--prompt", type=str, default="Two men in half bodies, are seated in a dimly lit room, possibly an office or meeting room, with a formal atmosphere.")
207 | # output arguments
208 | parser.add_argument("--output_path", type=str, default="./results", help="The path where the generated video will be saved")
209 |
210 | # generation arguments
211 | parser.add_argument("--guidance_scale", type=float, default=6.0, help="The scale for classifier-free guidance")
212 | parser.add_argument("--num_inference_steps", type=int, default=50, help="Number of steps for the inference process")
213 | parser.add_argument("--num_videos_per_prompt", type=int, default=1, help="Number of videos to generate per prompt")
214 | parser.add_argument("--dtype", type=str, default="bfloat16", help="The data type for computation (e.g., 'float16' or 'bfloat16')")
215 | parser.add_argument("--seed", type=int, default=2025, help="The seed for reproducibility")
216 |
217 | args = parser.parse_args()
218 | assert len(args.img_file_path) == 2
219 |
220 | generate_video(
221 | prompt=args.prompt,
222 | model_path=args.model_path,
223 | output_path=args.output_path,
224 | num_inference_steps=args.num_inference_steps,
225 | guidance_scale=args.guidance_scale,
226 | num_videos_per_prompt=args.num_videos_per_prompt,
227 | dtype=torch.float16 if args.dtype == "float16" else torch.bfloat16,
228 | seed=args.seed,
229 | img_file_path=args.img_file_path
230 | )
231 |
--------------------------------------------------------------------------------
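Besides the CLI entry point above, generate_video can be called directly from Python; a hedged sketch using the repo's bundled reference images, with the checkpoint path left as a placeholder:

import torch
from infer import generate_video  # assumes the script is importable from the repo root

generate_video(
    prompt="Two men in half bodies, are seated in a dimly lit room, possibly an office or meeting room, with a formal atmosphere.",
    model_path="/path/to/ingredients_checkpoints",  # placeholder; must match the layout expected above
    output_path="./results",
    num_inference_steps=50,
    guidance_scale=6.0,
    num_videos_per_prompt=1,
    dtype=torch.bfloat16,
    seed=2025,
    img_file_path=["asserts/0.jpg", "asserts/1.jpg"],  # exactly two reference faces, as the CLI asserts
)
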
/models/eva_clip/hf_model.py:
--------------------------------------------------------------------------------
1 | """ huggingface model adapter
2 |
3 | Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in a CLIP model.
4 | """
5 |
6 | import re
7 |
8 | import torch
9 | import torch.nn as nn
10 | from torch.nn import functional as F
11 | from torch import TensorType
12 | try:
13 | import transformers
14 | from transformers import AutoModel, AutoModelForMaskedLM, AutoTokenizer, AutoConfig, PretrainedConfig
15 | from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, \
16 | BaseModelOutputWithPoolingAndCrossAttentions
17 | except ImportError as e:
18 | transformers = None
19 |
20 |
21 | class BaseModelOutput:
22 | pass
23 |
24 |
25 | class PretrainedConfig:
26 | pass
27 |
28 | from .hf_configs import arch_dict
29 |
30 | # utils
31 | def _camel2snake(s):
32 | return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()
140 | # image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(x.device)
141 | # attn_mask = (x != self.config.pad_token_id).long()
142 | # out = self.transformer(
143 | # input_ids=x,
144 | # attention_mask=attn_mask,
145 | # encoder_hidden_states = image_embeds,
146 | # encoder_attention_mask = image_atts,
147 | # )
148 | # pooled_out = self.pooler(out, attn_mask)
149 |
150 | # return self.itm_proj(pooled_out)
151 |
152 | def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None):
153 | if masked_indices is None:
154 | masked_indices = torch.bernoulli(probability_matrix).bool()
155 |
156 | masked_indices[input_ids == self.tokenizer.pad_token_id] = False
157 | masked_indices[input_ids == self.tokenizer.cls_token_id] = False
158 |
159 | if targets is not None:
160 | targets[~masked_indices] = -100 # We only compute loss on masked tokens
161 |
162 | # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
163 | indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices
164 | input_ids[indices_replaced] = self.tokenizer.mask_token_id
165 |
166 | # 10% of the time, we replace masked input tokens with random word
167 | indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced
168 | random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device)
169 | input_ids[indices_random] = random_words[indices_random]
170 | # The rest of the time (10% of the time) we keep the masked input tokens unchanged
171 |
172 | if targets is not None:
173 | return input_ids, targets
174 | else:
175 | return input_ids
176 |
177 | def forward_mlm(self, input_ids, image_embeds, mlm_probability=0.25):
178 | labels = input_ids.clone()
179 | attn_mask = (input_ids != self.config.pad_token_id).long()
180 | image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(input_ids.device)
181 | vocab_size = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["vocab_size"])
182 | probability_matrix = torch.full(labels.shape, mlm_probability)
183 | input_ids, labels = self.mask(input_ids, vocab_size, input_ids.device, targets=labels,
184 | probability_matrix = probability_matrix)
185 | mlm_output = self.transformer(input_ids,
186 | attention_mask = attn_mask,
187 | encoder_hidden_states = image_embeds,
188 | encoder_attention_mask = image_atts,
189 | return_dict = True,
190 | labels = labels,
191 | )
192 | return mlm_output.loss
193 | # mlm_output = self.transformer(input_ids,
194 | # attention_mask = attn_mask,
195 | # encoder_hidden_states = image_embeds,
196 | # encoder_attention_mask = image_atts,
197 | # return_dict = True,
198 | # ).last_hidden_state
199 | # logits = self.mlm_proj(mlm_output)
200 |
201 | # # logits = logits[:, :-1, :].contiguous().view(-1, vocab_size)
202 | # logits = logits[:, 1:, :].contiguous().view(-1, vocab_size)
203 | # labels = labels[:, 1:].contiguous().view(-1)
204 |
205 | # mlm_loss = F.cross_entropy(
206 | # logits,
207 | # labels,
208 | # # label_smoothing=0.1,
209 | # )
210 | # return mlm_loss
211 |
212 |
213 | def forward(self, x:TensorType) -> TensorType:
214 | attn_mask = (x != self.config.pad_token_id).long()
215 | out = self.transformer(input_ids=x, attention_mask=attn_mask)
216 | pooled_out = self.pooler(out, attn_mask)
217 |
218 | return self.proj(pooled_out)
219 |
220 | def lock(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
221 | if not unlocked_layers: # full freezing
222 | for n, p in self.transformer.named_parameters():
223 | p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
224 | return
225 |
226 | encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
227 | layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
228 | print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
229 | embeddings = getattr(
230 | self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
231 | modules = [embeddings, *layer_list][:-unlocked_layers]
232 | # freeze layers
233 | for module in modules:
234 | for n, p in module.named_parameters():
235 | p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
236 |
237 |
238 | @torch.jit.ignore
239 | def set_grad_checkpointing(self, enable=True):
240 | self.transformer.gradient_checkpointing_enable()
241 |
242 | def get_num_layers(self):
243 | encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
244 | layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
245 | return len(layer_list)
246 |
247 | def init_parameters(self):
248 | pass
249 |
--------------------------------------------------------------------------------
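
The mask() and forward_mlm() methods above implement the standard BERT-style corruption: positions are sampled with probability mlm_probability, padding and [CLS] positions are excluded, unmasked positions get the label -100 so the loss only covers corrupted tokens, and of the masked positions roughly 80% become [MASK], 10% become a random token, and 10% stay unchanged. The sketch below reproduces the same scheme as a standalone function; the pad/cls/mask token ids are illustrative assumptions rather than values taken from this repository.

import torch

def bert_style_mask(input_ids, vocab_size, mlm_probability=0.25,
                    pad_token_id=0, cls_token_id=101, mask_token_id=103):
    labels = input_ids.clone()
    masked_indices = torch.bernoulli(torch.full(labels.shape, mlm_probability)).bool()
    masked_indices[input_ids == pad_token_id] = False
    masked_indices[input_ids == cls_token_id] = False
    labels[~masked_indices] = -100  # loss is computed only on masked positions

    # 80% of masked positions -> [MASK]
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    input_ids[indices_replaced] = mask_token_id
    # half of the remaining 20% (10% overall) -> random token; the rest stay unchanged
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    input_ids[indices_random] = torch.randint(vocab_size, labels.shape, dtype=torch.long)[indices_random]
    return input_ids, labels

ids = torch.tensor([[101, 2023, 2003, 1037, 3231, 0, 0]])  # toy sequence with two padding tokens
corrupted, labels = bert_style_mask(ids.clone(), vocab_size=30522)
print(corrupted)
print(labels)
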
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/models/local_facial_extractor.py:
--------------------------------------------------------------------------------
1 | import math
2 | import torch
3 | import torch.nn as nn
4 |
5 |
6 | # FFN
7 | def FeedForward(dim, mult=4):
8 | inner_dim = int(dim * mult)
9 | return nn.Sequential(
10 | nn.LayerNorm(dim),
11 | nn.Linear(dim, inner_dim, bias=False),
12 | nn.GELU(),
13 | nn.Linear(inner_dim, dim, bias=False),
14 | )
15 |
16 |
17 | def reshape_tensor(x, heads):
18 | bs, length, width = x.shape
19 | # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
20 | x = x.view(bs, length, heads, -1)
21 | # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
22 | x = x.transpose(1, 2)
23 | # (bs, n_heads, length, dim_per_head) --> contiguous (bs, n_heads, length, dim_per_head) for batched matmul
24 | x = x.reshape(bs, heads, length, -1)
25 | return x
26 |
27 |
28 | class PerceiverAttention(nn.Module):
29 | def __init__(self, *, dim, dim_head=64, heads=8, kv_dim=None):
30 | super().__init__()
31 | self.scale = dim_head ** -0.5
32 | self.dim_head = dim_head
33 | self.heads = heads
34 | inner_dim = dim_head * heads
35 |
36 | self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim)
37 | self.norm2 = nn.LayerNorm(dim)
38 |
39 | self.to_q = nn.Linear(dim, inner_dim, bias=False)
40 | self.to_kv = nn.Linear(dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False)
41 | self.to_out = nn.Linear(inner_dim, dim, bias=False)
42 |
43 | def forward(self, x, latents):
44 | """
45 | Args:
46 | x (torch.Tensor): image features
47 | shape (b, n1, D)
48 | latents (torch.Tensor): latent features
49 | shape (b, n2, D)
50 | """
51 | x = self.norm1(x)
52 | latents = self.norm2(latents)
53 |
54 | b, seq_len, _ = latents.shape
55 |
56 | q = self.to_q(latents)
57 | kv_input = torch.cat((x, latents), dim=-2)
58 | k, v = self.to_kv(kv_input).chunk(2, dim=-1)
59 |
60 | q = reshape_tensor(q, self.heads)
61 | k = reshape_tensor(k, self.heads)
62 | v = reshape_tensor(v, self.heads)
63 |
64 | # attention
65 | scale = 1 / math.sqrt(math.sqrt(self.dim_head))
66 | weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
67 | weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
68 | out = weight @ v
69 |
70 | out = out.permute(0, 2, 1, 3).reshape(b, seq_len, -1)
71 |
72 | return self.to_out(out)
73 |
74 |
75 | class LocalFacialExtractor(nn.Module):
76 | def __init__(
77 | self,
78 | dim=1024,
79 | depth=10,
80 | dim_head=64,
81 | heads=16,
82 | num_id_token=5,
83 | num_queries=32,
84 | output_dim=2048,
85 | ff_mult=4,
86 | ):
87 | """
88 | Initializes the LocalFacialExtractor class.
89 |
90 | Parameters:
91 | - dim (int): The dimensionality of latent features.
92 | - depth (int): Total number of PerceiverAttention and FeedForward layers.
93 | - dim_head (int): Dimensionality of each attention head.
94 | - heads (int): Number of attention heads.
95 | - num_id_token (int): Number of tokens used for identity features.
96 | - num_queries (int): Number of query tokens for the latent representation.
97 | - output_dim (int): Output dimension after projection.
98 | - ff_mult (int): Multiplier for the feed-forward network hidden dimension.
99 | """
100 | super().__init__()
101 |
102 | # Storing identity token and query information
103 | self.num_id_token = num_id_token
104 | self.dim = dim
105 | self.num_queries = num_queries
106 | assert depth % 5 == 0
107 | self.depth = depth // 5
108 | scale = dim ** -0.5
109 |
110 | # Learnable latent query embeddings
111 | self.latents = nn.Parameter(torch.randn(1, num_queries, dim) * scale)
112 | # Projection layer to map the latent output to the desired dimension
113 | self.proj_out = nn.Parameter(scale * torch.randn(dim, output_dim))
114 |
115 | # Attention and FeedForward layer stack
116 | self.layers = nn.ModuleList([])
117 | for _ in range(depth):
118 | self.layers.append(
119 | nn.ModuleList(
120 | [
121 | PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), # Perceiver Attention layer
122 | FeedForward(dim=dim, mult=ff_mult), # FeedForward layer
123 | ]
124 | )
125 | )
126 |
127 | # Mappings for each of the 5 different ViT features
128 | for i in range(5):
129 | setattr(
130 | self,
131 | f'mapping_{i}',
132 | nn.Sequential(
133 | nn.Linear(1024, 1024),
134 | nn.LayerNorm(1024),
135 | nn.LeakyReLU(),
136 | nn.Linear(1024, 1024),
137 | nn.LayerNorm(1024),
138 | nn.LeakyReLU(),
139 | nn.Linear(1024, dim),
140 | ),
141 | )
142 |
143 | # Mapping for identity embedding vectors
144 | self.id_embedding_mapping = nn.Sequential(
145 | nn.Linear(1280, 1024),
146 | nn.LayerNorm(1024),
147 | nn.LeakyReLU(),
148 | nn.Linear(1024, 1024),
149 | nn.LayerNorm(1024),
150 | nn.LeakyReLU(),
151 | nn.Linear(1024, dim * num_id_token),
152 | )
153 |
154 | def forward(self, x, y):
155 | """
156 | Forward pass for LocalFacialExtractor.
157 |
158 | Parameters:
159 | - x (Tensor): The input identity embedding tensor of shape (batch_size, 1280).
160 | - y (list of Tensor): A list of 5 ViT feature tensors, each of shape (batch_size, num_tokens, 1024).
161 |
162 | Returns:
163 | - Tensor: The extracted latent features of shape (batch_size, num_queries, output_dim).
164 | """
165 |
166 | # Repeat latent queries for the batch size
167 | latents = self.latents.repeat(x.size(0), 1, 1)
168 |
169 | # Map the identity embedding to tokens
170 | x = self.id_embedding_mapping(x)
171 | x = x.reshape(-1, self.num_id_token, self.dim)
172 |
173 | # Concatenate identity tokens with the latent queries
174 | latents = torch.cat((latents, x), dim=1)
175 |
176 | # Process each of the 5 visual feature inputs
177 | for i in range(5):
178 | vit_feature = getattr(self, f'mapping_{i}')(y[i])
179 | ctx_feature = torch.cat((x, vit_feature), dim=1)
180 |
181 | # Pass through the PerceiverAttention and FeedForward layers
182 | for attn, ff in self.layers[i * self.depth: (i + 1) * self.depth]:
183 | latents = attn(ctx_feature, latents) + latents
184 | latents = ff(latents) + latents
185 |
186 | # Retain only the query latents
187 | latents = latents[:, :self.num_queries]
188 | # Project the latents to the output dimension
189 | latents = latents @ self.proj_out
190 | return latents
191 |
192 |
193 | class PerceiverCrossAttention(nn.Module):
194 | """
195 | Perceiver-style cross-attention in which latent features attend to identity/image features.
196 | Args:
197 | dim (int): Dimension of the input latent and output. Default is 3072.
198 | dim_head (int): Dimension of each attention head. Default is 128.
199 | heads (int): Number of attention heads. Default is 16.
200 | kv_dim (int): Dimension of the key/value input, allowing flexible cross-attention. Default is 2048.
201 |
202 | Attributes:
203 | scale (float): Scaling factor used in dot-product attention for numerical stability.
204 | norm1 (nn.LayerNorm): Layer normalization applied to the input image features.
205 | norm2 (nn.LayerNorm): Layer normalization applied to the latent features.
206 | to_q (nn.Linear): Linear layer for projecting the latent features into queries.
207 | to_kv (nn.Linear): Linear layer for projecting the input features into keys and values.
208 | to_out (nn.Linear): Linear layer for outputting the final result after attention.
209 |
210 | """
211 | def __init__(self, *, dim=3072, dim_head=128, heads=16, kv_dim=2048):
212 | super().__init__()
213 | self.scale = dim_head ** -0.5
214 | self.dim_head = dim_head
215 | self.heads = heads
216 | inner_dim = dim_head * heads
217 |
218 | # Layer normalization to stabilize training
219 | self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim)
220 | self.norm2 = nn.LayerNorm(dim)
221 |
222 | # Linear transformations to produce queries, keys, and values
223 | self.to_q = nn.Linear(dim, inner_dim, bias=False)
224 | self.to_kv = nn.Linear(dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False)
225 | self.to_out = nn.Linear(inner_dim, dim, bias=False)
226 |
227 | def forward(self, x, latents):
228 | """
229 |
230 | Args:
231 | x (torch.Tensor): Input image features with shape (batch_size, n1, D), where:
232 | - batch_size (b): Number of samples in the batch.
233 | - n1: Sequence length (e.g., number of patches or tokens).
234 | - D: Feature dimension.
235 |
236 | latents (torch.Tensor): Latent feature representations with shape (batch_size, n2, D), where:
237 | - n2: Number of latent elements.
238 |
239 | Returns:
240 | torch.Tensor: Attention-modulated features with shape (batch_size, n2, D).
241 |
242 | """
243 | # Apply layer normalization to the input image and latent features
244 | x = self.norm1(x)
245 | latents = self.norm2(latents)
246 |
247 | b, seq_len, _ = latents.shape
248 |
249 | # Compute queries, keys, and values
250 | q = self.to_q(latents)
251 | k, v = self.to_kv(x).chunk(2, dim=-1)
252 |
253 | # Reshape tensors to split into attention heads
254 | q = reshape_tensor(q, self.heads)
255 | k = reshape_tensor(k, self.heads)
256 | v = reshape_tensor(v, self.heads)
257 |
258 | # Compute attention weights
259 | scale = 1 / math.sqrt(math.sqrt(self.dim_head))
260 | weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable scaling than post-division
261 | weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
262 |
263 | # Compute the output via weighted combination of values
264 | out = weight @ v
265 |
266 | # Reshape and permute to prepare for final linear transformation
267 | out = out.permute(0, 2, 1, 3).reshape(b, seq_len, -1)
268 |
269 | return self.to_out(out)
270 |
271 |
272 |
273 | class MultiIPRouter(nn.Module):
274 | def __init__(self, *, dim=3072, id_dim=2048, id_length=32):
275 | super().__init__()
276 |
277 | inner_dim = 2048
278 |
279 | self.norm1 = nn.LayerNorm(id_dim)
280 | self.norm2 = nn.LayerNorm(dim)
281 |
282 | self.id_merge = nn.Linear(id_length, 1, bias=False)
283 | self.to_q = nn.Linear(dim, inner_dim, bias=False)
284 | self.to_kv = nn.Linear(id_dim, inner_dim, bias=False)
285 |
286 | def forward(self, x, latents):
287 | """
288 | Args:
289 | x (list of torch.Tensor or None): per-sample identity features,
290 | each of shape (num_ids, id_length, id_dim), or None if a sample has no identity
291 | latents (torch.Tensor): video latent features
292 | shape (b, n2, D)
293 | """
294 | x = [self.norm1(item) if item is not None else None for item in x] # [N1*32*2048, N2*32*2048]
295 | latents = self.norm2(latents) #B*4096*3072
296 |
297 | x = [self.id_merge(item.transpose(-2, -1)).squeeze(-1) if item is not None else None for item in x] #[N1*2048, N2*2048]
298 |
299 | q = self.to_q(latents) #B*4096*2048
300 | kv = [self.to_kv(item) if item is not None else None for item in x] #[N1*2048, N2*2048], len=B
301 | #print(q.shape, kv[0].shape, kv[1].shape) #torch.Size([2, 1024, 2048]) torch.Size([1, 2048]) torch.Size([4, 2048])
302 |
303 | weights = []
304 | for sub_q, sub_kv in zip(q, kv):
305 | if sub_kv is not None:
306 | weight = sub_q.unsqueeze(0) @ sub_kv.transpose(-2, -1) #1*4096*N
307 | #weight = torch.softmax(weight.float(), dim=-1).to(weight.dtype) # 1*4096*N
308 | else:
309 | weight = None
310 | weights.append(weight)
311 |
312 | return weights
--------------------------------------------------------------------------------
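
A minimal shape-check sketch of how the modules above fit together, assuming random CPU tensors and the default hyper-parameters: LocalFacialExtractor turns one 1280-d identity embedding plus five ViT feature maps into 32 identity tokens of width 2048, and PerceiverCrossAttention lets 3072-d video latents attend to those tokens. The batch size and the 577-token ViT sequence length are illustrative, not values prescribed by the repository.

import torch
from models.local_facial_extractor import LocalFacialExtractor, PerceiverCrossAttention

extractor = LocalFacialExtractor()                          # dim=1024, num_queries=32, output_dim=2048
id_embed = torch.randn(2, 1280)                             # (batch, 1280) identity embedding
vit_feats = [torch.randn(2, 577, 1024) for _ in range(5)]   # five ViT hidden-state tensors
id_tokens = extractor(id_embed, vit_feats)
print(id_tokens.shape)                                      # torch.Size([2, 32, 2048])

cross_attn = PerceiverCrossAttention()                      # dim=3072, kv_dim=2048
video_latents = torch.randn(2, 4096, 3072)                  # (batch, tokens, hidden) transformer states
fused = cross_attn(id_tokens, video_latents)
print(fused.shape)                                          # torch.Size([2, 4096, 3072])
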
/models/eva_clip/pretrained.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import os
3 | import urllib
4 | import warnings
5 | from functools import partial
6 | from typing import Dict, Union
7 |
8 | from tqdm import tqdm
9 |
10 | try:
11 | from huggingface_hub import hf_hub_download
12 | _has_hf_hub = True
13 | except ImportError:
14 | hf_hub_download = None
15 | _has_hf_hub = False
16 |
17 |
18 | def _pcfg(url='', hf_hub='', filename='', mean=None, std=None):
19 | return dict(
20 | url=url,
21 | hf_hub=hf_hub,
22 | mean=mean,
23 | std=std,
24 | )
25 |
26 | _VITB32 = dict(
27 | openai=_pcfg(
28 | "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
29 | laion400m_e31=_pcfg(
30 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
31 | laion400m_e32=_pcfg(
32 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
33 | laion2b_e16=_pcfg(
34 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth"),
35 | laion2b_s34b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-laion2B-s34B-b79K/')
36 | )
37 |
38 | _VITB32_quickgelu = dict(
39 | openai=_pcfg(
40 | "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
41 | laion400m_e31=_pcfg(
42 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
43 | laion400m_e32=_pcfg(
44 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
45 | )
46 |
47 | _VITB16 = dict(
48 | openai=_pcfg(
49 | "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt"),
50 | laion400m_e31=_pcfg(
51 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt"),
52 | laion400m_e32=_pcfg(
53 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt"),
54 | laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-laion2B-s34B-b88K/'),
55 | )
56 |
57 | _EVAB16 = dict(
58 | eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'),
59 | eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'),
60 | eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'),
61 | eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'),
62 | )
63 |
64 | _VITB16_PLUS_240 = dict(
65 | laion400m_e31=_pcfg(
66 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt"),
67 | laion400m_e32=_pcfg(
68 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt"),
69 | )
70 |
71 | _VITL14 = dict(
72 | openai=_pcfg(
73 | "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"),
74 | laion400m_e31=_pcfg(
75 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt"),
76 | laion400m_e32=_pcfg(
77 | "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt"),
78 | laion2b_s32b_b82k=_pcfg(
79 | hf_hub='laion/CLIP-ViT-L-14-laion2B-s32B-b82K/',
80 | mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
81 | )
82 |
83 | _EVAL14 = dict(
84 | eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'),
85 | eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'),
86 | eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'),
87 | eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'),
88 | )
89 |
90 | _VITL14_336 = dict(
91 | openai=_pcfg(
92 | "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"),
93 | )
94 |
95 | _EVAL14_336 = dict(
96 | eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'),
97 | eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'),
98 | eva_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'),
99 | eva02_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'),
100 | )
101 |
102 | _VITH14 = dict(
103 | laion2b_s32b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-laion2B-s32B-b79K/'),
104 | )
105 |
106 | _VITg14 = dict(
107 | laion2b_s12b_b42k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s12B-b42K/'),
108 | laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s34B-b88K/'),
109 | )
110 |
111 | _EVAg14 = dict(
112 | eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'),
113 | eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'),
114 | eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'),
115 | eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'),
116 | )
117 |
118 | _EVAg14_PLUS = dict(
119 | eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'),
120 | eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'),
121 | eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'),
122 | eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'),
123 | )
124 |
125 | _VITbigG14 = dict(
126 | laion2b_s39b_b160k=_pcfg(hf_hub='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/'),
127 | )
128 |
129 | _EVAbigE14 = dict(
130 | eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
131 | eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
132 | eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'),
133 | eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'),
134 | )
135 |
136 | _EVAbigE14_PLUS = dict(
137 | eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
138 | eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
139 | eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'),
140 | eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'),
141 | )
142 |
143 |
144 | _PRETRAINED = {
145 | # "ViT-B-32": _VITB32,
146 | "OpenaiCLIP-B-32": _VITB32,
147 | "OpenCLIP-B-32": _VITB32,
148 |
149 | # "ViT-B-32-quickgelu": _VITB32_quickgelu,
150 | "OpenaiCLIP-B-32-quickgelu": _VITB32_quickgelu,
151 | "OpenCLIP-B-32-quickgelu": _VITB32_quickgelu,
152 |
153 | # "ViT-B-16": _VITB16,
154 | "OpenaiCLIP-B-16": _VITB16,
155 | "OpenCLIP-B-16": _VITB16,
156 |
157 | "EVA02-B-16": _EVAB16,
158 | "EVA02-CLIP-B-16": _EVAB16,
159 |
160 | # "ViT-B-16-plus-240": _VITB16_PLUS_240,
161 | "OpenCLIP-B-16-plus-240": _VITB16_PLUS_240,
162 |
163 | # "ViT-L-14": _VITL14,
164 | "OpenaiCLIP-L-14": _VITL14,
165 | "OpenCLIP-L-14": _VITL14,
166 |
167 | "EVA02-L-14": _EVAL14,
168 | "EVA02-CLIP-L-14": _EVAL14,
169 |
170 | # "ViT-L-14-336": _VITL14_336,
171 | "OpenaiCLIP-L-14-336": _VITL14_336,
172 |
173 | "EVA02-CLIP-L-14-336": _EVAL14_336,
174 |
175 | # "ViT-H-14": _VITH14,
176 | # "ViT-g-14": _VITg14,
177 | "OpenCLIP-H-14": _VITH14,
178 | "OpenCLIP-g-14": _VITg14,
179 |
180 | "EVA01-CLIP-g-14": _EVAg14,
181 | "EVA01-CLIP-g-14-plus": _EVAg14_PLUS,
182 |
183 | # "ViT-bigG-14": _VITbigG14,
184 | "OpenCLIP-bigG-14": _VITbigG14,
185 |
186 | "EVA02-CLIP-bigE-14": _EVAbigE14,
187 | "EVA02-CLIP-bigE-14-plus": _EVAbigE14_PLUS,
188 | }
189 |
190 |
191 | def _clean_tag(tag: str):
192 | # normalize pretrained tags
193 | return tag.lower().replace('-', '_')
194 |
195 |
196 | def list_pretrained(as_str: bool = False):
197 | """ returns list of pretrained models
198 | Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
199 | """
200 | return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()]
201 |
202 |
203 | def list_pretrained_models_by_tag(tag: str):
204 | """ return all models having the specified pretrain tag """
205 | models = []
206 | tag = _clean_tag(tag)
207 | for k in _PRETRAINED.keys():
208 | if tag in _PRETRAINED[k]:
209 | models.append(k)
210 | return models
211 |
212 |
213 | def list_pretrained_tags_by_model(model: str):
214 | """ return all pretrain tags for the specified model architecture """
215 | tags = []
216 | if model in _PRETRAINED:
217 | tags.extend(_PRETRAINED[model].keys())
218 | return tags
219 |
220 |
221 | def is_pretrained_cfg(model: str, tag: str):
222 | if model not in _PRETRAINED:
223 | return False
224 | return _clean_tag(tag) in _PRETRAINED[model]
225 |
226 |
227 | def get_pretrained_cfg(model: str, tag: str):
228 | if model not in _PRETRAINED:
229 | return {}
230 | model_pretrained = _PRETRAINED[model]
231 | return model_pretrained.get(_clean_tag(tag), {})
232 |
233 |
234 | def get_pretrained_url(model: str, tag: str):
235 | cfg = get_pretrained_cfg(model, _clean_tag(tag))
236 | return cfg.get('url', '')
237 |
238 |
239 | def download_pretrained_from_url(
240 | url: str,
241 | cache_dir: Union[str, None] = None,
242 | ):
243 | if not cache_dir:
244 | cache_dir = os.path.expanduser("~/.cache/clip")
245 | os.makedirs(cache_dir, exist_ok=True)
246 | filename = os.path.basename(url)
247 |
248 | if 'openaipublic' in url:
249 | expected_sha256 = url.split("/")[-2]
250 | elif 'mlfoundations' in url:
251 | expected_sha256 = os.path.splitext(filename)[0].split("-")[-1]
252 | else:
253 | expected_sha256 = ''
254 |
255 | download_target = os.path.join(cache_dir, filename)
256 |
257 | if os.path.exists(download_target) and not os.path.isfile(download_target):
258 | raise RuntimeError(f"{download_target} exists and is not a regular file")
259 |
260 | if os.path.isfile(download_target):
261 | if expected_sha256:
262 | if hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
263 | return download_target
264 | else:
265 | warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
266 | else:
267 | return download_target
268 |
269 | with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
270 | with tqdm(total=int(source.headers.get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
271 | while True:
272 | buffer = source.read(8192)
273 | if not buffer:
274 | break
275 |
276 | output.write(buffer)
277 | loop.update(len(buffer))
278 |
279 | if expected_sha256 and not hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
280 | raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
281 |
282 | return download_target
283 |
284 |
285 | def has_hf_hub(necessary=False):
286 | if not _has_hf_hub and necessary:
287 | # if no HF Hub module installed, and it is necessary to continue, raise error
288 | raise RuntimeError(
289 | 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
290 | return _has_hf_hub
291 |
292 |
293 | def download_pretrained_from_hf(
294 | model_id: str,
295 | filename: str = 'open_clip_pytorch_model.bin',
296 | revision=None,
297 | cache_dir: Union[str, None] = None,
298 | ):
299 | has_hf_hub(True)
300 | cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)
301 | return cached_file
302 |
303 |
304 | def download_pretrained(
305 | cfg: Dict,
306 | force_hf_hub: bool = False,
307 | cache_dir: Union[str, None] = None,
308 | ):
309 | target = ''
310 | if not cfg:
311 | return target
312 |
313 | download_url = cfg.get('url', '')
314 | download_hf_hub = cfg.get('hf_hub', '')
315 | if download_hf_hub and force_hf_hub:
316 | # use HF hub even if url exists
317 | download_url = ''
318 |
319 | if download_url:
320 | target = download_pretrained_from_url(download_url, cache_dir=cache_dir)
321 | elif download_hf_hub:
322 | has_hf_hub(True)
323 | # we assume the hf_hub entries in pretrained config combine model_id + filename in
324 | # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and
325 | # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.
326 | model_id, filename = os.path.split(download_hf_hub)
327 | if filename:
328 | target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)
329 | else:
330 | target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
331 |
332 | return target
333 |
--------------------------------------------------------------------------------
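
A small usage sketch for the registry helpers above; the model name and tag come from the _PRETRAINED table, while the actual download needs network access and the huggingface_hub package.

from models.eva_clip.pretrained import (
    list_pretrained_tags_by_model,
    get_pretrained_cfg,
    download_pretrained,
)

print(list_pretrained_tags_by_model("EVA02-CLIP-L-14-336"))
# ['eva_clip', 'eva02_clip', 'eva_clip_224to336', 'eva02_clip_224to336']

cfg = get_pretrained_cfg("EVA02-CLIP-L-14-336", "eva02_clip")
print(cfg["hf_hub"])        # QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt

# checkpoint_path = download_pretrained(cfg)   # uncomment to fetch the weights locally
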
/app.py:
--------------------------------------------------------------------------------
1 | """
2 | Modified from: https://github.com/PKU-YuanGroup/ConsisID/blob/main/app.py
3 | """
4 |
5 | import math
6 | import os
7 | import random
8 | import threading
9 | import time
10 | from datetime import datetime, timedelta
11 | from PIL import Image, ImageOps
12 | import numpy as np
13 |
14 | import gradio as gr
15 | from diffusers import CogVideoXDPMScheduler
16 | import spaces
17 | import torch
18 | from models.utils import process_face_embeddings_split
19 | from models.pipeline_ingredients import IngredientsPipeline
20 | from moviepy import VideoFileClip
21 |
22 | from diffusers.image_processor import VaeImageProcessor
23 | from diffusers.training_utils import free_memory
24 | import insightface
25 | from insightface.app import FaceAnalysis
26 | from facexlib.parsing import init_parsing_model
27 | from facexlib.utils.face_restoration_helper import FaceRestoreHelper
28 | from diffusers.utils import export_to_video, load_image, load_video
29 | from models.transformer_ingredients import IngredientsTransformer3DModel
30 | from models.eva_clip import create_model_and_transforms
31 | from models.eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
32 | from models.eva_clip.utils_qformer import resize_numpy_image_long
33 |
34 | # 0. prepare configs
35 | model_path = "/maindata/data/shared/public/zhengcong.fei/ckpts/cogvideox1.5/consistent_id_sam"
36 | dtype = torch.bfloat16
37 | device = "cuda" if torch.cuda.is_available() else "cpu"
38 |
39 | os.makedirs("./output", exist_ok=True)
40 | os.makedirs("./gradio_tmp", exist_ok=True)
41 |
42 | output_path = "output"
43 |
44 | # 1. load main models
45 | if os.path.exists(os.path.join(model_path, "transformer_ema")):
46 | subfolder = "transformer_ema"
47 | else:
48 | subfolder = "transformer"
49 |
50 | transformer = IngredientsTransformer3DModel.from_pretrained_cus(model_path, subfolder=subfolder)
51 | transformer.eval()
52 | scheduler = CogVideoXDPMScheduler.from_pretrained(model_path, subfolder="scheduler")
53 |
54 | try:
55 | is_kps = transformer.config.is_kps
56 | except Exception:
57 | is_kps = False
58 |
59 | print("is kps", is_kps)
60 | # 2. load face helper models
61 | face_helper = FaceRestoreHelper(
62 | upscale_factor=1,
63 | face_size=512,
64 | crop_ratio=(1, 1),
65 | det_model='retinaface_resnet50',
66 | save_ext='png',
67 | device=device,
68 | model_rootpath=os.path.join(model_path, "face_encoder")
69 | )
70 | face_helper.face_parse = None
71 | face_helper.face_parse = init_parsing_model(model_name='bisenet', device=device, model_rootpath=os.path.join(model_path, "face_encoder"))
72 | face_helper.face_det.eval()
73 | face_helper.face_parse.eval()
74 |
75 | model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', os.path.join(model_path, "face_encoder", "EVA02_CLIP_L_336_psz14_s6B.pt"), force_custom_clip=True)
76 | face_clip_model = model.visual
77 | face_clip_model.eval()
78 |
79 | eva_transform_mean = getattr(face_clip_model, 'image_mean', OPENAI_DATASET_MEAN)
80 | eva_transform_std = getattr(face_clip_model, 'image_std', OPENAI_DATASET_STD)
81 | if not isinstance(eva_transform_mean, (list, tuple)):
82 | eva_transform_mean = (eva_transform_mean,) * 3
83 | if not isinstance(eva_transform_std, (list, tuple)):
84 | eva_transform_std = (eva_transform_std,) * 3
85 | eva_transform_mean = eva_transform_mean
86 | eva_transform_std = eva_transform_std
87 |
88 | face_main_model = FaceAnalysis(name='antelopev2', root=os.path.join(model_path, "face_encoder"), providers=['CUDAExecutionProvider'])
89 | handler_ante = insightface.model_zoo.get_model(f'{model_path}/face_encoder/models/antelopev2/glintr100.onnx', providers=['CUDAExecutionProvider'])
90 | face_main_model.prepare(ctx_id=0, det_size=(640, 640))
91 | handler_ante.prepare(ctx_id=0)
92 |
93 | face_clip_model.to(device, dtype=dtype)
94 | face_helper.face_det.to(device)
95 | face_helper.face_parse.to(device)
96 | transformer.to(device, dtype=dtype)
97 | free_memory()
98 |
99 | pipe = IngredientsPipeline.from_pretrained(model_path, transformer=transformer, scheduler=scheduler, torch_dtype=dtype)
100 |
101 | # 3. Set scheduler.
102 | scheduler_args = {}
103 | if "variance_type" in pipe.scheduler.config:
104 | variance_type = pipe.scheduler.config.variance_type
105 | if variance_type in ["learned", "learned_range"]:
106 | variance_type = "fixed_small"
107 | scheduler_args["variance_type"] = variance_type
108 |
109 | pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
110 |
111 | # 4. Move the pipeline to the target device.
112 | pipe.to(device)
113 |
114 |
115 | def convert_to_gif(video_path):
116 | clip = VideoFileClip(video_path)
117 | gif_path = video_path.replace(".mp4", ".gif")
118 | clip.write_gif(gif_path, fps=8)
119 | return gif_path
120 |
121 |
122 | def generate(
123 | prompt,
124 | input_image1,
125 | input_image2,
126 | negative_prompt: str = None,
127 | num_inference_steps: int = 50,
128 | guidance_scale: float = 6.0,
129 | num_videos_per_prompt: int = 1,
130 | seed: int = 42,
131 | ):
132 | """
133 | Generates a video based on the given prompt and saves it to the specified path.
134 |
135 | Parameters:
136 | - prompt (str): The description of the video to be generated.
137 | - input_image1 (np.ndarray): First identity image; it should contain a clear face.
138 | - input_image2 (np.ndarray): Second identity image; it should contain a clear face.
139 | - negative_prompt (str): Optional negative prompt (not forwarded to the pipeline in this demo).
140 | - num_inference_steps (int): Number of steps for the inference process. More steps can result in better quality.
141 | - guidance_scale (float): The scale for classifier-free guidance. Higher values can lead to better alignment with the prompt.
142 | - num_videos_per_prompt (int): Number of videos to generate per prompt.
143 | - seed (int): The seed for reproducibility.
144 |
145 | Returns the path of the exported .mp4 file.
146 | """
147 | torch.manual_seed(seed)
148 | torch.cuda.manual_seed_all(seed)
149 | np.random.seed(seed)
150 | random.seed(seed)
151 | torch.backends.cudnn.deterministic = True
152 |
153 | # Turn these on if you lack enough GPU memory (e.g., no H100-class GPU); inference will be slower and quality may drop slightly.
154 | # pipe.enable_model_cpu_offload()
155 | # pipe.enable_sequential_cpu_offload()
156 | # pipe.vae.enable_slicing()
157 | # pipe.vae.enable_tiling()
158 |
159 | # process face data
160 | img_file_path_list = [input_image1, input_image2]
161 |
162 | print(len(img_file_path_list))
163 | print(prompt)
164 |
165 | id_cond_list = []
166 | id_vit_hidden_list = []
167 |
168 | id_image_list = []
169 | for img_file_path in img_file_path_list:
170 | id_image = np.array(ImageOps.exif_transpose(Image.fromarray(img_file_path)).convert("RGB"))
171 | id_image = resize_numpy_image_long(id_image, 1024)
172 | id_image_list.append(id_image)
173 | id_cond_list, id_vit_hidden_list, align_crop_face_image, face_kps, _ = process_face_embeddings_split(face_helper, face_clip_model, handler_ante,
174 | eva_transform_mean, eva_transform_std,
175 | face_main_model, device, dtype, id_image_list,
176 | original_id_images=id_image_list, is_align_face=True,
177 | cal_uncond=False)
178 | if is_kps:
179 | kps_cond = face_kps
180 | else:
181 | kps_cond = None
182 | print("kps_cond: ", kps_cond, "align_face: ", align_crop_face_image.size(), )
183 | print("id_cond: ", len(id_cond_list), )
184 |
185 | tensor = align_crop_face_image.cpu().detach()
186 | tensor = tensor.squeeze()
187 | tensor = tensor.permute(1, 2, 0)
188 | tensor = tensor.numpy() * 255
189 | tensor = tensor.astype(np.uint8)
190 | image = ImageOps.exif_transpose(Image.fromarray(tensor))
191 |
192 | prompt = prompt.strip('"')
193 |
194 | generator = torch.Generator(device).manual_seed(seed) if seed else None
195 |
196 | with torch.no_grad():
197 | video_generate = pipe(
198 | prompt=prompt,
199 | image=image,
200 | num_videos_per_prompt=num_videos_per_prompt,
201 | num_inference_steps=num_inference_steps,
202 | num_frames=49,
203 | use_dynamic_cfg=False,
204 | guidance_scale=guidance_scale,
205 | generator=generator,
206 | id_vit_hidden=id_vit_hidden_list,
207 | id_cond=id_cond_list,
208 | kps_cond=kps_cond,
209 | ).frames[0]
210 |
211 | # 5. Export the generated frames to a video file. fps must be 8 for original video.
212 | filename = f"{output_path}/results.mp4"
213 | print(filename)
214 | export_to_video(video_generate, filename, fps=8)
215 | return filename
216 |
217 |
218 |
219 | with gr.Blocks() as demo:
220 | gr.Markdown("""
221 |
222 |
223 |
224 |
225 | Ingredients Space
226 |
227 |
233 |
234 | ⚠️ This demo is for academic research and experiential use only.
235 |
236 | """)
237 |
238 | with gr.Row():
239 | with gr.Column():
240 | with gr.Accordion("Multi-ID Image Input", open=True):
241 | image_input1 = gr.Image(label="Input Image 1 (should contain clear face, preferably half-body or full-body image)")
242 | image_input2 = gr.Image(label="Input Image 2 (should contain clear face, preferably half-body or full-body image)")
243 | prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here. Ingredients has high requirements for prompt quality. You can use GPT-4o to refine the input text prompt, example can be found on our github.", lines=5)
244 | negative_prompt = gr.Textbox(label="Negative Prompt (Default is None)", placeholder="Enter your negative prompt here. Default is None", lines=1)
245 |
246 | with gr.Group():
247 | with gr.Column():
248 | num_inference_steps = gr.Slider(1, 100, value=50, step=1, label="Number of Inference Steps")
249 | with gr.Row():
250 | seed_param = gr.Number(
251 | label="Inference Seed (Enter a positive number, -1 for random)", value=2025
252 | )
253 | cfg_param = gr.Number(
254 | label="Guidance Scale (Enter a positive number, default = 6.0)", value=6.0
255 | )
256 |
257 | generate_button = gr.Button("🎬 Generate Video")
258 |
259 | with gr.Column():
260 | video_output = gr.Video(label="Ingredients Generate Video", width=720, height=480)
261 | with gr.Row():
262 | download_video_button = gr.File(label="📥 Download Video", visible=False)
263 | download_gif_button = gr.File(label="📥 Download GIF", visible=False)
264 | seed_text = gr.Number(label="Seed Used for Video Generation", visible=False)
265 |
266 | def run(
267 | prompt,
268 | image_input1,
269 | image_input2,
270 | negative_prompt,
271 | num_inference_steps,
272 | cfg_param,
273 | seed_value,
274 | progress=gr.Progress(track_tqdm=True)
275 | ):
276 | video_path = generate(
277 | prompt,
278 | image_input1,
279 | image_input2,
280 | negative_prompt=negative_prompt,
281 | num_inference_steps=num_inference_steps,
282 | guidance_scale=cfg_param,
283 | seed=seed_value,
284 | )
285 |
286 | video_update = gr.update(visible=True, value=video_path)
287 | gif_path = convert_to_gif(video_path)
288 | gif_update = gr.update(visible=True, value=gif_path)
289 | seed_update = gr.update(visible=True, value=seed_value)
290 |
291 | return video_path, video_update, gif_update, seed_update
292 |
293 | generate_button.click(
294 | fn=run,
295 | inputs=[prompt, image_input1, image_input2, negative_prompt, num_inference_steps, cfg_param, seed_param, ],
296 | outputs=[video_output, download_video_button, download_gif_button, seed_text],
297 | )
298 |
299 |
300 |
301 | if __name__ == "__main__":
302 | demo.queue(max_size=15)
303 | demo.launch(share=True)
--------------------------------------------------------------------------------
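
A headless sketch of reusing the demo's generate() without launching the Gradio UI. Importing app executes the module-level model loading, so this assumes the checkpoints referenced by model_path are available locally and a CUDA GPU is present; the two face-image paths are placeholders.

import numpy as np
from PIL import Image
from app import generate, convert_to_gif

face1 = np.array(Image.open("asserts/0.jpg").convert("RGB"))   # placeholder identity images
face2 = np.array(Image.open("asserts/1.jpg").convert("RGB"))

video_path = generate(
    prompt="Two people chat in a sunlit cafe, cinematic lighting.",
    input_image1=face1,
    input_image2=face2,
    num_inference_steps=50,
    guidance_scale=6.0,
    seed=2025,
)
gif_path = convert_to_gif(video_path)
print(video_path, gif_path)
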
/models/utils.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import math
3 | import numpy as np
4 | from PIL import Image
5 |
6 | import torch
7 | from torchvision.transforms import InterpolationMode
8 | from torchvision.transforms.functional import normalize, resize
8 | from torchvision.utils import make_grid  # needed by tensor2img's 4D mini-batch branch
9 | from transformers import T5EncoderModel, T5Tokenizer
10 | from typing import List, Optional, Tuple, Union
11 | from diffusers.models.embeddings import get_3d_rotary_pos_embed
12 | from diffusers.pipelines.cogvideo.pipeline_cogvideox import get_resize_crop_region_for_grid
13 |
14 |
15 | def tensor_to_pil(src_img_tensor):
16 | img = src_img_tensor.clone().detach()
17 | if img.dtype == torch.bfloat16:
18 | img = img.to(torch.float32)
19 | img = img.cpu().numpy()
20 | img = np.transpose(img, (1, 2, 0))
21 | img = img.astype(np.uint8)
22 | pil_image = Image.fromarray(img)
23 | return pil_image
24 |
25 |
26 | def _get_t5_prompt_embeds(
27 | tokenizer: T5Tokenizer,
28 | text_encoder: T5EncoderModel,
29 | prompt: Union[str, List[str]],
30 | num_videos_per_prompt: int = 1,
31 | max_sequence_length: int = 226,
32 | device: Optional[torch.device] = None,
33 | dtype: Optional[torch.dtype] = None,
34 | text_input_ids=None,
35 | ):
36 | prompt = [prompt] if isinstance(prompt, str) else prompt
37 | batch_size = len(prompt)
38 |
39 | if tokenizer is not None:
40 | text_inputs = tokenizer(
41 | prompt,
42 | padding="max_length",
43 | max_length=max_sequence_length,
44 | truncation=True,
45 | add_special_tokens=True,
46 | return_tensors="pt",
47 | )
48 | text_input_ids = text_inputs.input_ids
49 | else:
50 | if text_input_ids is None:
51 | raise ValueError("`text_input_ids` must be provided when the tokenizer is not specified.")
52 |
53 | prompt_embeds = text_encoder(text_input_ids.to(device))[0]
54 | prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
55 |
56 | # duplicate text embeddings for each generation per prompt, using mps friendly method
57 | _, seq_len, _ = prompt_embeds.shape
58 | prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
59 | prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
60 |
61 | return prompt_embeds
62 |
63 |
64 | def encode_prompt(
65 | tokenizer: T5Tokenizer,
66 | text_encoder: T5EncoderModel,
67 | prompt: Union[str, List[str]],
68 | num_videos_per_prompt: int = 1,
69 | max_sequence_length: int = 226,
70 | device: Optional[torch.device] = None,
71 | dtype: Optional[torch.dtype] = None,
72 | text_input_ids=None,
73 | ):
74 | prompt = [prompt] if isinstance(prompt, str) else prompt
75 | prompt_embeds = _get_t5_prompt_embeds(
76 | tokenizer,
77 | text_encoder,
78 | prompt=prompt,
79 | num_videos_per_prompt=num_videos_per_prompt,
80 | max_sequence_length=max_sequence_length,
81 | device=device,
82 | dtype=dtype,
83 | text_input_ids=text_input_ids,
84 | )
85 | return prompt_embeds
86 |
87 |
88 | def compute_prompt_embeddings(
89 | tokenizer, text_encoder, prompt, max_sequence_length, device, dtype, requires_grad: bool = False
90 | ):
91 | if requires_grad:
92 | prompt_embeds = encode_prompt(
93 | tokenizer,
94 | text_encoder,
95 | prompt,
96 | num_videos_per_prompt=1,
97 | max_sequence_length=max_sequence_length,
98 | device=device,
99 | dtype=dtype,
100 | )
101 | else:
102 | with torch.no_grad():
103 | prompt_embeds = encode_prompt(
104 | tokenizer,
105 | text_encoder,
106 | prompt,
107 | num_videos_per_prompt=1,
108 | max_sequence_length=max_sequence_length,
109 | device=device,
110 | dtype=dtype,
111 | )
112 | return prompt_embeds
113 |
114 |
115 | def prepare_rotary_positional_embeddings(
116 | height: int,
117 | width: int,
118 | num_frames: int,
119 | vae_scale_factor_spatial: int = 8,
120 | patch_size: int = 2,
121 | attention_head_dim: int = 64,
122 | device: Optional[torch.device] = None,
123 | base_height: int = 480,
124 | base_width: int = 720,
125 | ) -> Tuple[torch.Tensor, torch.Tensor]:
126 | grid_height = height // (vae_scale_factor_spatial * patch_size)
127 | grid_width = width // (vae_scale_factor_spatial * patch_size)
128 | base_size_width = base_width // (vae_scale_factor_spatial * patch_size)
129 | base_size_height = base_height // (vae_scale_factor_spatial * patch_size)
130 |
131 | grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size_width, base_size_height)
132 | freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
133 | embed_dim=attention_head_dim,
134 | crops_coords=grid_crops_coords,
135 | grid_size=(grid_height, grid_width),
136 | temporal_size=num_frames,
137 | )
138 |
139 | freqs_cos = freqs_cos.to(device=device)
140 | freqs_sin = freqs_sin.to(device=device)
141 | return freqs_cos, freqs_sin
142 |
143 |
144 |
145 | def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
146 | """Convert torch Tensors into image numpy arrays.
147 |
148 | After clamping to [min, max], values will be normalized to [0, 1].
149 |
150 | Args:
151 | tensor (Tensor or list[Tensor]): Accept shapes:
152 | 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
153 | 2) 3D Tensor of shape (3/1 x H x W);
154 | 3) 2D Tensor of shape (H x W).
155 | Tensor channel should be in RGB order.
156 | rgb2bgr (bool): Whether to change rgb to bgr.
157 | out_type (numpy type): output types. If ``np.uint8``, transform outputs
158 | to uint8 type with range [0, 255]; otherwise, float type with
159 | range [0, 1]. Default: ``np.uint8``.
160 | min_max (tuple[int]): min and max values for clamp.
161 |
162 | Returns:
163 | (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of
164 | shape (H x W). The channel order is BGR.
165 | """
166 | if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
167 | raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')
168 |
169 | if torch.is_tensor(tensor):
170 | tensor = [tensor]
171 | result = []
172 | for _tensor in tensor:
173 | _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
174 | _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
175 |
176 | n_dim = _tensor.dim()
177 | if n_dim == 4:
178 | img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()
179 | img_np = img_np.transpose(1, 2, 0)
180 | if rgb2bgr:
181 | img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
182 | elif n_dim == 3:
183 | img_np = _tensor.numpy()
184 | img_np = img_np.transpose(1, 2, 0)
185 | if img_np.shape[2] == 1: # gray image
186 | img_np = np.squeeze(img_np, axis=2)
187 | else:
188 | if rgb2bgr:
189 | img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
190 | elif n_dim == 2:
191 | img_np = _tensor.numpy()
192 | else:
193 | raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}')
194 | if out_type == np.uint8:
195 | # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
196 | img_np = (img_np * 255.0).round()
197 | img_np = img_np.astype(out_type)
198 | result.append(img_np)
199 | if len(result) == 1:
200 | result = result[0]
201 | return result
202 |
203 |
204 | def img2tensor(imgs, bgr2rgb=True, float32=True):
205 | """Numpy array to tensor.
206 |
207 | Args:
208 | imgs (list[ndarray] | ndarray): Input images.
209 | bgr2rgb (bool): Whether to change bgr to rgb.
210 | float32 (bool): Whether to change to float32.
211 |
212 | Returns:
213 | list[tensor] | tensor: Tensor images. If returned results only have
214 | one element, just return tensor.
215 | """
216 |
217 | def _totensor(img, bgr2rgb, float32):
218 | if img.shape[2] == 3 and bgr2rgb:
219 | if img.dtype == 'float64':
220 | img = img.astype('float32')
221 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
222 | img = torch.from_numpy(img.transpose(2, 0, 1))
223 | if float32:
224 | img = img.float()
225 | return img
226 |
227 | if isinstance(imgs, list):
228 | return [_totensor(img, bgr2rgb, float32) for img in imgs]
229 | return _totensor(imgs, bgr2rgb, float32)
230 |
231 |
232 | def to_gray(img):
233 | x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3]
234 | x = x.repeat(1, 3, 1, 1)
235 | return x
236 |
237 |
238 | def draw_kps(image_pil, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]):
239 | stickwidth = 4
240 | limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
241 | kps = np.array(kps)
242 |
243 | w, h = image_pil.size
244 | out_img = np.zeros([h, w, 3])
245 |
246 | for i in range(len(limbSeq)):
247 | index = limbSeq[i]
248 | color = color_list[index[0]]
249 |
250 | x = kps[index][:, 0]
251 | y = kps[index][:, 1]
252 | length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
253 | angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
254 | polygon = cv2.ellipse2Poly((int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
255 | out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
256 | out_img = (out_img * 0.6).astype(np.uint8)
257 |
258 | for idx_kp, kp in enumerate(kps):
259 | color = color_list[idx_kp]
260 | x, y = kp
261 | out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
262 |
263 | out_img_pil = Image.fromarray(out_img.astype(np.uint8))
264 | return out_img_pil
265 |
266 |
267 |
268 | def process_face_embeddings_split(face_helper, clip_vision_model, handler_ante, eva_transform_mean, eva_transform_std, app, device, weight_dtype, images, original_id_images=None, is_align_face=True, cal_uncond=False):
269 | """
270 |     Only used at inference time, to extract identity embeddings from a set of reference images.
271 | 
272 |     Args:
273 |         images (list[np.ndarray]): list of RGB images with values in [0, 255].
274 | """
275 | print("input image number:", len(images))
276 | id_cond_list = []
277 | id_vit_hidden_list = []
278 | new_img = Image.new("RGB", (720, 480), color=(255, 255, 255))
279 |
280 | i = 0
281 | for image in images:
282 | face_helper.clean_all()
283 | image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # (724, 502, 3)
284 |
285 | height, width, _ = image.shape
286 | face_helper.read_image(image_bgr)
287 | face_helper.get_face_landmarks_5(only_center_face=False)
288 | if len(face_helper.all_landmarks_5) < 1:
289 | return None, None, None, None, None
290 |
291 | id_ante_embedding = None
292 | face_kps = None
293 |
294 | if face_kps is None:
295 | face_kps = face_helper.all_landmarks_5[0]
296 | face_helper.align_warp_face()
297 |
298 | if len(face_helper.cropped_faces) < 1:
299 | print("facexlib align face fail")
300 | return None, None, None, None, None
301 | # raise RuntimeError('facexlib align face fail')
302 | align_face = face_helper.cropped_faces[0] # (512, 512, 3) # RGB
303 |
304 |         # in case insightface didn't detect a face
305 | if id_ante_embedding is None:
306 | # print('fail to detect face using insightface, extract embedding on align face')
307 | id_ante_embedding = handler_ante.get_feat(align_face)
308 |
309 | id_ante_embedding = torch.from_numpy(id_ante_embedding).to(device, weight_dtype) # torch.Size([512])
310 |
311 | if id_ante_embedding.ndim == 1:
312 | id_ante_embedding = id_ante_embedding.unsqueeze(0) # torch.Size([1, 512])
313 |
314 | # parsing
315 | if is_align_face:
316 | input = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0 # torch.Size([1, 3, 512, 512])
317 | input = input.to(device)
318 | return_face_features_image = return_face_features_image_2 = input
319 | else:
320 |             original_image_bgr = cv2.cvtColor(original_id_images[i], cv2.COLOR_RGB2BGR)  # use the i-th reference image; original_id_image (singular) is not defined in this function
321 | input = img2tensor(original_image_bgr, bgr2rgb=True).unsqueeze(0) / 255.0 # torch.Size([1, 3, 512, 512])
322 | input = input.to(device)
323 | return_face_features_image = return_face_features_image_2 = input
324 |
325 | # transform img before sending to eva-clip-vit
326 | face_features_image = resize(return_face_features_image, clip_vision_model.image_size,
327 | InterpolationMode.BICUBIC) # torch.Size([1, 3, 336, 336])
328 | face_features_image = normalize(face_features_image, eva_transform_mean, eva_transform_std)
329 | id_cond_vit, id_vit_hidden = clip_vision_model(face_features_image.to(weight_dtype), return_all_features=False, return_hidden=True, shuffle=False) # torch.Size([1, 768]), list(torch.Size([1, 577, 1024]))
330 | id_cond_vit_norm = torch.norm(id_cond_vit, 2, 1, True)
331 | id_cond_vit = torch.div(id_cond_vit, id_cond_vit_norm)
332 |
333 | id_cond = torch.cat([id_ante_embedding, id_cond_vit], dim=-1) # torch.Size([1, 512]), torch.Size([1, 768]) -> torch.Size([1, 1280])
334 |
335 | id_cond_list.append(id_cond)
336 | id_vit_hidden_list.append(id_vit_hidden)
337 |
338 | img = face_helper.cropped_faces[0]
339 | img = Image.fromarray(img, 'RGB')
340 | img = img.resize((360, 360))
341 | new_img.paste(img, (360*i, 60))
342 | i += 1
343 |
344 | new_img = np.array(new_img)
345 | new_img = img2tensor(new_img, bgr2rgb=True).unsqueeze(0) / 255.0
346 |
347 |     return id_cond_list, id_vit_hidden_list, new_img, face_kps, None  # id_cond: torch.Size([1, 1280]) per image; id_vit_hidden: list of torch.Size([1, 577, 1024]) per image
348 |
349 |
--------------------------------------------------------------------------------
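
For reference, a minimal round-trip sketch for the img2tensor / tensor2img helpers above. It assumes the repository root is on PYTHONPATH so that models.utils resolves (adjust the import to your layout); the image shape and values are arbitrary.

import numpy as np
import torch

from models.utils import img2tensor, tensor2img  # assumed import path

# A fake BGR uint8 image, as cv2.imread would return.
bgr = (np.random.rand(64, 48, 3) * 255).astype(np.uint8)

# BGR uint8 (H, W, C) -> RGB float tensor (C, H, W), scaled to [0, 1] as tensor2img expects.
t = img2tensor(bgr, bgr2rgb=True, float32=True) / 255.0
assert t.shape == (3, 64, 48)

# RGB float tensor back to a BGR uint8 image; values are clamped to min_max first.
img = tensor2img(t, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1))
assert img.shape == (64, 48, 3) and img.dtype == np.uint8
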
/models/eva_clip/utils.py:
--------------------------------------------------------------------------------
1 | from itertools import repeat
2 | import collections.abc
3 | import logging
4 | import math
5 | import numpy as np
6 |
7 | import torch
8 | from torch import nn as nn
9 | from torchvision.ops.misc import FrozenBatchNorm2d
10 | import torch.nn.functional as F
11 |
12 | # open CLIP
13 | def resize_clip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
14 | # Rescale the grid of position embeddings when loading from state_dict
15 | old_pos_embed = state_dict.get('visual.positional_embedding', None)
16 | if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
17 | return
18 | grid_size = to_2tuple(model.visual.grid_size)
19 | extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
20 | new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
21 | if new_seq_len == old_pos_embed.shape[0]:
22 | return
23 |
24 | if extra_tokens:
25 | pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
26 | else:
27 | pos_emb_tok, pos_emb_img = None, old_pos_embed
28 | old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
29 |
30 | logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
31 | pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
32 | pos_emb_img = F.interpolate(
33 | pos_emb_img,
34 | size=grid_size,
35 | mode=interpolation,
36 | align_corners=True,
37 | )
38 | pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
39 | if pos_emb_tok is not None:
40 | new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
41 | else:
42 | new_pos_embed = pos_emb_img
43 | state_dict['visual.positional_embedding'] = new_pos_embed
44 |
45 |
46 | def resize_visual_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
47 | # Rescale the grid of position embeddings when loading from state_dict
48 | old_pos_embed = state_dict.get('positional_embedding', None)
49 | if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
50 | return
51 | grid_size = to_2tuple(model.visual.grid_size)
52 | extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
53 | new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
54 | if new_seq_len == old_pos_embed.shape[0]:
55 | return
56 |
57 | if extra_tokens:
58 | pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
59 | else:
60 | pos_emb_tok, pos_emb_img = None, old_pos_embed
61 | old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
62 |
63 | logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
64 | pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
65 | pos_emb_img = F.interpolate(
66 | pos_emb_img,
67 | size=grid_size,
68 | mode=interpolation,
69 | align_corners=True,
70 | )
71 | pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
72 | if pos_emb_tok is not None:
73 | new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
74 | else:
75 | new_pos_embed = pos_emb_img
76 | state_dict['positional_embedding'] = new_pos_embed
77 |
78 | def resize_evaclip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
79 | all_keys = list(state_dict.keys())
80 | # interpolate position embedding
81 | if 'visual.pos_embed' in state_dict:
82 | pos_embed_checkpoint = state_dict['visual.pos_embed']
83 | embedding_size = pos_embed_checkpoint.shape[-1]
84 | num_patches = model.visual.patch_embed.num_patches
85 | num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
86 | # height (== width) for the checkpoint position embedding
87 | orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
88 | # height (== width) for the new position embedding
89 | new_size = int(num_patches ** 0.5)
90 | # class_token and dist_token are kept unchanged
91 | if orig_size != new_size:
92 | print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
93 | extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
94 | # only the position tokens are interpolated
95 | pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
96 | pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
97 | pos_tokens = torch.nn.functional.interpolate(
98 | pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
99 | pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
100 | new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
101 | state_dict['visual.pos_embed'] = new_pos_embed
102 |
103 | patch_embed_proj = state_dict['visual.patch_embed.proj.weight']
104 | patch_size = model.visual.patch_embed.patch_size
105 | state_dict['visual.patch_embed.proj.weight'] = torch.nn.functional.interpolate(
106 | patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
107 |
108 |
109 | def resize_eva_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
110 | all_keys = list(state_dict.keys())
111 | # interpolate position embedding
112 | if 'pos_embed' in state_dict:
113 | pos_embed_checkpoint = state_dict['pos_embed']
114 | embedding_size = pos_embed_checkpoint.shape[-1]
115 | num_patches = model.visual.patch_embed.num_patches
116 | num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
117 | # height (== width) for the checkpoint position embedding
118 | orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
119 | # height (== width) for the new position embedding
120 | new_size = int(num_patches ** 0.5)
121 | # class_token and dist_token are kept unchanged
122 | if orig_size != new_size:
123 | print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
124 | extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
125 | # only the position tokens are interpolated
126 | pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
127 | pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
128 | pos_tokens = torch.nn.functional.interpolate(
129 | pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
130 | pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
131 | new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
132 | state_dict['pos_embed'] = new_pos_embed
133 |
134 | patch_embed_proj = state_dict['patch_embed.proj.weight']
135 | patch_size = model.visual.patch_embed.patch_size
136 | state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
137 | patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
138 |
139 |
140 | def resize_rel_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
141 | all_keys = list(state_dict.keys())
142 | for key in all_keys:
143 | if "relative_position_index" in key:
144 | state_dict.pop(key)
145 |
146 | if "relative_position_bias_table" in key:
147 | rel_pos_bias = state_dict[key]
148 | src_num_pos, num_attn_heads = rel_pos_bias.size()
149 | dst_num_pos, _ = model.visual.state_dict()[key].size()
150 | dst_patch_shape = model.visual.patch_embed.patch_shape
151 | if dst_patch_shape[0] != dst_patch_shape[1]:
152 | raise NotImplementedError()
153 | num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
154 | src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
155 | dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
156 | if src_size != dst_size:
157 | print("Position interpolate for %s from %dx%d to %dx%d" % (
158 | key, src_size, src_size, dst_size, dst_size))
159 | extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
160 | rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
161 |
162 | def geometric_progression(a, r, n):
163 | return a * (1.0 - r ** n) / (1.0 - r)
164 |
165 | left, right = 1.01, 1.5
166 | while right - left > 1e-6:
167 | q = (left + right) / 2.0
168 | gp = geometric_progression(1, q, src_size // 2)
169 | if gp > dst_size // 2:
170 | right = q
171 | else:
172 | left = q
173 |
174 | # if q > 1.090307:
175 | # q = 1.090307
176 |
177 | dis = []
178 | cur = 1
179 | for i in range(src_size // 2):
180 | dis.append(cur)
181 | cur += q ** (i + 1)
182 |
183 | r_ids = [-_ for _ in reversed(dis)]
184 |
185 | x = r_ids + [0] + dis
186 | y = r_ids + [0] + dis
187 |
188 | t = dst_size // 2.0
189 | dx = np.arange(-t, t + 0.1, 1.0)
190 | dy = np.arange(-t, t + 0.1, 1.0)
191 |
192 | print("Original positions = %s" % str(x))
193 | print("Target positions = %s" % str(dx))
194 |
195 | all_rel_pos_bias = []
196 |
197 | for i in range(num_attn_heads):
198 | z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
199 |                     from scipy import interpolate; f = interpolate.interp2d(x, y, z, kind='cubic')  # torch.nn.functional has no interp2d; SciPy provides it (as in the original BEiT/EVA interpolation code)
200 | all_rel_pos_bias.append(
201 | torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
202 |
203 | rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
204 |
205 | new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
206 | state_dict[key] = new_rel_pos_bias
207 |
208 | # interpolate position embedding
209 | if 'pos_embed' in state_dict:
210 | pos_embed_checkpoint = state_dict['pos_embed']
211 | embedding_size = pos_embed_checkpoint.shape[-1]
212 | num_patches = model.visual.patch_embed.num_patches
213 | num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
214 | # height (== width) for the checkpoint position embedding
215 | orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
216 | # height (== width) for the new position embedding
217 | new_size = int(num_patches ** 0.5)
218 | # class_token and dist_token are kept unchanged
219 | if orig_size != new_size:
220 | print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
221 | extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
222 | # only the position tokens are interpolated
223 | pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
224 | pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
225 | pos_tokens = torch.nn.functional.interpolate(
226 | pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
227 | pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
228 | new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
229 | state_dict['pos_embed'] = new_pos_embed
230 |
231 | patch_embed_proj = state_dict['patch_embed.proj.weight']
232 | patch_size = model.visual.patch_embed.patch_size
233 | state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
234 | patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
235 |
236 |
237 | def freeze_batch_norm_2d(module, module_match={}, name=''):
238 | """
239 | Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
240 | itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
241 | returned. Otherwise, the module is walked recursively and submodules are converted in place.
242 |
243 | Args:
244 | module (torch.nn.Module): Any PyTorch module.
245 | module_match (dict): Dictionary of full module names to freeze (all if empty)
246 | name (str): Full module name (prefix)
247 |
248 | Returns:
249 | torch.nn.Module: Resulting module
250 |
251 | Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
252 | """
253 | res = module
254 | is_match = True
255 | if module_match:
256 | is_match = name in module_match
257 | if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)):
258 | res = FrozenBatchNorm2d(module.num_features)
259 | res.num_features = module.num_features
260 | res.affine = module.affine
261 | if module.affine:
262 | res.weight.data = module.weight.data.clone().detach()
263 | res.bias.data = module.bias.data.clone().detach()
264 | res.running_mean.data = module.running_mean.data
265 | res.running_var.data = module.running_var.data
266 | res.eps = module.eps
267 | else:
268 | for child_name, child in module.named_children():
269 | full_child_name = '.'.join([name, child_name]) if name else child_name
270 | new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
271 | if new_child is not child:
272 | res.add_module(child_name, new_child)
273 | return res
274 |
275 |
276 | # From PyTorch internals
277 | def _ntuple(n):
278 | def parse(x):
279 | if isinstance(x, collections.abc.Iterable):
280 | return x
281 | return tuple(repeat(x, n))
282 | return parse
283 |
284 |
285 | to_1tuple = _ntuple(1)
286 | to_2tuple = _ntuple(2)
287 | to_3tuple = _ntuple(3)
288 | to_4tuple = _ntuple(4)
289 | to_ntuple = lambda n, x: _ntuple(n)(x)
290 |
291 |
292 | def is_logging(args):
293 | def is_global_master(args):
294 | return args.rank == 0
295 |
296 | def is_local_master(args):
297 | return args.local_rank == 0
298 |
299 | def is_master(args, local=False):
300 | return is_local_master(args) if local else is_global_master(args)
301 | return is_master
302 |
303 |
304 | class AllGather(torch.autograd.Function):
305 | """An autograd function that performs allgather on a tensor.
306 | Performs all_gather operation on the provided tensors.
307 | *** Warning ***: torch.distributed.all_gather has no gradient.
308 | """
309 |
310 | @staticmethod
311 | def forward(ctx, tensor, rank, world_size):
312 | tensors_gather = [torch.empty_like(tensor) for _ in range(world_size)]
313 | torch.distributed.all_gather(tensors_gather, tensor)
314 | ctx.rank = rank
315 | ctx.batch_size = tensor.shape[0]
316 | return torch.cat(tensors_gather, 0)
317 |
318 | @staticmethod
319 | def backward(ctx, grad_output):
320 | return (
321 | grad_output[ctx.batch_size * ctx.rank: ctx.batch_size * (ctx.rank + 1)],
322 | None,
323 | None
324 | )
325 |
326 | allgather = AllGather.apply
--------------------------------------------------------------------------------
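
As a reference for the resize_*_pos_embed helpers above, here is the core grid interpolation they perform, reproduced on toy tensors using only torch; the grid sizes and embedding width below are arbitrary and chosen purely for illustration.

import torch
import torch.nn.functional as F

old_grid, new_grid, dim = 16, 24, 64          # e.g. a 16x16 patch grid resized to 24x24
extra_tokens = 1                              # class token is kept unchanged
old_pos_embed = torch.randn(extra_tokens + old_grid * old_grid, dim)

# Split off the class token and reshape the patch part into a 2D grid of features.
pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
pos_emb_img = pos_emb_img.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)

# Bicubic interpolation of the grid, exactly as in resize_clip_pos_embed above.
pos_emb_img = F.interpolate(pos_emb_img, size=(new_grid, new_grid),
                            mode='bicubic', align_corners=True)
pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, dim)[0]

# Re-attach the class token to obtain the resized positional embedding.
new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
assert new_pos_embed.shape == (extra_tokens + new_grid * new_grid, dim)
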
/models/eva_clip/model.py:
--------------------------------------------------------------------------------
1 | """ CLIP Model
2 |
3 | Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
4 | """
5 | import os
6 | from dataclasses import dataclass
7 | from typing import Optional, Tuple, Union
8 | from functools import partial
9 |
10 | import numpy as np
11 | import torch
12 | import torch.nn.functional as F
13 | from torch import nn
14 |
15 | try:
16 | from .hf_model import HFTextEncoder
17 | except ImportError:
18 | HFTextEncoder = None
19 | from .modified_resnet import ModifiedResNet
20 | from .timm_model import TimmModel
21 | from .eva_vit_model import EVAVisionTransformer
22 | from .transformer import LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer
23 |
24 | try:
25 | from apex.normalization import FusedLayerNorm
26 | except ImportError:
27 | FusedLayerNorm = LayerNorm
28 | print("Please 'pip install apex'")
29 |
30 | try:
31 | import xformers.ops as xops
32 | except ImportError:
33 | xops = None
34 | print("Please 'pip install xformers'")
35 |
36 | @dataclass
37 | class CLIPVisionCfg:
38 | layers: Union[Tuple[int, int, int, int], int] = 12
39 | width: int = 768
40 | head_width: int = 64
41 | mlp_ratio: float = 4.0
42 | patch_size: int = 16
43 | image_size: Union[Tuple[int, int], int] = 224
44 | ls_init_value: Optional[float] = None # layer scale initial value
45 | patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
46 | global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
47 | drop_path_rate: Optional[float] = None # drop path rate
48 | timm_model_name: str = None # a valid model name overrides layers, width, patch_size
49 | timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model
50 | timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
51 | timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '')
52 | timm_proj_bias: bool = False # enable bias final projection
53 | eva_model_name: str = None # a valid eva model name overrides layers, width, patch_size
54 | qkv_bias: bool = True
55 | fusedLN: bool = False
56 | xattn: bool = False
57 | postnorm: bool = False
58 | rope: bool = False
59 | pt_hw_seq_len: int = 16 # 224/14
60 | intp_freq: bool = False
61 | naiveswiglu: bool = False
62 | subln: bool = False
63 |
64 |
65 | @dataclass
66 | class CLIPTextCfg:
67 | context_length: int = 77
68 | vocab_size: int = 49408
69 | width: int = 512
70 | heads: int = 8
71 | layers: int = 12
72 | ls_init_value: Optional[float] = None # layer scale initial value
73 | hf_model_name: str = None
74 | hf_tokenizer_name: str = None
75 | hf_model_pretrained: bool = True
76 | proj: str = 'mlp'
77 | pooler_type: str = 'mean_pooler'
78 | masked_language_modeling: bool = False
79 | fusedLN: bool = False
80 | xattn: bool = False
81 | attn_mask: bool = True
82 |
83 | def get_cast_dtype(precision: str):
84 | cast_dtype = None
85 | if precision == 'bf16':
86 | cast_dtype = torch.bfloat16
87 | elif precision == 'fp16':
88 | cast_dtype = torch.float16
89 | return cast_dtype
90 |
91 |
92 | def _build_vision_tower(
93 | embed_dim: int,
94 | vision_cfg: CLIPVisionCfg,
95 | quick_gelu: bool = False,
96 | cast_dtype: Optional[torch.dtype] = None
97 | ):
98 | if isinstance(vision_cfg, dict):
99 | vision_cfg = CLIPVisionCfg(**vision_cfg)
100 |
101 | # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
102 | # memory efficient in recent PyTorch releases (>= 1.10).
103 | # NOTE: timm models always use native GELU regardless of quick_gelu flag.
104 | act_layer = QuickGELU if quick_gelu else nn.GELU
105 |
106 | if vision_cfg.eva_model_name:
107 | vision_heads = vision_cfg.width // vision_cfg.head_width
108 | norm_layer = LayerNorm
109 |
110 | visual = EVAVisionTransformer(
111 | img_size=vision_cfg.image_size,
112 | patch_size=vision_cfg.patch_size,
113 | num_classes=embed_dim,
114 | use_mean_pooling=vision_cfg.global_average_pool, #False
115 | init_values=vision_cfg.ls_init_value,
116 | patch_dropout=vision_cfg.patch_dropout,
117 | embed_dim=vision_cfg.width,
118 | depth=vision_cfg.layers,
119 | num_heads=vision_heads,
120 | mlp_ratio=vision_cfg.mlp_ratio,
121 | qkv_bias=vision_cfg.qkv_bias,
122 | drop_path_rate=vision_cfg.drop_path_rate,
123 | norm_layer= partial(FusedLayerNorm, eps=1e-6) if vision_cfg.fusedLN else partial(norm_layer, eps=1e-6),
124 | xattn=vision_cfg.xattn,
125 | rope=vision_cfg.rope,
126 | postnorm=vision_cfg.postnorm,
127 | pt_hw_seq_len= vision_cfg.pt_hw_seq_len, # 224/14
128 | intp_freq= vision_cfg.intp_freq,
129 | naiveswiglu= vision_cfg.naiveswiglu,
130 | subln= vision_cfg.subln
131 | )
132 | elif vision_cfg.timm_model_name:
133 | visual = TimmModel(
134 | vision_cfg.timm_model_name,
135 | pretrained=vision_cfg.timm_model_pretrained,
136 | pool=vision_cfg.timm_pool,
137 | proj=vision_cfg.timm_proj,
138 | proj_bias=vision_cfg.timm_proj_bias,
139 | embed_dim=embed_dim,
140 | image_size=vision_cfg.image_size
141 | )
142 | act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models
143 | elif isinstance(vision_cfg.layers, (tuple, list)):
144 | vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
145 | visual = ModifiedResNet(
146 | layers=vision_cfg.layers,
147 | output_dim=embed_dim,
148 | heads=vision_heads,
149 | image_size=vision_cfg.image_size,
150 | width=vision_cfg.width
151 | )
152 | else:
153 | vision_heads = vision_cfg.width // vision_cfg.head_width
154 |         norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm  # NOTE: LayerNormFp32 is not imported above; this branch is only reached for plain (non-EVA, non-timm, non-ResNet) ViT configs
155 | visual = VisionTransformer(
156 | image_size=vision_cfg.image_size,
157 | patch_size=vision_cfg.patch_size,
158 | width=vision_cfg.width,
159 | layers=vision_cfg.layers,
160 | heads=vision_heads,
161 | mlp_ratio=vision_cfg.mlp_ratio,
162 | ls_init_value=vision_cfg.ls_init_value,
163 | patch_dropout=vision_cfg.patch_dropout,
164 | global_average_pool=vision_cfg.global_average_pool,
165 | output_dim=embed_dim,
166 | act_layer=act_layer,
167 | norm_layer=norm_layer,
168 | )
169 |
170 | return visual
171 |
172 |
173 | def _build_text_tower(
174 | embed_dim: int,
175 | text_cfg: CLIPTextCfg,
176 | quick_gelu: bool = False,
177 | cast_dtype: Optional[torch.dtype] = None,
178 | ):
179 | if isinstance(text_cfg, dict):
180 | text_cfg = CLIPTextCfg(**text_cfg)
181 |
182 | if text_cfg.hf_model_name:
183 | text = HFTextEncoder(
184 | text_cfg.hf_model_name,
185 | output_dim=embed_dim,
186 | tokenizer_name=text_cfg.hf_tokenizer_name,
187 | proj=text_cfg.proj,
188 | pooler_type=text_cfg.pooler_type,
189 | masked_language_modeling=text_cfg.masked_language_modeling
190 | )
191 | else:
192 | act_layer = QuickGELU if quick_gelu else nn.GELU
193 | norm_layer = LayerNorm
194 |
195 | text = TextTransformer(
196 | context_length=text_cfg.context_length,
197 | vocab_size=text_cfg.vocab_size,
198 | width=text_cfg.width,
199 | heads=text_cfg.heads,
200 | layers=text_cfg.layers,
201 | ls_init_value=text_cfg.ls_init_value,
202 | output_dim=embed_dim,
203 | act_layer=act_layer,
204 | norm_layer= FusedLayerNorm if text_cfg.fusedLN else norm_layer,
205 | xattn=text_cfg.xattn,
206 | attn_mask=text_cfg.attn_mask,
207 | )
208 | return text
209 |
210 | class CLIP(nn.Module):
211 | def __init__(
212 | self,
213 | embed_dim: int,
214 | vision_cfg: CLIPVisionCfg,
215 | text_cfg: CLIPTextCfg,
216 | quick_gelu: bool = False,
217 | cast_dtype: Optional[torch.dtype] = None,
218 | ):
219 | super().__init__()
220 | self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
221 |
222 | text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
223 | self.transformer = text.transformer
224 | self.vocab_size = text.vocab_size
225 | self.token_embedding = text.token_embedding
226 | self.positional_embedding = text.positional_embedding
227 | self.ln_final = text.ln_final
228 | self.text_projection = text.text_projection
229 | self.register_buffer('attn_mask', text.attn_mask, persistent=False)
230 |
231 | self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
232 |
233 | def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
234 | # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
235 | self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
236 |
237 | @torch.jit.ignore
238 | def set_grad_checkpointing(self, enable=True):
239 | self.visual.set_grad_checkpointing(enable)
240 | self.transformer.grad_checkpointing = enable
241 |
242 | @torch.jit.ignore
243 | def no_weight_decay(self):
244 | return {'logit_scale'}
245 |
246 | def encode_image(self, image, normalize: bool = False):
247 | features = self.visual(image)
248 | return F.normalize(features, dim=-1) if normalize else features
249 |
250 | def encode_text(self, text, normalize: bool = False):
251 | cast_dtype = self.transformer.get_cast_dtype()
252 |
253 | x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
254 |
255 | x = x + self.positional_embedding.to(cast_dtype)
256 | x = x.permute(1, 0, 2) # NLD -> LND
257 | x = self.transformer(x, attn_mask=self.attn_mask)
258 | x = x.permute(1, 0, 2) # LND -> NLD
259 | x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]
260 | # take features from the eot embedding (eot_token is the highest number in each sequence)
261 | x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
262 | return F.normalize(x, dim=-1) if normalize else x
263 |
264 | def forward(self, image, text):
265 | image_features = self.encode_image(image, normalize=True)
266 | text_features = self.encode_text(text, normalize=True)
267 | return image_features, text_features, self.logit_scale.exp()
268 |
269 |
270 | class CustomCLIP(nn.Module):
271 | def __init__(
272 | self,
273 | embed_dim: int,
274 | vision_cfg: CLIPVisionCfg,
275 | text_cfg: CLIPTextCfg,
276 | quick_gelu: bool = False,
277 | cast_dtype: Optional[torch.dtype] = None,
278 | itm_task: bool = False,
279 | ):
280 | super().__init__()
281 | self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
282 | self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
283 | self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
284 |
285 | def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
286 | # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
287 | self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
288 |
289 | def lock_text_tower(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
290 | self.text.lock(unlocked_layers, freeze_layer_norm)
291 |
292 | @torch.jit.ignore
293 | def set_grad_checkpointing(self, enable=True):
294 | self.visual.set_grad_checkpointing(enable)
295 | self.text.set_grad_checkpointing(enable)
296 |
297 | @torch.jit.ignore
298 | def no_weight_decay(self):
299 | return {'logit_scale'}
300 |
301 | def encode_image(self, image, normalize: bool = False):
302 | features = self.visual(image)
303 | return F.normalize(features, dim=-1) if normalize else features
304 |
305 | def encode_text(self, text, normalize: bool = False):
306 | features = self.text(text)
307 | return F.normalize(features, dim=-1) if normalize else features
308 |
309 | def forward(self, image, text):
310 | image_features = self.encode_image(image, normalize=True)
311 | text_features = self.encode_text(text, normalize=True)
312 | return image_features, text_features, self.logit_scale.exp()
313 |
314 |
315 | def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
316 | """Convert applicable model parameters to low-precision (bf16 or fp16)"""
317 |
318 | def _convert_weights(l):
319 |
320 | if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
321 | l.weight.data = l.weight.data.to(dtype)
322 | if l.bias is not None:
323 | l.bias.data = l.bias.data.to(dtype)
324 |
325 | if isinstance(l, (nn.MultiheadAttention, Attention)):
326 | for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
327 | tensor = getattr(l, attr, None)
328 | if tensor is not None:
329 | tensor.data = tensor.data.to(dtype)
330 |
331 |         if isinstance(l, nn.Parameter):  # NOTE: nn.Module.apply only visits submodules, so this check (and the one in the loop below) never matches
332 | l.data = l.data.to(dtype)
333 |
334 | for name in ["text_projection", "proj"]:
335 | if hasattr(l, name) and isinstance(l, nn.Parameter):
336 | attr = getattr(l, name, None)
337 | if attr is not None:
338 | attr.data = attr.data.to(dtype)
339 |
340 | model.apply(_convert_weights)
341 |
342 |
343 | convert_weights_to_fp16 = convert_weights_to_lp # backwards compat
344 |
345 |
346 | # used to maintain checkpoint compatibility
347 | def convert_to_custom_text_state_dict(state_dict: dict):
348 | if 'text_projection' in state_dict:
349 | # old format state_dict, move text tower -> .text
350 | new_state_dict = {}
351 | for k, v in state_dict.items():
352 | if any(k.startswith(p) for p in (
353 | 'text_projection',
354 | 'positional_embedding',
355 | 'token_embedding',
356 | 'transformer',
357 | 'ln_final',
358 | 'logit_scale'
359 | )):
360 | k = 'text.' + k
361 | new_state_dict[k] = v
362 | return new_state_dict
363 | return state_dict
364 |
365 |
366 | def build_model_from_openai_state_dict(
367 | state_dict: dict,
368 | quick_gelu=True,
369 | cast_dtype=torch.float16,
370 | ):
371 | vit = "visual.proj" in state_dict
372 |
373 | if vit:
374 | vision_width = state_dict["visual.conv1.weight"].shape[0]
375 | vision_layers = len(
376 | [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
377 | vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
378 | grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
379 | image_size = vision_patch_size * grid_size
380 | else:
381 | counts: list = [
382 | len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
383 | vision_layers = tuple(counts)
384 | vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
385 | output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
386 | vision_patch_size = None
387 | assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
388 | image_size = output_width * 32
389 |
390 | embed_dim = state_dict["text_projection"].shape[1]
391 | context_length = state_dict["positional_embedding"].shape[0]
392 | vocab_size = state_dict["token_embedding.weight"].shape[0]
393 | transformer_width = state_dict["ln_final.weight"].shape[0]
394 | transformer_heads = transformer_width // 64
395 | transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
396 |
397 | vision_cfg = CLIPVisionCfg(
398 | layers=vision_layers,
399 | width=vision_width,
400 | patch_size=vision_patch_size,
401 | image_size=image_size,
402 | )
403 | text_cfg = CLIPTextCfg(
404 | context_length=context_length,
405 | vocab_size=vocab_size,
406 | width=transformer_width,
407 | heads=transformer_heads,
408 | layers=transformer_layers
409 | )
410 | model = CLIP(
411 | embed_dim,
412 | vision_cfg=vision_cfg,
413 | text_cfg=text_cfg,
414 | quick_gelu=quick_gelu, # OpenAI models were trained with QuickGELU
415 | cast_dtype=cast_dtype,
416 | )
417 |
418 | for key in ["input_resolution", "context_length", "vocab_size"]:
419 | state_dict.pop(key, None)
420 |
421 | convert_weights_to_fp16(model) # OpenAI state dicts are partially converted to float16
422 | model.load_state_dict(state_dict)
423 | return model.eval()
424 |
425 |
426 | def trace_model(model, batch_size=256, device=torch.device('cpu')):
427 | model.eval()
428 | image_size = model.visual.image_size
429 | example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
430 | example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
431 | model = torch.jit.trace_module(
432 | model,
433 | inputs=dict(
434 | forward=(example_images, example_text),
435 | encode_text=(example_text,),
436 | encode_image=(example_images,)
437 | ))
438 | model.visual.image_size = image_size
439 | return model
440 |
--------------------------------------------------------------------------------
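
The EOT pooling line in CLIP.encode_text above, x[torch.arange(x.shape[0]), text.argmax(dim=-1)], relies on the end-of-text token having the largest id in the vocabulary (vocab_size is 49408, so the EOT id is 49407). A small self-contained illustration; the other token ids and the feature width are made up.

import torch

eot_id = 49407                                   # highest id in the 49408-token CLIP BPE vocab
text = torch.tensor([[49406,  320, 1125, eot_id,      0, 0],
                     [49406,  320, 2368,    533, eot_id, 0]])   # two padded token sequences
x = torch.randn(2, 6, 8)                         # [batch, n_ctx, width] transformer output

eot_positions = text.argmax(dim=-1)              # argmax over ids finds the EOT slot: tensor([3, 4])
pooled = x[torch.arange(x.shape[0]), eot_positions]

assert pooled.shape == (2, 8)
assert torch.equal(pooled[0], x[0, 3]) and torch.equal(pooled[1], x[1, 4])
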