├── .gitignore
├── LICENSE
├── README.md
├── configs
├── openseed
│ ├── openseed_swint_lang_joint.yaml
│ ├── openseed_swint_lang_joint_2st.yaml
│ └── openseed_swint_lang_joint_2st_visual_prompt.yaml
└── semsam
│ └── visual_prompt_encoder.yaml
├── datasets_os
├── __init__.py
├── build.py
├── custom_dataset_dataloader.py
├── dataset_mappers
│ ├── __init__.py
│ ├── coco_instance_new_baseline_dataset_mapper.py
│ ├── coco_instruct_grounding_dataset_interactive_mapper.py
│ ├── coco_instruct_grounding_dataset_mapper.py
│ ├── coco_interactive_panoptic_new_baseline_dataset_mapper.py
│ ├── coco_panoptic_interactive_dataset_mapper.py
│ ├── coco_panoptic_new_baseline_dataset_mapper.py
│ ├── flickr_instance_new_baseline_dataset_mapper.py
│ ├── flickr_instance_new_baseline_dataset_mapper_.py
│ ├── flickr_instance_new_baseline_dataset_mapper_end.py
│ ├── flickr_new_baseline_dataset_mapper.py
│ ├── inference_mapper_with_gt.py
│ ├── llava_dataset_mapper.py
│ ├── refcoco_dataset_mapper.py
│ └── vg_instance_new_baseline_dataset_mapper.py
├── refer.py
├── registration
│ ├── __init__.py
│ ├── register_coco_instruct_grounding_dataset.py
│ ├── register_coco_panoptic_annos_grounding_interactive.py
│ ├── register_flickr_dataset.py
│ └── register_vg_dataset.py
└── semseg_loader.py
├── docs
└── MODEL_ZOO.md
├── gradio_demo
├── LLaVA_G_Demo.py
├── __init__.py
└── examples
│ ├── cow_motor.jpg
│ ├── dog_sleep.jpg
│ ├── meeting.jpg
│ ├── pizza.jpg
│ └── queen_and_dragon.jpg
├── images
├── demo_cli.gif
├── llava_example_cmp.png
└── llava_logo.png
├── llava
├── __init__.py
├── constants.py
├── conversation.py
├── eval
│ ├── LLaVA_G_Eval.py
│ ├── eval_gpt_review.py
│ ├── eval_gpt_review_bench.py
│ ├── eval_gpt_review_visual.py
│ ├── eval_gpt_review_visual2.py
│ ├── eval_science_qa.py
│ ├── eval_science_qa_gpt4.py
│ ├── eval_science_qa_gpt4_requery.py
│ ├── generate_webpage_data_from_table.py
│ ├── llava_mapper.py
│ ├── model_qa.py
│ ├── model_vqa.py
│ ├── model_vqa_science.py
│ ├── qa_baseline_gpt35.py
│ ├── run_llava.py
│ ├── summarize_gpt_review.py
│ ├── table
│ │ ├── answer
│ │ │ ├── answer_alpaca-13b.jsonl
│ │ │ ├── answer_bard.jsonl
│ │ │ ├── answer_gpt35.jsonl
│ │ │ ├── answer_llama-13b.jsonl
│ │ │ └── answer_vicuna-13b.jsonl
│ │ ├── caps_boxes_coco2014_val_80.jsonl
│ │ ├── model.jsonl
│ │ ├── prompt.jsonl
│ │ ├── question.jsonl
│ │ ├── results
│ │ │ └── test_sqa_llava_13b_v0.json
│ │ ├── review
│ │ │ ├── review_alpaca-13b_vicuna-13b.jsonl
│ │ │ ├── review_bard_vicuna-13b.jsonl
│ │ │ ├── review_gpt35_vicuna-13b.jsonl
│ │ │ └── review_llama-13b_vicuna-13b.jsonl
│ │ ├── reviewer.jsonl
│ │ └── rule.json
│ └── webpage
│ │ ├── figures
│ │ ├── alpaca.png
│ │ ├── bard.jpg
│ │ ├── chatgpt.svg
│ │ ├── llama.jpg
│ │ ├── swords_FILL0_wght300_GRAD0_opsz48.svg
│ │ └── vicuna.jpeg
│ │ ├── index.html
│ │ ├── script.js
│ │ └── styles.css
├── mm_utils.py
├── model
│ ├── __init__.py
│ ├── apply_delta.py
│ ├── builder.py
│ ├── consolidate.py
│ ├── language_model
│ │ ├── llava_llama.py
│ │ ├── llava_llama_gd.py
│ │ ├── llava_mpt.py
│ │ └── mpt
│ │ │ ├── adapt_tokenizer.py
│ │ │ ├── attention.py
│ │ │ ├── blocks.py
│ │ │ ├── configuration_mpt.py
│ │ │ ├── custom_embedding.py
│ │ │ ├── flash_attn_triton.py
│ │ │ ├── hf_prefixlm_converter.py
│ │ │ ├── meta_init_context.py
│ │ │ ├── modeling_mpt.py
│ │ │ ├── norm.py
│ │ │ └── param_init_fns.py
│ ├── llava_arch.py
│ ├── make_delta.py
│ ├── multimodal_encoder
│ │ ├── builder.py
│ │ └── clip_encoder.py
│ ├── openseed
│ │ ├── BaseModel.py
│ │ ├── __init__.py
│ │ ├── architectures
│ │ │ ├── __init__.py
│ │ │ ├── build.py
│ │ │ ├── openseed_model.py
│ │ │ ├── openseed_model_decouple_train.py
│ │ │ └── registry.py
│ │ ├── backbone
│ │ │ ├── __init__.py
│ │ │ ├── backbone.py
│ │ │ ├── build.py
│ │ │ ├── focal.py
│ │ │ ├── focal_dw.py
│ │ │ ├── registry.py
│ │ │ └── swin.py
│ │ ├── body
│ │ │ ├── __init__.py
│ │ │ ├── build.py
│ │ │ ├── decoder
│ │ │ │ ├── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── modules.py
│ │ │ │ ├── openseed_decoder.py
│ │ │ │ ├── openseed_decoder_decouple.py
│ │ │ │ ├── registry.py
│ │ │ │ └── utils
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── dino_decoder.py
│ │ │ │ │ └── utils.py
│ │ │ ├── encoder
│ │ │ │ ├── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── encoder_deform.py
│ │ │ │ ├── ops
│ │ │ │ │ ├── functions
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── ms_deform_attn_func.py
│ │ │ │ │ ├── make.sh
│ │ │ │ │ ├── modules
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── ms_deform_attn.py
│ │ │ │ │ ├── setup.py
│ │ │ │ │ ├── src
│ │ │ │ │ │ ├── cpu
│ │ │ │ │ │ │ ├── ms_deform_attn_cpu.cpp
│ │ │ │ │ │ │ └── ms_deform_attn_cpu.h
│ │ │ │ │ │ ├── cuda
│ │ │ │ │ │ │ ├── ms_deform_attn_cuda.cu
│ │ │ │ │ │ │ ├── ms_deform_attn_cuda.h
│ │ │ │ │ │ │ └── ms_deform_im2col_cuda.cuh
│ │ │ │ │ │ ├── ms_deform_attn.h
│ │ │ │ │ │ └── vision.cpp
│ │ │ │ │ └── test.py
│ │ │ │ ├── registry.py
│ │ │ │ └── transformer_encoder_fpn.py
│ │ │ ├── openseed_head.py
│ │ │ ├── registry.py
│ │ │ └── transformer_blocks.py
│ │ ├── language
│ │ │ ├── LangEncoder
│ │ │ │ ├── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── registry.py
│ │ │ │ └── transformer.py
│ │ │ ├── __init__.py
│ │ │ ├── build.py
│ │ │ ├── encoder.py
│ │ │ ├── registry.py
│ │ │ └── vlpencoder.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── attention.py
│ │ │ ├── criterion.py
│ │ │ ├── matcher.py
│ │ │ ├── point_features.py
│ │ │ ├── position_encoding.py
│ │ │ └── postprocessing.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── box_ops.py
│ │ │ ├── config.py
│ │ │ └── misc.py
│ ├── semsam
│ │ ├── BaseModel.py
│ │ ├── __init__.py
│ │ ├── architectures
│ │ │ ├── __init__.py
│ │ │ ├── build.py
│ │ │ ├── idino_model_partwhole_all_llm_ref_feats_all_det_pretrainv1.py
│ │ │ └── registry.py
│ │ ├── backbone
│ │ │ ├── __init__.py
│ │ │ ├── backbone.py
│ │ │ ├── build.py
│ │ │ ├── focal.py
│ │ │ ├── focal_dw.py
│ │ │ ├── registry.py
│ │ │ ├── swin.py
│ │ │ └── swin_new.py
│ │ ├── body
│ │ │ ├── __init__.py
│ │ │ ├── build.py
│ │ │ ├── decoder
│ │ │ │ ├── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── idino_decoder_no_iou_token_partwhole_all_llm.py
│ │ │ │ ├── modules.py
│ │ │ │ ├── registry.py
│ │ │ │ └── utils
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── dino_decoder.py
│ │ │ │ │ └── utils.py
│ │ │ ├── encoder
│ │ │ │ ├── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── encoder_deform.py
│ │ │ │ ├── ops
│ │ │ │ │ ├── functions
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── ms_deform_attn_func.py
│ │ │ │ │ ├── make.sh
│ │ │ │ │ ├── modules
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── ms_deform_attn.py
│ │ │ │ │ ├── setup.py
│ │ │ │ │ ├── src
│ │ │ │ │ │ ├── cpu
│ │ │ │ │ │ │ ├── ms_deform_attn_cpu.cpp
│ │ │ │ │ │ │ └── ms_deform_attn_cpu.h
│ │ │ │ │ │ ├── cuda
│ │ │ │ │ │ │ ├── ms_deform_attn_cuda.cu
│ │ │ │ │ │ │ ├── ms_deform_attn_cuda.h
│ │ │ │ │ │ │ └── ms_deform_im2col_cuda.cuh
│ │ │ │ │ │ ├── ms_deform_attn.h
│ │ │ │ │ │ └── vision.cpp
│ │ │ │ │ └── test.py
│ │ │ │ ├── registry.py
│ │ │ │ └── transformer_encoder_fpn.py
│ │ │ ├── openseed_head.py
│ │ │ ├── registry.py
│ │ │ └── transformer_blocks.py
│ │ ├── language
│ │ │ ├── LangEncoder
│ │ │ │ ├── __init__.py
│ │ │ │ ├── build.py
│ │ │ │ ├── registry.py
│ │ │ │ └── transformer.py
│ │ │ ├── __init__.py
│ │ │ ├── build.py
│ │ │ ├── encoder.py
│ │ │ ├── fixencoder.py
│ │ │ ├── llama_encoder.py
│ │ │ ├── loss.py
│ │ │ ├── misc.py
│ │ │ ├── modeling_llama_os.py
│ │ │ ├── registry.py
│ │ │ └── vlpencoder.py
│ │ ├── modules
│ │ │ ├── __init__.py
│ │ │ ├── attention.py
│ │ │ ├── criterion_id_llm.py
│ │ │ ├── hooks.py
│ │ │ ├── matcher.py
│ │ │ ├── point_features.py
│ │ │ ├── position_encoding.py
│ │ │ └── postprocessing.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── box_ops.py
│ │ │ ├── config.py
│ │ │ └── misc.py
│ └── utils.py
├── serve
│ ├── __init__.py
│ ├── cli.py
│ ├── controller.py
│ ├── examples
│ │ ├── extreme_ironing.jpg
│ │ └── waterview.jpg
│ ├── gradio_web_server.py
│ ├── model_worker.py
│ ├── register_worker.py
│ └── test_message.py
├── train
│ ├── llama_flash_attn_monkey_patch.py
│ ├── llava_trainer.py
│ ├── llava_trainer_gd.py
│ ├── llava_trainer_joint_train.py
│ ├── train.py
│ ├── train_grounding_1st.py
│ ├── train_joint_1st.py
│ ├── train_joint_2st.py
│ ├── train_joint_2st_interactive_refcoco_coco_instruction.py
│ └── train_mem.py
└── utils.py
├── playground
└── data
│ ├── SEED-Bench
│ └── SEED-Bench-llava_with_grounding.jsonl
│ ├── coco2014_val_gpt4_qa_30x3.jsonl
│ ├── coco2014_val_qa_eval
│ ├── q_image_set.txt
│ ├── qa90_gpt4_answer.jsonl
│ ├── qa90_gpt4_v_answer.jsonl
│ ├── qa90_questions.jsonl
│ ├── qa90_questions_with_grounding.jsonl
│ └── qa90_questions_with_grounding_sample.jsonl
│ ├── llava-bench-in-the-wild
│ └── questions_with_grounding.jsonl
│ └── prompts
│ ├── complex_reasoning
│ ├── 000_caps.txt
│ ├── 000_conv.txt
│ ├── 001_caps.txt
│ ├── 001_conv.txt
│ ├── 002_caps.txt
│ ├── 002_conv.txt
│ └── system_message.txt
│ ├── conversation
│ ├── 000_caps.txt
│ ├── 000_conv.txt
│ ├── 001_caps.txt
│ ├── 001_conv.txt
│ └── system_message.txt
│ └── detail_description
│ ├── 000_caps.txt
│ ├── 000_conv.txt
│ ├── 001_caps.txt
│ ├── 001_conv.txt
│ ├── 002_caps.txt
│ ├── 002_conv.txt
│ └── system_message.txt
├── pyproject.toml
├── scripts
├── convert_sqa_to_llava.py
├── convert_sqa_to_llava_base_prompt.py
├── finetune.sh
├── finetune_visual_prompt.sh
├── merge_lora_weights.py
└── pretrain_joint.sh
└── utils
├── Config.py
├── __init__.py
├── arguments.py
├── constants.py
├── constants_ori.py
├── dist.py
├── distributed.py
├── misc.py
├── model.py
├── nms.py
├── prompt_engineering.py
├── utils.py
└── visualizer.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.err
2 | *.out
3 | *.pyc
4 | wandb
5 | /data_preparation/vis_results/
6 | /data_preparation/vis_results_new/
7 | /LLAVA_Stage1_Pretrained/
8 | /work_dirs/
9 | /llava.egg-info/
10 | /data_preparation/data/
11 | /vis_results/
12 | model_worker*
13 | /playground/
14 | *.jsonl
15 | *.pth
16 | gradio_demo/tmp_files
17 | llava_bench_results
18 | symmary_results
19 | eval_gpt4
20 | vis_results_pdf_precision
21 | vis_results_pdf_recall
22 | output/
23 | datasets/
24 | output
25 | datasets
26 | *.log
27 | *.json
28 | __pycache__/
29 | */__pycache__
30 | */*/__pycache__
31 | */*/*/__pycache__
32 | */*/*/*/__pycache__
33 | gradio_demo/examples/*.mp4
34 |
--------------------------------------------------------------------------------
/datasets_os/__init__.py:
--------------------------------------------------------------------------------
1 | from . import registration
2 | from .build import *
--------------------------------------------------------------------------------
/datasets_os/dataset_mappers/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from .coco_panoptic_interactive_dataset_mapper import COCOPanopticInteractiveDatasetMapper
3 | from .flickr_instance_new_baseline_dataset_mapper import COCOInstanceNewBaselineDatasetMapper as FlickrNewBaselineDatasetMapper
4 | from .coco_instruct_grounding_dataset_mapper import COCOInstanceNewBaselineDatasetMapper as COCOInstructGroundingDatasetMapper
5 | from .coco_instruct_grounding_dataset_interactive_mapper import COCOInstanceNewBaselineDatasetMapper as COCOInterGroundingDatasetMapper
6 | from .vg_instance_new_baseline_dataset_mapper import COCOInstanceNewBaselineDatasetMapper as VGNewBaselineDatasetMapper
--------------------------------------------------------------------------------
/datasets_os/registration/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | from . import (
3 | register_coco_panoptic_annos_grounding_interactive,
4 | register_coco_instruct_grounding_dataset,
5 | register_flickr_dataset,
6 | # register_vg_dataset,
7 | )
--------------------------------------------------------------------------------
/datasets_os/registration/register_flickr_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | # --------------------------------------------------------
3 | # X-Decoder -- Generalized Decoding for Pixel, Image, and Language
4 | # Copyright (c) 2022 Microsoft
5 | # Licensed under The MIT License [see LICENSE for details]
6 | # Modified by Xueyan Zou (xueyan@cs.wisc.edu)
7 | # --------------------------------------------------------
8 | import json
9 | import os
10 | import collections
11 |
12 | from detectron2.data import DatasetCatalog, MetadataCatalog
13 | from detectron2.utils.file_io import PathManager
14 |
15 |
16 | _PREDEFINED_SPLITS = {
17 |
18 | "flickr_val": (
19 | "flickr30k_entities/val", # image_root
20 | "final_flickr_separateGT_val.json", # # anno_path
21 | ),
22 | "flickr_train": (
23 | "flickr30k_entities/train", # image_root
24 | "final_flickr_separateGT_train.json", # # anno_path
25 | ),
26 | }
27 |
28 |
29 | def get_metadata():
30 | meta = {}
31 | return meta
32 |
33 |
34 | def load_flickr_json(image_root, annot_json, metadata):
35 |
36 |
37 | with PathManager.open(annot_json) as f:
38 | json_info = json.load(f)
39 |
40 | # build dictionary for grounding
41 | grd_dict = collections.defaultdict(list)
42 | # for grd_ann in json_info['annotations']:
43 | # image_id = int(grd_ann["image_id"])
44 | # grd_dict[image_id].append(grd_ann)
45 | for grd_ann in json_info['annotations']:
46 | image_id = int(grd_ann["image_id"])
47 | grd_dict[image_id].append(grd_ann)
48 |
49 | ret = []
50 | for image in json_info["images"]:
51 | image_id = int(image["id"])
52 | caption=image['caption']
53 | image_file = os.path.join(image_root, image['file_name'])
54 | grounding_anno = grd_dict[image_id]
55 | ret.append(
56 | {
57 | "file_name": image_file,
58 | "image_id": image_id,
59 | "grounding_info": grounding_anno,
60 | "caption": caption,
61 | }
62 | )
63 | assert len(ret), f"No images found in {image_root}!"
64 | assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
65 | return ret
66 |
67 |
68 | def register_flickr(
69 | name, metadata, image_root, annot_json):
70 | DatasetCatalog.register(
71 | name,
72 | lambda: load_flickr_json(image_root, annot_json, metadata),
73 | )
74 | MetadataCatalog.get(name).set(
75 | image_root=image_root,
76 | json_file=annot_json,
77 | evaluator_type="grounding_refcoco",
78 | ignore_label=255,
79 | label_divisor=1000,
80 | **metadata,
81 | )
82 |
83 |
84 | def register_all_flickr(root,anno_root):
85 | for (
86 | prefix,
87 | (image_root, anno_path),
88 | ) in _PREDEFINED_SPLITS.items():
89 | register_flickr(
90 | prefix,
91 | get_metadata(),
92 | os.path.join(root, image_root),
93 | os.path.join(root,anno_root, anno_path),
94 | )
95 |
96 | _root = os.getenv("DATASET", "datasets")
97 | ann_root = os.getenv("Flickr", "flickr30k_entities/annotations")
98 | register_all_flickr(_root,ann_root)
99 |
--------------------------------------------------------------------------------
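For reference, the split registered above can be pulled back out through detectron2's `DatasetCatalog`. A minimal sketch (not part of the repository), assuming detectron2 and the package's other dependencies are installed and that the `DATASET`/`Flickr` environment variables point at a local Flickr30K Entities download:

```python
# Minimal sketch: inspect the "flickr_val" split after the registration module above
# has been imported. Set the environment variables before the import, since the
# module reads them at import time.
import os

os.environ.setdefault("DATASET", "datasets")
os.environ.setdefault("Flickr", "flickr30k_entities/annotations")

from detectron2.data import DatasetCatalog, MetadataCatalog
import datasets_os.registration.register_flickr_dataset  # noqa: F401  (triggers registration)

records = DatasetCatalog.get("flickr_val")      # list of dicts built by load_flickr_json
meta = MetadataCatalog.get("flickr_val")
print(len(records), meta.evaluator_type)        # number of images, "grounding_refcoco"
print(records[0]["file_name"], records[0]["caption"])
```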
/datasets_os/registration/register_vg_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | # --------------------------------------------------------
3 | # X-Decoder -- Generalized Decoding for Pixel, Image, and Language
4 | # Copyright (c) 2022 Microsoft
5 | # Licensed under The MIT License [see LICENSE for details]
6 | # Modified by Xueyan Zou (xueyan@cs.wisc.edu)
7 | # --------------------------------------------------------
8 | import json
9 | import os
10 | import collections
11 |
12 | from detectron2.data import DatasetCatalog, MetadataCatalog
13 | from detectron2.utils.file_io import PathManager
14 |
15 |
16 | _PREDEFINED_SPLITS = {
17 |
18 | "vg_train": (
19 | "vg/images/", # image_root
20 | "train.json", # anno_path
21 | ),
22 | }
23 |
24 |
25 | def get_metadata():
26 | meta = {}
27 | return meta
28 |
29 |
30 | def load_vg_json(image_root, annot_json, metadata):
31 |
32 |
33 | with PathManager.open(annot_json) as f:
34 | json_info = json.load(f)
35 |
36 | # build dictionary for grounding
37 | grd_dict = collections.defaultdict(list)
38 | for grd_ann in json_info['annotations']:
39 | image_id = int(grd_ann["image_id"])
40 | grd_dict[image_id].append(grd_ann)
41 |
42 | ret = []
43 | for image in json_info["images"]:
44 | image_id = int(image["id"])
45 | image_file = os.path.join(image_root, image['file_name'])
46 | grounding_anno = grd_dict[image_id]
47 | ret.append(
48 | {
49 | "file_name": image_file,
50 | "image_id": image_id,
51 | "annotations": grounding_anno,
52 | }
53 | )
54 | assert len(ret), f"No images found in {image_root}!"
55 | assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
56 | return ret
57 |
58 |
59 | def register_vg(
60 | name, metadata, image_root, annot_json):
61 | DatasetCatalog.register(
62 | name,
63 | lambda: load_vg_json(image_root, annot_json, metadata),
64 | )
65 | MetadataCatalog.get(name).set(
66 | image_root=image_root,
67 | json_file=annot_json,
68 | evaluator_type="grounding_refcoco",
69 | ignore_label=255,
70 | label_divisor=1000,
71 | **metadata,
72 | )
73 |
74 |
75 | def register_all_vg(root,anno_root):
76 | for (
77 | prefix,
78 | (image_root, anno_path),
79 | ) in _PREDEFINED_SPLITS.items():
80 | register_vg(
81 | prefix,
82 | get_metadata(),
83 | os.path.join(root, image_root),
84 | os.path.join(root,anno_root, anno_path),
85 | )
86 |
87 | _root = os.getenv("DATASET", "datasets")
88 | anno_root = os.getenv("VG", "vg/annotations/")
89 | register_all_vg(_root,anno_root)
90 |
--------------------------------------------------------------------------------
/datasets_os/semseg_loader.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import scipy.io
3 | import numpy as np
4 |
5 | def load_semseg(filename, loader_type):
6 | if loader_type == 'PIL':
7 | semseg = np.array(Image.open(filename), dtype=int)  # np.int was removed in NumPy >= 1.24
8 | elif loader_type == 'MAT':
9 | semseg = scipy.io.loadmat(filename)['LabelMap']
10 | return semseg
--------------------------------------------------------------------------------
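A minimal usage sketch for `load_semseg` (not part of the repository); the file paths are hypothetical and only illustrate the two loader types:

```python
# Both loader types return an integer label map as a NumPy array:
# 'PIL' reads a label image, 'MAT' reads the 'LabelMap' entry of a .mat file.
from datasets_os.semseg_loader import load_semseg

labels_png = load_semseg("example_semseg.png", loader_type="PIL")
labels_mat = load_semseg("example_semseg.mat", loader_type="MAT")
print(labels_png.shape, labels_png.dtype)
```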
/docs/MODEL_ZOO.md:
--------------------------------------------------------------------------------
1 | # LLaVA-Grounding Checkpoints
2 |
3 | We will continuously update the model zoo.
4 |
5 | | Model Name | LLM version | Model Config | Weights |
6 | |------------|:---------------:|:-------------:|:-----------:|
7 | | LLaVA_Grounding_v0_7b | vicuna-v0-7b | [[grounding-module-cfg](https://github.com/UX-Decoder/LLaVA-Grounding/blob/main/configs/openseed/openseed_swint_lang_joint_2st_visual_prompt.yaml), [visual-prompt-module-cfg](https://github.com/UX-Decoder/LLaVA-Grounding/blob/main/configs/semsam/visual_prompt_encoder.yaml)] (0.3B in total) | [HuggingFace](https://huggingface.co/Haozhangcx/llava_grounding_gd_vp) |
8 |
--------------------------------------------------------------------------------
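The released checkpoint in the table can be fetched with `huggingface_hub`. A minimal sketch (not part of the repository), assuming `huggingface_hub` is installed; the repo id is the one linked above:

```python
# Download the released LLaVA-Grounding checkpoint listed in MODEL_ZOO.md.
from huggingface_hub import snapshot_download

ckpt_dir = snapshot_download(repo_id="Haozhangcx/llava_grounding_gd_vp")
print("checkpoint files downloaded to:", ckpt_dir)
```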
/gradio_demo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/gradio_demo/__init__.py
--------------------------------------------------------------------------------
/gradio_demo/examples/cow_motor.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/gradio_demo/examples/cow_motor.jpg
--------------------------------------------------------------------------------
/gradio_demo/examples/dog_sleep.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/gradio_demo/examples/dog_sleep.jpg
--------------------------------------------------------------------------------
/gradio_demo/examples/meeting.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/gradio_demo/examples/meeting.jpg
--------------------------------------------------------------------------------
/gradio_demo/examples/pizza.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/gradio_demo/examples/pizza.jpg
--------------------------------------------------------------------------------
/gradio_demo/examples/queen_and_dragon.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/gradio_demo/examples/queen_and_dragon.jpg
--------------------------------------------------------------------------------
/images/demo_cli.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/images/demo_cli.gif
--------------------------------------------------------------------------------
/images/llava_example_cmp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/images/llava_example_cmp.png
--------------------------------------------------------------------------------
/images/llava_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/images/llava_logo.png
--------------------------------------------------------------------------------
/llava/__init__.py:
--------------------------------------------------------------------------------
1 | from .model import LlavaLlamaForCausalLM
2 |
--------------------------------------------------------------------------------
/llava/constants.py:
--------------------------------------------------------------------------------
1 | CONTROLLER_HEART_BEAT_EXPIRATION = 30
2 | WORKER_HEART_BEAT_INTERVAL = 15
3 |
4 | LOGDIR = "."
5 |
6 | # Model Constants
7 | IGNORE_INDEX = -100
8 | IMAGE_TOKEN_INDEX = -200
9 | DEFAULT_IMAGE_TOKEN = "<image>"
10 | DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
11 | DEFAULT_IM_START_TOKEN = "<im_start>"
12 | DEFAULT_IM_END_TOKEN = "<im_end>"
13 |
--------------------------------------------------------------------------------
/llava/eval/eval_gpt_review.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import os
4 |
5 | import openai
6 | import tqdm
7 | import ray
8 | import time
9 |
10 | NUM_SECONDS_TO_SLEEP = 3
11 |
12 | @ray.remote(num_cpus=4)
13 | def get_eval(content: str, max_tokens: int):
14 | while True:
15 | try:
16 | response = openai.ChatCompletion.create(
17 | model='gpt-4',
18 | messages=[{
19 | 'role': 'system',
20 | 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
21 | }, {
22 | 'role': 'user',
23 | 'content': content,
24 | }],
25 | temperature=0.2, # TODO: figure out which temperature is best for evaluation
26 | max_tokens=max_tokens,
27 | )
28 | break
29 | except openai.error.RateLimitError:
30 | pass
31 | except Exception as e:
32 | print(e)
33 | time.sleep(NUM_SECONDS_TO_SLEEP)
34 |
35 | print('success!')
36 | return response['choices'][0]['message']['content']
37 |
38 |
39 | def parse_score(review):
40 | try:
41 | score_pair = review.split('\n')[0]
42 | score_pair = score_pair.replace(',', ' ')
43 | sp = score_pair.split(' ')
44 | if len(sp) == 2:
45 | return [float(sp[0]), float(sp[1])]
46 | else:
47 | print('error', review)
48 | return [-1, -1]
49 | except Exception as e:
50 | print(e)
51 | print('error', review)
52 | return [-1, -1]
53 |
54 |
55 | if __name__ == '__main__':
56 | parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
57 | parser.add_argument('-q', '--question')
58 | # parser.add_argument('-a', '--answer')
59 | parser.add_argument('-a', '--answer-list', nargs='+', default=[])
60 | parser.add_argument('-r', '--rule')
61 | parser.add_argument('-o', '--output')
62 | parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
63 | args = parser.parse_args()
64 |
65 | ray.init()
66 |
67 | f_q = open(os.path.expanduser(args.question))
68 | f_ans1 = open(os.path.expanduser(args.answer_list[0]))
69 | f_ans2 = open(os.path.expanduser(args.answer_list[1]))
70 | rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
71 |
72 | review_file = open(f'{args.output}', 'w')
73 |
74 | js_list = []
75 | handles = []
76 | idx = 0
77 | for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
78 | # if idx == 1:
79 | # break
80 |
81 | ques = json.loads(ques_js)
82 | ans1 = json.loads(ans1_js)
83 | ans2 = json.loads(ans2_js)
84 |
85 | category = json.loads(ques_js)['category']
86 | if category in rule_dict:
87 | rule = rule_dict[category]
88 | else:
89 | rule = rule_dict['default']
90 | prompt = rule['prompt']
91 | role = rule['role']
92 | content = (f'[Question]\n{ques["text"]}\n\n'
93 | f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
94 | f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
95 | f'[System]\n{prompt}\n\n')
96 | js_list.append({
97 | 'id': idx+1,
98 | 'question_id': ques['question_id'],
99 | 'answer1_id': ans1['answer_id'],
100 | 'answer2_id': ans2['answer_id'],
101 | 'category': category})
102 | idx += 1
103 | handles.append(get_eval.remote(content, args.max_tokens))
104 | # To avoid the rate limit set by OpenAI
105 | time.sleep(NUM_SECONDS_TO_SLEEP)
106 |
107 | reviews = ray.get(handles)
108 | for idx, review in enumerate(reviews):
109 | scores = parse_score(review)
110 | js_list[idx]['content'] = review
111 | js_list[idx]['tuple'] = scores
112 | review_file.write(json.dumps(js_list[idx]) + '\n')
113 | review_file.close()
114 |
--------------------------------------------------------------------------------
/llava/eval/eval_science_qa.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import os
4 | import re
5 | import random
6 |
7 |
8 | def get_args():
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument('--base-dir', type=str)
11 | parser.add_argument('--result-file', type=str)
12 | parser.add_argument('--output-file', type=str)
13 | parser.add_argument('--output-result', type=str)
14 | parser.add_argument('--split', type=str, default='test')
15 | parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
16 | return parser.parse_args()
17 |
18 |
19 | def convert_caps(results):
20 | fakecaps = []
21 | for result in results:
22 | image_id = result['question_id']
23 | caption = result['text']
24 | fakecaps.append({"image_id": int(image_id), "caption": caption})
25 | return fakecaps
26 |
27 |
28 | def get_pred_idx(prediction, choices, options):
29 | """
30 | Get the index (e.g. 2) from the prediction (e.g. 'C')
31 | """
32 | if prediction in options[:len(choices)]:
33 | return options.index(prediction)
34 | else:
35 | return random.choice(range(len(choices)))
36 |
37 |
38 | if __name__ == "__main__":
39 | args = get_args()
40 |
41 | base_dir = args.base_dir
42 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
43 | problems = json.load(open(os.path.join(base_dir, "problems.json")))
44 | predictions = [json.loads(line) for line in open(args.result_file)]
45 | predictions = {pred['question_id']: pred for pred in predictions}
46 | split_problems = {idx: problems[idx] for idx in split_indices}
47 |
48 | results = {'correct': [], 'incorrect': []}
49 | sqa_results = {}
50 | sqa_results['acc'] = None
51 | sqa_results['correct'] = None
52 | sqa_results['count'] = None
53 | sqa_results['results'] = {}
54 | sqa_results['outputs'] = {}
55 |
56 | for prob_id, prob in split_problems.items():
57 | if prob_id not in predictions:
58 | continue
59 | pred = predictions[prob_id]
60 | pred_text = pred['text']
61 |
62 | pattern = re.compile(r'The answer is ([A-Z]).')
63 | res = pattern.findall(pred_text)
64 | if len(res) == 1:
65 | answer = res[0] # 'A', 'B', ...
66 | else:
67 | answer = "FAILED"
68 |
69 | pred_idx = get_pred_idx(answer, prob['choices'], args.options)
70 |
71 | analysis = {
72 | 'question_id': prob_id,
73 | 'parsed_ans': answer,
74 | 'ground_truth': args.options[prob['answer']],
75 | 'question': pred['prompt'],
76 | 'pred': pred_text,
77 | 'is_multimodal': '<image>' in pred['prompt'],
78 | }
79 |
80 | sqa_results['results'][prob_id] = get_pred_idx(answer, prob['choices'], args.options)
81 | sqa_results['outputs'][prob_id] = pred_text
82 |
83 | if pred_idx == prob['answer']:
84 | results['correct'].append(analysis)
85 | else:
86 | results['incorrect'].append(analysis)
87 |
88 | correct = len(results['correct'])
89 | total = len(results['correct']) + len(results['incorrect'])
90 | print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%')
91 |
92 | sqa_results['acc'] = correct / total * 100
93 | sqa_results['correct'] = correct
94 | sqa_results['count'] = total
95 |
96 | with open(args.output_file, 'w') as f:
97 | json.dump(results, f, indent=2)
98 | with open(args.output_result, 'w') as f:
99 | json.dump(sqa_results, f, indent=2)
100 |
--------------------------------------------------------------------------------
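A minimal sketch (not part of the repository) of the answer-parsing step above: the regex extracts the predicted option letter, and `get_pred_idx` falls back to a random choice when parsing fails. The question choices below are hypothetical:

```python
import re
import random

pattern = re.compile(r'The answer is ([A-Z]).')
options = ["A", "B", "C", "D", "E"]
choices = ["granite", "basalt", "obsidian"]      # hypothetical 3-way question

res = pattern.findall("The answer is B. Basalt forms from lava.")
answer = res[0] if len(res) == 1 else "FAILED"   # -> "B"

if answer in options[:len(choices)]:
    pred_idx = options.index(answer)             # -> 1
else:
    pred_idx = random.choice(range(len(choices)))  # random fallback, as in get_pred_idx
print(answer, pred_idx)
```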
/llava/eval/eval_science_qa_gpt4.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import os
4 | import re
5 | import random
6 | from collections import defaultdict
7 |
8 |
9 | def get_args():
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument('--base-dir', type=str)
12 | parser.add_argument('--gpt4-result', type=str)
13 | parser.add_argument('--our-result', type=str)
14 | parser.add_argument('--split', type=str, default='test')
15 | parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
16 | return parser.parse_args()
17 |
18 |
19 | def convert_caps(results):
20 | fakecaps = []
21 | for result in results:
22 | image_id = result['question_id']
23 | caption = result['text']
24 | fakecaps.append({"image_id": int(image_id), "caption": caption})
25 | return fakecaps
26 |
27 |
28 | def get_pred_idx(prediction, choices, options):
29 | """
30 | Get the index (e.g. 2) from the prediction (e.g. 'C')
31 | """
32 | if prediction in options[:len(choices)]:
33 | return options.index(prediction)
34 | else:
35 | return random.choice(range(len(choices)))
36 |
37 |
38 | if __name__ == "__main__":
39 | args = get_args()
40 |
41 | base_dir = args.base_dir
42 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split]
43 | problems = json.load(open(os.path.join(base_dir, "problems.json")))
44 | our_predictions = [json.loads(line) for line in open(args.our_result)]
45 | our_predictions = {pred['question_id']: pred for pred in our_predictions}
46 | split_problems = {idx: problems[idx] for idx in split_indices}
47 |
48 | gpt4_predictions = json.load(open(args.gpt4_result))['outputs']
49 |
50 | results = defaultdict(lambda: 0)
51 |
52 | for prob_id, prob in split_problems.items():
53 | if prob_id not in our_predictions:
54 | continue
55 | if prob_id not in gpt4_predictions:
56 | continue
57 | our_pred = our_predictions[prob_id]['text']
58 | gpt4_pred = gpt4_predictions[prob_id]
59 |
60 | pattern = re.compile(r'The answer is ([A-Z]).')
61 | our_res = pattern.findall(our_pred)
62 | if len(our_res) == 1:
63 | our_answer = our_res[0] # 'A', 'B', ...
64 | else:
65 | our_answer = "FAILED"
66 | gpt4_res = pattern.findall(gpt4_pred)
67 | if len(gpt4_res) == 1:
68 | gpt4_answer = gpt4_res[0] # 'A', 'B', ...
69 | else:
70 | gpt4_answer = "FAILED"
71 |
72 | our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options)
73 | gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options)
74 |
75 | if gpt4_answer == 'FAILED':
76 | results['gpt4_failed'] += 1
77 | # continue
78 | gpt4_pred_idx = our_pred_idx
79 | # if our_pred_idx != prob['answer']:
80 | # print(our_predictions[prob_id]['prompt'])
81 | # print('-----------------')
82 | # print(f'LECTURE: {prob["lecture"]}')
83 | # print(f'SOLUTION: {prob["solution"]}')
84 | # print('=====================')
85 | else:
86 | # continue
87 | pass
88 | # gpt4_pred_idx = our_pred_idx
89 |
90 | if gpt4_pred_idx == prob['answer']:
91 | results['correct'] += 1
92 | else:
93 | results['incorrect'] += 1
94 |
95 |
96 | if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']:
97 | results['correct_upperbound'] += 1
98 |
99 | correct = results['correct']
100 | total = results['correct'] + results['incorrect']
101 | print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%')
102 | print(f'Total: {total}, Correct (upper): {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%')
103 | print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%')
104 |
105 |
--------------------------------------------------------------------------------
/llava/eval/model_qa.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria
3 | import torch
4 | import os
5 | import json
6 | from tqdm import tqdm
7 | import shortuuid
8 |
9 | from llava.conversation import default_conversation
10 | from llava.utils import disable_torch_init
11 |
12 |
13 | # new stopping implementation
14 | class KeywordsStoppingCriteria(StoppingCriteria):
15 | def __init__(self, keywords, tokenizer, input_ids):
16 | self.keywords = keywords
17 | self.tokenizer = tokenizer
18 | self.start_len = None
19 | self.input_ids = input_ids
20 |
21 | def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
22 | if self.start_len is None:
23 | self.start_len = self.input_ids.shape[1]
24 | else:
25 | outputs = self.tokenizer.batch_decode(output_ids[:, self.start_len:], skip_special_tokens=True)[0]
26 | for keyword in self.keywords:
27 | if keyword in outputs:
28 | return True
29 | return False
30 |
31 |
32 | @torch.inference_mode()
33 | def eval_model(model_name, questions_file, answers_file):
34 | # Model
35 | disable_torch_init()
36 | model_name = os.path.expanduser(model_name)
37 | tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
38 | model = AutoModelForCausalLM.from_pretrained(model_name,
39 | torch_dtype=torch.float16).cuda()
40 |
41 |
42 | ques_file = open(os.path.expanduser(questions_file), "r")
43 | ans_file = open(os.path.expanduser(answers_file), "w")
44 | for i, line in enumerate(tqdm(ques_file)):
45 | idx = json.loads(line)["question_id"]
46 | qs = json.loads(line)["text"]
47 | cat = json.loads(line)["category"]
48 | conv = default_conversation.copy()
49 | conv.append_message(conv.roles[0], qs)
50 | prompt = conv.get_prompt()
51 | inputs = tokenizer([prompt])
52 | input_ids = torch.as_tensor(inputs.input_ids).cuda()
53 | stopping_criteria = KeywordsStoppingCriteria([conv.sep], tokenizer, input_ids)
54 | output_ids = model.generate(
55 | input_ids,
56 | do_sample=True,
57 | use_cache=True,
58 | temperature=0.7,
59 | max_new_tokens=1024,
60 | stopping_criteria=[stopping_criteria])
61 | outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
62 | try:
63 | index = outputs.index(conv.sep, len(prompt))
64 | except ValueError:
65 | outputs += conv.sep
66 | index = outputs.index(conv.sep, len(prompt))
67 |
68 | outputs = outputs[len(prompt) + len(conv.roles[1]) + 2:index].strip()
69 | ans_id = shortuuid.uuid()
70 | ans_file.write(json.dumps({"question_id": idx,
71 | "text": outputs,
72 | "answer_id": ans_id,
73 | "model_id": model_name,
74 | "metadata": {}}) + "\n")
75 | ans_file.flush()
76 | ans_file.close()
77 |
78 | if __name__ == "__main__":
79 | parser = argparse.ArgumentParser()
80 | parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
81 | parser.add_argument("--question-file", type=str, default="tables/question.jsonl")
82 | parser.add_argument("--answers-file", type=str, default="answer.jsonl")
83 | args = parser.parse_args()
84 |
85 | eval_model(args.model_name, args.question_file, args.answers_file)
86 |
--------------------------------------------------------------------------------
/llava/eval/qa_baseline_gpt35.py:
--------------------------------------------------------------------------------
1 | """Generate answers with GPT-3.5"""
2 | # Note: you need to be using OpenAI Python v0.27.0 for the code below to work
3 | import argparse
4 | import json
5 | import os
6 | import time
7 | import concurrent.futures
8 |
9 | import openai
10 | import tqdm
11 | import shortuuid
12 |
13 | MODEL = 'gpt-3.5-turbo'
14 | MODEL_ID = 'gpt-3.5-turbo:20230327'
15 |
16 | def get_answer(question_id: int, question: str, max_tokens: int):
17 | ans = {
18 | 'answer_id': shortuuid.uuid(),
19 | 'question_id': question_id,
20 | 'model_id': MODEL_ID,
21 | }
22 | for _ in range(3):
23 | try:
24 | response = openai.ChatCompletion.create(
25 | model=MODEL,
26 | messages=[{
27 | 'role': 'system',
28 | 'content': 'You are a helpful assistant.'
29 | }, {
30 | 'role': 'user',
31 | 'content': question,
32 | }],
33 | max_tokens=max_tokens,
34 | )
35 | ans['text'] = response['choices'][0]['message']['content']
36 | return ans
37 | except Exception as e:
38 | print('[ERROR]', e)
39 | ans['text'] = '#ERROR#'
40 | time.sleep(1)
41 | return ans
42 |
43 |
44 | if __name__ == '__main__':
45 | parser = argparse.ArgumentParser(description='ChatGPT answer generation.')
46 | parser.add_argument('-q', '--question')
47 | parser.add_argument('-o', '--output')
48 | parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
49 | args = parser.parse_args()
50 |
51 | questions_dict = {}
52 | with open(os.path.expanduser(args.question)) as f:
53 | for line in f:
54 | if not line:
55 | continue
56 | q = json.loads(line)
57 | questions_dict[q['question_id']] = q['text']
58 |
59 | answers = []
60 |
61 | with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
62 | futures = []
63 | for qid, question in questions_dict.items():
64 | future = executor.submit(get_answer, qid, question, args.max_tokens)
65 | futures.append(future)
66 |
67 | for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
68 | answers.append(future.result())
69 |
70 | answers.sort(key=lambda x: x['question_id'])
71 |
72 | with open(os.path.expanduser(args.output), 'w') as f:
73 | table = [json.dumps(ans) for ans in answers]
74 | f.write('\n'.join(table))
75 |
--------------------------------------------------------------------------------
/llava/eval/run_llava.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import torch
3 |
4 | from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
5 | from llava.conversation import conv_templates, SeparatorStyle
6 | from llava.model.builder import load_pretrained_model
7 | from llava.utils import disable_torch_init
8 | from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
9 |
10 | from PIL import Image
11 |
12 | import requests
13 | from PIL import Image
14 | from io import BytesIO
15 |
16 |
17 | def load_image(image_file):
18 | if image_file.startswith('http') or image_file.startswith('https'):
19 | response = requests.get(image_file)
20 | image = Image.open(BytesIO(response.content)).convert('RGB')
21 | else:
22 | image = Image.open(image_file).convert('RGB')
23 | return image
24 |
25 |
26 | def eval_model(args):
27 | # Model
28 | disable_torch_init()
29 |
30 | model_name = get_model_name_from_path(args.model_path)
31 | tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name)
32 |
33 | qs = args.query
34 | if model.config.mm_use_im_start_end:
35 | qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
36 | else:
37 | qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
38 |
39 | if 'llama-2' in model_name.lower():
40 | conv_mode = "llava_llama_2"
41 | elif "v1" in model_name.lower():
42 | conv_mode = "llava_v1"
43 | elif "mpt" in model_name.lower():
44 | conv_mode = "mpt"
45 | else:
46 | conv_mode = "llava_v0"
47 |
48 | if args.conv_mode is not None and conv_mode != args.conv_mode:
49 | print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
50 | else:
51 | args.conv_mode = conv_mode
52 |
53 | conv = conv_templates[args.conv_mode].copy()
54 | conv.append_message(conv.roles[0], qs)
55 | conv.append_message(conv.roles[1], None)
56 | prompt = conv.get_prompt()
57 |
58 | image = load_image(args.image_file)
59 | image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'].half().cuda()
60 |
61 | input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
62 |
63 | stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
64 | keywords = [stop_str]
65 | stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
66 |
67 | with torch.inference_mode():
68 | output_ids = model.generate(
69 | input_ids,
70 | images=image_tensor,
71 | do_sample=True,
72 | temperature=0.2,
73 | max_new_tokens=1024,
74 | use_cache=True,
75 | stopping_criteria=[stopping_criteria])
76 |
77 | input_token_len = input_ids.shape[1]
78 | n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
79 | if n_diff_input_output > 0:
80 | print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
81 | outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
82 | outputs = outputs.strip()
83 | if outputs.endswith(stop_str):
84 | outputs = outputs[:-len(stop_str)]
85 | outputs = outputs.strip()
86 | print(outputs)
87 |
88 | if __name__ == "__main__":
89 | parser = argparse.ArgumentParser()
90 | parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
91 | parser.add_argument("--model-base", type=str, default=None)
92 | parser.add_argument("--image-file", type=str, required=True)
93 | parser.add_argument("--query", type=str, required=True)
94 | parser.add_argument("--conv-mode", type=str, default=None)
95 | args = parser.parse_args()
96 |
97 | eval_model(args)
98 |
--------------------------------------------------------------------------------
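A minimal sketch (not part of the repository) of calling `eval_model` programmatically with the same fields the argparse block defines; the checkpoint path and query are hypothetical, and a CUDA GPU is required since the script moves tensors with `.cuda()`:

```python
from types import SimpleNamespace
from llava.eval.run_llava import eval_model

args = SimpleNamespace(
    model_path="path/to/llava-checkpoint",   # hypothetical local checkpoint
    model_base=None,
    image_file="images/llava_logo.png",
    query="What is in this image?",
    conv_mode=None,                          # let the script infer the conversation template
)
eval_model(args)
```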
/llava/eval/summarize_gpt_review.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from collections import defaultdict
4 |
5 | import numpy as np
6 |
7 | import argparse
8 |
9 | def parse_args():
10 | parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
11 | parser.add_argument('-d', '--dir', default=None)
12 | parser.add_argument('-f', '--files', nargs='*', default=None)
13 | parser.add_argument('-i', '--ignore', nargs='*', default=None)
14 | return parser.parse_args()
15 |
16 |
17 | if __name__ == '__main__':
18 | args = parse_args()
19 |
20 | if args.ignore is not None:
21 | args.ignore = [int(x) for x in args.ignore]
22 |
23 | if args.files is not None and len(args.files) > 0:
24 | review_files = args.files
25 | else:
26 | review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_'))]
27 |
28 | for review_file in sorted(review_files):
29 | config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '')
30 | scores = defaultdict(list)
31 | print(config)
32 | with open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f:
33 | for review_str in f:
34 | review = json.loads(review_str)
35 | if args.ignore is not None and review['question_id'] in args.ignore:
36 | continue
37 | if 'category' in review:
38 | scores[review['category']].append(review['tuple'])
39 | scores['all'].append(review['tuple'])
40 | else:
41 | if 'tuple' in review:
42 | scores['all'].append(review['tuple'])
43 | else:
44 | scores['all'].append(review['score'])
45 | for k, v in sorted(scores.items()):
46 | stats = np.asarray(v).mean(0).tolist()
47 | stats = [round(x, 3) for x in stats]
48 | # print(k, stats, round(stats[1]/stats[0]*100, 1))
49 | print(k, round(stats[1]/stats[0]*100, 1))
50 | print('=================================')
51 |
--------------------------------------------------------------------------------
/llava/eval/table/model.jsonl:
--------------------------------------------------------------------------------
1 | {"model_id": "vicuna-13b:20230322-clean-lang", "model_name": "vicuna-13b", "model_version": "20230322-clean-lang", "model_metadata": "vicuna-13b-20230322-clean-lang"}
2 | {"model_id": "alpaca-13b:v1", "model_name": "alpaca-13b", "model_version": "v1", "model_metadata": "alpaca-13b"}
3 | {"model_id": "llama-13b:v1", "model_name": "llama-13b", "model_version": "v1", "model_metadata": "hf-llama-13b"}
4 | {"model_id": "bard:20230327", "model_name": "bard", "model_version": "20230327", "model_metadata": "Google Bard 20230327"}
5 | {"model_id": "gpt-3.5-turbo:20230327", "model_name": "gpt-3.5-turbo", "model_version": "20230327", "model_metadata": "OpenAI ChatGPT gpt-3.5-turbo Chat Completion"}
6 |
--------------------------------------------------------------------------------
/llava/eval/table/reviewer.jsonl:
--------------------------------------------------------------------------------
1 | {"reviewer_id": "gpt-4-0328-default", "prompt_id": 1, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for general questions"}
2 | {"reviewer_id": "gpt-4-0328-coding", "prompt_id": 2, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for coding questions"}
3 | {"reviewer_id": "gpt-4-0328-math", "prompt_id": 3, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for math questions"}
4 | {"reviewer_id": "gpt-4-0417-visual", "prompt_id": 4, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for math questions"}
5 |
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/alpaca.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/llava/eval/webpage/figures/alpaca.png
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/bard.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/llava/eval/webpage/figures/bard.jpg
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/chatgpt.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/llama.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/llava/eval/webpage/figures/llama.jpg
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/swords_FILL0_wght300_GRAD0_opsz48.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/vicuna.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/llava/eval/webpage/figures/vicuna.jpeg
--------------------------------------------------------------------------------
/llava/eval/webpage/styles.css:
--------------------------------------------------------------------------------
1 | body {
2 | font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
3 | background-color: #f8f9fa;
4 | }
5 |
6 | .navbar-dark .navbar-nav .nav-link {
7 | color: #f1cf68;
8 | font-size: 1.1rem;
9 | padding: 0.5rem 0.6rem;
10 | }
11 |
12 | .card-header {
13 | font-weight: bold;
14 | }
15 |
16 | .card {
17 | box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
18 | transition: 0.3s;
19 | }
20 |
21 | .card:hover {
22 | box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
23 | }
24 |
25 | button {
26 | transition: background-color 0.3s;
27 | }
28 |
29 | button:hover {
30 | background-color: #007bff;
31 | }
32 |
33 | @media (max-width: 767px) {
34 | .form-row .form-group {
35 | margin-bottom: 10px;
36 | }
37 | }
38 |
39 | /* Extra styles */
40 |
41 | .expandable-card .card-text-container {
42 | max-height: 200px;
43 | overflow-y: hidden;
44 | position: relative;
45 | }
46 |
47 | .expandable-card.expanded .card-text-container {
48 | max-height: none;
49 | }
50 |
51 | .expand-btn {
52 | position: relative;
53 | display: none;
54 | background-color: rgba(255, 255, 255, 0.8);
55 | color: #510c75;
56 | border-color: transparent;
57 | }
58 |
59 | .expand-btn:hover {
60 | background-color: rgba(200, 200, 200, 0.8);
61 | text-decoration: none;
62 | border-color: transparent;
63 | color: #510c75;
64 | }
65 |
66 | .expand-btn:focus {
67 | outline: none;
68 | text-decoration: none;
69 | }
70 |
71 | .expandable-card:not(.expanded) .card-text-container:after {
72 | content: "";
73 | position: absolute;
74 | bottom: 0;
75 | left: 0;
76 | width: 100%;
77 | height: 90px;
78 | background: linear-gradient(rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 1));
79 | }
80 |
81 | .expandable-card:not(.expanded) .expand-btn {
82 | margin-top: -40px;
83 | }
84 |
85 | .card-body {
86 | padding-bottom: 5px;
87 | }
88 |
89 | .vertical-flex-layout {
90 | justify-content: center;
91 | align-items: center;
92 | height: 100%;
93 | display: flex;
94 | flex-direction: column;
95 | gap: 5px;
96 | }
97 |
98 | .figure-img {
99 | max-width: 100%;
100 | height: auto;
101 | }
102 |
103 | .adjustable-font-size {
104 | font-size: calc(0.5rem + 2vw);
105 | }
106 |
--------------------------------------------------------------------------------
/llava/mm_utils.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | from io import BytesIO
3 | import base64
4 |
5 | import torch
6 | from transformers import StoppingCriteria
7 | from llava.constants import IMAGE_TOKEN_INDEX
8 |
9 |
10 | def load_image_from_base64(image):
11 | return Image.open(BytesIO(base64.b64decode(image)))
12 |
13 |
14 | def process_images(images, image_processor, model_cfg):
15 | return image_processor(images, return_tensors='pt')['pixel_values']
16 |
17 |
18 | def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
19 | prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]
20 |
21 | def insert_separator(X, sep):
22 | return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
23 |
24 | input_ids = []
25 | offset = 0
26 | if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
27 | offset = 1
28 | input_ids.append(prompt_chunks[0][0])
29 |
30 | for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
31 | input_ids.extend(x[offset:])
32 |
33 | if return_tensors is not None:
34 | if return_tensors == 'pt':
35 | return torch.tensor(input_ids, dtype=torch.long)
36 | raise ValueError(f'Unsupported tensor type: {return_tensors}')
37 | return input_ids
38 |
39 | def tokenizer_image_token_inter(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
40 | prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]
41 |
42 | def insert_separator(X, sep):
43 | return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
44 |
45 | input_ids = []
46 | offset = 0
47 | if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
48 | offset = 1
49 | input_ids.append(prompt_chunks[0][0])
50 |
51 | for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
52 | input_ids.extend(x[offset:])
53 |
54 | if return_tensors is not None:
55 | if return_tensors == 'pt':
56 | return torch.tensor(input_ids, dtype=torch.long)
57 | raise ValueError(f'Unsupported tensor type: {return_tensors}')
58 | return input_ids
59 |
60 | def get_model_name_from_path(model_path):
61 | model_path = model_path.strip("/")
62 | model_paths = model_path.split("/")
63 | if model_paths[-1].startswith('checkpoint-'):
64 | return model_paths[-2] + "_" + model_paths[-1]
65 | else:
66 | return model_paths[-1]
67 |
68 |
69 |
70 |
71 | class KeywordsStoppingCriteria(StoppingCriteria):
72 | def __init__(self, keywords, tokenizer, input_ids):
73 | self.keywords = keywords
74 | self.keyword_ids = []
75 | for keyword in keywords:
76 | cur_keyword_ids = tokenizer(keyword).input_ids
77 | if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
78 | cur_keyword_ids = cur_keyword_ids[1:]
79 | self.keyword_ids.append(torch.tensor(cur_keyword_ids))
80 | self.tokenizer = tokenizer
81 | self.start_len = input_ids.shape[1]
82 |
83 | def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
84 | assert output_ids.shape[0] == 1, "Only support batch size 1 (yet)" # TODO
85 | offset = min(output_ids.shape[1] - self.start_len, 3)
86 | self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
87 | for keyword_id in self.keyword_ids:
88 | if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():
89 | return True
90 | outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
91 | for keyword in self.keywords:
92 | if keyword in outputs:
93 | return True
94 | return False
95 |
--------------------------------------------------------------------------------
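A minimal sketch (not part of the repository) of what `tokenizer_image_token` produces, assuming the repository's dependencies are installed; the tokenizer name is only illustrative:

```python
# tokenizer_image_token splits the prompt on the "<image>" placeholder and splices
# IMAGE_TOKEN_INDEX (-200) into the token ids where the image features will go.
from transformers import AutoTokenizer
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from llava.mm_utils import tokenizer_image_token

tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.5", use_fast=False)
prompt = DEFAULT_IMAGE_TOKEN + "\nWhat is shown in this image?"
ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
print(ids)  # BOS id, then -200 at the image position, then the text token ids
```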
/llava/model/__init__.py:
--------------------------------------------------------------------------------
1 | from .language_model.llava_llama_gd import LlavaLlamaForCausalLM,LlavaLlamaForCausalLM_gd,LlavaLlamaForCausalLM_joint,LlavaLlamaForCausalLM_joint_2st, LlavaConfig\
2 | ,LlavaLlamaForCausalLM_joint_2st_it_only_ref_instr
3 | from .language_model.llava_mpt import LlavaMPTForCausalLM, LlavaMPTConfig
--------------------------------------------------------------------------------
/llava/model/apply_delta.py:
--------------------------------------------------------------------------------
1 | """
2 | Usage:
3 | python3 -m llava.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta
4 | """
5 | import argparse
6 |
7 | import torch
8 | from tqdm import tqdm
9 | from transformers import AutoTokenizer, AutoModelForCausalLM
10 | from llava import LlavaLlamaForCausalLM
11 |
12 |
13 | def apply_delta(base_model_path, target_model_path, delta_path):
14 | print("Loading base model")
15 | base = AutoModelForCausalLM.from_pretrained(
16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
17 |
18 | print("Loading delta")
19 | delta = LlavaLlamaForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
20 | delta_tokenizer = AutoTokenizer.from_pretrained(delta_path)
21 |
22 | print("Applying delta")
23 | for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
24 | if name not in base.state_dict():
25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model'
26 | continue
27 | if param.data.shape == base.state_dict()[name].shape:
28 | param.data += base.state_dict()[name]
29 | else:
30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], \
31 | f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}'
32 | bparam = base.state_dict()[name]
33 | param.data[:bparam.shape[0], :bparam.shape[1]] += bparam
34 |
35 | print("Saving target model")
36 | delta.save_pretrained(target_model_path)
37 | delta_tokenizer.save_pretrained(target_model_path)
38 |
39 |
40 | if __name__ == "__main__":
41 | parser = argparse.ArgumentParser()
42 | parser.add_argument("--base-model-path", type=str, required=True)
43 | parser.add_argument("--target-model-path", type=str, required=True)
44 | parser.add_argument("--delta-path", type=str, required=True)
45 |
46 | args = parser.parse_args()
47 |
48 | apply_delta(args.base_model_path, args.target_model_path, args.delta_path)
49 |
--------------------------------------------------------------------------------
/llava/model/consolidate.py:
--------------------------------------------------------------------------------
1 | """
2 | Usage:
3 | python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate
4 | """
5 | import argparse
6 |
7 | import torch
8 | from transformers import AutoTokenizer, AutoModelForCausalLM
9 | from llava.model import *
10 | from llava.model.utils import auto_upgrade
11 |
12 |
13 | def consolidate_ckpt(src_path, dst_path):
14 | print("Loading model")
15 | auto_upgrade(src_path)
16 | src_model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
17 | src_tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False)
18 | src_model.save_pretrained(dst_path)
19 | src_tokenizer.save_pretrained(dst_path)
20 |
21 |
22 | if __name__ == "__main__":
23 | parser = argparse.ArgumentParser()
24 | parser.add_argument("--src", type=str, required=True)
25 | parser.add_argument("--dst", type=str, required=True)
26 |
27 | args = parser.parse_args()
28 |
29 | consolidate_ckpt(args.src, args.dst)
30 |
--------------------------------------------------------------------------------
/llava/model/language_model/mpt/adapt_tokenizer.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 | from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
3 | Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
4 | NUM_SENTINEL_TOKENS: int = 100
5 |
6 | def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
7 | """Adds sentinel tokens and padding token (if missing).
8 |
9 | Expands the tokenizer vocabulary to include sentinel tokens
10 | used in mixture-of-denoiser tasks as well as a padding token.
11 |
12 | All added tokens are added as special tokens. No tokens are
13 | added if sentinel tokens and padding token already exist.
14 | """
15 |     sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]
16 |     tokenizer.add_tokens(sentinels_to_add, special_tokens=True)
17 |     if tokenizer.pad_token is None:
18 |         tokenizer.add_tokens('<pad>', special_tokens=True)
19 |         tokenizer.pad_token = '<pad>'
20 |         assert tokenizer.pad_token_id is not None
21 |     sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])
22 | _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids
23 | tokenizer.sentinel_token_ids = _sentinel_token_ids
24 |
25 | class AutoTokenizerForMOD(AutoTokenizer):
26 | """AutoTokenizer + Adaptation for MOD.
27 |
28 | A simple wrapper around AutoTokenizer to make instantiating
29 | an MOD-adapted tokenizer a bit easier.
30 |
31 |     MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),
32 | a padding token, and a property to get the token ids of the
33 | sentinel tokens.
34 | """
35 |
36 | @classmethod
37 | def from_pretrained(cls, *args, **kwargs):
38 | """See `AutoTokenizer.from_pretrained` docstring."""
39 | tokenizer = super().from_pretrained(*args, **kwargs)
40 | adapt_tokenizer_for_denoising(tokenizer)
41 | return tokenizer
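A hedged sketch of the adaptation above; the checkpoint name is only an example of a tokenizer that lacks a pad token.

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")  # example checkpoint
adapt_tokenizer_for_denoising(tok)

print(tok.pad_token)                # '<pad>' was added because the tokenizer had none
print(len(tok.sentinel_token_ids))  # NUM_SENTINEL_TOKENS ids, one per <extra_id_i> sentinel
```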
--------------------------------------------------------------------------------
/llava/model/language_model/mpt/blocks.py:
--------------------------------------------------------------------------------
1 | """GPT Blocks used for the GPT Model."""
2 | from typing import Dict, Optional, Tuple
3 | import torch
4 | import torch.nn as nn
5 | from .attention import ATTN_CLASS_REGISTRY
6 | from .norm import NORM_CLASS_REGISTRY
7 |
8 | class MPTMLP(nn.Module):
9 |
10 | def __init__(self, d_model: int, expansion_ratio: int, device: Optional[str]=None):
11 | super().__init__()
12 | self.up_proj = nn.Linear(d_model, expansion_ratio * d_model, device=device)
13 | self.act = nn.GELU(approximate='none')
14 | self.down_proj = nn.Linear(expansion_ratio * d_model, d_model, device=device)
15 | self.down_proj._is_residual = True
16 |
17 | def forward(self, x):
18 | return self.down_proj(self.act(self.up_proj(x)))
19 |
20 | class MPTBlock(nn.Module):
21 |
22 | def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):
23 | del kwargs
24 | super().__init__()
25 | norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
26 | attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]
27 | self.norm_1 = norm_class(d_model, device=device)
28 | self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)
29 | self.norm_2 = norm_class(d_model, device=device)
30 | self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)
31 | self.resid_attn_dropout = nn.Dropout(resid_pdrop)
32 | self.resid_ffn_dropout = nn.Dropout(resid_pdrop)
33 |
34 | def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
35 | a = self.norm_1(x)
36 | (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)
37 | x = x + self.resid_attn_dropout(b)
38 | m = self.norm_2(x)
39 | n = self.ffn(m)
40 | x = x + self.resid_ffn_dropout(n)
41 | return (x, attn_weights, past_key_value)
--------------------------------------------------------------------------------
/llava/model/language_model/mpt/custom_embedding.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch import Tensor
5 |
6 | class SharedEmbedding(nn.Embedding):
7 |
8 | def forward(self, input: Tensor, unembed: bool=False) -> Tensor:
9 | if unembed:
10 | return F.linear(input, self.weight)
11 | return super().forward(input)
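A tiny self-contained demo of the tied embed/unembed path (shapes are arbitrary):

```python
import torch

emb = SharedEmbedding(num_embeddings=16, embedding_dim=8)
tokens = torch.tensor([[1, 2, 3]])

hidden = emb(tokens)                # (1, 3, 8): ordinary embedding lookup
logits = emb(hidden, unembed=True)  # (1, 3, 16): project back onto the vocabulary with the same weight
print(hidden.shape, logits.shape)
```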
--------------------------------------------------------------------------------
/llava/model/language_model/mpt/meta_init_context.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 | import torch
3 | import torch.nn as nn
4 |
5 | @contextmanager
6 | def init_empty_weights(include_buffers: bool=False):
7 | """Meta initialization context manager.
8 |
9 | A context manager under which models are initialized with all parameters
10 | on the meta device, therefore creating an empty model. Useful when just
11 | initializing the model would blow the available RAM.
12 |
13 | Args:
14 | include_buffers (`bool`, *optional*, defaults to `False`): Whether or
15 | not to also put all buffers on the meta device while initializing.
16 |
17 | Example:
18 | ```python
19 | import torch.nn as nn
20 |
21 |     # Initialize a model with 100 billion parameters in no time and without using any RAM.
22 | with init_empty_weights():
23 | tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
24 | ```
25 |
26 |     Note:
27 |         Any model created under this context manager has no weights. As such you can't do
28 |         something like `model.to(some_device)` with it. To load weights inside your empty
29 |         model, see [`load_checkpoint_and_dispatch`].
30 |
31 |
32 |     """
33 | with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f:
34 | yield f
35 |
36 | @contextmanager
37 | def init_on_device(device: torch.device, include_buffers: bool=False):
38 | """Device initialization context manager.
39 |
40 | A context manager under which models are initialized with all parameters
41 | on the specified device.
42 |
43 | Args:
44 | device (`torch.device`): Device to initialize all parameters on.
45 | include_buffers (`bool`, *optional*, defaults to `False`): Whether or
46 | not to also put all buffers on the meta device while initializing.
47 |
48 | Example:
49 | ```python
50 | import torch.nn as nn
51 |
52 | with init_on_device(device=torch.device("cuda")):
53 |         tst = nn.Linear(100, 100)  # on `cuda` device
54 | ```
55 | """
56 | old_register_parameter = nn.Module.register_parameter
57 | if include_buffers:
58 | old_register_buffer = nn.Module.register_buffer
59 |
60 | def register_empty_parameter(module, name, param):
61 | old_register_parameter(module, name, param)
62 | if param is not None:
63 | param_cls = type(module._parameters[name])
64 | kwargs = module._parameters[name].__dict__
65 | module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
66 |
67 | def register_empty_buffer(module, name, buffer):
68 | old_register_buffer(module, name, buffer)
69 | if buffer is not None:
70 | module._buffers[name] = module._buffers[name].to(device)
71 | if include_buffers:
72 | tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']}
73 | else:
74 | tensor_constructors_to_patch = {}
75 |
76 | def patch_tensor_constructor(fn):
77 |
78 | def wrapper(*args, **kwargs):
79 | kwargs['device'] = device
80 | return fn(*args, **kwargs)
81 | return wrapper
82 | try:
83 | nn.Module.register_parameter = register_empty_parameter
84 | if include_buffers:
85 | nn.Module.register_buffer = register_empty_buffer
86 | for torch_function_name in tensor_constructors_to_patch.keys():
87 | setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
88 | yield
89 | finally:
90 | nn.Module.register_parameter = old_register_parameter
91 | if include_buffers:
92 | nn.Module.register_buffer = old_register_buffer
93 | for (torch_function_name, old_torch_function) in tensor_constructors_to_patch.items():
94 | setattr(torch, torch_function_name, old_torch_function)
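A quick check of the behavior documented above: parameters created under the context land on the meta device, so no real memory is allocated until weights are loaded.

```python
import torch.nn as nn

with init_empty_weights():
    layer = nn.Linear(4096, 4096)

print(layer.weight.device)  # meta
print(layer.weight.shape)   # torch.Size([4096, 4096]), but backed by no storage
```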
--------------------------------------------------------------------------------
/llava/model/language_model/mpt/norm.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | def _cast_if_autocast_enabled(tensor):
4 | if torch.is_autocast_enabled():
5 | if tensor.device.type == 'cuda':
6 | dtype = torch.get_autocast_gpu_dtype()
7 | elif tensor.device.type == 'cpu':
8 | dtype = torch.get_autocast_cpu_dtype()
9 | else:
10 | raise NotImplementedError()
11 | return tensor.to(dtype=dtype)
12 | return tensor
13 |
14 | class LPLayerNorm(torch.nn.LayerNorm):
15 |
16 | def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None):
17 | super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype)
18 |
19 | def forward(self, x):
20 | module_device = x.device
21 | downcast_x = _cast_if_autocast_enabled(x)
22 | downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
23 | downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
24 | with torch.autocast(enabled=False, device_type=module_device.type):
25 | return torch.nn.functional.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps)
26 |
27 | def rms_norm(x, weight=None, eps=1e-05):
28 | output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
29 | if weight is not None:
30 | return output * weight
31 | return output
32 |
33 | class RMSNorm(torch.nn.Module):
34 |
35 | def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
36 | super().__init__()
37 | self.eps = eps
38 | if weight:
39 | self.weight = torch.nn.Parameter(torch.ones(normalized_shape, dtype=dtype, device=device))
40 | else:
41 | self.register_parameter('weight', None)
42 |
43 | def forward(self, x):
44 | return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)
45 |
46 | class LPRMSNorm(RMSNorm):
47 |
48 | def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
49 | super().__init__(normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device)
50 |
51 | def forward(self, x):
52 | downcast_x = _cast_if_autocast_enabled(x)
53 | downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
54 | with torch.autocast(enabled=False, device_type=x.device.type):
55 | return rms_norm(downcast_x, downcast_weight, self.eps).to(dtype=x.dtype)
56 | NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}
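A short sanity check that `RMSNorm` (looked up through the registry) with its default all-ones weight matches the `rms_norm` formula above:

```python
import torch

x = torch.randn(2, 8)
norm = NORM_CLASS_REGISTRY['rmsnorm'](8)
expected = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-05)
assert torch.allclose(norm(x), expected)
```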
--------------------------------------------------------------------------------
/llava/model/make_delta.py:
--------------------------------------------------------------------------------
1 | """
2 | Usage:
3 | python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/llava-7b --delta ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta
4 | """
5 | import argparse
6 |
7 | import torch
8 | from tqdm import tqdm
9 | from transformers import AutoTokenizer, AutoModelForCausalLM
10 | from llava.model.utils import auto_upgrade
11 |
12 |
13 | def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):
14 | print("Loading base model")
15 | base = AutoModelForCausalLM.from_pretrained(
16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
17 |
18 | print("Loading target model")
19 | auto_upgrade(target_model_path)
20 | target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
21 |
22 | print("Calculating delta")
23 | for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
24 | if name not in base.state_dict():
25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model'
26 | continue
27 | if param.data.shape == base.state_dict()[name].shape:
28 | param.data -= base.state_dict()[name]
29 | else:
30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}'
31 | bparam = base.state_dict()[name]
32 | param.data[:bparam.shape[0], :bparam.shape[1]] -= bparam
33 |
34 | print("Saving delta")
35 | if hub_repo_id:
36 | kwargs = {"push_to_hub": True, "repo_id": hub_repo_id}
37 | else:
38 | kwargs = {}
39 | target.save_pretrained(delta_path, **kwargs)
40 | target_tokenizer = AutoTokenizer.from_pretrained(target_model_path)
41 | target_tokenizer.save_pretrained(delta_path, **kwargs)
42 |
43 |
44 | if __name__ == "__main__":
45 | parser = argparse.ArgumentParser()
46 | parser.add_argument("--base-model-path", type=str, required=True)
47 | parser.add_argument("--target-model-path", type=str, required=True)
48 | parser.add_argument("--delta-path", type=str, required=True)
49 | parser.add_argument("--hub-repo-id", type=str, default=None)
50 | args = parser.parse_args()
51 |
52 | make_delta(args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id)
53 |
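The two scripts form a round trip: `make_delta` stores `target - base` (subtracting the base only into the overlapping rows when the target embedding is larger), and `apply_delta` adds the base back. A toy tensor version of that arithmetic, with plain tensors standing in for state-dict entries:

```python
import torch

base = torch.randn(32000, 16)
target = torch.randn(32004, 16)  # target vocab larger than the base's, as with extra special tokens

delta = target.clone()
delta[:base.shape[0], :base.shape[1]] -= base      # what make_delta stores

restored = delta.clone()
restored[:base.shape[0], :base.shape[1]] += base   # what apply_delta reconstructs
assert torch.allclose(restored, target)
```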
--------------------------------------------------------------------------------
/llava/model/multimodal_encoder/builder.py:
--------------------------------------------------------------------------------
1 | from .clip_encoder import CLIPVisionTower
2 |
3 |
4 | def build_vision_tower(vision_tower_cfg, **kwargs):
5 | vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
6 | if vision_tower.startswith("openai") or vision_tower.startswith("laion"):
7 | return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
8 |
9 | raise ValueError(f'Unknown vision tower: {vision_tower}')
10 |
--------------------------------------------------------------------------------
/llava/model/multimodal_encoder/clip_encoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig
5 |
6 |
7 | class CLIPVisionTower(nn.Module):
8 | def __init__(self, vision_tower, args, delay_load=False):
9 | super().__init__()
10 |
11 | self.is_loaded = False
12 |
13 | self.vision_tower_name = vision_tower
14 | self.select_layer = args.mm_vision_select_layer
15 | self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch')
16 |
17 | if not delay_load:
18 | self.load_model()
19 | else:
20 | self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name)
21 |
22 | def load_model(self):
23 | self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)
24 |         self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name)  # rely on the default HF cache instead of a machine-specific cache_dir
25 | self.vision_tower.requires_grad_(False)
26 |
27 | self.is_loaded = True
28 |
29 | def feature_select(self, image_forward_outs):
30 | image_features = image_forward_outs.hidden_states[self.select_layer]
31 | if self.select_feature == 'patch':
32 | image_features = image_features[:, 1:]
33 | elif self.select_feature == 'cls_patch':
34 | image_features = image_features
35 | else:
36 | raise ValueError(f'Unexpected select feature: {self.select_feature}')
37 | return image_features
38 |
39 | @torch.no_grad()
40 | def forward(self, images):
41 | if type(images) is list:
42 | image_features = []
43 | for image in images:
44 | image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)
45 | image_feature = self.feature_select(image_forward_out).to(image.dtype)
46 | image_features.append(image_feature)
47 | else:
48 | image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
49 | image_features = self.feature_select(image_forward_outs).to(images.dtype)
50 |
51 | return image_features
52 |
53 | @property
54 | def dummy_feature(self):
55 | return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
56 |
57 | @property
58 | def dtype(self):
59 | return self.vision_tower.dtype
60 |
61 | @property
62 | def device(self):
63 | return self.vision_tower.device
64 |
65 | @property
66 | def config(self):
67 | if self.is_loaded:
68 | return self.vision_tower.config
69 | else:
70 | return self.cfg_only
71 |
72 | @property
73 | def hidden_size(self):
74 | return self.config.hidden_size
75 |
76 | @property
77 | def num_patches(self):
78 | return (self.config.image_size // self.config.patch_size) ** 2
79 |
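A hedged usage sketch: the `args` namespace below is a stand-in for the real training config, and the checkpoint name is just a commonly used CLIP vision tower; `delay_load=True` only fetches the config, so no weights are downloaded.

```python
from types import SimpleNamespace

args = SimpleNamespace(mm_vision_select_layer=-2, mm_vision_select_feature='patch')
tower = CLIPVisionTower("openai/clip-vit-large-patch14", args=args, delay_load=True)

print(tower.hidden_size)  # 1024 for ViT-L/14
print(tower.num_patches)  # (224 // 14) ** 2 = 256
```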
--------------------------------------------------------------------------------
/llava/model/openseed/BaseModel.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 |
4 | import torch
5 | import torch.nn as nn
6 |
7 | # from utils.model import align_and_update_state_dicts
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 |
12 | def align_and_update_state_dicts(model_state_dict, ckpt_state_dict):
13 | model_keys = sorted(model_state_dict.keys())
14 | ckpt_keys = sorted(ckpt_state_dict.keys())
15 | result_dicts = {}
16 | matched_log = []
17 | unmatched_log = []
18 | unloaded_log = []
19 | for model_key in model_keys:
20 | model_weight = model_state_dict[model_key]
21 | if model_key in ckpt_keys:
22 | ckpt_weight = ckpt_state_dict[model_key]
23 | if model_weight.shape == ckpt_weight.shape:
24 | result_dicts[model_key] = ckpt_weight
25 | ckpt_keys.pop(ckpt_keys.index(model_key))
26 | matched_log.append("Loaded {}, Model Shape: {} <-> Ckpt Shape: {}".format(model_key, model_weight.shape,
27 | ckpt_weight.shape))
28 | else:
29 | unmatched_log.append(
30 | "*UNMATCHED* {}, Model Shape: {} <-> Ckpt Shape: {}".format(model_key, model_weight.shape,
31 | ckpt_weight.shape))
32 | else:
33 | unloaded_log.append("*UNLOADED* {}, Model Shape: {}".format(model_key, model_weight.shape))
34 |
35 | # if is_main_process():
36 | # for info in matched_log:
37 | # logger.info(info)
38 | # for info in unloaded_log:
39 | # logger.warning(info)
40 | # for key in ckpt_keys:
41 | # logger.warning("$UNUSED$ {}, Ckpt Shape: {}".format(key, ckpt_state_dict[key].shape))
42 | # for info in unmatched_log:
43 | # logger.warning(info)
44 | return result_dicts
45 |
46 | class BaseModel(nn.Module):
47 | def __init__(self, opt, module: nn.Module):
48 | super(BaseModel, self).__init__()
49 | self.opt = opt
50 | self.model = module
51 |
52 | def forward(self, *inputs, **kwargs):
53 | outputs = self.model(*inputs, **kwargs)
54 | return outputs
55 |
56 | def save_pretrained(self, save_dir):
57 | torch.save(self.model.state_dict(), save_dir)
58 |
59 | def from_pretrained(self, load_dir):
60 | state_dict = torch.load(load_dir, map_location='cpu')
61 | state_dict = align_and_update_state_dicts(self.model.state_dict(), state_dict)
62 | self.model.load_state_dict(state_dict, strict=False)
63 | return self
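A small illustration of the checkpoint filtering: keys with matching shapes are kept, while shape mismatches and keys missing from the checkpoint are skipped rather than raising.

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
ckpt = {
    "weight": torch.zeros(2, 4),  # matches -> loaded
    "bias": torch.zeros(3),       # shape mismatch -> reported as *UNMATCHED*, not loaded
}
filtered = align_and_update_state_dicts(model.state_dict(), ckpt)
print(sorted(filtered))  # ['weight']
```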
--------------------------------------------------------------------------------
/llava/model/openseed/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | from .architectures import build_model
--------------------------------------------------------------------------------
/llava/model/openseed/architectures/__init__.py:
--------------------------------------------------------------------------------
1 | from .openseed_model import *
2 | # from .openseed_model_decouple_train import *
3 | from .build import build_model
--------------------------------------------------------------------------------
/llava/model/openseed/architectures/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 | def build_model(config, **kwargs):
5 | model_name = config['MODEL']['NAME']
6 |
7 | if not is_model(model_name):
 8 |         raise ValueError(f'Unknown model: {model_name}')
9 |
10 | return model_entrypoints(model_name)(config, **kwargs)
--------------------------------------------------------------------------------
/llava/model/openseed/architectures/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 | def register_model(fn):
4 | module_name_split = fn.__module__.split('.')
5 | model_name = module_name_split[-1]
6 | _model_entrypoints[model_name] = fn
7 | return fn
8 |
9 | def model_entrypoints(model_name):
10 | return _model_entrypoints[model_name]
11 |
12 | def is_model(model_name):
13 | return model_name in _model_entrypoints
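The registry keys each entry by the last component of the registering function's module, so `config['MODEL']['NAME']` must equal the module (file) name. A standalone illustration, with the module path set by hand for the demo (hypothetical name):

```python
def _demo_entrypoint(config, **kwargs):
    return f"built {config['MODEL']['NAME']}"

_demo_entrypoint.__module__ = "openseed.architectures.my_arch"  # pretend it lives in my_arch.py
register_model(_demo_entrypoint)

assert is_model("my_arch")
print(model_entrypoints("my_arch")({'MODEL': {'NAME': 'my_arch'}}))  # built my_arch
```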
--------------------------------------------------------------------------------
/llava/model/openseed/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | from .build import build_backbone
2 |
3 | from .focal import *
4 | from .focal_dw import *
5 | from .swin import *
6 | from .backbone import *
--------------------------------------------------------------------------------
/llava/model/openseed/backbone/backbone.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | import torch.nn as nn
3 |
4 | from detectron2.modeling import ShapeSpec
5 |
6 | # from ..layers import ShapeSpec
7 |
8 | __all__ = ["Backbone"]
9 |
10 |
11 | class Backbone(nn.Module):
12 | """
13 | Abstract base class for network backbones.
14 | """
15 |
16 | def __init__(self):
17 | """
18 | The `__init__` method of any subclass can specify its own set of arguments.
19 | """
20 | super().__init__()
21 |
22 | def forward(self):
23 | """
24 | Subclasses must override this method, but adhere to the same return type.
25 |
26 | Returns:
27 | dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor
28 | """
29 | pass
30 |
31 | @property
32 | def size_divisibility(self) -> int:
33 | """
34 | Some backbones require the input height and width to be divisible by a
35 | specific integer. This is typically true for encoder / decoder type networks
36 | with lateral connection (e.g., FPN) for which feature maps need to match
37 | dimension in the "bottom up" and "top down" paths. Set to 0 if no specific
38 | input size divisibility is required.
39 | """
40 | return 0
41 |
42 | def output_shape(self):
43 | """
44 | Returns:
45 | dict[str->ShapeSpec]
46 | """
47 | # this is a backward-compatible default
48 | return {
49 | name: ShapeSpec(
50 | channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
51 | )
52 | for name in self._out_features
53 | }
54 |
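A minimal, hypothetical subclass showing the contract that registered backbones follow: `forward` returns a name-to-tensor dict and `output_shape()` reports channels and stride per feature.

```python
import torch
import torch.nn as nn

class ToyBackbone(Backbone):
    def __init__(self):
        super().__init__()
        self.stem = nn.Conv2d(3, 64, kernel_size=4, stride=4)
        self._out_features = ["res2"]
        self._out_feature_channels = {"res2": 64}
        self._out_feature_strides = {"res2": 4}

    def forward(self, x):
        return {"res2": self.stem(x)}

bb = ToyBackbone()
print(bb.output_shape())                            # {'res2': ShapeSpec(channels=64, ..., stride=4)}
print(bb(torch.randn(1, 3, 64, 64))["res2"].shape)  # torch.Size([1, 64, 16, 16])
```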
--------------------------------------------------------------------------------
/llava/model/openseed/backbone/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 | from .backbone import *
5 |
6 | def build_backbone(config, **kwargs):
7 | model_name = config['MODEL']['BACKBONE']['NAME']
8 | if not is_model(model_name):
 9 |         raise ValueError(f'Unknown model: {model_name}')
10 |
11 | return model_entrypoints(model_name)(config, **kwargs)
--------------------------------------------------------------------------------
/llava/model/openseed/backbone/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 |
4 | def register_backbone(fn):
5 | module_name_split = fn.__module__.split('.')
6 | model_name = module_name_split[-1]
7 | _model_entrypoints[model_name] = fn
8 | return fn
9 |
10 | def model_entrypoints(model_name):
11 | return _model_entrypoints[model_name]
12 |
13 | def is_model(model_name):
14 | return model_name in _model_entrypoints
15 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/__init__.py:
--------------------------------------------------------------------------------
1 | from .build import build_openseed_head
--------------------------------------------------------------------------------
/llava/model/openseed/body/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 | from .openseed_head import *
4 |
5 |
6 | def build_openseed_head(config, *args, **kwargs):
7 | model_name = config['MODEL']['HEAD']
8 | if not is_model(model_name):
 9 |         raise ValueError(f'Unknown model: {model_name}')
10 |
11 | body = model_entrypoints(model_name)(config, *args, **kwargs)
12 | return body
--------------------------------------------------------------------------------
/llava/model/openseed/body/decoder/__init__.py:
--------------------------------------------------------------------------------
1 | from .build import build_decoder
2 | from .openseed_decoder import *
3 | from .openseed_decoder_decouple import *
--------------------------------------------------------------------------------
/llava/model/openseed/body/decoder/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 |
5 | def build_decoder(config, *args, **kwargs):
6 | model_name = config['MODEL']['DECODER']['NAME']
7 |
8 | if not is_model(model_name):
 9 |         raise ValueError(f'Unknown model: {model_name}')
10 |
11 | return model_entrypoints(model_name)(config, *args, **kwargs)
--------------------------------------------------------------------------------
/llava/model/openseed/body/decoder/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 | def register_decoder(fn):
4 | module_name_split = fn.__module__.split('.')
5 | model_name = module_name_split[-1]
6 | _model_entrypoints[model_name] = fn
7 | return fn
8 |
9 | def model_entrypoints(model_name):
10 | return _model_entrypoints[model_name]
11 |
12 | def is_model(model_name):
13 | return model_name in _model_entrypoints
--------------------------------------------------------------------------------
/llava/model/openseed/body/decoder/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import *
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/__init__.py:
--------------------------------------------------------------------------------
1 | from .build import build_encoder
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 | from .transformer_encoder_fpn import *
5 | from .encoder_deform import *
6 |
7 | def build_encoder(config, *args, **kwargs):
8 | model_name = config['MODEL']['ENCODER']['NAME']
9 |
10 | if not is_model(model_name):
11 |         raise ValueError(f'Unknown model: {model_name}')
12 |
13 | return model_entrypoints(model_name)(config, *args, **kwargs)
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/functions/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | # Copyright (c) Facebook, Inc. and its affiliates.
10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
11 |
12 | from .ms_deform_attn_func import MSDeformAttnFunction
13 |
14 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/functions/ms_deform_attn_func.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | # Copyright (c) Facebook, Inc. and its affiliates.
10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
11 |
12 | from __future__ import absolute_import
13 | from __future__ import print_function
14 | from __future__ import division
15 |
16 | import torch
17 | import torch.nn.functional as F
18 | from torch.autograd import Function
19 | from torch.autograd.function import once_differentiable
20 |
21 | try:
22 | import MultiScaleDeformableAttention as MSDA
23 | except ModuleNotFoundError as e:
24 | info_string = (
25 | "\n\nPlease compile MultiScaleDeformableAttention CUDA op with the following commands:\n"
26 |         "\t`cd llava/model/openseed/body/encoder/ops`\n"
27 | "\t`sh make.sh`\n"
28 | )
29 | raise ModuleNotFoundError(info_string)
30 |
31 |
32 | class MSDeformAttnFunction(Function):
33 | @staticmethod
34 | def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
35 | ctx.im2col_step = im2col_step
36 | output = MSDA.ms_deform_attn_forward(
37 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
38 | ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
39 | return output
40 |
41 | @staticmethod
42 | @once_differentiable
43 | def backward(ctx, grad_output):
44 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
45 | grad_value, grad_sampling_loc, grad_attn_weight = \
46 | MSDA.ms_deform_attn_backward(
47 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
48 |
49 | return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
50 |
51 |
52 | def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
53 | # for debug and test only,
54 | # need to use cuda version instead
55 | N_, S_, M_, D_ = value.shape
56 | _, Lq_, M_, L_, P_, _ = sampling_locations.shape
57 | value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
58 | sampling_grids = 2 * sampling_locations - 1
59 | sampling_value_list = []
60 | for lid_, (H_, W_) in enumerate(value_spatial_shapes):
61 | # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
62 | value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_)
63 | # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
64 | sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
65 | # N_*M_, D_, Lq_, P_
66 | sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_,
67 | mode='bilinear', padding_mode='zeros', align_corners=False)
68 | sampling_value_list.append(sampling_value_l_)
69 | # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
70 | attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
71 | output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
72 | return output.transpose(1, 2).contiguous()
73 |
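A shape walkthrough of the PyTorch reference path with random inputs (names follow the comments in the function): `value` flattens all levels along the spatial axis, and the output is `(N, Lq, M * D)`.

```python
import torch

N, M, D, Lq, P = 2, 8, 32, 10, 4             # batch, heads, head dim, queries, points per level
spatial_shapes = [(16, 16), (8, 8)]           # two feature levels
L = len(spatial_shapes)
S = sum(h * w for h, w in spatial_shapes)     # flattened spatial length

value = torch.randn(N, S, M, D)
sampling_locations = torch.rand(N, Lq, M, L, P, 2)  # normalized [0, 1] coordinates
attention_weights = torch.softmax(torch.randn(N, Lq, M, L * P), -1).view(N, Lq, M, L, P)

out = ms_deform_attn_core_pytorch(value, spatial_shapes, sampling_locations, attention_weights)
print(out.shape)  # torch.Size([2, 10, 256]) == (N, Lq, M * D)
```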
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/make.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ------------------------------------------------------------------------------------------------
3 | # Deformable DETR
4 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | # ------------------------------------------------------------------------------------------------
7 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | # ------------------------------------------------------------------------------------------------
9 |
10 | # Copyright (c) Facebook, Inc. and its affiliates.
11 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
12 |
13 | python setup.py build install --user
14 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | # Copyright (c) Facebook, Inc. and its affiliates.
10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
11 |
12 | from .ms_deform_attn import MSDeformAttn
13 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/setup.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | # Copyright (c) Facebook, Inc. and its affiliates.
10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
11 |
12 | import os
13 | import glob
14 |
15 | import torch
16 |
17 | from torch.utils.cpp_extension import CUDA_HOME
18 | from torch.utils.cpp_extension import CppExtension
19 | from torch.utils.cpp_extension import CUDAExtension
20 |
21 | from setuptools import find_packages
22 | from setuptools import setup
23 |
24 | requirements = ["torch", "torchvision"]
25 |
26 | def get_extensions():
27 | this_dir = os.path.dirname(os.path.abspath(__file__))
28 | extensions_dir = os.path.join(this_dir, "src")
29 |
30 | main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
31 | source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
32 | source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
33 |
34 | sources = main_file + source_cpu
35 | extension = CppExtension
36 | extra_compile_args = {"cxx": []}
37 | define_macros = []
38 |
39 |     # Build the CUDA extension when FORCE_CUDA is set, even if torch.cuda.is_available() is False at build time.
40 | if (os.environ.get('FORCE_CUDA') or torch.cuda.is_available()) and CUDA_HOME is not None:
41 | extension = CUDAExtension
42 | sources += source_cuda
43 | define_macros += [("WITH_CUDA", None)]
44 | extra_compile_args["nvcc"] = [
45 | "-DCUDA_HAS_FP16=1",
46 | "-D__CUDA_NO_HALF_OPERATORS__",
47 | "-D__CUDA_NO_HALF_CONVERSIONS__",
48 | "-D__CUDA_NO_HALF2_OPERATORS__",
49 | ]
50 | else:
51 | if CUDA_HOME is None:
52 | raise NotImplementedError('CUDA_HOME is None. Please set environment variable CUDA_HOME.')
53 | else:
54 | raise NotImplementedError('No CUDA runtime is found. Please set FORCE_CUDA=1 or test it by running torch.cuda.is_available().')
55 |
56 | sources = [os.path.join(extensions_dir, s) for s in sources]
57 | include_dirs = [extensions_dir]
58 | ext_modules = [
59 | extension(
60 | "MultiScaleDeformableAttention",
61 | sources,
62 | include_dirs=include_dirs,
63 | define_macros=define_macros,
64 | extra_compile_args=extra_compile_args,
65 | )
66 | ]
67 | return ext_modules
68 |
69 | setup(
70 | name="MultiScaleDeformableAttention",
71 | version="1.0",
72 | author="Weijie Su",
73 | url="https://github.com/fundamentalvision/Deformable-DETR",
74 | description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention",
75 | packages=find_packages(exclude=("configs", "tests",)),
76 | ext_modules=get_extensions(),
77 | cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
78 | )
79 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/src/cpu/ms_deform_attn_cpu.cpp:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #include <vector>
17 |
18 | #include <ATen/ATen.h>
19 | #include <ATen/cuda/CUDAContext.h>
20 |
21 |
22 | at::Tensor
23 | ms_deform_attn_cpu_forward(
24 | const at::Tensor &value,
25 | const at::Tensor &spatial_shapes,
26 | const at::Tensor &level_start_index,
27 | const at::Tensor &sampling_loc,
28 | const at::Tensor &attn_weight,
29 | const int im2col_step)
30 | {
31 |     AT_ERROR("Not implemented on the CPU");
32 | }
33 |
34 | std::vector<at::Tensor>
35 | ms_deform_attn_cpu_backward(
36 | const at::Tensor &value,
37 | const at::Tensor &spatial_shapes,
38 | const at::Tensor &level_start_index,
39 | const at::Tensor &sampling_loc,
40 | const at::Tensor &attn_weight,
41 | const at::Tensor &grad_output,
42 | const int im2col_step)
43 | {
44 |     AT_ERROR("Not implemented on the CPU");
45 | }
46 |
47 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/src/cpu/ms_deform_attn_cpu.h:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #pragma once
17 | #include <torch/extension.h>
18 |
19 | at::Tensor
20 | ms_deform_attn_cpu_forward(
21 | const at::Tensor &value,
22 | const at::Tensor &spatial_shapes,
23 | const at::Tensor &level_start_index,
24 | const at::Tensor &sampling_loc,
25 | const at::Tensor &attn_weight,
26 | const int im2col_step);
27 |
28 | std::vector<at::Tensor>
29 | ms_deform_attn_cpu_backward(
30 | const at::Tensor &value,
31 | const at::Tensor &spatial_shapes,
32 | const at::Tensor &level_start_index,
33 | const at::Tensor &sampling_loc,
34 | const at::Tensor &attn_weight,
35 | const at::Tensor &grad_output,
36 | const int im2col_step);
37 |
38 |
39 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/src/cuda/ms_deform_attn_cuda.h:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #pragma once
17 | #include <torch/extension.h>
18 |
19 | at::Tensor ms_deform_attn_cuda_forward(
20 | const at::Tensor &value,
21 | const at::Tensor &spatial_shapes,
22 | const at::Tensor &level_start_index,
23 | const at::Tensor &sampling_loc,
24 | const at::Tensor &attn_weight,
25 | const int im2col_step);
26 |
27 | std::vector<at::Tensor> ms_deform_attn_cuda_backward(
28 | const at::Tensor &value,
29 | const at::Tensor &spatial_shapes,
30 | const at::Tensor &level_start_index,
31 | const at::Tensor &sampling_loc,
32 | const at::Tensor &attn_weight,
33 | const at::Tensor &grad_output,
34 | const int im2col_step);
35 |
36 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/src/ms_deform_attn.h:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #pragma once
17 |
18 | #include "cpu/ms_deform_attn_cpu.h"
19 |
20 | #ifdef WITH_CUDA
21 | #include "cuda/ms_deform_attn_cuda.h"
22 | #endif
23 |
24 |
25 | at::Tensor
26 | ms_deform_attn_forward(
27 | const at::Tensor &value,
28 | const at::Tensor &spatial_shapes,
29 | const at::Tensor &level_start_index,
30 | const at::Tensor &sampling_loc,
31 | const at::Tensor &attn_weight,
32 | const int im2col_step)
33 | {
34 | if (value.type().is_cuda())
35 | {
36 | #ifdef WITH_CUDA
37 | return ms_deform_attn_cuda_forward(
38 | value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
39 | #else
40 | AT_ERROR("Not compiled with GPU support");
41 | #endif
42 | }
43 | AT_ERROR("Not implemented on the CPU");
44 | }
45 |
46 | std::vector<at::Tensor>
47 | ms_deform_attn_backward(
48 | const at::Tensor &value,
49 | const at::Tensor &spatial_shapes,
50 | const at::Tensor &level_start_index,
51 | const at::Tensor &sampling_loc,
52 | const at::Tensor &attn_weight,
53 | const at::Tensor &grad_output,
54 | const int im2col_step)
55 | {
56 | if (value.type().is_cuda())
57 | {
58 | #ifdef WITH_CUDA
59 | return ms_deform_attn_cuda_backward(
60 | value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
61 | #else
62 | AT_ERROR("Not compiled with GPU support");
63 | #endif
64 | }
65 | AT_ERROR("Not implemented on the CPU");
66 | }
67 |
68 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/ops/src/vision.cpp:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #include "ms_deform_attn.h"
17 |
18 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
19 | m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward");
20 | m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward");
21 | }
22 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/encoder/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 | def register_encoder(fn):
4 | module_name_split = fn.__module__.split('.')
5 | model_name = module_name_split[-1]
6 | _model_entrypoints[model_name] = fn
7 | return fn
8 |
9 | def model_entrypoints(model_name):
10 | return _model_entrypoints[model_name]
11 |
12 | def is_model(model_name):
13 | return model_name in _model_entrypoints
14 |
--------------------------------------------------------------------------------
/llava/model/openseed/body/openseed_head.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------
2 | # Copyright (c) 2022 IDEA. All Rights Reserved.
3 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
4 | # ------------------------------------------------------------------------
5 | # Modified from Mask2Former https://github.com/facebookresearch/Mask2Former by Feng Li and Hao Zhang.
6 | # ------------------------------------------------------------------------------
7 | import logging
8 | from typing import Callable, Dict, List, Optional, Tuple, Union
9 |
10 | from torch import nn
11 |
12 | from detectron2.layers import Conv2d, ShapeSpec, get_norm
13 | from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
14 |
15 | from .registry import register_body
16 | from .encoder import build_encoder
17 | from .decoder import build_decoder
18 | from ..utils import configurable
19 |
20 |
21 | class OpenSeeDHead(nn.Module):
22 | @configurable
23 | def __init__(
24 | self,
25 | input_shape: Dict[str, ShapeSpec],
26 | *,
27 | num_classes: int,
28 | pixel_decoder: nn.Module,
29 | loss_weight: float = 1.0,
30 | ignore_value: int = -1,
31 | transformer_predictor: nn.Module,
32 | ):
33 | """
34 | Args:
35 | input_shape: shapes (channels and stride) of the input features
36 | num_classes: number of classes to predict
37 | pixel_decoder: the pixel decoder module
38 | loss_weight: loss weight
39 | ignore_value: category id to be ignored during training.
40 | transformer_predictor: the transformer decoder that makes prediction
41 | transformer_in_feature: input feature name to the transformer_predictor
42 | """
43 | super().__init__()
44 | input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
45 | self.in_features = [k for k, v in input_shape]
46 | self.ignore_value = ignore_value
47 | self.common_stride = 4
48 | self.loss_weight = loss_weight
49 |
50 | self.pixel_decoder = pixel_decoder
51 | self.predictor = transformer_predictor
52 |
53 | self.num_classes = num_classes
54 |
55 | @classmethod
56 | def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec], lang_encoder: nn.Module, extra: dict):
57 | enc_cfg = cfg['MODEL']['ENCODER']
58 | dec_cfg = cfg['MODEL']['DECODER']
59 | transformer_predictor_in_channels = enc_cfg['CONVS_DIM']
60 |
61 | return {
62 | "input_shape": {
63 | k: v for k, v in input_shape.items() if k in enc_cfg['IN_FEATURES']
64 | },
65 | "ignore_value": enc_cfg['IGNORE_VALUE'],
66 | "num_classes": enc_cfg.get('NUM_CLASSES', None),
67 | "pixel_decoder": build_encoder(cfg, input_shape),
68 | "loss_weight": enc_cfg['LOSS_WEIGHT'],
69 | "transformer_predictor": build_decoder(
70 | cfg,
71 | transformer_predictor_in_channels,
72 | mask_classification=True,
73 | extra=extra,
74 | ),
75 | }
76 |
77 |     def forward(self, features, mask=None, targets=None, target_queries=None, target_vlp=None, task='seg', extra={}, default_text_embeddings=None):
78 | mask_features, transformer_encoder_features, multi_scale_features = self.pixel_decoder.forward_features(features, mask)
79 |
80 | predictions = self.predictor(multi_scale_features, mask_features, mask, targets=targets,
81 |                                      target_queries=target_queries, target_vlp=target_vlp, task=task, extra=extra, default_text_embeddings=default_text_embeddings)
82 | return predictions
83 |
84 |
85 | @register_body
86 | def get_maskdino_head(cfg, input_shape, lang_encoder, extra):
87 | return OpenSeeDHead(cfg, input_shape, lang_encoder, extra)
--------------------------------------------------------------------------------
/llava/model/openseed/body/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 |
4 | def register_body(fn):
5 | module_name_split = fn.__module__.split('.')
6 | model_name = module_name_split[-1]
7 | _model_entrypoints[model_name] = fn
8 | return fn
9 |
10 | def model_entrypoints(model_name):
11 | return _model_entrypoints[model_name]
12 |
13 | def is_model(model_name):
14 | return model_name in _model_entrypoints
--------------------------------------------------------------------------------
/llava/model/openseed/language/LangEncoder/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | from .build import build_lang_encoder
6 | from .build import build_tokenizer
7 |
8 | from .transformer import *
--------------------------------------------------------------------------------
/llava/model/openseed/language/LangEncoder/build.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from transformers import CLIPTokenizer, CLIPTokenizerFast
4 | from transformers import AutoTokenizer
5 |
6 | from .registry import lang_encoders
7 | from .registry import is_lang_encoder
8 |
9 |
10 | def build_lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
11 | model_name = config_encoder['NAME']
12 |
13 | if not is_lang_encoder(model_name):
14 |         raise ValueError(f'Unknown model: {model_name}')
15 |
16 | return lang_encoders(model_name)(config_encoder, tokenizer, verbose, **kwargs)
17 |
18 |
19 | def build_tokenizer(config_encoder):
20 | tokenizer = None
21 | os.environ['TOKENIZERS_PARALLELISM'] = 'true'
22 | if config_encoder['TOKENIZER'] == 'clip':
23 | pretrained_tokenizer = config_encoder.get(
24 | 'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
25 | )
26 | tokenizer = CLIPTokenizer.from_pretrained(pretrained_tokenizer)
27 | tokenizer.add_special_tokens({'cls_token': tokenizer.eos_token})
28 | elif config_encoder['TOKENIZER'] == 'clip-fast':
29 | pretrained_tokenizer = config_encoder.get(
30 | 'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
31 | )
32 | tokenizer = CLIPTokenizerFast.from_pretrained(pretrained_tokenizer, from_slow=True)
33 | else:
34 | tokenizer = AutoTokenizer.from_pretrained(config_encoder['TOKENIZER'])
35 |
36 | return tokenizer
37 |
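A hedged usage sketch of the `'clip'` branch; the dict below only mirrors the keys the function reads, not any config file in this repo.

```python
config_encoder = {
    'TOKENIZER': 'clip',
    'PRETRAINED_TOKENIZER': 'openai/clip-vit-base-patch32',
}
tokenizer = build_tokenizer(config_encoder)
print(tokenizer.cls_token)  # set to the EOS token ('<|endoftext|>') by the 'clip' branch
```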
--------------------------------------------------------------------------------
/llava/model/openseed/language/LangEncoder/registry.py:
--------------------------------------------------------------------------------
1 | _lang_encoders = {}
2 |
3 |
4 | def register_lang_encoder(fn):
5 | module_name_split = fn.__module__.split('.')
6 | model_name = module_name_split[-1]
7 |
8 | _lang_encoders[model_name] = fn
9 |
10 | return fn
11 |
12 |
13 | def lang_encoders(model_name):
14 | return _lang_encoders[model_name]
15 |
16 |
17 | def is_lang_encoder(model_name):
18 | return model_name in _lang_encoders
19 |
--------------------------------------------------------------------------------
/llava/model/openseed/language/__init__.py:
--------------------------------------------------------------------------------
1 | # from .vlpencoder import *
2 | # from .encoder import *
3 | # # from .loss import *
4 | # from .build import build_language_encoder
--------------------------------------------------------------------------------
/llava/model/openseed/language/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 |
5 | def build_language_encoder(config, **kwargs):
6 | model_name = config['MODEL']['TEXT']['ARCH']
7 |
8 | if not is_model(model_name):
9 |         raise ValueError(f'Unknown model: {model_name}')
10 |
11 | return model_entrypoints(model_name)(config, **kwargs)
--------------------------------------------------------------------------------
/llava/model/openseed/language/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 | def register_model(fn):
4 | module_name_split = fn.__module__.split('.')
5 | model_name = module_name_split[-1]
6 | _model_entrypoints[model_name] = fn
7 | return fn
8 |
9 | def model_entrypoints(model_name):
10 | return _model_entrypoints[model_name]
11 |
12 | def is_model(model_name):
13 | return model_name in _model_entrypoints
--------------------------------------------------------------------------------
/llava/model/openseed/modules/__init__.py:
--------------------------------------------------------------------------------
1 | from .point_features import *
2 | from .position_encoding import *
3 | from .postprocessing import *
4 | from .attention import *
5 | from .matcher import *
6 | from .criterion import *
--------------------------------------------------------------------------------
/llava/model/openseed/modules/position_encoding.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | # # Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
3 | """
4 | Various positional encodings for the transformer.
5 | """
6 | import math
7 |
8 | import torch
9 | from torch import nn
10 |
11 |
12 | class PositionEmbeddingSine(nn.Module):
13 | """
14 | This is a more standard version of the position embedding, very similar to the one
15 | used by the Attention is all you need paper, generalized to work on images.
16 | """
17 |
18 | def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
19 | super().__init__()
20 | self.num_pos_feats = num_pos_feats
21 | self.temperature = temperature
22 | self.normalize = normalize
23 | if scale is not None and normalize is False:
24 | raise ValueError("normalize should be True if scale is passed")
25 | if scale is None:
26 | scale = 2 * math.pi
27 | self.scale = scale
28 |
29 | def forward(self, x, mask=None):
30 | if mask is None:
31 | mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
32 | not_mask = ~mask
33 | y_embed = not_mask.cumsum(1, dtype=x.dtype)
34 | x_embed = not_mask.cumsum(2, dtype=x.dtype)
35 | if self.normalize:
36 | eps = 1e-6
37 | y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
38 | x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
39 |
40 | dim_t = torch.arange(self.num_pos_feats, dtype=x.dtype, device=x.device)
41 | dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
42 |
43 | pos_x = x_embed[:, :, :, None] / dim_t
44 | pos_y = y_embed[:, :, :, None] / dim_t
45 | pos_x = torch.stack(
46 | (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
47 | ).flatten(3)
48 | pos_y = torch.stack(
49 | (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
50 | ).flatten(3)
51 | pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
52 | return pos
53 |
54 | def __repr__(self, _repr_indent=4):
55 | head = "Positional encoding " + self.__class__.__name__
56 | body = [
57 | "num_pos_feats: {}".format(self.num_pos_feats),
58 | "temperature: {}".format(self.temperature),
59 | "normalize: {}".format(self.normalize),
60 | "scale: {}".format(self.scale),
61 | ]
62 | # _repr_indent = 4
63 | lines = [head] + [" " * _repr_indent + line for line in body]
64 | return "\n".join(lines)
65 |
--------------------------------------------------------------------------------
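A short shape check for PositionEmbeddingSine above (a sketch, not a repository file; importing the modules package pulls in its sibling submodules, so the repository's dependencies such as detectron2 must be installed):

    import torch

    from llava.model.openseed.modules.position_encoding import PositionEmbeddingSine

    pe = PositionEmbeddingSine(num_pos_feats=128, normalize=True)  # 128 features per axis
    feat = torch.rand(2, 256, 32, 48)                              # (B, C, H, W) feature map
    pos = pe(feat)                                                 # mask defaults to "all valid"
    print(pos.shape)  # torch.Size([2, 256, 32, 48]); channels = 2 * num_pos_feats

--------------------------------------------------------------------------------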
/llava/model/openseed/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .config import *
2 | from .misc import *
--------------------------------------------------------------------------------
/llava/model/openseed/utils/box_ops.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | """
3 | Utilities for bounding box manipulation and GIoU.
4 | """
5 | import torch
6 | from torchvision.ops.boxes import box_area
7 |
8 |
9 | def box_cxcywh_to_xyxy(x):
10 | x_c, y_c, w, h = x.unbind(-1)
11 | b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
12 | (x_c + 0.5 * w), (y_c + 0.5 * h)]
13 | return torch.stack(b, dim=-1)
14 |
15 |
16 | def box_xyxy_to_cxcywh(x):
17 | x0, y0, x1, y1 = x.unbind(-1)
18 | b = [(x0 + x1) / 2, (y0 + y1) / 2,
19 | (x1 - x0), (y1 - y0)]
20 | return torch.stack(b, dim=-1)
21 |
22 | def box_xywh_to_xyxy(x):
23 |     x0, y0, w, h = x.unbind(-1)
24 |     b = [x0, y0, (x0 + w), (y0 + h)]
25 | return torch.stack(b, dim=-1)
26 |
27 |
28 | # modified from torchvision to also return the union
29 | def box_iou(boxes1, boxes2):
30 | area1 = box_area(boxes1)
31 | area2 = box_area(boxes2)
32 |
33 | lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
34 | rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
35 |
36 | wh = (rb - lt).clamp(min=0) # [N,M,2]
37 | inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
38 |
39 | union = area1[:, None] + area2 - inter
40 |
41 | iou = inter / (union+1e-6)
42 | return iou, union
43 |
44 |
45 | def generalized_box_iou(boxes1, boxes2):
46 | """
47 | Generalized IoU from https://giou.stanford.edu/
48 |
49 | The boxes should be in [x0, y0, x1, y1] format
50 |
51 | Returns a [N, M] pairwise matrix, where N = len(boxes1)
52 | and M = len(boxes2)
53 | """
54 |     # degenerate boxes give inf / nan results
55 | # so do an early check
56 | assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
57 | assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
58 | iou, union = box_iou(boxes1, boxes2)
59 |
60 | lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
61 | rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
62 |
63 | wh = (rb - lt).clamp(min=0) # [N,M,2]
64 | area = wh[:, :, 0] * wh[:, :, 1]
65 |
66 | return iou - (area - union) / (area+1e-6)
67 |
68 |
69 | def masks_to_boxes(masks):
70 | """Compute the bounding boxes around the provided masks
71 |
72 | The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
73 |
74 |     Returns an [N, 4] tensor, with the boxes in xyxy format
75 | """
76 | if masks.numel() == 0:
77 | return torch.zeros((0, 4), device=masks.device)
78 |
79 | h, w = masks.shape[-2:]
80 |
81 | y = torch.arange(0, h, dtype=torch.float)
82 | x = torch.arange(0, w, dtype=torch.float)
83 | y, x = torch.meshgrid(y, x)
84 |
85 | x_mask = (masks * x.unsqueeze(0))
86 | x_max = x_mask.flatten(1).max(-1)[0]
87 | x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
88 |
89 | y_mask = (masks * y.unsqueeze(0))
90 | y_max = y_mask.flatten(1).max(-1)[0]
91 | y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
92 |
93 | return torch.stack([x_min, y_min, x_max, y_max], 1)
--------------------------------------------------------------------------------
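A small worked example for the box utilities above (a sketch with illustrative numbers; it assumes the repository's dependencies are installed since the utils package imports its other submodules):

    import torch

    from llava.model.openseed.utils.box_ops import box_cxcywh_to_xyxy, generalized_box_iou

    # Two normalized boxes in (cx, cy, w, h); GIoU expects (x0, y0, x1, y1).
    pred = box_cxcywh_to_xyxy(torch.tensor([[0.5, 0.5, 0.4, 0.4]]))
    gt = box_cxcywh_to_xyxy(torch.tensor([[0.6, 0.5, 0.4, 0.4]]))
    print(generalized_box_iou(pred, gt))  # [N, M] pairwise matrix; about 0.60 for this pair

--------------------------------------------------------------------------------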
/llava/model/semsam/BaseModel.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 |
4 | import torch
5 | import torch.nn as nn
6 |
7 | from utils.model import align_and_update_state_dicts
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 |
12 | class BaseModel(nn.Module):
13 | def __init__(self, opt, module: nn.Module):
14 | super(BaseModel, self).__init__()
15 | self.opt = opt
16 | self.model = module
17 |
18 | def forward(self, *inputs, **kwargs):
19 | outputs = self.model(*inputs, **kwargs)
20 | return outputs
21 |
22 | def save_pretrained(self, save_dir):
23 |         torch.save(self.model.state_dict(), save_dir)
24 |
25 | def from_pretrained(self, load_dir):
26 | state_dict = torch.load(load_dir, map_location='cpu')
27 |         # import pdb;pdb.set_trace()
28 |         if 'model' in state_dict:
29 |             state_dict = state_dict['model']
30 |             # strip the leading 'model.' prefix (6 characters) from every key
31 |             state_dict = {k[6:]: v for k, v in state_dict.items()}
32 |
33 | # if self.opt['MODEL']['LLAMA'].get('lora_r',0)>0:
34 | # new_sd = dict()
35 | # for k,v in state_dict.items():
36 | # if k.startswith("llama."):
37 | # if k.startswith("llama.base_model."):
38 | # new_sd=state_dict
39 | # break
40 | # new_sd[k.replace("llama.","llama.base_model.model.")]=v
41 | # else:
42 | # new_sd[k]=v
43 | # else:
44 | # new_sd = state_dict
45 | new_sd = align_and_update_state_dicts(self.model.state_dict(), state_dict)
46 | self.model.load_state_dict(new_sd, strict=False)
47 | return self
48 |
--------------------------------------------------------------------------------
/llava/model/semsam/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | from .architectures import build_model
--------------------------------------------------------------------------------
/llava/model/semsam/architectures/__init__.py:
--------------------------------------------------------------------------------
1 | from .idino_model_partwhole_all_llm_ref_feats_all_det_pretrainv1 import *
2 | from .build import build_model
--------------------------------------------------------------------------------
/llava/model/semsam/architectures/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 | def build_model(config, **kwargs):
5 | model_name = config['MODEL']['NAME']
6 |
7 | if not is_model(model_name):
8 |         raise ValueError(f'Unknown model: {model_name}')
9 |
10 | return model_entrypoints(model_name)(config, **kwargs)
--------------------------------------------------------------------------------
/llava/model/semsam/architectures/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 | def register_model(fn):
4 | module_name_split = fn.__module__.split('.')
5 | model_name = module_name_split[-1]
6 | _model_entrypoints[model_name] = fn
7 | return fn
8 |
9 | def model_entrypoints(model_name):
10 | return _model_entrypoints[model_name]
11 |
12 | def is_model(model_name):
13 | return model_name in _model_entrypoints
--------------------------------------------------------------------------------
/llava/model/semsam/backbone/__init__.py:
--------------------------------------------------------------------------------
1 | from .build import build_backbone
2 |
3 | from .focal import *
4 | from .focal_dw import *
5 | from .swin import *
6 | from .backbone import *
--------------------------------------------------------------------------------
/llava/model/semsam/backbone/backbone.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | import torch.nn as nn
3 |
4 | from detectron2.modeling import ShapeSpec
5 |
6 | # from ..layers import ShapeSpec
7 |
8 | __all__ = ["Backbone"]
9 |
10 |
11 | class Backbone(nn.Module):
12 | """
13 | Abstract base class for network backbones.
14 | """
15 |
16 | def __init__(self):
17 | """
18 | The `__init__` method of any subclass can specify its own set of arguments.
19 | """
20 | super().__init__()
21 |
22 | def forward(self):
23 | """
24 | Subclasses must override this method, but adhere to the same return type.
25 |
26 | Returns:
27 | dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor
28 | """
29 | pass
30 |
31 | @property
32 | def size_divisibility(self) -> int:
33 | """
34 | Some backbones require the input height and width to be divisible by a
35 | specific integer. This is typically true for encoder / decoder type networks
36 | with lateral connection (e.g., FPN) for which feature maps need to match
37 | dimension in the "bottom up" and "top down" paths. Set to 0 if no specific
38 | input size divisibility is required.
39 | """
40 | return 0
41 |
42 | def output_shape(self):
43 | """
44 | Returns:
45 | dict[str->ShapeSpec]
46 | """
47 | # this is a backward-compatible default
48 | return {
49 | name: ShapeSpec(
50 | channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
51 | )
52 | for name in self._out_features
53 | }
54 |
--------------------------------------------------------------------------------
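A minimal sketch (hypothetical, not a repository file) of the contract Backbone expects from subclasses: populate _out_features, _out_feature_channels and _out_feature_strides, and return a name-to-tensor dict from forward(); detectron2 must be installed for ShapeSpec:

    import torch
    import torch.nn as nn

    from llava.model.semsam.backbone.backbone import Backbone


    class ToyBackbone(Backbone):
        def __init__(self):
            super().__init__()
            self.stem = nn.Conv2d(3, 64, kernel_size=4, stride=4)
            self._out_features = ["res2"]
            self._out_feature_channels = {"res2": 64}
            self._out_feature_strides = {"res2": 4}

        def forward(self, x):
            return {"res2": self.stem(x)}


    bb = ToyBackbone()
    feats = bb(torch.rand(1, 3, 64, 64))
    print(feats["res2"].shape)        # torch.Size([1, 64, 16, 16])
    print(bb.output_shape()["res2"])  # ShapeSpec(channels=64, ..., stride=4)

--------------------------------------------------------------------------------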
/llava/model/semsam/backbone/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 | from .backbone import *
5 |
6 | def build_backbone(config, **kwargs):
7 | model_name = config['MODEL']['BACKBONE']['NAME']
8 | if not is_model(model_name):
9 |         raise ValueError(f'Unknown model: {model_name}')
10 |
11 | return model_entrypoints(model_name)(config, **kwargs)
--------------------------------------------------------------------------------
/llava/model/semsam/backbone/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 |
4 | def register_backbone(fn):
5 | module_name_split = fn.__module__.split('.')
6 | model_name = module_name_split[-1]
7 | _model_entrypoints[model_name] = fn
8 | return fn
9 |
10 | def model_entrypoints(model_name):
11 | return _model_entrypoints[model_name]
12 |
13 | def is_model(model_name):
14 | return model_name in _model_entrypoints
15 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/__init__.py:
--------------------------------------------------------------------------------
1 | from .build import build_openseed_head
--------------------------------------------------------------------------------
/llava/model/semsam/body/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 | from .openseed_head import *
4 |
5 |
6 | def build_openseed_head(config, *args, **kwargs):
7 | model_name = config['MODEL']['HEAD']
8 | if not is_model(model_name):
9 |         raise ValueError(f'Unknown model: {model_name}')
10 |
11 | body = model_entrypoints(model_name)(config, *args, **kwargs)
12 | return body
--------------------------------------------------------------------------------
/llava/model/semsam/body/decoder/__init__.py:
--------------------------------------------------------------------------------
1 | from .build import build_decoder
2 | from .idino_decoder_no_iou_token_partwhole_all_llm import *
--------------------------------------------------------------------------------
/llava/model/semsam/body/decoder/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 |
5 | def build_decoder(config, *args, **kwargs):
6 | model_name = config['MODEL']['DECODER']['NAME']
7 |
8 | if not is_model(model_name):
9 |         raise ValueError(f'Unknown model: {model_name}')
10 |
11 | return model_entrypoints(model_name)(config, *args, **kwargs)
--------------------------------------------------------------------------------
/llava/model/semsam/body/decoder/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 | def register_decoder(fn):
4 | module_name_split = fn.__module__.split('.')
5 | model_name = module_name_split[-1]
6 | _model_entrypoints[model_name] = fn
7 | return fn
8 |
9 | def model_entrypoints(model_name):
10 | return _model_entrypoints[model_name]
11 |
12 | def is_model(model_name):
13 | return model_name in _model_entrypoints
--------------------------------------------------------------------------------
/llava/model/semsam/body/decoder/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import *
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/__init__.py:
--------------------------------------------------------------------------------
1 | from .build import build_encoder
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 | from .transformer_encoder_fpn import *
5 | from .encoder_deform import *
6 |
7 | def build_encoder(config, *args, **kwargs):
8 | model_name = config['MODEL']['ENCODER']['NAME']
9 |
10 | if not is_model(model_name):
11 |         raise ValueError(f'Unknown model: {model_name}')
12 |
13 | return model_entrypoints(model_name)(config, *args, **kwargs)
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/ops/functions/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | # Copyright (c) Facebook, Inc. and its affiliates.
10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
11 |
12 | from .ms_deform_attn_func import MSDeformAttnFunction
13 |
14 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/ops/functions/ms_deform_attn_func.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | # Copyright (c) Facebook, Inc. and its affiliates.
10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
11 |
12 | from __future__ import absolute_import
13 | from __future__ import print_function
14 | from __future__ import division
15 |
16 | import torch
17 | import torch.nn.functional as F
18 | from torch.autograd import Function
19 | from torch.autograd.function import once_differentiable
20 |
21 | try:
22 | import MultiScaleDeformableAttention as MSDA
23 | except ModuleNotFoundError as e:
24 | info_string = (
25 | "\n\nPlease compile MultiScaleDeformableAttention CUDA op with the following commands:\n"
26 |         "\t`cd llava/model/semsam/body/encoder/ops`\n"
27 | "\t`sh make.sh`\n"
28 | )
29 | raise ModuleNotFoundError(info_string)
30 |
31 |
32 | class MSDeformAttnFunction(Function):
33 | @staticmethod
34 | def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
35 | ctx.im2col_step = im2col_step
36 | output = MSDA.ms_deform_attn_forward(
37 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
38 | ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
39 | return output
40 |
41 | @staticmethod
42 | @once_differentiable
43 | def backward(ctx, grad_output):
44 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
45 | grad_value, grad_sampling_loc, grad_attn_weight = \
46 | MSDA.ms_deform_attn_backward(
47 | value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
48 |
49 | return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
50 |
51 |
52 | def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
53 |     # For debugging and testing only;
54 |     # the CUDA kernel should be used in practice.
55 | N_, S_, M_, D_ = value.shape
56 | _, Lq_, M_, L_, P_, _ = sampling_locations.shape
57 | value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
58 | sampling_grids = 2 * sampling_locations - 1
59 | sampling_value_list = []
60 | for lid_, (H_, W_) in enumerate(value_spatial_shapes):
61 | # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
62 | value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_)
63 | # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
64 | sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
65 | # N_*M_, D_, Lq_, P_
66 | sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_,
67 | mode='bilinear', padding_mode='zeros', align_corners=False)
68 | sampling_value_list.append(sampling_value_l_)
69 | # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
70 | attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
71 | output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
72 | return output.transpose(1, 2).contiguous()
73 |
--------------------------------------------------------------------------------
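A shape sketch for the pure-PyTorch fallback ms_deform_attn_core_pytorch above (note the module raises at import time unless the MultiScaleDeformableAttention extension has been built via make.sh):

    import torch

    from llava.model.semsam.body.encoder.ops.functions.ms_deform_attn_func import (
        ms_deform_attn_core_pytorch,
    )

    N, M, D, Lq, P = 1, 2, 4, 10, 4           # batch, heads, head dim, queries, points per level
    shapes = [(8, 8), (4, 4)]                 # two feature levels
    S = sum(h * w for h, w in shapes)         # 80 flattened positions
    value = torch.rand(N, S, M, D)
    sampling_locations = torch.rand(N, Lq, M, len(shapes), P, 2)  # normalized [0, 1] coordinates
    attention_weights = torch.rand(N, Lq, M, len(shapes), P).softmax(-1)
    out = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights)
    print(out.shape)                          # torch.Size([1, 10, 8]) -> (N, Lq, M * D)

--------------------------------------------------------------------------------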
/llava/model/semsam/body/encoder/ops/make.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ------------------------------------------------------------------------------------------------
3 | # Deformable DETR
4 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | # ------------------------------------------------------------------------------------------------
7 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | # ------------------------------------------------------------------------------------------------
9 |
10 | # Copyright (c) Facebook, Inc. and its affiliates.
11 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
12 |
13 | python setup.py build install --user
14 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/ops/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | # Copyright (c) Facebook, Inc. and its affiliates.
10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
11 |
12 | from .ms_deform_attn import MSDeformAttn
13 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/ops/setup.py:
--------------------------------------------------------------------------------
1 | # ------------------------------------------------------------------------------------------------
2 | # Deformable DETR
3 | # Copyright (c) 2020 SenseTime. All Rights Reserved.
4 | # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
5 | # ------------------------------------------------------------------------------------------------
6 | # Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
7 | # ------------------------------------------------------------------------------------------------
8 |
9 | # Copyright (c) Facebook, Inc. and its affiliates.
10 | # Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
11 |
12 | import os
13 | import glob
14 |
15 | import torch
16 |
17 | from torch.utils.cpp_extension import CUDA_HOME
18 | from torch.utils.cpp_extension import CppExtension
19 | from torch.utils.cpp_extension import CUDAExtension
20 |
21 | from setuptools import find_packages
22 | from setuptools import setup
23 |
24 | requirements = ["torch", "torchvision"]
25 |
26 | def get_extensions():
27 | this_dir = os.path.dirname(os.path.abspath(__file__))
28 | extensions_dir = os.path.join(this_dir, "src")
29 |
30 | main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
31 | source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
32 | source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
33 |
34 | sources = main_file + source_cpu
35 | extension = CppExtension
36 | extra_compile_args = {"cxx": []}
37 | define_macros = []
38 |
39 |     # Force cuda since torch asks for a device, not whether cuda is in fact available.
40 | if (os.environ.get('FORCE_CUDA') or torch.cuda.is_available()) and CUDA_HOME is not None:
41 | extension = CUDAExtension
42 | sources += source_cuda
43 | define_macros += [("WITH_CUDA", None)]
44 | extra_compile_args["nvcc"] = [
45 | "-DCUDA_HAS_FP16=1",
46 | "-D__CUDA_NO_HALF_OPERATORS__",
47 | "-D__CUDA_NO_HALF_CONVERSIONS__",
48 | "-D__CUDA_NO_HALF2_OPERATORS__",
49 | ]
50 | else:
51 | if CUDA_HOME is None:
52 | raise NotImplementedError('CUDA_HOME is None. Please set environment variable CUDA_HOME.')
53 | else:
54 | raise NotImplementedError('No CUDA runtime is found. Please set FORCE_CUDA=1 or test it by running torch.cuda.is_available().')
55 |
56 | sources = [os.path.join(extensions_dir, s) for s in sources]
57 | include_dirs = [extensions_dir]
58 | ext_modules = [
59 | extension(
60 | "MultiScaleDeformableAttention",
61 | sources,
62 | include_dirs=include_dirs,
63 | define_macros=define_macros,
64 | extra_compile_args=extra_compile_args,
65 | )
66 | ]
67 | return ext_modules
68 |
69 | setup(
70 | name="MultiScaleDeformableAttention",
71 | version="1.0",
72 | author="Weijie Su",
73 | url="https://github.com/fundamentalvision/Deformable-DETR",
74 | description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention",
75 | packages=find_packages(exclude=("configs", "tests",)),
76 | ext_modules=get_extensions(),
77 | cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
78 | )
79 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/ops/src/cpu/ms_deform_attn_cpu.cpp:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #include <vector>
17 | 
18 | #include <ATen/ATen.h>
19 | #include <ATen/cuda/CUDAContext.h>
20 |
21 |
22 | at::Tensor
23 | ms_deform_attn_cpu_forward(
24 | const at::Tensor &value,
25 | const at::Tensor &spatial_shapes,
26 | const at::Tensor &level_start_index,
27 | const at::Tensor &sampling_loc,
28 | const at::Tensor &attn_weight,
29 | const int im2col_step)
30 | {
31 |     AT_ERROR("Not implemented on the CPU");
32 | }
33 |
34 | std::vector<at::Tensor>
35 | ms_deform_attn_cpu_backward(
36 | const at::Tensor &value,
37 | const at::Tensor &spatial_shapes,
38 | const at::Tensor &level_start_index,
39 | const at::Tensor &sampling_loc,
40 | const at::Tensor &attn_weight,
41 | const at::Tensor &grad_output,
42 | const int im2col_step)
43 | {
44 |     AT_ERROR("Not implemented on the CPU");
45 | }
46 |
47 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/ops/src/cpu/ms_deform_attn_cpu.h:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #pragma once
17 | #include <torch/extension.h>
18 |
19 | at::Tensor
20 | ms_deform_attn_cpu_forward(
21 | const at::Tensor &value,
22 | const at::Tensor &spatial_shapes,
23 | const at::Tensor &level_start_index,
24 | const at::Tensor &sampling_loc,
25 | const at::Tensor &attn_weight,
26 | const int im2col_step);
27 |
28 | std::vector<at::Tensor>
29 | ms_deform_attn_cpu_backward(
30 | const at::Tensor &value,
31 | const at::Tensor &spatial_shapes,
32 | const at::Tensor &level_start_index,
33 | const at::Tensor &sampling_loc,
34 | const at::Tensor &attn_weight,
35 | const at::Tensor &grad_output,
36 | const int im2col_step);
37 |
38 |
39 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/ops/src/cuda/ms_deform_attn_cuda.h:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #pragma once
17 | #include <torch/extension.h>
18 |
19 | at::Tensor ms_deform_attn_cuda_forward(
20 | const at::Tensor &value,
21 | const at::Tensor &spatial_shapes,
22 | const at::Tensor &level_start_index,
23 | const at::Tensor &sampling_loc,
24 | const at::Tensor &attn_weight,
25 | const int im2col_step);
26 |
27 | std::vector<at::Tensor> ms_deform_attn_cuda_backward(
28 | const at::Tensor &value,
29 | const at::Tensor &spatial_shapes,
30 | const at::Tensor &level_start_index,
31 | const at::Tensor &sampling_loc,
32 | const at::Tensor &attn_weight,
33 | const at::Tensor &grad_output,
34 | const int im2col_step);
35 |
36 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/ops/src/ms_deform_attn.h:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #pragma once
17 |
18 | #include "cpu/ms_deform_attn_cpu.h"
19 |
20 | #ifdef WITH_CUDA
21 | #include "cuda/ms_deform_attn_cuda.h"
22 | #endif
23 |
24 |
25 | at::Tensor
26 | ms_deform_attn_forward(
27 | const at::Tensor &value,
28 | const at::Tensor &spatial_shapes,
29 | const at::Tensor &level_start_index,
30 | const at::Tensor &sampling_loc,
31 | const at::Tensor &attn_weight,
32 | const int im2col_step)
33 | {
34 | if (value.type().is_cuda())
35 | {
36 | #ifdef WITH_CUDA
37 | return ms_deform_attn_cuda_forward(
38 | value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);
39 | #else
40 | AT_ERROR("Not compiled with GPU support");
41 | #endif
42 | }
43 | AT_ERROR("Not implemented on the CPU");
44 | }
45 |
46 | std::vector<at::Tensor>
47 | ms_deform_attn_backward(
48 | const at::Tensor &value,
49 | const at::Tensor &spatial_shapes,
50 | const at::Tensor &level_start_index,
51 | const at::Tensor &sampling_loc,
52 | const at::Tensor &attn_weight,
53 | const at::Tensor &grad_output,
54 | const int im2col_step)
55 | {
56 | if (value.type().is_cuda())
57 | {
58 | #ifdef WITH_CUDA
59 | return ms_deform_attn_cuda_backward(
60 | value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);
61 | #else
62 | AT_ERROR("Not compiled with GPU support");
63 | #endif
64 | }
65 | AT_ERROR("Not implemented on the CPU");
66 | }
67 |
68 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/ops/src/vision.cpp:
--------------------------------------------------------------------------------
1 | /*!
2 | **************************************************************************************************
3 | * Deformable DETR
4 | * Copyright (c) 2020 SenseTime. All Rights Reserved.
5 | * Licensed under the Apache License, Version 2.0 [see LICENSE for details]
6 | **************************************************************************************************
7 | * Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
8 | **************************************************************************************************
9 | */
10 |
11 | /*!
12 | * Copyright (c) Facebook, Inc. and its affiliates.
13 | * Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
14 | */
15 |
16 | #include "ms_deform_attn.h"
17 |
18 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
19 | m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward");
20 | m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward");
21 | }
22 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/encoder/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 | def register_encoder(fn):
4 | module_name_split = fn.__module__.split('.')
5 | model_name = module_name_split[-1]
6 | _model_entrypoints[model_name] = fn
7 | return fn
8 |
9 | def model_entrypoints(model_name):
10 | return _model_entrypoints[model_name]
11 |
12 | def is_model(model_name):
13 | return model_name in _model_entrypoints
14 |
--------------------------------------------------------------------------------
/llava/model/semsam/body/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 |
4 | def register_body(fn):
5 | module_name_split = fn.__module__.split('.')
6 | model_name = module_name_split[-1]
7 | _model_entrypoints[model_name] = fn
8 | return fn
9 |
10 | def model_entrypoints(model_name):
11 | return _model_entrypoints[model_name]
12 |
13 | def is_model(model_name):
14 | return model_name in _model_entrypoints
--------------------------------------------------------------------------------
/llava/model/semsam/language/LangEncoder/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | from .build import build_lang_encoder
6 | from .build import build_tokenizer
7 |
8 | from .transformer import *
--------------------------------------------------------------------------------
/llava/model/semsam/language/LangEncoder/build.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from transformers import CLIPTokenizer, CLIPTokenizerFast
4 | from transformers import AutoTokenizer
5 |
6 | from .registry import lang_encoders
7 | from .registry import is_lang_encoder
8 |
9 |
10 | def build_lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
11 | model_name = config_encoder['NAME']
12 |
13 | if not is_lang_encoder(model_name):
14 |         raise ValueError(f'Unknown model: {model_name}')
15 |
16 | return lang_encoders(model_name)(config_encoder, tokenizer, verbose, **kwargs)
17 |
18 |
19 | def build_tokenizer(config_encoder):
20 | tokenizer = None
21 | os.environ['TOKENIZERS_PARALLELISM'] = 'true'
22 | if config_encoder['TOKENIZER'] == 'clip':
23 | pretrained_tokenizer = config_encoder.get(
24 | 'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
25 | )
26 | tokenizer = CLIPTokenizer.from_pretrained(pretrained_tokenizer)
27 | tokenizer.add_special_tokens({'cls_token': tokenizer.eos_token})
28 | elif config_encoder['TOKENIZER'] == 'clip-fast':
29 | pretrained_tokenizer = config_encoder.get(
30 | 'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
31 | )
32 | tokenizer = CLIPTokenizerFast.from_pretrained(pretrained_tokenizer, from_slow=True)
33 | else:
34 | tokenizer = AutoTokenizer.from_pretrained(config_encoder['TOKENIZER'])
35 |
36 | return tokenizer
37 |
--------------------------------------------------------------------------------
/llava/model/semsam/language/LangEncoder/registry.py:
--------------------------------------------------------------------------------
1 | _lang_encoders = {}
2 |
3 |
4 | def register_lang_encoder(fn):
5 | module_name_split = fn.__module__.split('.')
6 | model_name = module_name_split[-1]
7 |
8 | _lang_encoders[model_name] = fn
9 |
10 | return fn
11 |
12 |
13 | def lang_encoders(model_name):
14 | return _lang_encoders[model_name]
15 |
16 |
17 | def is_lang_encoder(model_name):
18 | return model_name in _lang_encoders
19 |
--------------------------------------------------------------------------------
/llava/model/semsam/language/__init__.py:
--------------------------------------------------------------------------------
1 | # from .vlpencoder import *
2 | # from .encoder import *
3 | # from .fixencoder import *
4 | # from .loss import *
5 | # from .modeling_llama_os import LlamaForCausalLM
6 | # # from .modeling_llama_os_lora import LlamaForCausalLMLora
7 | # from .llama_encoder import *
8 | # from .build import build_language_encoder
9 |
--------------------------------------------------------------------------------
/llava/model/semsam/language/build.py:
--------------------------------------------------------------------------------
1 | from .registry import model_entrypoints
2 | from .registry import is_model
3 |
4 |
5 | def build_language_encoder(config, **kwargs):
6 | model_name = config['MODEL']['TEXT']['ARCH']
7 | if model_name=='noencoder':
8 | return None
9 |
10 | if not is_model(model_name):
11 |         raise ValueError(f'Unknown model: {model_name}')
12 |
13 | return model_entrypoints(model_name)(config, **kwargs)
--------------------------------------------------------------------------------
/llava/model/semsam/language/misc.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import torch
4 | import nltk
5 | nltk.data.path.append('/mnt/data/nltk_data')
6 | import numpy as np
7 |
8 | from utils.constants import IMAGENET_DEFAULT_TEMPLATES
9 |
10 |
11 | def vl_similarity(image_feat, text_feat, temperature=1):  # expects a learnable log-temperature tensor; .exp() is taken below
12 | # Only support single GPU for now.
13 | logits = torch.matmul(image_feat, text_feat.t())
14 | logits = temperature.exp().clamp(max=100) * logits
15 | return logits
16 |
17 | def get_tag(tokenized, tags):
18 | if not isinstance(tags, (list, tuple)):
19 | tags = [tags]
20 | ret = []
21 | for (word, pos) in nltk.pos_tag(tokenized):
22 | for tag in tags:
23 | if pos == tag:
24 | ret.append(word)
25 | return ret
26 |
27 | def get_noun_phrase(tokenized):
28 | # Taken from Su Nam Kim Paper...
29 | grammar = r"""
30 | NBAR:
31 |         {<NN.*|JJ>*<NN.*>}  # Nouns and Adjectives, terminated with Nouns
32 | 
33 |     NP:
34 |         {<NBAR>}
35 |         {<NBAR><IN><NBAR>}  # Above, connected with in/of/etc...
36 | """
37 | chunker = nltk.RegexpParser(grammar)
38 |
39 | chunked = chunker.parse(nltk.pos_tag(tokenized))
40 | continuous_chunk = []
41 | current_chunk = []
42 |
43 | for subtree in chunked:
44 | if isinstance(subtree, nltk.Tree):
45 | current_chunk.append(' '.join([token for token, pos in subtree.leaves()]))
46 | elif current_chunk:
47 | named_entity = ' '.join(current_chunk)
48 | if named_entity not in continuous_chunk:
49 | continuous_chunk.append(named_entity)
50 | current_chunk = []
51 | else:
52 | continue
53 |
54 | return continuous_chunk
55 |
56 | def text_noun_with_prompt_all(text, phrase_prob=0.0, append_text=True):
57 | tokenized = nltk.word_tokenize(text)
58 |
59 | if random.random() >= phrase_prob:
60 | nouns = get_tag(tokenized, ['NN', 'NNS', 'NNP'])
61 | else:
62 | nouns = get_noun_phrase(tokenized)
63 |
64 |
65 | prompt_texts = [np.random.choice(IMAGENET_DEFAULT_TEMPLATES).format(noun) for noun in nouns]
66 |
67 | if append_text:
68 | prompt_texts += [text]
69 | nouns += [text]
70 |
71 | return prompt_texts, nouns
--------------------------------------------------------------------------------
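A sketch of the noun extraction above (it assumes the repository's utils package is importable and that the nltk 'punkt' and 'averaged_perceptron_tagger' data are available, e.g. via nltk.download):

    import nltk

    from llava.model.semsam.language.misc import get_tag, get_noun_phrase

    tokens = nltk.word_tokenize("A young man smiles while wearing several ties")
    print(get_tag(tokens, ['NN', 'NNS']))  # singular/plural nouns, e.g. ['man', 'ties'] (tagger-dependent)
    print(get_noun_phrase(tokens))         # noun phrases found by the chunk grammar, e.g. ['young man']

--------------------------------------------------------------------------------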
/llava/model/semsam/language/registry.py:
--------------------------------------------------------------------------------
1 | _model_entrypoints = {}
2 |
3 | def register_model(fn):
4 | module_name_split = fn.__module__.split('.')
5 | model_name = module_name_split[-1]
6 | _model_entrypoints[model_name] = fn
7 | return fn
8 |
9 | def model_entrypoints(model_name):
10 | return _model_entrypoints[model_name]
11 |
12 | def is_model(model_name):
13 | return model_name in _model_entrypoints
--------------------------------------------------------------------------------
/llava/model/semsam/modules/__init__.py:
--------------------------------------------------------------------------------
1 | from .point_features import *
2 | from .position_encoding import *
3 | from .postprocessing import *
4 | from .attention import *
5 | from .matcher import *
6 | from .criterion_id_llm import *
7 | from .hooks import HookBase
--------------------------------------------------------------------------------
/llava/model/semsam/modules/position_encoding.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates.
2 | # # Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
3 | """
4 | Various positional encodings for the transformer.
5 | """
6 | import math
7 |
8 | import torch
9 | from torch import nn
10 |
11 |
12 | class PositionEmbeddingSine(nn.Module):
13 | """
14 | This is a more standard version of the position embedding, very similar to the one
15 | used by the Attention is all you need paper, generalized to work on images.
16 | """
17 |
18 | def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
19 | super().__init__()
20 | self.num_pos_feats = num_pos_feats
21 | self.temperature = temperature
22 | self.normalize = normalize
23 | if scale is not None and normalize is False:
24 | raise ValueError("normalize should be True if scale is passed")
25 | if scale is None:
26 | scale = 2 * math.pi
27 | self.scale = scale
28 |
29 | def forward(self, x, mask=None):
30 | if mask is None:
31 | mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
32 | not_mask = ~mask
33 | y_embed = not_mask.cumsum(1, dtype=x.dtype)
34 | x_embed = not_mask.cumsum(2, dtype=x.dtype)
35 | if self.normalize:
36 | eps = 1e-6
37 | y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
38 | x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
39 |
40 | dim_t = torch.arange(self.num_pos_feats, dtype=x.dtype, device=x.device)
41 | dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
42 |
43 | pos_x = x_embed[:, :, :, None] / dim_t
44 | pos_y = y_embed[:, :, :, None] / dim_t
45 | pos_x = torch.stack(
46 | (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
47 | ).flatten(3)
48 | pos_y = torch.stack(
49 | (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
50 | ).flatten(3)
51 | pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
52 | return pos
53 |
54 | def __repr__(self, _repr_indent=4):
55 | head = "Positional encoding " + self.__class__.__name__
56 | body = [
57 | "num_pos_feats: {}".format(self.num_pos_feats),
58 | "temperature: {}".format(self.temperature),
59 | "normalize: {}".format(self.normalize),
60 | "scale: {}".format(self.scale),
61 | ]
62 | # _repr_indent = 4
63 | lines = [head] + [" " * _repr_indent + line for line in body]
64 | return "\n".join(lines)
65 |
--------------------------------------------------------------------------------
/llava/model/semsam/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .config import *
2 | from .misc import *
3 | # from .dist import *
--------------------------------------------------------------------------------
/llava/model/semsam/utils/box_ops.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | """
3 | Utilities for bounding box manipulation and GIoU.
4 | """
5 | import torch
6 | from torchvision.ops.boxes import box_area
7 |
8 |
9 | def box_cxcywh_to_xyxy(x):
10 | x_c, y_c, w, h = x.unbind(-1)
11 | b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
12 | (x_c + 0.5 * w), (y_c + 0.5 * h)]
13 | return torch.stack(b, dim=-1)
14 |
15 |
16 | def box_xyxy_to_cxcywh(x):
17 | x0, y0, x1, y1 = x.unbind(-1)
18 | b = [(x0 + x1) / 2, (y0 + y1) / 2,
19 | (x1 - x0), (y1 - y0)]
20 | return torch.stack(b, dim=-1)
21 |
22 | def box_xywh_to_xyxy(x):
23 |     x0, y0, w, h = x.unbind(-1)
24 |     b = [x0, y0, (x0 + w), (y0 + h)]
25 | return torch.stack(b, dim=-1)
26 |
27 |
28 | # modified from torchvision to also return the union
29 | def box_iou(boxes1, boxes2):
30 | area1 = box_area(boxes1)
31 | area2 = box_area(boxes2)
32 |
33 | lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
34 | rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
35 |
36 | wh = (rb - lt).clamp(min=0) # [N,M,2]
37 | inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
38 |
39 | union = area1[:, None] + area2 - inter
40 |
41 | iou = inter / (union+1e-6)
42 | return iou, union
43 |
44 |
45 | def generalized_box_iou(boxes1, boxes2):
46 | """
47 | Generalized IoU from https://giou.stanford.edu/
48 |
49 | The boxes should be in [x0, y0, x1, y1] format
50 |
51 | Returns a [N, M] pairwise matrix, where N = len(boxes1)
52 | and M = len(boxes2)
53 | """
54 | # degenerate boxes gives inf / nan results
55 | # so do an early check
56 | assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
57 | assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
58 | iou, union = box_iou(boxes1, boxes2)
59 |
60 | lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
61 | rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
62 |
63 | wh = (rb - lt).clamp(min=0) # [N,M,2]
64 | area = wh[:, :, 0] * wh[:, :, 1]
65 |
66 | return iou - (area - union) / (area+1e-6)
67 |
68 |
69 | def masks_to_boxes(masks):
70 | """Compute the bounding boxes around the provided masks
71 |
72 | The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
73 |
74 |     Returns an [N, 4] tensor, with the boxes in xyxy format
75 | """
76 | if masks.numel() == 0:
77 | return torch.zeros((0, 4), device=masks.device)
78 |
79 | h, w = masks.shape[-2:]
80 |
81 | y = torch.arange(0, h, dtype=torch.float)
82 | x = torch.arange(0, w, dtype=torch.float)
83 | y, x = torch.meshgrid(y, x)
84 |
85 | x_mask = (masks * x.unsqueeze(0))
86 | x_max = x_mask.flatten(1).max(-1)[0]
87 | x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
88 |
89 | y_mask = (masks * y.unsqueeze(0))
90 | y_max = y_mask.flatten(1).max(-1)[0]
91 | y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
92 |
93 | return torch.stack([x_min, y_min, x_max, y_max], 1)
--------------------------------------------------------------------------------
/llava/model/utils.py:
--------------------------------------------------------------------------------
1 | from transformers import AutoConfig
2 |
3 |
4 | def auto_upgrade(config):
5 | cfg = AutoConfig.from_pretrained(config)
6 | if 'llava' in config and 'llava' not in cfg.model_type:
7 | assert cfg.model_type == 'llama'
8 |         print("You are using a newer LLaVA code base, while the v0 checkpoint is from an older code base.")
9 | print("You must upgrade the checkpoint to the new code base (this can be done automatically).")
10 | confirm = input("Please confirm that you want to upgrade the checkpoint. [Y/N]")
11 | if confirm.lower() in ["y", "yes"]:
12 | print("Upgrading checkpoint...")
13 | assert len(cfg.architectures) == 1
14 | setattr(cfg.__class__, "model_type", "llava")
15 | cfg.architectures[0] = 'LlavaLlamaForCausalLM'
16 | cfg.save_pretrained(config)
17 | print("Checkpoint upgraded.")
18 | else:
19 | print("Checkpoint upgrade aborted.")
20 | exit(1)
21 |
--------------------------------------------------------------------------------
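A usage sketch for auto_upgrade (the checkpoint path below is hypothetical; the directory name must contain 'llava' and hold a v0-era config.json for the interactive upgrade to trigger):

    from llava.model.utils import auto_upgrade

    # Prompts before rewriting config.json in place when an old llama-typed config is detected.
    auto_upgrade("/path/to/llava-7b-v0-checkpoint")

--------------------------------------------------------------------------------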
/llava/serve/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/llava/serve/__init__.py
--------------------------------------------------------------------------------
/llava/serve/examples/extreme_ironing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/llava/serve/examples/extreme_ironing.jpg
--------------------------------------------------------------------------------
/llava/serve/examples/waterview.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/llava/serve/examples/waterview.jpg
--------------------------------------------------------------------------------
/llava/serve/register_worker.py:
--------------------------------------------------------------------------------
1 | """
2 | Manually register workers.
3 |
4 | Usage:
5 | python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name http://localhost:21002
6 | """
7 |
8 | import argparse
9 |
10 | import requests
11 |
12 | if __name__ == "__main__":
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument("--controller-address", type=str)
15 | parser.add_argument("--worker-name", type=str)
16 | parser.add_argument("--check-heart-beat", action="store_true")
17 | args = parser.parse_args()
18 |
19 | url = args.controller_address + "/register_worker"
20 | data = {
21 | "worker_name": args.worker_name,
22 | "check_heart_beat": args.check_heart_beat,
23 | "worker_status": None,
24 | }
25 | r = requests.post(url, json=data)
26 | assert r.status_code == 200
27 |
--------------------------------------------------------------------------------
/llava/serve/test_message.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 |
4 | import requests
5 |
6 | from llava.conversation import default_conversation
7 |
8 |
9 | def main():
10 | if args.worker_address:
11 | worker_addr = args.worker_address
12 | else:
13 | controller_addr = args.controller_address
14 | ret = requests.post(controller_addr + "/refresh_all_workers")
15 | ret = requests.post(controller_addr + "/list_models")
16 | models = ret.json()["models"]
17 | models.sort()
18 | print(f"Models: {models}")
19 |
20 | ret = requests.post(controller_addr + "/get_worker_address",
21 | json={"model": args.model_name})
22 | worker_addr = ret.json()["address"]
23 | print(f"worker_addr: {worker_addr}")
24 |
25 | if worker_addr == "":
26 | return
27 |
28 | conv = default_conversation.copy()
29 | conv.append_message(conv.roles[0], args.message)
30 | prompt = conv.get_prompt()
31 |
32 | headers = {"User-Agent": "LLaVA Client"}
33 | pload = {
34 | "model": args.model_name,
35 | "prompt": prompt,
36 | "max_new_tokens": args.max_new_tokens,
37 | "temperature": 0.7,
38 | "stop": conv.sep,
39 | }
40 | response = requests.post(worker_addr + "/worker_generate_stream", headers=headers,
41 | json=pload, stream=True)
42 |
43 | print(prompt.replace(conv.sep, "\n"), end="")
44 | for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
45 | if chunk:
46 | data = json.loads(chunk.decode("utf-8"))
47 | output = data["text"].split(conv.sep)[-1]
48 | print(output, end="\r")
49 | print("")
50 |
51 |
52 | if __name__ == "__main__":
53 | parser = argparse.ArgumentParser()
54 | parser.add_argument("--controller-address", type=str, default="http://localhost:21001")
55 | parser.add_argument("--worker-address", type=str)
56 | parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
57 | parser.add_argument("--max-new-tokens", type=int, default=32)
58 | parser.add_argument("--message", type=str, default=
59 | "Tell me a story with more than 1000 words.")
60 | args = parser.parse_args()
61 |
62 | main()
63 |
--------------------------------------------------------------------------------
/llava/train/llava_trainer.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 |
4 | from transformers import Trainer
5 | from typing import Optional
6 |
7 |
8 | def maybe_zero_3(param, ignore_status=False, name=None):
9 | from deepspeed import zero
10 | from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
11 | if hasattr(param, "ds_id"):
12 | if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
13 | if not ignore_status:
14 | print(name, 'no ignore status')
15 | with zero.GatheredParameters([param]):
16 | param = param.data.detach().cpu().clone()
17 | else:
18 | param = param.detach().cpu().clone()
19 | return param
20 |
21 |
22 | def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
23 | to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}
24 | to_return = {k: maybe_zero_3(v, ignore_status=True, name=k).cpu() for k, v in to_return.items()}
25 | return to_return
26 |
27 |
28 | class LLaVATrainer(Trainer):
29 |
30 | def _save_checkpoint(self, model, trial, metrics=None):
31 | if getattr(self.args, 'tune_mm_mlp_adapter', False):
32 | from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
33 | checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
34 |
35 | run_dir = self._get_output_dir(trial=trial)
36 | output_dir = os.path.join(run_dir, checkpoint_folder)
37 |
38 | # Only save Adapter
39 | keys_to_match = ['mm_projector']
40 | if getattr(self.args, "use_im_start_end", False) or getattr(self.args, "new_tokens", False):
41 | keys_to_match.extend(['embed_tokens', 'embed_in','lm_head'])
42 | # import pdb; pdb.set_trace()
43 | weight_to_save = get_mm_adapter_state_maybe_zero_3(self.model.named_parameters(), keys_to_match)
44 |
45 | if self.args.local_rank == 0 or self.args.local_rank == -1:
46 | self.model.config.save_pretrained(output_dir)
47 | torch.save(weight_to_save, os.path.join(output_dir, f'mm_projector.bin'))
48 | else:
49 | super(LLaVATrainer, self)._save_checkpoint(model, trial, metrics)
50 |
51 | def _save(self, output_dir: Optional[str] = None, state_dict=None):
52 | if getattr(self.args, 'tune_mm_mlp_adapter', False):
53 | pass
54 | else:
55 | super(LLaVATrainer, self)._save(output_dir, state_dict)
56 |
--------------------------------------------------------------------------------
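When tune_mm_mlp_adapter is set, only parameters whose names contain one of keys_to_match are written to mm_projector.bin. A minimal sketch of that substring filter on a toy module (module and parameter names are illustrative; without DeepSpeed ZeRO-3 partitioning, maybe_zero_3 only detaches and clones):

    import torch
    import torch.nn as nn

    class ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.mm_projector = nn.Linear(4, 4)   # kept by the filter
            self.lm_head = nn.Linear(4, 4)        # kept only if 'lm_head' is added to keys_to_match
            self.backbone = nn.Linear(4, 4)       # never kept

    model = ToyModel()
    keys_to_match = ["mm_projector"]
    weight_to_save = {k: v.detach().cpu().clone()
                      for k, v in model.named_parameters()
                      if any(key in k for key in keys_to_match)}
    print(sorted(weight_to_save))  # ['mm_projector.bias', 'mm_projector.weight']
    torch.save(weight_to_save, "mm_projector.bin")  # same artifact name the trainer writes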
/llava/train/train_mem.py:
--------------------------------------------------------------------------------
1 | # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
2 | # Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
3 | # Make it more memory efficient by monkey patching the LLaMA model with FlashAttn.
4 |
5 | # Need to call this before importing transformers.
6 | from llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn
7 |
8 | replace_llama_attn_with_flash_attn()
9 |
10 | from llava.train.train import train
11 |
12 | if __name__ == "__main__":
13 | train()
14 |
--------------------------------------------------------------------------------
/playground/data/coco2014_val_qa_eval/qa90_questions_with_grounding_sample.jsonl:
--------------------------------------------------------------------------------
1 | {"question_id": 0, "image": "000000441147.jpg", "text": "What is the color of the two suitcases in the image? (with grounding)", "category": "conv"}
2 | {"question_id": 1, "image": "000000441147.jpg", "text": "Analyze the image in a comprehensive and detailed manner. (with grounding)", "category": "detail"}
3 | {"question_id": 2, "image": "000000441147.jpg", "text": "What potential factors could make these suitcases valuable? (with grounding)", "category": "complex"}
4 | {"question_id": 3, "image": "000000353536.jpg", "text": "What are the main objects on the table in the image? (with grounding)", "category": "conv"}
5 | {"question_id": 4, "image": "000000353536.jpg", "text": "Describe the following image. (with grounding)", "category": "detail"}
6 |
--------------------------------------------------------------------------------
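Each line of the sample above is a standalone JSON object with question_id, image, text, and category fields. A minimal sketch for loading such a JSONL file and grouping questions by category (the path mirrors the sample file shown above):

    import json
    from collections import defaultdict

    path = "playground/data/coco2014_val_qa_eval/qa90_questions_with_grounding_sample.jsonl"

    by_category = defaultdict(list)
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            record = json.loads(line)
            by_category[record["category"]].append(record["text"])

    for category, questions in sorted(by_category.items()):
        print(category, len(questions))  # complex 1, conv 2, detail 2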
/playground/data/prompts/complex_reasoning/000_caps.txt:
--------------------------------------------------------------------------------
1 | A man wearing multiple neck ties making a goofy face.
2 | A man in a white shirt wearing very many ties.
3 | a man with ties on poses for a picture
4 | A man wearing multiple ties on his neck.
5 | A young man smiles while wearing several ties.
6 |
7 | tie: [0.574, 0.298, 0.752, 0.704]
8 | tie: [0.464, 0.339, 0.639, 0.789]
9 | tie: [0.349, 0.363, 0.563, 0.732]
10 | tie: [0.259, 0.255, 0.668, 0.805]
11 | person: [0.019, 0.065, 0.962, 0.988]
12 | person: [0.0, 0.24, 0.214, 1.0]
13 | tie: [0.316, 0.778, 0.443, 0.867]
14 | tie: [0.386, 0.707, 0.496, 0.801]
15 | tie: [0.251, 0.354, 0.402, 0.747]
16 | tie: [0.44, 0.318, 0.63, 0.397]
17 | tie: [0.566, 0.33, 0.655, 0.543]
18 | tie: [0.25, 0.409, 0.359, 0.556]
--------------------------------------------------------------------------------
/playground/data/prompts/complex_reasoning/000_conv.txt:
--------------------------------------------------------------------------------
1 | Question:
2 | What is unusual about this photo?
3 | ===
4 | Answer:
5 | In the photo, the man is wearing a total of ten ties around his neck. This is considered unusual because it is not common for someone to wear multiple ties at once. In a typical setting, a person would wear only one tie. The man's goofy facial expression also adds to the unconventional and humorous nature of the image.
--------------------------------------------------------------------------------
/playground/data/prompts/complex_reasoning/001_caps.txt:
--------------------------------------------------------------------------------
1 | A group of people standing outside of a black vehicle with various luggage.
2 | Luggage surrounds a vehicle in an underground parking area
3 | People try to fit all of their luggage in an SUV.
4 | The sport utility vehicle is parked in the public garage, being packed for a trip
5 | Some people with luggage near a van that is transporting it.
6 |
7 | person: [0.681, 0.242, 0.774, 0.694]
8 | person: [0.63, 0.222, 0.686, 0.516]
9 | person: [0.444, 0.233, 0.487, 0.34]
10 | backpack: [0.384, 0.696, 0.485, 0.914]
11 | backpack: [0.755, 0.413, 0.846, 0.692]
12 | suitcase: [0.758, 0.413, 0.845, 0.69]
13 | suitcase: [0.1, 0.497, 0.173, 0.579]
14 | bicycle: [0.282, 0.363, 0.327, 0.442]
15 | car: [0.786, 0.25, 0.848, 0.322]
16 | car: [0.783, 0.27, 0.827, 0.335]
17 | car: [0.86, 0.254, 0.891, 0.3]
18 | car: [0.261, 0.101, 0.787, 0.626]
--------------------------------------------------------------------------------
/playground/data/prompts/complex_reasoning/001_conv.txt:
--------------------------------------------------------------------------------
1 | Question:
2 | What challenges do these people face?
3 | ===
4 | Answer:
5 | In the image, a group of people is standing outside a black SUV in a parking area, surrounded by various pieces of luggage, including suitcases and backpacks. They are facing the challenge of fitting all their luggage into the black SUV. There are multiple suitcases and backpacks to be packed, which suggests that the group has a significant amount of belongings to accommodate. They might have to strategize and arrange the luggage efficiently to ensure that everything fits properly into the vehicle. Additionally, they need to consider the comfort of the passengers and visibility while driving, so the placement of the luggage must not obstruct the driver's view or make the passengers uncomfortable during the trip.
--------------------------------------------------------------------------------
/playground/data/prompts/complex_reasoning/002_caps.txt:
--------------------------------------------------------------------------------
1 | There is a movie theater that displays the show times above the doors.
2 | A red fire hydrant is deep in the snow.
3 | The fire hydrant is in the snow near a recently plowed sidewalk.
4 | This city has had a very hard winter with snow.
5 | A hotel for dogs in the snow in winter.
6 |
7 | fire hydrant: [0.326, 0.612, 0.426, 0.72]
--------------------------------------------------------------------------------
/playground/data/prompts/complex_reasoning/002_conv.txt:
--------------------------------------------------------------------------------
1 | Question:
2 | What challenges might this city face?
3 | ===
4 | Answer:
5 | The city faces challenges due to the harsh winter conditions and heavy snowfall. In the image, a red fire hydrant is almost buried deep in the snow, which indicates the significant amount of snow the city has experienced. This can lead to various challenges such as difficulties in transportation, increased risk of accidents, and disruptions to daily life. For example, the recently plowed sidewalk near the fire hydrant shows that the city has to constantly clear snow from roads and sidewalks to maintain access and safety for pedestrians and vehicles. Moreover, emergency services, like firefighters, might face challenges accessing crucial equipment, such as fire hydrants, during emergencies due to the snow accumulation. This highlights the importance of effective snow management strategies and preparedness in such cities to minimize the impact of harsh winter conditions on residents and essential services.
--------------------------------------------------------------------------------
/playground/data/prompts/complex_reasoning/system_message.txt:
--------------------------------------------------------------------------------
1 | You are an AI visual assistant that can analyze a single image. You receive five sentences, each describing the same image you are observing. In addition, specific object locations within the image are given, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y.
2 |
3 | Using the provided caption and bounding box information, create a plausible question about the image, and provide the answer in detail.
4 |
5 | Create complex questions beyond describing the scene.
6 | To answer such questions, one should first understand the visual content, then, based on background knowledge or reasoning, either explain why things are happening that way or provide guidance for the user's request. Make the question challenging by not including the visual content details in the question, so that the user needs to reason about that first.
7 |
8 | Instead of directly mentioning the bounding box coordinates, utilize this data to explain the scene using natural language. Include details like object counts, position of the objects, relative position between the objects.
9 |
10 | When using the information from the caption and coordinates, directly explain the scene, and do not mention that the information source is the caption or the bounding box. Always answer as if you are directly looking at the image.
--------------------------------------------------------------------------------
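The boxes referenced in the prompt files above are (x1, y1, x2, y2) corners normalized to [0, 1], so pixel coordinates are recovered by scaling with the image width and height. A minimal sketch (the 640x480 image size is a hypothetical example):

    def denormalize_box(box, width, height):
        # (x1, y1, x2, y2) in [0, 1] -> pixel corners
        x1, y1, x2, y2 = box
        return (x1 * width, y1 * height, x2 * width, y2 * height)

    # First "tie" box from 000_caps.txt above, on a hypothetical 640x480 image:
    print(denormalize_box([0.574, 0.298, 0.752, 0.704], 640, 480))
    # approximately (367.4, 143.0, 481.3, 337.9)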
/playground/data/prompts/conversation/000_caps.txt:
--------------------------------------------------------------------------------
1 | There is a movie theater that displays the show times above the doors.
2 | A red fire hydrant is deep in the snow.
3 | The fire hydrant is in the snow near a recently plowed sidewalk.
4 | This city has had a very hard winter with snow.
5 | A hotel for dogs in the snow in winter.
--------------------------------------------------------------------------------
/playground/data/prompts/conversation/000_conv.txt:
--------------------------------------------------------------------------------
1 | Question:
2 | What color is the fire hydrant in the image?
3 | ===
4 | Answer:
5 | The fire hydrant in the image is red.
6 | ===
7 | Question:
8 | Is the fire hydrant fully covered by snow or partially visible?
9 | ===
10 | Answer:
11 | The fire hydrant is partially visible, with a significant portion of it covered by the deep snow.
12 | ===
13 | Question:
14 | What is the condition of the sidewalk near the fire hydrant?
15 | ===
16 | Answer:
17 | The sidewalk near the fire hydrant has been recently plowed, so it is relatively clear of snow compared to the surrounding area.
18 | ===
19 | Question:
20 | What does the image suggest about the winter season in the city?
21 | ===
22 | Answer:
23 | The image suggests that the winter season in the city has been quite harsh and challenging. The significant snow accumulation seen in the image indicates that the city has experienced heavy snowfall, which can lead to various difficulties and disruptions for its residents.
24 |
25 | For instance, the deep snow covering the fire hydrant could potentially hinder its accessibility in case of emergencies, which poses safety concerns. The recently plowed sidewalk implies that the city's maintenance crews have been actively working to keep the walkways clear and safe for pedestrians, but the sheer amount of snowfall might make it difficult to keep up with the continuous snow removal efforts.
26 |
27 | Furthermore, such extreme winter conditions can impact transportation, leading to traffic issues, delays in public transit, and increased risks of accidents due to slippery roads. It can also cause problems for businesses and schools, as people might face challenges commuting to work or attending classes. Additionally, the heavy snow can put extra strain on infrastructure, such as roofs and power lines, increasing the likelihood of structural damage or power outages.
28 |
29 | In conclusion, the image of the red fire hydrant deep in the snow and the recently plowed sidewalk suggest that the city has faced a particularly severe winter season, with substantial snowfall that has likely caused various challenges and disruptions for its residents and infrastructure.
--------------------------------------------------------------------------------
/playground/data/prompts/conversation/001_caps.txt:
--------------------------------------------------------------------------------
1 | A man is skiing in the open snow covered hills
2 | A skier is making his way into the snow.
3 | A skier on the foothills of a huge mountain range.
4 | A skier looks at mountains while standing near a trail sign.
5 | a single person skiing by an area with a lot of bushes
--------------------------------------------------------------------------------
/playground/data/prompts/conversation/001_conv.txt:
--------------------------------------------------------------------------------
1 | Question:
2 | What is the skier doing?
3 | ===
4 | Answer:
5 | The skier is skiing in the snow-covered hills, making his way through the snow and enjoying the beautiful mountain scenery. They are also standing near a trail sign, which indicates their location on the mountain.
6 | ===
7 | Question:
8 | Are there any other people or skiers visible in the image?
9 | ===
10 | Answer:
11 | No, there is only a single person skiing in the image, and they are surrounded by an area with a lot of bushes.
12 | ===
13 | Question:
14 | What kind of skiing is the person doing? Cross-country or downhill?
15 | ===
16 | Answer:
17 | Based on what I can see in the photo, it is not possible to definitively determine whether the skier is doing cross-country or downhill skiing. However, given the presence of a trail sign and the fact that they are skiing in the foothills, it is more likely that the skier is engaged in cross-country skiing. Downhill skiing typically takes place on more steep slopes and involves ski lifts, whereas cross-country skiing often occurs on relatively flatter terrain with marked trails.
18 | ===
19 | Question:
20 | Can you explain cross-country and downhill skiing a bit more?
21 | ===
22 | Answer:
23 | Cross-country skiing and downhill skiing are two distinct types of skiing, each with its own techniques, equipment, and terrain preferences.
24 |
25 | 1. Cross-Country Skiing:
26 | Also known as Nordic skiing, cross-country skiing involves traversing relatively flat terrain or gentle slopes using skis and poles. It is a popular recreational and competitive sport that focuses on endurance, balance, and aerobic fitness. Cross-country skiers use their own locomotion to move across snow-covered terrain, rather than relying on ski lifts or other forms of assistance.
27 | There are two main techniques in cross-country skiing:
28 | - Classic: This technique involves skiing in parallel tracks, using a forward-and-backward gliding motion while keeping the skis parallel to each other. The skier uses their poles to propel themselves forward.
29 | - Skate skiing: This technique is similar to ice skating, where the skier pushes off one ski at an angle and glides on the other. It requires a smooth, groomed surface and is typically faster than the classic technique.
30 | Cross-country ski equipment consists of lightweight skis, boots, bindings, and poles. The boots are more flexible than those used in downhill skiing, allowing for better ankle movement and control.
31 |
32 | 2. Downhill Skiing:
33 | Also known as alpine skiing, downhill skiing involves descending slopes at high speeds using skis and poles for balance and control. This sport is more focused on speed, technique, and navigating challenging terrain, including steep slopes, moguls, and even jumps.
34 | Downhill skiing can be further categorized into several disciplines, such as slalom, giant slalom, super-G, and downhill racing. Each discipline has its own set of rules, courses, and ski equipment.
35 | Downhill ski equipment includes heavier and stiffer skis, boots, bindings, and poles than those used in cross-country skiing. The boots are more rigid to provide better support and control during high-speed descents and sharp turns.
36 |
37 | In summary, cross-country skiing is an endurance-based sport that involves traveling across flat or gently sloping terrain, while downhill skiing is focused on speed and technique as skiers navigate steeper slopes and challenging terrain. Both sports require specialized equipment and techniques, but they offer different experiences and challenges to participants.
--------------------------------------------------------------------------------
/playground/data/prompts/conversation/system_message.txt:
--------------------------------------------------------------------------------
1 | You are an AI visual assistant, and you are seeing a single image. What you see is described by five sentences about the same image you are looking at. Answer all questions as if you are seeing the image.
2 |
3 | Design a conversation between you and a person asking about this photo. The answers should be in the tone of a visual AI assistant that is seeing the image and answering the question.
4 | Ask diverse questions and give corresponding answers.
5 |
6 | Include questions asking about the visual content of the image, including the object types, counting the objects, object actions, object locations, relative positions between objects, etc. Only include questions that have definite answers:
7 | (1) one can see the content in the image that the question asks about and can answer confidently;
8 | (2) one can determine confidently from the image that the content is not in the image.
9 | Do not ask any question that cannot be answered confidently.
10 |
11 | Also include complex questions that are relevant to the content in the image, for example, asking about the background knowledge of the objects in the image, asking to discuss events happening in the image, etc. Again, do not ask about uncertain details.
12 | Provide detailed answers when answering complex questions. For example, give detailed examples or reasoning steps to make the content more convincing and well-organized. You can include multiple paragraphs if necessary.
--------------------------------------------------------------------------------
/playground/data/prompts/detail_description/000_caps.txt:
--------------------------------------------------------------------------------
1 | A harbor filled with lots of boats next to a building.
2 | A bicycle parked in front of several boats at a dock.
3 | A red bicycle in front of a line of docked white yachts
4 | A bike sits before boats which sit before a long building.
5 | A bicycle is a convenient means of land transportation when you live on a boat.
6 |
7 | bicycle: [0.287, 0.641, 0.507, 0.874]
8 | bicycle: [0.566, 0.667, 0.63, 0.731]
9 | boat: [0.318, 0.579, 0.575, 0.724]
10 | boat: [0.704, 0.607, 0.818, 0.727]
11 | boat: [0.818, 0.601, 0.942, 0.744]
12 | boat: [0.002, 0.53, 0.243, 0.71]
13 | boat: [0.541, 0.611, 0.668, 0.731]
14 | person: [0.778, 0.527, 0.797, 0.57]
15 | cup: [0.708, 0.733, 0.724, 0.758]
16 | boat: [0.236, 0.532, 0.404, 0.64]
17 | boat: [0.81, 0.632, 0.836, 0.676]
18 | boat: [0.957, 0.526, 1.0, 0.752]
--------------------------------------------------------------------------------
/playground/data/prompts/detail_description/000_conv.txt:
--------------------------------------------------------------------------------
1 | It is a harbor filled with numerous boats of various sizes docked next to a long building. Among the boats, there are a few white yachts lined up, standing out from the rest. There is a red bicycle prominently parked in front of the line of docked boats, serving as a convenient means of land transportation for those living on the boats. Another bicycle can be seen further back in the scene, near the middle of the harbor.
2 |
3 | A person is visible near the right side of the harbor, possibly enjoying the view or attending to their boat. Additionally, there is a cup placed on a surface near the middle of the scene.
--------------------------------------------------------------------------------
/playground/data/prompts/detail_description/001_caps.txt:
--------------------------------------------------------------------------------
1 | A group of people standing outside of a black vehicle with various luggage.
2 | Luggage surrounds a vehicle in an underground parking area
3 | People try to fit all of their luggage in an SUV.
4 | The sport utility vehicle is parked in the public garage, being packed for a trip
5 | Some people with luggage near a van that is transporting it.
6 |
7 | person: [0.681, 0.242, 0.774, 0.694]
8 | person: [0.63, 0.222, 0.686, 0.516]
9 | person: [0.444, 0.233, 0.487, 0.34]
10 | backpack: [0.384, 0.696, 0.485, 0.914]
11 | backpack: [0.755, 0.413, 0.846, 0.692]
12 | suitcase: [0.758, 0.413, 0.845, 0.69]
13 | suitcase: [0.1, 0.497, 0.173, 0.579]
14 | bicycle: [0.282, 0.363, 0.327, 0.442]
15 | car: [0.786, 0.25, 0.848, 0.322]
16 | car: [0.783, 0.27, 0.827, 0.335]
17 | car: [0.86, 0.254, 0.891, 0.3]
18 | car: [0.261, 0.101, 0.787, 0.626]
--------------------------------------------------------------------------------
/playground/data/prompts/detail_description/001_conv.txt:
--------------------------------------------------------------------------------
1 | The image is an underground parking area with a black sport utility vehicle (SUV) parked. There are three people in the scene, with one person standing closer to the left side of the vehicle, another person in the middle, and the third person on the right side. They are all working together to pack their luggage into the SUV for a trip.
2 |
3 | In the parking area, various luggage items are scattered around the vehicle. There are two backpacks, one located near the left rear wheel and the other closer to the right side of the vehicle. Additionally, there are two suitcases, one on the right side of the car and another further away near the center of the parking area. A bicycle can also be seen on the left side of the vehicle.
4 |
5 | Other cars are parked around the main SUV, with one car positioned behind it and slightly to the left, another behind and slightly to the right, and the third car further behind on the right side.
--------------------------------------------------------------------------------
/playground/data/prompts/detail_description/002_caps.txt:
--------------------------------------------------------------------------------
1 | A man holds a Wii-mote above his head while another looks on.
2 | A guy and his friend are playing Nintendo Wii.
3 | A young man is holding a video game remote over his head.
4 | two men standing in a room while one plays with a wii mote
5 | Some guys standing and playing a video game.
6 |
7 | couch: [0.697, 0.759, 0.995, 1.0]
8 | dining table: [0.426, 0.755, 1.0, 0.987]
9 | person: [0.082, 0.252, 0.342, 1.0]
10 | person: [0.399, 0.085, 0.742, 0.982]
11 | remote: [0.477, 0.135, 0.516, 0.187]
12 | sink: [0.016, 0.501, 0.063, 0.52]
13 | potted plant: [0.798, 0.384, 0.888, 0.645]
14 | refrigerator: [0.305, 0.389, 0.414, 0.547]
15 | chair: [0.72, 0.509, 0.858, 0.725]
--------------------------------------------------------------------------------
/playground/data/prompts/detail_description/002_conv.txt:
--------------------------------------------------------------------------------
1 | The image shows two men standing in a room, engaged in playing a video game on a Nintendo Wii console. One of the men is holding a Wii remote above his head with enthusiasm, while the other man looks on, likely enjoying the friendly competition.
2 |
3 | The room appears to be a living space with a couch located in the background and a dining table nearby. A potted plant can be seen placed close to the couch, and a chair is situated in the middle of the room. The room also features a kitchen area with a sink and a refrigerator visible in the background.
--------------------------------------------------------------------------------
/playground/data/prompts/detail_description/system_message.txt:
--------------------------------------------------------------------------------
1 | You are an AI visual assistant that can analyze a single image. You receive five sentences, each describing the same image you are observing. In addition, specific object locations within the image are given, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y.
2 |
3 | Using the provided caption and bounding box information, describe the scene in a detailed manner.
4 |
5 | Instead of directly mentioning the bounding box coordinates, utilize this data to explain the scene using natural language. Include details like object counts, position of the objects, relative position between the objects.
6 |
7 | When using the information from the caption and coordinates, directly explain the scene, and do not mention that the information source is the caption or the bounding box. Always answer as if you are directly looking at the image.
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=61.0"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "llava"
7 | version = "1.0.1"
8 | description = "Towards GPT-4 like large language and visual assistant."
9 | readme = "README.md"
10 | requires-python = ">=3.8"
11 | classifiers = [
12 | "Programming Language :: Python :: 3",
13 | "License :: OSI Approved :: Apache Software License",
14 | ]
15 | dependencies = [
16 | "einops", "fastapi", "gradio==3.39.0", "markdown2[all]", "numpy",
17 | "requests", "sentencepiece", "tokenizers>=0.12.1",
18 | "torch", "torchvision", "uvicorn", "wandb",
19 | "shortuuid", "httpx==0.24.0",
20 | "deepspeed==0.9.5",
21 | "peft==0.4.0",
22 | "transformers==4.31.0",
23 | "accelerate==0.21.0",
24 | "bitsandbytes==0.41.0",
25 | "scikit-learn==1.2.2",
26 | "sentencepiece==0.1.99",
27 | "einops==0.6.1", "einops-exts==0.0.4", "timm==0.6.13",
28 | "gradio_client==0.7.0"
29 | ]
30 |
31 | [project.urls]
32 | "Homepage" = "https://llava-vl.github.io"
33 | "Bug Tracker" = "https://github.com/haotian-liu/LLaVA/issues"
34 |
35 | [tool.setuptools.packages.find]
36 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"]
37 |
38 | [tool.wheel]
39 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"]
40 |
--------------------------------------------------------------------------------
/scripts/convert_sqa_to_llava.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import fire
4 | import re
5 | from convert_sqa_to_llava_base_prompt import build_prompt_chatbot
6 |
7 |
8 | def convert_to_llava(base_dir, split, prompt_format="QCM-LEPA"):
9 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split]
10 | problems = json.load(open(os.path.join(base_dir, "problems.json")))
11 |
12 | split_problems = build_prompt_chatbot(
13 | problems, split_indices, prompt_format,
14 | use_caption=False, is_test=False)
15 |
16 | target_format = []
17 | for prob_id, (input, output) in split_problems.items():
18 | if input.startswith('Question: '):
19 | input = input.replace('Question: ', '')
20 | if output.startswith('Answer: '):
21 | output = output.replace('Answer: ', '')
22 |
23 | raw_prob_data = problems[prob_id]
24 | if raw_prob_data['image'] is None:
25 | target_format.append({
26 | "id": prob_id,
27 | "conversations": [
28 | {'from': 'human', 'value': f"{input}"},
29 | {'from': 'gpt', 'value': f"{output}"},
30 | ],
31 | })
32 |
33 | else:
34 | target_format.append({
35 | "id": prob_id,
36 | "image": os.path.join(prob_id, raw_prob_data['image']),
37 | "conversations": [
38 | {'from': 'human', 'value': f"{input}\n"},
39 | {'from': 'gpt', 'value': f"{output}"},
40 | ],
41 | })
42 |
43 | print(f'Number of samples: {len(target_format)}')
44 |
45 | with open(os.path.join(base_dir, f"llava_{split}_{prompt_format}.json"), "w") as f:
46 | json.dump(target_format, f, indent=2)
47 |
48 |
49 | def convert_to_jsonl(base_dir, split, prompt_format="QCM-LEPA"):
50 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split]
51 | problems = json.load(open(os.path.join(base_dir, "problems.json")))
52 |
53 | split_problems = build_prompt_chatbot(
54 | problems, split_indices, prompt_format,
55 | use_caption=False, is_test=False)
56 |
57 | writer = open(os.path.join(base_dir, f"scienceqa_{split}_{prompt_format}.jsonl"), "w")
58 | for prob_id, (input, output) in split_problems.items():
59 | if input.startswith('Question: '):
60 | input = input.replace('Question: ', '')
61 | if output.startswith('Answer: '):
62 | output = output.replace('Answer: ', '')
63 |
64 | raw_prob_data = problems[prob_id]
65 | if raw_prob_data['image'] is None:
66 | data = {
67 | "id": prob_id,
68 | "instruction": f"{input}",
69 | "output": f"{output}",
70 | }
71 |
72 | else:
73 | data = {
74 | "id": prob_id,
75 | "image": os.path.join(prob_id, raw_prob_data['image']),
76 | "instruction": f"{input}\n",
77 | "output": f"{output}",
78 | }
79 | writer.write(json.dumps(data) + '\n')
80 | writer.close()
81 |
82 |
83 | def main(task, **kwargs):
84 | globals()[task](**kwargs)
85 |
86 |
87 | if __name__ == "__main__":
88 | fire.Fire(main)
89 |
--------------------------------------------------------------------------------
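The script dispatches on the first positional argument via fire, so convert_to_llava or convert_to_jsonl is selected by name and the remaining command-line flags become keyword arguments. A minimal sketch of one record in the JSON format convert_to_llava writes for an image-based problem (the id, image file name, and question/answer text are illustrative placeholders):

    import json

    record = {
        "id": "12345",
        "image": "12345/image.png",
        "conversations": [
            {"from": "human", "value": "Which property do these objects have in common?\n"},
            {"from": "gpt", "value": "The answer is A."},
        ],
    }
    print(json.dumps([record], indent=2))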
/scripts/finetune.sh:
--------------------------------------------------------------------------------
1 | # Uncomment and set the following variables correspondingly to run this script:
2 |
3 | ################## VICUNA ##################
4 | PROMPT_VERSION=v1
5 | # MODEL_VERSION="vicuna-v1-3-7b"
6 | ################## VICUNA ##################
7 |
8 | ################## LLaMA-2 ##################
9 | # PROMPT_VERSION="llava_llama_2"
10 | # MODEL_VERSION="llama-2-7b-chat"
11 | ################## LLaMA-2 ##################
12 | out_dir=output/llava_grounding_stage2
13 | load=output/llava_grounding_stage1
14 | mkdir -p $out_dir
15 | echo $out_dir/log
16 | export DATASET=datasets/
17 |
18 | num_gpu=8
19 | bs=$(( 8 * $num_gpu ))
20 | deepspeed llava/train/train_joint_2st.py \
21 | --deepspeed scripts/zero2.json \
22 | --model_name_or_path ckpts/vicuna/vicuna-7b-v1.3/ \
23 | --whole_model $load \
24 | --load_model True \
25 | --version $PROMPT_VERSION \
26 | --data_path datasets/llava/annotations/llava_instruct_150k.json \
27 | --image_folder datasets/coco/train2017/ \
28 | --vision_tower openai/clip-vit-large-patch14 \
29 | --pretrain_mm_mlp_adapter output/llava_stage1/mm_projector.bin \
30 | --mm_vision_select_layer -2 \
31 | --mm_use_im_start_end False \
32 | --mm_use_im_patch_token False \
33 | --bf16 True \
34 | --output_dir $out_dir \
35 | --num_train_epochs 1 \
36 | --per_device_train_batch_size 8 \
37 | --per_device_eval_batch_size 4 \
38 | --gradient_accumulation_steps 1 \
39 | --evaluation_strategy "no" \
40 | --save_strategy "steps" \
41 | --save_steps 1000 \
42 | --save_total_limit 10 \
43 | --learning_rate 2e-5 \
44 | --weight_decay 0. \
45 | --warmup_ratio 0.03 \
46 | --lr_scheduler_type "cosine" \
47 | --logging_steps 1 \
48 | --tf32 True \
49 | --model_max_length 2400 \
50 | --gradient_checkpointing True \
51 | --dataloader_num_workers 4 \
52 | --lazy_preprocess True \
53 | --report_to wandb \
54 | --max_steps 10000 \
55 | --config_file \
56 | configs/openseed/openseed_swint_lang_joint_2st.yaml \
57 | --opt \
58 | MODEL.DECODER.WEIGHT_MULTIPLIER=0.1,MODEL.DECODER.COST_CLASS_WEIGHT=4.0,flickr.TRAIN.BATCH_SIZE_TOTAL=6,coco_instruct.TEST.BATCH_SIZE_TOTAL=${bs},coco_instruct.TRAIN.BATCH_SIZE_TOTAL=${bs},MODEL.WEIGHTS=ckpts/openseed_o365.pt \
59 | >> $out_dir/log 2>&1
60 |
--------------------------------------------------------------------------------
/scripts/finetune_visual_prompt.sh:
--------------------------------------------------------------------------------
1 | # Uncomment and set the following variables correspondingly to run this script:
2 |
3 | ################## VICUNA ##################
4 | PROMPT_VERSION=v1
5 | # MODEL_VERSION="vicuna-v1-3-7b"
6 | ################## VICUNA ##################
7 |
8 | ################## LLaMA-2 ##################
9 | # PROMPT_VERSION="llava_llama_2"
10 | # MODEL_VERSION="llama-2-7b-chat"
11 | ################## LLaMA-2 ##################
12 | out_dir=output/llava_stage2_visual_prompt
13 | load=output/llava_grounding_stage2/
14 | mkdir -p $out_dir
15 | echo $out_dir/log
16 | export DATASET=datasets/
17 |
18 | num_gpu=8
19 | bs=$(( 8 * $num_gpu ))
20 | deepspeed llava/train/train_joint_2st_interactive_refcoco_coco_instruction.py \
21 | --deepspeed scripts/zero2.json \
22 | --model_name_or_path ckpts/vicuna/vicuna-7b-v1.3/ \
23 | --whole_model $load \
24 | --load_model True \
25 | --version $PROMPT_VERSION \
26 | --data_path datasets/llava/annotations/llava_instruct_150k.json \
27 | --image_folder datasets/coco/train2017/ \
28 | --vision_tower openai/clip-vit-large-patch14 \
29 | --pretrain_mm_mlp_adapter output/llava_stage1/mm_projector.bin \
30 | --mm_vision_select_layer -2 \
31 | --mm_use_im_start_end False \
32 | --tune_prompt_adapter True \
33 | --mm_use_im_patch_token False \
34 | --bf16 True \
35 | --output_dir $out_dir \
36 | --num_train_epochs 1 \
37 | --per_device_train_batch_size 2 \
38 | --per_device_eval_batch_size 4 \
39 | --gradient_accumulation_steps 1 \
40 | --evaluation_strategy "no" \
41 | --save_strategy "steps" \
42 | --save_steps 1000 \
43 | --save_total_limit 10 \
44 | --learning_rate 2e-5 \
45 | --weight_decay 0. \
46 | --warmup_ratio 0.03 \
47 | --lr_scheduler_type "cosine" \
48 | --logging_steps 1 \
49 | --tf32 True \
50 | --model_max_length 2400 \
51 | --gradient_checkpointing True \
52 | --dataloader_num_workers 4 \
53 | --lazy_preprocess True \
54 | --report_to wandb \
55 | --max_steps 20000 \
56 | --config_file_gd \
57 | configs/openseed/openseed_swint_lang_joint_2st_visual_prompt.yaml \
58 | --config_file_it \
59 | configs/semsam/visual_prompt_encoder.yaml \
60 | --opt \
61 | "detach_seg=True,MODEL.DECODER.WEIGHT_MULTIPLIER=0.1,MODEL.DECODER.COST_CLASS_WEIGHT=4.0,flickr.TEST.BATCH_SIZE_TOTAL=${bs},flickr.TRAIN.BATCH_SIZE_TOTAL=${bs},coco_interactive.TRAIN.BATCH_SIZE_TOTAL=${bs},coco_instruct.TRAIN.BATCH_SIZE_TOTAL=${bs},MODEL.WEIGHTS=ckpts/openseed_o365.pt;MODEL.DECODER.WEIGHT_MULTIPLIER=0.2,coco_interactive.TEST.BATCH_SIZE_TOTAL=${bs},coco_interactive.TRAIN.BATCH_SIZE_TOTAL=${bs},MODEL.WEIGHTS=ckpts/visual_prompt_enc.pth" \
62 | >> $out_dir/log 2>&1
--------------------------------------------------------------------------------
/scripts/merge_lora_weights.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from llava.model.builder import load_pretrained_model
3 | from llava.mm_utils import get_model_name_from_path
4 |
5 |
6 | def merge_lora(args):
7 | model_name = get_model_name_from_path(args.model_path)
8 | tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, device_map='cpu')
9 |
10 | model.save_pretrained(args.save_model_path)
11 | tokenizer.save_pretrained(args.save_model_path)
12 |
13 |
14 | if __name__ == "__main__":
15 | parser = argparse.ArgumentParser()
16 | parser.add_argument("--model-path", type=str, required=True)
17 | parser.add_argument("--model-base", type=str, required=True)
18 | parser.add_argument("--save-model-path", type=str, required=True)
19 |
20 | args = parser.parse_args()
21 |
22 | merge_lora(args)
23 |
--------------------------------------------------------------------------------
/scripts/pretrain_joint.sh:
--------------------------------------------------------------------------------
1 | # Uncomment and set the following variables correspondingly to run this script:
2 |
3 | # MODEL_VERSION=vicuna-v1-3-7b
4 | # MODEL_VERSION=llama-2-7b-chat
5 |
6 | ########### DO NOT CHANGE ###########
7 | ########### USE THIS FOR BOTH ###########
8 | PROMPT_VERSION=v1
9 | ########### DO NOT CHANGE ###########
10 | out_dir=output/llava_grounding_stage1
11 | mkdir -p $out_dir
12 | echo $out_dir/log
13 | export DATASET=datasets/
14 |
15 | n_gpu=4
16 |
17 | deepspeed --include=localhost:1,2,3,7 llava/train/train_joint_1st.py \
18 | --deepspeed scripts/zero2.json \
19 | --model_name_or_path ckpts/vicuna/vicuna-7b-v1.3/ \
20 | --version $PROMPT_VERSION \
21 | --data_path datasets/llava/annotations/cap600k_brackets_all.json \
22 | --image_folder datasets/ConceptualCaptionsFiltered/ \
23 | --vision_tower openai/clip-vit-large-patch14 \
24 | --pretrain_mm_mlp_adapter output/llava_stage1/mm_projector.bin \
25 | --tune_mm_mlp_adapter True \
26 | --mm_vision_select_layer -2 \
27 | --mm_use_im_start_end False \
28 | --mm_use_im_patch_token False \
29 | --bf16 True \
30 | --output_dir $out_dir \
31 | --max_steps 30000 \
32 | --num_train_epochs 1 \
33 | --per_device_train_batch_size 8 \
34 | --per_device_eval_batch_size 4 \
35 | --gradient_accumulation_steps 1 \
36 | --evaluation_strategy "no" \
37 | --save_strategy "steps" \
38 | --save_steps 1000 \
39 | --save_total_limit 100 \
40 | --learning_rate 1e-4 \
41 | --weight_decay 0. \
42 | --warmup_ratio 0.03 \
43 | --lr_scheduler_type "cosine" \
44 | --logging_steps 1 \
45 | --tf32 True \
46 | --model_max_length 2048 \
47 | --gradient_checkpointing True \
48 | --dataloader_num_workers 4 \
49 | --lazy_preprocess True \
50 | --report_to wandb \
51 | --config_file \
52 | configs/openseed/openseed_swint_lang_joint.yaml \
53 | --opt \
54 | flickr.TRAIN.BATCH_SIZE_TOTAL=8,COCO.TRAIN.BATCH_SIZE_TOTAL=24,MODEL.WEIGHTS=ckpts/openseed_o365.pt \
55 | >> $out_dir/log 2>&1
56 |
--------------------------------------------------------------------------------
/utils/Config.py:
--------------------------------------------------------------------------------
1 | from fvcore.common.config import CfgNode as _CfgNode
2 |
3 | class CfgNode(_CfgNode):
4 | """
5 | The same as `fvcore.common.config.CfgNode`, but different in:
6 |
7 | 1. Use unsafe yaml loading by default.
8 | Note that this may lead to arbitrary code execution: you must not
9 | load a config file from untrusted sources before manually inspecting
10 | the content of the file.
11 | 2. Support config versioning.
12 | When attempting to merge an old config, it will convert the old config automatically.
13 |
14 | .. automethod:: clone
15 | .. automethod:: freeze
16 | .. automethod:: defrost
17 | .. automethod:: is_frozen
18 | .. automethod:: load_yaml_with_base
19 | .. automethod:: merge_from_list
20 | .. automethod:: merge_from_other_cfg
21 | """
22 |
23 | def merge_from_dict(self, dict):
24 | pass
25 |
26 | node = CfgNode()
--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UX-Decoder/LLaVA-Grounding/668b7cc3d536ee721d09d04eeff56f8221d72e62/utils/__init__.py
--------------------------------------------------------------------------------
/utils/arguments.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import json
3 | import argparse
4 | import logging
5 |
6 | logger = logging.getLogger(__name__)
7 |
8 |
9 | def load_config_dict_to_opt(opt, config_dict):
10 | """
11 | Load the key, value pairs from config_dict to opt, overriding existing values in opt
12 | if there is any.
13 | """
14 | if not isinstance(config_dict, dict):
15 | raise TypeError("Config must be a Python dictionary")
16 | for k, v in config_dict.items():
17 | k_parts = k.split('.')
18 | pointer = opt
19 | for k_part in k_parts[:-1]:
20 | if k_part not in pointer:
21 | pointer[k_part] = {}
22 | pointer = pointer[k_part]
23 | assert isinstance(pointer, dict), "Overriding key needs to be inside a Python dict."
24 | ori_value = pointer.get(k_parts[-1])
25 | pointer[k_parts[-1]] = v
26 | if ori_value:
27 | logger.warning(f"Overrode {k} from {ori_value} to {pointer[k_parts[-1]]}")
28 |
29 |
30 | def load_opt_from_config_files(conf_files):
31 | """
32 | Load opt from the config files, settings in later files can override those in previous files.
33 |
34 | Args:
35 | conf_files (list): a list of config file paths
36 |
37 | Returns:
38 | dict: a dictionary of opt settings
39 | """
40 | opt = {}
41 | for conf_file in conf_files:
42 | with open(conf_file, encoding='utf-8') as f:
43 | config_dict = yaml.safe_load(f)
44 |
45 | load_config_dict_to_opt(opt, config_dict)
46 |
47 | return opt
48 |
49 |
50 | def load_opt_command(args):
51 | parser = argparse.ArgumentParser(description='Pretrain or fine-tune models for NLP tasks.')
52 | parser.add_argument('command', help='Command: train/evaluate/train-and-evaluate')
53 | parser.add_argument('--conf_files', nargs='+', required=True, help='Path(s) to the config file(s).')
54 | parser.add_argument('--user_dir', help='Path to the user defined module for tasks (models, criteria), optimizers, and lr schedulers.')
55 | parser.add_argument('--config_overrides', nargs='*', help='Override parameters on config with a json style string, e.g. {"": , "..": }. A key with "." updates the object in the corresponding nested dict. Remember to escape " in command line.')
56 | parser.add_argument('--overrides', help='arguments that used to override the config file in cmdline', nargs=argparse.REMAINDER)
57 |
58 | cmdline_args = parser.parse_args() if not args else parser.parse_args(args)
59 |
60 | opt = load_opt_from_config_files(cmdline_args.conf_files)
61 |
62 | if cmdline_args.config_overrides:
63 | config_overrides_string = ' '.join(cmdline_args.config_overrides)
64 | logger.warning(f"Command line config overrides: {config_overrides_string}")
65 | config_dict = json.loads(config_overrides_string)
66 | load_config_dict_to_opt(opt, config_dict)
67 |
68 | if cmdline_args.overrides:
69 | assert len(cmdline_args.overrides) % 2 == 0, "overrides arguments is not paired, required: key value"
70 | keys = [cmdline_args.overrides[idx*2] for idx in range(len(cmdline_args.overrides)//2)]
71 | vals = [cmdline_args.overrides[idx*2+1] for idx in range(len(cmdline_args.overrides)//2)]
72 | vals = [val.replace('false', '').replace('False','') if len(val.replace(' ', '')) == 5 else val for val in vals]
73 |
74 | types = []
75 | for key in keys:
76 | key = key.split('.')
77 | ele = opt.copy()
78 | while len(key) > 0:
79 | ele = ele[key.pop(0)]
80 | types.append(type(ele))
81 |
82 | config_dict = {x:z(y) for x,y,z in zip(keys, vals, types)}
83 | load_config_dict_to_opt(opt, config_dict)
84 |
85 | # combine cmdline_args into opt dictionary
86 | for key, val in cmdline_args.__dict__.items():
87 | if val is not None:
88 | opt[key] = val
89 |
90 | return opt, cmdline_args
91 |
92 |
93 | def save_opt_to_json(opt, conf_file):
94 | with open(conf_file, 'w', encoding='utf-8') as f:
95 | json.dump(opt, f, indent=4)
96 |
97 |
98 | def save_opt_to_yaml(opt, conf_file):
99 | with open(conf_file, 'w', encoding='utf-8') as f:
100 | yaml.dump(opt, f)
101 |
--------------------------------------------------------------------------------
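The dotted override keys accepted here are the same KEY=VALUE style passed via --opt in the training scripts. A minimal sketch of how load_config_dict_to_opt nests them, assuming the repository root is on PYTHONPATH:

    from utils.arguments import load_config_dict_to_opt

    opt = {"MODEL": {"DECODER": {"WEIGHT_MULTIPLIER": 1.0}}}
    load_config_dict_to_opt(opt, {
        "MODEL.DECODER.WEIGHT_MULTIPLIER": 0.1,  # overrides the existing value (a warning is logged)
        "flickr.TRAIN.BATCH_SIZE_TOTAL": 8,      # creates the nested dicts on the fly
    })
    print(opt)
    # {'MODEL': {'DECODER': {'WEIGHT_MULTIPLIER': 0.1}},
    #  'flickr': {'TRAIN': {'BATCH_SIZE_TOTAL': 8}}}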
/utils/dist.py:
--------------------------------------------------------------------------------
1 | import functools
2 | import io
3 | import os
4 | import random
5 | import subprocess
6 | import time
7 | from collections import OrderedDict, defaultdict, deque
8 | import datetime
9 | import pickle
10 | from typing import Optional, List
11 |
12 | import json, time
13 | import numpy as np
14 | import torch
15 | import torch.distributed as dist
16 | from torch import Tensor
17 |
18 | import colorsys
19 | def init_distributed_mode(args):
20 | if 'WORLD_SIZE' in os.environ and os.environ['WORLD_SIZE'] != '': # 'RANK' in os.environ and
21 | args.rank = int(os.environ["RANK"])
22 | args.world_size = int(os.environ['WORLD_SIZE'])
23 | args.gpu = args.local_rank = int(os.environ['LOCAL_RANK'])
24 |
25 | # launch by torch.distributed.launch
26 | # Single node
27 | # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ...
28 | # Multi nodes
29 | # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
30 | # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
31 | # args.rank = int(os.environ.get('OMPI_COMM_WORLD_RANK'))
32 | # local_world_size = int(os.environ['GPU_PER_NODE_COUNT'])
33 | # args.world_size = args.world_size * local_world_size
34 | # args.gpu = args.local_rank = int(os.environ['LOCAL_RANK'])
35 | # args.rank = args.rank * local_world_size + args.local_rank
36 | print('world size: {}, rank: {}, local rank: {}'.format(args.world_size, args.rank, args.local_rank))
37 | print(json.dumps(dict(os.environ), indent=2))
38 | elif 'SLURM_PROCID' in os.environ:
39 | args.rank = int(os.environ['SLURM_PROCID'])
40 | args.gpu = args.local_rank = int(os.environ['SLURM_LOCALID'])
41 | args.world_size = int(os.environ['SLURM_NPROCS'])
42 |
43 | if os.environ.get('HAND_DEFINE_DIST_URL', 0) == '1':
44 | pass
45 | else:
46 | import util.hostlist as uh
47 | nodenames = uh.parse_nodelist(os.environ['SLURM_JOB_NODELIST'])
48 | gpu_ids = [int(node[3:]) for node in nodenames]
49 | fixid = int(os.environ.get('FIX_DISTRIBUTED_PORT_NUMBER', 0))
50 | # fixid += random.randint(0, 300)
51 | port = str(3137 + int(min(gpu_ids)) + fixid)
52 | args.dist_url = "tcp://{ip}:{port}".format(ip=uh.nodename_to_ip(nodenames[0]), port=port)
53 |
54 | print('world size: {}, world rank: {}, local rank: {}, device_count: {}'.format(args.world_size, args.rank, args.local_rank, torch.cuda.device_count()))
55 |
56 |
57 | else:
58 | print('Not using distributed mode')
59 | args.distributed = False
60 | args.world_size = 1
61 | args.rank = 0
62 | args.local_rank = 0
63 | return
64 |
65 | print("world_size:{} rank:{} local_rank:{}".format(args.world_size, args.rank, args.local_rank))
66 | args.distributed = True
67 | torch.cuda.set_device(args.local_rank)
68 | args.dist_backend = 'nccl'
69 | print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
70 |
71 | torch.distributed.init_process_group(
72 | backend=args.dist_backend,
73 | world_size=args.world_size,
74 | rank=args.rank,
75 | init_method=args.dist_url,
76 | )
77 |
78 | print("Before torch.distributed.barrier()")
79 | torch.distributed.barrier()
80 | print("End torch.distributed.barrier()")
--------------------------------------------------------------------------------
/utils/distributed.py:
--------------------------------------------------------------------------------
1 | # import os
2 | # import time
3 | # import torch
4 | # import pickle
5 | # import subprocess
6 | #
7 | # from mpi4py import MPI
8 | # import torch.distributed as dist
9 | #
10 | #
11 | # def apply_distributed(opt):
12 | # if opt['rank'] == 0:
13 | # hostname_cmd = ["hostname -I"]
14 | # result = subprocess.check_output(hostname_cmd, shell=True)
15 | # master_address = result.decode('utf-8').split()[0]
16 | # master_port = opt['PORT']
17 | # else:
18 | # master_address = None
19 | # master_port = None
20 | #
21 | # master_address = MPI.COMM_WORLD.bcast(master_address, root=0)
22 | # master_port = MPI.COMM_WORLD.bcast(master_port, root=0)
23 | #
24 | # if torch.distributed.is_available() and opt['world_size'] > 1:
25 | # init_method_url = 'tcp://{}:{}'.format(master_address, master_port)
26 | # backend = 'nccl'
27 | # world_size = opt['world_size']
28 | # rank = opt['rank']
29 | # torch.distributed.init_process_group(backend=backend,
30 | # init_method=init_method_url,
31 | # world_size=world_size,
32 | # rank=rank)
33 | #
34 | # def init_distributed(opt):
35 | # opt['CUDA'] = opt.get('CUDA', True) and torch.cuda.is_available()
36 | # if 'OMPI_COMM_WORLD_SIZE' not in os.environ:
37 | # # application was started without MPI
38 | # # default to single node with single process
39 | # opt['env_info'] = 'no MPI'
40 | # opt['world_size'] = 1
41 | # opt['local_size'] = 1
42 | # opt['rank'] = 0
43 | # opt['local_rank'] = 0
44 | # opt['master_address'] = '127.0.0.1'
45 | # opt['master_port'] = '8673'
46 | # else:
47 | # # application was started with MPI
48 | # # get MPI parameters
49 | # opt['world_size'] = int(os.environ['OMPI_COMM_WORLD_SIZE'])
50 | # opt['local_size'] = int(os.environ['OMPI_COMM_WORLD_LOCAL_SIZE'])
51 | # opt['rank'] = int(os.environ['OMPI_COMM_WORLD_RANK'])
52 | # opt['local_rank'] = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
53 | #
54 | # # set up device
55 | # if not opt['CUDA']:
56 | # assert opt['world_size'] == 1, 'multi-GPU training without CUDA is not supported since we use NCCL as communication backend'
57 | # opt['device'] = torch.device("cpu")
58 | # else:
59 | # torch.cuda.set_device(opt['local_rank'])
60 | # opt['device'] = torch.device("cuda", opt['local_rank'])
61 | #
62 | # apply_distributed(opt)
63 | # return opt
64 | #
65 | # def is_main_process():
66 | # rank = 0
67 | # if 'OMPI_COMM_WORLD_SIZE' in os.environ:
68 | # rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
69 | #
70 | # return rank == 0
71 | #
72 | # def get_world_size():
73 | # if not dist.is_available():
74 | # return 1
75 | # if not dist.is_initialized():
76 | # return 1
77 | # return dist.get_world_size()
78 | #
79 | # def get_rank():
80 | # if not dist.is_available():
81 | # return 0
82 | # if not dist.is_initialized():
83 | # return 0
84 | # return dist.get_rank()
85 | #
86 | #
87 | # def synchronize():
88 | # """
89 | # Helper function to synchronize (barrier) among all processes when
90 | # using distributed training
91 | # """
92 | # if not dist.is_available():
93 | # return
94 | # if not dist.is_initialized():
95 | # return
96 | # world_size = dist.get_world_size()
97 | # rank = dist.get_rank()
98 | # if world_size == 1:
99 | # return
100 | #
101 | # def _send_and_wait(r):
102 | # if rank == r:
103 | # tensor = torch.tensor(0, device="cuda")
104 | # else:
105 | # tensor = torch.tensor(1, device="cuda")
106 | # dist.broadcast(tensor, r)
107 | # while tensor.item() == 1:
108 | # time.sleep(1)
109 | #
110 | # _send_and_wait(0)
111 | # # now sync on the main process
112 | # _send_and_wait(1)
--------------------------------------------------------------------------------
/utils/misc.py:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # X-Decoder -- Generalized Decoding for Pixel, Image, and Language
3 | # Copyright (c) 2022 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Xueyan Zou (xueyan@cs.wisc.edu)
6 | # --------------------------------------------------------
7 | import math
8 |
9 |
10 | # HACK for evalution
11 | def hook_metadata(metadata, name):
12 | if name == 'cityscapes_fine_sem_seg_val':
13 | metadata.__setattr__("keep_sem_bgd", False)
14 | return metadata
15 |
16 | def hook_opt(model, name):
17 | if name in ['cityscapes_fine_panoptic_val', 'ade20k_panoptic_val', 'bdd10k_40_panoptic_val', 'cityscapes_fine_panoptic_val', 'scannet_21_panoptic_val']:
18 | model.model.object_mask_threshold = 0.4
19 | else:
20 | model.model.object_mask_threshold = 0.8
21 |
22 | # HACK for evalution
23 | def hook_switcher(model, name):
24 | mappings = {}
25 | if name in ['cityscapes_fine_sem_seg_val', 'scannet_21_val_seg', 'scannet_38_val_seg', 'scannet_41_val_seg', 'sunrgbd_37_val_seg', 'bdd10k_val_sem_seg', 'ade20k_full_sem_seg_val']:
26 | mappings = {'SEMANTIC_ON': True, 'INSTANCE_ON': False, 'PANOPTIC_ON': False}
27 | elif name in ['cityscapes_fine_instance_seg_val'] or 'seginw' in name:
28 | mappings = {'SEMANTIC_ON': False, 'INSTANCE_ON': True, 'PANOPTIC_ON': False}
29 | elif name in ['cityscapes_fine_panoptic_val', 'scannet_21_panoptic_val', 'bdd10k_40_panoptic_val']:
30 | mappings = {'SEMANTIC_ON': True, 'INSTANCE_ON': False, 'PANOPTIC_ON': True}
31 | elif name in ['coco_2017_val_panoptic_with_sem_seg', 'ade20k_panoptic_val', 'coco_2017_test-dev']:
32 | mappings = {'SEMANTIC_ON': True, 'INSTANCE_ON': True, 'PANOPTIC_ON': True}
33 | else:
34 | if name not in ["vlp_val", "vlp_captioning_val", "vlp_val2017", "vlp_captioning_val2017", "imagenet_val", "refcocog_val_google", "phrasecut_val", "phrasecut_test", "refcocop_val_unc", "refcoco_val_unc", "refcocog_val_umd"]:
35 | assert False, "dataset switcher is not defined"
36 | for key, value in mappings.items():
37 | if key == 'SEMANTIC_ON':
38 | model.model.semantic_on = value
39 | if key == 'INSTANCE_ON':
40 | model.model.instance_on = value
41 | if key == 'PANOPTIC_ON':
42 | model.model.panoptic_on = value
43 |
44 | class AverageMeter(object):
45 | """Computes and stores the average and current value."""
46 | def __init__(self):
47 | self.reset()
48 |
49 | def reset(self):
50 | self.val = 0
51 | self.avg = 0
52 | self.sum = 0
53 | self.count = 0
54 |
55 | def update(self, val, n=1, decay=0):
56 | self.val = val
57 | if decay:
58 | alpha = math.exp(-n / decay)  # exponential moving average with time constant `decay`
59 | self.sum = alpha * self.sum + (1 - alpha) * val * n
60 | self.count = alpha * self.count + (1 - alpha) * n
61 | else:
62 | self.sum += val * n
63 | self.count += n
64 | self.avg = self.sum / self.count
65 |
--------------------------------------------------------------------------------
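A minimal sketch of AverageMeter, assuming the repository root is on PYTHONPATH; with decay=0 it is a plain running mean, and with decay > 0 update() keeps an exponentially weighted average instead:

    from utils.misc import AverageMeter

    meter = AverageMeter()
    for loss in [4.0, 2.0, 3.0]:
        meter.update(loss)
    print(meter.avg)  # 3.0

    ema = AverageMeter()
    ema.update(4.0, decay=100)
    ema.update(2.0, decay=100)
    print(ema.avg)  # just under 3.0: the most recent value gets slightly more weight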
/utils/model.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import time
4 | import pickle
5 | import torch
6 | # from utils.distributed import is_main_process
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | NORM_MODULES = [
12 | torch.nn.BatchNorm1d,
13 | torch.nn.BatchNorm2d,
14 | torch.nn.BatchNorm3d,
15 | torch.nn.SyncBatchNorm,
16 | # NaiveSyncBatchNorm inherits from BatchNorm2d
17 | torch.nn.GroupNorm,
18 | torch.nn.InstanceNorm1d,
19 | torch.nn.InstanceNorm2d,
20 | torch.nn.InstanceNorm3d,
21 | torch.nn.LayerNorm,
22 | torch.nn.LocalResponseNorm,
23 | ]
24 |
25 | def register_norm_module(cls):
26 | NORM_MODULES.append(cls)
27 | return cls
28 |
29 | def align_and_update_state_dicts(model_state_dict, ckpt_state_dict):
30 | model_keys = sorted(model_state_dict.keys())
31 | ckpt_keys = sorted(ckpt_state_dict.keys())
32 | result_dicts = {}
33 | matched_log = []
34 | unmatched_log = []
35 | unloaded_log = []
36 | for model_key in model_keys:
37 | model_weight = model_state_dict[model_key]
38 | if model_key in ckpt_keys:
39 | ckpt_weight = ckpt_state_dict[model_key]
40 | if model_weight.shape == ckpt_weight.shape:
41 | result_dicts[model_key] = ckpt_weight
42 | ckpt_keys.pop(ckpt_keys.index(model_key))
43 | matched_log.append("Loaded {}, Model Shape: {} <-> Ckpt Shape: {}".format(model_key, model_weight.shape, ckpt_weight.shape))
44 | else:
45 | unmatched_log.append("*UNMATCHED* {}, Model Shape: {} <-> Ckpt Shape: {}".format(model_key, model_weight.shape, ckpt_weight.shape))
46 | else:
47 | unloaded_log.append("*UNLOADED* {}, Model Shape: {}".format(model_key, model_weight.shape))
48 |
49 | # if is_main_process():
50 | # for info in matched_log:
51 | # logger.info(info)
52 | # for info in unloaded_log:
53 | # logger.warning(info)
54 | # for key in ckpt_keys:
55 | # logger.warning("$UNUSED$ {}, Ckpt Shape: {}".format(key, ckpt_state_dict[key].shape))
56 | # for info in unmatched_log:
57 | # logger.warning(info)
58 | return result_dicts
--------------------------------------------------------------------------------
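A minimal sketch of align_and_update_state_dicts, assuming the repository root is on PYTHONPATH: only keys present in both state dicts with matching shapes survive, so the result can be loaded with strict=False.

    import torch
    import torch.nn as nn
    from utils.model import align_and_update_state_dicts

    model = nn.Linear(4, 2)
    ckpt = {
        "weight": torch.zeros(2, 4),    # matches -> loaded
        "bias": torch.zeros(3),         # shape mismatch -> skipped
        "extra.weight": torch.ones(1),  # unknown key -> ignored (logged as unused)
    }
    aligned = align_and_update_state_dicts(model.state_dict(), ckpt)
    print(sorted(aligned))  # ['weight']
    model.load_state_dict(aligned, strict=False)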
/utils/prompt_engineering.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def get_prompt_templates():
5 | prompt_templates = [
6 | '{}.',
7 | 'a photo of a {}.',
8 | 'a bad photo of a {}.',
9 | 'a photo of many {}.',
10 | 'a sculpture of a {}.',
11 | 'a photo of the hard to see {}.',
12 | 'a low resolution photo of the {}.',
13 | 'a rendering of a {}.',
14 | 'graffiti of a {}.',
15 | 'a bad photo of the {}.',
16 | 'a cropped photo of the {}.',
17 | 'a tattoo of a {}.',
18 | 'the embroidered {}.',
19 | 'a photo of a hard to see {}.',
20 | 'a bright photo of a {}.',
21 | 'a photo of a clean {}.',
22 | 'a photo of a dirty {}.',
23 | 'a dark photo of the {}.',
24 | 'a drawing of a {}.',
25 | 'a photo of my {}.',
26 | 'the plastic {}.',
27 | 'a photo of the cool {}.',
28 | 'a close-up photo of a {}.',
29 | 'a black and white photo of the {}.',
30 | 'a painting of the {}.',
31 | 'a painting of a {}.',
32 | 'a pixelated photo of the {}.',
33 | 'a sculpture of the {}.',
34 | 'a bright photo of the {}.',
35 | 'a cropped photo of a {}.',
36 | 'a plastic {}.',
37 | 'a photo of the dirty {}.',
38 | 'a jpeg corrupted photo of a {}.',
39 | 'a blurry photo of the {}.',
40 | 'a photo of the {}.',
41 | 'a good photo of the {}.',
42 | 'a rendering of the {}.',
43 | 'a {} in a video game.',
44 | 'a photo of one {}.',
45 | 'a doodle of a {}.',
46 | 'a close-up photo of the {}.',
47 | 'the origami {}.',
48 | 'the {} in a video game.',
49 | 'a sketch of a {}.',
50 | 'a doodle of the {}.',
51 | 'a origami {}.',
52 | 'a low resolution photo of a {}.',
53 | 'the toy {}.',
54 | 'a rendition of the {}.',
55 | 'a photo of the clean {}.',
56 | 'a photo of a large {}.',
57 | 'a rendition of a {}.',
58 | 'a photo of a nice {}.',
59 | 'a photo of a weird {}.',
60 | 'a blurry photo of a {}.',
61 | 'a cartoon {}.',
62 | 'art of a {}.',
63 | 'a sketch of the {}.',
64 | 'a embroidered {}.',
65 | 'a pixelated photo of a {}.',
66 | 'itap of the {}.',
67 | 'a jpeg corrupted photo of the {}.',
68 | 'a good photo of a {}.',
69 | 'a plushie {}.',
70 | 'a photo of the nice {}.',
71 | 'a photo of the small {}.',
72 | 'a photo of the weird {}.',
73 | 'the cartoon {}.',
74 | 'art of the {}.',
75 | 'a drawing of the {}.',
76 | 'a photo of the large {}.',
77 | 'a black and white photo of a {}.',
78 | 'the plushie {}.',
79 | 'a dark photo of a {}.',
80 | 'itap of a {}.',
81 | 'graffiti of the {}.',
82 | 'a toy {}.',
83 | 'itap of my {}.',
84 | 'a photo of a cool {}.',
85 | 'a photo of a small {}.',
86 | 'a tattoo of the {}.',
87 | ]
88 | return prompt_templates
89 |
90 | def prompt_engineering(classnames, topk=1, suffix='.'):
91 | prompt_templates = get_prompt_templates()
92 | temp_idx = np.random.randint(min(len(prompt_templates), topk))
93 |
94 | if isinstance(classnames, list):
95 | classname = classnames[np.random.randint(len(classnames))]  # 'random' is not imported; use numpy instead
96 | else:
97 | classname = classnames
98 |
99 | return prompt_templates[temp_idx].replace('.', suffix).format(classname.replace(',', '').replace('+', ' '))
--------------------------------------------------------------------------------
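A minimal sketch of prompt_engineering, assuming the repository root is on PYTHONPATH; topk bounds the pool of templates sampled from, so topk=1 always picks the bare '{}.' template:

    from utils.prompt_engineering import prompt_engineering

    print(prompt_engineering('traffic light', topk=1))    # traffic light.
    print(prompt_engineering(['dog', 'puppy'], topk=10))  # e.g. a photo of a dog.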
/utils/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
4 | def slprint(x, name='x'):
5 | if isinstance(x, (torch.Tensor, np.ndarray)):
6 | print(f'{name}.shape:', x.shape)
7 | elif isinstance(x, (tuple, list)):
8 | print('type x:', type(x))
9 | for i in range(min(10, len(x))):
10 | slprint(x[i], f'{name}[{i}]')
11 | elif isinstance(x, dict):
12 | for k,v in x.items():
13 | slprint(v, f'{name}[{k}]')
14 | else:
15 | print(f'{name}.type:', type(x))
--------------------------------------------------------------------------------