├── .gitattributes ├── .gitignore ├── LICENSE ├── README.md ├── cog.yaml ├── docs ├── Customize_Component.md ├── Data.md ├── Evaluation.md ├── Finetune_Custom_Data.md ├── ScienceQA.md ├── table1.png └── teaser_figure.png ├── llava ├── __init__.py ├── constants.py ├── conversation.py ├── eval │ ├── eval_gpt_review.py │ ├── eval_gpt_review_bench.py │ ├── eval_gpt_review_visual.py │ ├── eval_pope.py │ ├── eval_science_qa.py │ ├── eval_science_qa_gpt4.py │ ├── eval_science_qa_gpt4_requery.py │ ├── eval_textvqa.py │ ├── generate_webpage_data_from_table.py │ ├── m4c_evaluator.py │ ├── model_qa.py │ ├── model_vqa.py │ ├── model_vqa_loader.py │ ├── model_vqa_loader_text_only.py │ ├── model_vqa_mmbench.py │ ├── model_vqa_mmbench_text_only.py │ ├── model_vqa_qbench.py │ ├── model_vqa_science.py │ ├── model_vqa_science_text_only.py │ ├── model_vqa_text_only.py │ ├── qa_baseline_gpt35.py │ ├── run_llava.py │ ├── summarize_gpt_review.py │ ├── table │ │ ├── answer │ │ │ ├── answer_alpaca-13b.jsonl │ │ │ ├── answer_bard.jsonl │ │ │ ├── answer_gpt35.jsonl │ │ │ ├── answer_llama-13b.jsonl │ │ │ └── answer_vicuna-13b.jsonl │ │ ├── caps_boxes_coco2014_val_80.jsonl │ │ ├── model.jsonl │ │ ├── prompt.jsonl │ │ ├── question.jsonl │ │ ├── results │ │ │ ├── test_sqa_llava_13b_v0.json │ │ │ └── test_sqa_llava_lcs_558k_sqa_12e_vicuna_v1_3_13b.json │ │ ├── review │ │ │ ├── review_alpaca-13b_vicuna-13b.jsonl │ │ │ ├── review_bard_vicuna-13b.jsonl │ │ │ ├── review_gpt35_vicuna-13b.jsonl │ │ │ └── review_llama-13b_vicuna-13b.jsonl │ │ ├── reviewer.jsonl │ │ └── rule.json │ └── webpage │ │ ├── figures │ │ ├── alpaca.png │ │ ├── bard.jpg │ │ ├── chatgpt.svg │ │ ├── llama.jpg │ │ ├── swords_FILL0_wght300_GRAD0_opsz48.svg │ │ └── vicuna.jpeg │ │ ├── index.html │ │ ├── script.js │ │ └── styles.css ├── mm_utils.py ├── model │ ├── __init__.py │ ├── apply_delta.py │ ├── builder.py │ ├── consolidate.py │ ├── language_model │ │ ├── llava_llama.py │ │ ├── llava_mpt.py │ │ └── mpt │ │ │ ├── adapt_tokenizer.py │ │ │ ├── attention.py │ │ │ ├── blocks.py │ │ │ ├── configuration_mpt.py │ │ │ ├── custom_embedding.py │ │ │ ├── flash_attn_triton.py │ │ │ ├── hf_prefixlm_converter.py │ │ │ ├── meta_init_context.py │ │ │ ├── modeling_mpt.py │ │ │ ├── norm.py │ │ │ └── param_init_fns.py │ ├── llava_arch.py │ ├── make_delta.py │ ├── multimodal_encoder │ │ ├── builder.py │ │ └── clip_encoder.py │ ├── multimodal_projector │ │ └── builder.py │ └── utils.py ├── serve │ ├── __init__.py │ ├── cli.py │ ├── controller.py │ ├── examples │ │ ├── extreme_ironing.jpg │ │ └── waterview.jpg │ ├── gradio_web_server.py │ ├── model_worker.py │ ├── register_worker.py │ └── test_message.py ├── train │ ├── llama_flash_attn_monkey_patch.py │ ├── llama_xformers_attn_monkey_patch.py │ ├── llava_trainer.py │ ├── train.py │ ├── train_mem.py │ └── train_xformers.py └── utils.py ├── playground └── data_VT │ └── README.md ├── preprocess ├── collect_gpt4v_VT │ ├── coco_imageID_61k.txt │ ├── coco_preprocess.py │ ├── gpt4v.py │ ├── gqa_imageID.txt │ ├── mm-vet_imageID.txt │ ├── mmmu_image_ids.txt │ └── mmvp_image_ids.txt ├── gpt_eval │ ├── gpt_eval_mmvp.py │ └── gpt_eval_vqa.py ├── merge_with_VT │ ├── merge_eval_dataset_with_VT.py │ └── merge_scienceqa_with_VT.py └── mmbench │ └── convert_mmbench_images.py ├── pyproject.toml └── scripts ├── LLaVA-VT ├── eval │ ├── eval_multi_datasets_with_VT.sh │ ├── gqa │ │ └── gqa.sh │ ├── llavabench │ │ └── llavabench.sh │ ├── mmbench │ │ └── mmbench.sh │ ├── mmmu │ │ └── mmmu.sh │ ├── mmvet │ │ └── mmvet.sh │ ├── mmvp_mc │ │ ├── 
eval_mmvp_mc_acc.py │ │ └── mmvp_mc.sh │ ├── pope │ │ └── pope.sh │ ├── scienceqa │ │ └── scienceqa.sh │ ├── textvqa │ │ └── textvqa.sh │ ├── vizwiz │ │ └── vizwiz.sh │ └── vqav2 │ │ └── vqav2_dev.sh └── train │ └── finetune_LLaVA-VT-13B.sh ├── VTGenerator ├── infer │ ├── eval_images_gen_vt.sh │ ├── train_images_gen_vt.sh │ └── train_images_gen_vt │ │ ├── llava_instruct_mix665k_coco_gen_vt.sh │ │ ├── llava_instruct_mix665k_ocrvqa_gen_vt.sh │ │ ├── llava_instruct_mix665k_textcap_gen_vt.sh │ │ ├── llava_instruct_mix665k_vg_gen_vt.sh │ │ └── merge_llava_instruct_mix665k_all_gen_vt.py └── train │ ├── finetune_VTGenerator-13B.sh │ └── pretrain_VTGenerator-Pretrained-13B.sh ├── convert_gqa_for_eval.py ├── convert_mmbench_for_submission.py ├── convert_mmvet_for_eval.py ├── convert_seed_for_submission.py ├── convert_sqa_to_llava.py ├── convert_sqa_to_llava_base_prompt.py ├── convert_vizwiz_for_submission.py ├── convert_vqav2_for_submission.py ├── extract_mm_projector.py ├── finetune.sh ├── finetune_full_schedule.sh ├── finetune_lora.sh ├── finetune_qlora.sh ├── finetune_sqa.sh ├── gpt_eval ├── multi_gpt_eval_gqa.sh ├── multi_gpt_eval_mmmu.sh └── multi_gpt_eval_mmvp.sh ├── merge_lora_weights.py ├── pretrain.sh ├── pretrain_xformers.sh ├── sqa_eval_batch.sh ├── sqa_eval_gather.sh ├── v1_5 ├── eval │ ├── gqa.sh │ ├── llavabench.sh │ ├── mmbench.sh │ ├── mmbench_cn.sh │ ├── mme.sh │ ├── mmvet.sh │ ├── pope.sh │ ├── qbench.sh │ ├── qbench_zh.sh │ ├── seed.sh │ ├── sqa.sh │ ├── textvqa.sh │ ├── vizwiz.sh │ └── vqav2.sh ├── finetune.sh ├── finetune_lora.sh ├── finetune_task.sh ├── finetune_task_lora.sh └── pretrain.sh ├── zero2.json ├── zero3.json └── zero3_offload.json /.gitattributes: -------------------------------------------------------------------------------- 1 | # https://git-scm.com/docs/gitattributes 2 | 3 | # Set the default behavior, in case people don't have core.autocrlf set. 
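# The "* text=auto" rule below lets Git decide which files are text and normalize
# their line endings in the repository, independent of each contributor's core.autocrlf value.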
4 | # https://git-scm.com/docs/gitattributes#_end_of_line_conversion 5 | * text=auto 6 | 7 | # common python attributes, taken from https://github.com/alexkaratarakis/gitattributes/blob/710900479a2bedeec7003d381719521ffbb18bf8/Python.gitattributes 8 | # Source files 9 | # ============ 10 | *.pxd text diff=python 11 | *.py text diff=python 12 | *.py3 text diff=python 13 | *.pyw text diff=python 14 | *.pyx text diff=python 15 | *.pyz text diff=python 16 | *.pyi text diff=python 17 | 18 | # Binary files 19 | # ============ 20 | *.db binary 21 | *.p binary 22 | *.pkl binary 23 | *.pickle binary 24 | *.pyc binary export-ignore 25 | *.pyo binary export-ignore 26 | *.pyd binary 27 | 28 | # Jupyter notebook 29 | *.ipynb text eol=lf 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__ 3 | *.pyc 4 | *.egg-info 5 | dist 6 | 7 | # Log 8 | *.log 9 | *.log.* 10 | # *.json 11 | # *.jsonl 12 | playground/data_VT/*.json 13 | playground/data_VT/*.jsonl 14 | playground/data_VT/**/*.json 15 | playground/data_VT/**/*.jsonl 16 | 17 | 18 | # Data 19 | !**/alpaca-data-conversation.json 20 | 21 | # Editor 22 | .idea 23 | *.swp 24 | 25 | # Other 26 | .DS_Store 27 | wandb 28 | output 29 | 30 | checkpoints 31 | ckpts* 32 | 33 | .ipynb_checkpoints 34 | *.ipynb 35 | 36 | # DevContainer 37 | !.devcontainer/* 38 | 39 | # Demo 40 | serve_images/ 41 | 42 | # My 43 | playground/data/eval 44 | playground/data_VT/eval 45 | playground/data_VT.zip 46 | scripts/log -------------------------------------------------------------------------------- /cog.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for Cog ⚙️ 2 | # Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md 3 | 4 | build: 5 | gpu: true 6 | 7 | python_version: "3.11" 8 | 9 | python_packages: 10 | - "torch==2.0.1" 11 | - "accelerate==0.21.0" 12 | - "bitsandbytes==0.41.0" 13 | - "deepspeed==0.9.5" 14 | - "einops-exts==0.0.4" 15 | - "einops==0.6.1" 16 | - "gradio==3.35.2" 17 | - "gradio_client==0.2.9" 18 | - "httpx==0.24.0" 19 | - "markdown2==2.4.10" 20 | - "numpy==1.26.0" 21 | - "peft==0.4.0" 22 | - "scikit-learn==1.2.2" 23 | - "sentencepiece==0.1.99" 24 | - "shortuuid==1.0.11" 25 | - "timm==0.6.13" 26 | - "tokenizers==0.13.3" 27 | - "torch==2.0.1" 28 | - "torchvision==0.15.2" 29 | - "transformers==4.31.0" 30 | - "wandb==0.15.12" 31 | - "wavedrom==2.0.3.post3" 32 | - "Pygments==2.16.1" 33 | run: 34 | - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.0.3/pget" && chmod +x /usr/local/bin/pget 35 | 36 | # predict.py defines how predictions are run on your model 37 | predict: "predict.py:Predictor" 38 | -------------------------------------------------------------------------------- /docs/Customize_Component.md: -------------------------------------------------------------------------------- 1 | # Customize Components in LLaVA 2 | 3 | This is an initial guide on how to replace the LLMs, visual encoders, etc. with your choice of components. 4 | 5 | ## LLM 6 | 7 | It is quite simple to swap out LLaMA to any other LLMs. You can refer to our implementation of [`llava_llama.py`](https://raw.githubusercontent.com/haotian-liu/LLaVA/main/llava/model/language_model/llava_llama.py) for an example of how to replace the LLM. 
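For orientation, here is a condensed, hypothetical sketch of that wrapper pattern. It is not code from this repository: the `LlavaMyLM*` names are made up, the LLaMA classes stand in for whichever Hugging Face causal LM you want to plug in, and the multimodal helper calls follow `llava_llama.py` (check that file for the exact signatures in this codebase).

```python
import torch.nn as nn
from transformers import LlamaConfig, LlamaModel, LlamaForCausalLM

from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM


class LlavaMyLMConfig(LlamaConfig):  # replace LlamaConfig with your LLM's config class
    model_type = "llava_mylm"


class LlavaMyLMModel(LlavaMetaModel, LlamaModel):
    # LlavaMetaModel adds the vision tower and the mm_projector on top of the base LM.
    config_class = LlavaMyLMConfig


class LlavaMyLMForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
    config_class = LlavaMyLMConfig

    def __init__(self, config):
        # Skip LlamaForCausalLM.__init__ so the backbone is built once, as the multimodal model.
        super(LlamaForCausalLM, self).__init__(config)
        self.model = LlavaMyLMModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.post_init()

    def get_model(self):
        return self.model

    def forward(self, input_ids=None, attention_mask=None, past_key_values=None,
                inputs_embeds=None, labels=None, images=None, **kwargs):
        # The only multimodal-specific step: splice projected image features into the
        # token embeddings before running the ordinary language-model forward pass.
        input_ids, attention_mask, past_key_values, inputs_embeds, labels = \
            self.prepare_inputs_labels_for_multimodal(
                input_ids, attention_mask, past_key_values, labels, images)
        return super().forward(input_ids=input_ids, attention_mask=attention_mask,
                               past_key_values=past_key_values, inputs_embeds=inputs_embeds,
                               labels=labels, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None,
                                      inputs_embeds=None, **kwargs):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids, past_key_values=past_key_values,
            inputs_embeds=inputs_embeds, **kwargs)
        # Carry the images through so they are available at every generation step.
        model_inputs["images"] = kwargs.get("images", None)
        return model_inputs
```

The real `llava_llama.py` additionally registers the new config and model classes with `AutoConfig` / `AutoModelForCausalLM` so that checkpoints load through the usual `from_pretrained` path.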
8 | 9 | Although it may seem that it still needs ~100 lines of code, most of them are copied from the original `llama.py` from HF. The only part that is different is to insert some lines for processing the multimodal inputs. 10 | 11 | In `forward` function, you can see that we call `self.prepare_inputs_labels_for_multimodal` to process the multimodal inputs. This function is defined in `LlavaMetaForCausalLM` and you just need to insert it into the `forward` function of your LLM. 12 | 13 | In `prepare_inputs_for_generation` function, you can see that we add `images` to the `model_inputs`. This is because we need to pass the images to the LLM during generation. 14 | 15 | These are basically all the changes you need to make to replace the LLM. 16 | 17 | ## Visual Encoder 18 | 19 | You can check out [`clip_encoder.py`](https://github.com/haotian-liu/LLaVA/blob/main/llava/model/multimodal_encoder/clip_encoder.py) on how we implement the CLIP visual encoder. 20 | 21 | -------------------------------------------------------------------------------- /docs/Data.md: -------------------------------------------------------------------------------- 1 | ## Data 2 | 3 | | Data file name | Size | 4 | | --- | ---: | 5 | | [llava_instruct_150k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_instruct_150k.json) | 229 MB | 6 | | [llava_instruct_80k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_instruct_80k.json) | 229 MB | 7 | | [conversation_58k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/conversation_58k.json) | 126 MB | 8 | | [detail_23k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/detail_23k.json) | 20.5 MB | 9 | | [complex_reasoning_77k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/complex_reasoning_77k.json) | 79.6 MB | 10 | 11 | ### Pretraining Dataset 12 | The pretraining dataset used in this release is a subset of CC-3M dataset, filtered with a more balanced concept coverage distribution. Please see [here](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K) for a detailed description of the dataset structure and how to download the images. 13 | 14 | If you already have CC-3M dataset on your disk, the image names follow this format: `GCC_train_000000000.jpg`. You may edit the `image` field correspondingly if necessary. 15 | 16 | | Data | Chat File | Meta Data | Size | 17 | | --- | --- | --- | ---: | 18 | | CC-3M Concept-balanced 595K | [chat.json](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K/blob/main/chat.json) | [metadata.json](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K/blob/main/metadata.json) | 211 MB 19 | | LAION/CC/SBU BLIP-Caption Concept-balanced 558K | [blip_laion_cc_sbu_558k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain/blob/main/blip_laion_cc_sbu_558k.json) | [metadata.json](#) | 181 MB 20 | 21 | **Important notice**: Upon the request from the community, as ~15% images of the original CC-3M dataset are no longer accessible, we upload [`images.zip`](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K/blob/main/images.zip) for better reproducing our work in research community. It must not be used for any other purposes. The use of these images must comply with the CC-3M license. This may be taken down at any time when requested by the original CC-3M dataset owner or owners of the referenced images. 
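Relating to the note above about the `GCC_train_*` filenames: if your local copy of CC-3M is organized differently, a small throwaway script is enough to rewrite the `image` fields in `chat.json`. The sketch below is illustrative only; `local_name_for` is a placeholder for however your images are actually named or indexed.

```python
import json

def local_name_for(gcc_name: str) -> str:
    # Placeholder: map e.g. "GCC_train_000000000.jpg" to the filename/path used on your disk.
    return gcc_name

with open("chat.json") as f:
    samples = json.load(f)

for sample in samples:
    sample["image"] = local_name_for(sample["image"])

with open("chat_local.json", "w") as f:
    json.dump(samples, f)
```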
-------------------------------------------------------------------------------- /docs/Finetune_Custom_Data.md: -------------------------------------------------------------------------------- 1 | # Finetune LLaVA on Custom Datasets 2 | 3 | ## Dataset Format 4 | 5 | Convert your data to a JSON file of a List of all samples. Sample metadata should contain `id` (a unique identifier), `image` (the path to the image), and `conversations` (the conversation data between human and AI). 6 | 7 | A sample JSON for finetuning LLaVA for generating tag-style captions for Stable Diffusion: 8 | 9 | ```json 10 | [ 11 | { 12 | "id": "997bb945-628d-4724-b370-b84de974a19f", 13 | "image": "part-000001/997bb945-628d-4724-b370-b84de974a19f.jpg", 14 | "conversations": [ 15 | { 16 | "from": "human", 17 | "value": "\nWrite a prompt for Stable Diffusion to generate this image." 18 | }, 19 | { 20 | "from": "gpt", 21 | "value": "a beautiful painting of chernobyl by nekro, pascal blanche, john harris, greg rutkowski, sin jong hun, moebius, simon stalenhag. in style of cg art. ray tracing. cel shading. hyper detailed. realistic. ue 5. maya. octane render. " 22 | }, 23 | ] 24 | }, 25 | ... 26 | ] 27 | ``` 28 | 29 | ## Command 30 | 31 | If you have a limited task-specific data, we recommend finetuning from LLaVA checkpoints with LoRA following this [script](https://github.com/haotian-liu/LLaVA/blob/main/scripts/v1_5/finetune_task_lora.sh). 32 | 33 | If the amount of the task-specific data is sufficient, you can also finetune from LLaVA checkpoints with full-model finetuning following this [script](https://github.com/haotian-liu/LLaVA/blob/main/scripts/v1_5/finetune_task.sh). 34 | 35 | You may need to adjust the hyperparameters to fit each specific dataset and your hardware constraint. 36 | 37 | 38 | -------------------------------------------------------------------------------- /docs/ScienceQA.md: -------------------------------------------------------------------------------- 1 | ### ScienceQA 2 | 3 | #### Prepare Data 4 | 1. Please see ScienceQA [repo](https://github.com/lupantech/ScienceQA) for setting up the dataset. 5 | 2. Generate ScienceQA dataset for LLaVA conversation-style format. 6 | 7 | ```Shell 8 | python scripts/convert_sqa_to_llava.py \ 9 | convert_to_llava \ 10 | --base-dir /path/to/ScienceQA/data/scienceqa \ 11 | --prompt-format "QCM-LEA" \ 12 | --split {train,val,minival,test,minitest} 13 | ``` 14 | 15 | #### Training 16 | 17 | 1. Pretraining 18 | 19 | You can download our pretrained projector weights from our [Model Zoo](), or train your own projector weights using [`pretrain.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/pretrain.sh). 20 | 21 | 2. Finetuning 22 | 23 | See [`finetune_sqa.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune_sqa.sh). 24 | 25 | #### Evaluation 26 | 27 | 1. Multiple-GPU inference 28 | You may evaluate this with multiple GPUs, and concatenate the generated jsonl files. Please refer to our script for [batch evaluation](https://github.com/haotian-liu/LLaVA/blob/main/scripts/sqa_eval_batch.sh) and [results gathering](https://github.com/haotian-liu/LLaVA/blob/main/scripts/sqa_eval_gather.sh). 29 | 30 | 2. 
Single-GPU inference 31 | 32 | (a) Generate LLaVA responses on ScienceQA dataset 33 | 34 | ```Shell 35 | python -m llava.eval.model_vqa_science \ 36 | --model-path liuhaotian/llava-lcs558k-scienceqa-vicuna-13b-v1.3 \ 37 | --question-file /path/to/ScienceQA/data/scienceqa/llava_test_QCM-LEA.json \ 38 | --image-folder /path/to/ScienceQA/data/scienceqa/images/test \ 39 | --answers-file vqa/results/ScienceQA/test_llava-13b.jsonl \ 40 | --conv-mode llava_v1 41 | ``` 42 | 43 | (b) Evaluate the generated responses 44 | 45 | ```Shell 46 | python eval_science_qa.py \ 47 | --base-dir /path/to/ScienceQA/data/scienceqa \ 48 | --result-file vqa/results/ScienceQA/test_llava-13b.jsonl \ 49 | --output-file vqa/results/ScienceQA/test_llava-13b_output.json \ 50 | --output-result vqa/results/ScienceQA/test_llava-13b_result.json \ 51 | ``` 52 | 53 | For reference, we attach our prediction file [`test_sqa_llava_lcs_558k_sqa_12e_vicuna_v1_3_13b.json`](https://github.com/haotian-liu/LLaVA/blob/main/llava/eval/table/results/test_sqa_llava_lcs_558k_sqa_12e_vicuna_v1_3_13b.json) and [`test_sqa_llava_13b_v0.json`](https://github.com/haotian-liu/LLaVA/blob/main/llava/eval/table/results/test_sqa_llava_13b_v0.json) for comparison when reproducing our results, as well as for further analysis in detail. 54 | -------------------------------------------------------------------------------- /docs/table1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaVi-Lab/Visual-Table/c17ab793bc49e7c8d9d8fa9018da23b047ae19be/docs/table1.png -------------------------------------------------------------------------------- /docs/teaser_figure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaVi-Lab/Visual-Table/c17ab793bc49e7c8d9d8fa9018da23b047ae19be/docs/teaser_figure.png -------------------------------------------------------------------------------- /llava/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import LlavaLlamaForCausalLM 2 | -------------------------------------------------------------------------------- /llava/constants.py: -------------------------------------------------------------------------------- 1 | CONTROLLER_HEART_BEAT_EXPIRATION = 30 2 | WORKER_HEART_BEAT_INTERVAL = 15 3 | 4 | LOGDIR = "." 5 | 6 | # Model Constants 7 | IGNORE_INDEX = -100 8 | IMAGE_TOKEN_INDEX = -200 9 | DEFAULT_IMAGE_TOKEN = "" 10 | DEFAULT_IMAGE_PATCH_TOKEN = "" 11 | DEFAULT_IM_START_TOKEN = "" 12 | DEFAULT_IM_END_TOKEN = "" 13 | IMAGE_PLACEHOLDER = "" 14 | -------------------------------------------------------------------------------- /llava/eval/eval_gpt_review.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import os 4 | 5 | import openai 6 | import tqdm 7 | import ray 8 | import time 9 | 10 | NUM_SECONDS_TO_SLEEP = 3 11 | 12 | @ray.remote(num_cpus=4) 13 | def get_eval(content: str, max_tokens: int): 14 | while True: 15 | try: 16 | response = openai.ChatCompletion.create( 17 | model='gpt-4', 18 | messages=[{ 19 | 'role': 'system', 20 | 'content': 'You are a helpful and precise assistant for checking the quality of the answer.' 
21 | }, { 22 | 'role': 'user', 23 | 'content': content, 24 | }], 25 | temperature=0.2, # TODO: figure out which temperature is best for evaluation 26 | max_tokens=max_tokens, 27 | ) 28 | break 29 | except openai.error.RateLimitError: 30 | pass 31 | except Exception as e: 32 | print(e) 33 | time.sleep(NUM_SECONDS_TO_SLEEP) 34 | 35 | print('success!') 36 | return response['choices'][0]['message']['content'] 37 | 38 | 39 | def parse_score(review): 40 | try: 41 | score_pair = review.split('\n')[0] 42 | score_pair = score_pair.replace(',', ' ') 43 | sp = score_pair.split(' ') 44 | if len(sp) == 2: 45 | return [float(sp[0]), float(sp[1])] 46 | else: 47 | print('error', review) 48 | return [-1, -1] 49 | except Exception as e: 50 | print(e) 51 | print('error', review) 52 | return [-1, -1] 53 | 54 | 55 | if __name__ == '__main__': 56 | parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') 57 | parser.add_argument('-q', '--question') 58 | # parser.add_argument('-a', '--answer') 59 | parser.add_argument('-a', '--answer-list', nargs='+', default=[]) 60 | parser.add_argument('-r', '--rule') 61 | parser.add_argument('-o', '--output') 62 | parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output') 63 | args = parser.parse_args() 64 | 65 | ray.init() 66 | 67 | f_q = open(os.path.expanduser(args.question)) 68 | f_ans1 = open(os.path.expanduser(args.answer_list[0])) 69 | f_ans2 = open(os.path.expanduser(args.answer_list[1])) 70 | rule_dict = json.load(open(os.path.expanduser(args.rule), 'r')) 71 | 72 | review_file = open(f'{args.output}', 'w') 73 | 74 | js_list = [] 75 | handles = [] 76 | idx = 0 77 | for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2): 78 | # if idx == 1: 79 | # break 80 | 81 | ques = json.loads(ques_js) 82 | ans1 = json.loads(ans1_js) 83 | ans2 = json.loads(ans2_js) 84 | 85 | category = json.loads(ques_js)['category'] 86 | if category in rule_dict: 87 | rule = rule_dict[category] 88 | else: 89 | rule = rule_dict['default'] 90 | prompt = rule['prompt'] 91 | role = rule['role'] 92 | content = (f'[Question]\n{ques["text"]}\n\n' 93 | f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n' 94 | f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n' 95 | f'[System]\n{prompt}\n\n') 96 | js_list.append({ 97 | 'id': idx+1, 98 | 'question_id': ques['question_id'], 99 | 'answer1_id': ans1['answer_id'], 100 | 'answer2_id': ans2['answer_id'], 101 | 'category': category}) 102 | idx += 1 103 | handles.append(get_eval.remote(content, args.max_tokens)) 104 | # To avoid the rate limit set by OpenAI 105 | time.sleep(NUM_SECONDS_TO_SLEEP) 106 | 107 | reviews = ray.get(handles) 108 | for idx, review in enumerate(reviews): 109 | scores = parse_score(review) 110 | js_list[idx]['content'] = review 111 | js_list[idx]['tuple'] = scores 112 | review_file.write(json.dumps(js_list[idx]) + '\n') 113 | review_file.close() 114 | -------------------------------------------------------------------------------- /llava/eval/eval_gpt_review_visual.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import os 4 | 5 | import openai 6 | import time 7 | 8 | NUM_SECONDS_TO_SLEEP = 0.5 9 | 10 | 11 | def get_eval(content: str, max_tokens: int): 12 | while True: 13 | try: 14 | response = openai.ChatCompletion.create( 15 | model='gpt-4-0314', 16 | messages=[{ 17 | 'role': 'system', 18 | 'content': 'You are a helpful and precise assistant for checking the quality of the answer.' 
19 | }, { 20 | 'role': 'user', 21 | 'content': content, 22 | }], 23 | temperature=0.2, # TODO: figure out which temperature is best for evaluation 24 | max_tokens=max_tokens, 25 | ) 26 | break 27 | except openai.error.RateLimitError: 28 | pass 29 | except Exception as e: 30 | print(e) 31 | time.sleep(NUM_SECONDS_TO_SLEEP) 32 | 33 | return response['choices'][0]['message']['content'] 34 | 35 | 36 | def parse_score(review): 37 | try: 38 | score_pair = review.split('\n')[0] 39 | score_pair = score_pair.replace(',', ' ') 40 | sp = score_pair.split(' ') 41 | if len(sp) == 2: 42 | return [float(sp[0]), float(sp[1])] 43 | else: 44 | print('error', review) 45 | return [-1, -1] 46 | except Exception as e: 47 | print(e) 48 | print('error', review) 49 | return [-1, -1] 50 | 51 | 52 | if __name__ == '__main__': 53 | parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') 54 | parser.add_argument('-q', '--question') 55 | parser.add_argument('-c', '--context') 56 | parser.add_argument('-a', '--answer-list', nargs='+', default=[]) 57 | parser.add_argument('-r', '--rule') 58 | parser.add_argument('-o', '--output') 59 | parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output') 60 | args = parser.parse_args() 61 | 62 | f_q = open(os.path.expanduser(args.question)) 63 | f_ans1 = open(os.path.expanduser(args.answer_list[0])) 64 | f_ans2 = open(os.path.expanduser(args.answer_list[1])) 65 | rule_dict = json.load(open(os.path.expanduser(args.rule), 'r')) 66 | 67 | if os.path.isfile(os.path.expanduser(args.output)): 68 | cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))] 69 | else: 70 | cur_reviews = [] 71 | 72 | review_file = open(f'{args.output}', 'a') 73 | 74 | context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))] 75 | image_to_context = {context['image']: context for context in context_list} 76 | 77 | handles = [] 78 | idx = 0 79 | for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2): 80 | ques = json.loads(ques_js) 81 | ans1 = json.loads(ans1_js) 82 | ans2 = json.loads(ans2_js) 83 | 84 | inst = image_to_context[ques['image']] 85 | cap_str = '\n'.join(inst['captions']) 86 | box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']]) 87 | 88 | category = json.loads(ques_js)['category'] 89 | if category in rule_dict: 90 | rule = rule_dict[category] 91 | else: 92 | assert False, f"Visual QA category not found in rule file: {category}." 
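        # Build the GPT-4 review request from the category's rule (grading prompt + reviewer role),
        # the image's captions and object boxes, the question, and the two candidate answers.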
93 | prompt = rule['prompt'] 94 | role = rule['role'] 95 | content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n' 96 | f'[Question]\n{ques["text"]}\n\n' 97 | f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n' 98 | f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n' 99 | f'[System]\n{prompt}\n\n') 100 | cur_js = { 101 | 'id': idx+1, 102 | 'question_id': ques['question_id'], 103 | 'answer1_id': ans1.get('answer_id', ans1['question_id']), 104 | 'answer2_id': ans2.get('answer_id', ans2['answer_id']), 105 | 'category': category 106 | } 107 | if idx >= len(cur_reviews): 108 | review = get_eval(content, args.max_tokens) 109 | scores = parse_score(review) 110 | cur_js['content'] = review 111 | cur_js['tuple'] = scores 112 | review_file.write(json.dumps(cur_js) + '\n') 113 | review_file.flush() 114 | else: 115 | print(f'Skipping {idx} as we already have it.') 116 | idx += 1 117 | print(idx) 118 | review_file.close() 119 | -------------------------------------------------------------------------------- /llava/eval/eval_pope.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | def eval_pope(answers, label_file): 6 | label_list = [json.loads(q)['label'] for q in open(label_file, 'r')] 7 | 8 | for answer in answers: 9 | text = answer['text'] 10 | 11 | # Only keep the first sentence 12 | if text.find('.') != -1: 13 | text = text.split('.')[0] 14 | 15 | text = text.replace(',', '') 16 | words = text.split(' ') 17 | if 'No' in words or 'not' in words or 'no' in words: 18 | answer['text'] = 'no' 19 | else: 20 | answer['text'] = 'yes' 21 | 22 | for i in range(len(label_list)): 23 | if label_list[i] == 'no': 24 | label_list[i] = 0 25 | else: 26 | label_list[i] = 1 27 | 28 | pred_list = [] 29 | for answer in answers: 30 | if answer['text'] == 'no': 31 | pred_list.append(0) 32 | else: 33 | pred_list.append(1) 34 | 35 | pos = 1 36 | neg = 0 37 | yes_ratio = pred_list.count(1) / len(pred_list) 38 | 39 | TP, TN, FP, FN = 0, 0, 0, 0 40 | for pred, label in zip(pred_list, label_list): 41 | if pred == pos and label == pos: 42 | TP += 1 43 | elif pred == pos and label == neg: 44 | FP += 1 45 | elif pred == neg and label == neg: 46 | TN += 1 47 | elif pred == neg and label == pos: 48 | FN += 1 49 | 50 | print('TP\tFP\tTN\tFN\t') 51 | print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN)) 52 | 53 | precision = float(TP) / float(TP + FP) 54 | recall = float(TP) / float(TP + FN) 55 | f1 = 2*precision*recall / (precision + recall) 56 | acc = (TP + TN) / (TP + TN + FP + FN) 57 | print('Accuracy: {}'.format(acc)) 58 | print('Precision: {}'.format(precision)) 59 | print('Recall: {}'.format(recall)) 60 | print('F1 score: {}'.format(f1)) 61 | print('Yes ratio: {}'.format(yes_ratio)) 62 | print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio) ) 63 | return f1 64 | 65 | if __name__ == "__main__": 66 | parser = argparse.ArgumentParser() 67 | parser.add_argument("--annotation-dir", type=str) 68 | parser.add_argument("--question-file", type=str) 69 | parser.add_argument("--result-file", type=str) 70 | args = parser.parse_args() 71 | 72 | questions = [json.loads(line) for line in open(args.question_file)] 73 | questions = {question['question_id']: question for question in questions} 74 | answers = [json.loads(q) for q in open(args.result_file)] 75 | avg_f1 = 0.0 76 | for file in os.listdir(args.annotation_dir): 77 | assert file.startswith('coco_pope_') 78 | assert file.endswith('.json') 79 | category = file[10:-5] 80 | 
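        # file[10:-5] drops the 'coco_pope_' prefix and the '.json' suffix, leaving the
        # POPE split name (e.g. random / popular / adversarial).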
cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category] 81 | print('Category: {}, # samples: {}'.format(category, len(cur_answers))) 82 | avg_f1 += eval_pope(cur_answers, os.path.join(args.annotation_dir, file)) 83 | print("====================================") 84 | 85 | print("avg_f1 = ", avg_f1/3) -------------------------------------------------------------------------------- /llava/eval/eval_science_qa_gpt4.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import os 4 | import re 5 | import random 6 | from collections import defaultdict 7 | 8 | 9 | def get_args(): 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('--base-dir', type=str) 12 | parser.add_argument('--gpt4-result', type=str) 13 | parser.add_argument('--our-result', type=str) 14 | parser.add_argument('--split', type=str, default='test') 15 | parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"]) 16 | return parser.parse_args() 17 | 18 | 19 | def convert_caps(results): 20 | fakecaps = [] 21 | for result in results: 22 | image_id = result['question_id'] 23 | caption = result['text'] 24 | fakecaps.append({"image_id": int(image_id), "caption": caption}) 25 | return fakecaps 26 | 27 | 28 | def get_pred_idx(prediction, choices, options): 29 | """ 30 | Get the index (e.g. 2) from the prediction (e.g. 'C') 31 | """ 32 | if prediction in options[:len(choices)]: 33 | return options.index(prediction) 34 | else: 35 | return random.choice(range(len(choices))) 36 | 37 | 38 | if __name__ == "__main__": 39 | args = get_args() 40 | 41 | base_dir = args.base_dir 42 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split] 43 | problems = json.load(open(os.path.join(base_dir, "problems.json"))) 44 | our_predictions = [json.loads(line) for line in open(args.our_result)] 45 | our_predictions = {pred['question_id']: pred for pred in our_predictions} 46 | split_problems = {idx: problems[idx] for idx in split_indices} 47 | 48 | gpt4_predictions = json.load(open(args.gpt4_result))['outputs'] 49 | 50 | results = defaultdict(lambda: 0) 51 | 52 | for prob_id, prob in split_problems.items(): 53 | if prob_id not in our_predictions: 54 | continue 55 | if prob_id not in gpt4_predictions: 56 | continue 57 | our_pred = our_predictions[prob_id]['text'] 58 | gpt4_pred = gpt4_predictions[prob_id] 59 | 60 | pattern = re.compile(r'The answer is ([A-Z]).') 61 | our_res = pattern.findall(our_pred) 62 | if len(our_res) == 1: 63 | our_answer = our_res[0] # 'A', 'B', ... 64 | else: 65 | our_answer = "FAILED" 66 | gpt4_res = pattern.findall(gpt4_pred) 67 | if len(gpt4_res) == 1: 68 | gpt4_answer = gpt4_res[0] # 'A', 'B', ... 
69 | else: 70 | gpt4_answer = "FAILED" 71 | 72 | our_pred_idx = get_pred_idx(our_answer, prob['choices'], args.options) 73 | gpt4_pred_idx = get_pred_idx(gpt4_answer, prob['choices'], args.options) 74 | 75 | if gpt4_answer == 'FAILED': 76 | results['gpt4_failed'] += 1 77 | # continue 78 | gpt4_pred_idx = our_pred_idx 79 | # if our_pred_idx != prob['answer']: 80 | # print(our_predictions[prob_id]['prompt']) 81 | # print('-----------------') 82 | # print(f'LECTURE: {prob["lecture"]}') 83 | # print(f'SOLUTION: {prob["solution"]}') 84 | # print('=====================') 85 | else: 86 | # continue 87 | pass 88 | # gpt4_pred_idx = our_pred_idx 89 | 90 | if gpt4_pred_idx == prob['answer']: 91 | results['correct'] += 1 92 | else: 93 | results['incorrect'] += 1 94 | 95 | 96 | if gpt4_pred_idx == prob['answer'] or our_pred_idx == prob['answer']: 97 | results['correct_upperbound'] += 1 98 | 99 | correct = results['correct'] 100 | total = results['correct'] + results['incorrect'] 101 | print(f'Total: {total}, Correct: {correct}, Accuracy: {correct / total * 100:.2f}%') 102 | print(f'Total: {total}, Correct (upper): {results["correct_upperbound"]}, Accuracy: {results["correct_upperbound"] / total * 100:.2f}%') 103 | print(f'Total: {total}, GPT-4 NO-ANS (RANDOM): {results["gpt4_failed"]}, Percentage: {results["gpt4_failed"] / total * 100:.2f}%') 104 | 105 | -------------------------------------------------------------------------------- /llava/eval/eval_textvqa.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import json 4 | import re 5 | 6 | from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator 7 | 8 | 9 | def get_args(): 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument('--annotation-file', type=str) 12 | parser.add_argument('--result-file', type=str) 13 | parser.add_argument('--result-dir', type=str) 14 | return parser.parse_args() 15 | 16 | 17 | def prompt_processor(prompt): 18 | if prompt.startswith('Visual table:'): 19 | prompt_split = prompt.split('\n') 20 | assert prompt_split[-1] == 'Answer the question using a single word or phrase.' 21 | assert prompt_split[-4] == 'Based on the given image and given visual table, answer the following question:' 22 | question = prompt.split('\n')[-3] 23 | elif prompt.startswith('Detailed caption:'): 24 | prompt_split = prompt.split('\n') 25 | assert prompt_split[-1] == 'Answer the question using a single word or phrase.' 26 | assert prompt_split[-4] == 'Based on the given image and given detailed caption, answer the following question:' 27 | question = prompt.split('\n')[-3] 28 | elif prompt.startswith('Scene graph:'): 29 | prompt_split = prompt.split('\n') 30 | assert prompt_split[-1] == 'Answer the question using a single word or phrase.' 31 | assert prompt_split[-4] == 'Based on the given image and given scene graph, answer the following question:' 32 | question = prompt.split('\n')[-3] 33 | elif prompt.startswith('Caption:'): 34 | prompt_split = prompt.split('\n') 35 | assert prompt_split[-1] == 'Answer the question using a single word or phrase.' 36 | assert prompt_split[-4] == 'Based on the given image and given caption, answer the following question:' 37 | question = prompt.split('\n')[-3] 38 | elif prompt.startswith('OCR tokens: '): 39 | pattern = r"Question: (.*?) 
Short answer:" 40 | match = re.search(pattern, prompt, re.DOTALL) 41 | question = match.group(1) 42 | elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3: 43 | if prompt.startswith('Reference OCR token:'): 44 | question = prompt.split('\n')[1] 45 | else: 46 | question = prompt.split('\n')[0] 47 | elif len(prompt.split('\n')) == 2: 48 | question = prompt.split('\n')[0] 49 | else: 50 | assert False 51 | 52 | return question.lower() 53 | 54 | 55 | def eval_single(annotation_file, result_file): 56 | experiment_name = os.path.splitext(os.path.basename(result_file))[0] 57 | print(experiment_name) 58 | annotations = json.load(open(annotation_file))['data'] 59 | annotations = {(annotation['image_id'], annotation['question'].lower()): annotation for annotation in annotations} 60 | results = [json.loads(line) for line in open(result_file)] 61 | 62 | pred_list = [] 63 | for result in results: 64 | annotation = annotations[(result['question_id'], prompt_processor(result['prompt']))] 65 | pred_list.append({ 66 | "pred_answer": result['text'], 67 | "gt_answers": annotation['answers'], 68 | }) 69 | 70 | evaluator = TextVQAAccuracyEvaluator() 71 | print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), 100. * evaluator.eval_pred_list(pred_list))) 72 | 73 | 74 | if __name__ == "__main__": 75 | args = get_args() 76 | 77 | if args.result_file is not None: 78 | eval_single(args.annotation_file, args.result_file) 79 | 80 | if args.result_dir is not None: 81 | for result_file in sorted(os.listdir(args.result_dir)): 82 | if not result_file.endswith('.jsonl'): 83 | print(f'Skipping {result_file}') 84 | continue 85 | eval_single(args.annotation_file, os.path.join(args.result_dir, result_file)) 86 | -------------------------------------------------------------------------------- /llava/eval/generate_webpage_data_from_table.py: -------------------------------------------------------------------------------- 1 | """Generate json file for webpage.""" 2 | import json 3 | import os 4 | import re 5 | 6 | # models = ['llama', 'alpaca', 'gpt35', 'bard'] 7 | models = ['vicuna'] 8 | 9 | 10 | def read_jsonl(path: str, key: str=None): 11 | data = [] 12 | with open(os.path.expanduser(path)) as f: 13 | for line in f: 14 | if not line: 15 | continue 16 | data.append(json.loads(line)) 17 | if key is not None: 18 | data.sort(key=lambda x: x[key]) 19 | data = {item[key]: item for item in data} 20 | return data 21 | 22 | 23 | def trim_hanging_lines(s: str, n: int) -> str: 24 | s = s.strip() 25 | for _ in range(n): 26 | s = s.split('\n', 1)[1].strip() 27 | return s 28 | 29 | 30 | if __name__ == '__main__': 31 | questions = read_jsonl('table/question.jsonl', key='question_id') 32 | 33 | # alpaca_answers = read_jsonl('table/answer/answer_alpaca-13b.jsonl', key='question_id') 34 | # bard_answers = read_jsonl('table/answer/answer_bard.jsonl', key='question_id') 35 | # gpt35_answers = read_jsonl('table/answer/answer_gpt35.jsonl', key='question_id') 36 | # llama_answers = read_jsonl('table/answer/answer_llama-13b.jsonl', key='question_id') 37 | vicuna_answers = read_jsonl('table/answer/answer_vicuna-13b.jsonl', key='question_id') 38 | ours_answers = read_jsonl('table/results/llama-13b-hf-alpaca.jsonl', key='question_id') 39 | 40 | review_vicuna = read_jsonl('table/review/review_vicuna-13b_llama-13b-hf-alpaca.jsonl', key='question_id') 41 | # review_alpaca = read_jsonl('table/review/review_alpaca-13b_vicuna-13b.jsonl', key='question_id') 42 | # review_bard = read_jsonl('table/review/review_bard_vicuna-13b.jsonl', 
key='question_id') 43 | # review_gpt35 = read_jsonl('table/review/review_gpt35_vicuna-13b.jsonl', key='question_id') 44 | # review_llama = read_jsonl('table/review/review_llama-13b_vicuna-13b.jsonl', key='question_id') 45 | 46 | records = [] 47 | for qid in questions.keys(): 48 | r = { 49 | 'id': qid, 50 | 'category': questions[qid]['category'], 51 | 'question': questions[qid]['text'], 52 | 'answers': { 53 | # 'alpaca': alpaca_answers[qid]['text'], 54 | # 'llama': llama_answers[qid]['text'], 55 | # 'bard': bard_answers[qid]['text'], 56 | # 'gpt35': gpt35_answers[qid]['text'], 57 | 'vicuna': vicuna_answers[qid]['text'], 58 | 'ours': ours_answers[qid]['text'], 59 | }, 60 | 'evaluations': { 61 | # 'alpaca': review_alpaca[qid]['text'], 62 | # 'llama': review_llama[qid]['text'], 63 | # 'bard': review_bard[qid]['text'], 64 | 'vicuna': review_vicuna[qid]['content'], 65 | # 'gpt35': review_gpt35[qid]['text'], 66 | }, 67 | 'scores': { 68 | 'vicuna': review_vicuna[qid]['tuple'], 69 | # 'alpaca': review_alpaca[qid]['score'], 70 | # 'llama': review_llama[qid]['score'], 71 | # 'bard': review_bard[qid]['score'], 72 | # 'gpt35': review_gpt35[qid]['score'], 73 | }, 74 | } 75 | 76 | # cleanup data 77 | cleaned_evals = {} 78 | for k, v in r['evaluations'].items(): 79 | v = v.strip() 80 | lines = v.split('\n') 81 | # trim the first line if it's a pair of numbers 82 | if re.match(r'\d+[, ]+\d+', lines[0]): 83 | lines = lines[1:] 84 | v = '\n'.join(lines) 85 | cleaned_evals[k] = v.replace('Assistant 1', "**Assistant 1**").replace('Assistant 2', '**Assistant 2**') 86 | 87 | r['evaluations'] = cleaned_evals 88 | records.append(r) 89 | 90 | # Reorder the records, this is optional 91 | for r in records: 92 | if r['id'] <= 20: 93 | r['id'] += 60 94 | else: 95 | r['id'] -= 20 96 | for r in records: 97 | if r['id'] <= 50: 98 | r['id'] += 10 99 | elif 50 < r['id'] <= 60: 100 | r['id'] -= 50 101 | for r in records: 102 | if r['id'] == 7: 103 | r['id'] = 1 104 | elif r['id'] < 7: 105 | r['id'] += 1 106 | 107 | records.sort(key=lambda x: x['id']) 108 | 109 | # Write to file 110 | with open('webpage/data.json', 'w') as f: 111 | json.dump({'questions': records, 'models': models}, f, indent=2) 112 | -------------------------------------------------------------------------------- /llava/eval/model_qa.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria 3 | import torch 4 | import os 5 | import json 6 | from tqdm import tqdm 7 | import shortuuid 8 | 9 | from llava.conversation import default_conversation 10 | from llava.utils import disable_torch_init 11 | 12 | 13 | # new stopping implementation 14 | class KeywordsStoppingCriteria(StoppingCriteria): 15 | def __init__(self, keywords, tokenizer, input_ids): 16 | self.keywords = keywords 17 | self.tokenizer = tokenizer 18 | self.start_len = None 19 | self.input_ids = input_ids 20 | 21 | def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: 22 | if self.start_len is None: 23 | self.start_len = self.input_ids.shape[1] 24 | else: 25 | outputs = self.tokenizer.batch_decode(output_ids[:, self.start_len:], skip_special_tokens=True)[0] 26 | for keyword in self.keywords: 27 | if keyword in outputs: 28 | return True 29 | return False 30 | 31 | 32 | @torch.inference_mode() 33 | def eval_model(model_name, questions_file, answers_file): 34 | # Model 35 | disable_torch_init() 36 | model_name = 
os.path.expanduser(model_name) 37 | tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) 38 | model = AutoModelForCausalLM.from_pretrained(model_name, 39 | torch_dtype=torch.float16).cuda() 40 | 41 | 42 | ques_file = open(os.path.expanduser(questions_file), "r") 43 | ans_file = open(os.path.expanduser(answers_file), "w") 44 | for i, line in enumerate(tqdm(ques_file)): 45 | idx = json.loads(line)["question_id"] 46 | qs = json.loads(line)["text"] 47 | cat = json.loads(line)["category"] 48 | conv = default_conversation.copy() 49 | conv.append_message(conv.roles[0], qs) 50 | prompt = conv.get_prompt() 51 | inputs = tokenizer([prompt]) 52 | input_ids = torch.as_tensor(inputs.input_ids).cuda() 53 | stopping_criteria = KeywordsStoppingCriteria([conv.sep], tokenizer, input_ids) 54 | output_ids = model.generate( 55 | input_ids, 56 | do_sample=True, 57 | use_cache=True, 58 | temperature=0.7, 59 | max_new_tokens=1024, 60 | stopping_criteria=[stopping_criteria]) 61 | outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0] 62 | try: 63 | index = outputs.index(conv.sep, len(prompt)) 64 | except ValueError: 65 | outputs += conv.sep 66 | index = outputs.index(conv.sep, len(prompt)) 67 | 68 | outputs = outputs[len(prompt) + len(conv.roles[1]) + 2:index].strip() 69 | ans_id = shortuuid.uuid() 70 | ans_file.write(json.dumps({"question_id": idx, 71 | "text": outputs, 72 | "answer_id": ans_id, 73 | "model_id": model_name, 74 | "metadata": {}}) + "\n") 75 | ans_file.flush() 76 | ans_file.close() 77 | 78 | if __name__ == "__main__": 79 | parser = argparse.ArgumentParser() 80 | parser.add_argument("--model-name", type=str, default="facebook/opt-350m") 81 | parser.add_argument("--question-file", type=str, default="tables/question.jsonl") 82 | parser.add_argument("--answers-file", type=str, default="answer.jsonl") 83 | args = parser.parse_args() 84 | 85 | eval_model(args.model_name, args.question_file, args.answers_file) 86 | -------------------------------------------------------------------------------- /llava/eval/model_vqa_text_only.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import os 4 | import json 5 | from tqdm import tqdm 6 | import shortuuid 7 | 8 | from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN 9 | from llava.conversation import conv_templates, SeparatorStyle 10 | from llava.model.builder import load_pretrained_model 11 | from llava.utils import disable_torch_init 12 | from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria 13 | import math 14 | 15 | 16 | def split_list(lst, n): 17 | """Split a list into n (roughly) equal-sized chunks""" 18 | chunk_size = math.ceil(len(lst) / n) # integer division 19 | return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] 20 | 21 | 22 | def get_chunk(lst, n, k): 23 | chunks = split_list(lst, n) 24 | return chunks[k] 25 | 26 | 27 | def eval_model(args): 28 | # Model 29 | disable_torch_init() 30 | model_path = os.path.expanduser(args.model_path) 31 | model_name = get_model_name_from_path(model_path) 32 | tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name) 33 | 34 | questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")] 35 | questions = get_chunk(questions, args.num_chunks, args.chunk_idx) 36 | answers_file = os.path.expanduser(args.answers_file) 37 | 
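    # Answers are written incrementally as JSON Lines, so make sure the output directory exists first.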
os.makedirs(os.path.dirname(answers_file), exist_ok=True) 38 | ans_file = open(answers_file, "w") 39 | for line in tqdm(questions): 40 | idx = line["question_id"] 41 | qs = line["text"] 42 | cur_prompt = qs 43 | 44 | conv = conv_templates[args.conv_mode].copy() 45 | conv.append_message(conv.roles[0], qs) 46 | conv.append_message(conv.roles[1], None) 47 | prompt = conv.get_prompt() 48 | 49 | # if text-only, input_ids = tokenizer(prompt) 50 | input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() 51 | 52 | stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 53 | keywords = [stop_str] 54 | stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) 55 | 56 | with torch.inference_mode(): 57 | output_ids = model.generate( 58 | input_ids, 59 | do_sample=True if args.temperature > 0 else False, 60 | temperature=args.temperature, 61 | top_p=args.top_p, 62 | num_beams=args.num_beams, 63 | # no_repeat_ngram_size=3, 64 | max_new_tokens=1024, 65 | use_cache=True) 66 | 67 | input_token_len = input_ids.shape[1] 68 | n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() 69 | if n_diff_input_output > 0: 70 | print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') 71 | outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] 72 | outputs = outputs.strip() 73 | if outputs.endswith(stop_str): 74 | outputs = outputs[:-len(stop_str)] 75 | outputs = outputs.strip() 76 | 77 | ans_id = shortuuid.uuid() 78 | ans_file.write(json.dumps({"question_id": idx, 79 | "prompt": cur_prompt, 80 | "text": outputs, 81 | "answer_id": ans_id, 82 | "model_id": model_name, 83 | "metadata": {}}) + "\n") 84 | ans_file.flush() 85 | ans_file.close() 86 | 87 | if __name__ == "__main__": 88 | parser = argparse.ArgumentParser() 89 | parser.add_argument("--model-path", type=str, default="facebook/opt-350m") 90 | parser.add_argument("--model-base", type=str, default=None) 91 | parser.add_argument("--question-file", type=str, default="tables/question.jsonl") 92 | parser.add_argument("--answers-file", type=str, default="answer.jsonl") 93 | parser.add_argument("--conv-mode", type=str, default="llava_v1") 94 | parser.add_argument("--num-chunks", type=int, default=1) 95 | parser.add_argument("--chunk-idx", type=int, default=0) 96 | parser.add_argument("--temperature", type=float, default=0.2) 97 | parser.add_argument("--top_p", type=float, default=None) 98 | parser.add_argument("--num_beams", type=int, default=1) 99 | args = parser.parse_args() 100 | 101 | eval_model(args) 102 | -------------------------------------------------------------------------------- /llava/eval/qa_baseline_gpt35.py: -------------------------------------------------------------------------------- 1 | """Generate answers with GPT-3.5""" 2 | # Note: you need to be using OpenAI Python v0.27.0 for the code below to work 3 | import argparse 4 | import json 5 | import os 6 | import time 7 | import concurrent.futures 8 | 9 | import openai 10 | import tqdm 11 | import shortuuid 12 | 13 | MODEL = 'gpt-3.5-turbo' 14 | MODEL_ID = 'gpt-3.5-turbo:20230327' 15 | 16 | def get_answer(question_id: int, question: str, max_tokens: int): 17 | ans = { 18 | 'answer_id': shortuuid.uuid(), 19 | 'question_id': question_id, 20 | 'model_id': MODEL_ID, 21 | } 22 | for _ in range(3): 23 | try: 24 | response = openai.ChatCompletion.create( 25 | model=MODEL, 26 | messages=[{ 27 | 'role': 'system', 28 | 
'content': 'You are a helpful assistant.' 29 | }, { 30 | 'role': 'user', 31 | 'content': question, 32 | }], 33 | max_tokens=max_tokens, 34 | ) 35 | ans['text'] = response['choices'][0]['message']['content'] 36 | return ans 37 | except Exception as e: 38 | print('[ERROR]', e) 39 | ans['text'] = '#ERROR#' 40 | time.sleep(1) 41 | return ans 42 | 43 | 44 | if __name__ == '__main__': 45 | parser = argparse.ArgumentParser(description='ChatGPT answer generation.') 46 | parser.add_argument('-q', '--question') 47 | parser.add_argument('-o', '--output') 48 | parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output') 49 | args = parser.parse_args() 50 | 51 | questions_dict = {} 52 | with open(os.path.expanduser(args.question)) as f: 53 | for line in f: 54 | if not line: 55 | continue 56 | q = json.loads(line) 57 | questions_dict[q['question_id']] = q['text'] 58 | 59 | answers = [] 60 | 61 | with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor: 62 | futures = [] 63 | for qid, question in questions_dict.items(): 64 | future = executor.submit(get_answer, qid, question, args.max_tokens) 65 | futures.append(future) 66 | 67 | for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)): 68 | answers.append(future.result()) 69 | 70 | answers.sort(key=lambda x: x['question_id']) 71 | 72 | with open(os.path.expanduser(args.output), 'w') as f: 73 | table = [json.dumps(ans) for ans in answers] 74 | f.write('\n'.join(table)) 75 | -------------------------------------------------------------------------------- /llava/eval/summarize_gpt_review.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from collections import defaultdict 4 | 5 | import numpy as np 6 | 7 | import argparse 8 | 9 | def parse_args(): 10 | parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') 11 | parser.add_argument('-d', '--dir', default=None) 12 | parser.add_argument('-v', '--version', default=None) 13 | parser.add_argument('-s', '--select', nargs='*', default=None) 14 | parser.add_argument('-f', '--files', nargs='*', default=[]) 15 | parser.add_argument('-i', '--ignore', nargs='*', default=[]) 16 | return parser.parse_args() 17 | 18 | 19 | if __name__ == '__main__': 20 | args = parse_args() 21 | 22 | if args.ignore is not None: 23 | args.ignore = [int(x) for x in args.ignore] 24 | 25 | if len(args.files) > 0: 26 | review_files = args.files 27 | else: 28 | review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_') or 'review' in args.dir)] 29 | 30 | for review_file in sorted(review_files): 31 | config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '') 32 | if args.select is not None and any(x not in config for x in args.select): 33 | continue 34 | if '0613' in config: 35 | version = '0613' 36 | else: 37 | version = '0314' 38 | if args.version is not None and args.version != version: 39 | continue 40 | scores = defaultdict(list) 41 | print(config) 42 | with open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f: 43 | for review_str in f: 44 | review = json.loads(review_str) 45 | if review['question_id'] in args.ignore: 46 | continue 47 | if 'category' in review: 48 | scores[review['category']].append(review['tuple']) 49 | scores['all'].append(review['tuple']) 50 | else: 51 | if 'tuple' in review: 
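                        # A review entry carries either a pair of scores under 'tuple'
                        # or a single value under 'score'.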
52 | scores['all'].append(review['tuple']) 53 | else: 54 | scores['all'].append(review['score']) 55 | for k, v in sorted(scores.items()): 56 | stats = np.asarray(v).mean(0).tolist() 57 | stats = [round(x, 3) for x in stats] 58 | # print(k, stats, round(stats[1]/stats[0]*100, 1)) 59 | print(k, round(stats[1]/stats[0]*100, 1), round(stats[0] * 10, 1), round(stats[1] * 10, 1)) 60 | print('=================================') 61 | -------------------------------------------------------------------------------- /llava/eval/table/model.jsonl: -------------------------------------------------------------------------------- 1 | {"model_id": "vicuna-13b:20230322-clean-lang", "model_name": "vicuna-13b", "model_version": "20230322-clean-lang", "model_metadata": "vicuna-13b-20230322-clean-lang"} 2 | {"model_id": "alpaca-13b:v1", "model_name": "alpaca-13b", "model_version": "v1", "model_metadata": "alpaca-13b"} 3 | {"model_id": "llama-13b:v1", "model_name": "llama-13b", "model_version": "v1", "model_metadata": "hf-llama-13b"} 4 | {"model_id": "bard:20230327", "model_name": "bard", "model_version": "20230327", "model_metadata": "Google Bard 20230327"} 5 | {"model_id": "gpt-3.5-turbo:20230327", "model_name": "gpt-3.5-turbo", "model_version": "20230327", "model_metadata": "OpenAI ChatGPT gpt-3.5-turbo Chat Completion"} 6 | -------------------------------------------------------------------------------- /llava/eval/table/reviewer.jsonl: -------------------------------------------------------------------------------- 1 | {"reviewer_id": "gpt-4-0328-default", "prompt_id": 1, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for general questions"} 2 | {"reviewer_id": "gpt-4-0328-coding", "prompt_id": 2, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for coding questions"} 3 | {"reviewer_id": "gpt-4-0328-math", "prompt_id": 3, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for math questions"} 4 | {"reviewer_id": "gpt-4-0417-visual", "prompt_id": 4, "metadata": {"temperature": 0.2, "max_tokens": 1024}, "description": "GPT-4 for math questions"} 5 | -------------------------------------------------------------------------------- /llava/eval/webpage/figures/alpaca.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaVi-Lab/Visual-Table/c17ab793bc49e7c8d9d8fa9018da23b047ae19be/llava/eval/webpage/figures/alpaca.png -------------------------------------------------------------------------------- /llava/eval/webpage/figures/bard.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaVi-Lab/Visual-Table/c17ab793bc49e7c8d9d8fa9018da23b047ae19be/llava/eval/webpage/figures/bard.jpg -------------------------------------------------------------------------------- /llava/eval/webpage/figures/chatgpt.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /llava/eval/webpage/figures/llama.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaVi-Lab/Visual-Table/c17ab793bc49e7c8d9d8fa9018da23b047ae19be/llava/eval/webpage/figures/llama.jpg -------------------------------------------------------------------------------- /llava/eval/webpage/figures/swords_FILL0_wght300_GRAD0_opsz48.svg: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /llava/eval/webpage/figures/vicuna.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaVi-Lab/Visual-Table/c17ab793bc49e7c8d9d8fa9018da23b047ae19be/llava/eval/webpage/figures/vicuna.jpeg -------------------------------------------------------------------------------- /llava/eval/webpage/styles.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; 3 | background-color: #f8f9fa; 4 | } 5 | 6 | .navbar-dark .navbar-nav .nav-link { 7 | color: #f1cf68; 8 | font-size: 1.1rem; 9 | padding: 0.5rem 0.6rem; 10 | } 11 | 12 | .card-header { 13 | font-weight: bold; 14 | } 15 | 16 | .card { 17 | box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); 18 | transition: 0.3s; 19 | } 20 | 21 | .card:hover { 22 | box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2); 23 | } 24 | 25 | button { 26 | transition: background-color 0.3s; 27 | } 28 | 29 | button:hover { 30 | background-color: #007bff; 31 | } 32 | 33 | @media (max-width: 767px) { 34 | .form-row .form-group { 35 | margin-bottom: 10px; 36 | } 37 | } 38 | 39 | /* Extra styles */ 40 | 41 | .expandable-card .card-text-container { 42 | max-height: 200px; 43 | overflow-y: hidden; 44 | position: relative; 45 | } 46 | 47 | .expandable-card.expanded .card-text-container { 48 | max-height: none; 49 | } 50 | 51 | .expand-btn { 52 | position: relative; 53 | display: none; 54 | background-color: rgba(255, 255, 255, 0.8); 55 | color: #510c75; 56 | border-color: transparent; 57 | } 58 | 59 | .expand-btn:hover { 60 | background-color: rgba(200, 200, 200, 0.8); 61 | text-decoration: none; 62 | border-color: transparent; 63 | color: #510c75; 64 | } 65 | 66 | .expand-btn:focus { 67 | outline: none; 68 | text-decoration: none; 69 | } 70 | 71 | .expandable-card:not(.expanded) .card-text-container:after { 72 | content: ""; 73 | position: absolute; 74 | bottom: 0; 75 | left: 0; 76 | width: 100%; 77 | height: 90px; 78 | background: linear-gradient(rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 1)); 79 | } 80 | 81 | .expandable-card:not(.expanded) .expand-btn { 82 | margin-top: -40px; 83 | } 84 | 85 | .card-body { 86 | padding-bottom: 5px; 87 | } 88 | 89 | .vertical-flex-layout { 90 | justify-content: center; 91 | align-items: center; 92 | height: 100%; 93 | display: flex; 94 | flex-direction: column; 95 | gap: 5px; 96 | } 97 | 98 | .figure-img { 99 | max-width: 100%; 100 | height: auto; 101 | } 102 | 103 | .adjustable-font-size { 104 | font-size: calc(0.5rem + 2vw); 105 | } 106 | -------------------------------------------------------------------------------- /llava/mm_utils.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | from io import BytesIO 3 | import base64 4 | 5 | import torch 6 | from transformers import StoppingCriteria 7 | from llava.constants import IMAGE_TOKEN_INDEX 8 | 9 | 10 | def load_image_from_base64(image): 11 | return Image.open(BytesIO(base64.b64decode(image))) 12 | 13 | 14 | def expand2square(pil_img, background_color): 15 | width, height = pil_img.size 16 | if width == height: 17 | return pil_img 18 | elif width > height: 19 | result = Image.new(pil_img.mode, (width, width), background_color) 20 | result.paste(pil_img, (0, (width - height) // 2)) 21 | return result 22 | else: 
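        # Taller than wide: pad the left and right sides to make the canvas square.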
23 | result = Image.new(pil_img.mode, (height, height), background_color) 24 | result.paste(pil_img, ((height - width) // 2, 0)) 25 | return result 26 | 27 | 28 | def process_images(images, image_processor, model_cfg): 29 | image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None) 30 | new_images = [] 31 | if image_aspect_ratio == 'pad': 32 | for image in images: 33 | image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean)) 34 | image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] 35 | new_images.append(image) 36 | else: 37 | return image_processor(images, return_tensors='pt')['pixel_values'] 38 | if all(x.shape == new_images[0].shape for x in new_images): 39 | new_images = torch.stack(new_images, dim=0) 40 | return new_images 41 | 42 | 43 | def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None): 44 | prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')] 45 | 46 | def insert_separator(X, sep): 47 | return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] 48 | 49 | input_ids = [] 50 | offset = 0 51 | if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: 52 | offset = 1 53 | input_ids.append(prompt_chunks[0][0]) 54 | 55 | for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)): 56 | input_ids.extend(x[offset:]) 57 | 58 | if return_tensors is not None: 59 | if return_tensors == 'pt': 60 | return torch.tensor(input_ids, dtype=torch.long) 61 | raise ValueError(f'Unsupported tensor type: {return_tensors}') 62 | return input_ids 63 | 64 | 65 | def get_model_name_from_path(model_path): 66 | model_path = model_path.strip("/") 67 | model_paths = model_path.split("/") 68 | if model_paths[-1].startswith('checkpoint-'): 69 | return model_paths[-2] + "_" + model_paths[-1] 70 | else: 71 | return model_paths[-1] 72 | 73 | class KeywordsStoppingCriteria(StoppingCriteria): 74 | def __init__(self, keywords, tokenizer, input_ids): 75 | self.keywords = keywords 76 | self.keyword_ids = [] 77 | self.max_keyword_len = 0 78 | for keyword in keywords: 79 | cur_keyword_ids = tokenizer(keyword).input_ids 80 | if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id: 81 | cur_keyword_ids = cur_keyword_ids[1:] 82 | if len(cur_keyword_ids) > self.max_keyword_len: 83 | self.max_keyword_len = len(cur_keyword_ids) 84 | self.keyword_ids.append(torch.tensor(cur_keyword_ids)) 85 | self.tokenizer = tokenizer 86 | self.start_len = input_ids.shape[1] 87 | 88 | def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: 89 | offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len) 90 | self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids] 91 | for keyword_id in self.keyword_ids: 92 | if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all(): 93 | return True 94 | outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0] 95 | for keyword in self.keywords: 96 | if keyword in outputs: 97 | return True 98 | return False 99 | 100 | def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: 101 | outputs = [] 102 | for i in range(output_ids.shape[0]): 103 | outputs.append(self.call_for_batch(output_ids[i].unsqueeze(0), scores)) 104 | return all(outputs) 105 |
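A minimal sketch of how the helpers above are typically combined when preparing a single LLaVA inference call. It assumes `tokenizer`, `image_processor`, and `model_cfg` come from an already-loaded checkpoint; the `build_inputs` name, the `image_path`/`prompt` arguments, and the "</s>" stop string are illustrative assumptions (the stop string depends on the conversation template in use), not part of mm_utils.py.

from PIL import Image

from llava.constants import IMAGE_TOKEN_INDEX
from llava.mm_utils import KeywordsStoppingCriteria, process_images, tokenizer_image_token


def build_inputs(image_path, prompt, tokenizer, image_processor, model_cfg):
    # Pad (or resize) and normalize the image exactly as the vision tower expects.
    image = Image.open(image_path).convert("RGB")
    image_tensor = process_images([image], image_processor, model_cfg)

    # Replace the literal "<image>" placeholder in the prompt with IMAGE_TOKEN_INDEX.
    input_ids = tokenizer_image_token(
        prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt"
    ).unsqueeze(0)

    # Stop generation once the keyword appears in the decoded continuation.
    stopping_criteria = KeywordsStoppingCriteria(["</s>"], tokenizer, input_ids)
    return image_tensor, input_ids, stopping_criteria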
-------------------------------------------------------------------------------- /llava/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .language_model.llava_llama import LlavaLlamaForCausalLM, LlavaConfig 2 | from .language_model.llava_mpt import LlavaMPTForCausalLM, LlavaMPTConfig 3 | -------------------------------------------------------------------------------- /llava/model/apply_delta.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta 4 | """ 5 | import argparse 6 | 7 | import torch 8 | from tqdm import tqdm 9 | from transformers import AutoTokenizer, AutoModelForCausalLM 10 | from llava import LlavaLlamaForCausalLM 11 | 12 | 13 | def apply_delta(base_model_path, target_model_path, delta_path): 14 | print("Loading base model") 15 | base = AutoModelForCausalLM.from_pretrained( 16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 17 | 18 | print("Loading delta") 19 | delta = LlavaLlamaForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 20 | delta_tokenizer = AutoTokenizer.from_pretrained(delta_path) 21 | 22 | print("Applying delta") 23 | for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"): 24 | if name not in base.state_dict(): 25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model' 26 | continue 27 | if param.data.shape == base.state_dict()[name].shape: 28 | param.data += base.state_dict()[name] 29 | else: 30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], \ 31 | f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}' 32 | bparam = base.state_dict()[name] 33 | param.data[:bparam.shape[0], :bparam.shape[1]] += bparam 34 | 35 | print("Saving target model") 36 | delta.save_pretrained(target_model_path) 37 | delta_tokenizer.save_pretrained(target_model_path) 38 | 39 | 40 | if __name__ == "__main__": 41 | parser = argparse.ArgumentParser() 42 | parser.add_argument("--base-model-path", type=str, required=True) 43 | parser.add_argument("--target-model-path", type=str, required=True) 44 | parser.add_argument("--delta-path", type=str, required=True) 45 | 46 | args = parser.parse_args() 47 | 48 | apply_delta(args.base_model_path, args.target_model_path, args.delta_path) 49 | -------------------------------------------------------------------------------- /llava/model/consolidate.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate 4 | """ 5 | import argparse 6 | 7 | import torch 8 | from transformers import AutoTokenizer, AutoModelForCausalLM 9 | from llava.model import * 10 | from llava.model.utils import auto_upgrade 11 | 12 | 13 | def consolidate_ckpt(src_path, dst_path): 14 | print("Loading model") 15 | auto_upgrade(src_path) 16 | src_model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 17 | src_tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False) 18 | src_model.save_pretrained(dst_path) 19 | src_tokenizer.save_pretrained(dst_path) 20 | 21 | 22 | if __name__ == "__main__": 23 | parser = argparse.ArgumentParser() 24 | 
parser.add_argument("--src", type=str, required=True) 25 | parser.add_argument("--dst", type=str, required=True) 26 | 27 | args = parser.parse_args() 28 | 29 | consolidate_ckpt(args.src, args.dst) 30 | -------------------------------------------------------------------------------- /llava/model/language_model/llava_llama.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Haotian Liu 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from typing import List, Optional, Tuple, Union 17 | 18 | import torch 19 | import torch.nn as nn 20 | 21 | from transformers import AutoConfig, AutoModelForCausalLM, \ 22 | LlamaConfig, LlamaModel, LlamaForCausalLM 23 | 24 | from transformers.modeling_outputs import CausalLMOutputWithPast 25 | 26 | from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM 27 | 28 | 29 | class LlavaConfig(LlamaConfig): 30 | model_type = "llava" 31 | 32 | 33 | class LlavaLlamaModel(LlavaMetaModel, LlamaModel): 34 | config_class = LlavaConfig 35 | 36 | def __init__(self, config: LlamaConfig): 37 | super(LlavaLlamaModel, self).__init__(config) 38 | 39 | 40 | class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): 41 | config_class = LlavaConfig 42 | 43 | def __init__(self, config): 44 | super(LlamaForCausalLM, self).__init__(config) 45 | self.model = LlavaLlamaModel(config) 46 | self.pretraining_tp = config.pretraining_tp 47 | self.vocab_size = config.vocab_size 48 | self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) 49 | 50 | # Initialize weights and apply final processing 51 | self.post_init() 52 | 53 | def get_model(self): 54 | return self.model 55 | 56 | def forward( 57 | self, 58 | input_ids: torch.LongTensor = None, 59 | attention_mask: Optional[torch.Tensor] = None, 60 | position_ids: Optional[torch.LongTensor] = None, 61 | past_key_values: Optional[List[torch.FloatTensor]] = None, 62 | inputs_embeds: Optional[torch.FloatTensor] = None, 63 | labels: Optional[torch.LongTensor] = None, 64 | use_cache: Optional[bool] = None, 65 | output_attentions: Optional[bool] = None, 66 | output_hidden_states: Optional[bool] = None, 67 | images: Optional[torch.FloatTensor] = None, 68 | return_dict: Optional[bool] = None, 69 | ) -> Union[Tuple, CausalLMOutputWithPast]: 70 | 71 | if inputs_embeds is None: 72 | ( 73 | input_ids, 74 | position_ids, 75 | attention_mask, 76 | past_key_values, 77 | inputs_embeds, 78 | labels 79 | ) = self.prepare_inputs_labels_for_multimodal( 80 | input_ids, 81 | position_ids, 82 | attention_mask, 83 | past_key_values, 84 | labels, 85 | images 86 | ) 87 | 88 | return super().forward( 89 | input_ids=input_ids, 90 | attention_mask=attention_mask, 91 | position_ids=position_ids, 92 | past_key_values=past_key_values, 93 | inputs_embeds=inputs_embeds, 94 | labels=labels, 95 | use_cache=use_cache, 96 | output_attentions=output_attentions, 97 | output_hidden_states=output_hidden_states, 98 | return_dict=return_dict 99 | ) 100 | 
101 | def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): 102 | images = kwargs.pop("images", None) 103 | _inputs = super().prepare_inputs_for_generation( 104 | input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs 105 | ) 106 | if images is not None: 107 | _inputs['images'] = images 108 | return _inputs 109 | 110 | AutoConfig.register("llava", LlavaConfig) 111 | AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM) 112 | -------------------------------------------------------------------------------- /llava/model/language_model/mpt/adapt_tokenizer.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast 3 | Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] 4 | NUM_SENTINEL_TOKENS: int = 100 5 | 6 | def adapt_tokenizer_for_denoising(tokenizer: Tokenizer): 7 | """Adds sentinel tokens and padding token (if missing). 8 | 9 | Expands the tokenizer vocabulary to include sentinel tokens 10 | used in mixture-of-denoiser tasks as well as a padding token. 11 | 12 | All added tokens are added as special tokens. No tokens are 13 | added if sentinel tokens and padding token already exist. 14 | """ 15 | sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)] 16 | tokenizer.add_tokens(sentinels_to_add, special_tokens=True) 17 | if tokenizer.pad_token is None: 18 | tokenizer.add_tokens('<pad>', special_tokens=True) 19 | tokenizer.pad_token = '<pad>' 20 | assert tokenizer.pad_token_id is not None 21 | sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]) 22 | _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids 23 | tokenizer.sentinel_token_ids = _sentinel_token_ids 24 | 25 | class AutoTokenizerForMOD(AutoTokenizer): 26 | """AutoTokenizer + Adaptation for MOD. 27 | 28 | A simple wrapper around AutoTokenizer to make instantiating 29 | an MOD-adapted tokenizer a bit easier. 30 | 31 | MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>), 32 | a padding token, and a property to get the token ids of the 33 | sentinel tokens.
34 | """ 35 | 36 | @classmethod 37 | def from_pretrained(cls, *args, **kwargs): 38 | """See `AutoTokenizer.from_pretrained` docstring.""" 39 | tokenizer = super().from_pretrained(*args, **kwargs) 40 | adapt_tokenizer_for_denoising(tokenizer) 41 | return tokenizer -------------------------------------------------------------------------------- /llava/model/language_model/mpt/blocks.py: -------------------------------------------------------------------------------- 1 | """GPT Blocks used for the GPT Model.""" 2 | from typing import Dict, Optional, Tuple 3 | import torch 4 | import torch.nn as nn 5 | from .attention import ATTN_CLASS_REGISTRY 6 | from .norm import NORM_CLASS_REGISTRY 7 | 8 | class MPTMLP(nn.Module): 9 | 10 | def __init__(self, d_model: int, expansion_ratio: int, device: Optional[str]=None): 11 | super().__init__() 12 | self.up_proj = nn.Linear(d_model, expansion_ratio * d_model, device=device) 13 | self.act = nn.GELU(approximate='none') 14 | self.down_proj = nn.Linear(expansion_ratio * d_model, d_model, device=device) 15 | self.down_proj._is_residual = True 16 | 17 | def forward(self, x): 18 | return self.down_proj(self.act(self.up_proj(x))) 19 | 20 | class MPTBlock(nn.Module): 21 | 22 | def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs): 23 | del kwargs 24 | super().__init__() 25 | norm_class = NORM_CLASS_REGISTRY[norm_type.lower()] 26 | attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']] 27 | self.norm_1 = norm_class(d_model, device=device) 28 | self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device) 29 | self.norm_2 = norm_class(d_model, device=device) 30 | self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device) 31 | self.resid_attn_dropout = nn.Dropout(resid_pdrop) 32 | self.resid_ffn_dropout = nn.Dropout(resid_pdrop) 33 | 34 | def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: 35 | a = self.norm_1(x) 36 | (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal) 37 | x = x + self.resid_attn_dropout(b) 38 | m = self.norm_2(x) 39 | n = self.ffn(m) 40 | x = x + self.resid_ffn_dropout(n) 41 | return (x, attn_weights, past_key_value) -------------------------------------------------------------------------------- /llava/model/language_model/mpt/custom_embedding.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch import Tensor 5 | 6 | class SharedEmbedding(nn.Embedding): 7 | 8 | def forward(self, input: Tensor, unembed: bool=False) -> Tensor: 9 | if unembed: 10 | return F.linear(input, self.weight) 11 | return super().forward(input) 
-------------------------------------------------------------------------------- /llava/model/language_model/mpt/meta_init_context.py: -------------------------------------------------------------------------------- 1 | from contextlib import contextmanager 2 | import torch 3 | import torch.nn as nn 4 | 5 | @contextmanager 6 | def init_empty_weights(include_buffers: bool=False): 7 | """Meta initialization context manager. 8 | 9 | A context manager under which models are initialized with all parameters 10 | on the meta device, therefore creating an empty model. Useful when just 11 | initializing the model would blow the available RAM. 12 | 13 | Args: 14 | include_buffers (`bool`, *optional*, defaults to `False`): Whether or 15 | not to also put all buffers on the meta device while initializing. 16 | 17 | Example: 18 | ```python 19 | import torch.nn as nn 20 | 21 | # Initialize a model with 100 billions parameters in no time and without using any RAM. 22 | with init_empty_weights(): 23 | tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) 24 | ``` 25 | 26 | 27 | 28 | Any model created under this context manager has no weights. As such you can't do something like 29 | `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`]. 30 | 31 | 32 | """ 33 | with init_on_device(torch.device('meta'), include_buffers=include_buffers) as f: 34 | yield f 35 | 36 | @contextmanager 37 | def init_on_device(device: torch.device, include_buffers: bool=False): 38 | """Device initialization context manager. 39 | 40 | A context manager under which models are initialized with all parameters 41 | on the specified device. 42 | 43 | Args: 44 | device (`torch.device`): Device to initialize all parameters on. 45 | include_buffers (`bool`, *optional*, defaults to `False`): Whether or 46 | not to also put all buffers on the meta device while initializing. 
47 | 48 | Example: 49 | ```python 50 | import torch.nn as nn 51 | 52 | with init_on_device(device=torch.device("cuda")): 53 | tst = nn.Linear(100, 100) # on `cuda` device 54 | ``` 55 | """ 56 | old_register_parameter = nn.Module.register_parameter 57 | if include_buffers: 58 | old_register_buffer = nn.Module.register_buffer 59 | 60 | def register_empty_parameter(module, name, param): 61 | old_register_parameter(module, name, param) 62 | if param is not None: 63 | param_cls = type(module._parameters[name]) 64 | kwargs = module._parameters[name].__dict__ 65 | module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs) 66 | 67 | def register_empty_buffer(module, name, buffer): 68 | old_register_buffer(module, name, buffer) 69 | if buffer is not None: 70 | module._buffers[name] = module._buffers[name].to(device) 71 | if include_buffers: 72 | tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ['empty', 'zeros', 'ones', 'full']} 73 | else: 74 | tensor_constructors_to_patch = {} 75 | 76 | def patch_tensor_constructor(fn): 77 | 78 | def wrapper(*args, **kwargs): 79 | kwargs['device'] = device 80 | return fn(*args, **kwargs) 81 | return wrapper 82 | try: 83 | nn.Module.register_parameter = register_empty_parameter 84 | if include_buffers: 85 | nn.Module.register_buffer = register_empty_buffer 86 | for torch_function_name in tensor_constructors_to_patch.keys(): 87 | setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name))) 88 | yield 89 | finally: 90 | nn.Module.register_parameter = old_register_parameter 91 | if include_buffers: 92 | nn.Module.register_buffer = old_register_buffer 93 | for (torch_function_name, old_torch_function) in tensor_constructors_to_patch.items(): 94 | setattr(torch, torch_function_name, old_torch_function) -------------------------------------------------------------------------------- /llava/model/language_model/mpt/norm.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def _cast_if_autocast_enabled(tensor): 4 | if torch.is_autocast_enabled(): 5 | if tensor.device.type == 'cuda': 6 | dtype = torch.get_autocast_gpu_dtype() 7 | elif tensor.device.type == 'cpu': 8 | dtype = torch.get_autocast_cpu_dtype() 9 | else: 10 | raise NotImplementedError() 11 | return tensor.to(dtype=dtype) 12 | return tensor 13 | 14 | class LPLayerNorm(torch.nn.LayerNorm): 15 | 16 | def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None): 17 | super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype) 18 | 19 | def forward(self, x): 20 | module_device = x.device 21 | downcast_x = _cast_if_autocast_enabled(x) 22 | downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight 23 | downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias 24 | with torch.autocast(enabled=False, device_type=module_device.type): 25 | return torch.nn.functional.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps) 26 | 27 | def rms_norm(x, weight=None, eps=1e-05): 28 | output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps) 29 | if weight is not None: 30 | return output * weight 31 | return output 32 | 33 | class RMSNorm(torch.nn.Module): 34 | 35 | def __init__(self, normalized_shape, eps=1e-05, weight=True,
dtype=None, device=None): 36 | super().__init__() 37 | self.eps = eps 38 | if weight: 39 | self.weight = torch.nn.Parameter(torch.ones(normalized_shape, dtype=dtype, device=device)) 40 | else: 41 | self.register_parameter('weight', None) 42 | 43 | def forward(self, x): 44 | return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype) 45 | 46 | class LPRMSNorm(RMSNorm): 47 | 48 | def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None): 49 | super().__init__(normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device) 50 | 51 | def forward(self, x): 52 | downcast_x = _cast_if_autocast_enabled(x) 53 | downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight 54 | with torch.autocast(enabled=False, device_type=x.device.type): 55 | return rms_norm(downcast_x, downcast_weight, self.eps).to(dtype=x.dtype) 56 | NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm} -------------------------------------------------------------------------------- /llava/model/make_delta.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/llava-7b --delta ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta 4 | """ 5 | import argparse 6 | 7 | import torch 8 | from tqdm import tqdm 9 | from transformers import AutoTokenizer, AutoModelForCausalLM 10 | from llava.model.utils import auto_upgrade 11 | 12 | 13 | def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id): 14 | print("Loading base model") 15 | base = AutoModelForCausalLM.from_pretrained( 16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 17 | 18 | print("Loading target model") 19 | auto_upgrade(target_model_path) 20 | target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 21 | 22 | print("Calculating delta") 23 | for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"): 24 | if name not in base.state_dict(): 25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model' 26 | continue 27 | if param.data.shape == base.state_dict()[name].shape: 28 | param.data -= base.state_dict()[name] 29 | else: 30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}' 31 | bparam = base.state_dict()[name] 32 | param.data[:bparam.shape[0], :bparam.shape[1]] -= bparam 33 | 34 | print("Saving delta") 35 | if hub_repo_id: 36 | kwargs = {"push_to_hub": True, "repo_id": hub_repo_id} 37 | else: 38 | kwargs = {} 39 | target.save_pretrained(delta_path, **kwargs) 40 | target_tokenizer = AutoTokenizer.from_pretrained(target_model_path) 41 | target_tokenizer.save_pretrained(delta_path, **kwargs) 42 | 43 | 44 | if __name__ == "__main__": 45 | parser = argparse.ArgumentParser() 46 | parser.add_argument("--base-model-path", type=str, required=True) 47 | parser.add_argument("--target-model-path", type=str, required=True) 48 | parser.add_argument("--delta-path", type=str, required=True) 49 | parser.add_argument("--hub-repo-id", type=str, default=None) 50 | args = parser.parse_args() 51 | 52 | make_delta(args.base_model_path, args.target_model_path, args.delta_path, 
args.hub_repo_id) 53 | -------------------------------------------------------------------------------- /llava/model/multimodal_encoder/builder.py: -------------------------------------------------------------------------------- 1 | import os 2 | from .clip_encoder import CLIPVisionTower 3 | 4 | 5 | def build_vision_tower(vision_tower_cfg, **kwargs): 6 | vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None)) 7 | is_absolute_path_exists = os.path.exists(vision_tower) 8 | if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"): 9 | return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs) 10 | 11 | raise ValueError(f'Unknown vision tower: {vision_tower}') 12 | -------------------------------------------------------------------------------- /llava/model/multimodal_encoder/clip_encoder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig 5 | 6 | 7 | class CLIPVisionTower(nn.Module): 8 | def __init__(self, vision_tower, args, delay_load=False): 9 | super().__init__() 10 | 11 | self.is_loaded = False 12 | 13 | self.vision_tower_name = vision_tower 14 | self.select_layer = args.mm_vision_select_layer 15 | self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch') 16 | 17 | if not delay_load: 18 | self.load_model() 19 | else: 20 | self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name) 21 | 22 | def load_model(self): 23 | self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name) 24 | self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name) 25 | self.vision_tower.requires_grad_(False) 26 | 27 | self.is_loaded = True 28 | 29 | def feature_select(self, image_forward_outs): 30 | image_features = image_forward_outs.hidden_states[self.select_layer] 31 | if self.select_feature == 'patch': 32 | image_features = image_features[:, 1:] 33 | elif self.select_feature == 'cls_patch': 34 | image_features = image_features 35 | else: 36 | raise ValueError(f'Unexpected select feature: {self.select_feature}') 37 | return image_features 38 | 39 | @torch.no_grad() 40 | def forward(self, images): 41 | if type(images) is list: 42 | image_features = [] 43 | for image in images: 44 | image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True) 45 | image_feature = self.feature_select(image_forward_out).to(image.dtype) 46 | image_features.append(image_feature) 47 | else: 48 | image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True) 49 | image_features = self.feature_select(image_forward_outs).to(images.dtype) 50 | 51 | return image_features 52 | 53 | @property 54 | def dummy_feature(self): 55 | return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype) 56 | 57 | @property 58 | def dtype(self): 59 | return self.vision_tower.dtype 60 | 61 | @property 62 | def device(self): 63 | return self.vision_tower.device 64 | 65 | @property 66 | def config(self): 67 | if self.is_loaded: 68 | return self.vision_tower.config 69 | else: 70 | return self.cfg_only 71 | 72 | @property 73 | def hidden_size(self): 74 | return self.config.hidden_size 75 | 76 | @property 77 | def num_patches(self): 78 | return (self.config.image_size // self.config.patch_size) ** 2 79 
| -------------------------------------------------------------------------------- /llava/model/multimodal_projector/builder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import re 4 | 5 | 6 | class IdentityMap(nn.Module): 7 | def __init__(self): 8 | super().__init__() 9 | 10 | def forward(self, x, *args, **kwargs): 11 | return x 12 | 13 | @property 14 | def config(self): 15 | return {"mm_projector_type": 'identity'} 16 | 17 | 18 | class SimpleResBlock(nn.Module): 19 | def __init__(self, channels): 20 | super().__init__() 21 | self.pre_norm = nn.LayerNorm(channels) 22 | 23 | self.proj = nn.Sequential( 24 | nn.Linear(channels, channels), 25 | nn.GELU(), 26 | nn.Linear(channels, channels) 27 | ) 28 | def forward(self, x): 29 | x = self.pre_norm(x) 30 | return x + self.proj(x) 31 | 32 | 33 | def build_vision_projector(config, delay_load=False, **kwargs): 34 | projector_type = getattr(config, 'mm_projector_type', 'linear') 35 | 36 | if projector_type == 'linear': 37 | return nn.Linear(config.mm_hidden_size, config.hidden_size) 38 | 39 | mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) 40 | if mlp_gelu_match: 41 | mlp_depth = int(mlp_gelu_match.group(1)) 42 | modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)] 43 | for _ in range(1, mlp_depth): 44 | modules.append(nn.GELU()) 45 | modules.append(nn.Linear(config.hidden_size, config.hidden_size)) 46 | return nn.Sequential(*modules) 47 | 48 | if projector_type == 'identity': 49 | return IdentityMap() 50 | 51 | raise ValueError(f'Unknown projector type: {projector_type}') 52 | -------------------------------------------------------------------------------- /llava/model/utils.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoConfig 2 | 3 | 4 | def auto_upgrade(config): 5 | cfg = AutoConfig.from_pretrained(config) 6 | if 'llava' in config and 'llava' not in cfg.model_type: 7 | assert cfg.model_type == 'llama' 8 | print("You are using newer LLaVA code base, while the checkpoint of v0 is from older code base.") 9 | print("You must upgrade the checkpoint to the new code base (this can be done automatically).") 10 | confirm = input("Please confirm that you want to upgrade the checkpoint. 
[Y/N]") 11 | if confirm.lower() in ["y", "yes"]: 12 | print("Upgrading checkpoint...") 13 | assert len(cfg.architectures) == 1 14 | setattr(cfg.__class__, "model_type", "llava") 15 | cfg.architectures[0] = 'LlavaLlamaForCausalLM' 16 | cfg.save_pretrained(config) 17 | print("Checkpoint upgraded.") 18 | else: 19 | print("Checkpoint upgrade aborted.") 20 | exit(1) 21 | -------------------------------------------------------------------------------- /llava/serve/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaVi-Lab/Visual-Table/c17ab793bc49e7c8d9d8fa9018da23b047ae19be/llava/serve/__init__.py -------------------------------------------------------------------------------- /llava/serve/examples/extreme_ironing.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaVi-Lab/Visual-Table/c17ab793bc49e7c8d9d8fa9018da23b047ae19be/llava/serve/examples/extreme_ironing.jpg -------------------------------------------------------------------------------- /llava/serve/examples/waterview.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LaVi-Lab/Visual-Table/c17ab793bc49e7c8d9d8fa9018da23b047ae19be/llava/serve/examples/waterview.jpg -------------------------------------------------------------------------------- /llava/serve/register_worker.py: -------------------------------------------------------------------------------- 1 | """ 2 | Manually register workers. 3 | 4 | Usage: 5 | python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name http://localhost:21002 6 | """ 7 | 8 | import argparse 9 | 10 | import requests 11 | 12 | if __name__ == "__main__": 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument("--controller-address", type=str) 15 | parser.add_argument("--worker-name", type=str) 16 | parser.add_argument("--check-heart-beat", action="store_true") 17 | args = parser.parse_args() 18 | 19 | url = args.controller_address + "/register_worker" 20 | data = { 21 | "worker_name": args.worker_name, 22 | "check_heart_beat": args.check_heart_beat, 23 | "worker_status": None, 24 | } 25 | r = requests.post(url, json=data) 26 | assert r.status_code == 200 27 | -------------------------------------------------------------------------------- /llava/serve/test_message.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | 4 | import requests 5 | 6 | from llava.conversation import default_conversation 7 | 8 | 9 | def main(): 10 | if args.worker_address: 11 | worker_addr = args.worker_address 12 | else: 13 | controller_addr = args.controller_address 14 | ret = requests.post(controller_addr + "/refresh_all_workers") 15 | ret = requests.post(controller_addr + "/list_models") 16 | models = ret.json()["models"] 17 | models.sort() 18 | print(f"Models: {models}") 19 | 20 | ret = requests.post(controller_addr + "/get_worker_address", 21 | json={"model": args.model_name}) 22 | worker_addr = ret.json()["address"] 23 | print(f"worker_addr: {worker_addr}") 24 | 25 | if worker_addr == "": 26 | return 27 | 28 | conv = default_conversation.copy() 29 | conv.append_message(conv.roles[0], args.message) 30 | prompt = conv.get_prompt() 31 | 32 | headers = {"User-Agent": "LLaVA Client"} 33 | pload = { 34 | "model": args.model_name, 35 | "prompt": prompt, 36 | "max_new_tokens": args.max_new_tokens, 37 | 
"temperature": 0.7, 38 | "stop": conv.sep, 39 | } 40 | response = requests.post(worker_addr + "/worker_generate_stream", headers=headers, 41 | json=pload, stream=True) 42 | 43 | print(prompt.replace(conv.sep, "\n"), end="") 44 | for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"): 45 | if chunk: 46 | data = json.loads(chunk.decode("utf-8")) 47 | output = data["text"].split(conv.sep)[-1] 48 | print(output, end="\r") 49 | print("") 50 | 51 | 52 | if __name__ == "__main__": 53 | parser = argparse.ArgumentParser() 54 | parser.add_argument("--controller-address", type=str, default="http://localhost:21001") 55 | parser.add_argument("--worker-address", type=str) 56 | parser.add_argument("--model-name", type=str, default="facebook/opt-350m") 57 | parser.add_argument("--max-new-tokens", type=int, default=32) 58 | parser.add_argument("--message", type=str, default= 59 | "Tell me a story with more than 1000 words.") 60 | args = parser.parse_args() 61 | 62 | main() 63 | -------------------------------------------------------------------------------- /llava/train/train_mem.py: -------------------------------------------------------------------------------- 1 | # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright: 2 | # Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright: 3 | # Make it more memory efficient by monkey patching the LLaMA model with FlashAttn. 4 | 5 | # Need to call this before importing transformers. 6 | from llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn 7 | 8 | replace_llama_attn_with_flash_attn() 9 | 10 | from llava.train.train import train 11 | 12 | if __name__ == "__main__": 13 | train() 14 | -------------------------------------------------------------------------------- /llava/train/train_xformers.py: -------------------------------------------------------------------------------- 1 | # Make it more memory efficient by monkey patching the LLaMA model with xformers attention. 2 | 3 | # Need to call this before importing transformers. 4 | from llava.train.llama_xformers_attn_monkey_patch import ( 5 | replace_llama_attn_with_xformers_attn, 6 | ) 7 | 8 | replace_llama_attn_with_xformers_attn() 9 | 10 | from llava.train.train import train 11 | 12 | if __name__ == "__main__": 13 | train() 14 | -------------------------------------------------------------------------------- /llava/utils.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | import logging.handlers 4 | import os 5 | import sys 6 | 7 | import requests 8 | 9 | from llava.constants import LOGDIR 10 | 11 | server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" 12 | moderation_msg = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN." 
13 | 14 | handler = None 15 | 16 | 17 | def build_logger(logger_name, logger_filename): 18 | global handler 19 | 20 | formatter = logging.Formatter( 21 | fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s", 22 | datefmt="%Y-%m-%d %H:%M:%S", 23 | ) 24 | 25 | # Set the format of root handlers 26 | if not logging.getLogger().handlers: 27 | logging.basicConfig(level=logging.INFO) 28 | logging.getLogger().handlers[0].setFormatter(formatter) 29 | 30 | # Redirect stdout and stderr to loggers 31 | stdout_logger = logging.getLogger("stdout") 32 | stdout_logger.setLevel(logging.INFO) 33 | sl = StreamToLogger(stdout_logger, logging.INFO) 34 | sys.stdout = sl 35 | 36 | stderr_logger = logging.getLogger("stderr") 37 | stderr_logger.setLevel(logging.ERROR) 38 | sl = StreamToLogger(stderr_logger, logging.ERROR) 39 | sys.stderr = sl 40 | 41 | # Get logger 42 | logger = logging.getLogger(logger_name) 43 | logger.setLevel(logging.INFO) 44 | 45 | # Add a file handler for all loggers 46 | if handler is None: 47 | os.makedirs(LOGDIR, exist_ok=True) 48 | filename = os.path.join(LOGDIR, logger_filename) 49 | handler = logging.handlers.TimedRotatingFileHandler( 50 | filename, when='D', utc=True, encoding='UTF-8') 51 | handler.setFormatter(formatter) 52 | 53 | for name, item in logging.root.manager.loggerDict.items(): 54 | if isinstance(item, logging.Logger): 55 | item.addHandler(handler) 56 | 57 | return logger 58 | 59 | 60 | class StreamToLogger(object): 61 | """ 62 | Fake file-like stream object that redirects writes to a logger instance. 63 | """ 64 | def __init__(self, logger, log_level=logging.INFO): 65 | self.terminal = sys.stdout 66 | self.logger = logger 67 | self.log_level = log_level 68 | self.linebuf = '' 69 | 70 | def __getattr__(self, attr): 71 | return getattr(self.terminal, attr) 72 | 73 | def write(self, buf): 74 | temp_linebuf = self.linebuf + buf 75 | self.linebuf = '' 76 | for line in temp_linebuf.splitlines(True): 77 | # From the io.TextIOWrapper docs: 78 | # On output, if newline is None, any '\n' characters written 79 | # are translated to the system default line separator. 80 | # By default sys.stdout.write() expects '\n' newlines and then 81 | # translates them so this is still cross platform. 82 | if line[-1] == '\n': 83 | self.logger.log(self.log_level, line.rstrip()) 84 | else: 85 | self.linebuf += line 86 | 87 | def flush(self): 88 | if self.linebuf != '': 89 | self.logger.log(self.log_level, self.linebuf.rstrip()) 90 | self.linebuf = '' 91 | 92 | 93 | def disable_torch_init(): 94 | """ 95 | Disable the redundant torch default initialization to accelerate model creation. 96 | """ 97 | import torch 98 | setattr(torch.nn.Linear, "reset_parameters", lambda self: None) 99 | setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None) 100 | 101 | 102 | def violates_moderation(text): 103 | """ 104 | Check whether the text violates OpenAI moderation API. 
105 | """ 106 | url = "https://api.openai.com/v1/moderations" 107 | headers = {"Content-Type": "application/json", 108 | "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]} 109 | text = text.replace("\n", "") 110 | data = "{" + '"input": ' + f'"{text}"' + "}" 111 | data = data.encode("utf-8") 112 | try: 113 | ret = requests.post(url, headers=headers, data=data, timeout=5) 114 | flagged = ret.json()["results"][0]["flagged"] 115 | except requests.exceptions.RequestException as e: 116 | flagged = False 117 | except KeyError as e: 118 | flagged = False 119 | 120 | return flagged 121 | 122 | 123 | def pretty_print_semaphore(semaphore): 124 | if semaphore is None: 125 | return "None" 126 | return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})" 127 | -------------------------------------------------------------------------------- /preprocess/collect_gpt4v_VT/mm-vet_imageID.txt: -------------------------------------------------------------------------------- 1 | v1_178.jpg 2 | v1_216.png 3 | v1_7.png 4 | v1_73.jpg 5 | v1_31.png 6 | v1_94.jpg 7 | v1_211.png 8 | v1_213.jpg 9 | v1_141.jpg 10 | v1_39.jpg 11 | v1_195.jpg 12 | v1_198.jpg 13 | v1_207.jpg 14 | v1_91.jpg 15 | v1_38.jpg 16 | v1_191.jpg 17 | v1_110.jpg 18 | v1_155.png 19 | v1_212.png 20 | v1_150.jpg 21 | v1_15.png 22 | v1_84.png 23 | v1_34.jpg 24 | v1_107.jpg 25 | v1_37.jpg 26 | v1_209.png 27 | v1_194.jpg 28 | v1_77.jpg 29 | v1_134.jpg 30 | v1_89.jpg 31 | v1_87.jpg 32 | v1_93.jpg 33 | v1_126.jpg 34 | v1_169.jpg 35 | v1_208.png 36 | v1_42.png 37 | v1_58.png 38 | v1_75.jpg 39 | v1_64.png 40 | v1_217.png 41 | v1_177.jpg 42 | v1_80.jpg 43 | v1_142.jpg 44 | v1_8.png 45 | v1_127.jpg 46 | v1_21.jpg 47 | v1_153.jpg 48 | v1_67.png 49 | v1_124.jpg 50 | v1_159.jpg 51 | v1_192.jpg 52 | v1_146.jpg 53 | v1_5.png 54 | v1_129.jpg 55 | v1_122.jpg 56 | v1_160.jpg 57 | v1_57.jpg 58 | v1_187.jpg 59 | v1_68.png 60 | v1_148.jpg 61 | v1_76.png 62 | v1_49.jpg 63 | v1_133.jpg 64 | v1_196.jpg 65 | v1_164.jpg 66 | v1_204.jpg 67 | v1_70.png 68 | v1_53.png 69 | v1_83.png 70 | v1_214.png 71 | v1_128.jpg 72 | v1_11.jpg 73 | v1_55.png 74 | v1_152.jpg 75 | v1_173.jpg 76 | v1_44.png 77 | v1_40.png 78 | v1_52.png 79 | v1_156.jpg 80 | v1_189.jpg 81 | v1_135.jpg 82 | v1_143.jpg 83 | v1_149.jpg 84 | v1_25.png 85 | v1_18.jpg 86 | v1_113.jpg 87 | v1_139.jpg 88 | v1_183.jpg 89 | v1_35.png 90 | v1_190.jpg 91 | v1_175.jpg 92 | v1_138.jpg 93 | v1_0.png 94 | v1_97.jpg 95 | v1_202.jpg 96 | v1_145.jpg 97 | v1_104.png 98 | v1_41.png 99 | v1_103.png 100 | v1_174.jpg 101 | v1_115.jpg 102 | v1_154.png 103 | v1_215.jpg 104 | v1_136.jpg 105 | v1_144.jpg 106 | v1_199.jpg 107 | v1_109.jpg 108 | v1_99.png 109 | v1_185.jpg 110 | v1_120.jpg 111 | v1_176.jpg 112 | v1_111.jpg 113 | v1_182.jpg 114 | v1_170.jpg 115 | v1_82.png 116 | v1_132.jpg 117 | v1_79.jpg 118 | v1_27.png 119 | v1_90.jpg 120 | v1_131.jpg 121 | v1_16.jpg 122 | v1_140.jpg 123 | v1_33.png 124 | v1_114.jpg 125 | v1_206.jpg 126 | v1_181.jpg 127 | v1_197.jpg 128 | v1_205.jpg 129 | v1_66.jpg 130 | v1_162.jpg 131 | v1_13.jpg 132 | v1_200.jpg 133 | v1_96.jpg 134 | v1_151.jpg 135 | v1_60.png 136 | v1_121.jpg 137 | v1_98.jpg 138 | v1_71.jpg 139 | v1_210.png 140 | v1_1.png 141 | v1_30.jpg 142 | v1_54.png 143 | v1_108.jpg 144 | v1_193.jpg 145 | v1_95.jpg 146 | v1_157.jpg 147 | v1_28.jpg 148 | v1_56.png 149 | v1_116.jpg 150 | v1_119.jpg 151 | v1_23.png 152 | v1_117.jpg 153 | v1_36.jpg 154 | v1_125.jpg 155 | v1_62.png 156 | v1_86.jpg 157 | v1_105.png 158 | v1_50.jpg 159 | v1_100.png 160 | v1_9.png 161 | v1_78.jpg 162 | 
v1_32.png 163 | v1_147.jpg 164 | v1_3.png 165 | v1_112.jpg 166 | v1_168.jpg 167 | v1_188.jpg 168 | v1_20.jpg 169 | v1_158.jpg 170 | v1_186.jpg 171 | v1_85.jpg 172 | v1_203.jpg 173 | v1_180.jpg 174 | v1_65.png 175 | v1_81.jpg 176 | v1_72.png 177 | v1_74.jpg 178 | v1_179.jpg 179 | v1_161.jpg 180 | v1_172.jpg 181 | v1_46.png 182 | v1_92.jpg 183 | v1_88.jpg 184 | v1_106.jpg 185 | v1_69.jpg 186 | v1_130.jpg 187 | v1_184.jpg 188 | v1_48.jpg 189 | v1_101.png 190 | v1_102.png 191 | v1_165.jpg 192 | v1_17.jpg 193 | v1_167.jpg 194 | v1_166.jpg 195 | v1_123.jpg 196 | v1_118.jpg 197 | v1_163.jpg 198 | v1_137.jpg 199 | v1_201.jpg 200 | v1_171.jpg 201 | -------------------------------------------------------------------------------- /preprocess/collect_gpt4v_VT/mmvp_image_ids.txt: -------------------------------------------------------------------------------- 1 | mmvp_1 2 | mmvp_2 3 | mmvp_3 4 | mmvp_4 5 | mmvp_5 6 | mmvp_6 7 | mmvp_7 8 | mmvp_8 9 | mmvp_9 10 | mmvp_10 11 | mmvp_11 12 | mmvp_12 13 | mmvp_13 14 | mmvp_14 15 | mmvp_15 16 | mmvp_16 17 | mmvp_17 18 | mmvp_18 19 | mmvp_19 20 | mmvp_20 21 | mmvp_21 22 | mmvp_22 23 | mmvp_23 24 | mmvp_24 25 | mmvp_25 26 | mmvp_26 27 | mmvp_27 28 | mmvp_28 29 | mmvp_29 30 | mmvp_30 31 | mmvp_31 32 | mmvp_32 33 | mmvp_33 34 | mmvp_34 35 | mmvp_35 36 | mmvp_36 37 | mmvp_37 38 | mmvp_38 39 | mmvp_39 40 | mmvp_40 41 | mmvp_41 42 | mmvp_42 43 | mmvp_43 44 | mmvp_44 45 | mmvp_45 46 | mmvp_46 47 | mmvp_47 48 | mmvp_48 49 | mmvp_49 50 | mmvp_50 51 | mmvp_51 52 | mmvp_52 53 | mmvp_53 54 | mmvp_54 55 | mmvp_55 56 | mmvp_56 57 | mmvp_57 58 | mmvp_58 59 | mmvp_59 60 | mmvp_60 61 | mmvp_61 62 | mmvp_62 63 | mmvp_63 64 | mmvp_64 65 | mmvp_65 66 | mmvp_66 67 | mmvp_67 68 | mmvp_68 69 | mmvp_69 70 | mmvp_70 71 | mmvp_71 72 | mmvp_72 73 | mmvp_73 74 | mmvp_74 75 | mmvp_75 76 | mmvp_76 77 | mmvp_77 78 | mmvp_78 79 | mmvp_79 80 | mmvp_80 81 | mmvp_81 82 | mmvp_82 83 | mmvp_83 84 | mmvp_84 85 | mmvp_85 86 | mmvp_86 87 | mmvp_87 88 | mmvp_88 89 | mmvp_89 90 | mmvp_90 91 | mmvp_91 92 | mmvp_92 93 | mmvp_93 94 | mmvp_94 95 | mmvp_95 96 | mmvp_96 97 | mmvp_97 98 | mmvp_98 99 | mmvp_99 100 | mmvp_100 101 | mmvp_101 102 | mmvp_102 103 | mmvp_103 104 | mmvp_104 105 | mmvp_105 106 | mmvp_106 107 | mmvp_107 108 | mmvp_108 109 | mmvp_109 110 | mmvp_110 111 | mmvp_111 112 | mmvp_112 113 | mmvp_113 114 | mmvp_114 115 | mmvp_115 116 | mmvp_116 117 | mmvp_117 118 | mmvp_118 119 | mmvp_119 120 | mmvp_120 121 | mmvp_121 122 | mmvp_122 123 | mmvp_123 124 | mmvp_124 125 | mmvp_125 126 | mmvp_126 127 | mmvp_127 128 | mmvp_128 129 | mmvp_129 130 | mmvp_130 131 | mmvp_131 132 | mmvp_132 133 | mmvp_133 134 | mmvp_134 135 | mmvp_135 136 | mmvp_136 137 | mmvp_137 138 | mmvp_138 139 | mmvp_139 140 | mmvp_140 141 | mmvp_141 142 | mmvp_142 143 | mmvp_143 144 | mmvp_144 145 | mmvp_145 146 | mmvp_146 147 | mmvp_147 148 | mmvp_148 149 | mmvp_149 150 | mmvp_150 151 | mmvp_151 152 | mmvp_152 153 | mmvp_153 154 | mmvp_154 155 | mmvp_155 156 | mmvp_156 157 | mmvp_157 158 | mmvp_158 159 | mmvp_159 160 | mmvp_160 161 | mmvp_161 162 | mmvp_162 163 | mmvp_163 164 | mmvp_164 165 | mmvp_165 166 | mmvp_166 167 | mmvp_167 168 | mmvp_168 169 | mmvp_169 170 | mmvp_170 171 | mmvp_171 172 | mmvp_172 173 | mmvp_173 174 | mmvp_174 175 | mmvp_175 176 | mmvp_176 177 | mmvp_177 178 | mmvp_178 179 | mmvp_179 180 | mmvp_180 181 | mmvp_181 182 | mmvp_182 183 | mmvp_183 184 | mmvp_184 185 | mmvp_185 186 | mmvp_186 187 | mmvp_187 188 | mmvp_188 189 | mmvp_189 190 | mmvp_190 191 | mmvp_191 192 | mmvp_192 193 | mmvp_193 194 | mmvp_194 
195 | mmvp_195 196 | mmvp_196 197 | mmvp_197 198 | mmvp_198 199 | mmvp_199 200 | mmvp_200 201 | mmvp_201 202 | mmvp_202 203 | mmvp_203 204 | mmvp_204 205 | mmvp_205 206 | mmvp_206 207 | mmvp_207 208 | mmvp_208 209 | mmvp_209 210 | mmvp_210 211 | mmvp_211 212 | mmvp_212 213 | mmvp_213 214 | mmvp_214 215 | mmvp_215 216 | mmvp_216 217 | mmvp_217 218 | mmvp_218 219 | mmvp_219 220 | mmvp_220 221 | mmvp_221 222 | mmvp_222 223 | mmvp_223 224 | mmvp_224 225 | mmvp_225 226 | mmvp_226 227 | mmvp_227 228 | mmvp_228 229 | mmvp_229 230 | mmvp_230 231 | mmvp_231 232 | mmvp_232 233 | mmvp_233 234 | mmvp_234 235 | mmvp_235 236 | mmvp_236 237 | mmvp_237 238 | mmvp_238 239 | mmvp_239 240 | mmvp_240 241 | mmvp_241 242 | mmvp_242 243 | mmvp_243 244 | mmvp_244 245 | mmvp_245 246 | mmvp_246 247 | mmvp_247 248 | mmvp_248 249 | mmvp_249 250 | mmvp_250 251 | mmvp_251 252 | mmvp_252 253 | mmvp_253 254 | mmvp_254 255 | mmvp_255 256 | mmvp_256 257 | mmvp_257 258 | mmvp_258 259 | mmvp_259 260 | mmvp_260 261 | mmvp_261 262 | mmvp_262 263 | mmvp_263 264 | mmvp_264 265 | mmvp_265 266 | mmvp_266 267 | mmvp_267 268 | mmvp_268 269 | mmvp_269 270 | mmvp_270 271 | mmvp_271 272 | mmvp_272 273 | mmvp_273 274 | mmvp_274 275 | mmvp_275 276 | mmvp_276 277 | mmvp_277 278 | mmvp_278 279 | mmvp_279 280 | mmvp_280 281 | mmvp_281 282 | mmvp_282 283 | mmvp_283 284 | mmvp_284 285 | mmvp_285 286 | mmvp_286 287 | mmvp_287 288 | mmvp_288 289 | mmvp_289 290 | mmvp_290 291 | mmvp_291 292 | mmvp_292 293 | mmvp_293 294 | mmvp_294 295 | mmvp_295 296 | mmvp_296 297 | mmvp_297 298 | mmvp_298 299 | mmvp_299 300 | mmvp_300 301 | -------------------------------------------------------------------------------- /preprocess/merge_with_VT/merge_eval_dataset_with_VT.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | import argparse 4 | 5 | def dataset_with_vt(dataset_path, dataset_vt_path, dataset_with_vt_path): 6 | visual_table_prompt_pre = "Visual table:" 7 | visual_table_prompt_post = "Based on the given image and given visual table, answer the following question:" 8 | 9 | to_print = True 10 | 11 | dict_image_vt = {} 12 | with open(dataset_vt_path, 'r') as file: 13 | for line in file: 14 | json_data = json.loads(line) 15 | if to_print: 16 | print(json_data) 17 | to_print = False 18 | question_id = json_data['question_id'] 19 | text = json_data['text'] 20 | dict_image_vt[question_id] = text 21 | 22 | to_print = True 23 | 24 | with open(dataset_path, 'r') as file, open(dataset_with_vt_path, 'w') as output_file: 25 | for line in file: 26 | json_data = json.loads(line) 27 | image = json_data['image'] 28 | assert image in dict_image_vt, f"image {image} not in dict_image_vt" 29 | json_data['text'] = visual_table_prompt_pre + '\n' + \ 30 | dict_image_vt[image] + '\n' + \ 31 | visual_table_prompt_post + '\n' + \ 32 | json_data['text'] 33 | if to_print: 34 | print(json_data) 35 | to_print = False 36 | output_file.write(json.dumps(json_data) + '\n') 37 | 38 | if __name__ == "__main__": 39 | parser = argparse.ArgumentParser() 40 | parser.add_argument("--dataset_path", type=str, default='./playground/data/eval/mm-vet/llava-mm-vet.jsonl') 41 | parser.add_argument("--dataset_vt_path", type=str, default='./playground/data_VT/eval_images_gen_vt/mmvet_gen_vt/VTGenerator-13B/merge.jsonl') 42 | parser.add_argument("--dataset_with_vt_path", type=str, default='./playground/data_VT/eval/mm-vet/mmvet_with_VTGenerator-13B_gen_vt.jsonl') 43 | args = parser.parse_args() 44 | 45 | print(f"dataset_path = 
{args.dataset_path}") 46 | print(f"dataset_vt_path = {args.dataset_vt_path}") 47 | print(f"dataset_with_vt_path = {args.dataset_with_vt_path}") 48 | dataset_with_vt(args.dataset_path, args.dataset_vt_path, args.dataset_with_vt_path) -------------------------------------------------------------------------------- /preprocess/merge_with_VT/merge_scienceqa_with_VT.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | import argparse 4 | 5 | def dataset_with_gen_vt(dataset_path, dataset_vt_path, dataset_with_vt_path): 6 | visual_table_prompt_pre = "Visual table:" 7 | visual_table_prompt_post = "Based on the given image and given visual table, answer the following question:" 8 | 9 | dict_image_vt = {} 10 | with open(dataset_vt_path, 'r') as file: 11 | for line in file: 12 | json_data = json.loads(line) 13 | question_id = json_data['question_id'] 14 | text = json_data['text'] 15 | dict_image_vt[question_id] = text 16 | 17 | data_with_gen_vt = [] 18 | with open(dataset_path, 'r') as file: 19 | data = json.load(file) 20 | for line in data: 21 | # { 22 | # "id": "4", 23 | # "conversations": [ 24 | # { 25 | # "from": "human", 26 | # "value": "Which figure of speech is used in this text?\nSing, O goddess, the anger of Achilles son of Peleus, that brought countless ills upon the Achaeans.\n\u2014Homer, The Iliad\nA. chiasmus\nB. apostrophe" 27 | # }, 28 | # { 29 | # "from": "gpt", 30 | # "value": "B" 31 | # } 32 | # ] 33 | # }, 34 | # { 35 | # "id": "5", 36 | # "image": "5/image.png", 37 | # "conversations": [ 38 | # { 39 | # "from": "human", 40 | # "value": "\nContext: People can use the engineering-design process to develop solutions to problems. One step in the process is testing if a potential solution meets the requirements of the design.\nThe passage below describes how the engineering-design process was used to test a solution to a problem. Read the passage. Then answer the question below.\n\nGordon was an aerospace engineer who was developing a parachute for a spacecraft that would land on Mars. He needed to add a vent at the center of the parachute so the spacecraft would land smoothly. However, the spacecraft would have to travel at a high speed before landing. If the vent was too big or too small, the parachute might swing wildly at this speed. The movement could damage the spacecraft.\nSo, to help decide how big the vent should be, Gordon put a parachute with a 1 m vent in a wind tunnel. The wind tunnel made it seem like the parachute was moving at 200 km per hour. He observed the parachute to see how much it swung.\nFigure: a spacecraft's parachute in a wind tunnel.\nWhich of the following could Gordon's test show?\nA. if the spacecraft was damaged when using a parachute with a 1 m vent going 200 km per hour\nB. how steady a parachute with a 1 m vent was at 200 km per hour\nC. 
whether a parachute with a 1 m vent would swing too much at 400 km per hour" 41 | # }, 42 | # { 43 | # "from": "gpt", 44 | # "value": "B" 45 | # } 46 | # ] 47 | # }, 48 | if 'image' not in line: 49 | data_with_gen_vt.append(line) 50 | continue 51 | 52 | image = line['image'] 53 | assert image in dict_image_vt, f"image {image} not in dict_image_vt" 54 | line["conversations"][0]["value"] = line["conversations"][0]["value"].replace("<image>\n", "") 55 | line["conversations"][0]["value"] = "<image>\n" + visual_table_prompt_pre + '\n' + \ 56 | dict_image_vt[image] + '\n' + \ 57 | visual_table_prompt_post + '\n' + \ 58 | line["conversations"][0]["value"] 59 | data_with_gen_vt.append(line) 60 | 61 | with open(dataset_with_vt_path, 'w') as file: 62 | json.dump(data_with_gen_vt, file, indent=4) 63 | 64 | if __name__ == "__main__": 65 | parser = argparse.ArgumentParser() 66 | parser.add_argument("--dataset_path", type=str, default='./playground/data/eval/scienceqa/llava_test.jsonl') 67 | parser.add_argument("--dataset_vt_path", type=str, default='./playground/data_VT/eval_images_gen_vt/scienceqa_gen_vt/VTGenerator-13B/merge.jsonl') 68 | parser.add_argument("--dataset_with_vt_path", type=str, default='./playground/data_VT/eval/scienceqa/scienceqa_with_VTGenerator-13B_gen_vt.jsonl') 69 | args = parser.parse_args() 70 | print(f"dataset_path = {args.dataset_path}") 71 | print(f"dataset_vt_path = {args.dataset_vt_path}") 72 | print(f"dataset_with_vt_path = {args.dataset_with_vt_path}") 73 | dataset_with_gen_vt(args.dataset_path, args.dataset_vt_path, args.dataset_with_vt_path) -------------------------------------------------------------------------------- /preprocess/mmbench/convert_mmbench_images.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import io 3 | import base64 4 | import json 5 | from PIL import Image 6 | from tqdm import tqdm 7 | import os 8 | import argparse 9 | 10 | def decode_base64_to_image(base64_string): 11 | image_data = base64.b64decode(base64_string) 12 | image = Image.open(io.BytesIO(image_data)) 13 | return image 14 | 15 | def main(args): 16 | mmbench_imageID = set() # for mmbench 17 | 18 | datas = pd.read_csv(args.data_path, sep='\t') 19 | print(datas.shape[0]) 20 | print(datas.columns) 21 | # 4377 22 | # Index(['index', 'question', 'hint', 'A', 'B', 'C', 'D', 'answer', 'category', 23 | # 'image', 'source', 'l2-category', 'comment', 'split'], 24 | # dtype='object') 25 | 26 | image_path = args.image_path 27 | os.makedirs(image_path, exist_ok=True) 28 | 29 | for idx in tqdm(range(len(datas))): 30 | data = datas.iloc[idx] 31 | index = int(data['index']) 32 | imageID = f"{index}.jpg" 33 | if imageID not in mmbench_imageID: 34 | mmbench_imageID.add(imageID) 35 | image = decode_base64_to_image(data['image']) 36 | image.save(f"{image_path}/{imageID}") 37 | 38 | if __name__ == "__main__": 39 | parser = argparse.ArgumentParser() 40 | parser.add_argument("--data_path", type=str, default="./playground/data/eval/mmbench/mmbench_dev_20230712.tsv") 41 | parser.add_argument("--image_path", type=str, default="./playground/data/eval/mmbench/images/mmbench_dev_20230712") 42 | args = parser.parse_args() 43 | main(args) 44 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "llava" 7 | version =
"1.1.3" 8 | description = "Towards GPT-4 like large language and visual assistant." 9 | readme = "README.md" 10 | requires-python = ">=3.8" 11 | classifiers = [ 12 | "Programming Language :: Python :: 3", 13 | "License :: OSI Approved :: Apache Software License", 14 | ] 15 | dependencies = [ 16 | "torch==2.0.1", "torchvision==0.15.2", 17 | "transformers==4.31.0", "tokenizers>=0.12.1,<0.14", "sentencepiece==0.1.99", "shortuuid", 18 | "accelerate==0.21.0", "peft==0.4.0", "bitsandbytes==0.41.0", 19 | "pydantic<2,>=1", "markdown2[all]", "numpy", "scikit-learn==1.2.2", 20 | "requests", "httpx==0.24.0", "uvicorn", "fastapi", 21 | "einops==0.6.1", "einops-exts==0.0.4", "timm==0.6.13", 22 | ] 23 | 24 | [project.optional-dependencies] 25 | train = ["deepspeed==0.9.5", "ninja", "wandb"] 26 | 27 | [project.urls] 28 | "Homepage" = "https://llava-vl.github.io" 29 | "Bug Tracker" = "https://github.com/haotian-liu/LLaVA/issues" 30 | 31 | [tool.setuptools.packages.find] 32 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"] 33 | 34 | [tool.wheel] 35 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"] 36 | -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/eval_multi_datasets_with_VT.sh: -------------------------------------------------------------------------------- 1 | VTGenerator="VTGenerator-13B" 2 | Model="LLaVA-VT-13B" 3 | 4 | mkdir -p scripts/log/eval_multi_datasets_with_VT 5 | 6 | # before this script 7 | # 1. download evaluation images and eval.zip (following https://github.com/haotian-liu/LLaVA/blob/main/docs/Evaluation.md) 8 | # 2. utilize ./scripts/VTGenerator/infer/eval_images_gen_vt.sh to generate VT for each dataset 9 | # or directly utilize the provided VT for each dataset from ./playground/data_VT/eval_images_gen_vt 10 | 11 | # for each dataset 12 | # 1. merge_eval_dataset_with_VT 13 | # 2. infer LLaVA-VT-13B on eval_dataset_with_VT 14 | # 3. 
[optional] GPT-assisted evaluation: mmvet, llavabench, mmmu 15 | 16 | CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/mmvet/mmvet.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/mmvet_with_${VTGenerator}_gen_vt_${Model}.txt 17 | 18 | CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/llavabench/llavabench.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/llavabench_with_${VTGenerator}_gen_vt_${Model}.txt 19 | 20 | CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/mmmu/mmmu.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/mmmu_with_${VTGenerator}_gen_vt_${Model}.txt 21 | 22 | CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/mmbench/mmbench.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/mmbench_with_${VTGenerator}_gen_vt_${Model}.txt 23 | 24 | CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/mmvp_mc/mmvp_mc.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/mmvp_mc_with_${VTGenerator}_gen_vt_${Model}.txt 25 | 26 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/pope/pope.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/pope_with_${VTGenerator}_gen_vt_${Model}.txt 27 | 28 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/vizwiz/vizwiz.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/vizwiz_with_${VTGenerator}_gen_vt_${Model}.txt 29 | 30 | CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/scienceqa/scienceqa.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/scienceqa_with_${VTGenerator}_gen_vt_${Model}.txt 31 | 32 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/gqa/gqa.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/gqa_full_with_${VTGenerator}_gen_vt_${Model}.txt 33 | 34 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/vqav2/vqav2_dev.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/vqav2_dev_with_${VTGenerator}_gen_vt_${Model}.txt 35 | 36 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/textvqa/textvqa.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/textvqa_with_${VTGenerator}_gen_vt_${Model}.txt 37 | -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/gqa/gqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/gqa/gqa.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/gqa_full_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 7 | IFS=',' read -ra GPULIST <<< "$gpu_list" 8 | 9 | CHUNKS=${#GPULIST[@]} 10 | 11 | # VTGenerator="VTGenerator-13B" 12 | # Model="LLaVA-VT-13B" 13 | if [ $# -ne 2 ]; then 14 | echo "Usage: $0 " 15 | exit 1 16 | fi 17 | 18 | VTGenerator=$1 19 | Model=$2 20 | 21 | VT_PATH="./playground/data_VT/eval_images_gen_vt/gqa_gen_vt/${VTGenerator}/merge.jsonl" 22 | SPLIT=gqa_with_${VTGenerator}_gen_vt 23 | 24 | # 1. 
merge_eval_dataset_with_VT 25 | python ./preprocess/merge_with_VT/merge_eval_dataset_with_VT.py \ 26 | --dataset_path "./playground/data/eval/gqa/llava_gqa_testdev_balanced.jsonl" \ 27 | --dataset_vt_path ${VT_PATH} \ 28 | --dataset_with_vt_path "./playground/data_VT/eval/gqa/${SPLIT}.jsonl" 29 | 30 | # 2. infer LLaVA-VT-13B on eval_dataset_with_VT 31 | for IDX in $(seq 0 $((CHUNKS-1))); do 32 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 33 | --model-path ./checkpoints/$Model \ 34 | --question-file ./playground/data_VT/eval/gqa/${SPLIT}.jsonl \ 35 | --image-folder /path/to/GQA/images \ 36 | --answers-file ./playground/data_VT/eval/gqa/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl \ 37 | --num-chunks $CHUNKS \ 38 | --chunk-idx $IDX \ 39 | --temperature 0 \ 40 | --conv-mode vicuna_v1 & 41 | done 42 | wait 43 | 44 | 45 | output_file=./playground/data_VT/eval/gqa/answers/$SPLIT/$Model/merge.jsonl 46 | 47 | # Clear out the output file if it exists. 48 | > "$output_file" 49 | 50 | # Loop through the indices and concatenate each file. 51 | for IDX in $(seq 0 $((CHUNKS-1))); do 52 | cat ./playground/data_VT/eval/gqa/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl >> "$output_file" 53 | done 54 | 55 | # 3. eval gqa 56 | GQADIR="./playground/data_VT/eval/gqa/data" 57 | # download from: https://cs.stanford.edu/people/dorarad/gqa/evaluate.html 58 | # fix some bugs of the official codes following: 59 | # https://github.com/haotian-liu/LLaVA/issues/584 60 | # https://github.com/haotian-liu/LLaVA/issues/625 61 | 62 | python scripts/convert_gqa_for_eval.py --src $output_file --dst $GQADIR/testdev_balanced_predictions.json 63 | 64 | cp $GQADIR/testdev_balanced_predictions.json ./playground/data_VT/eval/gqa/answers/$SPLIT/$Model 65 | 66 | cd $GQADIR 67 | python eval/eval.py --tier testdev_balanced --questions testdev_balanced_questions.json 68 | 69 | # ./playground/data_VT/eval/gqa/answers/gqa_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B/testdev_balanced_predictions.json 70 | # Binary: 81.27% 71 | # Open: 49.30% 72 | # Accuracy: 63.98% 73 | # Validity: 0.00% 74 | # Plausibility: 0.00% 75 | # Distribution: 1.68 (lower is better) 76 | -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/llavabench/llavabench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/llavabench/llavabench.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/llavabench_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | # SPLIT=$1 7 | # Model=$2 8 | 9 | # VTGenerator="VTGenerator-13B" 10 | # Model="LLaVA-VT-13B" 11 | if [ $# -ne 2 ]; then 12 | echo "Usage: $0 " 13 | exit 1 14 | fi 15 | 16 | VTGenerator=$1 17 | Model=$2 18 | 19 | VT_PATH="./playground/data_VT/eval_images_gen_vt/llavabench_gen_vt/${VTGenerator}/merge.jsonl" 20 | SPLIT=llavabench_with_${VTGenerator}_gen_vt 21 | 22 | # 1. merge_eval_dataset_with_VT 23 | # def dataset_with_vt(dataset_path, dataset_vt_path, dataset_with_vt_path) 24 | python ./preprocess/merge_with_VT/merge_eval_dataset_with_VT.py \ 25 | --dataset_path "./playground/data/eval/llava-bench-in-the-wild/questions.jsonl" \ 26 | --dataset_vt_path ${VT_PATH} \ 27 | --dataset_with_vt_path "./playground/data_VT/eval/llava-bench-in-the-wild/${SPLIT}.jsonl" 28 | 29 | # 2. 
infer LLaVA-VT-13B on eval_dataset_with_VT 30 | python -m llava.eval.model_vqa \ 31 | --model-path ./checkpoints/$Model \ 32 | --question-file ./playground/data_VT/eval/llava-bench-in-the-wild/${SPLIT}.jsonl \ 33 | --image-folder ./playground/data/eval/llava-bench-in-the-wild/images \ 34 | --answers-file ./playground/data_VT/eval/llava-bench-in-the-wild/answers/$SPLIT/$Model/$Model.jsonl \ 35 | --temperature 0 \ 36 | --conv-mode vicuna_v1 37 | wait 38 | 39 | # The evaluation code provided by llavabench requires openai==0.28.0 40 | # Please change to a conda env with openai==0.28.0 41 | # and then execute the following command 42 | version=$(pip list | grep openai | awk '{print $2}') 43 | if [ "$version" != "0.28.0" ]; then 44 | echo "Please change to a conda env with openai==0.28.0, and then execute the following command." 45 | exit 1 46 | fi 47 | 48 | gpt_model="gpt-3.5-turbo-1106" 49 | 50 | mkdir -p playground/data/eval/llava-bench-in-the-wild/reviews/$SPLIT/$Model 51 | python llava/eval/eval_gpt_review_bench.py \ 52 | --question playground/data/eval/llava-bench-in-the-wild/questions.jsonl \ 53 | --context playground/data/eval/llava-bench-in-the-wild/context.jsonl \ 54 | --rule llava/eval/table/rule.json \ 55 | --answer-list \ 56 | playground/data/eval/llava-bench-in-the-wild/answers_gpt4.jsonl \ 57 | playground/data/eval/llava-bench-in-the-wild/answers/$SPLIT/$Model/$Model.jsonl \ 58 | --output \ 59 | playground/data/eval/llava-bench-in-the-wild/reviews/$SPLIT/$Model/${Model}_${gpt_model}.jsonl 60 | 61 | python llava/eval/summarize_gpt_review.py -f playground/data/eval/llava-bench-in-the-wild/reviews/$SPLIT/$Model/${Model}_${gpt_model}.jsonl 62 | 63 | # python llava/eval/summarize_gpt_review.py -f ./playground/data_VT/eval/llava-bench-in-the-wild/reviews/llavabench_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B/LLaVA-VT-13B_gpt-3.5-turbo-1106.jsonl 64 | # LLaVA-VT-13B_gpt-3.5-turbo-1106 65 | # all 89.1 76.2 67.8 66 | # llava_bench_complex 95.2 75.0 71.4 67 | # llava_bench_conv 92.1 81.8 75.3 68 | # llava_bench_detail 73.2 72.0 52.7 69 | # ================================= -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/mmbench/mmbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/mmbench/mmbench.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/mmbench_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | # VTGenerator="VTGenerator-13B" 7 | # Model="LLaVA-VT-13B" 8 | if [ $# -ne 2 ]; then 9 | echo "Usage: $0 <VTGenerator> <Model>" 10 | exit 1 11 | fi 12 | 13 | VTGenerator=$1 14 | Model=$2 15 | VT_PATH="./playground/data_VT/eval_images_gen_vt/mmbench_gen_vt/${VTGenerator}/merge.jsonl" 16 | SPLIT=mmbench_with_${VTGenerator}_gen_vt 17 | 18 | mkdir -p playground/data_VT/eval/mmbench/answers/$SPLIT/$Model 19 | mkdir -p playground/data_VT/eval/mmbench/answers_upload/$SPLIT/$Model 20 | 21 | # 1. merge_eval_dataset_with_VT 22 | # 2.
infer LLaVA-VT-13B on eval_dataset_with_VT 23 | python -m llava.eval.model_vqa_mmbench \ 24 | --model-path ./checkpoints/$Model \ 25 | --question-file ./playground/data/eval/mmbench/mmbench_dev_20230712.tsv \ 26 | --answers-file ./playground/data_VT/eval/mmbench/answers/$SPLIT/$Model/$Model.jsonl \ 27 | --single-pred-prompt \ 28 | --temperature 0 \ 29 | --conv-mode vicuna_v1 \ 30 | --with_gen_vt \ 31 | --gen_vt_path ${VT_PATH} 32 | wait 33 | 34 | python scripts/convert_mmbench_for_submission.py \ 35 | --annotation-file ./playground/data/eval/mmbench/mmbench_dev_20230712.tsv \ 36 | --result-dir ./playground/data_VT/eval/mmbench/answers/$SPLIT/$Model \ 37 | --upload-dir ./playground/data_VT/eval/mmbench/answers_upload/$SPLIT/$Model \ 38 | --experiment $Model 39 | 40 | # 3. eval on official eval server 41 | # https://mmbench.opencompass.org.cn/mmbench-submission 42 | # ./playground/data_VT/eval/mmbench/answers_upload/mmbench_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B/LLaVA-VT-13B.xlsx 43 | # key value 44 | # A_Overall (dev) 0.6941580756013745 45 | # B_AR (dev) 0.7185929648241206 46 | # B_CP (dev) 0.7972972972972973 47 | # B_FP-C (dev) 0.6293706293706294 48 | # B_FP-S (dev) 0.7201365187713311 49 | # B_LR (dev) 0.423728813559322 50 | # B_RR (dev) 0.6782608695652174 51 | # C_action_recognition (dev) 0.9074074074074074 52 | # C_attribute_comparison (dev) 0.6136363636363636 53 | # C_attribute_recognition (dev) 0.8378378378378378 54 | # C_celebrity_recognition (dev) 0.8181818181818182 55 | # C_function_reasoning (dev) 0.8227848101265823 56 | # C_future_prediction (dev) 0.575 -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/mmmu/mmmu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/mmmu/mmmu.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/mmmu_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | # VTGenerator="VTGenerator-13B" 7 | # Model="LLaVA-VT-13B" 8 | if [ $# -ne 2 ]; then 9 | echo "Usage: $0 " 10 | exit 1 11 | fi 12 | 13 | VTGenerator=$1 14 | Model=$2 15 | 16 | VT_PATH="./playground/data_VT/eval_images_gen_vt/mmmu_gen_vt/${VTGenerator}/merge.jsonl" 17 | SPLIT=mmmu_with_${VTGenerator}_gen_vt 18 | 19 | # 1. merge_eval_dataset_with_VT 20 | python ./preprocess/merge_with_VT/merge_eval_dataset_with_VT.py \ 21 | --dataset_path "./playground/data_VT/eval/mmmu/mmmu.jsonl" \ 22 | --dataset_vt_path ${VT_PATH} \ 23 | --dataset_with_vt_path "./playground/data_VT/eval/mmmu/${SPLIT}.jsonl" 24 | 25 | # 2. infer LLaVA-VT-13B on eval_dataset_with_VT 26 | python -m llava.eval.model_vqa \ 27 | --model-path ./checkpoints/$Model \ 28 | --question-file ./playground/data_VT/eval/mmmu/$SPLIT.jsonl \ 29 | --image-folder /path/to/MMMU/images \ 30 | --answers-file ./playground/data_VT/eval/mmmu/answers/$SPLIT/$Model/${SPLIT}_${Model}.jsonl \ 31 | --temperature 0 \ 32 | --conv-mode vicuna_v1 & 33 | wait 34 | 35 | # 3. 
eval in ./scripts/gpt_eval/multi_gpt_eval_mmmu.sh 36 | # pred_paths=( 37 | # "./playground/data_VT/eval/mmmu/answers/$SPLIT/$Model/${SPLIT}_${Model}.jsonl" 38 | # ) 39 | # Yes count: 358 40 | # No count: 497 41 | # Accuracy: 0.41871345029239765 42 | # Average score: 2.8526315789473684 43 | -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/mmvet/mmvet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/mmvet/mmvet.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/mmvet_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | # VTGenerator="VTGenerator-13B" 7 | # Model="LLaVA-VT-13B" 8 | if [ $# -ne 2 ]; then 9 | echo "Usage: $0 " 10 | exit 1 11 | fi 12 | 13 | VTGenerator=$1 14 | Model=$2 15 | 16 | VT_PATH="./playground/data_VT/eval_images_gen_vt/mmvet_gen_vt/${VTGenerator}/merge.jsonl" 17 | SPLIT=mmvet_with_${VTGenerator}_gen_vt 18 | 19 | # 1. merge_eval_dataset_with_VT 20 | # def dataset_with_vt(dataset_path, dataset_vt_path, dataset_with_vt_path) 21 | python ./preprocess/merge_with_VT/merge_eval_dataset_with_VT.py \ 22 | --dataset_path "./playground/data/eval/mm-vet/llava-mm-vet.jsonl" \ 23 | --dataset_vt_path ${VT_PATH} \ 24 | --dataset_with_vt_path "./playground/data_VT/eval/mm-vet/${SPLIT}.jsonl" 25 | 26 | # 2. infer LLaVA-VT-13B on eval_dataset_with_VT 27 | python -m llava.eval.model_vqa \ 28 | --model-path ./checkpoints/$Model \ 29 | --question-file ./playground/data_VT/eval/mm-vet/${SPLIT}.jsonl \ 30 | --image-folder ./playground/data/eval/mm-vet/images \ 31 | --answers-file ./playground/data_VT/eval/mm-vet/$SPLIT/answers/${Model}/${SPLIT}_${Model}.jsonl \ 32 | --temperature 0 \ 33 | --conv-mode vicuna_v1 34 | wait 35 | 36 | mkdir -p ./playground/data_VT/eval/mm-vet/$SPLIT/results/${Model} 37 | 38 | python scripts/convert_mmvet_for_eval.py \ 39 | --src ./playground/data_VT/eval/mm-vet/$SPLIT/answers/${Model}/${SPLIT}_${Model}.jsonl \ 40 | --dst ./playground/data_VT/eval/mm-vet/$SPLIT/results/${Model}/${SPLIT}_${Model}.json 41 | 42 | # 3. 
eval on official eval server 43 | # https://huggingface.co/spaces/whyu/MM-Vet_Evaluator -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/mmvp_mc/eval_mmvp_mc_acc.py: -------------------------------------------------------------------------------- 1 | import json 2 | import sys 3 | import argparse 4 | 5 | def read_jsonl(file_path): 6 | with open(file_path, 'r', encoding='utf-8') as file: 7 | return [json.loads(line) for line in file] 8 | 9 | def calculate_accuracy(path_pred, path_gt): 10 | 11 | data_pred = read_jsonl(path_pred) 12 | data_gt = read_jsonl(path_gt) 13 | 14 | questionID_gt = {item['question_id']: item['answers'] for item in data_gt} 15 | choices = ['A', 'B'] 16 | 17 | correct_pairs = 0 18 | total_pairs = 0 19 | for i in range(0, len(data_pred), 2): # QA pairs 20 | if i + 1 < len(data_pred) and data_pred[i]['question_id'] in questionID_gt and data_pred[i + 1]['question_id'] in questionID_gt: 21 | assert data_pred[i]['text'].upper() in choices, f"Invalid answer choice: {data_pred[i]['question_id']}, {data_pred[i]['text'].upper()}" 22 | assert data_pred[i+1]['text'].upper() in choices, f"Invalid answer choice: {data_pred[i+1]['question_id']}, {data_pred[i+1]['text'].upper()}" 23 | answer_correct = data_pred[i]['text'].upper() == questionID_gt[data_pred[i]['question_id']] and \ 24 | data_pred[i+1]['text'].upper() == questionID_gt[data_pred[i + 1]['question_id']] 25 | if answer_correct: 26 | correct_pairs += 1 27 | total_pairs += 1 28 | 29 | accuracy = correct_pairs / total_pairs if total_pairs > 0 else 0 30 | return accuracy 31 | 32 | if __name__ == "__main__": 33 | parser = argparse.ArgumentParser() 34 | parser.add_argument("--path_pred", type=str, default='./playground/data/eval/mm-vet/llava-mm-vet.jsonl') 35 | parser.add_argument("--path_gt", type=str, default='./playground/data_VT/eval_images_gen_vt/mmvet_gen_vt/VTGenerator-13B/merge.jsonl') 36 | args = parser.parse_args() 37 | 38 | accuracy = calculate_accuracy(args.path_pred, args.path_gt) 39 | print(f"path_pred: {args.path_pred}") 40 | print(f"path_gt: {args.path_gt}") 41 | print(f"Accuracy: {accuracy}") -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/mmvp_mc/mmvp_mc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/mmvp_mc/mmvp_mc.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/mmvp_mc_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | # VTGenerator="VTGenerator-13B" 7 | # Model="LLaVA-VT-13B" 8 | if [ $# -ne 2 ]; then 9 | echo "Usage: $0 <VTGenerator> <Model>" 10 | exit 1 11 | fi 12 | 13 | VTGenerator=$1 14 | Model=$2 15 | 16 | VT_PATH="./playground/data_VT/eval_images_gen_vt/mmvp_gen_vt/${VTGenerator}/merge.jsonl" 17 | SPLIT=mmvp_mc_with_${VTGenerator}_gen_vt 18 | 19 | # 1. merge_eval_dataset_with_VT 20 | python ./preprocess/merge_with_VT/merge_eval_dataset_with_VT.py \ 21 | --dataset_path "./playground/data_VT/eval/mmvp_mc/mmvp_mc.jsonl" \ 22 | --dataset_vt_path ${VT_PATH} \ 23 | --dataset_with_vt_path "./playground/data_VT/eval/mmvp_mc/${SPLIT}.jsonl" 24 | 25 | # 2.
infer LLaVA-VT-13B on eval_dataset_with_VT 26 | python -m llava.eval.model_vqa \ 27 | --model-path ./checkpoints/$Model \ 28 | --question-file ./playground/data_VT/eval/mmvp_mc/$SPLIT.jsonl \ 29 | --image-folder /path/to/MMVP/images \ 30 | --answers-file ./playground/data_VT/eval/mmvp_mc/answers/$SPLIT/$Model/${SPLIT}_${Model}.jsonl \ 31 | --temperature 0 \ 32 | --conv-mode vicuna_v1 & 33 | wait 34 | 35 | # 3. eval mmvp_mc 36 | python ./scripts/LLaVA-VT/eval/mmvp_mc/eval_mmvp_mc_acc.py \ 37 | --path_pred "./playground/data_VT/eval/mmvp_mc/answers/$SPLIT/$Model/${SPLIT}_${Model}.jsonl" \ 38 | --path_gt "./playground/data_VT/eval/mmvp_mc/mmvp_mc.jsonl" 39 | 40 | # path_pred: ./playground/data_VT/eval/mmvp_mc/answers/mmvp_mc_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B/mmvp_mc_with_VTGenerator-13B_gen_vt_LLaVA-VT-13B.jsonl 41 | # path_gt: ./playground/data_VT/eval/mmvp_mc/mmvp_mc.jsonl 42 | # Accuracy: 0.36666666666666664 -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/pope/pope.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/pope/pope.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/pope_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 7 | IFS=',' read -ra GPULIST <<< "$gpu_list" 8 | 9 | CHUNKS=${#GPULIST[@]} 10 | 11 | # VTGenerator="VTGenerator-13B" 12 | # Model="LLaVA-VT-13B" 13 | if [ $# -ne 2 ]; then 14 | echo "Usage: $0 " 15 | exit 1 16 | fi 17 | 18 | VTGenerator=$1 19 | Model=$2 20 | 21 | VT_PATH="./playground/data_VT/eval_images_gen_vt/pope_gen_vt/${VTGenerator}/merge.jsonl" 22 | SPLIT=pope_with_${VTGenerator}_gen_vt 23 | 24 | # 1. merge_eval_dataset_with_VT 25 | python ./preprocess/merge_with_VT/merge_eval_dataset_with_VT.py \ 26 | --dataset_path "./playground/data/eval/pope/llava_pope_test.jsonl" \ 27 | --dataset_vt_path ${VT_PATH} \ 28 | --dataset_with_vt_path "./playground/data_VT/eval/pope/${SPLIT}.jsonl" 29 | 30 | # 2. infer LLaVA-VT-13B on eval_dataset_with_VT 31 | for IDX in $(seq 0 $((CHUNKS-1))); do 32 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 33 | --model-path ./checkpoints/$Model \ 34 | --question-file ./playground/data_VT/eval/pope/$SPLIT.jsonl \ 35 | --image-folder ./playground/data/coco/val2014 \ 36 | --answers-file ./playground/data_VT/eval/pope/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl \ 37 | --num-chunks $CHUNKS \ 38 | --chunk-idx $IDX \ 39 | --temperature 0 \ 40 | --conv-mode vicuna_v1 & 41 | done 42 | wait 43 | 44 | output_file=./playground/data_VT/eval/pope/answers/$SPLIT/$Model/merge.jsonl 45 | 46 | # Clear out the output file if it exists. 47 | > "$output_file" 48 | 49 | # Loop through the indices and concatenate each file. 50 | for IDX in $(seq 0 $((CHUNKS-1))); do 51 | cat ./playground/data_VT/eval/pope/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl >> "$output_file" 52 | done 53 | 54 | # 3. 
eval pope 55 | python llava/eval/eval_pope.py \ 56 | --annotation-dir ./playground/data_VT/eval/pope/pope_coco_commitID_e3e39262c85a6a83f26cf5094022a782cb0df58d \ 57 | --question-file ./playground/data_VT/eval/pope/$SPLIT.jsonl \ 58 | --result-file $output_file 59 | 60 | # ./playground/data_VT/eval/pope/answers/pope_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B/merge.jsonl 61 | # Category: adversarial, # samples: 3000 62 | # TP FP TN FN 63 | # 1210 121 1379 290 64 | # Accuracy: 0.863 65 | # Precision: 0.9090909090909091 66 | # Recall: 0.8066666666666666 67 | # F1 score: 0.8548216178028964 68 | # Yes ratio: 0.44366666666666665 69 | # 0.855, 0.863, 0.909, 0.807, 0.444 70 | # ==================================== 71 | # Category: random, # samples: 2910 72 | # TP FP TN FN 73 | # 1210 25 1385 290 74 | # Accuracy: 0.8917525773195877 75 | # Precision: 0.979757085020243 76 | # Recall: 0.8066666666666666 77 | # F1 score: 0.8848263254113345 78 | # Yes ratio: 0.42439862542955326 79 | # 0.885, 0.892, 0.980, 0.807, 0.424 80 | # ==================================== 81 | # Category: popular, # samples: 3000 82 | # TP FP TN FN 83 | # 1210 62 1438 290 84 | # Accuracy: 0.8826666666666667 85 | # Precision: 0.9512578616352201 86 | # Recall: 0.8066666666666666 87 | # F1 score: 0.8730158730158729 88 | # Yes ratio: 0.424 89 | # 0.873, 0.883, 0.951, 0.807, 0.424 90 | # ==================================== 91 | # avg_f1 = 0.870887938743368 -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/scienceqa/scienceqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0 bash scripts/LLaVA-VT/eval/scienceqa/scienceqa.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/scienceqa_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | # VTGenerator="VTGenerator-13B" 7 | # Model="LLaVA-VT-13B" 8 | if [ $# -ne 2 ]; then 9 | echo "Usage: $0 " 10 | exit 1 11 | fi 12 | 13 | VTGenerator=$1 14 | Model=$2 15 | 16 | VT_PATH="./playground/data_VT/eval_images_gen_vt/scienceqa_gen_vt/${VTGenerator}/merge.jsonl" 17 | SPLIT=scienceqa_with_${VTGenerator}_gen_vt 18 | 19 | # 1. merge_eval_dataset_with_VT 20 | python ./preprocess/merge_with_VT/merge_scienceqa_with_VT.py \ 21 | --dataset_path "./playground/data/eval/scienceqa/llava_test_CQM-A.json" \ 22 | --dataset_vt_path ${VT_PATH} \ 23 | --dataset_with_vt_path "./playground/data_VT/eval/scienceqa/${SPLIT}.json" 24 | 25 | # 2. infer LLaVA-VT-13B on eval_dataset_with_VT 26 | python -m llava.eval.model_vqa_science \ 27 | --model-path ./checkpoints/$Model \ 28 | --question-file ./playground/data_VT/eval/scienceqa/${SPLIT}.json \ 29 | --image-folder ./playground/data/eval/scienceqa/test \ 30 | --answers-file ./playground/data_VT/eval/scienceqa/answers/$SPLIT/$Model/$Model.jsonl \ 31 | --single-pred-prompt \ 32 | --temperature 0 \ 33 | --conv-mode vicuna_v1 34 | 35 | # 3. 
eval scienceqa-img 36 | # llava/eval/eval_science_qa.py 37 | # base_dir = args.base_dir 38 | # split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[args.split] 39 | # problems = json.load(open(os.path.join(base_dir, "problems.json"))) 40 | 41 | python llava/eval/eval_science_qa.py \ 42 | --base-dir ./playground/data/eval/scienceqa \ 43 | --result-file ./playground/data_VT/eval/scienceqa/answers/$SPLIT/$Model/$Model.jsonl \ 44 | --output-file ./playground/data_VT/eval/scienceqa/answers/$SPLIT/$Model/${Model}_output.jsonl \ 45 | --output-result ./playground/data_VT/eval/scienceqa/answers/$SPLIT/$Model/${Model}_result.json 46 | 47 | # Total: 4241, Correct: 3189, Accuracy: 75.19%, IMG-Accuracy: 72.58% -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/textvqa/textvqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/textvqa/textvqa.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/textvqa_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 7 | IFS=',' read -ra GPULIST <<< "$gpu_list" 8 | 9 | CHUNKS=${#GPULIST[@]} 10 | 11 | # VTGenerator="VTGenerator-13B" 12 | # Model="LLaVA-VT-13B" 13 | if [ $# -ne 2 ]; then 14 | echo "Usage: $0 " 15 | exit 1 16 | fi 17 | 18 | VTGenerator=$1 19 | Model=$2 20 | 21 | VT_PATH="./playground/data_VT/eval_images_gen_vt/textvqa_gen_vt/${VTGenerator}/merge.jsonl" 22 | SPLIT=textvqa_with_${VTGenerator}_gen_vt 23 | 24 | # 1. merge_eval_dataset_with_VT 25 | python ./preprocess/merge_with_VT/merge_eval_dataset_with_VT.py \ 26 | --dataset_path "./playground/data/eval/textvqa/llava_textvqa_val_v051_ocr.jsonl" \ 27 | --dataset_vt_path ${VT_PATH} \ 28 | --dataset_with_vt_path "./playground/data_VT/eval/textvqa/${SPLIT}.jsonl" 29 | 30 | # 2. infer LLaVA-VT-13B on eval_dataset_with_VT 31 | for IDX in $(seq 0 $((CHUNKS-1))); do 32 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 33 | --model-path ./checkpoints/$Model \ 34 | --question-file ./playground/data_VT/eval/textvqa/${SPLIT}.jsonl \ 35 | --image-folder ./playground/data/eval/textvqa/train_images \ 36 | --answers-file ./playground/data_VT/eval/textvqa/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl \ 37 | --num-chunks $CHUNKS \ 38 | --chunk-idx $IDX \ 39 | --temperature 0 \ 40 | --conv-mode vicuna_v1 & 41 | done 42 | wait 43 | 44 | output_file=./playground/data_VT/eval/textvqa/answers/$SPLIT/$Model/merge.jsonl 45 | 46 | # Clear out the output file if it exists. 47 | > "$output_file" 48 | 49 | # Loop through the indices and concatenate each file. 50 | for IDX in $(seq 0 $((CHUNKS-1))); do 51 | cat ./playground/data_VT/eval/textvqa/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl >> "$output_file" 52 | done 53 | 54 | # 3. 
eval textvqa 55 | python -m llava.eval.eval_textvqa \ 56 | --annotation-file ./playground/data/eval/textvqa/TextVQA_0.5.1_val.json \ 57 | --result-file $output_file 58 | 59 | # Samples: 5000 60 | # Accuracy: 61.16% -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/vizwiz/vizwiz.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/vizwiz/vizwiz.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/vizwiz_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 7 | IFS=',' read -ra GPULIST <<< "$gpu_list" 8 | 9 | CHUNKS=${#GPULIST[@]} 10 | 11 | # VTGenerator="VTGenerator-13B" 12 | # Model="LLaVA-VT-13B" 13 | if [ $# -ne 2 ]; then 14 | echo "Usage: $0 " 15 | exit 1 16 | fi 17 | 18 | VTGenerator=$1 19 | Model=$2 20 | 21 | VT_PATH="./playground/data_VT/eval_images_gen_vt/vizwiz_gen_vt/${VTGenerator}/merge.jsonl" 22 | SPLIT=vizwiz_with_${VTGenerator}_gen_vt 23 | 24 | # 1. merge_eval_dataset_with_VT 25 | python ./preprocess/merge_with_VT/merge_eval_dataset_with_VT.py \ 26 | --dataset_path "./playground/data/eval/vizwiz/llava_test.jsonl" \ 27 | --dataset_vt_path ${VT_PATH} \ 28 | --dataset_with_vt_path "./playground/data_VT/eval/vizwiz/${SPLIT}.jsonl" 29 | 30 | # 2. infer LLaVA-VT-13B on eval_dataset_with_VT 31 | for IDX in $(seq 0 $((CHUNKS-1))); do 32 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 33 | --model-path ./checkpoints/$Model \ 34 | --question-file ./playground/data_VT/eval/vizwiz/$SPLIT.jsonl \ 35 | --image-folder ./playground/data/eval/vizwiz/test \ 36 | --answers-file ./playground/data_VT/eval/vizwiz/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl \ 37 | --num-chunks $CHUNKS \ 38 | --chunk-idx $IDX \ 39 | --temperature 0 \ 40 | --conv-mode vicuna_v1 & 41 | done 42 | wait 43 | 44 | output_file=./playground/data_VT/eval/vizwiz/answers/$SPLIT/$Model/merge.jsonl 45 | 46 | # Clear out the output file if it exists. 47 | > "$output_file" 48 | 49 | # Loop through the indices and concatenate each file. 50 | for IDX in $(seq 0 $((CHUNKS-1))); do 51 | cat ./playground/data_VT/eval/vizwiz/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl >> "$output_file" 52 | done 53 | 54 | output_file2=./playground/data_VT/eval/vizwiz/answers_upload/$SPLIT/$Model/${Model}.json 55 | mkdir -p ./playground/data_VT/eval/vizwiz/answers_upload/$SPLIT/$Model 56 | 57 | python scripts/convert_vizwiz_for_submission.py \ 58 | --annotation-file ./playground/data/eval/vizwiz/llava_test.jsonl \ 59 | --result-file $output_file \ 60 | --result-upload-file ${output_file2} 61 | 62 | # 3. 
eval on official eval server 63 | # https://eval.ai/web/challenges/challenge-page/2185/submission 64 | # Phase: test-standard2024-VQA 65 | # ./playground/data_VT/eval/vizwiz/answers_upload/vizwiz_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B/LLaVA-VT-13B.json 66 | # [{"test": {"overall": 57.38, "other": 48.82, "unanswerable": 75.73, "yes/no": 79.6, "number": 48.46}}] -------------------------------------------------------------------------------- /scripts/LLaVA-VT/eval/vqav2/vqav2_dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/eval/vqav2/vqav2_dev.sh ${VTGenerator} ${Model} 2>&1 | tee -a scripts/log/eval_multi_datasets_with_VT/vqav2_dev_with_${VTGenerator}_gen_vt_${Model}.txt 5 | 6 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 7 | IFS=',' read -ra GPULIST <<< "$gpu_list" 8 | 9 | CHUNKS=${#GPULIST[@]} 10 | 11 | # VTGenerator="VTGenerator-13B" 12 | # Model="LLaVA-VT-13B" 13 | if [ $# -ne 2 ]; then 14 | echo "Usage: $0 " 15 | exit 1 16 | fi 17 | 18 | VTGenerator=$1 19 | Model=$2 20 | 21 | VT_PATH="./playground/data_VT/eval_images_gen_vt/vqav2_gen_vt/${VTGenerator}/merge.jsonl" 22 | SPLIT=vqav2_dev_with_${VTGenerator}_gen_vt 23 | 24 | # 1. merge_eval_dataset_with_VT 25 | python ./preprocess/merge_with_VT/merge_eval_dataset_with_VT.py \ 26 | --dataset_path "./playground/data/eval/vqav2/llava_vqav2_mscoco_test-dev2015.jsonl" \ 27 | --dataset_vt_path ${VT_PATH} \ 28 | --dataset_with_vt_path "./playground/data_VT/eval/vqav2/${SPLIT}.jsonl" 29 | 30 | # 2. infer LLaVA-VT-13B on eval_dataset_with_VT 31 | for IDX in $(seq 0 $((CHUNKS-1))); do 32 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 33 | --model-path ./checkpoints/$Model \ 34 | --question-file ./playground/data_VT/eval/vqav2/${SPLIT}.jsonl \ 35 | --image-folder ./playground/data/coco/test2015 \ 36 | --answers-file ./playground/data_VT/eval/vqav2/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl \ 37 | --num-chunks $CHUNKS \ 38 | --chunk-idx $IDX \ 39 | --temperature 0 \ 40 | --conv-mode vicuna_v1 & 41 | done 42 | wait 43 | 44 | output_file=./playground/data_VT/eval/vqav2/answers/$SPLIT/$Model/merge.jsonl 45 | 46 | # Clear out the output file if it exists. 47 | > "$output_file" 48 | 49 | # Loop through the indices and concatenate each file. 50 | for IDX in $(seq 0 $((CHUNKS-1))); do 51 | cat ./playground/data_VT/eval/vqav2/answers/$SPLIT/$Model/${CHUNKS}_${IDX}.jsonl >> "$output_file" 52 | done 53 | 54 | python scripts/convert_vqav2_for_submission.py --split $SPLIT --Model $Model 55 | 56 | # 3. 
eval on official eval server 57 | # https://eval.ai/web/challenges/challenge-page/830/submission 58 | # Phase: Test-Dev Phase 59 | # ./playground/data_VT/eval/vqav2/answers_upload/vqav2_dev_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B.json 60 | # [{"test-dev": {"yes/no": 93.9, "number": 63.41, "other": 73.36, "overall": 80.69}}] 61 | -------------------------------------------------------------------------------- /scripts/LLaVA-VT/train/finetune_LLaVA-VT-13B.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # note: 4 | # initialized from LLaVA-13B-Pretrain 5 | # device: 8 A100 (80GB) 6 | # hyper-parameters: batch size = 16*8 = 128, learning rate = 2e-5, epoch = 1 7 | # training datasets: llava_instruct_mix665k_with_VT 8 | 9 | # usage: 10 | # mkdir -p scripts/log/LLaVA-VT 11 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/LLaVA-VT/train/finetune_LLaVA-VT-13B.sh 2>&1 | tee -a scripts/log/LLaVA-VT/finetune_LLaVA-VT-13B.txt 12 | 13 | deepspeed --include localhost:0,1,2,3,4,5,6,7 --master_port 29600 llava/train/train_mem.py \ 14 | --deepspeed ./scripts/zero3.json \ 15 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 16 | --version v1 \ 17 | --data_path ./playground/data_VT/train_LLaVA-VT/llava_instruct_mix665k_with_VT.json \ 18 | --image_folder ./playground/data \ 19 | --vision_tower openai/clip-vit-large-patch14-336 \ 20 | --pretrain_mm_mlp_adapter /path/to/liuhaotian/llava-v1.5-mlp2x-336px-pretrain-vicuna-13b-v1.5/mm_projector.bin \ 21 | --mm_projector_type mlp2x_gelu \ 22 | --mm_vision_select_layer -2 \ 23 | --mm_use_im_start_end False \ 24 | --mm_use_im_patch_token False \ 25 | --image_aspect_ratio pad \ 26 | --group_by_modality_length True \ 27 | --bf16 True \ 28 | --output_dir ./checkpoints/LLaVA-VT-13B \ 29 | --num_train_epochs 1 \ 30 | --per_device_train_batch_size 16 \ 31 | --per_device_eval_batch_size 4 \ 32 | --gradient_accumulation_steps 1 \ 33 | --evaluation_strategy "no" \ 34 | --save_strategy "steps" \ 35 | --save_steps 50000 \ 36 | --save_total_limit 1 \ 37 | --learning_rate 2e-5 \ 38 | --weight_decay 0. 
\ 39 | --warmup_ratio 0.03 \ 40 | --lr_scheduler_type "cosine" \ 41 | --logging_steps 1 \ 42 | --tf32 True \ 43 | --model_max_length 2048 \ 44 | --gradient_checkpointing True \ 45 | --dataloader_num_workers 4 \ 46 | --lazy_preprocess True \ 47 | --report_to wandb \ 48 | --run_name finetune_LLaVA-VT-13B 49 | -------------------------------------------------------------------------------- /scripts/VTGenerator/infer/eval_images_gen_vt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/infer/eval_images_gen_vt.sh VTGenerator-13B 5 | 6 | if [ $# -ne 1 ]; then 7 | echo "Usage: $0 " 8 | exit 1 9 | fi 10 | 11 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 12 | IFS=',' read -ra GPULIST <<< "$gpu_list" 13 | 14 | CHUNKS=${#GPULIST[@]} 15 | 16 | CKPT=$1 17 | 18 | # download eval datasets and their images first 19 | # following https://github.com/haotian-liu/LLaVA/blob/main/docs/Evaluation.md 20 | 21 | # for mmmu and mmvp, download dataset from their official repos 22 | 23 | # for mmbench, decode_base64_to_image 24 | # store images in ./playground/data/eval/mmbench/images/mmbench_dev_20230712 25 | python ./preprocess/mmbench/convert_mmbench_images.py \ 26 | --data_path "./playground/data/eval/mmbench/mmbench_dev_20230712.tsv" \ 27 | --image_path "./playground/data/eval/mmbench/images/mmbench_dev_20230712" 28 | 29 | SPLITS=( 30 | "mmvet_gen_vt" 31 | "gqa_gen_vt" 32 | "mmmu_gen_vt" 33 | "mmvp_gen_vt" 34 | "llavabench_gen_vt" 35 | "mmbench_gen_vt" 36 | "pope_gen_vt" 37 | "scienceqa_gen_vt" 38 | "textvqa_gen_vt" 39 | "vizwiz_gen_vt" 40 | "vqav2_gen_vt" 41 | ) 42 | 43 | # please modify image path 44 | IMAGE_FOLDERS=( 45 | "/path/to/mm-vet/images" 46 | "/path/to/GQA/raw/images" 47 | "/path/to/MMMU/images" 48 | "/path/to/MMVP/MMVP/images" 49 | "/path/to/llava-bench-in-the-wild/images" 50 | "./playground/data/eval/mmbench/images/mmbench_dev_20230712" 51 | "/path/to/coco/val2014" 52 | "/path/to/scienceqa/test" 53 | "/path/to/textvqa/train_images" 54 | "/path/to/vizwiz/test" 55 | "/path/to/vqav2/test2015" 56 | ) 57 | 58 | 59 | OUT="./playground/data_VT/eval_images_gen_vt" 60 | 61 | for ((i=0; i<${#SPLITS[@]}; i++)); do 62 | SPLIT=${SPLITS[$i]} 63 | IMAGE_FOLDER=${IMAGE_FOLDERS[$i]} 64 | 65 | echo $SPLIT 66 | echo ${IMAGE_FOLDER} 67 | 68 | mkdir -p ${OUT}/${SPLIT}/${CKPT} 69 | 70 | for IDX in $(seq 0 $((CHUNKS-1))); do 71 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa \ 72 | --model-path ./checkpoints/$CKPT \ 73 | --question-file $OUT/$SPLIT.jsonl \ 74 | --image-folder ${IMAGE_FOLDER} \ 75 | --answers-file ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl \ 76 | --num-chunks $CHUNKS \ 77 | --chunk-idx $IDX \ 78 | --temperature 0 \ 79 | --conv-mode vicuna_v1 & 80 | done 81 | 82 | wait 83 | 84 | output_file=${OUT}/${SPLIT}/${CKPT}/merge.jsonl 85 | 86 | # Clear out the output file if it exists. 87 | > "$output_file" 88 | 89 | # Loop through the indices and concatenate each file. 
90 | for IDX in $(seq 0 $((CHUNKS-1))); do 91 | cat ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl >> "$output_file" 92 | done 93 | done -------------------------------------------------------------------------------- /scripts/VTGenerator/infer/train_images_gen_vt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # note: please merge vg_images in one folder for quick inference 4 | # cp -r /path/to/VG_100K/* /path/to/vg_images 5 | # cp -r /path/to/VG_100K_2/* /path/to/vg_images 6 | 7 | # infer VTGenerator-13B on llava_instruct_mix665k 8 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_coco_gen_vt.sh VTGenerator-13B 9 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_ocrvqa_gen_vt.sh VTGenerator-13B 10 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_textcap_gen_vt.sh VTGenerator-13B 11 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_vg_gen_vt.sh VTGenerator-13B 12 | 13 | # merge the inference results & store VTGenerator-13B_VT_292k.json & store llava_instruct_mix665k_with_VT.json 14 | python ./scripts/VTGenerator/infer/train_images_gen_vt/merge_llava_instruct_mix665k_all_gen_vt.py \ 15 | --gen_VT_path './playground/data_VT/train_images_gen_vt/VTGenerator-13B_VT_292k.json' \ 16 | --llava_instruct_mix665k_path '/path/to/liuhaotian/LLaVA-Instruct-150K/llava_v1_5_mix665k.json' \ 17 | --image_path './playground/data' \ 18 | --llava_instruct_mix665k_with_VT './playground/data_VT/train_LLaVA-VT/llava_instruct_mix665k_with_VT.json' -------------------------------------------------------------------------------- /scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_coco_gen_vt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_coco_gen_vt.sh VTGenerator-13B 5 | 6 | if [ $# -ne 1 ]; then 7 | echo "Usage: $0 " 8 | exit 1 9 | fi 10 | 11 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 12 | IFS=',' read -ra GPULIST <<< "$gpu_list" 13 | 14 | CHUNKS=${#GPULIST[@]} 15 | 16 | CKPT=$1 17 | SPLIT="llava_instruct_mix665k_coco_gen_vt" 18 | OUT="./playground/data_VT/train_images_gen_vt" 19 | 20 | mkdir -p ${OUT}/${SPLIT}/${CKPT} 21 | 22 | for IDX in $(seq 0 $((CHUNKS-1))); do 23 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa \ 24 | --model-path ./checkpoints/$CKPT \ 25 | --question-file ./playground/data_VT/train_images_gen_vt/$SPLIT.jsonl \ 26 | --image-folder ./playground/data/coco/train2017 \ 27 | --answers-file ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl \ 28 | --num-chunks $CHUNKS \ 29 | --chunk-idx $IDX \ 30 | --temperature 0 \ 31 | --conv-mode vicuna_v1 & 32 | done 33 | 34 | wait 35 | 36 | output_file=${OUT}/${SPLIT}/${CKPT}/merge.jsonl 37 | 38 | # Clear out the output file if it exists. 39 | > "$output_file" 40 | 41 | # Loop through the indices and concatenate each file. 
42 | for IDX in $(seq 0 $((CHUNKS-1))); do 43 | cat ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl >> "$output_file" 44 | done -------------------------------------------------------------------------------- /scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_ocrvqa_gen_vt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_ocrvqa_gen_vt.sh VTGenerator-13B 5 | 6 | if [ $# -ne 1 ]; then 7 | echo "Usage: $0 " 8 | exit 1 9 | fi 10 | 11 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 12 | IFS=',' read -ra GPULIST <<< "$gpu_list" 13 | 14 | CHUNKS=${#GPULIST[@]} 15 | 16 | CKPT=$1 17 | SPLIT="llava_instruct_mix665k_ocrvqa_gen_vt" 18 | OUT="./playground/data_VT/train_images_gen_vt" 19 | 20 | mkdir -p ${OUT}/${SPLIT}/${CKPT} 21 | 22 | for IDX in $(seq 0 $((CHUNKS-1))); do 23 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa \ 24 | --model-path ./checkpoints/$CKPT \ 25 | --question-file ./playground/data_VT/train_images_gen_vt/$SPLIT.jsonl \ 26 | --image-folder ./playground/data/ocr_vqa/images \ 27 | --answers-file ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl \ 28 | --num-chunks $CHUNKS \ 29 | --chunk-idx $IDX \ 30 | --temperature 0 \ 31 | --conv-mode vicuna_v1 & 32 | done 33 | 34 | wait 35 | 36 | output_file=${OUT}/${SPLIT}/${CKPT}/merge.jsonl 37 | 38 | # Clear out the output file if it exists. 39 | > "$output_file" 40 | 41 | # Loop through the indices and concatenate each file. 42 | for IDX in $(seq 0 $((CHUNKS-1))); do 43 | cat ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl >> "$output_file" 44 | done -------------------------------------------------------------------------------- /scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_textcap_gen_vt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_textcap_gen_vt.sh VTGenerator-13B 5 | 6 | if [ $# -ne 1 ]; then 7 | echo "Usage: $0 " 8 | exit 1 9 | fi 10 | 11 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 12 | IFS=',' read -ra GPULIST <<< "$gpu_list" 13 | 14 | CHUNKS=${#GPULIST[@]} 15 | 16 | CKPT=$1 17 | SPLIT="llava_instruct_mix665k_textcap_gen_vt" 18 | OUT="./playground/data_VT/train_images_gen_vt" 19 | 20 | mkdir -p ${OUT}/${SPLIT}/${CKPT} 21 | 22 | for IDX in $(seq 0 $((CHUNKS-1))); do 23 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa \ 24 | --model-path ./checkpoints/$CKPT \ 25 | --question-file ./playground/data_VT/train_images_gen_vt/$SPLIT.jsonl \ 26 | --image-folder ./playground/data/textvqa/train_images \ 27 | --answers-file ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl \ 28 | --num-chunks $CHUNKS \ 29 | --chunk-idx $IDX \ 30 | --temperature 0 \ 31 | --conv-mode vicuna_v1 & 32 | done 33 | 34 | wait 35 | 36 | output_file=${OUT}/${SPLIT}/${CKPT}/merge.jsonl 37 | 38 | # Clear out the output file if it exists. 39 | > "$output_file" 40 | 41 | # Loop through the indices and concatenate each file. 
42 | for IDX in $(seq 0 $((CHUNKS-1))); do 43 | cat ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl >> "$output_file" 44 | done -------------------------------------------------------------------------------- /scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_vg_gen_vt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # usage: 4 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/infer/train_images_gen_vt/llava_instruct_mix665k_vg_gen_vt.sh VTGenerator-13B 5 | 6 | if [ $# -ne 1 ]; then 7 | echo "Usage: $0 " 8 | exit 1 9 | fi 10 | 11 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 12 | IFS=',' read -ra GPULIST <<< "$gpu_list" 13 | 14 | CHUNKS=${#GPULIST[@]} 15 | 16 | CKPT=$1 17 | SPLIT="llava_instruct_mix665k_vg_gen_vt" 18 | OUT="./playground/data/eval/train_images_gen_vt" 19 | 20 | mkdir -p ${OUT}/${SPLIT}/${CKPT} 21 | 22 | for IDX in $(seq 0 $((CHUNKS-1))); do 23 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa \ 24 | --model-path ./checkpoints/$CKPT \ 25 | --question-file ./playground/data/eval/train_images_gen_vt/$SPLIT.jsonl \ 26 | --image-folder /path/to/vg_images \ 27 | --answers-file ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl \ 28 | --num-chunks $CHUNKS \ 29 | --chunk-idx $IDX \ 30 | --temperature 0 \ 31 | --conv-mode vicuna_v1 & 32 | done 33 | 34 | wait 35 | 36 | output_file=${OUT}/${SPLIT}/${CKPT}/merge.jsonl 37 | 38 | # Clear out the output file if it exists. 39 | > "$output_file" 40 | 41 | # Loop through the indices and concatenate each file. 42 | for IDX in $(seq 0 $((CHUNKS-1))); do 43 | cat ${OUT}/${SPLIT}/${CKPT}/${CHUNKS}_${IDX}.jsonl >> "$output_file" 44 | done -------------------------------------------------------------------------------- /scripts/VTGenerator/train/finetune_VTGenerator-13B.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # note: 4 | # initialized from VTGenerator-Pretrained-13B 5 | # device: 8 A100 (80GB) 6 | # hyper-parameters: batch size = 8*2*8 = 128, learning rate = 2e-5, epoch = 3 7 | # training datasets: finetune_VTGenerator_gpt4v_VT_61k.json 8 | 9 | # usage: 10 | # mkdir -p scripts/log/VTGenerator 11 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/train/finetune_VTGenerator-13B.sh 2>&1 | tee -a scripts/log/VTGenerator/finetune_VTGenerator-13B.txt 12 | 13 | deepspeed --include localhost:0,1,2,3,4,5,6,7 --master_port 29600 llava/train/train_mem.py \ 14 | --deepspeed ./scripts/zero3.json \ 15 | --model_name_or_path HenryHZY/VTGenerator-Pretrained-13B \ 16 | --version v1 \ 17 | --data_path ./playground/data_VT/train_VTGenerator/finetune_VTGenerator_gpt4v_VT_61k.json \ 18 | --image_folder ./playground/data \ 19 | --vision_tower openai/clip-vit-large-patch14-336 \ 20 | --mm_projector_type mlp2x_gelu \ 21 | --mm_vision_select_layer -2 \ 22 | --mm_use_im_start_end False \ 23 | --mm_use_im_patch_token False \ 24 | --image_aspect_ratio pad \ 25 | --group_by_modality_length True \ 26 | --bf16 True \ 27 | --output_dir ./checkpoints/VTGenerator-13B \ 28 | --num_train_epochs 3 \ 29 | --per_device_train_batch_size 8 \ 30 | --per_device_eval_batch_size 4 \ 31 | --gradient_accumulation_steps 2 \ 32 | --evaluation_strategy "no" \ 33 | --save_strategy "steps" \ 34 | --save_steps 50000 \ 35 | --save_total_limit 1 \ 36 | --learning_rate 2e-5 \ 37 | --weight_decay 0. 
\ 38 | --warmup_ratio 0.03 \ 39 | --lr_scheduler_type "cosine" \ 40 | --logging_steps 1 \ 41 | --tf32 True \ 42 | --model_max_length 2048 \ 43 | --gradient_checkpointing True \ 44 | --dataloader_num_workers 4 \ 45 | --lazy_preprocess True \ 46 | --report_to wandb \ 47 | --run_name finetune_VTGenerator-13B -------------------------------------------------------------------------------- /scripts/VTGenerator/train/pretrain_VTGenerator-Pretrained-13B.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # note: 4 | # initialized from LLaVA-13B-Pretrain 5 | # device: 8 A100 (80GB) 6 | # hyper-parameters: batch size = 16*8 = 128, learning rate = 2e-5, epoch = 1 7 | # training datasets: pretrain_VTGenerator_llava_instruct_mix199k.json 8 | 9 | # usage: 10 | # mkdir -p scripts/log/VTGenerator 11 | # CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/VTGenerator/train/pretrain_VTGenerator-Pretrained-13B.sh 2>&1 | tee -a scripts/log/VTGenerator/pretrain_VTGenerator-Pretrained-13B.txt 12 | 13 | deepspeed --include localhost:0,1,2,3,4,5,6,7 --master_port 29600 llava/train/train_mem.py \ 14 | --deepspeed ./scripts/zero3.json \ 15 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 16 | --version v1 \ 17 | --data_path ./playground/data_VT/train_VTGenerator/pretrain_VTGenerator_llava_instruct_mix199k.json \ 18 | --image_folder ./playground/data \ 19 | --vision_tower openai/clip-vit-large-patch14-336 \ 20 | --pretrain_mm_mlp_adapter /path/to/liuhaotian/llava-v1.5-mlp2x-336px-pretrain-vicuna-13b-v1.5/mm_projector.bin \ 21 | --mm_projector_type mlp2x_gelu \ 22 | --mm_vision_select_layer -2 \ 23 | --mm_use_im_start_end False \ 24 | --mm_use_im_patch_token False \ 25 | --image_aspect_ratio pad \ 26 | --group_by_modality_length True \ 27 | --bf16 True \ 28 | --output_dir ./checkpoints/VTGenerator-Pretrained-13B \ 29 | --num_train_epochs 1 \ 30 | --per_device_train_batch_size 16 \ 31 | --per_device_eval_batch_size 4 \ 32 | --gradient_accumulation_steps 1 \ 33 | --evaluation_strategy "no" \ 34 | --save_strategy "steps" \ 35 | --save_steps 50000 \ 36 | --save_total_limit 1 \ 37 | --learning_rate 2e-5 \ 38 | --weight_decay 0. 
\ 39 | --warmup_ratio 0.03 \ 40 | --lr_scheduler_type "cosine" \ 41 | --logging_steps 1 \ 42 | --tf32 True \ 43 | --model_max_length 2048 \ 44 | --gradient_checkpointing True \ 45 | --dataloader_num_workers 4 \ 46 | --lazy_preprocess True \ 47 | --report_to wandb \ 48 | --run_name pretrain_VTGenerator-Pretrained-13B 49 | -------------------------------------------------------------------------------- /scripts/convert_gqa_for_eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument("--src", type=str) 7 | parser.add_argument("--dst", type=str) 8 | args = parser.parse_args() 9 | 10 | all_answers = [] 11 | for line_idx, line in enumerate(open(args.src)): 12 | res = json.loads(line) 13 | question_id = res['question_id'] 14 | text = res['text'].rstrip('.').lower() 15 | all_answers.append({"questionId": question_id, "prediction": text}) 16 | 17 | with open(args.dst, 'w') as f: 18 | json.dump(all_answers, f) 19 | -------------------------------------------------------------------------------- /scripts/convert_mmbench_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | import pandas as pd 5 | 6 | def get_args(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("--annotation-file", type=str, required=True) 9 | parser.add_argument("--result-dir", type=str, required=True) 10 | parser.add_argument("--upload-dir", type=str, required=True) 11 | parser.add_argument("--experiment", type=str, required=True) 12 | 13 | return parser.parse_args() 14 | 15 | if __name__ == "__main__": 16 | args = get_args() 17 | 18 | df = pd.read_table(args.annotation_file) 19 | 20 | cur_df = df.copy() 21 | cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category']) 22 | cur_df.insert(6, 'prediction', None) 23 | for pred in open(os.path.join(args.result_dir, f"{args.experiment}.jsonl")): 24 | pred = json.loads(pred) 25 | cur_df.loc[df['index'] == pred['question_id'], 'prediction'] = pred['text'] 26 | 27 | cur_df.to_excel(os.path.join(args.upload_dir, f"{args.experiment}.xlsx"), index=False, engine='openpyxl') 28 | -------------------------------------------------------------------------------- /scripts/convert_mmvet_for_eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument("--src", type=str) 7 | parser.add_argument("--dst", type=str) 8 | args = parser.parse_args() 9 | 10 | cur_result = {} 11 | 12 | for line in open(args.src): 13 | data = json.loads(line) 14 | qid = data['question_id'] 15 | cur_result[f'v1_{qid}'] = data['text'] 16 | 17 | with open(args.dst, 'w') as f: 18 | json.dump(cur_result, f, indent=2) 19 | -------------------------------------------------------------------------------- /scripts/convert_seed_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | 6 | def get_args(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("--annotation-file", type=str) 9 | parser.add_argument("--result-file", type=str) 10 | parser.add_argument("--result-upload-file", type=str) 11 | return parser.parse_args() 12 | 13 | 14 | def eval_single(result_file, 
eval_only_type=None): 15 | results = {} 16 | for line in open(result_file): 17 | row = json.loads(line) 18 | results[row['question_id']] = row 19 | 20 | type_counts = {} 21 | correct_counts = {} 22 | for question_data in data['questions']: 23 | if eval_only_type is not None and question_data['data_type'] != eval_only_type: continue 24 | data_type = question_data['question_type_id'] 25 | type_counts[data_type] = type_counts.get(data_type, 0) + 1 26 | try: 27 | question_id = int(question_data['question_id']) 28 | except: 29 | question_id = question_data['question_id'] 30 | if question_id not in results: 31 | correct_counts[data_type] = correct_counts.get(data_type, 0) 32 | continue 33 | row = results[question_id] 34 | if row['text'] == question_data['answer']: 35 | correct_counts[data_type] = correct_counts.get(data_type, 0) + 1 36 | 37 | total_count = 0 38 | total_correct = 0 39 | for data_type in sorted(type_counts.keys()): 40 | accuracy = correct_counts[data_type] / type_counts[data_type] * 100 41 | if eval_only_type is None: 42 | print(f"{ques_type_id_to_name[data_type]}: {accuracy:.2f}%") 43 | 44 | total_count += type_counts[data_type] 45 | total_correct += correct_counts[data_type] 46 | 47 | total_accuracy = total_correct / total_count * 100 48 | if eval_only_type is None: 49 | print(f"Total accuracy: {total_accuracy:.2f}%") 50 | else: 51 | print(f"{eval_only_type} accuracy: {total_accuracy:.2f}%") 52 | 53 | return results 54 | 55 | if __name__ == "__main__": 56 | args = get_args() 57 | data = json.load(open(args.annotation_file)) 58 | ques_type_id_to_name = {id:n for n,id in data['question_type'].items()} 59 | 60 | results = eval_single(args.result_file) 61 | eval_single(args.result_file, eval_only_type='image') 62 | eval_single(args.result_file, eval_only_type='video') 63 | 64 | with open(args.result_upload_file, 'w') as fp: 65 | for question in data['questions']: 66 | qid = question['question_id'] 67 | if qid in results: 68 | result = results[qid] 69 | else: 70 | result = results[int(qid)] 71 | fp.write(json.dumps({ 72 | 'question_id': qid, 73 | 'prediction': result['text'] 74 | }) + '\n') 75 | -------------------------------------------------------------------------------- /scripts/convert_sqa_to_llava.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import fire 4 | import re 5 | from convert_sqa_to_llava_base_prompt import build_prompt_chatbot 6 | 7 | 8 | def convert_to_llava(base_dir, split, prompt_format="QCM-LEA"): 9 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split] 10 | problems = json.load(open(os.path.join(base_dir, "problems.json"))) 11 | 12 | split_problems = build_prompt_chatbot( 13 | problems, split_indices, prompt_format, 14 | use_caption=False, is_test=False) 15 | 16 | target_format = [] 17 | for prob_id, (input, output) in split_problems.items(): 18 | if input.startswith('Question: '): 19 | input = input.replace('Question: ', '') 20 | if output.startswith('Answer: '): 21 | output = output.replace('Answer: ', '') 22 | 23 | raw_prob_data = problems[prob_id] 24 | if raw_prob_data['image'] is None: 25 | target_format.append({ 26 | "id": prob_id, 27 | "conversations": [ 28 | {'from': 'human', 'value': f"{input}"}, 29 | {'from': 'gpt', 'value': f"{output}"}, 30 | ], 31 | }) 32 | 33 | else: 34 | target_format.append({ 35 | "id": prob_id, 36 | "image": os.path.join(prob_id, raw_prob_data['image']), 37 | "conversations": [ 38 | {'from': 'human', 'value': f"{input}\n"}, 39 | 
{'from': 'gpt', 'value': f"{output}"}, 40 | ], 41 | }) 42 | 43 | print(f'Number of samples: {len(target_format)}') 44 | 45 | with open(os.path.join(base_dir, f"llava_{split}_{prompt_format}.json"), "w") as f: 46 | json.dump(target_format, f, indent=2) 47 | 48 | 49 | def convert_to_jsonl(base_dir, split, prompt_format="QCM-LEPA"): 50 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split] 51 | problems = json.load(open(os.path.join(base_dir, "problems.json"))) 52 | 53 | split_problems = build_prompt_chatbot( 54 | problems, split_indices, prompt_format, 55 | use_caption=False, is_test=False) 56 | 57 | writer = open(os.path.join(base_dir, f"scienceqa_{split}_{prompt_format}.jsonl"), "w") 58 | for prob_id, (input, output) in split_problems.items(): 59 | if input.startswith('Question: '): 60 | input = input.replace('Question: ', '') 61 | if output.startswith('Answer: '): 62 | output = output.replace('Answer: ', '') 63 | 64 | raw_prob_data = problems[prob_id] 65 | if raw_prob_data['image'] is None: 66 | data = { 67 | "id": prob_id, 68 | "instruction": f"{input}", 69 | "output": f"{output}", 70 | } 71 | 72 | else: 73 | data = { 74 | "id": prob_id, 75 | "image": os.path.join(prob_id, raw_prob_data['image']), 76 | "instruction": f"{input}\n", 77 | "output": f"{output}", 78 | } 79 | writer.write(json.dumps(data) + '\n') 80 | writer.close() 81 | 82 | 83 | def main(task, **kwargs): 84 | globals()[task](**kwargs) 85 | 86 | 87 | if __name__ == "__main__": 88 | fire.Fire(main) 89 | -------------------------------------------------------------------------------- /scripts/convert_vizwiz_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import json 4 | 5 | from llava.eval.m4c_evaluator import EvalAIAnswerProcessor 6 | 7 | 8 | def parse_args(): 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('--annotation-file', type=str, required=True) 11 | parser.add_argument('--result-file', type=str, required=True) 12 | parser.add_argument('--result-upload-file', type=str, required=True) 13 | return parser.parse_args() 14 | 15 | 16 | if __name__ == '__main__': 17 | 18 | args = parse_args() 19 | 20 | os.makedirs(os.path.dirname(args.result_upload_file), exist_ok=True) 21 | 22 | results = [] 23 | error_line = 0 24 | for line_idx, line in enumerate(open(args.result_file)): 25 | try: 26 | results.append(json.loads(line)) 27 | except: 28 | error_line += 1 29 | results = {x['question_id']: x['text'] for x in results} 30 | test_split = [json.loads(line) for line in open(args.annotation_file)] 31 | split_ids = set([x['question_id'] for x in test_split]) 32 | 33 | print(f'total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}') 34 | 35 | all_answers = [] 36 | 37 | answer_processor = EvalAIAnswerProcessor() 38 | 39 | for x in test_split: 40 | assert x['question_id'] in results 41 | all_answers.append({ 42 | 'image': x['image'], 43 | 'answer': answer_processor(results[x['question_id']]) 44 | }) 45 | 46 | with open(args.result_upload_file, 'w') as f: 47 | json.dump(all_answers, f) 48 | -------------------------------------------------------------------------------- /scripts/convert_vqav2_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import json 4 | 5 | from llava.eval.m4c_evaluator import EvalAIAnswerProcessor 6 | 7 | 8 | def parse_args(): 9 | parser = argparse.ArgumentParser() 10 
| parser.add_argument('--dir', type=str, default="./playground/data/eval/vqav2") 11 | parser.add_argument('--ckpt', type=str, required=True) 12 | parser.add_argument('--split', type=str, required=True) 13 | return parser.parse_args() 14 | 15 | 16 | if __name__ == '__main__': 17 | 18 | args = parse_args() 19 | 20 | src = os.path.join(args.dir, 'answers', args.split, args.ckpt, 'merge.jsonl') 21 | test_split = os.path.join(args.dir, 'llava_vqav2_mscoco_test2015.jsonl') 22 | dst = os.path.join(args.dir, 'answers_upload', args.split, f'{args.ckpt}.json') 23 | os.makedirs(os.path.dirname(dst), exist_ok=True) 24 | 25 | results = [] 26 | error_line = 0 27 | for line_idx, line in enumerate(open(src)): 28 | try: 29 | results.append(json.loads(line)) 30 | except: 31 | error_line += 1 32 | 33 | results = {x['question_id']: x['text'] for x in results} 34 | test_split = [json.loads(line) for line in open(test_split)] 35 | split_ids = set([x['question_id'] for x in test_split]) 36 | 37 | print(f'total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}') 38 | 39 | all_answers = [] 40 | 41 | answer_processor = EvalAIAnswerProcessor() 42 | 43 | for x in test_split: 44 | if x['question_id'] not in results: 45 | all_answers.append({ 46 | 'question_id': x['question_id'], 47 | 'answer': '' 48 | }) 49 | else: 50 | all_answers.append({ 51 | 'question_id': x['question_id'], 52 | 'answer': answer_processor(results[x['question_id']]) 53 | }) 54 | 55 | with open(dst, 'w') as f: 56 | json.dump(all_answers, f) 57 | -------------------------------------------------------------------------------- /scripts/extract_mm_projector.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is just a utility that I use to extract the projector for quantized models. 3 | It is NOT necessary at all to train, or run inference/serve demos. 4 | Use this script ONLY if you fully understand its implications. 5 | """ 6 | 7 | 8 | import os 9 | import argparse 10 | import torch 11 | import json 12 | from collections import defaultdict 13 | 14 | 15 | def parse_args(): 16 | parser = argparse.ArgumentParser(description='Extract MMProjector weights') 17 | parser.add_argument('--model-path', type=str, help='model folder') 18 | parser.add_argument('--output', type=str, help='output file') 19 | args = parser.parse_args() 20 | return args 21 | 22 | 23 | if __name__ == '__main__': 24 | args = parse_args() 25 | 26 | keys_to_match = ['mm_projector'] 27 | ckpt_to_key = defaultdict(list) 28 | try: 29 | model_indices = json.load(open(os.path.join(args.model_path, 'pytorch_model.bin.index.json'))) 30 | for k, v in model_indices['weight_map'].items(): 31 | if any(key_match in k for key_match in keys_to_match): 32 | ckpt_to_key[v].append(k) 33 | except FileNotFoundError: 34 | # Smaller models or model checkpoints saved by DeepSpeed.
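        # No shard index was found, so fall back to a single-file checkpoint: load
        # pytorch_model.bin directly and collect the keys matching the projector filter above.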
35 | v = 'pytorch_model.bin' 36 | for k in torch.load(os.path.join(args.model_path, v), map_location='cpu').keys(): 37 | if any(key_match in k for key_match in keys_to_match): 38 | ckpt_to_key[v].append(k) 39 | 40 | loaded_weights = {} 41 | 42 | for ckpt_name, weight_keys in ckpt_to_key.items(): 43 | ckpt = torch.load(os.path.join(args.model_path, ckpt_name), map_location='cpu') 44 | for k in weight_keys: 45 | loaded_weights[k] = ckpt[k] 46 | 47 | torch.save(loaded_weights, args.output) 48 | -------------------------------------------------------------------------------- /scripts/finetune.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | ################## VICUNA ################## 8 | # PROMPT_VERSION=v1 9 | # MODEL_VERSION="vicuna-v1-3-7b" 10 | ################## VICUNA ################## 11 | 12 | ################## LLaMA-2 ################## 13 | # PROMPT_VERSION="llava_llama_2" 14 | # MODEL_VERSION="llama-2-7b-chat" 15 | ################## LLaMA-2 ################## 16 | 17 | deepspeed llava/train/train_mem.py \ 18 | --deepspeed ./scripts/zero2.json \ 19 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 20 | --version $PROMPT_VERSION \ 21 | --data_path ./playground/data/llava_instruct_80k.json \ 22 | --image_folder /path/to/coco/train2017 \ 23 | --vision_tower openai/clip-vit-large-patch14 \ 24 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \ 25 | --mm_vision_select_layer -2 \ 26 | --mm_use_im_start_end False \ 27 | --mm_use_im_patch_token False \ 28 | --bf16 True \ 29 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune \ 30 | --num_train_epochs 1 \ 31 | --per_device_train_batch_size 16 \ 32 | --per_device_eval_batch_size 4 \ 33 | --gradient_accumulation_steps 1 \ 34 | --evaluation_strategy "no" \ 35 | --save_strategy "steps" \ 36 | --save_steps 50000 \ 37 | --save_total_limit 1 \ 38 | --learning_rate 2e-5 \ 39 | --weight_decay 0. \ 40 | --warmup_ratio 0.03 \ 41 | --lr_scheduler_type "cosine" \ 42 | --logging_steps 1 \ 43 | --tf32 True \ 44 | --model_max_length 2048 \ 45 | --gradient_checkpointing True \ 46 | --dataloader_num_workers 4 \ 47 | --lazy_preprocess True \ 48 | --report_to wandb 49 | -------------------------------------------------------------------------------- /scripts/finetune_full_schedule.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 
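# Compared to scripts/finetune.sh, this "full schedule" variant trains on the larger
# llava_instruct_158k.json mix for 3 epochs instead of llava_instruct_80k.json for 1 epoch.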
4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | ################## VICUNA ################## 8 | # PROMPT_VERSION=v1 9 | # MODEL_VERSION="vicuna-v1-3-7b" 10 | ################## VICUNA ################## 11 | 12 | ################## LLaMA-2 ################## 13 | # PROMPT_VERSION="llava_llama_2" 14 | # MODEL_VERSION="llama-2-7b-chat" 15 | ################## LLaMA-2 ################## 16 | 17 | deepspeed llava/train/train_mem.py \ 18 | --deepspeed ./scripts/zero2.json \ 19 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 20 | --version $PROMPT_VERSION \ 21 | --data_path ./playground/data/llava_instruct_158k.json \ 22 | --image_folder /path/to/coco/train2017 \ 23 | --vision_tower openai/clip-vit-large-patch14 \ 24 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \ 25 | --mm_vision_select_layer -2 \ 26 | --mm_use_im_start_end False \ 27 | --mm_use_im_patch_token False \ 28 | --bf16 True \ 29 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune \ 30 | --num_train_epochs 3 \ 31 | --per_device_train_batch_size 16 \ 32 | --per_device_eval_batch_size 4 \ 33 | --gradient_accumulation_steps 1 \ 34 | --evaluation_strategy "no" \ 35 | --save_strategy "steps" \ 36 | --save_steps 50000 \ 37 | --save_total_limit 1 \ 38 | --learning_rate 2e-5 \ 39 | --weight_decay 0. \ 40 | --warmup_ratio 0.03 \ 41 | --lr_scheduler_type "cosine" \ 42 | --logging_steps 1 \ 43 | --tf32 True \ 44 | --model_max_length 2048 \ 45 | --gradient_checkpointing True \ 46 | --dataloader_num_workers 4 \ 47 | --lazy_preprocess True \ 48 | --report_to wandb 49 | -------------------------------------------------------------------------------- /scripts/finetune_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | ################## VICUNA ################## 8 | # PROMPT_VERSION=v1 9 | # MODEL_VERSION="vicuna-v1-3-7b" 10 | ################## VICUNA ################## 11 | 12 | ################## LLaMA-2 ################## 13 | # PROMPT_VERSION="llava_llama_2" 14 | # MODEL_VERSION="llama-2-7b-chat" 15 | ################## LLaMA-2 ################## 16 | 17 | deepspeed llava/train/train_mem.py \ 18 | --deepspeed ./scripts/zero2.json \ 19 | --lora_enable True \ 20 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 21 | --version $PROMPT_VERSION \ 22 | --data_path ./playground/data/llava_instruct_80k.json \ 23 | --image_folder /path/to/coco/train2017 \ 24 | --vision_tower openai/clip-vit-large-patch14 \ 25 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \ 26 | --mm_vision_select_layer -2 \ 27 | --mm_use_im_start_end False \ 28 | --mm_use_im_patch_token False \ 29 | --bf16 True \ 30 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune_lora \ 31 | --num_train_epochs 1 \ 32 | --per_device_train_batch_size 16 \ 33 | --per_device_eval_batch_size 4 \ 34 | --gradient_accumulation_steps 1 \ 35 | --evaluation_strategy "no" \ 36 | --save_strategy "steps" \ 37 | --save_steps 50000 \ 38 | --save_total_limit 1 \ 39 | --learning_rate 2e-5 \ 40 | --weight_decay 0. 
\ 41 | --warmup_ratio 0.03 \ 42 | --lr_scheduler_type "cosine" \ 43 | --logging_steps 1 \ 44 | --tf32 True \ 45 | --model_max_length 2048 \ 46 | --gradient_checkpointing True \ 47 | --lazy_preprocess True \ 48 | --dataloader_num_workers 4 \ 49 | --report_to wandb 50 | -------------------------------------------------------------------------------- /scripts/finetune_qlora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | ################## VICUNA ################## 8 | # PROMPT_VERSION=v1 9 | # MODEL_VERSION="vicuna-v1-3-7b" 10 | ################## VICUNA ################## 11 | 12 | ################## LLaMA-2 ################## 13 | # PROMPT_VERSION="llava_llama_2" 14 | # MODEL_VERSION="llama-2-7b-chat" 15 | ################## LLaMA-2 ################## 16 | 17 | deepspeed llava/train/train_mem.py \ 18 | --deepspeed ./scripts/zero2.json \ 19 | --lora_enable True \ 20 | --bits 4 \ 21 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 22 | --version $PROMPT_VERSION \ 23 | --data_path ./playground/data/llava_instruct_80k.json \ 24 | --image_folder /path/to/coco/train2017 \ 25 | --vision_tower openai/clip-vit-large-patch14 \ 26 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \ 27 | --mm_vision_select_layer -2 \ 28 | --mm_use_im_start_end False \ 29 | --mm_use_im_patch_token False \ 30 | --bf16 True \ 31 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune_lora \ 32 | --num_train_epochs 1 \ 33 | --per_device_train_batch_size 16 \ 34 | --per_device_eval_batch_size 4 \ 35 | --gradient_accumulation_steps 1 \ 36 | --evaluation_strategy "no" \ 37 | --save_strategy "steps" \ 38 | --save_steps 50000 \ 39 | --save_total_limit 1 \ 40 | --learning_rate 2e-5 \ 41 | --weight_decay 0. \ 42 | --warmup_ratio 0.03 \ 43 | --lr_scheduler_type "cosine" \ 44 | --logging_steps 1 \ 45 | --tf32 True \ 46 | --model_max_length 2048 \ 47 | --gradient_checkpointing True \ 48 | --lazy_preprocess True \ 49 | --dataloader_num_workers 4 \ 50 | --report_to wandb 51 | -------------------------------------------------------------------------------- /scripts/finetune_sqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 
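# Note: PROMPT_VERSION is not set below; export it to match the Vicuna base model
# before launching, e.g.:
#   PROMPT_VERSION=v1 bash scripts/finetune_sqa.sh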
4 | 5 | deepspeed llava/train/train_mem.py \ 6 | --deepspeed ./scripts/zero2.json \ 7 | --model_name_or_path lmsys/vicuna-13b-v1.3 \ 8 | --version $PROMPT_VERSION \ 9 | --data_path /Data/ScienceQA/data/scienceqa/llava_train_QCM-LEA.json \ 10 | --image_folder /Data/ScienceQA/data/scienceqa/images/train \ 11 | --vision_tower openai/clip-vit-large-patch14 \ 12 | --pretrain_mm_mlp_adapter ./checkpoints/huggingface/liuhaotian/llava-pretrain-vicuna-13b-v1.3/mm_projector.bin \ 13 | --mm_vision_select_layer -2 \ 14 | --mm_use_im_start_end False \ 15 | --mm_use_im_patch_token False \ 16 | --bf16 True \ 17 | --output_dir ./checkpoints/llava-vicuna-13b-v1.3-pretrain_lcs558k_plain-ScienceQA_QCM_LEA-12e \ 18 | --num_train_epochs 12 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 4 \ 21 | --gradient_accumulation_steps 1 \ 22 | --evaluation_strategy "no" \ 23 | --save_strategy "steps" \ 24 | --save_steps 50000 \ 25 | --save_total_limit 1 \ 26 | --learning_rate 2e-5 \ 27 | --weight_decay 0. \ 28 | --warmup_ratio 0.03 \ 29 | --lr_scheduler_type "cosine" \ 30 | --logging_steps 1 \ 31 | --tf32 True \ 32 | --model_max_length 2048 \ 33 | --gradient_checkpointing True \ 34 | --dataloader_num_workers 4 \ 35 | --lazy_preprocess True \ 36 | --report_to wandb 37 | -------------------------------------------------------------------------------- /scripts/gpt_eval/multi_gpt_eval_gqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | dataset="gqa" 4 | pred_paths=( 5 | # "playground/data/eval/gqa/answers/gqa_with_vt_cap/llava13b_mix177k_with_vt_cap/gqa_with_vt_cap_llava13b_mix177k_with_vt_cap.jsonl" 6 | ) 7 | 8 | mkdir -p scripts/log/gpt_eval 9 | 10 | for pred_path in "${pred_paths[@]}"; do 11 | log_file="scripts/log/gpt_eval/$(basename "$pred_path" .jsonl).txt" 12 | command="python preprocess/gpt_eval/gpt_eval_vqa.py --dataset $dataset --pred_path $pred_path 2>&1 | tee -a $log_file" 13 | echo "$command" 14 | eval "$command" 15 | wait 16 | done -------------------------------------------------------------------------------- /scripts/gpt_eval/multi_gpt_eval_mmmu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | dataset="mmmu" 4 | pred_paths=( 5 | "./playground/data_VT/eval/mmmu/answers/mmmu_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B/mmmu_with_VTGenerator-13B_gen_vt_LLaVA-VT-13B.jsonl" 6 | ) 7 | 8 | mkdir -p scripts/log/gpt_eval 9 | 10 | for pred_path in "${pred_paths[@]}"; do 11 | log_file="scripts/log/gpt_eval/$(basename "$pred_path" .jsonl).txt" 12 | command="python preprocess/gpt_eval/gpt_eval_vqa.py --dataset $dataset --pred_path $pred_path 2>&1 | tee -a $log_file" 13 | echo "$command" 14 | eval "$command" 15 | wait 16 | done 17 | 18 | # Input pred jsonl: ./playground/data_VT/eval/mmmu/answers/mmmu_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B/mmmu_with_VTGenerator-13B_gen_vt_LLaVA-VT-13B.jsonl 19 | # Ground-truth jsonl: ./playground/data_VT/gpt_eval/mmmu/mmmu.jsonl 20 | # Output eval json files: ./playground/data_VT/eval/mmmu/answers/mmmu_with_VTGenerator-13B_gen_vt/LLaVA-VT-13B/gpt_eval_gpt-3.5-turbo-1106 21 | # We will evaluate 855 question-answer pairs! 22 | # completed_files: 855 23 | # incomplete_files: 0 24 | # All evaluation completed! 
25 | # Yes count: 358 26 | # No count: 497 27 | # Accuracy: 0.41871345029239765 28 | # Average score: 2.8526315789473684 -------------------------------------------------------------------------------- /scripts/gpt_eval/multi_gpt_eval_mmvp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | dataset="mmvp" 4 | pred_paths=( 5 | # "playground/data/eval/mmvp/answers/mmvp_with_vt_cap/llava13b_mix177k_with_vt_cap/mmvp_with_vt_cap_llava13b_mix177k_with_vt_cap.jsonl" 6 | ) 7 | 8 | mkdir -p scripts/log/gpt_eval 9 | 10 | for pred_path in "${pred_paths[@]}"; do 11 | log_file="scripts/log/gpt_eval/$(basename "$pred_path" .jsonl).txt" 12 | command="python preprocess/gpt_eval/gpt_eval_mmvp.py --pred_path $pred_path 2>&1 | tee -a $log_file" 13 | echo "$command" 14 | eval "$command" 15 | wait 16 | done -------------------------------------------------------------------------------- /scripts/merge_lora_weights.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from llava.model.builder import load_pretrained_model 3 | from llava.mm_utils import get_model_name_from_path 4 | 5 | 6 | def merge_lora(args): 7 | model_name = get_model_name_from_path(args.model_path) 8 | tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, device_map='cpu') 9 | 10 | model.save_pretrained(args.save_model_path) 11 | tokenizer.save_pretrained(args.save_model_path) 12 | 13 | 14 | if __name__ == "__main__": 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("--model-path", type=str, required=True) 17 | parser.add_argument("--model-base", type=str, required=True) 18 | parser.add_argument("--save-model-path", type=str, required=True) 19 | 20 | args = parser.parse_args() 21 | 22 | merge_lora(args) 23 | -------------------------------------------------------------------------------- /scripts/pretrain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | # MODEL_VERSION=vicuna-v1-3-7b 8 | # MODEL_VERSION=llama-2-7b-chat 9 | 10 | ########### DO NOT CHANGE ########### 11 | ########### USE THIS FOR BOTH ########### 12 | PROMPT_VERSION=plain 13 | ########### DO NOT CHANGE ########### 14 | 15 | deepspeed llava/train/train_mem.py \ 16 | --deepspeed ./scripts/zero2.json \ 17 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 18 | --version $PROMPT_VERSION \ 19 | --data_path /path/to/pretrain_data.json \ 20 | --image_folder /path/to/images \ 21 | --vision_tower openai/clip-vit-large-patch14 \ 22 | --tune_mm_mlp_adapter True \ 23 | --mm_vision_select_layer -2 \ 24 | --mm_use_im_start_end False \ 25 | --mm_use_im_patch_token False \ 26 | --bf16 True \ 27 | --output_dir ./checkpoints/llava-$MODEL_VERSION-pretrain \ 28 | --num_train_epochs 1 \ 29 | --per_device_train_batch_size 16 \ 30 | --per_device_eval_batch_size 4 \ 31 | --gradient_accumulation_steps 1 \ 32 | --evaluation_strategy "no" \ 33 | --save_strategy "steps" \ 34 | --save_steps 24000 \ 35 | --save_total_limit 1 \ 36 | --learning_rate 2e-3 \ 37 | --weight_decay 0. 
\ 38 | --warmup_ratio 0.03 \ 39 | --lr_scheduler_type "cosine" \ 40 | --logging_steps 1 \ 41 | --tf32 True \ 42 | --model_max_length 2048 \ 43 | --gradient_checkpointing True \ 44 | --dataloader_num_workers 4 \ 45 | --lazy_preprocess True \ 46 | --report_to wandb 47 | -------------------------------------------------------------------------------- /scripts/pretrain_xformers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Uncomment and set the following variables correspondingly to run this script: 4 | 5 | # MODEL_VERSION=vicuna-v1-3-7b 6 | # MODEL_VERSION=llama-2-7b-chat 7 | 8 | ########### DO NOT CHANGE ########### 9 | ########### USE THIS FOR BOTH ########### 10 | PROMPT_VERSION=plain 11 | ########### DO NOT CHANGE ########### 12 | 13 | deepspeed llava/train/train_xformers.py \ 14 | --deepspeed ./scripts/zero2.json \ 15 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 16 | --version $PROMPT_VERSION \ 17 | --data_path /path/to/pretrain_data.json \ 18 | --image_folder /path/to/images \ 19 | --vision_tower openai/clip-vit-large-patch14 \ 20 | --tune_mm_mlp_adapter True \ 21 | --mm_vision_select_layer -2 \ 22 | --mm_use_im_start_end False \ 23 | --mm_use_im_patch_token False \ 24 | --bf16 False \ 25 | --output_dir ./checkpoints/llava-$MODEL_VERSION-pretrain \ 26 | --num_train_epochs 1 \ 27 | --per_device_train_batch_size 4 \ 28 | --per_device_eval_batch_size 4 \ 29 | --gradient_accumulation_steps 4 \ 30 | --evaluation_strategy "no" \ 31 | --save_strategy "steps" \ 32 | --save_steps 24000 \ 33 | --save_total_limit 1 \ 34 | --learning_rate 2e-3 \ 35 | --weight_decay 0. \ 36 | --warmup_ratio 0.03 \ 37 | --lr_scheduler_type "cosine" \ 38 | --logging_steps 1 \ 39 | --tf32 False \ 40 | --model_max_length 2048 \ 41 | --gradient_checkpointing True \ 42 | --dataloader_num_workers 4 \ 43 | --lazy_preprocess True \ 44 | --report_to wandb 45 | -------------------------------------------------------------------------------- /scripts/sqa_eval_batch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CHUNKS=8 4 | for IDX in {0..7}; do 5 | CUDA_VISIBLE_DEVICES=$IDX python -m llava.eval.model_vqa_science \ 6 | --model-path liuhaotian/llava-lcs558k-scienceqa-vicuna-13b-v1.3 \ 7 | --question-file ~/haotian/datasets/ScienceQA/data/scienceqa/llava_test_QCM-LEA.json \ 8 | --image-folder ~/haotian/datasets/ScienceQA/data/scienceqa/images/test \ 9 | --answers-file ./test_llava-13b-chunk$CHUNKS_$IDX.jsonl \ 10 | --num-chunks $CHUNKS \ 11 | --chunk-idx $IDX \ 12 | --conv-mode llava_v1 & 13 | done 14 | -------------------------------------------------------------------------------- /scripts/sqa_eval_gather.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CHUNKS=8 4 | output_file="test_llava-13b.jsonl" 5 | 6 | # Clear out the output file if it exists. 7 | > "$output_file" 8 | 9 | # Loop through the indices and concatenate each file. 
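# sqa_eval_batch.sh writes each GPU's answers to ./test_llava-13b-chunk$CHUNKS_$IDX.jsonl;
# because $CHUNKS_ is an unset variable that expands to nothing, the files are named
# test_llava-13b-chunk0.jsonl ... test_llava-13b-chunk7.jsonl, which is what this loop expects.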
10 | for idx in $(seq 0 $((CHUNKS-1))); do 11 | cat "./test_llava-13b-chunk${idx}.jsonl" >> "$output_file" 12 | done 13 | 14 | python llava/eval/eval_science_qa.py \ 15 | --base-dir ~/haotian/datasets/ScienceQA/data/scienceqa \ 16 | --result-file ./test_llava-13b.jsonl \ 17 | --output-file ./test_llava-13b_output.json \ 18 | --output-result ./test_llava-13b_result.json 19 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/gqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="llava-v1.5-13b" 9 | SPLIT="llava_gqa_testdev_balanced" 10 | GQADIR="./playground/data/eval/gqa/data" 11 | 12 | for IDX in $(seq 0 $((CHUNKS-1))); do 13 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 14 | --model-path liuhaotian/llava-v1.5-13b \ 15 | --question-file ./playground/data/eval/gqa/$SPLIT.jsonl \ 16 | --image-folder ./playground/data/eval/gqa/data/images \ 17 | --answers-file ./playground/data/eval/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \ 18 | --num-chunks $CHUNKS \ 19 | --chunk-idx $IDX \ 20 | --temperature 0 \ 21 | --conv-mode vicuna_v1 & 22 | done 23 | 24 | wait 25 | 26 | output_file=./playground/data/eval/gqa/answers/$SPLIT/$CKPT/merge.jsonl 27 | 28 | # Clear out the output file if it exists. 29 | > "$output_file" 30 | 31 | # Loop through the indices and concatenate each file. 32 | for IDX in $(seq 0 $((CHUNKS-1))); do 33 | cat ./playground/data/eval/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 34 | done 35 | 36 | python scripts/convert_gqa_for_eval.py --src $output_file --dst $GQADIR/testdev_balanced_predictions.json 37 | 38 | cd $GQADIR 39 | python eval/eval.py --tier testdev_balanced 40 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/llavabench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/llava-bench-in-the-wild/questions.jsonl \ 6 | --image-folder ./playground/data/eval/llava-bench-in-the-wild/images \ 7 | --answers-file ./playground/data/eval/llava-bench-in-the-wild/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | mkdir -p playground/data/eval/llava-bench-in-the-wild/reviews 12 | 13 | python llava/eval/eval_gpt_review_bench.py \ 14 | --question playground/data/eval/llava-bench-in-the-wild/questions.jsonl \ 15 | --context playground/data/eval/llava-bench-in-the-wild/context.jsonl \ 16 | --rule llava/eval/table/rule.json \ 17 | --answer-list \ 18 | playground/data/eval/llava-bench-in-the-wild/answers_gpt4.jsonl \ 19 | playground/data/eval/llava-bench-in-the-wild/answers/llava-v1.5-13b.jsonl \ 20 | --output \ 21 | playground/data/eval/llava-bench-in-the-wild/reviews/llava-v1.5-13b.jsonl 22 | 23 | python llava/eval/summarize_gpt_review.py -f playground/data/eval/llava-bench-in-the-wild/reviews/llava-v1.5-13b.jsonl 24 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/mmbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SPLIT="mmbench_dev_20230712" 4 | 5 | python -m llava.eval.model_vqa_mmbench \ 6 | --model-path 
liuhaotian/llava-v1.5-13b \ 7 | --question-file ./playground/data/eval/mmbench/$SPLIT.tsv \ 8 | --answers-file ./playground/data/eval/mmbench/answers/$SPLIT/llava-v1.5-13b.jsonl \ 9 | --single-pred-prompt \ 10 | --temperature 0 \ 11 | --conv-mode vicuna_v1 12 | 13 | mkdir -p playground/data/eval/mmbench/answers_upload/$SPLIT 14 | 15 | python scripts/convert_mmbench_for_submission.py \ 16 | --annotation-file ./playground/data/eval/mmbench/$SPLIT.tsv \ 17 | --result-dir ./playground/data/eval/mmbench/answers/$SPLIT \ 18 | --upload-dir ./playground/data/eval/mmbench/answers_upload/$SPLIT \ 19 | --experiment llava-v1.5-13b 20 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/mmbench_cn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SPLIT="mmbench_dev_cn_20231003" 4 | 5 | python -m llava.eval.model_vqa_mmbench \ 6 | --model-path liuhaotian/llava-v1.5-13b \ 7 | --question-file ./playground/data/eval/mmbench_cn/$SPLIT.tsv \ 8 | --answers-file ./playground/data/eval/mmbench_cn/answers/$SPLIT/llava-v1.5-13b.jsonl \ 9 | --lang cn \ 10 | --single-pred-prompt \ 11 | --temperature 0 \ 12 | --conv-mode vicuna_v1 13 | 14 | mkdir -p playground/data/eval/mmbench/answers_upload/$SPLIT 15 | 16 | python scripts/convert_mmbench_for_submission.py \ 17 | --annotation-file ./playground/data/eval/mmbench_cn/$SPLIT.tsv \ 18 | --result-dir ./playground/data/eval/mmbench_cn/answers/$SPLIT \ 19 | --upload-dir ./playground/data/eval/mmbench_cn/answers_upload/$SPLIT \ 20 | --experiment llava-v1.5-13b 21 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/mme.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_loader \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/MME/llava_mme.jsonl \ 6 | --image-folder ./playground/data/eval/MME/MME_Benchmark_release_version \ 7 | --answers-file ./playground/data/eval/MME/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | cd ./playground/data/eval/MME 12 | 13 | python convert_answer_to_mme.py --experiment llava-v1.5-13b 14 | 15 | cd eval_tool 16 | 17 | python calculation.py --results_dir answers/llava-v1.5-13b 18 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/mmvet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/mm-vet/llava-mm-vet.jsonl \ 6 | --image-folder ./playground/data/eval/mm-vet/images \ 7 | --answers-file ./playground/data/eval/mm-vet/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | mkdir -p ./playground/data/eval/mm-vet/results 12 | 13 | python scripts/convert_mmvet_for_eval.py \ 14 | --src ./playground/data/eval/mm-vet/answers/llava-v1.5-13b.jsonl \ 15 | --dst ./playground/data/eval/mm-vet/results/llava-v1.5-13b.json 16 | 17 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/pope.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_loader \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file 
./playground/data/eval/pope/llava_pope_test.jsonl \ 6 | --image-folder ./playground/data/eval/pope/val2014 \ 7 | --answers-file ./playground/data/eval/pope/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | python llava/eval/eval_pope.py \ 12 | --annotation-dir ./playground/data/eval/pope/coco \ 13 | --question-file ./playground/data/eval/pope/llava_pope_test.jsonl \ 14 | --result-file ./playground/data/eval/pope/answers/llava-v1.5-13b.jsonl 15 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/qbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$1" = "dev" ]; then 4 | echo "Evaluating in 'dev' split." 5 | elif [ "$1" = "test" ]; then 6 | echo "Evaluating in 'test' split." 7 | else 8 | echo "Unknown split, please choose between 'dev' and 'test'." 9 | exit 1 10 | fi 11 | 12 | python -m llava.eval.model_vqa_qbench \ 13 | --model-path liuhaotian/llava-v1.5-13b \ 14 | --image-folder ./playground/data/eval/qbench/images_llvisionqa/ \ 15 | --questions-file ./playground/data/eval/qbench/llvisionqa_$1.json \ 16 | --answers-file ./playground/data/eval/qbench/llvisionqa_$1_answers.jsonl \ 17 | --conv-mode llava_v1 \ 18 | --lang en 19 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/qbench_zh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$1" = "dev" ]; then 4 | ZH_SPLIT="验证集" 5 | echo "Evaluating in 'dev' split." 6 | elif [ "$1" = "test" ]; then 7 | ZH_SPLIT="测试集" 8 | echo "Evaluating in 'test' split." 9 | else 10 | echo "Unknown split, please choose between 'dev' and 'test'." 11 | exit 1 12 | fi 13 | 14 | python -m llava.eval.model_vqa_qbench \ 15 | --model-path liuhaotian/llava-v1.5-13b \ 16 | --image-folder ./playground/data/eval/qbench/images_llvisionqa/ \ 17 | --questions-file ./playground/data/eval/qbench/质衡-问答-$ZH_SPLIT.json \ 18 | --answers-file ./playground/data/eval/qbench/llvisionqa_zh_$1_answers.jsonl \ 19 | --conv-mode llava_v1 \ 20 | --lang zh 21 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/seed.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="llava-v1.5-13b" 9 | 10 | for IDX in $(seq 0 $((CHUNKS-1))); do 11 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 12 | --model-path liuhaotian/llava-v1.5-13b \ 13 | --question-file ./playground/data/eval/seed_bench/llava-seed-bench.jsonl \ 14 | --image-folder ./playground/data/eval/seed_bench \ 15 | --answers-file ./playground/data/eval/seed_bench/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \ 16 | --num-chunks $CHUNKS \ 17 | --chunk-idx $IDX \ 18 | --temperature 0 \ 19 | --conv-mode vicuna_v1 & 20 | done 21 | 22 | wait 23 | 24 | output_file=./playground/data/eval/seed_bench/answers/$CKPT/merge.jsonl 25 | 26 | # Clear out the output file if it exists. 27 | > "$output_file" 28 | 29 | # Loop through the indices and concatenate each file. 
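# Each worker writes ${CHUNKS}_${IDX}.jsonl; merging them rebuilds the full answer set
# before convert_seed_for_submission.py reports per-type accuracy and the image/video splits.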
30 | for IDX in $(seq 0 $((CHUNKS-1))); do 31 | cat ./playground/data/eval/seed_bench/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 32 | done 33 | 34 | # Evaluate 35 | python scripts/convert_seed_for_submission.py \ 36 | --annotation-file ./playground/data/eval/seed_bench/SEED-Bench.json \ 37 | --result-file $output_file \ 38 | --result-upload-file ./playground/data/eval/seed_bench/answers_upload/llava-v1.5-13b.jsonl 39 | 40 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/sqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_science \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/scienceqa/llava_test_CQM-A.json \ 6 | --image-folder ./playground/data/eval/scienceqa/images/test \ 7 | --answers-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b.jsonl \ 8 | --single-pred-prompt \ 9 | --temperature 0 \ 10 | --conv-mode vicuna_v1 11 | 12 | python llava/eval/eval_science_qa.py \ 13 | --base-dir ./playground/data/eval/scienceqa \ 14 | --result-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b.jsonl \ 15 | --output-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_output.jsonl \ 16 | --output-result ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_result.json 17 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/textvqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_loader \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/textvqa/llava_textvqa_val_v051_ocr.jsonl \ 6 | --image-folder ./playground/data/eval/textvqa/train_images \ 7 | --answers-file ./playground/data/eval/textvqa/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | python -m llava.eval.eval_textvqa \ 12 | --annotation-file ./playground/data/eval/textvqa/TextVQA_0.5.1_val.json \ 13 | --result-file ./playground/data/eval/textvqa/answers/llava-v1.5-13b.jsonl 14 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/vizwiz.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_loader \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/vizwiz/llava_test.jsonl \ 6 | --image-folder ./playground/data/eval/vizwiz/test \ 7 | --answers-file ./playground/data/eval/vizwiz/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | python scripts/convert_vizwiz_for_submission.py \ 12 | --annotation-file ./playground/data/eval/vizwiz/llava_test.jsonl \ 13 | --result-file ./playground/data/eval/vizwiz/answers/llava-v1.5-13b.jsonl \ 14 | --result-upload-file ./playground/data/eval/vizwiz/answers_upload/llava-v1.5-13b.json 15 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/vqav2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="llava-v1.5-13b" 9 | SPLIT="llava_vqav2_mscoco_test-dev2015" 10 | 11 | for IDX in $(seq 0 $((CHUNKS-1))); do 12 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} 
python -m llava.eval.model_vqa_loader \ 13 | --model-path liuhaotian/llava-v1.5-13b \ 14 | --question-file ./playground/data/eval/vqav2/$SPLIT.jsonl \ 15 | --image-folder ./playground/data/eval/vqav2/test2015 \ 16 | --answers-file ./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \ 17 | --num-chunks $CHUNKS \ 18 | --chunk-idx $IDX \ 19 | --temperature 0 \ 20 | --conv-mode vicuna_v1 & 21 | done 22 | 23 | wait 24 | 25 | output_file=./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/merge.jsonl 26 | 27 | # Clear out the output file if it exists. 28 | > "$output_file" 29 | 30 | # Loop through the indices and concatenate each file. 31 | for IDX in $(seq 0 $((CHUNKS-1))); do 32 | cat ./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 33 | done 34 | 35 | python scripts/convert_vqav2_for_submission.py --split $SPLIT --ckpt $CKPT 36 | 37 | -------------------------------------------------------------------------------- /scripts/v1_5/finetune.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --deepspeed ./scripts/zero3.json \ 5 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 6 | --version v1 \ 7 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 8 | --image_folder ./playground/data \ 9 | --vision_tower openai/clip-vit-large-patch14-336 \ 10 | --pretrain_mm_mlp_adapter ./checkpoints/llava-v1.5-13b-pretrain/mm_projector.bin \ 11 | --mm_projector_type mlp2x_gelu \ 12 | --mm_vision_select_layer -2 \ 13 | --mm_use_im_start_end False \ 14 | --mm_use_im_patch_token False \ 15 | --image_aspect_ratio pad \ 16 | --group_by_modality_length True \ 17 | --bf16 True \ 18 | --output_dir ./checkpoints/llava-v1.5-13b \ 19 | --num_train_epochs 1 \ 20 | --per_device_train_batch_size 16 \ 21 | --per_device_eval_batch_size 4 \ 22 | --gradient_accumulation_steps 1 \ 23 | --evaluation_strategy "no" \ 24 | --save_strategy "steps" \ 25 | --save_steps 50000 \ 26 | --save_total_limit 1 \ 27 | --learning_rate 2e-5 \ 28 | --weight_decay 0. 
\ 29 | --warmup_ratio 0.03 \ 30 | --lr_scheduler_type "cosine" \ 31 | --logging_steps 1 \ 32 | --tf32 True \ 33 | --model_max_length 2048 \ 34 | --gradient_checkpointing True \ 35 | --dataloader_num_workers 4 \ 36 | --lazy_preprocess True \ 37 | --report_to wandb 38 | -------------------------------------------------------------------------------- /scripts/v1_5/finetune_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \ 5 | --deepspeed ./scripts/zero3.json \ 6 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 7 | --version v1 \ 8 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 9 | --image_folder ./playground/data \ 10 | --vision_tower openai/clip-vit-large-patch14-336 \ 11 | --pretrain_mm_mlp_adapter ./checkpoints/llava-v1.5-13b-pretrain/mm_projector.bin \ 12 | --mm_projector_type mlp2x_gelu \ 13 | --mm_vision_select_layer -2 \ 14 | --mm_use_im_start_end False \ 15 | --mm_use_im_patch_token False \ 16 | --image_aspect_ratio pad \ 17 | --group_by_modality_length True \ 18 | --bf16 True \ 19 | --output_dir ./checkpoints/llava-v1.5-13b-lora \ 20 | --num_train_epochs 1 \ 21 | --per_device_train_batch_size 16 \ 22 | --per_device_eval_batch_size 4 \ 23 | --gradient_accumulation_steps 1 \ 24 | --evaluation_strategy "no" \ 25 | --save_strategy "steps" \ 26 | --save_steps 50000 \ 27 | --save_total_limit 1 \ 28 | --learning_rate 2e-4 \ 29 | --weight_decay 0. \ 30 | --warmup_ratio 0.03 \ 31 | --lr_scheduler_type "cosine" \ 32 | --logging_steps 1 \ 33 | --tf32 True \ 34 | --model_max_length 2048 \ 35 | --gradient_checkpointing True \ 36 | --dataloader_num_workers 4 \ 37 | --lazy_preprocess True \ 38 | --report_to wandb 39 | -------------------------------------------------------------------------------- /scripts/v1_5/finetune_task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --deepspeed ./scripts/zero3.json \ 5 | --model_name_or_path liuhaotian/llava-v1.5-13b \ 6 | --version v1 \ 7 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 8 | --image_folder ./playground/data \ 9 | --vision_tower openai/clip-vit-large-patch14-336 \ 10 | --mm_projector_type mlp2x_gelu \ 11 | --mm_vision_select_layer -2 \ 12 | --mm_use_im_start_end False \ 13 | --mm_use_im_patch_token False \ 14 | --image_aspect_ratio pad \ 15 | --group_by_modality_length True \ 16 | --bf16 True \ 17 | --output_dir ./checkpoints/llava-v1.5-13b-task \ 18 | --num_train_epochs 1 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 4 \ 21 | --gradient_accumulation_steps 1 \ 22 | --evaluation_strategy "no" \ 23 | --save_strategy "steps" \ 24 | --save_steps 50000 \ 25 | --save_total_limit 1 \ 26 | --learning_rate 2e-5 \ 27 | --weight_decay 0. 
\ 28 | --warmup_ratio 0.03 \ 29 | --lr_scheduler_type "cosine" \ 30 | --logging_steps 1 \ 31 | --tf32 True \ 32 | --model_max_length 2048 \ 33 | --gradient_checkpointing True \ 34 | --dataloader_num_workers 4 \ 35 | --lazy_preprocess True \ 36 | --report_to wandb 37 | -------------------------------------------------------------------------------- /scripts/v1_5/finetune_task_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \ 5 | --deepspeed ./scripts/zero3.json \ 6 | --model_name_or_path liuhaotian/llava-v1.5-13b \ 7 | --version v1 \ 8 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 9 | --image_folder ./playground/data \ 10 | --vision_tower openai/clip-vit-large-patch14-336 \ 11 | --mm_projector_type mlp2x_gelu \ 12 | --mm_vision_select_layer -2 \ 13 | --mm_use_im_start_end False \ 14 | --mm_use_im_patch_token False \ 15 | --image_aspect_ratio pad \ 16 | --group_by_modality_length True \ 17 | --bf16 True \ 18 | --output_dir ./checkpoints/llava-v1.5-13b-task-lora \ 19 | --num_train_epochs 1 \ 20 | --per_device_train_batch_size 16 \ 21 | --per_device_eval_batch_size 4 \ 22 | --gradient_accumulation_steps 1 \ 23 | --evaluation_strategy "no" \ 24 | --save_strategy "steps" \ 25 | --save_steps 50000 \ 26 | --save_total_limit 1 \ 27 | --learning_rate 2e-4 \ 28 | --weight_decay 0. \ 29 | --warmup_ratio 0.03 \ 30 | --lr_scheduler_type "cosine" \ 31 | --logging_steps 1 \ 32 | --tf32 True \ 33 | --model_max_length 2048 \ 34 | --gradient_checkpointing True \ 35 | --dataloader_num_workers 4 \ 36 | --lazy_preprocess True \ 37 | --report_to wandb 38 | -------------------------------------------------------------------------------- /scripts/v1_5/pretrain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --deepspeed ./scripts/zero2.json \ 5 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 6 | --version plain \ 7 | --data_path ./playground/data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json \ 8 | --image_folder ./playground/data/LLaVA-Pretrain/images \ 9 | --vision_tower openai/clip-vit-large-patch14-336 \ 10 | --mm_projector_type mlp2x_gelu \ 11 | --tune_mm_mlp_adapter True \ 12 | --mm_vision_select_layer -2 \ 13 | --mm_use_im_start_end False \ 14 | --mm_use_im_patch_token False \ 15 | --bf16 True \ 16 | --output_dir ./checkpoints/llava-v1.5-13b-pretrain \ 17 | --num_train_epochs 1 \ 18 | --per_device_train_batch_size 32 \ 19 | --per_device_eval_batch_size 4 \ 20 | --gradient_accumulation_steps 1 \ 21 | --evaluation_strategy "no" \ 22 | --save_strategy "steps" \ 23 | --save_steps 24000 \ 24 | --save_total_limit 1 \ 25 | --learning_rate 1e-3 \ 26 | --weight_decay 0. 
\ 27 | --warmup_ratio 0.03 \ 28 | --lr_scheduler_type "cosine" \ 29 | --logging_steps 1 \ 30 | --tf32 True \ 31 | --model_max_length 2048 \ 32 | --gradient_checkpointing True \ 33 | --dataloader_num_workers 4 \ 34 | --lazy_preprocess True \ 35 | --report_to wandb 36 | -------------------------------------------------------------------------------- /scripts/zero2.json: -------------------------------------------------------------------------------- 1 | { 2 | "fp16": { 3 | "enabled": "auto", 4 | "loss_scale": 0, 5 | "loss_scale_window": 1000, 6 | "initial_scale_power": 16, 7 | "hysteresis": 2, 8 | "min_loss_scale": 1 9 | }, 10 | "bf16": { 11 | "enabled": "auto" 12 | }, 13 | "train_micro_batch_size_per_gpu": "auto", 14 | "train_batch_size": "auto", 15 | "gradient_accumulation_steps": "auto", 16 | "zero_optimization": { 17 | "stage": 2, 18 | "overlap_comm": true, 19 | "contiguous_gradients": true, 20 | "sub_group_size": 1e9, 21 | "reduce_bucket_size": "auto" 22 | } 23 | } -------------------------------------------------------------------------------- /scripts/zero3.json: -------------------------------------------------------------------------------- 1 | { 2 | "fp16": { 3 | "enabled": "auto", 4 | "loss_scale": 0, 5 | "loss_scale_window": 1000, 6 | "initial_scale_power": 16, 7 | "hysteresis": 2, 8 | "min_loss_scale": 1 9 | }, 10 | "bf16": { 11 | "enabled": "auto" 12 | }, 13 | "train_micro_batch_size_per_gpu": "auto", 14 | "train_batch_size": "auto", 15 | "gradient_accumulation_steps": "auto", 16 | "zero_optimization": { 17 | "stage": 3, 18 | "overlap_comm": true, 19 | "contiguous_gradients": true, 20 | "sub_group_size": 1e9, 21 | "reduce_bucket_size": "auto", 22 | "stage3_prefetch_bucket_size": "auto", 23 | "stage3_param_persistence_threshold": "auto", 24 | "stage3_max_live_parameters": 1e9, 25 | "stage3_max_reuse_distance": 1e9, 26 | "stage3_gather_16bit_weights_on_model_save": true 27 | } 28 | } -------------------------------------------------------------------------------- /scripts/zero3_offload.json: -------------------------------------------------------------------------------- 1 | { 2 | "fp16": { 3 | "enabled": "auto", 4 | "loss_scale": 0, 5 | "loss_scale_window": 1000, 6 | "initial_scale_power": 16, 7 | "hysteresis": 2, 8 | "min_loss_scale": 1 9 | }, 10 | "bf16": { 11 | "enabled": "auto" 12 | }, 13 | "optimizer": { 14 | "type": "AdamW", 15 | "params": { 16 | "lr": "auto", 17 | "betas": "auto", 18 | "eps": "auto", 19 | "weight_decay": "auto" 20 | } 21 | }, 22 | "scheduler": { 23 | "type": "WarmupLR", 24 | "params": { 25 | "warmup_min_lr": "auto", 26 | "warmup_max_lr": "auto", 27 | "warmup_num_steps": "auto" 28 | } 29 | }, 30 | "zero_optimization": { 31 | "stage": 3, 32 | "offload_optimizer": { 33 | "device": "cpu", 34 | "pin_memory": true 35 | }, 36 | "offload_param": { 37 | "device": "cpu", 38 | "pin_memory": true 39 | }, 40 | "overlap_comm": true, 41 | "contiguous_gradients": true, 42 | "sub_group_size": 1e9, 43 | "reduce_bucket_size": "auto", 44 | "stage3_prefetch_bucket_size": "auto", 45 | "stage3_param_persistence_threshold": "auto", 46 | "stage3_max_live_parameters": 1e9, 47 | "stage3_max_reuse_distance": 1e9, 48 | "gather_16bit_weights_on_model_save": true 49 | }, 50 | "gradient_accumulation_steps": "auto", 51 | "gradient_clipping": "auto", 52 | "train_batch_size": "auto", 53 | "train_micro_batch_size_per_gpu": "auto", 54 | "steps_per_print": 1e5, 55 | "wall_clock_breakdown": false 56 | } --------------------------------------------------------------------------------