├── .gitignore ├── DataEngine_flowchart.png ├── LICENSE.txt ├── LLaVA ├── LICENSE ├── README.md ├── cog.yaml ├── docs │ ├── Customize_Component.md │ ├── Data.md │ ├── Evaluation.md │ ├── Finetune_Custom_Data.md │ ├── Intel.md │ ├── LLaVA_Bench.md │ ├── LLaVA_from_LLaMA2.md │ ├── LoRA.md │ ├── MODEL_ZOO.md │ ├── ScienceQA.md │ ├── Windows.md │ └── macOS.md ├── llava │ ├── __init__.py │ ├── constants.py │ ├── conversation.py │ ├── eval │ │ ├── eval_gpt_review.py │ │ ├── eval_gpt_review_bench.py │ │ ├── eval_gpt_review_visual.py │ │ ├── eval_pope.py │ │ ├── eval_science_qa.py │ │ ├── eval_science_qa_gpt4.py │ │ ├── eval_science_qa_gpt4_requery.py │ │ ├── eval_textvqa.py │ │ ├── generate_webpage_data_from_table.py │ │ ├── m4c_evaluator.py │ │ ├── model_qa.py │ │ ├── model_vqa.py │ │ ├── model_vqa_loader.py │ │ ├── model_vqa_mmbench.py │ │ ├── model_vqa_science.py │ │ ├── qa_baseline_gpt35.py │ │ ├── run_llava.py │ │ ├── summarize_gpt_review.py │ │ └── webpage │ │ │ ├── figures │ │ │ ├── alpaca.png │ │ │ ├── bard.jpg │ │ │ ├── chatgpt.svg │ │ │ ├── llama.jpg │ │ │ ├── swords_FILL0_wght300_GRAD0_opsz48.svg │ │ │ └── vicuna.jpeg │ │ │ ├── index.html │ │ │ ├── script.js │ │ │ └── styles.css │ ├── mm_utils.py │ ├── model │ │ ├── __init__.py │ │ ├── apply_delta.py │ │ ├── builder.py │ │ ├── consolidate.py │ │ ├── language_model │ │ │ ├── llava_llama.py │ │ │ ├── llava_mistral.py │ │ │ └── llava_mpt.py │ │ ├── llava_arch.py │ │ ├── make_delta.py │ │ ├── multimodal_encoder │ │ │ ├── builder.py │ │ │ └── clip_encoder.py │ │ ├── multimodal_projector │ │ │ └── builder.py │ │ └── utils.py │ ├── serve │ │ ├── __init__.py │ │ ├── cli.py │ │ ├── controller.py │ │ ├── examples │ │ │ ├── extreme_ironing.jpg │ │ │ └── waterview.jpg │ │ ├── gradio_web_server.py │ │ ├── model_worker.py │ │ ├── register_worker.py │ │ ├── sglang_worker.py │ │ └── test_message.py │ ├── train │ │ ├── llama_flash_attn_monkey_patch.py │ │ ├── llama_xformers_attn_monkey_patch.py │ │ ├── llava_trainer.py │ │ ├── train.py │ │ ├── train_mem.py │ │ └── train_xformers.py │ └── utils.py ├── playground │ └── data │ │ └── prompts │ │ ├── complex_reasoning │ │ ├── 000_caps.txt │ │ ├── 000_conv.txt │ │ ├── 001_caps.txt │ │ ├── 001_conv.txt │ │ ├── 002_caps.txt │ │ ├── 002_conv.txt │ │ └── system_message.txt │ │ ├── conversation │ │ ├── 000_caps.txt │ │ ├── 000_conv.txt │ │ ├── 001_caps.txt │ │ ├── 001_conv.txt │ │ └── system_message.txt │ │ └── detail_description │ │ ├── 000_caps.txt │ │ ├── 000_conv.txt │ │ ├── 001_caps.txt │ │ ├── 001_conv.txt │ │ ├── 002_caps.txt │ │ ├── 002_conv.txt │ │ └── system_message.txt ├── predict.py ├── pyproject.toml └── scripts │ ├── convert_gqa_for_eval.py │ ├── convert_mmbench_for_submission.py │ ├── convert_mmvet_for_eval.py │ ├── convert_seed_for_submission.py │ ├── convert_sqa_to_llava.py │ ├── convert_sqa_to_llava_base_prompt.py │ ├── convert_vizwiz_for_submission.py │ ├── convert_vqav2_for_submission.py │ ├── extract_mm_projector.py │ ├── finetune.sh │ ├── finetune_full_schedule.sh │ ├── finetune_lora.sh │ ├── finetune_qlora.sh │ ├── finetune_sqa.sh │ ├── merge_lora_weights.py │ ├── pretrain.sh │ ├── pretrain_xformers.sh │ ├── sqa_eval_batch.sh │ ├── sqa_eval_gather.sh │ ├── upload_pypi.sh │ └── v1_5 │ ├── eval │ ├── gqa.sh │ ├── gqa_lora.sh │ ├── llavabench.sh │ ├── mmbench.sh │ ├── mmbench_cn.sh │ ├── mmbench_lora.sh │ ├── mme.sh │ ├── mme_lora.sh │ ├── mmvet.sh │ ├── pope.sh │ ├── qbench.sh │ ├── qbench_zh.sh │ ├── seed.sh │ ├── seed_lora.sh │ ├── sqa.sh │ ├── sqa_lora.sh │ ├── textvqa.sh │ ├── vizwiz.sh │ 
├── vizwiz_lora.sh │ ├── vqav2.sh │ └── vqav2_lora.sh │ ├── finetune.sh │ ├── finetune_lora.sh │ ├── finetune_lora_dataengine.sh │ ├── finetune_task.sh │ ├── finetune_task_lora.sh │ └── pretrain.sh ├── MiniGPT-4 ├── LICENSE.md ├── LICENSE_Lavis.md ├── README.md ├── dataset │ ├── README_1_STAGE.md │ ├── README_2_STAGE.md │ ├── README_MINIGPTv2_FINETUNE.md │ ├── convert_cc_sbu.py │ ├── convert_laion.py │ ├── download_cc_sbu.sh │ └── download_laion.sh ├── environment.yml ├── eval_configs │ ├── minigpt4_eval.yaml │ ├── minigpt4_llama2_eval.yaml │ ├── minigptv2_benchmark_evaluation.yaml │ └── minigptv2_eval.yaml ├── eval_scripts │ ├── EVAL_README.md │ ├── convert_mmbench_for_submission.py │ ├── convert_seed_for_submission_minigpt4.py │ ├── eval_data │ │ ├── refcoco+_testA.json │ │ ├── refcoco+_testB.json │ │ ├── refcoco+_val.json │ │ ├── refcoco_testA.json │ │ ├── refcoco_testB.json │ │ ├── refcoco_val.json │ │ ├── refcocog_test.json │ │ └── refcocog_val.json │ ├── eval_ref.py │ └── eval_vqa.py ├── figs │ ├── demo.png │ ├── examples │ │ ├── ad_1.png │ │ ├── ad_2.png │ │ ├── cook_1.png │ │ ├── cook_2.png │ │ ├── describe_1.png │ │ ├── describe_2.png │ │ ├── fact_1.png │ │ ├── fact_2.png │ │ ├── fix_1.png │ │ ├── fix_2.png │ │ ├── fun_1.png │ │ ├── fun_2.png │ │ ├── logo_1.png │ │ ├── op_1.png │ │ ├── op_2.png │ │ ├── people_1.png │ │ ├── people_2.png │ │ ├── rhyme_1.png │ │ ├── rhyme_2.png │ │ ├── story_1.png │ │ ├── story_2.png │ │ ├── web_1.png │ │ ├── wop_1.png │ │ └── wop_2.png │ ├── minigpt2_demo.png │ ├── online_demo.png │ └── overview.png ├── minigpt4 │ ├── __init__.py │ ├── common │ │ ├── __init__.py │ │ ├── config.py │ │ ├── dist_utils.py │ │ ├── eval_utils.py │ │ ├── gradcam.py │ │ ├── logger.py │ │ ├── optims.py │ │ ├── registry.py │ │ ├── utils.py │ │ └── vqa_tools │ │ │ ├── VQA │ │ │ ├── PythonEvaluationTools │ │ │ │ ├── vqaEvalDemo.py │ │ │ │ └── vqaEvaluation │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── vqaEval.py │ │ │ ├── PythonHelperTools │ │ │ │ ├── vqaDemo.py │ │ │ │ └── vqaTools │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── vqa.py │ │ │ ├── QuestionTypes │ │ │ │ ├── abstract_v002_question_types.txt │ │ │ │ └── mscoco_question_types.txt │ │ │ ├── README.md │ │ │ └── license.txt │ │ │ ├── __init__.py │ │ │ ├── vqa.py │ │ │ └── vqa_eval.py │ ├── configs │ │ ├── datasets │ │ │ ├── aokvqa │ │ │ │ └── defaults.yaml │ │ │ ├── cc_sbu │ │ │ │ ├── align.yaml │ │ │ │ └── defaults.yaml │ │ │ ├── coco │ │ │ │ ├── caption.yaml │ │ │ │ └── defaults_vqa.yaml │ │ │ ├── coco_bbox │ │ │ │ ├── invrefcoco.yaml │ │ │ │ ├── invrefcocog.yaml │ │ │ │ ├── invrefcocop.yaml │ │ │ │ ├── refcoco.yaml │ │ │ │ ├── refcocog.yaml │ │ │ │ └── refcocop.yaml │ │ │ ├── engine │ │ │ │ ├── da.yaml │ │ │ │ ├── mc.yaml │ │ │ │ └── mcp.yaml │ │ │ ├── flickr │ │ │ │ ├── caption_to_phrase.yaml │ │ │ │ ├── default.yaml │ │ │ │ └── object_to_phrase.yaml │ │ │ ├── gqa │ │ │ │ └── balanced_val.yaml │ │ │ ├── laion │ │ │ │ └── defaults.yaml │ │ │ ├── llava │ │ │ │ ├── conversation.yaml │ │ │ │ ├── detail.yaml │ │ │ │ └── reason.yaml │ │ │ ├── multitask_conversation │ │ │ │ └── default.yaml │ │ │ ├── nlp │ │ │ │ └── unnatural_instruction.yaml │ │ │ ├── ocrvqa │ │ │ │ └── ocrvqa.yaml │ │ │ ├── okvqa │ │ │ │ └── defaults.yaml │ │ │ ├── textcaps │ │ │ │ └── caption.yaml │ │ │ └── vg │ │ │ │ └── ref.yaml │ │ ├── default.yaml │ │ └── models │ │ │ ├── minigpt4_llama2.yaml │ │ │ ├── minigpt4_vicuna0.yaml │ │ │ └── minigpt_v2.yaml │ ├── conversation │ │ ├── __init__.py │ │ └── conversation.py │ ├── datasets │ │ ├── __init__.py │ │ ├── builders 
│ │ │ ├── __init__.py │ │ │ ├── base_dataset_builder.py │ │ │ └── image_text_pair_builder.py │ │ ├── data_utils.py │ │ └── datasets │ │ │ ├── __init__.py │ │ │ ├── aok_vqa_datasets.py │ │ │ ├── base_dataset.py │ │ │ ├── caption_datasets.py │ │ │ ├── cc_sbu_dataset.py │ │ │ ├── coco_caption.py │ │ │ ├── coco_dataset.py │ │ │ ├── coco_vqa_datasets.py │ │ │ ├── dataloader_utils.py │ │ │ ├── engine_vqa_datasets.py │ │ │ ├── flickr.py │ │ │ ├── gqa_datasets.py │ │ │ ├── laion_dataset.py │ │ │ ├── llava_dataset.py │ │ │ ├── multitask_conversation.py │ │ │ ├── ocrvqa_dataset.py │ │ │ ├── text_caps.py │ │ │ ├── unnatural_instruction.py │ │ │ ├── vg_dataset.py │ │ │ └── vqa_datasets.py │ ├── models │ │ ├── Qformer.py │ │ ├── __init__.py │ │ ├── base_model.py │ │ ├── eva_vit.py │ │ ├── minigpt4.py │ │ ├── minigpt_base.py │ │ ├── minigpt_v2.py │ │ └── modeling_llama.py │ ├── processors │ │ ├── __init__.py │ │ ├── base_processor.py │ │ ├── blip_processors.py │ │ └── randaugment.py │ ├── runners │ │ ├── __init__.py │ │ └── runner_base.py │ └── tasks │ │ ├── __init__.py │ │ ├── base_task.py │ │ └── image_text_pretrain.py ├── prompts │ └── alignment.txt ├── train.py └── train_configs │ ├── minigpt4_llama2_stage1_pretrain.yaml │ ├── minigpt4_llama2_stage2_finetune.yaml │ ├── minigpt4_stage1_pretrain.yaml │ ├── minigpt4_stage2_finetune.yaml │ ├── minigptv2_finetune.yaml │ └── minigptv2_finetune_dataengine.yaml ├── README.md └── showcase.png /.gitignore: -------------------------------------------------------------------------------- 1 | **__pycache__** 2 | **.ipynb_checkpoints 3 | LLaVA/.dockerignore 4 | LLaVA/.editorconfig 5 | LLaVA/.gitattributes 6 | LLaVA/.gitignore 7 | -------------------------------------------------------------------------------- /DataEngine_flowchart.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/DataEngine_flowchart.png -------------------------------------------------------------------------------- /LLaVA/cog.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for Cog ⚙️ 2 | # Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md 3 | 4 | build: 5 | gpu: true 6 | 7 | python_version: "3.11" 8 | 9 | python_packages: 10 | - "torch==2.0.1" 11 | - "accelerate==0.21.0" 12 | - "bitsandbytes==0.41.0" 13 | - "deepspeed==0.9.5" 14 | - "einops-exts==0.0.4" 15 | - "einops==0.6.1" 16 | - "gradio==3.35.2" 17 | - "gradio_client==0.2.9" 18 | - "httpx==0.24.0" 19 | - "markdown2==2.4.10" 20 | - "numpy==1.26.0" 21 | - "peft==0.4.0" 22 | - "scikit-learn==1.2.2" 23 | - "sentencepiece==0.1.99" 24 | - "shortuuid==1.0.11" 25 | - "timm==0.6.13" 26 | - "tokenizers==0.13.3" 27 | - "torch==2.0.1" 28 | - "torchvision==0.15.2" 29 | - "transformers==4.31.0" 30 | - "wandb==0.15.12" 31 | - "wavedrom==2.0.3.post3" 32 | - "Pygments==2.16.1" 33 | run: 34 | - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.0.3/pget" && chmod +x /usr/local/bin/pget 35 | 36 | # predict.py defines how predictions are run on your model 37 | predict: "predict.py:Predictor" 38 | -------------------------------------------------------------------------------- /LLaVA/docs/Customize_Component.md: -------------------------------------------------------------------------------- 1 | # Customize Components in LLaVA 2 | 3 | This is an initial guide on how to replace the LLMs, visual encoders, etc. 
with your choice of components. 4 | 5 | ## LLM 6 | 7 | It is quite simple to swap LLaMA out for any other LLM. You can refer to our implementation of [`llava_llama.py`](https://raw.githubusercontent.com/haotian-liu/LLaVA/main/llava/model/language_model/llava_llama.py) for an example of how to replace the LLM. 8 | 9 | Although it may seem that it still needs ~100 lines of code, most of them are copied from the original `llama.py` in HF Transformers. The only difference is the insertion of a few lines for processing the multimodal inputs. 10 | 11 | In the `forward` function, you can see that we call `self.prepare_inputs_labels_for_multimodal` to process the multimodal inputs. This function is defined in `LlavaMetaForCausalLM`, and you just need to insert it into the `forward` function of your LLM. 12 | 13 | In the `prepare_inputs_for_generation` function, you can see that we add `images` to the `model_inputs`. This is because we need to pass the images to the LLM during generation. 14 | 15 | These are basically all the changes you need to make to replace the LLM. 16 | 17 | ## Visual Encoder 18 | 19 | You can check out [`clip_encoder.py`](https://github.com/haotian-liu/LLaVA/blob/main/llava/model/multimodal_encoder/clip_encoder.py) to see how we implement the CLIP visual encoder. 20 | 21 | -------------------------------------------------------------------------------- /LLaVA/docs/Data.md: -------------------------------------------------------------------------------- 1 | ## Data 2 | 3 | | Data file name | Size | 4 | | --- | ---: | 5 | | [llava_instruct_150k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_instruct_150k.json) | 229 MB | 6 | | [llava_instruct_80k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_instruct_80k.json) | 229 MB | 7 | | [conversation_58k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/conversation_58k.json) | 126 MB | 8 | | [detail_23k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/detail_23k.json) | 20.5 MB | 9 | | [complex_reasoning_77k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/complex_reasoning_77k.json) | 79.6 MB | 10 | 11 | ### Pretraining Dataset 12 | The pretraining dataset used in this release is a subset of the CC-3M dataset, filtered with a more balanced concept coverage distribution. Please see [here](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K) for a detailed description of the dataset structure and how to download the images. 13 | 14 | If you already have the CC-3M dataset on your disk, the image names follow this format: `GCC_train_000000000.jpg`. You may edit the `image` field correspondingly if necessary.
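If your local CC-3M copy uses a different naming scheme or folder layout, a small script can rewrite the `image` field before training. The sketch below is illustrative only: `chat.json`, `chat_local.json`, and the `to_local_name` mapping are placeholders for your own paths and naming convention.

```python
import json

# Hypothetical mapping from the GCC_train_XXXXXXXXX.jpg names to your local layout;
# adapt it to however your copy of CC-3M is stored.
def to_local_name(image_name: str) -> str:
    return image_name  # e.g. prepend a subfolder or swap the extension here if needed

with open("chat.json") as f:  # placeholder path to the downloaded chat file
    records = json.load(f)

for record in records:
    record["image"] = to_local_name(record["image"])

with open("chat_local.json", "w") as f:  # placeholder output path
    json.dump(records, f)
```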
15 | 16 | | Data | Chat File | Meta Data | Size | 17 | | --- | --- | --- | ---: | 18 | | CC-3M Concept-balanced 595K | [chat.json](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K/blob/main/chat.json) | [metadata.json](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K/blob/main/metadata.json) | 211 MB 19 | | LAION/CC/SBU BLIP-Caption Concept-balanced 558K | [blip_laion_cc_sbu_558k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain/blob/main/blip_laion_cc_sbu_558k.json) | [metadata.json](#) | 181 MB 20 | 21 | **Important notice**: Upon request from the community, as ~15% of the images in the original CC-3M dataset are no longer accessible, we have uploaded [`images.zip`](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K/blob/main/images.zip) to help better reproduce our work in the research community. It must not be used for any other purposes. The use of these images must comply with the CC-3M license. This may be taken down at any time when requested by the original CC-3M dataset owner or owners of the referenced images. 22 | 23 | ### GPT-4 Prompts 24 | 25 | We provide our prompts and few-shot samples for GPT-4 queries, to better facilitate research in this domain. Please check out the [`prompts`](https://github.com/haotian-liu/LLaVA/tree/main/playground/data/prompts) folder for three kinds of questions: conversation, detail description, and complex reasoning. 26 | 27 | They are organized in the format of `system_message.txt` for the system message, pairs of `abc_caps.txt` for few-shot sample user input, and `abc_conv.txt` for few-shot sample reference output. 28 | 29 | Note that you may find them in different formats. For example, `conversation` is in `jsonl`, and detail description is answer-only. The selected format in our preliminary experiments works slightly better than a limited set of alternatives that we tried: `jsonl`, more natural format, answer-only. If interested, you may try other variants or conduct a more careful study of this. Contributions are welcome! 30 | -------------------------------------------------------------------------------- /LLaVA/docs/Finetune_Custom_Data.md: -------------------------------------------------------------------------------- 1 | # Finetune LLaVA on Custom Datasets 2 | 3 | ## Dataset Format 4 | 5 | Convert your data to a JSON file containing a list of all samples. Sample metadata should contain `id` (a unique identifier), `image` (the path to the image), and `conversations` (the conversation data between human and AI). 6 | 7 | A sample JSON for finetuning LLaVA to generate tag-style captions for Stable Diffusion: 8 | 9 | ```json 10 | [ 11 | { 12 | "id": "997bb945-628d-4724-b370-b84de974a19f", 13 | "image": "part-000001/997bb945-628d-4724-b370-b84de974a19f.jpg", 14 | "conversations": [ 15 | { 16 | "from": "human", 17 | "value": "<image>\nWrite a prompt for Stable Diffusion to generate this image." 18 | }, 19 | { 20 | "from": "gpt", 21 | "value": "a beautiful painting of chernobyl by nekro, pascal blanche, john harris, greg rutkowski, sin jong hun, moebius, simon stalenhag. in style of cg art. ray tracing. cel shading. hyper detailed. realistic. ue 5. maya. octane render. " 22 | }, 23 | ] 24 | }, 25 | ... 26 | ] 27 | ``` 28 | 29 | ## Command 30 | 31 | If you have limited task-specific data, we recommend finetuning from LLaVA checkpoints with LoRA following this [script](https://github.com/haotian-liu/LLaVA/blob/main/scripts/v1_5/finetune_task_lora.sh).
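Before launching a finetuning run, it can help to sanity-check that the converted JSON matches the format above; below is a minimal sketch (the `custom_data.json` path is a placeholder for your own file).

```python
import json

# Minimal format check for the custom dataset JSON described above.
with open("custom_data.json") as f:  # placeholder path to your converted file
    samples = json.load(f)

assert isinstance(samples, list), "top level must be a list of samples"
for sample in samples:
    missing = {"id", "image", "conversations"} - sample.keys()
    assert not missing, f"sample {sample.get('id')} is missing keys: {missing}"
    turns = sample["conversations"]
    assert turns and turns[0]["from"] == "human", "conversations should start with a human turn"
    assert all(turn["from"] in {"human", "gpt"} for turn in turns), "turns must come from 'human' or 'gpt'"

print(f"Checked {len(samples)} samples.")
```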
32 | 33 | If the amount of task-specific data is sufficient, you can also finetune from LLaVA checkpoints with full-model finetuning following this [script](https://github.com/haotian-liu/LLaVA/blob/main/scripts/v1_5/finetune_task.sh). 34 | 35 | You may need to adjust the hyperparameters to fit each specific dataset and your hardware constraints. 36 | 37 | 38 | -------------------------------------------------------------------------------- /LLaVA/docs/Intel.md: -------------------------------------------------------------------------------- 1 | # Intel Platforms 2 | 3 | * Support for the [Intel GPU Max Series](https://www.intel.com/content/www/us/en/products/details/discrete-gpus/data-center-gpu/max-series.html) 4 | * Support for [Intel Sapphire Rapids CPUs](https://ark.intel.com/content/www/us/en/ark/products/codename/126212/products-formerly-sapphire-rapids.html) 5 | * Based on [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch) 6 | 7 | More details can be found in the [**intel branch**](https://github.com/haotian-liu/LLaVA/tree/intel/docs/intel) 8 | -------------------------------------------------------------------------------- /LLaVA/docs/LLaVA_from_LLaMA2.md: -------------------------------------------------------------------------------- 1 | # LLaVA (based on Llama 2 LLM, Preview) 2 | 3 | *NOTE: This is a technical preview. We are still running a hyperparameter search, and will release the final model soon. If you'd like to contribute to this, please contact us.* 4 | 5 | :llama: **-Introduction-** [Llama 2 is an open-source LLM released by Meta AI](https://about.fb.com/news/2023/07/llama-2/) on July 18, 2023. Compared with its earlier version, [Llama 1](https://ai.meta.com/blog/large-language-model-llama-meta-ai/), Llama 2 offers ***stronger language performance***, a ***longer context window***, and, importantly, is ***commercially usable***! While Llama 2 is changing the LLM market landscape in the language space, its multimodal ability remains unknown. We quickly developed the LLaVA variant based on the latest Llama 2 checkpoints and released it to the community for public use. 6 | 7 | You need to apply for and download the latest Llama 2 checkpoints to start your own training (apply [here](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)). 8 | 9 | 10 | ## Training 11 | 12 | Please check out [`pretrain.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/pretrain.sh), [`finetune.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune.sh), and [`finetune_lora.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune_lora.sh). 13 | 14 | ## LLaVA (based on Llama 2): What is different? 15 | 16 | :volcano: How is the new LLaVA based on Llama 2 different from the Llama 1 version? The differences in the training process are described below: 17 | - **Pre-training**. The pre-trained base LLM is changed from Llama 1 to Llama 2. 18 | - **Language instruction-tuning**. The previous LLaVA model starts with Vicuna, which is instruction-tuned on ShareGPT data from Llama 1; the new LLaVA model starts with Llama 2 Chat, which is an instruction-tuned checkpoint on dialogue data from Llama 2. 19 | - **Multimodal instruction-tuning**. The same LLaVA-Lightning process is applied.
20 | 21 | 22 | ### Results 23 | 24 | - Llama 2 is better at following the instructions of role playing; Llama 2 fails in following the instructions of translation 25 | - The quantitative evaluation on [LLaVA-Bench](https://github.com/haotian-liu/LLaVA/blob/main/docs/LLaVA_Bench.md) demonstrates on-par performance between Llama 2 and Llama 1 in LLaVA's multimodal chat ability. 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /LLaVA/docs/ScienceQA.md: -------------------------------------------------------------------------------- 1 | ### ScienceQA 2 | 3 | #### Prepare Data 4 | 1. Please see ScienceQA [repo](https://github.com/lupantech/ScienceQA) for setting up the dataset. 5 | 2. Generate ScienceQA dataset for LLaVA conversation-style format. 6 | 7 | ```Shell 8 | python scripts/convert_sqa_to_llava.py \ 9 | convert_to_llava \ 10 | --base-dir /path/to/ScienceQA/data/scienceqa \ 11 | --prompt-format "QCM-LEA" \ 12 | --split {train,val,minival,test,minitest} 13 | ``` 14 | 15 | #### Training 16 | 17 | 1. Pretraining 18 | 19 | You can download our pretrained projector weights from our [Model Zoo](), or train your own projector weights using [`pretrain.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/pretrain.sh). 20 | 21 | 2. Finetuning 22 | 23 | See [`finetune_sqa.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune_sqa.sh). 24 | 25 | #### Evaluation 26 | 27 | 1. Multiple-GPU inference 28 | You may evaluate this with multiple GPUs, and concatenate the generated jsonl files. Please refer to our script for [batch evaluation](https://github.com/haotian-liu/LLaVA/blob/main/scripts/sqa_eval_batch.sh) and [results gathering](https://github.com/haotian-liu/LLaVA/blob/main/scripts/sqa_eval_gather.sh). 29 | 30 | 2. Single-GPU inference 31 | 32 | (a) Generate LLaVA responses on ScienceQA dataset 33 | 34 | ```Shell 35 | python -m llava.eval.model_vqa_science \ 36 | --model-path liuhaotian/llava-lcs558k-scienceqa-vicuna-13b-v1.3 \ 37 | --question-file /path/to/ScienceQA/data/scienceqa/llava_test_QCM-LEA.json \ 38 | --image-folder /path/to/ScienceQA/data/scienceqa/images/test \ 39 | --answers-file vqa/results/ScienceQA/test_llava-13b.jsonl \ 40 | --conv-mode llava_v1 41 | ``` 42 | 43 | (b) Evaluate the generated responses 44 | 45 | ```Shell 46 | python eval_science_qa.py \ 47 | --base-dir /path/to/ScienceQA/data/scienceqa \ 48 | --result-file vqa/results/ScienceQA/test_llava-13b.jsonl \ 49 | --output-file vqa/results/ScienceQA/test_llava-13b_output.json \ 50 | --output-result vqa/results/ScienceQA/test_llava-13b_result.json \ 51 | ``` 52 | 53 | For reference, we attach our prediction file [`test_sqa_llava_lcs_558k_sqa_12e_vicuna_v1_3_13b.json`](https://github.com/haotian-liu/LLaVA/blob/main/llava/eval/table/results/test_sqa_llava_lcs_558k_sqa_12e_vicuna_v1_3_13b.json) and [`test_sqa_llava_13b_v0.json`](https://github.com/haotian-liu/LLaVA/blob/main/llava/eval/table/results/test_sqa_llava_13b_v0.json) for comparison when reproducing our results, as well as for further analysis in detail. 54 | -------------------------------------------------------------------------------- /LLaVA/docs/Windows.md: -------------------------------------------------------------------------------- 1 | # Run LLaVA on Windows 2 | 3 | *NOTE: LLaVA on Windows is not fully supported. Currently we only support 16-bit inference. For a more complete support, please use [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) for now. 
More functionality on Windows will be added soon; stay tuned.* 4 | 5 | ## Installation 6 | 7 | 1. Clone this repository and navigate to the LLaVA folder 8 | ```bash 9 | git clone https://github.com/haotian-liu/LLaVA.git 10 | cd LLaVA 11 | ``` 12 | 13 | 2. Install Package 14 | ```Shell 15 | conda create -n llava python=3.10 -y 16 | conda activate llava 17 | python -mpip install --upgrade pip # enable PEP 660 support 18 | pip install torch==2.0.1+cu117 torchvision==0.15.2+cu117 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu117 19 | pip install -e . 20 | pip uninstall bitsandbytes 21 | ``` 22 | 23 | ## Run demo 24 | 25 | See instructions [here](https://github.com/haotian-liu/LLaVA#demo). 26 | 27 | Note that quantization (4-bit, 8-bit) is *NOT* supported on Windows. Stay tuned for 4-bit support on Windows! 28 | -------------------------------------------------------------------------------- /LLaVA/docs/macOS.md: -------------------------------------------------------------------------------- 1 | # Run LLaVA on macOS 2 | 3 | *NOTE: LLaVA on macOS is not fully supported. Currently we only support 16-bit inference. More functionality on macOS will be added soon; stay tuned.* 4 | 5 | ## Installation 6 | 7 | 1. Clone this repository and navigate to the LLaVA folder 8 | ```bash 9 | git clone https://github.com/haotian-liu/LLaVA.git 10 | cd LLaVA 11 | ``` 12 | 13 | 2. Install Package 14 | ```Shell 15 | conda create -n llava python=3.10 -y 16 | conda activate llava 17 | python -mpip install --upgrade pip # enable PEP 660 support 18 | pip install -e . 19 | pip install torch==2.1.0 torchvision==0.16.0 20 | pip uninstall bitsandbytes 21 | ``` 22 | 23 | ## Run demo 24 | 25 | Specify `--device mps` when launching the model worker or the CLI. 26 | 27 | See instructions [here](https://github.com/haotian-liu/LLaVA#demo). 28 | 29 | Note that quantization (4-bit, 8-bit) is *NOT* supported on macOS. Stay tuned for 4-bit support on macOS! 30 | -------------------------------------------------------------------------------- /LLaVA/llava/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import LlavaLlamaForCausalLM 2 | -------------------------------------------------------------------------------- /LLaVA/llava/constants.py: -------------------------------------------------------------------------------- 1 | CONTROLLER_HEART_BEAT_EXPIRATION = 30 2 | WORKER_HEART_BEAT_INTERVAL = 15 3 | 4 | LOGDIR = "."
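# Added note: LOGDIR is consumed by the logging and serving utilities (e.g. build_logger in
# llava/utils.py and the Gradio web server) as the directory where log files are written;
# "." keeps them in the current working directory.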
5 | 6 | # Model Constants 7 | IGNORE_INDEX = -100 8 | IMAGE_TOKEN_INDEX = -200 9 | DEFAULT_IMAGE_TOKEN = "<image>" 10 | DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>" 11 | DEFAULT_IM_START_TOKEN = "<im_start>" 12 | DEFAULT_IM_END_TOKEN = "<im_end>" 13 | IMAGE_PLACEHOLDER = "<image-placeholder>" 14 | -------------------------------------------------------------------------------- /LLaVA/llava/eval/eval_pope.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | def eval_pope(answers, label_file): 6 | label_list = [json.loads(q)['label'] for q in open(label_file, 'r')] 7 | 8 | for answer in answers: 9 | text = answer['text'] 10 | 11 | # Only keep the first sentence 12 | if text.find('.') != -1: 13 | text = text.split('.')[0] 14 | 15 | text = text.replace(',', '') 16 | words = text.split(' ') 17 | if 'No' in words or 'not' in words or 'no' in words: 18 | answer['text'] = 'no' 19 | else: 20 | answer['text'] = 'yes' 21 | 22 | for i in range(len(label_list)): 23 | if label_list[i] == 'no': 24 | label_list[i] = 0 25 | else: 26 | label_list[i] = 1 27 | 28 | pred_list = [] 29 | for answer in answers: 30 | if answer['text'] == 'no': 31 | pred_list.append(0) 32 | else: 33 | pred_list.append(1) 34 | 35 | pos = 1 36 | neg = 0 37 | yes_ratio = pred_list.count(1) / len(pred_list) 38 | 39 | TP, TN, FP, FN = 0, 0, 0, 0 40 | for pred, label in zip(pred_list, label_list): 41 | if pred == pos and label == pos: 42 | TP += 1 43 | elif pred == pos and label == neg: 44 | FP += 1 45 | elif pred == neg and label == neg: 46 | TN += 1 47 | elif pred == neg and label == pos: 48 | FN += 1 49 | 50 | print('TP\tFP\tTN\tFN\t') 51 | print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN)) 52 | 53 | precision = float(TP) / float(TP + FP) 54 | recall = float(TP) / float(TP + FN) 55 | f1 = 2*precision*recall / (precision + recall) 56 | acc = (TP + TN) / (TP + TN + FP + FN) 57 | print('Accuracy: {}'.format(acc)) 58 | print('Precision: {}'.format(precision)) 59 | print('Recall: {}'.format(recall)) 60 | print('F1 score: {}'.format(f1)) 61 | print('Yes ratio: {}'.format(yes_ratio)) 62 | print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio) ) 63 | 64 | if __name__ == "__main__": 65 | parser = argparse.ArgumentParser() 66 | parser.add_argument("--annotation-dir", type=str) 67 | parser.add_argument("--question-file", type=str) 68 | parser.add_argument("--result-file", type=str) 69 | args = parser.parse_args() 70 | 71 | questions = [json.loads(line) for line in open(args.question_file)] 72 | questions = {question['question_id']: question for question in questions} 73 | answers = [json.loads(q) for q in open(args.result_file)] 74 | for file in os.listdir(args.annotation_dir): 75 | assert file.startswith('coco_pope_') 76 | assert file.endswith('.json') 77 | category = file[10:-5] 78 | cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category] 79 | print('Category: {}, # samples: {}'.format(category, len(cur_answers))) 80 | eval_pope(cur_answers, os.path.join(args.annotation_dir, file)) 81 | print("====================================") 82 | -------------------------------------------------------------------------------- /LLaVA/llava/eval/eval_textvqa.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import json 4 | import re 5 | 6 | from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator 7 | 8 | 9 | def get_args(): 10 | parser = argparse.ArgumentParser() 11 |
parser.add_argument('--annotation-file', type=str) 12 | parser.add_argument('--result-file', type=str) 13 | parser.add_argument('--result-dir', type=str) 14 | return parser.parse_args() 15 | 16 | 17 | def prompt_processor(prompt): 18 | if prompt.startswith('OCR tokens: '): 19 | pattern = r"Question: (.*?) Short answer:" 20 | match = re.search(pattern, prompt, re.DOTALL) 21 | question = match.group(1) 22 | elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3: 23 | if prompt.startswith('Reference OCR token:'): 24 | question = prompt.split('\n')[1] 25 | else: 26 | question = prompt.split('\n')[0] 27 | elif len(prompt.split('\n')) == 2: 28 | question = prompt.split('\n')[0] 29 | else: 30 | assert False 31 | 32 | return question.lower() 33 | 34 | 35 | def eval_single(annotation_file, result_file): 36 | experiment_name = os.path.splitext(os.path.basename(result_file))[0] 37 | print(experiment_name) 38 | annotations = json.load(open(annotation_file))['data'] 39 | annotations = {(annotation['image_id'], annotation['question'].lower()): annotation for annotation in annotations} 40 | results = [json.loads(line) for line in open(result_file)] 41 | 42 | pred_list = [] 43 | for result in results: 44 | annotation = annotations[(result['question_id'], prompt_processor(result['prompt']))] 45 | pred_list.append({ 46 | "pred_answer": result['text'], 47 | "gt_answers": annotation['answers'], 48 | }) 49 | 50 | evaluator = TextVQAAccuracyEvaluator() 51 | print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), 100. * evaluator.eval_pred_list(pred_list))) 52 | 53 | 54 | if __name__ == "__main__": 55 | args = get_args() 56 | 57 | if args.result_file is not None: 58 | eval_single(args.annotation_file, args.result_file) 59 | 60 | if args.result_dir is not None: 61 | for result_file in sorted(os.listdir(args.result_dir)): 62 | if not result_file.endswith('.jsonl'): 63 | print(f'Skipping {result_file}') 64 | continue 65 | eval_single(args.annotation_file, os.path.join(args.result_dir, result_file)) 66 | -------------------------------------------------------------------------------- /LLaVA/llava/eval/model_qa.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria 3 | import torch 4 | import os 5 | import json 6 | from tqdm import tqdm 7 | import shortuuid 8 | 9 | from llava.conversation import default_conversation 10 | from llava.utils import disable_torch_init 11 | 12 | 13 | @torch.inference_mode() 14 | def eval_model(model_name, questions_file, answers_file): 15 | # Model 16 | disable_torch_init() 17 | model_name = os.path.expanduser(model_name) 18 | tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) 19 | model = AutoModelForCausalLM.from_pretrained(model_name, 20 | torch_dtype=torch.float16).cuda() 21 | 22 | 23 | ques_file = open(os.path.expanduser(questions_file), "r") 24 | ans_file = open(os.path.expanduser(answers_file), "w") 25 | for i, line in enumerate(tqdm(ques_file)): 26 | idx = json.loads(line)["question_id"] 27 | qs = json.loads(line)["text"] 28 | cat = json.loads(line)["category"] 29 | conv = default_conversation.copy() 30 | conv.append_message(conv.roles[0], qs) 31 | prompt = conv.get_prompt() 32 | inputs = tokenizer([prompt]) 33 | input_ids = torch.as_tensor(inputs.input_ids).cuda() 34 | output_ids = model.generate( 35 | input_ids, 36 | do_sample=True, 37 | use_cache=True, 38 | temperature=0.7, 39 | max_new_tokens=1024,) 40 | 
outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0] 41 | try: 42 | index = outputs.index(conv.sep, len(prompt)) 43 | except ValueError: 44 | outputs += conv.sep 45 | index = outputs.index(conv.sep, len(prompt)) 46 | 47 | outputs = outputs[len(prompt) + len(conv.roles[1]) + 2:index].strip() 48 | ans_id = shortuuid.uuid() 49 | ans_file.write(json.dumps({"question_id": idx, 50 | "text": outputs, 51 | "answer_id": ans_id, 52 | "model_id": model_name, 53 | "metadata": {}}) + "\n") 54 | ans_file.flush() 55 | ans_file.close() 56 | 57 | if __name__ == "__main__": 58 | parser = argparse.ArgumentParser() 59 | parser.add_argument("--model-name", type=str, default="facebook/opt-350m") 60 | parser.add_argument("--question-file", type=str, default="tables/question.jsonl") 61 | parser.add_argument("--answers-file", type=str, default="answer.jsonl") 62 | args = parser.parse_args() 63 | 64 | eval_model(args.model_name, args.question_file, args.answers_file) 65 | -------------------------------------------------------------------------------- /LLaVA/llava/eval/qa_baseline_gpt35.py: -------------------------------------------------------------------------------- 1 | """Generate answers with GPT-3.5""" 2 | # Note: you need to be using OpenAI Python v0.27.0 for the code below to work 3 | import argparse 4 | import json 5 | import os 6 | import time 7 | import concurrent.futures 8 | 9 | import openai 10 | import tqdm 11 | import shortuuid 12 | 13 | MODEL = 'gpt-3.5-turbo' 14 | MODEL_ID = 'gpt-3.5-turbo:20230327' 15 | 16 | def get_answer(question_id: int, question: str, max_tokens: int): 17 | ans = { 18 | 'answer_id': shortuuid.uuid(), 19 | 'question_id': question_id, 20 | 'model_id': MODEL_ID, 21 | } 22 | for _ in range(3): 23 | try: 24 | response = openai.ChatCompletion.create( 25 | model=MODEL, 26 | messages=[{ 27 | 'role': 'system', 28 | 'content': 'You are a helpful assistant.' 
29 | }, { 30 | 'role': 'user', 31 | 'content': question, 32 | }], 33 | max_tokens=max_tokens, 34 | ) 35 | ans['text'] = response['choices'][0]['message']['content'] 36 | return ans 37 | except Exception as e: 38 | print('[ERROR]', e) 39 | ans['text'] = '#ERROR#' 40 | time.sleep(1) 41 | return ans 42 | 43 | 44 | if __name__ == '__main__': 45 | parser = argparse.ArgumentParser(description='ChatGPT answer generation.') 46 | parser.add_argument('-q', '--question') 47 | parser.add_argument('-o', '--output') 48 | parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output') 49 | args = parser.parse_args() 50 | 51 | questions_dict = {} 52 | with open(os.path.expanduser(args.question)) as f: 53 | for line in f: 54 | if not line: 55 | continue 56 | q = json.loads(line) 57 | questions_dict[q['question_id']] = q['text'] 58 | 59 | answers = [] 60 | 61 | with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor: 62 | futures = [] 63 | for qid, question in questions_dict.items(): 64 | future = executor.submit(get_answer, qid, question, args.max_tokens) 65 | futures.append(future) 66 | 67 | for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)): 68 | answers.append(future.result()) 69 | 70 | answers.sort(key=lambda x: x['question_id']) 71 | 72 | with open(os.path.expanduser(args.output), 'w') as f: 73 | table = [json.dumps(ans) for ans in answers] 74 | f.write('\n'.join(table)) 75 | -------------------------------------------------------------------------------- /LLaVA/llava/eval/summarize_gpt_review.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from collections import defaultdict 4 | 5 | import numpy as np 6 | 7 | import argparse 8 | 9 | def parse_args(): 10 | parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.') 11 | parser.add_argument('-d', '--dir', default=None) 12 | parser.add_argument('-v', '--version', default=None) 13 | parser.add_argument('-s', '--select', nargs='*', default=None) 14 | parser.add_argument('-f', '--files', nargs='*', default=[]) 15 | parser.add_argument('-i', '--ignore', nargs='*', default=[]) 16 | return parser.parse_args() 17 | 18 | 19 | if __name__ == '__main__': 20 | args = parse_args() 21 | 22 | if args.ignore is not None: 23 | args.ignore = [int(x) for x in args.ignore] 24 | 25 | if len(args.files) > 0: 26 | review_files = args.files 27 | else: 28 | review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_') or 'review' in args.dir)] 29 | 30 | for review_file in sorted(review_files): 31 | config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '') 32 | if args.select is not None and any(x not in config for x in args.select): 33 | continue 34 | if '0613' in config: 35 | version = '0613' 36 | else: 37 | version = '0314' 38 | if args.version is not None and args.version != version: 39 | continue 40 | scores = defaultdict(list) 41 | print(config) 42 | with open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f: 43 | for review_str in f: 44 | review = json.loads(review_str) 45 | if review['question_id'] in args.ignore: 46 | continue 47 | if 'category' in review: 48 | scores[review['category']].append(review['tuple']) 49 | scores['all'].append(review['tuple']) 50 | else: 51 | if 'tuple' in review: 52 | 
scores['all'].append(review['tuple']) 53 | else: 54 | scores['all'].append(review['score']) 55 | for k, v in sorted(scores.items()): 56 | stats = np.asarray(v).mean(0).tolist() 57 | stats = [round(x, 3) for x in stats] 58 | # print(k, stats, round(stats[1]/stats[0]*100, 1)) 59 | print(k, round(stats[1]/stats[0]*100, 1), round(stats[0] * 10, 1), round(stats[1] * 10, 1)) 60 | print('=================================') 61 | -------------------------------------------------------------------------------- /LLaVA/llava/eval/webpage/figures/alpaca.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/LLaVA/llava/eval/webpage/figures/alpaca.png -------------------------------------------------------------------------------- /LLaVA/llava/eval/webpage/figures/bard.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/LLaVA/llava/eval/webpage/figures/bard.jpg -------------------------------------------------------------------------------- /LLaVA/llava/eval/webpage/figures/chatgpt.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /LLaVA/llava/eval/webpage/figures/llama.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/LLaVA/llava/eval/webpage/figures/llama.jpg -------------------------------------------------------------------------------- /LLaVA/llava/eval/webpage/figures/swords_FILL0_wght300_GRAD0_opsz48.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /LLaVA/llava/eval/webpage/figures/vicuna.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/LLaVA/llava/eval/webpage/figures/vicuna.jpeg -------------------------------------------------------------------------------- /LLaVA/llava/eval/webpage/styles.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; 3 | background-color: #f8f9fa; 4 | } 5 | 6 | .navbar-dark .navbar-nav .nav-link { 7 | color: #f1cf68; 8 | font-size: 1.1rem; 9 | padding: 0.5rem 0.6rem; 10 | } 11 | 12 | .card-header { 13 | font-weight: bold; 14 | } 15 | 16 | .card { 17 | box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); 18 | transition: 0.3s; 19 | } 20 | 21 | .card:hover { 22 | box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2); 23 | } 24 | 25 | button { 26 | transition: background-color 0.3s; 27 | } 28 | 29 | button:hover { 30 | background-color: #007bff; 31 | } 32 | 33 | @media (max-width: 767px) { 34 | .form-row .form-group { 35 | margin-bottom: 10px; 36 | } 37 | } 38 | 39 | /* Extra styles */ 40 | 41 | .expandable-card .card-text-container { 42 | max-height: 200px; 43 | overflow-y: hidden; 44 | position: relative; 45 | } 46 | 47 | .expandable-card.expanded .card-text-container { 48 | max-height: none; 49 | } 50 | 51 | .expand-btn { 52 | position: relative; 53 | display: none; 54 | background-color: rgba(255, 255, 
255, 0.8); 55 | color: #510c75; 56 | border-color: transparent; 57 | } 58 | 59 | .expand-btn:hover { 60 | background-color: rgba(200, 200, 200, 0.8); 61 | text-decoration: none; 62 | border-color: transparent; 63 | color: #510c75; 64 | } 65 | 66 | .expand-btn:focus { 67 | outline: none; 68 | text-decoration: none; 69 | } 70 | 71 | .expandable-card:not(.expanded) .card-text-container:after { 72 | content: ""; 73 | position: absolute; 74 | bottom: 0; 75 | left: 0; 76 | width: 100%; 77 | height: 90px; 78 | background: linear-gradient(rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 1)); 79 | } 80 | 81 | .expandable-card:not(.expanded) .expand-btn { 82 | margin-top: -40px; 83 | } 84 | 85 | .card-body { 86 | padding-bottom: 5px; 87 | } 88 | 89 | .vertical-flex-layout { 90 | justify-content: center; 91 | align-items: center; 92 | height: 100%; 93 | display: flex; 94 | flex-direction: column; 95 | gap: 5px; 96 | } 97 | 98 | .figure-img { 99 | max-width: 100%; 100 | height: auto; 101 | } 102 | 103 | .adjustable-font-size { 104 | font-size: calc(0.5rem + 2vw); 105 | } 106 | -------------------------------------------------------------------------------- /LLaVA/llava/model/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | from .language_model.llava_llama import LlavaLlamaForCausalLM, LlavaConfig 3 | from .language_model.llava_mpt import LlavaMptForCausalLM, LlavaMptConfig 4 | from .language_model.llava_mistral import LlavaMistralForCausalLM, LlavaMistralConfig 5 | except: 6 | pass 7 | -------------------------------------------------------------------------------- /LLaVA/llava/model/apply_delta.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta 4 | """ 5 | import argparse 6 | 7 | import torch 8 | from tqdm import tqdm 9 | from transformers import AutoTokenizer, AutoModelForCausalLM 10 | from llava import LlavaLlamaForCausalLM 11 | 12 | 13 | def apply_delta(base_model_path, target_model_path, delta_path): 14 | print("Loading base model") 15 | base = AutoModelForCausalLM.from_pretrained( 16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 17 | 18 | print("Loading delta") 19 | delta = LlavaLlamaForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 20 | delta_tokenizer = AutoTokenizer.from_pretrained(delta_path) 21 | 22 | print("Applying delta") 23 | for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"): 24 | if name not in base.state_dict(): 25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model' 26 | continue 27 | if param.data.shape == base.state_dict()[name].shape: 28 | param.data += base.state_dict()[name] 29 | else: 30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], \ 31 | f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}' 32 | bparam = base.state_dict()[name] 33 | param.data[:bparam.shape[0], :bparam.shape[1]] += bparam 34 | 35 | print("Saving target model") 36 | delta.save_pretrained(target_model_path) 37 | delta_tokenizer.save_pretrained(target_model_path) 38 | 39 | 40 | if __name__ == "__main__": 41 | parser = argparse.ArgumentParser() 42 | parser.add_argument("--base-model-path", type=str, required=True) 43 | parser.add_argument("--target-model-path", type=str, 
required=True) 44 | parser.add_argument("--delta-path", type=str, required=True) 45 | 46 | args = parser.parse_args() 47 | 48 | apply_delta(args.base_model_path, args.target_model_path, args.delta_path) 49 | -------------------------------------------------------------------------------- /LLaVA/llava/model/consolidate.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate 4 | """ 5 | import argparse 6 | 7 | import torch 8 | from transformers import AutoTokenizer, AutoModelForCausalLM 9 | from llava.model import * 10 | from llava.model.utils import auto_upgrade 11 | 12 | 13 | def consolidate_ckpt(src_path, dst_path): 14 | print("Loading model") 15 | auto_upgrade(src_path) 16 | src_model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 17 | src_tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False) 18 | src_model.save_pretrained(dst_path) 19 | src_tokenizer.save_pretrained(dst_path) 20 | 21 | 22 | if __name__ == "__main__": 23 | parser = argparse.ArgumentParser() 24 | parser.add_argument("--src", type=str, required=True) 25 | parser.add_argument("--dst", type=str, required=True) 26 | 27 | args = parser.parse_args() 28 | 29 | consolidate_ckpt(args.src, args.dst) 30 | -------------------------------------------------------------------------------- /LLaVA/llava/model/make_delta.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python3 -m llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/llava-7b --delta ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta 4 | """ 5 | import argparse 6 | 7 | import torch 8 | from tqdm import tqdm 9 | from transformers import AutoTokenizer, AutoModelForCausalLM 10 | from llava.model.utils import auto_upgrade 11 | 12 | 13 | def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id): 14 | print("Loading base model") 15 | base = AutoModelForCausalLM.from_pretrained( 16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 17 | 18 | print("Loading target model") 19 | auto_upgrade(target_model_path) 20 | target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 21 | 22 | print("Calculating delta") 23 | for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"): 24 | if name not in base.state_dict(): 25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model' 26 | continue 27 | if param.data.shape == base.state_dict()[name].shape: 28 | param.data -= base.state_dict()[name] 29 | else: 30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}' 31 | bparam = base.state_dict()[name] 32 | param.data[:bparam.shape[0], :bparam.shape[1]] -= bparam 33 | 34 | print("Saving delta") 35 | if hub_repo_id: 36 | kwargs = {"push_to_hub": True, "repo_id": hub_repo_id} 37 | else: 38 | kwargs = {} 39 | target.save_pretrained(delta_path, **kwargs) 40 | target_tokenizer = AutoTokenizer.from_pretrained(target_model_path) 41 | target_tokenizer.save_pretrained(delta_path, **kwargs) 42 | 43 | 44 | if __name__ == "__main__": 45 | parser = argparse.ArgumentParser() 46 | parser.add_argument("--base-model-path", type=str, 
required=True) 47 | parser.add_argument("--target-model-path", type=str, required=True) 48 | parser.add_argument("--delta-path", type=str, required=True) 49 | parser.add_argument("--hub-repo-id", type=str, default=None) 50 | args = parser.parse_args() 51 | 52 | make_delta(args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id) 53 | -------------------------------------------------------------------------------- /LLaVA/llava/model/multimodal_encoder/builder.py: -------------------------------------------------------------------------------- 1 | import os 2 | from .clip_encoder import CLIPVisionTower 3 | 4 | 5 | def build_vision_tower(vision_tower_cfg, **kwargs): 6 | vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None)) 7 | is_absolute_path_exists = os.path.exists(vision_tower) 8 | if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion") or "ShareGPT4V" in vision_tower: 9 | return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs) 10 | 11 | raise ValueError(f'Unknown vision tower: {vision_tower}') 12 | -------------------------------------------------------------------------------- /LLaVA/llava/model/multimodal_projector/builder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import re 4 | 5 | 6 | class IdentityMap(nn.Module): 7 | def __init__(self): 8 | super().__init__() 9 | 10 | def forward(self, x, *args, **kwargs): 11 | return x 12 | 13 | @property 14 | def config(self): 15 | return {"mm_projector_type": 'identity'} 16 | 17 | 18 | class SimpleResBlock(nn.Module): 19 | def __init__(self, channels): 20 | super().__init__() 21 | self.pre_norm = nn.LayerNorm(channels) 22 | 23 | self.proj = nn.Sequential( 24 | nn.Linear(channels, channels), 25 | nn.GELU(), 26 | nn.Linear(channels, channels) 27 | ) 28 | def forward(self, x): 29 | x = self.pre_norm(x) 30 | return x + self.proj(x) 31 | 32 | 33 | def build_vision_projector(config, delay_load=False, **kwargs): 34 | projector_type = getattr(config, 'mm_projector_type', 'linear') 35 | 36 | if projector_type == 'linear': 37 | return nn.Linear(config.mm_hidden_size, config.hidden_size) 38 | 39 | mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) 40 | if mlp_gelu_match: 41 | mlp_depth = int(mlp_gelu_match.group(1)) 42 | modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)] 43 | for _ in range(1, mlp_depth): 44 | modules.append(nn.GELU()) 45 | modules.append(nn.Linear(config.hidden_size, config.hidden_size)) 46 | return nn.Sequential(*modules) 47 | 48 | if projector_type == 'identity': 49 | return IdentityMap() 50 | 51 | raise ValueError(f'Unknown projector type: {projector_type}') 52 | -------------------------------------------------------------------------------- /LLaVA/llava/model/utils.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoConfig 2 | 3 | 4 | def auto_upgrade(config): 5 | cfg = AutoConfig.from_pretrained(config) 6 | if 'llava' in config and 'llava' not in cfg.model_type: 7 | assert cfg.model_type == 'llama' 8 | print("You are using newer LLaVA code base, while the checkpoint of v0 is from older code base.") 9 | print("You must upgrade the checkpoint to the new code base (this can be done automatically).") 10 | confirm = input("Please confirm that you want to upgrade the checkpoint. 
[Y/N]") 11 | if confirm.lower() in ["y", "yes"]: 12 | print("Upgrading checkpoint...") 13 | assert len(cfg.architectures) == 1 14 | setattr(cfg.__class__, "model_type", "llava") 15 | cfg.architectures[0] = 'LlavaLlamaForCausalLM' 16 | cfg.save_pretrained(config) 17 | print("Checkpoint upgraded.") 18 | else: 19 | print("Checkpoint upgrade aborted.") 20 | exit(1) 21 | -------------------------------------------------------------------------------- /LLaVA/llava/serve/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/LLaVA/llava/serve/__init__.py -------------------------------------------------------------------------------- /LLaVA/llava/serve/examples/extreme_ironing.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/LLaVA/llava/serve/examples/extreme_ironing.jpg -------------------------------------------------------------------------------- /LLaVA/llava/serve/examples/waterview.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/LLaVA/llava/serve/examples/waterview.jpg -------------------------------------------------------------------------------- /LLaVA/llava/serve/register_worker.py: -------------------------------------------------------------------------------- 1 | """ 2 | Manually register workers. 3 | 4 | Usage: 5 | python3 -m fastchat.serve.register_worker --controller http://localhost:21001 --worker-name http://localhost:21002 6 | """ 7 | 8 | import argparse 9 | 10 | import requests 11 | 12 | if __name__ == "__main__": 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument("--controller-address", type=str) 15 | parser.add_argument("--worker-name", type=str) 16 | parser.add_argument("--check-heart-beat", action="store_true") 17 | args = parser.parse_args() 18 | 19 | url = args.controller_address + "/register_worker" 20 | data = { 21 | "worker_name": args.worker_name, 22 | "check_heart_beat": args.check_heart_beat, 23 | "worker_status": None, 24 | } 25 | r = requests.post(url, json=data) 26 | assert r.status_code == 200 27 | -------------------------------------------------------------------------------- /LLaVA/llava/serve/test_message.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | 4 | import requests 5 | 6 | from llava.conversation import default_conversation 7 | 8 | 9 | def main(): 10 | if args.worker_address: 11 | worker_addr = args.worker_address 12 | else: 13 | controller_addr = args.controller_address 14 | ret = requests.post(controller_addr + "/refresh_all_workers") 15 | ret = requests.post(controller_addr + "/list_models") 16 | models = ret.json()["models"] 17 | models.sort() 18 | print(f"Models: {models}") 19 | 20 | ret = requests.post(controller_addr + "/get_worker_address", 21 | json={"model": args.model_name}) 22 | worker_addr = ret.json()["address"] 23 | print(f"worker_addr: {worker_addr}") 24 | 25 | if worker_addr == "": 26 | return 27 | 28 | conv = default_conversation.copy() 29 | conv.append_message(conv.roles[0], args.message) 30 | prompt = conv.get_prompt() 31 | 32 | headers = {"User-Agent": "LLaVA Client"} 33 | pload = { 34 | "model": args.model_name, 35 | 
"prompt": prompt, 36 | "max_new_tokens": args.max_new_tokens, 37 | "temperature": 0.7, 38 | "stop": conv.sep, 39 | } 40 | response = requests.post(worker_addr + "/worker_generate_stream", headers=headers, 41 | json=pload, stream=True) 42 | 43 | print(prompt.replace(conv.sep, "\n"), end="") 44 | for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"): 45 | if chunk: 46 | data = json.loads(chunk.decode("utf-8")) 47 | output = data["text"].split(conv.sep)[-1] 48 | print(output, end="\r") 49 | print("") 50 | 51 | 52 | if __name__ == "__main__": 53 | parser = argparse.ArgumentParser() 54 | parser.add_argument("--controller-address", type=str, default="http://localhost:21001") 55 | parser.add_argument("--worker-address", type=str) 56 | parser.add_argument("--model-name", type=str, default="facebook/opt-350m") 57 | parser.add_argument("--max-new-tokens", type=int, default=32) 58 | parser.add_argument("--message", type=str, default= 59 | "Tell me a story with more than 1000 words.") 60 | args = parser.parse_args() 61 | 62 | main() 63 | -------------------------------------------------------------------------------- /LLaVA/llava/train/train_mem.py: -------------------------------------------------------------------------------- 1 | from llava.train.train import train 2 | 3 | if __name__ == "__main__": 4 | train(attn_implementation="flash_attention_2") 5 | -------------------------------------------------------------------------------- /LLaVA/llava/train/train_xformers.py: -------------------------------------------------------------------------------- 1 | # Make it more memory efficient by monkey patching the LLaMA model with xformers attention. 2 | 3 | # Need to call this before importing transformers. 4 | from llava.train.llama_xformers_attn_monkey_patch import ( 5 | replace_llama_attn_with_xformers_attn, 6 | ) 7 | 8 | replace_llama_attn_with_xformers_attn() 9 | 10 | from llava.train.train import train 11 | 12 | if __name__ == "__main__": 13 | train() 14 | -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/complex_reasoning/000_caps.txt: -------------------------------------------------------------------------------- 1 | A man wearing multiple neck ties making a goofy face. 2 | A man in a white shirt wearing very many ties. 3 | a man with ties on poses for a picture 4 | A man wearing multiple ties on his neck. 5 | A young man smiles while wearing several ties. 6 | 7 | tie: [0.574, 0.298, 0.752, 0.704] 8 | tie: [0.464, 0.339, 0.639, 0.789] 9 | tie: [0.349, 0.363, 0.563, 0.732] 10 | tie: [0.259, 0.255, 0.668, 0.805] 11 | person: [0.019, 0.065, 0.962, 0.988] 12 | person: [0.0, 0.24, 0.214, 1.0] 13 | tie: [0.316, 0.778, 0.443, 0.867] 14 | tie: [0.386, 0.707, 0.496, 0.801] 15 | tie: [0.251, 0.354, 0.402, 0.747] 16 | tie: [0.44, 0.318, 0.63, 0.397] 17 | tie: [0.566, 0.33, 0.655, 0.543] 18 | tie: [0.25, 0.409, 0.359, 0.556] -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/complex_reasoning/000_conv.txt: -------------------------------------------------------------------------------- 1 | Question: 2 | What is unusual about this photo? 3 | === 4 | Answer: 5 | In the photo, the man is wearing a total of ten ties around his neck. This is considered unusual because it is not common for someone to wear multiple ties at once. In a typical setting, a person would wear only one tie. 
The man's goofy facial expression also adds to the unconventional and humorous nature of the image. -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/complex_reasoning/001_caps.txt: -------------------------------------------------------------------------------- 1 | A group of people standing outside of a black vehicle with various luggage. 2 | Luggage surrounds a vehicle in an underground parking area 3 | People try to fit all of their luggage in an SUV. 4 | The sport utility vehicle is parked in the public garage, being packed for a trip 5 | Some people with luggage near a van that is transporting it. 6 | 7 | person: [0.681, 0.242, 0.774, 0.694] 8 | person: [0.63, 0.222, 0.686, 0.516] 9 | person: [0.444, 0.233, 0.487, 0.34] 10 | backpack: [0.384, 0.696, 0.485, 0.914] 11 | backpack: [0.755, 0.413, 0.846, 0.692] 12 | suitcase: [0.758, 0.413, 0.845, 0.69] 13 | suitcase: [0.1, 0.497, 0.173, 0.579] 14 | bicycle: [0.282, 0.363, 0.327, 0.442] 15 | car: [0.786, 0.25, 0.848, 0.322] 16 | car: [0.783, 0.27, 0.827, 0.335] 17 | car: [0.86, 0.254, 0.891, 0.3] 18 | car: [0.261, 0.101, 0.787, 0.626] -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/complex_reasoning/001_conv.txt: -------------------------------------------------------------------------------- 1 | Question: 2 | What challenges do these people face? 3 | === 4 | Answer: 5 | In the image, a group of people is standing outside a black SUV in a parking area, surrounded by various pieces of luggage, including suitcases and backpacks. They are facing the challenge of fitting all their luggage into the black SUV. There are multiple suitcases and backpacks to be packed, which suggests that the group has a significant amount of belongings to accommodate. They might have to strategize and arrange the luggage efficiently to ensure that everything fits properly into the vehicle. Additionally, they need to consider the comfort of the passengers and visibility while driving, so the placement of the luggage must not obstruct the driver's view or make the passengers uncomfortable during the trip. -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/complex_reasoning/002_caps.txt: -------------------------------------------------------------------------------- 1 | There is a movie theater that displays the show times above the doors. 2 | A red fire hydrant is deep in the snow. 3 | The fire hydrant is in the snow near a recently plowed sidewalk. 4 | This city has had a very hard winter with snow. 5 | A hotel for dogs in the snow in winter. 6 | 7 | fire hydrant: [0.326, 0.612, 0.426, 0.72] -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/complex_reasoning/002_conv.txt: -------------------------------------------------------------------------------- 1 | Question: 2 | What challenges might this city face? 3 | === 4 | Answer: 5 | The city faces challenges due to the harsh winter conditions and heavy snowfall. In the image, a red fire hydrant is almost buried deep in the snow, which indicates the significant amount of snow the city has experienced. This can lead to various challenges such as difficulties in transportation, increased risk of accidents, and disruptions to daily life. 
For example, the recently plowed sidewalk near the fire hydrant shows that the city has to constantly clear snow from roads and sidewalks to maintain access and safety for pedestrians and vehicles. Moreover, emergency services, like firefighters, might face challenges accessing crucial equipment, such as fire hydrants, during emergencies due to the snow accumulation. This highlights the importance of effective snow management strategies and preparedness in such cities to minimize the impact of harsh winter conditions on residents and essential services. -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/complex_reasoning/system_message.txt: -------------------------------------------------------------------------------- 1 | You are an AI visual assistant that can analyze a single image. You receive five sentences, each describing the same image you are observing. In addition, specific object locations within the image are given, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. 2 | 3 | The task is to use the provided caption and bounding box information, create a plausible question about the image, and provide the answer in detail. 4 | 5 | Create complex questions beyond describing the scene. 6 | To answer such questions, one should require first understanding the visual content, then based on the background knowledge or reasoning, either explain why the things are happening that way, or provide guides and help to user's request. Make the question challenging by not including the visual content details in the question so that the user needs to reason about that first. 7 | 8 | Instead of directly mentioning the bounding box coordinates, utilize this data to explain the scene using natural language. Include details like object counts, position of the objects, relative position between the objects. 9 | 10 | When using the information from the caption and coordinates, directly explain the scene, and do not mention that the information source is the caption or the bounding box. Always answer as if you are directly looking at the image. -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/conversation/000_caps.txt: -------------------------------------------------------------------------------- 1 | There is a movie theater that displays the show times above the doors. 2 | A red fire hydrant is deep in the snow. 3 | The fire hydrant is in the snow near a recently plowed sidewalk. 4 | This city has had a very hard winter with snow. 5 | A hotel for dogs in the snow in winter. -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/conversation/000_conv.txt: -------------------------------------------------------------------------------- 1 | Question: 2 | What color is the fire hydrant in the image? 3 | === 4 | Answer: 5 | The fire hydrant in the image is red. 6 | === 7 | Question: 8 | Is the fire hydrant fully covered by snow or partially visible? 9 | === 10 | Answer: 11 | The fire hydrant is partially visible, with a significant portion of it covered by the deep snow. 12 | === 13 | Question: 14 | What is the condition of the sidewalk near the fire hydrant? 
15 | === 16 | Answer: 17 | The sidewalk near the fire hydrant has been recently plowed, so it is relatively clear of snow compared to the surrounding area. 18 | === 19 | Question: 20 | What does the image suggest about the winter season in the city? 21 | === 22 | Answer: 23 | The image suggests that the winter season in the city has been quite harsh and challenging. The significant snow accumulation seen in the image indicates that the city has experienced heavy snowfall, which can lead to various difficulties and disruptions for its residents. 24 | 25 | For instance, the deep snow covering the fire hydrant could potentially hinder its accessibility in case of emergencies, which poses safety concerns. The recently plowed sidewalk implies that the city's maintenance crews have been actively working to keep the walkways clear and safe for pedestrians, but the sheer amount of snowfall might make it difficult to keep up with the continuous snow removal efforts. 26 | 27 | Furthermore, such extreme winter conditions can impact transportation, leading to traffic issues, delays in public transit, and increased risks of accidents due to slippery roads. It can also cause problems for businesses and schools, as people might face challenges commuting to work or attending classes. Additionally, the heavy snow can put extra strain on infrastructure, such as roofs and power lines, increasing the likelihood of structural damage or power outages. 28 | 29 | In conclusion, the image of the red fire hydrant deep in the snow and the recently plowed sidewalk suggest that the city has faced a particularly severe winter season, with substantial snowfall that has likely caused various challenges and disruptions for its residents and infrastructure. -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/conversation/001_caps.txt: -------------------------------------------------------------------------------- 1 | A man is skiing in the open snow covered hills 2 | A skier is making his way into the snow. 3 | A skier on the foothills of a huge mountain range. 4 | A skier looks at mountains while standing near a trail sign. 5 | a single person skiing by an area with a lot of bushes -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/conversation/system_message.txt: -------------------------------------------------------------------------------- 1 | You are an AI visual assistant, and you are seeing a single image. What you see are provided with five sentences, describing the same image you are looking at. Answer all questions as you are seeing the image. 2 | 3 | Design a conversation between you and a person asking about this photo. The answers should be in a tone that a visual AI assistant is seeing the image and answering the question. 4 | Ask diverse questions and give corresponding answers. 5 | 6 | Include questions asking about the visual content of the image, including the object types, counting the objects, object actions, object locations, relative positions between objects, etc. Only include questions that have definite answers: 7 | (1) one can see the content in the image that the question asks about and can answer confidently; 8 | (2) one can determine confidently from the image that it is not in the image. 9 | Do not ask any question that cannot be answered confidently. 
10 | 11 | Also include complex questions that are relevant to the content in the image, for example, asking about background knowledge of the objects in the image, asking to discuss about events happening in the image, etc. Again, do not ask about uncertain details. 12 | Provide detailed answers when answering complex questions. For example, give detailed examples or reasoning steps to make the content more convincing and well-organized. You can include multiple paragraphs if necessary. -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/detail_description/000_caps.txt: -------------------------------------------------------------------------------- 1 | A harbor filled with lots of boats next to a building. 2 | A bicycle parked in front of several boats at a dock. 3 | A red bicycle in front of a line of docked white yachts 4 | A bike sits before boats which sit before a long building. 5 | A bicycle is a convenient means of land transportation when you live on a boat. 6 | 7 | bicycle: [0.287, 0.641, 0.507, 0.874] 8 | bicycle: [0.566, 0.667, 0.63, 0.731] 9 | boat: [0.318, 0.579, 0.575, 0.724] 10 | boat: [0.704, 0.607, 0.818, 0.727] 11 | boat: [0.818, 0.601, 0.942, 0.744] 12 | boat: [0.002, 0.53, 0.243, 0.71] 13 | boat: [0.541, 0.611, 0.668, 0.731] 14 | person: [0.778, 0.527, 0.797, 0.57] 15 | cup: [0.708, 0.733, 0.724, 0.758] 16 | boat: [0.236, 0.532, 0.404, 0.64] 17 | boat: [0.81, 0.632, 0.836, 0.676] 18 | boat: [0.957, 0.526, 1.0, 0.752] -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/detail_description/000_conv.txt: -------------------------------------------------------------------------------- 1 | It is a harbor filled with numerous boats of various sizes docked next to a long building. Among the boats, there are a few white yachts lined up, standing out from the rest. There is a red bicycle prominently parked in front of the line of docked boats, serving as a convenient means of land transportation for those living on the boats. Another bicycle can be seen further back in the scene, near the middle of the harbor. 2 | 3 | A person is visible near the right side of the harbor, possibly enjoying the view or attending to their boat. Additionally, there is a cup placed on a surface near the middle of the scene. -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/detail_description/001_caps.txt: -------------------------------------------------------------------------------- 1 | A group of people standing outside of a black vehicle with various luggage. 2 | Luggage surrounds a vehicle in an underground parking area 3 | People try to fit all of their luggage in an SUV. 4 | The sport utility vehicle is parked in the public garage, being packed for a trip 5 | Some people with luggage near a van that is transporting it. 
6 | 7 | person: [0.681, 0.242, 0.774, 0.694] 8 | person: [0.63, 0.222, 0.686, 0.516] 9 | person: [0.444, 0.233, 0.487, 0.34] 10 | backpack: [0.384, 0.696, 0.485, 0.914] 11 | backpack: [0.755, 0.413, 0.846, 0.692] 12 | suitcase: [0.758, 0.413, 0.845, 0.69] 13 | suitcase: [0.1, 0.497, 0.173, 0.579] 14 | bicycle: [0.282, 0.363, 0.327, 0.442] 15 | car: [0.786, 0.25, 0.848, 0.322] 16 | car: [0.783, 0.27, 0.827, 0.335] 17 | car: [0.86, 0.254, 0.891, 0.3] 18 | car: [0.261, 0.101, 0.787, 0.626] -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/detail_description/001_conv.txt: -------------------------------------------------------------------------------- 1 | The image is an underground parking area with a black sport utility vehicle (SUV) parked. There are three people in the scene, with one person standing closer to the left side of the vehicle, another person in the middle, and the third person on the right side. They are all working together to pack their luggage into the SUV for a trip. 2 | 3 | In the parking area, various luggage items are scattered around the vehicle. There are two backpacks, one located near the left rear wheel and the other closer to the right side of the vehicle. Additionally, there are two suitcases, one on the right side of the car and another further away near the center of the parking area. A bicycle can also be seen on the left side of the vehicle. 4 | 5 | Other cars are parked around the main SUV, with one car positioned behind it and slightly to the left, another behind and slightly to the right, and the third car further behind on the right side. -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/detail_description/002_caps.txt: -------------------------------------------------------------------------------- 1 | A man holds a Wii-mote above his head while another looks on. 2 | A guy and his friend are playing Nintendo Wii. 3 | A young man is holding a video game remote over his head. 4 | two men standing in a room while one plays with a wii mote 5 | Some guys standing and playing a video game. 6 | 7 | couch: [0.697, 0.759, 0.995, 1.0] 8 | dining table: [0.426, 0.755, 1.0, 0.987] 9 | person: [0.082, 0.252, 0.342, 1.0] 10 | person: [0.399, 0.085, 0.742, 0.982] 11 | remote: [0.477, 0.135, 0.516, 0.187] 12 | sink: [0.016, 0.501, 0.063, 0.52] 13 | potted plant: [0.798, 0.384, 0.888, 0.645] 14 | refrigerator: [0.305, 0.389, 0.414, 0.547] 15 | chair: [0.72, 0.509, 0.858, 0.725] -------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/detail_description/002_conv.txt: -------------------------------------------------------------------------------- 1 | The image shows two men standing in a room, engaged in playing a video game on a Nintendo Wii console. One of the men is holding a Wii remote above his head with enthusiasm, while the other man looks on, likely enjoying the friendly competition. 2 | 3 | The room appears to be a living space with a couch located in the background and a dining table nearby. A potted plant can be seen placed close to the couch, and a chair is situated in the middle of the room. The room also features a kitchen area with a sink and a refrigerator visible in the background. 
-------------------------------------------------------------------------------- /LLaVA/playground/data/prompts/detail_description/system_message.txt: -------------------------------------------------------------------------------- 1 | You are an AI visual assistant that can analyze a single image. You receive five sentences, each describing the same image you are observing. In addition, specific object locations within the image are given, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. 2 | 3 | Using the provided caption and bounding box information, describe the scene in a detailed manner. 4 | 5 | Instead of directly mentioning the bounding box coordinates, utilize this data to explain the scene using natural language. Include details like object counts, position of the objects, relative position between the objects. 6 | 7 | When using the information from the caption and coordinates, directly explain the scene, and do not mention that the information source is the caption or the bounding box. Always answer as if you are directly looking at the image. -------------------------------------------------------------------------------- /LLaVA/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "llava" 7 | version = "1.2.2.post1" 8 | description = "Towards GPT-4 like large language and visual assistant." 9 | readme = "README.md" 10 | requires-python = ">=3.8" 11 | classifiers = [ 12 | "Programming Language :: Python :: 3", 13 | "License :: OSI Approved :: Apache Software License", 14 | ] 15 | dependencies = [ 16 | "torch==2.0.1+cu117", "torchvision==0.15.2+cu117", 17 | "transformers==4.31.0", "tokenizers==0.13.3", "sentencepiece==0.1.99", "shortuuid", 18 | "accelerate==0.22.0", "peft==0.4.0", "bitsandbytes==0.41.0", 19 | "pydantic", "markdown2[all]", "numpy", "scikit-learn==1.2.2", 20 | "gradio==4.16.0", "gradio_client==0.8.1", 21 | "requests", "httpx==0.24.0", "uvicorn", "fastapi", 22 | "einops==0.6.1", "einops-exts==0.0.4", "timm==0.6.13", 23 | ] 24 | 25 | [project.optional-dependencies] 26 | train = ["deepspeed==0.9.5", "ninja", "wandb"] 27 | build = ["build", "twine"] 28 | 29 | [project.urls] 30 | "Homepage" = "https://llava-vl.github.io" 31 | "Bug Tracker" = "https://github.com/haotian-liu/LLaVA/issues" 32 | 33 | [tool.setuptools.packages.find] 34 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"] 35 | 36 | [tool.wheel] 37 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"] 38 | -------------------------------------------------------------------------------- /LLaVA/scripts/convert_gqa_for_eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument("--src", type=str) 7 | parser.add_argument("--dst", type=str) 8 | args = parser.parse_args() 9 | 10 | all_answers = [] 11 | for line_idx, line in enumerate(open(args.src)): 12 | res = json.loads(line) 13 | question_id = res['question_id'] 14 | text = res['text'].rstrip('.').lower() 15 | all_answers.append({"questionId": question_id, "prediction": 
text}) 16 | 17 | with open(args.dst, 'w') as f: 18 | json.dump(all_answers, f) 19 | -------------------------------------------------------------------------------- /LLaVA/scripts/convert_mmbench_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | import pandas as pd 5 | 6 | def get_args(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("--annotation-file", type=str, required=True) 9 | parser.add_argument("--result-dir", type=str, required=True) 10 | parser.add_argument("--upload-dir", type=str, required=True) 11 | parser.add_argument("--experiment", type=str, required=True) 12 | 13 | return parser.parse_args() 14 | 15 | if __name__ == "__main__": 16 | args = get_args() 17 | 18 | df = pd.read_table(args.annotation_file) 19 | 20 | cur_df = df.copy() 21 | cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category']) 22 | cur_df.insert(6, 'prediction', None) 23 | for pred in open(os.path.join(args.result_dir, f"{args.experiment}.jsonl")): 24 | pred = json.loads(pred) 25 | cur_df.loc[df['index'] == pred['question_id'], 'prediction'] = pred['text'] 26 | 27 | cur_df.to_excel(os.path.join(args.upload_dir, f"{args.experiment}.xlsx"), index=False, engine='openpyxl') 28 | -------------------------------------------------------------------------------- /LLaVA/scripts/convert_mmvet_for_eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument("--src", type=str) 7 | parser.add_argument("--dst", type=str) 8 | args = parser.parse_args() 9 | 10 | cur_result = {} 11 | 12 | for line in open(args.src): 13 | data = json.loads(line) 14 | qid = data['question_id'] 15 | cur_result[f'v1_{qid}'] = data['text'] 16 | 17 | with open(args.dst, 'w') as f: 18 | json.dump(cur_result, f, indent=2) 19 | -------------------------------------------------------------------------------- /LLaVA/scripts/convert_seed_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | 6 | def get_args(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("--annotation-file", type=str) 9 | parser.add_argument("--result-file", type=str) 10 | parser.add_argument("--result-upload-file", type=str) 11 | parser.add_argument("--save-path", type=str) 12 | return parser.parse_args() 13 | 14 | 15 | def eval_single(result_file, eval_only_type=None): 16 | results = {} 17 | metrics = {} 18 | for line in open(result_file): 19 | row = json.loads(line) 20 | results[row['question_id']] = row 21 | 22 | type_counts = {} 23 | correct_counts = {} 24 | for question_data in data['questions']: 25 | if eval_only_type is not None and question_data['data_type'] != eval_only_type: continue 26 | data_type = question_data['question_type_id'] 27 | type_counts[data_type] = type_counts.get(data_type, 0) + 1 28 | try: 29 | question_id = int(question_data['question_id']) 30 | except: 31 | question_id = question_data['question_id'] 32 | if question_id not in results: 33 | correct_counts[data_type] = correct_counts.get(data_type, 0) 34 | continue 35 | row = results[question_id] 36 | if row['text'] == question_data['answer']: 37 | correct_counts[data_type] = correct_counts.get(data_type, 0) + 1 38 | 39 | total_count = 0 40 | total_correct = 0 41 | for data_type in 
sorted(type_counts.keys()): 42 | accuracy = correct_counts[data_type] / type_counts[data_type] * 100 43 | if eval_only_type is None: 44 | print(f"{ques_type_id_to_name[data_type]}: {accuracy:.2f}%") 45 | metrics[ques_type_id_to_name[data_type]] = round(accuracy, 3) 46 | 47 | total_count += type_counts[data_type] 48 | total_correct += correct_counts[data_type] 49 | 50 | total_accuracy = total_correct / total_count * 100 51 | if eval_only_type is None: 52 | print(f"Total accuracy: {total_accuracy:.2f}%") 53 | metrics["Total accuracy"] = round(total_accuracy, 3) 54 | else: 55 | print(f"{eval_only_type} accuracy: {total_accuracy:.2f}%") 56 | metrics[eval_only_type] = round(total_accuracy, 3) 57 | 58 | # save metrics to file 59 | with open(os.path.join(args.save_path, "metrics.json"), "w") as f: 60 | json.dump(metrics, f) 61 | 62 | return results 63 | 64 | if __name__ == "__main__": 65 | args = get_args() 66 | data = json.load(open(args.annotation_file)) 67 | ques_type_id_to_name = {id:n for n,id in data['question_type'].items()} 68 | 69 | results = eval_single(args.result_file) 70 | eval_single(args.result_file, eval_only_type='image') 71 | #eval_single(args.result_file, eval_only_type='video') 72 | 73 | with open(args.result_upload_file, 'w') as fp: 74 | for question in data['questions']: 75 | qid = question['question_id'] 76 | if qid in results: 77 | result = results[qid] 78 | else: 79 | result = results[int(qid)] 80 | fp.write(json.dumps({ 81 | 'question_id': qid, 82 | 'prediction': result['text'] 83 | }) + '\n') 84 | -------------------------------------------------------------------------------- /LLaVA/scripts/convert_sqa_to_llava.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import fire 4 | import re 5 | from convert_sqa_to_llava_base_prompt import build_prompt_chatbot 6 | 7 | 8 | def convert_to_llava(base_dir, split, prompt_format="QCM-LEA"): 9 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split] 10 | problems = json.load(open(os.path.join(base_dir, "problems.json"))) 11 | 12 | split_problems = build_prompt_chatbot( 13 | problems, split_indices, prompt_format, 14 | use_caption=False, is_test=False) 15 | 16 | target_format = [] 17 | for prob_id, (input, output) in split_problems.items(): 18 | if input.startswith('Question: '): 19 | input = input.replace('Question: ', '') 20 | if output.startswith('Answer: '): 21 | output = output.replace('Answer: ', '') 22 | 23 | raw_prob_data = problems[prob_id] 24 | if raw_prob_data['image'] is None: 25 | target_format.append({ 26 | "id": prob_id, 27 | "conversations": [ 28 | {'from': 'human', 'value': f"{input}"}, 29 | {'from': 'gpt', 'value': f"{output}"}, 30 | ], 31 | }) 32 | 33 | else: 34 | target_format.append({ 35 | "id": prob_id, 36 | "image": os.path.join(prob_id, raw_prob_data['image']), 37 | "conversations": [ 38 | {'from': 'human', 'value': f"{input}\n"}, 39 | {'from': 'gpt', 'value': f"{output}"}, 40 | ], 41 | }) 42 | 43 | print(f'Number of samples: {len(target_format)}') 44 | 45 | with open(os.path.join(base_dir, f"llava_{split}_{prompt_format}.json"), "w") as f: 46 | json.dump(target_format, f, indent=2) 47 | 48 | 49 | def convert_to_jsonl(base_dir, split, prompt_format="QCM-LEPA"): 50 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split] 51 | problems = json.load(open(os.path.join(base_dir, "problems.json"))) 52 | 53 | split_problems = build_prompt_chatbot( 54 | problems, split_indices, prompt_format, 55 | 
use_caption=False, is_test=False) 56 | 57 | writer = open(os.path.join(base_dir, f"scienceqa_{split}_{prompt_format}.jsonl"), "w") 58 | for prob_id, (input, output) in split_problems.items(): 59 | if input.startswith('Question: '): 60 | input = input.replace('Question: ', '') 61 | if output.startswith('Answer: '): 62 | output = output.replace('Answer: ', '') 63 | 64 | raw_prob_data = problems[prob_id] 65 | if raw_prob_data['image'] is None: 66 | data = { 67 | "id": prob_id, 68 | "instruction": f"{input}", 69 | "output": f"{output}", 70 | } 71 | 72 | else: 73 | data = { 74 | "id": prob_id, 75 | "image": os.path.join(prob_id, raw_prob_data['image']), 76 | "instruction": f"{input}\n", 77 | "output": f"{output}", 78 | } 79 | writer.write(json.dumps(data) + '\n') 80 | writer.close() 81 | 82 | 83 | def main(task, **kwargs): 84 | globals()[task](**kwargs) 85 | 86 | 87 | if __name__ == "__main__": 88 | fire.Fire(main) 89 | -------------------------------------------------------------------------------- /LLaVA/scripts/convert_vizwiz_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import json 4 | 5 | from llava.eval.m4c_evaluator import EvalAIAnswerProcessor 6 | 7 | 8 | def parse_args(): 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('--annotation-file', type=str, required=True) 11 | parser.add_argument('--result-file', type=str, required=True) 12 | parser.add_argument('--result-upload-file', type=str, required=True) 13 | return parser.parse_args() 14 | 15 | 16 | if __name__ == '__main__': 17 | 18 | args = parse_args() 19 | 20 | os.makedirs(os.path.dirname(args.result_upload_file), exist_ok=True) 21 | 22 | results = [] 23 | error_line = 0 24 | for line_idx, line in enumerate(open(args.result_file)): 25 | try: 26 | results.append(json.loads(line)) 27 | except: 28 | error_line += 1 29 | results = {x['question_id']: x['text'] for x in results} 30 | test_split = [json.loads(line) for line in open(args.annotation_file)] 31 | split_ids = set([x['question_id'] for x in test_split]) 32 | 33 | print(f'total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}') 34 | 35 | all_answers = [] 36 | 37 | answer_processor = EvalAIAnswerProcessor() 38 | 39 | for x in test_split: 40 | assert x['question_id'] in results 41 | all_answers.append({ 42 | 'image': x['image'], 43 | 'answer': answer_processor(results[x['question_id']]) 44 | }) 45 | 46 | with open(args.result_upload_file, 'w') as f: 47 | json.dump(all_answers, f) 48 | -------------------------------------------------------------------------------- /LLaVA/scripts/convert_vqav2_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import json 4 | 5 | from llava.eval.m4c_evaluator import EvalAIAnswerProcessor 6 | 7 | 8 | def parse_args(): 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('--dir', type=str, default="./playground/data/eval/vqav2") 11 | parser.add_argument('--ckpt', type=str, required=True) 12 | parser.add_argument('--split', type=str, required=True) 13 | return parser.parse_args() 14 | 15 | 16 | if __name__ == '__main__': 17 | 18 | args = parse_args() 19 | 20 | src = os.path.join(args.dir, 'answers', args.split, args.ckpt, 'merge.jsonl') 21 | test_split = os.path.join(args.dir, 'llava_vqav2_mscoco_test2015.jsonl') 22 | dst = os.path.join(args.dir, 'answers_upload', args.split, f'{args.ckpt}.json') 23 | 
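    # Builds the EvalAI-style submission file: reads the merged per-chunk answers (merge.jsonl),
    # normalizes each prediction with EvalAIAnswerProcessor, and writes one entry per question
    # in the test split, falling back to an empty answer when a question_id has no prediction.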
os.makedirs(os.path.dirname(dst), exist_ok=True) 24 | 25 | results = [] 26 | error_line = 0 27 | for line_idx, line in enumerate(open(src)): 28 | try: 29 | results.append(json.loads(line)) 30 | except: 31 | error_line += 1 32 | 33 | results = {x['question_id']: x['text'] for x in results} 34 | test_split = [json.loads(line) for line in open(test_split)] 35 | split_ids = set([x['question_id'] for x in test_split]) 36 | 37 | print(f'total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}') 38 | 39 | all_answers = [] 40 | 41 | answer_processor = EvalAIAnswerProcessor() 42 | 43 | for x in test_split: 44 | if x['question_id'] not in results: 45 | all_answers.append({ 46 | 'question_id': x['question_id'], 47 | 'answer': '' 48 | }) 49 | else: 50 | all_answers.append({ 51 | 'question_id': x['question_id'], 52 | 'answer': answer_processor(results[x['question_id']]) 53 | }) 54 | 55 | with open(dst, 'w') as f: 56 | json.dump(all_answers, open(dst, 'w')) 57 | -------------------------------------------------------------------------------- /LLaVA/scripts/extract_mm_projector.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is just a utility that I use to extract the projector for quantized models. 3 | It is NOT necessary at all to train, or run inference/serve demos. 4 | Use this script ONLY if you fully understand its implications. 5 | """ 6 | 7 | 8 | import os 9 | import argparse 10 | import torch 11 | import json 12 | from collections import defaultdict 13 | 14 | 15 | def parse_args(): 16 | parser = argparse.ArgumentParser(description='Extract MMProjector weights') 17 | parser.add_argument('--model-path', type=str, help='model folder') 18 | parser.add_argument('--output', type=str, help='output file') 19 | args = parser.parse_args() 20 | return args 21 | 22 | 23 | if __name__ == '__main__': 24 | args = parse_args() 25 | 26 | keys_to_match = ['mm_projector'] 27 | ckpt_to_key = defaultdict(list) 28 | try: 29 | model_indices = json.load(open(os.path.join(args.model_path, 'pytorch_model.bin.index.json'))) 30 | for k, v in model_indices['weight_map'].items(): 31 | if any(key_match in k for key_match in keys_to_match): 32 | ckpt_to_key[v].append(k) 33 | except FileNotFoundError: 34 | # Smaller models or model checkpoints saved by DeepSpeed. 35 | v = 'pytorch_model.bin' 36 | for k in torch.load(os.path.join(args.model_path, v), map_location='cpu').keys(): 37 | if any(key_match in k for key_match in keys_to_match): 38 | ckpt_to_key[v].append(k) 39 | 40 | loaded_weights = {} 41 | 42 | for ckpt_name, weight_keys in ckpt_to_key.items(): 43 | ckpt = torch.load(os.path.join(args.model_path, ckpt_name), map_location='cpu') 44 | for k in weight_keys: 45 | loaded_weights[k] = ckpt[k] 46 | 47 | torch.save(loaded_weights, args.output) 48 | -------------------------------------------------------------------------------- /LLaVA/scripts/finetune.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 
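# It fine-tunes on llava_instruct_80k and expects the stage-1 projector weights
# (mm_projector.bin, typically produced by scripts/pretrain.sh) supplied below via
# --pretrain_mm_mlp_adapter.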
4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | ################## VICUNA ################## 8 | # PROMPT_VERSION=v1 9 | # MODEL_VERSION="vicuna-v1-3-7b" 10 | ################## VICUNA ################## 11 | 12 | ################## LLaMA-2 ################## 13 | # PROMPT_VERSION="llava_llama_2" 14 | # MODEL_VERSION="llama-2-7b-chat" 15 | ################## LLaMA-2 ################## 16 | 17 | deepspeed llava/train/train_mem.py \ 18 | --deepspeed ./scripts/zero2.json \ 19 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 20 | --version $PROMPT_VERSION \ 21 | --data_path ./playground/data/llava_instruct_80k.json \ 22 | --image_folder /path/to/coco/train2017 \ 23 | --vision_tower openai/clip-vit-large-patch14 \ 24 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \ 25 | --mm_vision_select_layer -2 \ 26 | --mm_use_im_start_end False \ 27 | --mm_use_im_patch_token False \ 28 | --bf16 True \ 29 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune \ 30 | --num_train_epochs 1 \ 31 | --per_device_train_batch_size 16 \ 32 | --per_device_eval_batch_size 4 \ 33 | --gradient_accumulation_steps 1 \ 34 | --evaluation_strategy "no" \ 35 | --save_strategy "steps" \ 36 | --save_steps 50000 \ 37 | --save_total_limit 1 \ 38 | --learning_rate 2e-5 \ 39 | --weight_decay 0. \ 40 | --warmup_ratio 0.03 \ 41 | --lr_scheduler_type "cosine" \ 42 | --logging_steps 1 \ 43 | --tf32 True \ 44 | --model_max_length 2048 \ 45 | --gradient_checkpointing True \ 46 | --dataloader_num_workers 4 \ 47 | --lazy_preprocess True \ 48 | --report_to wandb 49 | -------------------------------------------------------------------------------- /LLaVA/scripts/finetune_full_schedule.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | ################## VICUNA ################## 8 | # PROMPT_VERSION=v1 9 | # MODEL_VERSION="vicuna-v1-3-7b" 10 | ################## VICUNA ################## 11 | 12 | ################## LLaMA-2 ################## 13 | # PROMPT_VERSION="llava_llama_2" 14 | # MODEL_VERSION="llama-2-7b-chat" 15 | ################## LLaMA-2 ################## 16 | 17 | deepspeed llava/train/train_mem.py \ 18 | --deepspeed ./scripts/zero2.json \ 19 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 20 | --version $PROMPT_VERSION \ 21 | --data_path ./playground/data/llava_instruct_158k.json \ 22 | --image_folder /path/to/coco/train2017 \ 23 | --vision_tower openai/clip-vit-large-patch14 \ 24 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \ 25 | --mm_vision_select_layer -2 \ 26 | --mm_use_im_start_end False \ 27 | --mm_use_im_patch_token False \ 28 | --bf16 True \ 29 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune \ 30 | --num_train_epochs 3 \ 31 | --per_device_train_batch_size 16 \ 32 | --per_device_eval_batch_size 4 \ 33 | --gradient_accumulation_steps 1 \ 34 | --evaluation_strategy "no" \ 35 | --save_strategy "steps" \ 36 | --save_steps 50000 \ 37 | --save_total_limit 1 \ 38 | --learning_rate 2e-5 \ 39 | --weight_decay 0. 
\ 40 | --warmup_ratio 0.03 \ 41 | --lr_scheduler_type "cosine" \ 42 | --logging_steps 1 \ 43 | --tf32 True \ 44 | --model_max_length 2048 \ 45 | --gradient_checkpointing True \ 46 | --dataloader_num_workers 4 \ 47 | --lazy_preprocess True \ 48 | --report_to wandb 49 | -------------------------------------------------------------------------------- /LLaVA/scripts/finetune_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | ################## VICUNA ################## 8 | # PROMPT_VERSION=v1 9 | # MODEL_VERSION="vicuna-v1-3-7b" 10 | ################## VICUNA ################## 11 | 12 | ################## LLaMA-2 ################## 13 | # PROMPT_VERSION="llava_llama_2" 14 | # MODEL_VERSION="llama-2-7b-chat" 15 | ################## LLaMA-2 ################## 16 | 17 | deepspeed llava/train/train_mem.py \ 18 | --deepspeed ./scripts/zero2.json \ 19 | --lora_enable True \ 20 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 21 | --version $PROMPT_VERSION \ 22 | --data_path ./playground/data/llava_instruct_80k.json \ 23 | --image_folder /path/to/coco/train2017 \ 24 | --vision_tower openai/clip-vit-large-patch14 \ 25 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \ 26 | --mm_vision_select_layer -2 \ 27 | --mm_use_im_start_end False \ 28 | --mm_use_im_patch_token False \ 29 | --bf16 True \ 30 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune_lora \ 31 | --num_train_epochs 1 \ 32 | --per_device_train_batch_size 16 \ 33 | --per_device_eval_batch_size 4 \ 34 | --gradient_accumulation_steps 1 \ 35 | --evaluation_strategy "no" \ 36 | --save_strategy "steps" \ 37 | --save_steps 50000 \ 38 | --save_total_limit 1 \ 39 | --learning_rate 2e-5 \ 40 | --weight_decay 0. \ 41 | --warmup_ratio 0.03 \ 42 | --lr_scheduler_type "cosine" \ 43 | --logging_steps 1 \ 44 | --tf32 True \ 45 | --model_max_length 2048 \ 46 | --gradient_checkpointing True \ 47 | --lazy_preprocess True \ 48 | --dataloader_num_workers 4 \ 49 | --report_to wandb 50 | -------------------------------------------------------------------------------- /LLaVA/scripts/finetune_qlora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 
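# QLoRA variant of finetune_lora.sh: --bits 4 below loads the base model in 4-bit
# (via bitsandbytes) while the LoRA adapters enabled by --lora_enable True are trained.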
4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | ################## VICUNA ################## 8 | # PROMPT_VERSION=v1 9 | # MODEL_VERSION="vicuna-v1-3-7b" 10 | ################## VICUNA ################## 11 | 12 | ################## LLaMA-2 ################## 13 | # PROMPT_VERSION="llava_llama_2" 14 | # MODEL_VERSION="llama-2-7b-chat" 15 | ################## LLaMA-2 ################## 16 | 17 | deepspeed llava/train/train_mem.py \ 18 | --deepspeed ./scripts/zero2.json \ 19 | --lora_enable True \ 20 | --bits 4 \ 21 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 22 | --version $PROMPT_VERSION \ 23 | --data_path ./playground/data/llava_instruct_80k.json \ 24 | --image_folder /path/to/coco/train2017 \ 25 | --vision_tower openai/clip-vit-large-patch14 \ 26 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \ 27 | --mm_vision_select_layer -2 \ 28 | --mm_use_im_start_end False \ 29 | --mm_use_im_patch_token False \ 30 | --bf16 True \ 31 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune_lora \ 32 | --num_train_epochs 1 \ 33 | --per_device_train_batch_size 16 \ 34 | --per_device_eval_batch_size 4 \ 35 | --gradient_accumulation_steps 1 \ 36 | --evaluation_strategy "no" \ 37 | --save_strategy "steps" \ 38 | --save_steps 50000 \ 39 | --save_total_limit 1 \ 40 | --learning_rate 2e-5 \ 41 | --weight_decay 0. \ 42 | --warmup_ratio 0.03 \ 43 | --lr_scheduler_type "cosine" \ 44 | --logging_steps 1 \ 45 | --tf32 True \ 46 | --model_max_length 2048 \ 47 | --gradient_checkpointing True \ 48 | --lazy_preprocess True \ 49 | --dataloader_num_workers 4 \ 50 | --report_to wandb 51 | -------------------------------------------------------------------------------- /LLaVA/scripts/finetune_sqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 4 | 5 | deepspeed llava/train/train_mem.py \ 6 | --deepspeed ./scripts/zero2.json \ 7 | --model_name_or_path lmsys/vicuna-13b-v1.3 \ 8 | --version $PROMPT_VERSION \ 9 | --data_path /Data/ScienceQA/data/scienceqa/llava_train_QCM-LEA.json \ 10 | --image_folder /Data/ScienceQA/data/scienceqa/images/train \ 11 | --vision_tower openai/clip-vit-large-patch14 \ 12 | --pretrain_mm_mlp_adapter ./checkpoints/huggingface/liuhaotian/llava-pretrain-vicuna-13b-v1.3/mm_projector.bin \ 13 | --mm_vision_select_layer -2 \ 14 | --mm_use_im_start_end False \ 15 | --mm_use_im_patch_token False \ 16 | --bf16 True \ 17 | --output_dir ./checkpoints/llava-vicuna-13b-v1.3-pretrain_lcs558k_plain-ScienceQA_QCM_LEA-12e \ 18 | --num_train_epochs 12 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 4 \ 21 | --gradient_accumulation_steps 1 \ 22 | --evaluation_strategy "no" \ 23 | --save_strategy "steps" \ 24 | --save_steps 50000 \ 25 | --save_total_limit 1 \ 26 | --learning_rate 2e-5 \ 27 | --weight_decay 0. 
\ 28 | --warmup_ratio 0.03 \ 29 | --lr_scheduler_type "cosine" \ 30 | --logging_steps 1 \ 31 | --tf32 True \ 32 | --model_max_length 2048 \ 33 | --gradient_checkpointing True \ 34 | --dataloader_num_workers 4 \ 35 | --lazy_preprocess True \ 36 | --report_to wandb 37 | -------------------------------------------------------------------------------- /LLaVA/scripts/merge_lora_weights.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from llava.model.builder import load_pretrained_model 3 | from llava.mm_utils import get_model_name_from_path 4 | 5 | 6 | def merge_lora(args): 7 | model_name = get_model_name_from_path(args.model_path) 8 | tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, device_map='cpu') 9 | 10 | model.save_pretrained(args.save_model_path) 11 | tokenizer.save_pretrained(args.save_model_path) 12 | 13 | 14 | if __name__ == "__main__": 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("--model-path", type=str, required=True) 17 | parser.add_argument("--model-base", type=str, required=True) 18 | parser.add_argument("--save-model-path", type=str, required=True) 19 | 20 | args = parser.parse_args() 21 | 22 | merge_lora(args) 23 | -------------------------------------------------------------------------------- /LLaVA/scripts/pretrain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5! 4 | 5 | # Uncomment and set the following variables correspondingly to run this script: 6 | 7 | # MODEL_VERSION=vicuna-v1-3-7b 8 | # MODEL_VERSION=llama-2-7b-chat 9 | 10 | ########### DO NOT CHANGE ########### 11 | ########### USE THIS FOR BOTH ########### 12 | PROMPT_VERSION=plain 13 | ########### DO NOT CHANGE ########### 14 | 15 | deepspeed llava/train/train_mem.py \ 16 | --deepspeed ./scripts/zero2.json \ 17 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 18 | --version $PROMPT_VERSION \ 19 | --data_path /path/to/pretrain_data.json \ 20 | --image_folder /path/to/images \ 21 | --vision_tower openai/clip-vit-large-patch14 \ 22 | --tune_mm_mlp_adapter True \ 23 | --mm_vision_select_layer -2 \ 24 | --mm_use_im_start_end False \ 25 | --mm_use_im_patch_token False \ 26 | --bf16 True \ 27 | --output_dir ./checkpoints/llava-$MODEL_VERSION-pretrain \ 28 | --num_train_epochs 1 \ 29 | --per_device_train_batch_size 16 \ 30 | --per_device_eval_batch_size 4 \ 31 | --gradient_accumulation_steps 1 \ 32 | --evaluation_strategy "no" \ 33 | --save_strategy "steps" \ 34 | --save_steps 24000 \ 35 | --save_total_limit 1 \ 36 | --learning_rate 2e-3 \ 37 | --weight_decay 0. 
\ 38 | --warmup_ratio 0.03 \ 39 | --lr_scheduler_type "cosine" \ 40 | --logging_steps 1 \ 41 | --tf32 True \ 42 | --model_max_length 2048 \ 43 | --gradient_checkpointing True \ 44 | --dataloader_num_workers 4 \ 45 | --lazy_preprocess True \ 46 | --report_to wandb 47 | -------------------------------------------------------------------------------- /LLaVA/scripts/pretrain_xformers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Uncomment and set the following variables correspondingly to run this script: 4 | 5 | # MODEL_VERSION=vicuna-v1-3-7b 6 | # MODEL_VERSION=llama-2-7b-chat 7 | 8 | ########### DO NOT CHANGE ########### 9 | ########### USE THIS FOR BOTH ########### 10 | PROMPT_VERSION=plain 11 | ########### DO NOT CHANGE ########### 12 | 13 | deepspeed llava/train/train_xformers.py \ 14 | --deepspeed ./scripts/zero2.json \ 15 | --model_name_or_path ./checkpoints/$MODEL_VERSION \ 16 | --version $PROMPT_VERSION \ 17 | --data_path /path/to/pretrain_data.json \ 18 | --image_folder /path/to/images \ 19 | --vision_tower openai/clip-vit-large-patch14 \ 20 | --tune_mm_mlp_adapter True \ 21 | --mm_vision_select_layer -2 \ 22 | --mm_use_im_start_end False \ 23 | --mm_use_im_patch_token False \ 24 | --bf16 False \ 25 | --output_dir ./checkpoints/llava-$MODEL_VERSION-pretrain \ 26 | --num_train_epochs 1 \ 27 | --per_device_train_batch_size 4 \ 28 | --per_device_eval_batch_size 4 \ 29 | --gradient_accumulation_steps 4 \ 30 | --evaluation_strategy "no" \ 31 | --save_strategy "steps" \ 32 | --save_steps 24000 \ 33 | --save_total_limit 1 \ 34 | --learning_rate 2e-3 \ 35 | --weight_decay 0. \ 36 | --warmup_ratio 0.03 \ 37 | --lr_scheduler_type "cosine" \ 38 | --logging_steps 1 \ 39 | --tf32 False \ 40 | --model_max_length 2048 \ 41 | --gradient_checkpointing True \ 42 | --dataloader_num_workers 4 \ 43 | --lazy_preprocess True \ 44 | --report_to wandb 45 | -------------------------------------------------------------------------------- /LLaVA/scripts/sqa_eval_batch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CHUNKS=8 4 | for IDX in {0..7}; do 5 | CUDA_VISIBLE_DEVICES=$IDX python -m llava.eval.model_vqa_science \ 6 | --model-path liuhaotian/llava-lcs558k-scienceqa-vicuna-13b-v1.3 \ 7 | --question-file ~/haotian/datasets/ScienceQA/data/scienceqa/llava_test_QCM-LEA.json \ 8 | --image-folder ~/haotian/datasets/ScienceQA/data/scienceqa/images/test \ 9 | --answers-file ./test_llava-13b-chunk$CHUNKS_$IDX.jsonl \ 10 | --num-chunks $CHUNKS \ 11 | --chunk-idx $IDX \ 12 | --conv-mode llava_v1 & 13 | done 14 | -------------------------------------------------------------------------------- /LLaVA/scripts/sqa_eval_gather.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CHUNKS=8 4 | output_file="test_llava-13b.jsonl" 5 | 6 | # Clear out the output file if it exists. 7 | > "$output_file" 8 | 9 | # Loop through the indices and concatenate each file. 
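# The per-chunk files come from sqa_eval_batch.sh (one background job per GPU), so make
# sure those jobs have finished before gathering.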
10 | for idx in $(seq 0 $((CHUNKS-1))); do 11 | cat "./test_llava-13b-chunk${idx}.jsonl" >> "$output_file" 12 | done 13 | 14 | python llava/eval/eval_science_qa.py \ 15 | --base-dir ~/haotian/datasets/ScienceQA/data/scienceqa \ 16 | --result-file ./test_llava-13b.jsonl \ 17 | --output-file ./test_llava-13b_output.json \ 18 | --output-result ./test_llava-13b_result.json 19 | -------------------------------------------------------------------------------- /LLaVA/scripts/upload_pypi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Step 0: Clean up 4 | rm -rf dist 5 | 6 | # Step 1: Change the package name to "llava-torch" 7 | sed -i 's/name = "llava"/name = "llava-torch"/' pyproject.toml 8 | 9 | # Step 2: Build the package 10 | python -m build 11 | 12 | # Step 3: Revert the changes in pyproject.toml to the original 13 | sed -i 's/name = "llava-torch"/name = "llava"/' pyproject.toml 14 | 15 | # Step 4: Upload to PyPI 16 | python -m twine upload dist/* 17 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/gqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="llava-v1.5-13b" 9 | SPLIT="llava_gqa_testdev_balanced" 10 | GQADIR="./playground/data/eval/gqa/data" 11 | 12 | for IDX in $(seq 0 $((CHUNKS-1))); do 13 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 14 | --model-path liuhaotian/llava-v1.5-13b \ 15 | --question-file ./playground/data/eval/gqa/$SPLIT.jsonl \ 16 | --image-folder ./playground/data/eval/gqa/data/images \ 17 | --answers-file ./playground/data/eval/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \ 18 | --num-chunks $CHUNKS \ 19 | --chunk-idx $IDX \ 20 | --temperature 0 \ 21 | --conv-mode vicuna_v1 & 22 | done 23 | 24 | wait 25 | 26 | output_file=./playground/data/eval/gqa/answers/$SPLIT/$CKPT/merge.jsonl 27 | 28 | # Clear out the output file if it exists. 29 | > "$output_file" 30 | 31 | # Loop through the indices and concatenate each file. 
32 | for IDX in $(seq 0 $((CHUNKS-1))); do 33 | cat ./playground/data/eval/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 34 | done 35 | 36 | python scripts/convert_gqa_for_eval.py --src $output_file --dst $GQADIR/testdev_balanced_predictions.json 37 | 38 | cd $GQADIR 39 | python eval/eval.py --tier testdev_balanced 40 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/gqa_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | MODEL_PATH=$1 9 | CKPT=$2 10 | SPLIT="llava_gqa_testdev_balanced" 11 | GQADIR="./playground/data/eval/gqa/data" 12 | 13 | for IDX in $(seq 0 $((CHUNKS-1))); do 14 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 15 | --model-path $MODEL_PATH \ 16 | --model-base lmsys/vicuna-7b-v1.5 \ 17 | --question-file ./playground/data/eval/gqa/$SPLIT.jsonl \ 18 | --image-folder ./playground/data/eval/gqa/data/images \ 19 | --answers-file ./playground/data/eval/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \ 20 | --num-chunks $CHUNKS \ 21 | --chunk-idx $IDX \ 22 | --temperature 0 \ 23 | --conv-mode vicuna_v1 & 24 | done 25 | 26 | wait 27 | 28 | output_file=./playground/data/eval/gqa/answers/$SPLIT/$CKPT/merge.jsonl 29 | 30 | # Clear out the output file if it exists. 31 | > "$output_file" 32 | 33 | # Loop through the indices and concatenate each file. 34 | for IDX in $(seq 0 $((CHUNKS-1))); do 35 | cat ./playground/data/eval/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 36 | done 37 | 38 | python scripts/convert_gqa_for_eval.py --src $output_file --dst $GQADIR/testdev_balanced_predictions.json 39 | 40 | cd $GQADIR 41 | python eval/eval.py --tier testdev_balanced 42 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/llavabench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/llava-bench-in-the-wild/questions.jsonl \ 6 | --image-folder ./playground/data/eval/llava-bench-in-the-wild/images \ 7 | --answers-file ./playground/data/eval/llava-bench-in-the-wild/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | mkdir -p playground/data/eval/llava-bench-in-the-wild/reviews 12 | 13 | python llava/eval/eval_gpt_review_bench.py \ 14 | --question playground/data/eval/llava-bench-in-the-wild/questions.jsonl \ 15 | --context playground/data/eval/llava-bench-in-the-wild/context.jsonl \ 16 | --rule llava/eval/table/rule.json \ 17 | --answer-list \ 18 | playground/data/eval/llava-bench-in-the-wild/answers_gpt4.jsonl \ 19 | playground/data/eval/llava-bench-in-the-wild/answers/llava-v1.5-13b.jsonl \ 20 | --output \ 21 | playground/data/eval/llava-bench-in-the-wild/reviews/llava-v1.5-13b.jsonl 22 | 23 | python llava/eval/summarize_gpt_review.py -f playground/data/eval/llava-bench-in-the-wild/reviews/llava-v1.5-13b.jsonl 24 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/mmbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SPLIT="mmbench_dev_20230712" 4 | 5 | python -m llava.eval.model_vqa_mmbench \ 6 | 
--model-path liuhaotian/llava-v1.5-13b \ 7 | --question-file ./playground/data/eval/mmbench/$SPLIT.tsv \ 8 | --answers-file ./playground/data/eval/mmbench/answers/$SPLIT/llava-v1.5-13b.jsonl \ 9 | --single-pred-prompt \ 10 | --temperature 0 \ 11 | --conv-mode vicuna_v1 12 | 13 | mkdir -p playground/data/eval/mmbench/answers_upload/$SPLIT 14 | 15 | python scripts/convert_mmbench_for_submission.py \ 16 | --annotation-file ./playground/data/eval/mmbench/$SPLIT.tsv \ 17 | --result-dir ./playground/data/eval/mmbench/answers/$SPLIT \ 18 | --upload-dir ./playground/data/eval/mmbench/answers_upload/$SPLIT \ 19 | --experiment llava-v1.5-13b 20 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/mmbench_cn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SPLIT="mmbench_dev_cn_20231003" 4 | 5 | python -m llava.eval.model_vqa_mmbench \ 6 | --model-path liuhaotian/llava-v1.5-13b \ 7 | --question-file ./playground/data/eval/mmbench_cn/$SPLIT.tsv \ 8 | --answers-file ./playground/data/eval/mmbench_cn/answers/$SPLIT/llava-v1.5-13b.jsonl \ 9 | --lang cn \ 10 | --single-pred-prompt \ 11 | --temperature 0 \ 12 | --conv-mode vicuna_v1 13 | 14 | mkdir -p playground/data/eval/mmbench_cn/answers_upload/$SPLIT 15 | 16 | python scripts/convert_mmbench_for_submission.py \ 17 | --annotation-file ./playground/data/eval/mmbench_cn/$SPLIT.tsv \ 18 | --result-dir ./playground/data/eval/mmbench_cn/answers/$SPLIT \ 19 | --upload-dir ./playground/data/eval/mmbench_cn/answers_upload/$SPLIT \ 20 | --experiment llava-v1.5-13b 21 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/mmbench_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SPLIT="mmbench_dev_20230712" 4 | 5 | MODEL_PATH=$1 6 | CKPT=$2 7 | 8 | python -m llava.eval.model_vqa_mmbench \ 9 | --model-path $MODEL_PATH \ 10 | --model-base lmsys/vicuna-7b-v1.5 \ 11 | --question-file ./playground/data/eval/mmbench/$SPLIT.tsv \ 12 | --answers-file ./playground/data/eval/mmbench/answers/$SPLIT/${CKPT}.jsonl \ 13 | --single-pred-prompt \ 14 | --temperature 0 \ 15 | --conv-mode vicuna_v1 16 | 17 | mkdir -p playground/data/eval/mmbench/answers_upload/$SPLIT 18 | 19 | python scripts/convert_mmbench_for_submission.py \ 20 | --annotation-file ./playground/data/eval/mmbench/$SPLIT.tsv \ 21 | --result-dir ./playground/data/eval/mmbench/answers/$SPLIT \ 22 | --upload-dir ./playground/data/eval/mmbench/answers_upload/$SPLIT \ 23 | --experiment $CKPT 24 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/mme.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_loader \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/MME/llava_mme.jsonl \ 6 | --image-folder ./playground/data/eval/MME/MME_Benchmark_release_version \ 7 | --answers-file ./playground/data/eval/MME/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | cd ./playground/data/eval/MME 12 | 13 | python convert_answer_to_mme.py --experiment llava-v1.5-13b 14 | 15 | cd eval_tool 16 | 17 | python calculation.py --results_dir answers/llava-v1.5-13b 18 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/mme_lora.sh:
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MODEL_PATH=$1 4 | CKPT=$2 5 | 6 | python -m llava.eval.model_vqa_loader \ 7 | --model-base lmsys/vicuna-7b-v1.5 \ 8 | --model-path $MODEL_PATH \ 9 | --question-file ./playground/data/eval/MME/llava_mme.jsonl \ 10 | --image-folder ./playground/data/eval/MME/MME_Benchmark_release_version \ 11 | --answers-file ./playground/data/eval/MME/answers/${CKPT}.jsonl \ 12 | --temperature 0 \ 13 | --conv-mode vicuna_v1 14 | 15 | cd ./playground/data/eval/MME 16 | 17 | python convert_answer_to_mme.py --experiment ${CKPT} 18 | 19 | python eval_tool/calculation.py --results_dir eval_tool/answers/${CKPT} 20 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/mmvet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/mm-vet/llava-mm-vet.jsonl \ 6 | --image-folder ./playground/data/eval/mm-vet/images \ 7 | --answers-file ./playground/data/eval/mm-vet/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | mkdir -p ./playground/data/eval/mm-vet/results 12 | 13 | python scripts/convert_mmvet_for_eval.py \ 14 | --src ./playground/data/eval/mm-vet/answers/llava-v1.5-13b.jsonl \ 15 | --dst ./playground/data/eval/mm-vet/results/llava-v1.5-13b.json 16 | 17 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/pope.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_loader \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/pope/llava_pope_test.jsonl \ 6 | --image-folder ./playground/data/eval/pope/val2014 \ 7 | --answers-file ./playground/data/eval/pope/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | python llava/eval/eval_pope.py \ 12 | --annotation-dir ./playground/data/eval/pope/coco \ 13 | --question-file ./playground/data/eval/pope/llava_pope_test.jsonl \ 14 | --result-file ./playground/data/eval/pope/answers/llava-v1.5-13b.jsonl 15 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/qbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$1" = "dev" ]; then 4 | echo "Evaluating in 'dev' split." 5 | elif [ "$1" = "test" ]; then 6 | echo "Evaluating in 'test' split." 7 | else 8 | echo "Unknown split, please choose between 'dev' and 'test'." 9 | exit 1 10 | fi 11 | 12 | python -m llava.eval.model_vqa_qbench \ 13 | --model-path liuhaotian/llava-v1.5-13b \ 14 | --image-folder ./playground/data/eval/qbench/images_llvisionqa/ \ 15 | --questions-file ./playground/data/eval/qbench/llvisionqa_$1.json \ 16 | --answers-file ./playground/data/eval/qbench/llvisionqa_$1_answers.jsonl \ 17 | --conv-mode llava_v1 \ 18 | --lang en 19 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/qbench_zh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$1" = "dev" ]; then 4 | ZH_SPLIT="验证集" 5 | echo "Evaluating in 'dev' split." 6 | elif [ "$1" = "test" ]; then 7 | ZH_SPLIT="测试集" 8 | echo "Evaluating in 'test' split." 
9 | else 10 | echo "Unknown split, please choose between 'dev' and 'test'." 11 | exit 1 12 | fi 13 | 14 | python -m llava.eval.model_vqa_qbench \ 15 | --model-path liuhaotian/llava-v1.5-13b \ 16 | --image-folder ./playground/data/eval/qbench/images_llvisionqa/ \ 17 | --questions-file ./playground/data/eval/qbench/质衡-问答-$ZH_SPLIT.json \ 18 | --answers-file ./playground/data/eval/qbench/llvisionqa_zh_$1_answers.jsonl \ 19 | --conv-mode llava_v1 \ 20 | --lang zh 21 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/seed.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="llava-v1.5-13b" 9 | 10 | for IDX in $(seq 0 $((CHUNKS-1))); do 11 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 12 | --model-path liuhaotian/llava-v1.5-13b \ 13 | --question-file ./playground/data/eval/seed_bench/llava-seed-bench.jsonl \ 14 | --image-folder ./playground/data/eval/seed_bench \ 15 | --answers-file ./playground/data/eval/seed_bench/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \ 16 | --num-chunks $CHUNKS \ 17 | --chunk-idx $IDX \ 18 | --temperature 0 \ 19 | --conv-mode vicuna_v1 & 20 | done 21 | 22 | wait 23 | 24 | output_file=./playground/data/eval/seed_bench/answers/$CKPT/merge.jsonl 25 | 26 | # Clear out the output file if it exists. 27 | > "$output_file" 28 | 29 | # Loop through the indices and concatenate each file. 30 | for IDX in $(seq 0 $((CHUNKS-1))); do 31 | cat ./playground/data/eval/seed_bench/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 32 | done 33 | 34 | # Evaluate 35 | python scripts/convert_seed_for_submission.py \ 36 | --annotation-file ./playground/data/eval/seed_bench/SEED-Bench.json \ 37 | --result-file $output_file \ 38 | --result-upload-file ./playground/data/eval/seed_bench/answers_upload/llava-v1.5-13b.jsonl 39 | 40 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/seed_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | MODEL_PATH=$1 9 | CKPT=$2 10 | 11 | for IDX in $(seq 0 $((CHUNKS-1))); do 12 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 13 | --model-path $MODEL_PATH \ 14 | --model-base lmsys/vicuna-7b-v1.5 \ 15 | --question-file ./playground/data/eval/seed_bench/llava-seed-bench-image.jsonl \ 16 | --image-folder ./playground/data/eval/seed_bench \ 17 | --answers-file ./playground/data/eval/seed_bench/answers/$CKPT/${CHUNKS}_${IDX}.jsonl \ 18 | --num-chunks $CHUNKS \ 19 | --chunk-idx $IDX \ 20 | --temperature 0 \ 21 | --conv-mode vicuna_v1 & 22 | done 23 | 24 | wait 25 | 26 | output_file=./playground/data/eval/seed_bench/answers/$CKPT/merge.jsonl 27 | 28 | # Clear out the output file if it exists. 29 | > "$output_file" 30 | 31 | # Loop through the indices and concatenate each file. 
32 | for IDX in $(seq 0 $((CHUNKS-1))); do 33 | cat ./playground/data/eval/seed_bench/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 34 | done 35 | 36 | # Evaluate 37 | python scripts/convert_seed_for_submission.py \ 38 | --annotation-file ./playground/data/eval/seed_bench/SEED-Bench.json \ 39 | --result-file $output_file \ 40 | --result-upload-file ./playground/data/eval/seed_bench/answers_upload/upload.jsonl \ 41 | --save-path ./playground/data/eval/seed_bench/answers/$CKPT 42 | 43 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/sqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_science \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/scienceqa/llava_test_CQM-A.json \ 6 | --image-folder ./playground/data/eval/scienceqa/images/test \ 7 | --answers-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b.jsonl \ 8 | --single-pred-prompt \ 9 | --temperature 0 \ 10 | --conv-mode vicuna_v1 11 | 12 | python llava/eval/eval_science_qa.py \ 13 | --base-dir ./playground/data/eval/scienceqa \ 14 | --result-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b.jsonl \ 15 | --output-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_output.jsonl \ 16 | --output-result ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_result.json 17 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/sqa_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MODEL_PATH=$1 4 | CKPT=$2 5 | 6 | python -m llava.eval.model_vqa_science \ 7 | --model-base lmsys/vicuna-7b-v1.5 \ 8 | --model-path $MODEL_PATH \ 9 | --question-file ./playground/data/eval/scienceqa/llava_test_CQM-A.json \ 10 | --image-folder ./playground/data/eval/scienceqa/images/test \ 11 | --answers-file ./playground/data/eval/scienceqa/answers/${CKPT}.jsonl \ 12 | --single-pred-prompt \ 13 | --temperature 0 \ 14 | --conv-mode vicuna_v1 15 | 16 | python llava/eval/eval_science_qa.py \ 17 | --base-dir ./playground/data/eval/scienceqa \ 18 | --result-file ./playground/data/eval/scienceqa/answers/${CKPT}.jsonl \ 19 | --output-file ./playground/data/eval/scienceqa/answers/${CKPT}_output.jsonl \ 20 | --output-result ./playground/data/eval/scienceqa/answers/${CKPT}_result.json 21 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/textvqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_loader \ 4 | --model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/textvqa/llava_textvqa_val_v051_ocr.jsonl \ 6 | --image-folder ./playground/data/eval/textvqa/train_images \ 7 | --answers-file ./playground/data/eval/textvqa/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | python -m llava.eval.eval_textvqa \ 12 | --annotation-file ./playground/data/eval/textvqa/TextVQA_0.5.1_val.json \ 13 | --result-file ./playground/data/eval/textvqa/answers/llava-v1.5-13b.jsonl 14 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/vizwiz.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m llava.eval.model_vqa_loader \ 4 | 
--model-path liuhaotian/llava-v1.5-13b \ 5 | --question-file ./playground/data/eval/vizwiz/llava_test.jsonl \ 6 | --image-folder ./playground/data/eval/vizwiz/test \ 7 | --answers-file ./playground/data/eval/vizwiz/answers/llava-v1.5-13b.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | python scripts/convert_vizwiz_for_submission.py \ 12 | --annotation-file ./playground/data/eval/vizwiz/llava_test.jsonl \ 13 | --result-file ./playground/data/eval/vizwiz/answers/llava-v1.5-13b.jsonl \ 14 | --result-upload-file ./playground/data/eval/vizwiz/answers_upload/llava-v1.5-13b.json 15 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/vizwiz_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MODEL_PATH=$1 4 | CKPT=$2 5 | 6 | python -m llava.eval.model_vqa_loader \ 7 | --model-base lmsys/vicuna-7b-v1.5 \ 8 | --model-path $MODEL_PATH \ 9 | --question-file ./playground/data/eval/vizwiz/llava_test.jsonl \ 10 | --image-folder ./playground/data/eval/vizwiz/test \ 11 | --answers-file ./playground/data/eval/vizwiz/answers/${CKPT}.jsonl \ 12 | --temperature 0 \ 13 | --conv-mode vicuna_v1 14 | 15 | python scripts/convert_vizwiz_for_submission.py \ 16 | --annotation-file ./playground/data/eval/vizwiz/llava_test.jsonl \ 17 | --result-file ./playground/data/eval/vizwiz/answers/${CKPT}.jsonl \ 18 | --result-upload-file ./playground/data/eval/vizwiz/answers_upload/${CKPT}.json 19 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/vqav2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="llava-v1.5-13b" 9 | SPLIT="llava_vqav2_mscoco_test-dev2015" 10 | 11 | for IDX in $(seq 0 $((CHUNKS-1))); do 12 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 13 | --model-path liuhaotian/llava-v1.5-13b \ 14 | --question-file ./playground/data/eval/vqav2/$SPLIT.jsonl \ 15 | --image-folder ./playground/data/eval/vqav2/test2015 \ 16 | --answers-file ./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \ 17 | --num-chunks $CHUNKS \ 18 | --chunk-idx $IDX \ 19 | --temperature 0 \ 20 | --conv-mode vicuna_v1 & 21 | done 22 | 23 | wait 24 | 25 | output_file=./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/merge.jsonl 26 | 27 | # Clear out the output file if it exists. 28 | > "$output_file" 29 | 30 | # Loop through the indices and concatenate each file. 
31 | for IDX in $(seq 0 $((CHUNKS-1))); do 32 | cat ./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 33 | done 34 | 35 | python scripts/convert_vqav2_for_submission.py --split $SPLIT --ckpt $CKPT 36 | 37 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/eval/vqav2_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | MODEL_PATH=$1 9 | CKPT=$2 10 | SPLIT="llava_vqav2_mscoco_test-dev2015" 11 | 12 | for IDX in $(seq 0 $((CHUNKS-1))); do 13 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \ 14 | --model-base lmsys/vicuna-7b-v1.5 \ 15 | --model-path ${MODEL_PATH} \ 16 | --question-file ./playground/data/eval/vqav2/$SPLIT.jsonl \ 17 | --image-folder ./playground/data/eval/vqav2/test2015 \ 18 | --answers-file ./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \ 19 | --num-chunks $CHUNKS \ 20 | --chunk-idx $IDX \ 21 | --temperature 0 \ 22 | --conv-mode vicuna_v1 & 23 | done 24 | 25 | wait 26 | 27 | output_file=./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/merge.jsonl 28 | 29 | # Clear out the output file if it exists. 30 | > "$output_file" 31 | 32 | # Loop through the indices and concatenate each file. 33 | for IDX in $(seq 0 $((CHUNKS-1))); do 34 | cat ./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 35 | done 36 | 37 | python scripts/convert_vqav2_for_submission.py --split $SPLIT --ckpt $CKPT 38 | 39 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/finetune.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --deepspeed ./scripts/zero3.json \ 5 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 6 | --version v1 \ 7 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 8 | --image_folder ./playground/data \ 9 | --vision_tower openai/clip-vit-large-patch14-336 \ 10 | --pretrain_mm_mlp_adapter ./checkpoints/llava-v1.5-13b-pretrain/mm_projector.bin \ 11 | --mm_projector_type mlp2x_gelu \ 12 | --mm_vision_select_layer -2 \ 13 | --mm_use_im_start_end False \ 14 | --mm_use_im_patch_token False \ 15 | --image_aspect_ratio pad \ 16 | --group_by_modality_length True \ 17 | --bf16 True \ 18 | --output_dir ./checkpoints/llava-v1.5-13b \ 19 | --num_train_epochs 1 \ 20 | --per_device_train_batch_size 16 \ 21 | --per_device_eval_batch_size 4 \ 22 | --gradient_accumulation_steps 1 \ 23 | --evaluation_strategy "no" \ 24 | --save_strategy "steps" \ 25 | --save_steps 50000 \ 26 | --save_total_limit 1 \ 27 | --learning_rate 2e-5 \ 28 | --weight_decay 0. 
\ 29 | --warmup_ratio 0.03 \ 30 | --lr_scheduler_type "cosine" \ 31 | --logging_steps 1 \ 32 | --tf32 True \ 33 | --model_max_length 2048 \ 34 | --gradient_checkpointing True \ 35 | --dataloader_num_workers 4 \ 36 | --lazy_preprocess True \ 37 | --report_to wandb 38 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/finetune_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \ 5 | --deepspeed ./scripts/zero3.json \ 6 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 7 | --version v1 \ 8 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 9 | --image_folder ./playground/data \ 10 | --vision_tower openai/clip-vit-large-patch14-336 \ 11 | --pretrain_mm_mlp_adapter ./checkpoints/llava-v1.5-13b-pretrain/mm_projector.bin \ 12 | --mm_projector_type mlp2x_gelu \ 13 | --mm_vision_select_layer -2 \ 14 | --mm_use_im_start_end False \ 15 | --mm_use_im_patch_token False \ 16 | --image_aspect_ratio pad \ 17 | --group_by_modality_length True \ 18 | --bf16 True \ 19 | --output_dir ./checkpoints/llava-v1.5-13b-lora \ 20 | --num_train_epochs 1 \ 21 | --per_device_train_batch_size 16 \ 22 | --per_device_eval_batch_size 4 \ 23 | --gradient_accumulation_steps 1 \ 24 | --evaluation_strategy "no" \ 25 | --save_strategy "steps" \ 26 | --save_steps 50000 \ 27 | --save_total_limit 1 \ 28 | --learning_rate 2e-4 \ 29 | --weight_decay 0. \ 30 | --warmup_ratio 0.03 \ 31 | --lr_scheduler_type "cosine" \ 32 | --logging_steps 1 \ 33 | --tf32 True \ 34 | --model_max_length 2048 \ 35 | --gradient_checkpointing True \ 36 | --dataloader_num_workers 4 \ 37 | --lazy_preprocess True \ 38 | --report_to wandb 39 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/finetune_lora_dataengine.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed --master_port $RANDOM llava/train/train_mem.py \ 4 | --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \ 5 | --deepspeed ./scripts/zero3.json \ 6 | --model_name_or_path lmsys/vicuna-7b-v1.5 \ 7 | --version v1 \ 8 | --data_path ./playground/data/llava_v1_5_mix665k_dataengine.json \ 9 | --image_folder ./playground/data \ 10 | --vision_tower openai/clip-vit-large-patch14-336 \ 11 | --pretrain_mm_mlp_adapter ./checkpoints/llava-v1.5-7b-pretrain/mm_projector.bin \ 12 | --mm_projector_type mlp2x_gelu \ 13 | --mm_vision_select_layer -2 \ 14 | --mm_use_im_start_end False \ 15 | --mm_use_im_patch_token False \ 16 | --image_aspect_ratio pad \ 17 | --group_by_modality_length True \ 18 | --bf16 True \ 19 | --output_dir ./checkpoints/llava-v1.5-13b-lora-dataengine \ 20 | --num_train_epochs 1 \ 21 | --per_device_train_batch_size 16 \ 22 | --per_device_eval_batch_size 4 \ 23 | --gradient_accumulation_steps 1 \ 24 | --evaluation_strategy "no" \ 25 | --save_strategy "steps" \ 26 | --save_steps 50000 \ 27 | --save_total_limit 1 \ 28 | --learning_rate 1e-4 \ 29 | --weight_decay 0. 
\ 30 | --warmup_ratio 0.03 \ 31 | --lr_scheduler_type "cosine" \ 32 | --logging_steps 1 \ 33 | --tf32 True \ 34 | --model_max_length 2048 \ 35 | --gradient_checkpointing True \ 36 | --dataloader_num_workers 4 \ 37 | --lazy_preprocess True \ 38 | --report_to wandb 39 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/finetune_task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --deepspeed ./scripts/zero3.json \ 5 | --model_name_or_path liuhaotian/llava-v1.5-13b \ 6 | --version v1 \ 7 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 8 | --image_folder ./playground/data \ 9 | --vision_tower openai/clip-vit-large-patch14-336 \ 10 | --mm_projector_type mlp2x_gelu \ 11 | --mm_vision_select_layer -2 \ 12 | --mm_use_im_start_end False \ 13 | --mm_use_im_patch_token False \ 14 | --image_aspect_ratio pad \ 15 | --group_by_modality_length True \ 16 | --bf16 True \ 17 | --output_dir ./checkpoints/llava-v1.5-13b-task \ 18 | --num_train_epochs 1 \ 19 | --per_device_train_batch_size 16 \ 20 | --per_device_eval_batch_size 4 \ 21 | --gradient_accumulation_steps 1 \ 22 | --evaluation_strategy "no" \ 23 | --save_strategy "steps" \ 24 | --save_steps 50000 \ 25 | --save_total_limit 1 \ 26 | --learning_rate 2e-5 \ 27 | --weight_decay 0. \ 28 | --warmup_ratio 0.03 \ 29 | --lr_scheduler_type "cosine" \ 30 | --logging_steps 1 \ 31 | --tf32 True \ 32 | --model_max_length 2048 \ 33 | --gradient_checkpointing True \ 34 | --dataloader_num_workers 4 \ 35 | --lazy_preprocess True \ 36 | --report_to wandb 37 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/finetune_task_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \ 5 | --deepspeed ./scripts/zero3.json \ 6 | --model_name_or_path liuhaotian/llava-v1.5-13b \ 7 | --version v1 \ 8 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 9 | --image_folder ./playground/data \ 10 | --vision_tower openai/clip-vit-large-patch14-336 \ 11 | --mm_projector_type mlp2x_gelu \ 12 | --mm_vision_select_layer -2 \ 13 | --mm_use_im_start_end False \ 14 | --mm_use_im_patch_token False \ 15 | --image_aspect_ratio pad \ 16 | --group_by_modality_length True \ 17 | --bf16 True \ 18 | --output_dir ./checkpoints/llava-v1.5-13b-task-lora \ 19 | --num_train_epochs 1 \ 20 | --per_device_train_batch_size 16 \ 21 | --per_device_eval_batch_size 4 \ 22 | --gradient_accumulation_steps 1 \ 23 | --evaluation_strategy "no" \ 24 | --save_strategy "steps" \ 25 | --save_steps 50000 \ 26 | --save_total_limit 1 \ 27 | --learning_rate 2e-4 \ 28 | --weight_decay 0. 
\ 29 | --warmup_ratio 0.03 \ 30 | --lr_scheduler_type "cosine" \ 31 | --logging_steps 1 \ 32 | --tf32 True \ 33 | --model_max_length 2048 \ 34 | --gradient_checkpointing True \ 35 | --dataloader_num_workers 4 \ 36 | --lazy_preprocess True \ 37 | --report_to wandb 38 | -------------------------------------------------------------------------------- /LLaVA/scripts/v1_5/pretrain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed llava/train/train_mem.py \ 4 | --deepspeed ./scripts/zero2.json \ 5 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 6 | --version plain \ 7 | --data_path ./playground/data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json \ 8 | --image_folder ./playground/data/LLaVA-Pretrain/images \ 9 | --vision_tower openai/clip-vit-large-patch14-336 \ 10 | --mm_projector_type mlp2x_gelu \ 11 | --tune_mm_mlp_adapter True \ 12 | --mm_vision_select_layer -2 \ 13 | --mm_use_im_start_end False \ 14 | --mm_use_im_patch_token False \ 15 | --bf16 True \ 16 | --output_dir ./checkpoints/llava-v1.5-13b-pretrain \ 17 | --num_train_epochs 1 \ 18 | --per_device_train_batch_size 32 \ 19 | --per_device_eval_batch_size 4 \ 20 | --gradient_accumulation_steps 1 \ 21 | --evaluation_strategy "no" \ 22 | --save_strategy "steps" \ 23 | --save_steps 24000 \ 24 | --save_total_limit 1 \ 25 | --learning_rate 1e-3 \ 26 | --weight_decay 0. \ 27 | --warmup_ratio 0.03 \ 28 | --lr_scheduler_type "cosine" \ 29 | --logging_steps 1 \ 30 | --tf32 True \ 31 | --model_max_length 2048 \ 32 | --gradient_checkpointing True \ 33 | --dataloader_num_workers 4 \ 34 | --lazy_preprocess True \ 35 | --report_to wandb 36 | -------------------------------------------------------------------------------- /MiniGPT-4/LICENSE.md: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright 2023 Deyao Zhu 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 11 | 12 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
15 | -------------------------------------------------------------------------------- /MiniGPT-4/LICENSE_Lavis.md: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2022 Salesforce, Inc. 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 9 | 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 11 | 12 | 3. Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 15 | -------------------------------------------------------------------------------- /MiniGPT-4/dataset/README_1_STAGE.md: -------------------------------------------------------------------------------- 1 | ## Download the filtered Conceptual Captions, SBU, LAION datasets 2 | 3 | ### Pre-training datasets download: 4 | We use the filtered synthetic captions prepared by BLIP. For more details about the dataset, please refer to [BLIP](https://github.com/salesforce/BLIP). 
5 | 6 | It requires ~2.3 TB of storage for the LAION and CC3M+CC12M+SBU datasets. 7 | 8 | Image source | Filtered synthetic caption by ViT-L 9 | --- | :---: 10 | CC3M+CC12M+SBU | Download 11 | LAION115M | Download 12 | 13 | This will download two JSON files: 14 | ``` 15 | ccs_synthetic_filtered_large.json 16 | laion_synthetic_filtered_large.json 17 | ``` 18 | 19 | ## Prepare the data step by step 20 | 21 | 22 | ### Set up the dataset folder and move the annotation files to the data storage folder 23 | ``` 24 | export MINIGPT4_DATASET=/YOUR/PATH/FOR/LARGE/DATASET/ 25 | mkdir ${MINIGPT4_DATASET}/cc_sbu 26 | mkdir ${MINIGPT4_DATASET}/laion 27 | mv ccs_synthetic_filtered_large.json ${MINIGPT4_DATASET}/cc_sbu 28 | mv laion_synthetic_filtered_large.json ${MINIGPT4_DATASET}/laion 29 | ``` 30 | 31 | ### Copy the scripts to the data storage folder 32 | ``` 33 | cp convert_cc_sbu.py ${MINIGPT4_DATASET}/cc_sbu 34 | cp download_cc_sbu.sh ${MINIGPT4_DATASET}/cc_sbu 35 | cp convert_laion.py ${MINIGPT4_DATASET}/laion 36 | cp download_laion.sh ${MINIGPT4_DATASET}/laion 37 | ``` 38 | 39 | 40 | ### Convert the laion and cc_sbu annotation files to the img2dataset format 41 | ``` 42 | cd ${MINIGPT4_DATASET}/cc_sbu 43 | python convert_cc_sbu.py 44 | 45 | cd ${MINIGPT4_DATASET}/laion 46 | python convert_laion.py 47 | ``` 48 | 49 | ### Download the datasets with img2dataset 50 | ``` 51 | cd ${MINIGPT4_DATASET}/cc_sbu 52 | sh download_cc_sbu.sh 53 | cd ${MINIGPT4_DATASET}/laion 54 | sh download_laion.sh 55 | ``` 56 | 57 | 58 | The final dataset structure: 59 | 60 | ``` 61 | . 62 | ├── ${MINIGPT4_DATASET} 63 | │ ├── cc_sbu 64 | │ ├── convert_cc_sbu.py 65 | │ ├── download_cc_sbu.sh 66 | │ ├── ccs_synthetic_filtered_large.json 67 | │ ├── ccs_synthetic_filtered_large.tsv 68 | │ └── cc_sbu_dataset 69 | │ ├── 00000.tar 70 | │ ├── 00000.parquet 71 | │ ... 72 | │ ├── laion 73 | │ ├── convert_laion.py 74 | │ ├── download_laion.sh 75 | │ ├── laion_synthetic_filtered_large.json 76 | │ ├── laion_synthetic_filtered_large.tsv 77 | │ └── laion_dataset 78 | │ ├── 00000.tar 79 | │ ├── 00000.parquet 80 | │ ... 81 | ... 82 | ``` 83 | 84 | 85 | ## Set up the dataset configuration files 86 | 87 | Then, set up the LAION dataset loading path in 88 | [here](../minigpt4/configs/datasets/laion/defaults.yaml#L5) at Line 5 as 89 | ${MINIGPT4_DATASET}/laion/laion_dataset/{00000..10488}.tar 90 | 91 | and the Conceptual Caption and SBU datasets loading path in 92 | [here](../minigpt4/configs/datasets/cc_sbu/defaults.yaml#L5) at Line 5 as 93 | ${MINIGPT4_DATASET}/cc_sbu/cc_sbu_dataset/{00000..01255}.tar 94 | 95 | 96 | 97 | -------------------------------------------------------------------------------- /MiniGPT-4/dataset/README_2_STAGE.md: -------------------------------------------------------------------------------- 1 | ## Second Stage Data Preparation 2 | 3 | Our second stage dataset can be downloaded from 4 | [here](https://drive.google.com/file/d/1nJXhoEcy3KTExr17I7BXqY5Y9Lx_-n-9/view?usp=share_link) 5 | After extraction, you will get a data folder with the following structure: 6 | 7 | ``` 8 | cc_sbu_align 9 | ├── filter_cap.json 10 | └── image 11 | ├── 2.jpg 12 | ├── 3.jpg 13 | ... 14 | ``` 15 | 16 | Put the folder at any path you want. 17 | Then, set up the dataset path in the dataset config file 18 | [here](../minigpt4/configs/datasets/cc_sbu/align.yaml#L5) at Line 5.
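None of the referenced YAML files is reproduced in this listing, so the following is only a hedged sketch of what the edited entries are expected to look like; the surrounding key names (`datasets`, `build_info`, `storage`) are assumed from the MiniGPT-4 config layout, and the actual files in `minigpt4/configs/datasets/` remain the source of truth. In each case only the `storage` path pointed to by "Line 5" changes:

```
# minigpt4/configs/datasets/laion/defaults.yaml -- illustrative sketch, keys assumed
datasets:
  laion:
    data_type: images
    build_info:
      storage: ${MINIGPT4_DATASET}/laion/laion_dataset/{00000..10488}.tar

# minigpt4/configs/datasets/cc_sbu/defaults.yaml -- illustrative sketch, keys assumed
datasets:
  cc_sbu:
    data_type: images
    build_info:
      storage: ${MINIGPT4_DATASET}/cc_sbu/cc_sbu_dataset/{00000..01255}.tar

# minigpt4/configs/datasets/cc_sbu/align.yaml -- second-stage data, illustrative sketch
datasets:
  cc_sbu_align:
    data_type: images
    build_info:
      storage: /path/to/cc_sbu_align/
```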
19 | 20 | -------------------------------------------------------------------------------- /MiniGPT-4/dataset/convert_cc_sbu.py: -------------------------------------------------------------------------------- 1 | import json 2 | import csv 3 | 4 | # specify input and output file paths 5 | input_file = 'ccs_synthetic_filtered_large.json' 6 | output_file = 'ccs_synthetic_filtered_large.tsv' 7 | 8 | # load JSON data from input file 9 | with open(input_file, 'r') as f: 10 | data = json.load(f) 11 | 12 | # extract header and data from JSON 13 | header = data[0].keys() 14 | rows = [x.values() for x in data] 15 | 16 | # write data to TSV file 17 | with open(output_file, 'w') as f: 18 | writer = csv.writer(f, delimiter='\t') 19 | writer.writerow(header) 20 | writer.writerows(rows) 21 | -------------------------------------------------------------------------------- /MiniGPT-4/dataset/convert_laion.py: -------------------------------------------------------------------------------- 1 | import json 2 | import csv 3 | 4 | # specify input and output file paths 5 | input_file = 'laion_synthetic_filtered_large.json' 6 | output_file = 'laion_synthetic_filtered_large.tsv' 7 | 8 | # load JSON data from input file 9 | with open(input_file, 'r') as f: 10 | data = json.load(f) 11 | 12 | # extract header and data from JSON 13 | header = data[0].keys() 14 | rows = [x.values() for x in data] 15 | 16 | # write data to TSV file 17 | with open(output_file, 'w') as f: 18 | writer = csv.writer(f, delimiter='\t') 19 | writer.writerow(header) 20 | writer.writerows(rows) 21 | -------------------------------------------------------------------------------- /MiniGPT-4/dataset/download_cc_sbu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | img2dataset --url_list ccs_synthetic_filtered_large.tsv --input_format "tsv"\ 4 | --url_col "url" --caption_col "caption" --output_format webdataset\ 5 | --output_folder cc_sbu_dataset --processes_count 16 --thread_count 128 --image_size 224 \ 6 | --enable_wandb True 7 | -------------------------------------------------------------------------------- /MiniGPT-4/dataset/download_laion.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | img2dataset --url_list laion_synthetic_filtered_large.tsv --input_format "tsv"\ 4 | --url_col "url" --caption_col "caption" --output_format webdataset\ 5 | --output_folder laion_dataset --processes_count 16 --thread_count 128 --image_size 224 \ 6 | --enable_wandb True 7 | -------------------------------------------------------------------------------- /MiniGPT-4/environment.yml: -------------------------------------------------------------------------------- 1 | name: minigptv 2 | channels: 3 | - pytorch 4 | - defaults 5 | - anaconda 6 | dependencies: 7 | - python=3.9 8 | - cudatoolkit 9 | - pip 10 | - pip: 11 | - torch==2.0.0 12 | - torchaudio 13 | - torchvision 14 | - huggingface-hub==0.18.0 15 | - matplotlib==3.7.0 16 | - psutil==5.9.4 17 | - iopath 18 | - pyyaml==6.0 19 | - regex==2022.10.31 20 | - tokenizers==0.13.2 21 | - tqdm==4.64.1 22 | - transformers==4.30.0 23 | - timm==0.6.13 24 | - webdataset==0.2.48 25 | - omegaconf==2.3.0 26 | - opencv-python==4.7.0.72 27 | - decord==0.6.0 28 | - peft==0.2.0 29 | - sentence-transformers 30 | - gradio==3.47.1 31 | - accelerate==0.20.3 32 | - bitsandbytes==0.37.0 33 | - scikit-image 34 | - visual-genome 35 | - wandb 36 | 
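Stepping back to the two `convert_*.py` scripts above: each reads a JSON list of caption records and rewrites it as a TSV whose header row is taken from the keys of the first record, which is the layout the `download_*.sh` img2dataset commands then consume via `--url_col "url"` and `--caption_col "caption"`. A minimal, hypothetical illustration is sketched below; the real annotation files hold millions of rows and may carry additional columns.

```
# ccs_synthetic_filtered_large.json -- hypothetical two-record excerpt
[
  {"url": "http://example.com/0001.jpg", "caption": "a dog playing in the park"},
  {"url": "http://example.com/0002.jpg", "caption": "a plate of food on a table"}
]

# ccs_synthetic_filtered_large.tsv -- resulting output read by img2dataset
url	caption
http://example.com/0001.jpg	a dog playing in the park
http://example.com/0002.jpg	a plate of food on a table
```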
-------------------------------------------------------------------------------- /MiniGPT-4/eval_configs/minigpt4_eval.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt4 3 | model_type: pretrain_vicuna0 4 | max_txt_len: 160 5 | end_sym: "###" 6 | low_resource: True 7 | prompt_template: '###Human: {} ###Assistant: ' 8 | ckpt: 'please set this value to the path of pretrained checkpoint' 9 | 10 | 11 | datasets: 12 | cc_sbu_align: 13 | vis_processor: 14 | train: 15 | name: "blip2_image_eval" 16 | image_size: 224 17 | text_processor: 18 | train: 19 | name: "blip_caption" 20 | 21 | run: 22 | task: image_text_pretrain 23 | -------------------------------------------------------------------------------- /MiniGPT-4/eval_configs/minigpt4_llama2_eval.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt4 3 | model_type: pretrain_llama2 4 | max_txt_len: 160 5 | end_sym: "" 6 | low_resource: True 7 | prompt_template: '[INST] {} [/INST] ' 8 | ckpt: 'please set this value to the path of pretrained checkpoint' 9 | 10 | 11 | datasets: 12 | cc_sbu_align: 13 | vis_processor: 14 | train: 15 | name: "blip2_image_eval" 16 | image_size: 224 17 | text_processor: 18 | train: 19 | name: "blip_caption" 20 | 21 | run: 22 | task: image_text_pretrain 23 | -------------------------------------------------------------------------------- /MiniGPT-4/eval_configs/minigptv2_benchmark_evaluation.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt_v2 3 | model_type: pretrain 4 | max_txt_len: 500 5 | end_sym: "" 6 | low_resource: False 7 | prompt_template: '[INST] {} [/INST]' 8 | llama_model: "meta-llama/Llama-2-7b-chat-hf" 9 | ckpt: "./dataengine_minigpt4v2.pth" 10 | lora_r: 64 11 | lora_alpha: 16 12 | 13 | 14 | datasets: 15 | cc_sbu_align: 16 | vis_processor: 17 | train: 18 | name: "blip2_image_eval" 19 | image_size: 448 20 | text_processor: 21 | train: 22 | name: "blip_caption" 23 | 24 | evaluation_datasets: 25 | okvqa: 26 | eval_file_path: ./evaluation_dataset/okvqa/ 27 | img_path: ./evaluation_dataset/coco2014_val 28 | max_new_tokens: 20 29 | batch_size: 10 30 | vizwiz: 31 | eval_file_path: ./evaluation_dataset/vizwiz/val.json 32 | img_path: ./evaluation_dataset/vizwiz/vizwiz_images 33 | max_new_tokens: 20 34 | batch_size: 10 35 | vsr: 36 | eval_file_path: cambridgeltl/vsr_zeroshot 37 | img_path: ./evaluation_dataset/vsr/vsr_images 38 | max_new_tokens: 20 39 | batch_size: 10 40 | seed: 41 | eval_file_path: ./evaluation_dataset/seed/seed-bench-image.jsonl 42 | img_path: ./evaluation_dataset/seed 43 | max_new_tokens: 20 44 | batch_size: 10 45 | mmbench: 46 | eval_file_path: ./evaluation_dataset/mmbench/mmbench_dev_20230712.tsv 47 | max_new_tokens: 20 48 | batch_size: 10 49 | 50 | 51 | run: 52 | task: image_text_pretrain 53 | name: minigptv2_evaluation 54 | save_path: evaluation_results 55 | 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /MiniGPT-4/eval_configs/minigptv2_eval.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt_v2 3 | model_type: pretrain 4 | max_txt_len: 500 5 | end_sym: "" 6 | low_resource: True 7 | prompt_template: '[INST] {} [/INST]' 8 | ckpt: "please set this value to the path of pretrained checkpoint" 9 | lora_r: 64 10 | lora_alpha: 16 11 | 12 | 13 | datasets: 14 | cc_sbu_align: 15 | 
vis_processor: 16 | train: 17 | name: "blip2_image_eval" 18 | image_size: 448 19 | text_processor: 20 | train: 21 | name: "blip_caption" 22 | 23 | run: 24 | task: image_text_pretrain 25 | -------------------------------------------------------------------------------- /MiniGPT-4/eval_scripts/convert_mmbench_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | import pandas as pd 5 | 6 | def get_args(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("--annotation-file", type=str, required=True) 9 | parser.add_argument("--result-file", type=str, required=True) 10 | parser.add_argument("--output-file", type=str, required=True) 11 | 12 | return parser.parse_args() 13 | 14 | if __name__ == "__main__": 15 | args = get_args() 16 | 17 | df = pd.read_table(args.annotation_file) 18 | 19 | cur_df = df.copy() 20 | cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category']) 21 | cur_df.insert(6, 'prediction', None) 22 | for pred in open(args.result_file): 23 | pred = json.loads(pred) 24 | pred['text'] = pred['text'].capitalize() 25 | cur_df.loc[df['index'] == pred['question_id'], 'prediction'] = pred['text'] 26 | 27 | cur_df.to_excel(args.output_file, index=False, engine='openpyxl') 28 | -------------------------------------------------------------------------------- /MiniGPT-4/eval_scripts/convert_seed_for_submission_minigpt4.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pdb 3 | import json 4 | import argparse 5 | 6 | 7 | def get_args(): 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument("--annotation-file", type=str) 10 | parser.add_argument("--result-file", type=str) 11 | parser.add_argument("--result-upload-file", type=str) 12 | parser.add_argument("--save-path", type=str) 13 | return parser.parse_args() 14 | 15 | 16 | def eval_single(result_file, eval_only_type=None): 17 | results = {} 18 | metrics = {} 19 | for line in open(result_file): 20 | row = json.loads(line) 21 | results[row['question_id']] = row 22 | 23 | type_counts = {} 24 | correct_counts = {} 25 | for question_data in data['questions']: 26 | if eval_only_type is not None and question_data['data_type'] != eval_only_type: continue 27 | data_type = question_data['question_type_id'] 28 | type_counts[data_type] = type_counts.get(data_type, 0) + 1 29 | try: 30 | question_id = int(question_data['question_id']) 31 | except: 32 | question_id = question_data['question_id'] 33 | if question_id not in results: 34 | correct_counts[data_type] = correct_counts.get(data_type, 0) 35 | continue 36 | row = results[question_id] 37 | try: 38 | if row['text'][0].lower() == question_data['answer'].lower(): 39 | correct_counts[data_type] = correct_counts.get(data_type, 0) + 1 40 | except BaseException: 41 | continue 42 | 43 | total_count = 0 44 | total_correct = 0 45 | for data_type in sorted(type_counts.keys()): 46 | accuracy = correct_counts[data_type] / type_counts[data_type] * 100 47 | if eval_only_type is None: 48 | print(f"{ques_type_id_to_name[data_type]}: {accuracy:.2f}%") 49 | metrics[ques_type_id_to_name[data_type]] = round(accuracy, 3) 50 | 51 | total_count += type_counts[data_type] 52 | total_correct += correct_counts[data_type] 53 | 54 | total_accuracy = total_correct / total_count * 100 55 | if eval_only_type is None: 56 | print(f"Total accuracy: {total_accuracy:.2f}%") 57 | metrics["Total accuracy"] = 
round(total_accuracy, 3) 58 | else: 59 | print(f"{eval_only_type} accuracy: {total_accuracy:.2f}%") 60 | metrics[eval_only_type] = round(total_accuracy, 3) 61 | 62 | return results 63 | 64 | if __name__ == "__main__": 65 | args = get_args() 66 | data = json.load(open(args.annotation_file)) 67 | ques_type_id_to_name = {id:n for n,id in data['question_type'].items()} 68 | 69 | results = eval_single(args.result_file) 70 | eval_single(args.result_file, eval_only_type='image') 71 | #eval_single(args.result_file, eval_only_type='video') 72 | -------------------------------------------------------------------------------- /MiniGPT-4/figs/demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/demo.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/ad_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/ad_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/ad_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/ad_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/cook_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/cook_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/cook_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/cook_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/describe_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/describe_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/describe_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/describe_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/fact_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/fact_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/fact_2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/fact_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/fix_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/fix_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/fix_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/fix_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/fun_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/fun_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/fun_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/fun_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/logo_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/logo_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/op_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/op_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/op_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/op_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/people_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/people_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/people_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/people_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/rhyme_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/rhyme_1.png 
-------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/rhyme_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/rhyme_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/story_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/story_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/story_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/story_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/web_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/web_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/wop_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/wop_1.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/examples/wop_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/examples/wop_2.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/minigpt2_demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/minigpt2_demo.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/online_demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/online_demo.png -------------------------------------------------------------------------------- /MiniGPT-4/figs/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/figs/overview.png -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 
4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | import os 9 | import sys 10 | 11 | from omegaconf import OmegaConf 12 | 13 | from minigpt4.common.registry import registry 14 | 15 | from minigpt4.datasets.builders import * 16 | from minigpt4.models import * 17 | from minigpt4.processors import * 18 | from minigpt4.tasks import * 19 | 20 | 21 | root_dir = os.path.dirname(os.path.abspath(__file__)) 22 | default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml")) 23 | 24 | registry.register_path("library_root", root_dir) 25 | repo_root = os.path.join(root_dir, "..") 26 | registry.register_path("repo_root", repo_root) 27 | cache_root = os.path.join(repo_root, default_cfg.env.cache_root) 28 | registry.register_path("cache_root", cache_root) 29 | 30 | registry.register("MAX_INT", sys.maxsize) 31 | registry.register("SPLIT_NAMES", ["train", "val", "test"]) 32 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/minigpt4/common/__init__.py -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/common/gradcam.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from matplotlib import pyplot as plt 3 | from scipy.ndimage import filters 4 | from skimage import transform as skimage_transform 5 | 6 | 7 | def getAttMap(img, attMap, blur=True, overlap=True): 8 | attMap -= attMap.min() 9 | if attMap.max() > 0: 10 | attMap /= attMap.max() 11 | attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant") 12 | if blur: 13 | attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2])) 14 | attMap -= attMap.min() 15 | attMap /= attMap.max() 16 | cmap = plt.get_cmap("jet") 17 | attMapV = cmap(attMap) 18 | attMapV = np.delete(attMapV, 3, 2) 19 | if overlap: 20 | attMap = ( 21 | 1 * (1 - attMap**0.7).reshape(attMap.shape + (1,)) * img 22 | + (attMap**0.7).reshape(attMap.shape + (1,)) * attMapV 23 | ) 24 | return attMap 25 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/common/vqa_tools/VQA/PythonEvaluationTools/vqaEvaluation/__init__.py: -------------------------------------------------------------------------------- 1 | author='aagrawal' 2 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaDemo.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | from vqaTools.vqa import VQA 4 | import random 5 | import skimage.io as io 6 | import matplotlib.pyplot as plt 7 | import os 8 | 9 | dataDir ='../../VQA' 10 | versionType ='v2_' # this should be '' when using VQA v2.0 dataset 11 | taskType ='OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0 12 | dataType ='mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0. 
13 | dataSubType ='train2014' 14 | annFile ='%s/Annotations/%s%s_%s_annotations.json'%(dataDir, versionType, dataType, dataSubType) 15 | quesFile ='%s/Questions/%s%s_%s_%s_questions.json'%(dataDir, versionType, taskType, dataType, dataSubType) 16 | imgDir = '%s/Images/%s/%s/' %(dataDir, dataType, dataSubType) 17 | 18 | # initialize VQA api for QA annotations 19 | vqa=VQA(annFile, quesFile) 20 | 21 | # load and display QA annotations for given question types 22 | """ 23 | All possible quesTypes for abstract and mscoco has been provided in respective text files in ../QuestionTypes/ folder. 24 | """ 25 | annIds = vqa.getQuesIds(quesTypes='how many'); 26 | anns = vqa.loadQA(annIds) 27 | randomAnn = random.choice(anns) 28 | vqa.showQA([randomAnn]) 29 | imgId = randomAnn['image_id'] 30 | imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg' 31 | if os.path.isfile(imgDir + imgFilename): 32 | I = io.imread(imgDir + imgFilename) 33 | plt.imshow(I) 34 | plt.axis('off') 35 | plt.show() 36 | 37 | # load and display QA annotations for given answer types 38 | """ 39 | ansTypes can be one of the following 40 | yes/no 41 | number 42 | other 43 | """ 44 | annIds = vqa.getQuesIds(ansTypes='yes/no'); 45 | anns = vqa.loadQA(annIds) 46 | randomAnn = random.choice(anns) 47 | vqa.showQA([randomAnn]) 48 | imgId = randomAnn['image_id'] 49 | imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg' 50 | if os.path.isfile(imgDir + imgFilename): 51 | I = io.imread(imgDir + imgFilename) 52 | plt.imshow(I) 53 | plt.axis('off') 54 | plt.show() 55 | 56 | # load and display QA annotations for given images 57 | """ 58 | Usage: vqa.getImgIds(quesIds=[], quesTypes=[], ansTypes=[]) 59 | Above method can be used to retrieve imageIds for given question Ids or given question types or given answer types. 
60 | """ 61 | ids = vqa.getImgIds() 62 | annIds = vqa.getQuesIds(imgIds=random.sample(ids,5)); 63 | anns = vqa.loadQA(annIds) 64 | randomAnn = random.choice(anns) 65 | vqa.showQA([randomAnn]) 66 | imgId = randomAnn['image_id'] 67 | imgFilename = 'COCO_' + dataSubType + '_'+ str(imgId).zfill(12) + '.jpg' 68 | if os.path.isfile(imgDir + imgFilename): 69 | I = io.imread(imgDir + imgFilename) 70 | plt.imshow(I) 71 | plt.axis('off') 72 | plt.show() 73 | 74 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/common/vqa_tools/VQA/PythonHelperTools/vqaTools/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = 'aagrawal' 2 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/common/vqa_tools/VQA/QuestionTypes/abstract_v002_question_types.txt: -------------------------------------------------------------------------------- 1 | how many 2 | what color is the 3 | is the 4 | where is the 5 | what 6 | what is 7 | are the 8 | what is the 9 | is there a 10 | does the 11 | is the woman 12 | is the man 13 | what is on the 14 | is it 15 | is the girl 16 | is the boy 17 | is the dog 18 | are they 19 | who is 20 | what kind of 21 | what color are the 22 | what is in the 23 | what is the man 24 | is there 25 | what is the woman 26 | what are the 27 | what is the boy 28 | are there 29 | what is the girl 30 | is this 31 | how 32 | which 33 | how many people are 34 | is the cat 35 | why is the 36 | are 37 | will the 38 | what type of 39 | what is the dog 40 | do 41 | is she 42 | does 43 | do the 44 | is 45 | is the baby 46 | are there any 47 | is the lady 48 | can 49 | what animal is 50 | where are the 51 | is the sun 52 | what are they 53 | did the 54 | what is the cat 55 | what is the lady 56 | how many clouds are 57 | is that 58 | is the little girl 59 | is he 60 | are these 61 | how many trees are 62 | how many pillows 63 | are the people 64 | why 65 | is the young 66 | how many windows are 67 | is this a 68 | what is the little 69 | is the tv 70 | how many animals are 71 | who 72 | how many pictures 73 | how many plants are 74 | how many birds are 75 | what color is 76 | what is the baby 77 | is anyone 78 | what color 79 | how many bushes 80 | is the old man 81 | none of the above 82 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/common/vqa_tools/VQA/QuestionTypes/mscoco_question_types.txt: -------------------------------------------------------------------------------- 1 | how many 2 | is the 3 | what 4 | what color is the 5 | what is the 6 | is this 7 | is this a 8 | what is 9 | are the 10 | what kind of 11 | is there a 12 | what type of 13 | is it 14 | what are the 15 | where is the 16 | is there 17 | does the 18 | what color are the 19 | are these 20 | are there 21 | which 22 | is 23 | what is the man 24 | is the man 25 | are 26 | how 27 | does this 28 | what is on the 29 | what does the 30 | how many people are 31 | what is in the 32 | what is this 33 | do 34 | what are 35 | are they 36 | what time 37 | what sport is 38 | are there any 39 | is he 40 | what color is 41 | why 42 | where are the 43 | what color 44 | who is 45 | what animal is 46 | is the woman 47 | is this an 48 | do you 49 | how many people are in 50 | what room is 51 | has 52 | is this person 53 | what is the woman 54 | can you 55 | why is the 56 | is the person 57 | what is the color of the 58 | what is the person 59 | could 
60 | was 61 | is that a 62 | what number is 63 | what is the name 64 | what brand 65 | none of the above 66 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/common/vqa_tools/VQA/license.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Aishwarya Agrawal 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | 1. Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 2. Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 14 | AND 15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 18 | FOR 19 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 22 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | 26 | The views and conclusions contained in the software and documentation are 27 | those 28 | of the authors and should not be interpreted as representing official 29 | policies, 30 | either expressed or implied, of the FreeBSD Project. 31 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/common/vqa_tools/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | __author__ = "aagrawal" 9 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/aokvqa/defaults.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, salesforce.com, inc. 2 | # All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 5 | 6 | datasets: 7 | aok_vqa: 8 | # data_dir: ${env.data_dir}/datasets 9 | data_type: images # [images|videos|features] 10 | 11 | build_info: 12 | # Be careful not to append minus sign (-) before split to avoid itemizing 13 | annotations: 14 | train: 15 | url: 16 | - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/aokvqa/aokvqa_v1p0_train.json 17 | storage: 18 | - ./train_dataset/aokvqa/aokvqa_v1p0_train.json 19 | images: 20 | storage: ./train_dataset/COCO2014 -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/cc_sbu/align.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | cc_sbu_align: 3 | data_type: images 4 | build_info: 5 | storage: /path/to/cc_sbu_align/ 6 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/cc_sbu/defaults.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | cc_sbu: 3 | data_type: images 4 | build_info: 5 | storage: /path/to/cc_sbu_dataset/{00000..01255}.tar 6 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/coco/caption.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, salesforce.com, inc. 2 | # All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 5 | 6 | datasets: 7 | coco_caption: # name of the dataset builder 8 | # dataset_card: dataset_card/coco_caption.md 9 | # data_dir: ${env.data_dir}/datasets 10 | data_type: images # [images|videos|features] 11 | 12 | build_info: 13 | # Be careful not to append minus sign (-) before split to avoid itemizing 14 | annotations: 15 | train: 16 | url: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_train.json 17 | md5: aa31ac474cf6250ebb81d18348a07ed8 18 | storage: ./train_dataset/COCO2014/coco_karpathy_train.json 19 | images: 20 | storage: ./train_dataset/COCO2014 21 | 22 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/coco/defaults_vqa.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, salesforce.com, inc. 2 | # All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 5 | 6 | datasets: 7 | coco_vqa: 8 | # data_dir: ${env.data_dir}/datasets 9 | data_type: images # [images|videos|features] 10 | 11 | build_info: 12 | 13 | annotations: 14 | train: 15 | url: 16 | - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/vqav2/vqa_train.json 17 | - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/vqav2/vqa_val.json 18 | storage: 19 | - ./train_dataset/vqav2/vqa_train.json 20 | - ./train_dataset/vqav2/vqa_val.json 21 | images: 22 | storage: ./train_dataset/COCO2014 23 | 24 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/coco_bbox/invrefcoco.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | invrefcoco: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/COCO2014/train 6 | ann_path: ./train_dataset/refcoco 7 | dataset: invrefcoco 8 | splitBy: unc -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/coco_bbox/invrefcocog.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | invrefcocog: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/COCO2014/train 6 | ann_path: ./train_dataset/refcoco 7 | dataset: invrefcocog 8 | splitBy: umd -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/coco_bbox/invrefcocop.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | invrefcocop: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/COCO2014/train 6 | ann_path: ./train_dataset/refcoco 7 | dataset: invrefcoco+ 8 | splitBy: unc -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/coco_bbox/refcoco.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | refcoco: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/COCO2014/train 6 | ann_path: ./train_dataset/refcoco 7 | dataset: refcoco 8 | splitBy: unc -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/coco_bbox/refcocog.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | refcocog: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/COCO2014/train 6 | ann_path: ./train_dataset/refcoco 7 | dataset: refcocog 8 | splitBy: umd -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/coco_bbox/refcocop.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | refcocop: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/COCO2014/train 6 | ann_path: ./train_dataset/refcoco 7 | dataset: refcoco+ 8 | splitBy: unc -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/engine/da.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, salesforce.com, inc. 2 | # All rights reserved. 
3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 5 | 6 | datasets: 7 | engine_da: 8 | # data_dir: ${env.data_dir}/datasets 9 | data_type: images # [images|videos|features] 10 | 11 | build_info: 12 | 13 | annotations: 14 | train: 15 | url: 16 | - /path/to/data 17 | storage: 18 | - ./train_dataset/data_engine/dataengine_minigpt4.json 19 | images: 20 | storage: ./train_dataset/vg 21 | 22 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/engine/mc.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, salesforce.com, inc. 2 | # All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 5 | 6 | datasets: 7 | engine_mc: 8 | # data_dir: ${env.data_dir}/datasets 9 | data_type: images # [images|videos|features] 10 | 11 | build_info: 12 | 13 | annotations: 14 | train: 15 | url: 16 | - /path/to/data 17 | storage: 18 | - ./train_dataset/data_engine/dataengine_minigpt4.json 19 | images: 20 | storage: ./train_dataset/vg 21 | 22 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/engine/mcp.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, salesforce.com, inc. 2 | # All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 5 | 6 | datasets: 7 | engine_mcp: 8 | # data_dir: ${env.data_dir}/datasets 9 | data_type: images # [images|videos|features] 10 | 11 | build_info: 12 | 13 | annotations: 14 | train: 15 | url: 16 | - /path/to/data 17 | storage: 18 | - ./train_dataset/data_engine/dataengine_minigpt4.json 19 | images: 20 | storage: ./train_dataset/vg 21 | 22 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/flickr/caption_to_phrase.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | flickr_CaptionToPhrase: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/flickr/flickr30k-images 6 | ann_path: ./train_dataset/flickr/captiontobbox.json 7 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/flickr/default.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | flickr_grounded_caption: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/flickr/flickr30k-images 6 | ann_path: ./train_dataset/flickr/groundedcaption.json 7 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/flickr/object_to_phrase.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | flickr_ObjectToPhrase: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/flickr/flickr30k-images 6 | ann_path: ./train_dataset/flickr/phrasetobbox.json 7 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/gqa/balanced_val.yaml: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, salesforce.com, inc. 2 | # All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 5 | 6 | datasets: 7 | gqa: 8 | # data_dir: ${env.data_dir}/datasets 9 | data_type: images # [images|videos|features] 10 | 11 | build_info: 12 | # Be careful not to append minus sign (-) before split to avoid itemizing 13 | annotations: 14 | train: 15 | url: 16 | - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/train_balanced_questions.json 17 | storage: 18 | - ./train_dataset/gqa/train_balanced_questions.json 19 | 20 | images: 21 | storage: ./train_dataset/gqa/images 22 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/laion/defaults.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | laion: 3 | data_type: images 4 | build_info: 5 | storage: /path/to/laion_dataset/{00000..10488}.tar 6 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/llava/conversation.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | 3 | llava_conversation: 4 | data_type: images 5 | build_info: 6 | image_path: ./train_dataset/COCO2014/train 7 | ann_path: ./train_dataset/llava/conversation_58k.json -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/llava/detail.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | llava_detail: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/COCO2014/train 6 | ann_path: ./train_dataset/llava/detail_23k.json -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/llava/reason.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | 3 | llava_reason: 4 | data_type: images 5 | build_info: 6 | image_path: ./train_dataset/COCO2014/train 7 | ann_path: ./train_dataset/llava/complex_reasoning_77k.json -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/multitask_conversation/default.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | multitask_conversation: 3 | data_type: images 4 | build_info: 5 | 6 | image_path: ./train_dataset/COCO2014/train 7 | ann_path: ./train_dataset/multitask_conversation/multitask_conversation.json -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/nlp/unnatural_instruction.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | unnatural_instruction: 3 | data_type: text 4 | build_info: 5 | ann_path: ./train_dataset/nlp/filtered_unnatural_instruction.json -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/ocrvqa/ocrvqa.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | ocrvqa: 3 | data_type: images 4 | build_info: 5 | image_path: ./train_dataset/ocrvqa/images 6 | ann_path: 
./train_dataset/ocrvqa/dataset.json -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/okvqa/defaults.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2022, salesforce.com, inc. 2 | # All rights reserved. 3 | # SPDX-License-Identifier: BSD-3-Clause 4 | # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 5 | 6 | datasets: 7 | ok_vqa: 8 | # data_dir: ${env.data_dir}/datasets 9 | data_type: images # [images|videos|features] 10 | 11 | build_info: 12 | # Be careful not to append minus sign (-) before split to avoid itemizing 13 | annotations: 14 | train: 15 | url: 16 | # TODO make this order insensitive 17 | - https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/okvqa/okvqa_train.json 18 | storage: 19 | - ./train_dataset/okvqa/okvqa_train.json 20 | images: 21 | storage: ./train_dataset/COCO2014 -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/textcaps/caption.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | textcaps_caption: 3 | data_type: images 4 | 5 | build_info: 6 | image_path: ./train_dataset/textcaps/train_images 7 | ann_path: ./train_dataset/textcaps/TextCaps_0.1_train.json 8 | 9 | 10 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/datasets/vg/ref.yaml: -------------------------------------------------------------------------------- 1 | datasets: 2 | refvg: 3 | data_type: images 4 | build_info: 5 | data_dir: ./train_dataset/vg -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/default.yaml: -------------------------------------------------------------------------------- 1 | env: 2 | # For default users 3 | # cache_root: "cache" 4 | # For internal use with persistent storage 5 | cache_root: "./cache" 6 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/models/minigpt4_llama2.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt4 3 | 4 | # vit encoder 5 | image_size: 224 6 | drop_path_rate: 0 7 | use_grad_checkpoint: False 8 | vit_precision: "fp16" 9 | freeze_vit: True 10 | has_qformer: False 11 | 12 | # generation configs 13 | prompt: "" 14 | 15 | llama_model: "please set this value to the path of llama2-chat-7b" 16 | 17 | preprocess: 18 | vis_processor: 19 | train: 20 | name: "blip2_image_train" 21 | image_size: 224 22 | eval: 23 | name: "blip2_image_eval" 24 | image_size: 224 25 | text_processor: 26 | train: 27 | name: "blip_caption" 28 | eval: 29 | name: "blip_caption" 30 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/models/minigpt4_vicuna0.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt4 3 | 4 | # vit encoder 5 | image_size: 224 6 | drop_path_rate: 0 7 | use_grad_checkpoint: False 8 | vit_precision: "fp16" 9 | freeze_vit: True 10 | freeze_qformer: True 11 | 12 | # Q-Former 13 | num_query_token: 32 14 | 15 | # generation configs 16 | prompt: "" 17 | 18 | llama_model: "please set this value to the path of vicuna model" 19 | 20 | preprocess: 21 | vis_processor: 22 | train: 23 | name: 
"blip2_image_train" 24 | image_size: 224 25 | eval: 26 | name: "blip2_image_eval" 27 | image_size: 224 28 | text_processor: 29 | train: 30 | name: "blip_caption" 31 | eval: 32 | name: "blip_caption" 33 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/configs/models/minigpt_v2.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt_v2 3 | 4 | # vit encoder 5 | image_size: 448 6 | drop_path_rate: 0 7 | use_grad_checkpoint: False 8 | vit_precision: "fp16" 9 | freeze_vit: True 10 | 11 | # generation configs 12 | prompt: "" 13 | 14 | llama_model: "please set this value to the path of llama2-chat-7b" 15 | lora_r: 64 16 | lora_alpha: 16 17 | 18 | 19 | preprocess: 20 | vis_processor: 21 | train: 22 | name: "blip2_image_train" 23 | image_size: 448 24 | eval: 25 | name: "blip2_image_eval" 26 | image_size: 448 27 | text_processor: 28 | train: 29 | name: "blip_caption" 30 | eval: 31 | name: "blip_caption" 32 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/conversation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/minigpt4/conversation/__init__.py -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/minigpt4/datasets/__init__.py -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/builders/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | from minigpt4.datasets.builders.base_dataset_builder import load_dataset_config 9 | from minigpt4.datasets.builders.image_text_pair_builder import ( 10 | CCSBUBuilder, 11 | LaionBuilder, 12 | CCSBUAlignBuilder 13 | ) 14 | from minigpt4.common.registry import registry 15 | 16 | __all__ = [ 17 | "CCSBUBuilder", 18 | "LaionBuilder", 19 | "CCSBUAlignBuilder" 20 | ] 21 | 22 | 23 | def load_dataset(name, cfg_path=None, vis_path=None, data_type=None): 24 | """ 25 | Example 26 | 27 | >>> dataset = load_dataset("coco_caption", cfg=None) 28 | >>> splits = dataset.keys() 29 | >>> print([len(dataset[split]) for split in splits]) 30 | 31 | """ 32 | if cfg_path is None: 33 | cfg = None 34 | else: 35 | cfg = load_dataset_config(cfg_path) 36 | 37 | try: 38 | builder = registry.get_builder_class(name)(cfg) 39 | except TypeError: 40 | print( 41 | f"Dataset {name} not found. Available datasets:\n" 42 | + ", ".join([str(k) for k in dataset_zoo.get_names()]) 43 | ) 44 | exit(1) 45 | 46 | if vis_path is not None: 47 | if data_type is None: 48 | # use default data type in the config 49 | data_type = builder.config.data_type 50 | 51 | assert ( 52 | data_type in builder.config.build_info 53 | ), f"Invalid data_type {data_type} for {name}." 
54 | 55 | builder.config.build_info.get(data_type).storage = vis_path 56 | 57 | dataset = builder.build_datasets() 58 | return dataset 59 | 60 | 61 | class DatasetZoo: 62 | def __init__(self) -> None: 63 | self.dataset_zoo = { 64 | k: list(v.DATASET_CONFIG_DICT.keys()) 65 | for k, v in sorted(registry.mapping["builder_name_mapping"].items()) 66 | } 67 | 68 | def get_names(self): 69 | return list(self.dataset_zoo.keys()) 70 | 71 | 72 | dataset_zoo = DatasetZoo() 73 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/MiniGPT-4/minigpt4/datasets/datasets/__init__.py -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/datasets/base_dataset.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | import json 9 | from typing import Iterable 10 | 11 | from torch.utils.data import Dataset, ConcatDataset 12 | from torch.utils.data.dataloader import default_collate 13 | 14 | 15 | 16 | 17 | class BaseDataset(Dataset): 18 | def __init__( 19 | self, vis_processor=None, text_processor=None, vis_root=None, ann_paths=[] 20 | ): 21 | """ 22 | vis_root (string): Root directory of images (e.g. coco/images/) 23 | ann_root (string): directory to store the annotation file 24 | """ 25 | self.vis_root = vis_root 26 | 27 | self.annotation = [] 28 | # print("ann paths", ann_paths) 29 | for ann_path in ann_paths: 30 | # print("ann_path", ann_path) 31 | ann = json.load(open(ann_path, "r")) 32 | if isinstance(ann, dict): 33 | self.annotation.extend(json.load(open(ann_path, "r"))['annotations']) 34 | # self.annotation.extend(json.load(open(ann_path, "r"))) 35 | else: 36 | self.annotation.extend(json.load(open(ann_path, "r"))) 37 | 38 | self.vis_processor = vis_processor 39 | self.text_processor = text_processor 40 | 41 | self._add_instance_ids() 42 | 43 | def __len__(self): 44 | return len(self.annotation) 45 | 46 | def collater(self, samples): 47 | return default_collate(samples) 48 | 49 | def set_processors(self, vis_processor, text_processor): 50 | self.vis_processor = vis_processor 51 | self.text_processor = text_processor 52 | 53 | def _add_instance_ids(self, key="instance_id"): 54 | for idx, ann in enumerate(self.annotation): 55 | ann[key] = str(idx) 56 | 57 | 58 | 59 | class ConcatDataset(ConcatDataset): 60 | def __init__(self, datasets: Iterable[Dataset]) -> None: 61 | super().__init__(datasets) 62 | 63 | def collater(self, samples): 64 | # TODO For now only supports datasets with same underlying collater implementations 65 | 66 | all_keys = set() 67 | for s in samples: 68 | all_keys.update(s) 69 | 70 | shared_keys = all_keys 71 | for s in samples: 72 | shared_keys = shared_keys & set(s.keys()) 73 | 74 | samples_shared_keys = [] 75 | for s in samples: 76 | samples_shared_keys.append({k: s[k] for k in s.keys() if k in shared_keys}) 77 | 78 | return self.datasets[0].collater(samples_shared_keys) 79 | -------------------------------------------------------------------------------- 
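A note on the builder API dumped above: load_dataset() and the module-level dataset_zoo defined in minigpt4/datasets/builders/__init__.py can be exercised directly. The lines below are a minimal usage sketch, assuming the MiniGPT-4 package is importable and that the storage paths in the corresponding dataset YAML (e.g. configs/datasets/cc_sbu/align.yaml) have been filled in; the dataset name "cc_sbu_align" is assumed to match the key used in that config.

# Minimal usage sketch for the dataset builder registry (see assumptions above).
from minigpt4.datasets.builders import load_dataset, dataset_zoo

# Builders register themselves on import; list the registered names.
print(dataset_zoo.get_names())

# cfg_path=None falls back to the builder's default config; build_datasets()
# reads (and, for url-backed annotations, may first download) the configured files.
dataset = load_dataset("cc_sbu_align", cfg_path=None)
print({split: len(dataset[split]) for split in dataset.keys()})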
/MiniGPT-4/minigpt4/datasets/datasets/cc_sbu_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | from PIL import Image 3 | import webdataset as wds 4 | from minigpt4.datasets.datasets.base_dataset import BaseDataset 5 | from minigpt4.datasets.datasets.caption_datasets import CaptionDataset 6 | 7 | 8 | class CCSBUDataset(BaseDataset): 9 | def __init__(self, vis_processor, text_processor, location): 10 | super().__init__(vis_processor=vis_processor, text_processor=text_processor) 11 | 12 | self.inner_dataset = wds.DataPipeline( 13 | wds.ResampledShards(location), 14 | wds.tarfile_to_samples(handler=wds.warn_and_continue), 15 | wds.shuffle(1000, handler=wds.warn_and_continue), 16 | wds.decode("pilrgb", handler=wds.warn_and_continue), 17 | wds.to_tuple("jpg", "json", handler=wds.warn_and_continue), 18 | wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue), 19 | wds.map(self.to_dict, handler=wds.warn_and_continue), 20 | ) 21 | 22 | def to_dict(self, sample): 23 | return { 24 | "image": sample[0], 25 | "answer": self.text_processor(sample[1]["caption"]), 26 | } 27 | 28 | 29 | class CCSBUAlignDataset(CaptionDataset): 30 | 31 | def __getitem__(self, index): 32 | 33 | # TODO this assumes image input, not general enough 34 | ann = self.annotation[index] 35 | 36 | img_file = '{}.jpg'.format(ann["image_id"]) 37 | image_path = os.path.join(self.vis_root, img_file) 38 | image = Image.open(image_path).convert("RGB") 39 | 40 | image = self.vis_processor(image) 41 | caption = ann["caption"] 42 | 43 | return { 44 | "image": image, 45 | "answer": caption, 46 | "image_id": self.img_ids[ann["image_id"]], 47 | } -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/datasets/gqa_datasets.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 
4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | import os 9 | import json 10 | 11 | from PIL import Image 12 | 13 | from minigpt4.datasets.datasets.vqa_datasets import VQADataset 14 | 15 | from collections import OrderedDict 16 | import random 17 | 18 | class __DisplMixin: 19 | def displ_item(self, index): 20 | sample, ann = self.__getitem__(index), self.annotation[index] 21 | 22 | return OrderedDict( 23 | { 24 | "file": ann["image"], 25 | "question": ann["question"], 26 | "question_id": ann["question_id"], 27 | "answers": "; ".join(ann["answer"]), 28 | "image": sample["image"], 29 | } 30 | ) 31 | 32 | 33 | class GQADataset(VQADataset, __DisplMixin): 34 | def __init__(self, vis_processor, text_processor, vis_root, ann_paths): 35 | super().__init__(vis_processor, text_processor, vis_root, ann_paths) 36 | self.instruction_pool =[ 37 | "[vqa] {}", 38 | "[vqa] Based on the image, respond to this question with a short answer: {}" 39 | ] 40 | 41 | def __getitem__(self, index): 42 | ann = self.annotation[index] 43 | 44 | image_path = os.path.join(self.vis_root, ann["image"]) 45 | image = Image.open(image_path).convert("RGB") 46 | 47 | image = self.vis_processor(image) 48 | question = self.text_processor(ann["question"]) 49 | 50 | instruction = random.choice(self.instruction_pool).format(question) 51 | instruction = " {} ".format(instruction) 52 | 53 | answers = self.text_processor(ann["answer"]) 54 | 55 | return { 56 | "image": image, 57 | "instruction_input": instruction, 58 | "answer": answers, 59 | } 60 | 61 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/datasets/laion_dataset.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 
4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | import webdataset as wds 9 | from minigpt4.datasets.datasets.base_dataset import BaseDataset 10 | 11 | 12 | class LaionDataset(BaseDataset): 13 | def __init__(self, vis_processor, text_processor, location): 14 | super().__init__(vis_processor=vis_processor, text_processor=text_processor) 15 | 16 | self.inner_dataset = wds.DataPipeline( 17 | wds.ResampledShards(location), 18 | wds.tarfile_to_samples(handler=wds.warn_and_continue), 19 | wds.shuffle(1000, handler=wds.warn_and_continue), 20 | wds.decode("pilrgb", handler=wds.warn_and_continue), 21 | wds.to_tuple("jpg", "json", handler=wds.warn_and_continue), 22 | wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue), 23 | wds.map(self.to_dict, handler=wds.warn_and_continue), 24 | ) 25 | 26 | def to_dict(self, sample): 27 | return { 28 | "image": sample[0], 29 | "answer": self.text_processor(sample[1]["caption"]), 30 | } 31 | 32 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/datasets/multitask_conversation.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import pickle 4 | import random 5 | import time 6 | import itertools 7 | 8 | import numpy as np 9 | from PIL import Image 10 | import skimage.io as io 11 | import matplotlib.pyplot as plt 12 | from matplotlib.collections import PatchCollection 13 | from matplotlib.patches import Polygon, Rectangle 14 | from torch.utils.data import Dataset 15 | import webdataset as wds 16 | 17 | from minigpt4.datasets.datasets.base_dataset import BaseDataset 18 | from minigpt4.datasets.datasets.caption_datasets import CaptionDataset 19 | 20 | 21 | 22 | 23 | class MultiTaskConversationDataset(Dataset): 24 | def __init__(self, vis_processor, text_processor, vis_root, ann_path): 25 | """ 26 | vis_root (string): Root directory of images (e.g. 
coco/images/) 27 | ann_root (string): directory to store the annotation file 28 | """ 29 | self.vis_root = vis_root 30 | 31 | self.vis_processor = vis_processor 32 | self.text_processor = text_processor 33 | 34 | 35 | with open(ann_path, 'r') as f: 36 | self.ann = json.load(f) 37 | 38 | self.connect_sym = "!@#" 39 | 40 | def __len__(self): 41 | return len(self.ann) 42 | 43 | def __getitem__(self, index): 44 | info = self.ann[index] 45 | 46 | image_file = 'COCO_train2014_{}.jpg'.format(info['id']) 47 | image_path = os.path.join(self.vis_root, image_file) 48 | image = Image.open(image_path).convert("RGB") 49 | image = self.vis_processor(image) 50 | 51 | first_instruction = info['conversations'][0]['value'].replace('', '').replace('\n', '').strip() 52 | first_instruction = ' {} '.format(first_instruction) 53 | 54 | questions = [first_instruction] 55 | answers = [] 56 | 57 | for i, item in enumerate(info["conversations"][1:]): 58 | if i % 2 ==0: # assistant 59 | assistant_answer = item["value"] 60 | answers.append(assistant_answer) 61 | else: 62 | human_instruction = item["value"]+" " 63 | questions.append(human_instruction) 64 | 65 | questions = self.connect_sym.join(questions) 66 | answers = self.connect_sym.join(answers) 67 | 68 | 69 | return { 70 | "image": image, 71 | "conv_q": questions, 72 | 'conv_a': answers, 73 | "image_id": info['id'], 74 | "connect_sym": self.connect_sym 75 | } -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/datasets/ocrvqa_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import pickle 4 | import random 5 | import time 6 | import itertools 7 | 8 | import numpy as np 9 | from PIL import Image 10 | import skimage.io as io 11 | import matplotlib.pyplot as plt 12 | from matplotlib.collections import PatchCollection 13 | from matplotlib.patches import Polygon, Rectangle 14 | from torch.utils.data import Dataset 15 | import webdataset as wds 16 | 17 | from minigpt4.datasets.datasets.base_dataset import BaseDataset 18 | from minigpt4.datasets.datasets.caption_datasets import CaptionDataset 19 | 20 | 21 | class OCRVQADataset(Dataset): 22 | def __init__(self, vis_processor, text_processor, vis_root, ann_path): 23 | """ 24 | vis_root (string): Root directory of images (e.g. 
coco/images/) 25 | ann_root (string): directory to store the annotation file 26 | """ 27 | self.vis_root = vis_root 28 | 29 | self.vis_processor = vis_processor 30 | self.text_processor = text_processor 31 | self.data = self.create_data(ann_path) 32 | 33 | self.instruction_pool =[ 34 | "[vqa] {}", 35 | "[vqa] Based on the image, respond to this question with a short answer: {}" 36 | ] 37 | 38 | def create_data(self, ann_path): 39 | processed_data = [] 40 | with open(ann_path, 'r') as f: 41 | data = json.load(f) 42 | for k in data.keys(): 43 | if data[k]['split'] != 1: continue # 1 for training, 2 for validation, 3 for test 44 | ext = os.path.splitext(data[k]['imageURL'])[1] 45 | imageFile = k + ext 46 | assert len(data[k]['questions']) == len(data[k]['answers']) 47 | for q, a in zip(data[k]['questions'], data[k]['answers']): 48 | processed_data.append( 49 | {'question': q, 50 | 'answer': a, 51 | 'image_path': imageFile, 52 | 'image_id': k, 53 | 'title': data[k]['title'], 54 | 'genre': data[k]['genre'], 55 | } 56 | ) 57 | return processed_data 58 | 59 | def __len__(self): 60 | return len(self.data) 61 | 62 | def __getitem__(self, index): 63 | sample = self.data[index] 64 | image = Image.open(os.path.join(self.vis_root, sample['image_path'])).convert("RGB") 65 | image = self.vis_processor(image) 66 | question = self.text_processor(sample["question"]) 67 | answer = self.text_processor(sample["answer"]) 68 | 69 | instruction = random.choice(self.instruction_pool).format(question) 70 | instruction = " {} ".format(instruction) 71 | return { 72 | "image": image, 73 | "instruction_input": instruction, 74 | "answer": answer, 75 | "image_id": sample['image_id'] 76 | } 77 | 78 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/datasets/text_caps.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import pickle 4 | import random 5 | import time 6 | import itertools 7 | 8 | import numpy as np 9 | from PIL import Image 10 | import skimage.io as io 11 | import matplotlib.pyplot as plt 12 | from matplotlib.collections import PatchCollection 13 | from matplotlib.patches import Polygon, Rectangle 14 | from torch.utils.data import Dataset 15 | import webdataset as wds 16 | 17 | from minigpt4.datasets.datasets.base_dataset import BaseDataset 18 | from minigpt4.datasets.datasets.caption_datasets import CaptionDataset 19 | 20 | 21 | 22 | class TextCapDataset(Dataset): 23 | def __init__(self, vis_processor, text_processor, vis_root, ann_path): 24 | """ 25 | vis_root (string): Root directory of images (e.g. coco/images/) 26 | ann_root (string): directory to store the annotation file 27 | """ 28 | self.vis_root = vis_root 29 | 30 | self.vis_processor = vis_processor 31 | self.text_processor = text_processor 32 | 33 | self.instruction_pool = [ 34 | 'Briefly describe this image.', 35 | 'Provide a concise depiction of this image.', 36 | 'Present a short description of this image.', 37 | 'Summarize this image in a few words.', 38 | 'A short image caption:', 39 | 'A short image description:', 40 | 'A photo of ', 41 | 'An image that shows ', 42 | 'Write a short description for the image. 
', 43 | 'Write a description for the photo.', 44 | 'Provide a description of what is presented in the photo.', 45 | 'Briefly describe the content of the image.', 46 | 'Can you briefly explain what you see in the image?', 47 | 'Could you use a few words to describe what you perceive in the photo?', 48 | 'Please provide a short depiction of the picture.', 49 | 'Using language, provide a short account of the image.', 50 | 'Use a few words to illustrate what is happening in the picture.', 51 | ] 52 | 53 | with open(ann_path, 'r') as f: 54 | self.ann = json.load(f) 55 | 56 | 57 | def __len__(self): 58 | return len(self.ann["data"]) 59 | 60 | 61 | def __getitem__(self, index): 62 | info = self.ann["data"][index] 63 | 64 | image_file = '{}.jpg'.format(info['image_id']) 65 | 66 | image_path = os.path.join(self.vis_root, image_file) 67 | image = Image.open(image_path).convert("RGB") 68 | image = self.vis_processor(image) 69 | 70 | caption = info["caption_str"] 71 | caption = self.text_processor(caption) 72 | instruction = " [caption] {} ".format(random.choice(self.instruction_pool)) 73 | return { 74 | "image": image, 75 | "instruction_input": instruction, 76 | "answer": caption, 77 | } 78 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/datasets/unnatural_instruction.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import pickle 4 | import random 5 | import time 6 | import itertools 7 | 8 | import numpy as np 9 | from PIL import Image 10 | import skimage.io as io 11 | import matplotlib.pyplot as plt 12 | from matplotlib.collections import PatchCollection 13 | from matplotlib.patches import Polygon, Rectangle 14 | from torch.utils.data import Dataset 15 | import webdataset as wds 16 | 17 | from minigpt4.datasets.datasets.base_dataset import BaseDataset 18 | from minigpt4.datasets.datasets.caption_datasets import CaptionDataset 19 | 20 | 21 | class UnnaturalDataset(Dataset): 22 | def __init__(self, text_processor, ann_path): 23 | """ 24 | vis_root (string): Root directory of images (e.g. coco/images/) 25 | ann_root (string): directory to store the annotation file 26 | """ 27 | self.text_processor = text_processor 28 | 29 | with open(ann_path, 'r') as f: 30 | self.ann = json.load(f) 31 | 32 | def __len__(self): 33 | return len(self.ann) 34 | 35 | def __getitem__(self, index): 36 | info = self.ann[index]["instances"][0] 37 | instruction = info["instruction_with_input"] 38 | constraints = info["constraints"] 39 | answer = info["output"] 40 | if constraints != None: 41 | instruction = instruction+" "+constraints 42 | 43 | return { 44 | "instruction_input": self.text_processor(instruction), 45 | "answer": self.text_processor(answer), 46 | } 47 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/datasets/datasets/vg_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import pickle 4 | import random 5 | import time 6 | import itertools 7 | 8 | import numpy as np 9 | from PIL import Image 10 | from torch.utils.data import Dataset 11 | from visual_genome import local 12 | 13 | 14 | 15 | 16 | class ReferVisualGenomeDataset(Dataset): 17 | def __init__(self, vis_processor, text_processor, data_dir): 18 | """ 19 | vis_root (string): Root directory of images (e.g. 
coco/images/) 20 | ann_root (string): directory to store the annotation file 21 | """ 22 | self.data_dir = data_dir 23 | 24 | self.vis_processor = vis_processor 25 | self.text_processor = text_processor 26 | 27 | all_regions = local.get_all_region_descriptions(self.data_dir) 28 | all_regions = [region for regions in all_regions for region in regions] 29 | 30 | # follow OFA practice, only regions smaller than 16384 pixels are used for refer 31 | self.regions = [region for region in all_regions if region.width * region.height < 16384] 32 | 33 | 34 | self.instruction_pool = [ 35 | "[refer] {}", 36 | "[refer] give me the location of {}", 37 | "[refer] where is {} ?", 38 | "[refer] from this image, tell me the location of {}", 39 | "[refer] the location of {} is", 40 | "[refer] could you tell me the location for {} ?", 41 | "[refer] where can I locate the {} ?", 42 | ] 43 | 44 | 45 | def __len__(self): 46 | return len(self.regions) 47 | 48 | def preprocess(self, index): 49 | region = self.regions[index] 50 | image_file = region.image.url.split('/')[-2:] 51 | image_path = os.path.join(self.data_dir, *image_file) 52 | image = Image.open(image_path).convert("RGB") 53 | image_orig_size = image.size 54 | image = self.vis_processor(image) 55 | image_new_size = [100,100] 56 | 57 | sample_sentence = region.phrase 58 | refer_sentence = self.text_processor(sample_sentence) 59 | 60 | bbox = [region.x, region.y, region.width, region.height] 61 | 62 | bbox = [ 63 | bbox[0] / image_orig_size[0] * image_new_size[0], 64 | bbox[1] / image_orig_size[1] * image_new_size[1], 65 | (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0], 66 | (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1] 67 | ] 68 | bbox = [int(x) for x in bbox] 69 | bbox = "{{<{}><{}><{}><{}>}}".format(*bbox) 70 | return { 71 | "image": image, 72 | "refer_sentence": refer_sentence, 73 | "bbox": bbox, 74 | "image_id": region.image.id, 75 | } 76 | 77 | def __getitem__(self, index): 78 | data = self.preprocess(index) 79 | instruction = random.choice(self.instruction_pool).format(data['refer_sentence']) 80 | 81 | instruction = " {} ".format(instruction) 82 | 83 | return { 84 | "image": data['image'], 85 | "instruction_input": instruction, 86 | "answer": data['bbox'], 87 | "image_id": data['image_id'], 88 | } 89 | 90 | 91 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/processors/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 
4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | from minigpt4.processors.base_processor import BaseProcessor 9 | from minigpt4.processors.blip_processors import ( 10 | Blip2ImageTrainProcessor, 11 | Blip2ImageEvalProcessor, 12 | BlipCaptionProcessor, 13 | ) 14 | 15 | from minigpt4.common.registry import registry 16 | 17 | __all__ = [ 18 | "BaseProcessor", 19 | "Blip2ImageTrainProcessor", 20 | "Blip2ImageEvalProcessor", 21 | "BlipCaptionProcessor", 22 | ] 23 | 24 | 25 | def load_processor(name, cfg=None): 26 | """ 27 | Example 28 | 29 | >>> processor = load_processor("alpro_video_train", cfg=None) 30 | """ 31 | processor = registry.get_processor_class(name).from_config(cfg) 32 | 33 | return processor 34 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/processors/base_processor.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | from omegaconf import OmegaConf 9 | 10 | 11 | class BaseProcessor: 12 | def __init__(self): 13 | self.transform = lambda x: x 14 | return 15 | 16 | def __call__(self, item): 17 | return self.transform(item) 18 | 19 | @classmethod 20 | def from_config(cls, cfg=None): 21 | return cls() 22 | 23 | def build(self, **kwargs): 24 | cfg = OmegaConf.create(kwargs) 25 | 26 | return self.from_config(cfg) 27 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/runners/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | from minigpt4.runners.runner_base import RunnerBase 9 | 10 | __all__ = ["RunnerBase"] 11 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | from minigpt4.common.registry import registry 9 | from minigpt4.tasks.base_task import BaseTask 10 | from minigpt4.tasks.image_text_pretrain import ImageTextPretrainTask 11 | 12 | 13 | def setup_task(cfg): 14 | assert "task" in cfg.run_cfg, "Task name must be provided." 15 | 16 | task_name = cfg.run_cfg.task 17 | task = registry.get_task_class(task_name).setup_task(cfg=cfg) 18 | assert task is not None, "Task {} not properly registered.".format(task_name) 19 | 20 | return task 21 | 22 | 23 | __all__ = [ 24 | "BaseTask", 25 | "ImageTextPretrainTask", 26 | ] 27 | -------------------------------------------------------------------------------- /MiniGPT-4/minigpt4/tasks/image_text_pretrain.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 
3 | All rights reserved. 4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | from minigpt4.common.registry import registry 9 | from minigpt4.tasks.base_task import BaseTask 10 | 11 | 12 | @registry.register_task("image_text_pretrain") 13 | class ImageTextPretrainTask(BaseTask): 14 | def __init__(self): 15 | super().__init__() 16 | 17 | def evaluation(self, model, data_loader, cuda_enabled=True): 18 | pass 19 | -------------------------------------------------------------------------------- /MiniGPT-4/prompts/alignment.txt: -------------------------------------------------------------------------------- 1 | Describe this image in detail. 2 | Take a look at this image and describe what you notice. 3 | Please provide a detailed description of the picture. 4 | Could you describe the contents of this image for me? -------------------------------------------------------------------------------- /MiniGPT-4/train.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022, salesforce.com, inc. 3 | All rights reserved. 4 | SPDX-License-Identifier: BSD-3-Clause 5 | For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause 6 | """ 7 | 8 | import argparse 9 | import os 10 | import random 11 | 12 | import numpy as np 13 | import torch 14 | import torch.backends.cudnn as cudnn 15 | import wandb 16 | 17 | import minigpt4.tasks as tasks 18 | from minigpt4.common.config import Config 19 | from minigpt4.common.dist_utils import get_rank, init_distributed_mode 20 | from minigpt4.common.logger import setup_logger 21 | from minigpt4.common.optims import ( 22 | LinearWarmupCosineLRScheduler, 23 | LinearWarmupStepLRScheduler, 24 | ) 25 | from minigpt4.common.registry import registry 26 | from minigpt4.common.utils import now 27 | 28 | # imports modules for registration 29 | from minigpt4.datasets.builders import * 30 | from minigpt4.models import * 31 | from minigpt4.processors import * 32 | from minigpt4.runners import * 33 | from minigpt4.tasks import * 34 | 35 | 36 | def parse_args(): 37 | parser = argparse.ArgumentParser(description="Training") 38 | 39 | parser.add_argument("--cfg-path", required=True, help="path to configuration file.") 40 | parser.add_argument( 41 | "--options", 42 | nargs="+", 43 | help="override some settings in the used config, the key-value pair " 44 | "in xxx=yyy format will be merged into config file (deprecate), " 45 | "change to --cfg-options instead.", 46 | ) 47 | args = parser.parse_args() 48 | 49 | return args 50 | 51 | 52 | def setup_seeds(config): 53 | seed = config.run_cfg.seed + get_rank() 54 | 55 | random.seed(seed) 56 | np.random.seed(seed) 57 | torch.manual_seed(seed) 58 | 59 | cudnn.benchmark = False 60 | cudnn.deterministic = True 61 | 62 | 63 | def get_runner_class(cfg): 64 | """ 65 | Get runner class from config. Default to epoch-based runner. 66 | """ 67 | runner_cls = registry.get_runner_class(cfg.run_cfg.get("runner", "runner_base")) 68 | 69 | return runner_cls 70 | 71 | 72 | def main(): 73 | # allow auto-dl completes on main process without timeout when using NCCL backend. 74 | # os.environ["NCCL_BLOCKING_WAIT"] = "1" 75 | 76 | # set before init_distributed_mode() to ensure the same job_id shared across all ranks. 
77 | job_id = now() 78 | args = parse_args() 79 | cfg = Config(args) 80 | 81 | init_distributed_mode(cfg.run_cfg) 82 | setup_seeds(cfg) 83 | 84 | # set after init_distributed_mode() to only log on master. 85 | setup_logger() 86 | cfg.pretty_print() 87 | 88 | task = tasks.setup_task(cfg) 89 | datasets = task.build_datasets(cfg) 90 | model = task.build_model(cfg) 91 | 92 | if cfg.run_cfg.wandb_log: 93 | wandb.login() 94 | wandb.init(project="minigptv", name=cfg.run_cfg.job_name) 95 | wandb.watch(model) 96 | 97 | runner = get_runner_class(cfg)( 98 | cfg=cfg, job_id=job_id, task=task, model=model, datasets=datasets 99 | ) 100 | runner.train() 101 | 102 | 103 | if __name__ == "__main__": 104 | main() 105 | -------------------------------------------------------------------------------- /MiniGPT-4/train_configs/minigpt4_llama2_stage1_pretrain.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt4 3 | model_type: pretrain_llama2 4 | 5 | 6 | datasets: 7 | laion: 8 | batch_size: 64 9 | vis_processor: 10 | train: 11 | name: "blip2_image_train" 12 | image_size: 224 13 | text_processor: 14 | train: 15 | name: "blip_caption" 16 | sample_ratio: 115 17 | cc_sbu: 18 | batch_size: 64 19 | vis_processor: 20 | train: 21 | name: "blip2_image_train" 22 | image_size: 224 23 | text_processor: 24 | train: 25 | name: "blip_caption" 26 | sample_ratio: 14 27 | 28 | 29 | run: 30 | task: image_text_pretrain 31 | # optimizer 32 | lr_sched: "linear_warmup_cosine_lr" 33 | init_lr: 1e-4 34 | min_lr: 8e-5 35 | warmup_lr: 1e-6 36 | 37 | weight_decay: 0.05 38 | max_epoch: 4 39 | num_workers: 4 40 | warmup_steps: 5000 41 | iters_per_epoch: 5000 42 | 43 | seed: 42 44 | output_dir: "output/minigpt4_stage1_pretrain" 45 | 46 | amp: True 47 | resume_ckpt_path: null 48 | 49 | evaluate: False 50 | train_splits: ["train"] 51 | 52 | device: "cuda" 53 | world_size: 1 54 | dist_url: "env://" 55 | distributed: True 56 | 57 | wandb_log: True 58 | job_name: minigpt4_llama2_pretrain -------------------------------------------------------------------------------- /MiniGPT-4/train_configs/minigpt4_llama2_stage2_finetune.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt4 3 | model_type: pretrain_llama2 4 | 5 | max_txt_len: 160 6 | end_sym: "" 7 | prompt_path: "prompts/alignment.txt" 8 | prompt_template: '[INST] {} [/INST] ' 9 | ckpt: '/path/to/stage1/checkpoint/' 10 | 11 | 12 | datasets: 13 | cc_sbu_align: 14 | batch_size: 12 15 | vis_processor: 16 | train: 17 | name: "blip2_image_train" 18 | image_size: 224 19 | text_processor: 20 | train: 21 | name: "blip_caption" 22 | 23 | run: 24 | task: image_text_pretrain 25 | # optimizer 26 | lr_sched: "linear_warmup_cosine_lr" 27 | init_lr: 3e-5 28 | min_lr: 1e-5 29 | warmup_lr: 1e-6 30 | 31 | weight_decay: 0.05 32 | max_epoch: 5 33 | iters_per_epoch: 200 34 | num_workers: 4 35 | warmup_steps: 200 36 | 37 | seed: 42 38 | output_dir: "output/minigpt4_stage2_finetune" 39 | 40 | amp: True 41 | resume_ckpt_path: null 42 | 43 | evaluate: False 44 | train_splits: ["train"] 45 | 46 | device: "cuda" 47 | world_size: 1 48 | dist_url: "env://" 49 | distributed: True 50 | 51 | wandb_log: True 52 | job_name: minigpt4_llama2_finetune -------------------------------------------------------------------------------- /MiniGPT-4/train_configs/minigpt4_stage1_pretrain.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt4 3 | 
model_type: pretrain_vicuna0 4 | 5 | 6 | datasets: 7 | laion: 8 | batch_size: 64 9 | vis_processor: 10 | train: 11 | name: "blip2_image_train" 12 | image_size: 224 13 | text_processor: 14 | train: 15 | name: "blip_caption" 16 | sample_ratio: 115 17 | cc_sbu: 18 | batch_size: 64 19 | vis_processor: 20 | train: 21 | name: "blip2_image_train" 22 | image_size: 224 23 | text_processor: 24 | train: 25 | name: "blip_caption" 26 | sample_ratio: 14 27 | 28 | 29 | run: 30 | task: image_text_pretrain 31 | # optimizer 32 | lr_sched: "linear_warmup_cosine_lr" 33 | init_lr: 1e-4 34 | min_lr: 8e-5 35 | warmup_lr: 1e-6 36 | 37 | weight_decay: 0.05 38 | max_epoch: 4 39 | num_workers: 4 40 | warmup_steps: 5000 41 | iters_per_epoch: 5000 42 | 43 | seed: 42 44 | output_dir: "output/minigpt4_stage1_pretrain" 45 | 46 | amp: True 47 | resume_ckpt_path: null 48 | 49 | evaluate: False 50 | train_splits: ["train"] 51 | 52 | device: "cuda" 53 | world_size: 1 54 | dist_url: "env://" 55 | distributed: True 56 | 57 | wandb_log: True 58 | job_name: minigpt4_pretrain -------------------------------------------------------------------------------- /MiniGPT-4/train_configs/minigpt4_stage2_finetune.yaml: -------------------------------------------------------------------------------- 1 | model: 2 | arch: minigpt4 3 | model_type: pretrain_vicuna0 4 | 5 | max_txt_len: 160 6 | end_sym: "###" 7 | prompt_path: "prompts/alignment.txt" 8 | prompt_template: '###Human: {} ###Assistant: ' 9 | ckpt: '/path/to/stage1/checkpoint/' 10 | 11 | 12 | datasets: 13 | cc_sbu_align: 14 | batch_size: 12 15 | vis_processor: 16 | train: 17 | name: "blip2_image_train" 18 | image_size: 224 19 | text_processor: 20 | train: 21 | name: "blip_caption" 22 | 23 | run: 24 | task: image_text_pretrain 25 | # optimizer 26 | lr_sched: "linear_warmup_cosine_lr" 27 | init_lr: 3e-5 28 | min_lr: 1e-5 29 | warmup_lr: 1e-6 30 | 31 | weight_decay: 0.05 32 | max_epoch: 5 33 | iters_per_epoch: 200 34 | num_workers: 4 35 | warmup_steps: 200 36 | 37 | seed: 42 38 | output_dir: "output/minigpt4_stage2_finetune" 39 | 40 | amp: True 41 | resume_ckpt_path: null 42 | 43 | evaluate: False 44 | train_splits: ["train"] 45 | 46 | device: "cuda" 47 | world_size: 1 48 | dist_url: "env://" 49 | distributed: True 50 | 51 | wandb_log: True 52 | job_name: minigpt4_finetune -------------------------------------------------------------------------------- /showcase.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opendatalab/MLLM-DataEngine/7d5abb67ac1777aa710d687b3337c22b2356c1b3/showcase.png --------------------------------------------------------------------------------
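The stage-1/stage-2 train configs above are consumed by MiniGPT-4/train.py through its required --cfg-path argument; the distributed: True and dist_url: "env://" settings in their run sections imply an env-var based launcher such as torchrun, and train.py's --options flag merges key=value overrides into the loaded config. Note that the placeholder paths in the model configs (ckpt, llama_model) must be set before an actual run. The snippet below is a minimal sketch of inspecting and overriding such a config with OmegaConf outside the training script; the file path assumes the repository layout shown above, and the override keys are illustrative examples drawn from the run section.

# Minimal sketch: load and override a train config with OmegaConf,
# mirroring what train.py's --options flag merges into the loaded config.
from omegaconf import OmegaConf

cfg = OmegaConf.load("MiniGPT-4/train_configs/minigpt4_stage2_finetune.yaml")
overrides = OmegaConf.from_dotlist(["run.wandb_log=False", "run.max_epoch=1"])
cfg = OmegaConf.merge(cfg, overrides)

print(cfg.model.arch)                        # -> minigpt4
print(cfg.run.wandb_log, cfg.run.max_epoch)  # -> False 1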