├── .dockerignore
├── .editorconfig
├── .gitattributes
├── .gitignore
├── LICENSE
├── README.md
├── cog.yaml
├── data_generation
├── README.md
├── error_injection.py
├── image_weakening_utils
│ ├── add_noise.py
│ └── sample.py
└── run_llava_image_weakening.py
├── docs
├── Customize_Component.md
├── Data.md
├── Evaluation.md
├── Finetune_Custom_Data.md
├── Intel.md
├── LLaVA_Bench.md
├── LLaVA_from_LLaMA2.md
├── LoRA.md
├── MODEL_ZOO.md
├── ScienceQA.md
├── Windows.md
└── macOS.md
├── images
├── demo_cli.gif
├── happy.jpg
├── llava_example_cmp.png
├── llava_logo.png
├── llava_v1_5_radar.jpg
└── logo.png
├── llava
├── __init__.py
├── constants.py
├── conversation.py
├── eval
│ ├── bootstrap.py
│ ├── bootstrap_dpo.py
│ ├── eval_gpt_review.py
│ ├── eval_gpt_review_bench.py
│ ├── eval_gpt_review_visual.py
│ ├── eval_multi.sh
│ ├── eval_multi_safeguard.sh
│ ├── eval_pope.py
│ ├── eval_science_qa.py
│ ├── eval_science_qa_gpt4.py
│ ├── eval_science_qa_gpt4_requery.py
│ ├── eval_textvqa.py
│ ├── generate_webpage_data_from_table.py
│ ├── m4c_evaluator.py
│ ├── model_check_logits.py
│ ├── model_qa.py
│ ├── model_vqa.py
│ ├── model_vqa_loader.py
│ ├── model_vqa_mathbench.py
│ ├── model_vqa_mmbench.py
│ ├── model_vqa_mmvet.py
│ ├── model_vqa_qbench.py
│ ├── model_vqa_science.py
│ ├── qa_baseline_gpt35.py
│ ├── reward_score.py
│ ├── robustness_eval.py
│ ├── run_llava.py
│ ├── summarize_gpt_review.py
│ └── webpage
│ │ ├── figures
│ │ ├── alpaca.png
│ │ ├── bard.jpg
│ │ ├── chatgpt.svg
│ │ ├── llama.jpg
│ │ ├── swords_FILL0_wght300_GRAD0_opsz48.svg
│ │ └── vicuna.jpeg
│ │ ├── index.html
│ │ ├── script.js
│ │ └── styles.css
├── mm_utils.py
├── model
│ ├── __init__.py
│ ├── apply_delta.py
│ ├── builder.py
│ ├── consolidate.py
│ ├── language_model
│ │ ├── llava_llama.py
│ │ ├── llava_mpt.py
│ │ └── mpt
│ │ │ ├── adapt_tokenizer.py
│ │ │ ├── attention.py
│ │ │ ├── blocks.py
│ │ │ ├── configuration_mpt.py
│ │ │ ├── custom_embedding.py
│ │ │ ├── flash_attn_triton.py
│ │ │ ├── hf_prefixlm_converter.py
│ │ │ ├── meta_init_context.py
│ │ │ ├── modeling_mpt.py
│ │ │ ├── norm.py
│ │ │ └── param_init_fns.py
│ ├── llava_arch.py
│ ├── make_delta.py
│ ├── multimodal_encoder
│ │ ├── builder.py
│ │ └── clip_encoder.py
│ ├── multimodal_projector
│ │ └── builder.py
│ └── utils.py
├── serve
│ ├── __init__.py
│ ├── cli.py
│ ├── cli_adap.py
│ ├── cli_polite.py
│ ├── cli_reward.py
│ ├── controller.py
│ ├── examples
│ │ ├── extreme_ironing.jpg
│ │ └── waterview.jpg
│ ├── gradio_web_server.py
│ ├── model_worker.py
│ ├── register_worker.py
│ └── test_message.py
├── train
│ ├── bpo_llava.py
│ ├── bpo_llava_flash.py
│ ├── llama_flash_attn_monkey_patch.py
│ ├── llama_xformers_attn_monkey_patch.py
│ ├── llava_trainer.py
│ ├── train.py
│ ├── train_mem.py
│ └── train_xformers.py
└── utils.py
├── main.py
├── playground
└── data
│ └── eval
│ └── mmvet_images
│ ├── v1_0.png
│ ├── v1_1.png
│ ├── v1_100.png
│ ├── v1_101.png
│ ├── v1_102.png
│ ├── v1_103.png
│ ├── v1_104.png
│ ├── v1_105.png
│ ├── v1_106.jpg
│ ├── v1_107.jpg
│ ├── v1_108.jpg
│ ├── v1_109.jpg
│ ├── v1_11.jpg
│ ├── v1_110.jpg
│ ├── v1_111.jpg
│ ├── v1_112.jpg
│ ├── v1_113.jpg
│ ├── v1_114.jpg
│ ├── v1_115.jpg
│ ├── v1_116.jpg
│ ├── v1_117.jpg
│ ├── v1_118.jpg
│ ├── v1_119.jpg
│ ├── v1_120.jpg
│ ├── v1_121.jpg
│ ├── v1_122.jpg
│ ├── v1_123.jpg
│ ├── v1_124.jpg
│ ├── v1_125.jpg
│ ├── v1_126.jpg
│ ├── v1_127.jpg
│ ├── v1_128.jpg
│ ├── v1_129.jpg
│ ├── v1_13.jpg
│ ├── v1_130.jpg
│ ├── v1_131.jpg
│ ├── v1_132.jpg
│ ├── v1_133.jpg
│ ├── v1_134.jpg
│ ├── v1_135.jpg
│ ├── v1_136.jpg
│ ├── v1_137.jpg
│ ├── v1_138.jpg
│ ├── v1_139.jpg
│ ├── v1_140.jpg
│ ├── v1_141.jpg
│ ├── v1_142.jpg
│ ├── v1_143.jpg
│ ├── v1_144.jpg
│ ├── v1_145.jpg
│ ├── v1_146.jpg
│ ├── v1_147.jpg
│ ├── v1_148.jpg
│ ├── v1_149.jpg
│ ├── v1_15.png
│ ├── v1_150.jpg
│ ├── v1_151.jpg
│ ├── v1_152.jpg
│ ├── v1_153.jpg
│ ├── v1_154.png
│ ├── v1_155.png
│ ├── v1_156.jpg
│ ├── v1_157.jpg
│ ├── v1_158.jpg
│ ├── v1_159.jpg
│ ├── v1_16.jpg
│ ├── v1_160.jpg
│ ├── v1_161.jpg
│ ├── v1_162.jpg
│ ├── v1_163.jpg
│ ├── v1_164.jpg
│ ├── v1_165.jpg
│ ├── v1_166.jpg
│ ├── v1_167.jpg
│ ├── v1_168.jpg
│ ├── v1_169.jpg
│ ├── v1_17.jpg
│ ├── v1_170.jpg
│ ├── v1_171.jpg
│ ├── v1_172.jpg
│ ├── v1_173.jpg
│ ├── v1_174.jpg
│ ├── v1_175.jpg
│ ├── v1_176.jpg
│ ├── v1_177.jpg
│ ├── v1_178.jpg
│ ├── v1_179.jpg
│ ├── v1_18.jpg
│ ├── v1_180.jpg
│ ├── v1_181.jpg
│ ├── v1_182.jpg
│ ├── v1_183.jpg
│ ├── v1_184.jpg
│ ├── v1_185.jpg
│ ├── v1_186.jpg
│ ├── v1_187.jpg
│ ├── v1_188.jpg
│ ├── v1_189.jpg
│ ├── v1_190.jpg
│ ├── v1_191.jpg
│ ├── v1_192.jpg
│ ├── v1_193.jpg
│ ├── v1_194.jpg
│ ├── v1_195.jpg
│ ├── v1_196.jpg
│ ├── v1_197.jpg
│ ├── v1_198.jpg
│ ├── v1_199.jpg
│ ├── v1_20.jpg
│ ├── v1_200.jpg
│ ├── v1_201.jpg
│ ├── v1_202.jpg
│ ├── v1_203.jpg
│ ├── v1_204.jpg
│ ├── v1_205.jpg
│ ├── v1_206.jpg
│ ├── v1_207.jpg
│ ├── v1_208.png
│ ├── v1_209.png
│ ├── v1_21.jpg
│ ├── v1_210.png
│ ├── v1_211.png
│ ├── v1_212.png
│ ├── v1_213.jpg
│ ├── v1_214.png
│ ├── v1_215.jpg
│ ├── v1_216.png
│ ├── v1_217.png
│ ├── v1_23.png
│ ├── v1_25.png
│ ├── v1_27.png
│ ├── v1_28.jpg
│ ├── v1_3.png
│ ├── v1_30.jpg
│ ├── v1_31.png
│ ├── v1_32.png
│ ├── v1_33.png
│ ├── v1_34.jpg
│ ├── v1_35.png
│ ├── v1_36.jpg
│ ├── v1_37.jpg
│ ├── v1_38.jpg
│ ├── v1_39.jpg
│ ├── v1_40.png
│ ├── v1_41.png
│ ├── v1_42.png
│ ├── v1_44.png
│ ├── v1_46.png
│ ├── v1_48.jpg
│ ├── v1_49.jpg
│ ├── v1_5.png
│ ├── v1_50.jpg
│ ├── v1_52.png
│ ├── v1_53.png
│ ├── v1_54.png
│ ├── v1_55.png
│ ├── v1_56.png
│ ├── v1_57.jpg
│ ├── v1_58.png
│ ├── v1_60.png
│ ├── v1_62.png
│ ├── v1_64.png
│ ├── v1_65.png
│ ├── v1_66.jpg
│ ├── v1_67.png
│ ├── v1_68.png
│ ├── v1_69.jpg
│ ├── v1_7.png
│ ├── v1_70.png
│ ├── v1_71.jpg
│ ├── v1_72.png
│ ├── v1_73.jpg
│ ├── v1_74.jpg
│ ├── v1_75.jpg
│ ├── v1_76.png
│ ├── v1_77.jpg
│ ├── v1_78.jpg
│ ├── v1_79.jpg
│ ├── v1_8.png
│ ├── v1_80.jpg
│ ├── v1_81.jpg
│ ├── v1_82.png
│ ├── v1_83.png
│ ├── v1_84.png
│ ├── v1_85.jpg
│ ├── v1_86.jpg
│ ├── v1_87.jpg
│ ├── v1_88.jpg
│ ├── v1_89.jpg
│ ├── v1_9.png
│ ├── v1_90.jpg
│ ├── v1_91.jpg
│ ├── v1_92.jpg
│ ├── v1_93.jpg
│ ├── v1_94.jpg
│ ├── v1_95.jpg
│ ├── v1_96.jpg
│ ├── v1_97.jpg
│ ├── v1_98.jpg
│ └── v1_99.png
├── predict.py
├── pyproject.toml
├── qwen
├── README.md
├── dpo_config
│ └── example.yaml
├── launch_dpo.py
├── requirements.txt
└── run_dpo.py
└── scripts
├── concatenate_json.py
├── convert_gqa_for_eval.py
├── convert_mmbench_for_submission.py
├── convert_mmvet_for_eval.py
├── convert_seed_for_submission.py
├── convert_sqa_to_llava.py
├── convert_sqa_to_llava_base_prompt.py
├── convert_vizwiz_for_submission.py
├── convert_vqav2_for_submission.py
├── eval_mmvet.h
├── extract_mm_projector.py
├── finetune.sh
├── finetune_bpo.sh
├── finetune_bpo_flash.sh
├── finetune_full_schedule.sh
├── finetune_lora.sh
├── finetune_qlora.sh
├── finetune_sft.sh
├── finetune_sqa.sh
├── merge_lora_weights.py
├── merge_lora_weights_reward.py
├── pretrain.sh
├── pretrain_xformers.sh
├── sqa_eval_batch.sh
├── sqa_eval_gather.sh
└── v1_5
├── eval
├── bootstrap_dpo_multi.sh
├── bootstrap_multi.sh
├── eval_multi.sh
├── eval_multi_lora.sh
├── eval_multi_math.sh
├── eval_multi_pope.sh
├── eval_multi_pope_full.sh
├── gqa.sh
├── llavabench.sh
├── mmbench.sh
├── mmbench_cn.sh
├── mme.sh
├── mmvet.sh
├── pope.sh
├── qbench.sh
├── qbench_zh.sh
├── score_multi.sh
├── seed.sh
├── sqa.sh
├── textvqa.sh
├── vizwiz.sh
└── vqav2.sh
├── finetune.sh
├── finetune_lora.sh
├── finetune_task.sh
├── finetune_task_lora.sh
└── pretrain.sh
/.dockerignore:
--------------------------------------------------------------------------------
1 | # The .dockerignore file excludes files from the container build process.
2 | #
3 | # https://docs.docker.com/engine/reference/builder/#dockerignore-file
4 |
5 | # Exclude Git files
6 | .git
7 | .github
8 | .gitignore
9 |
10 | # Exclude Python cache files
11 | __pycache__
12 | .mypy_cache
13 | .pytest_cache
14 | .ruff_cache
15 |
16 | # Exclude Python virtual environment
17 | /venv
18 |
19 | # Exclude some weights
20 | /openai
21 | /liuhaotian
22 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 |
3 | # Unix-style newlines with a newline ending every file
4 | [*]
5 | end_of_line = lf
6 | insert_final_newline = true
7 | trim_trailing_whitespace = true
8 | charset = utf-8
9 |
10 | # 4 space indentation
11 | [*.{py,json}]
12 | indent_style = space
13 | indent_size = 4
14 |
15 | # 2 space indentation
16 | [*.{md,sh,yaml,yml}]
17 | indent_style = space
18 | indent_size = 2
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # https://git-scm.com/docs/gitattributes
2 |
3 | # Set the default behavior, in case people don't have core.autocrlf set.
4 | # https://git-scm.com/docs/gitattributes#_end_of_line_conversion
5 | * text=auto
6 |
7 | # common python attributes, taken from https://github.com/alexkaratarakis/gitattributes/blob/710900479a2bedeec7003d381719521ffbb18bf8/Python.gitattributes
8 | # Source files
9 | # ============
10 | *.pxd text diff=python
11 | *.py text diff=python
12 | *.py3 text diff=python
13 | *.pyw text diff=python
14 | *.pyx text diff=python
15 | *.pyz text diff=python
16 | *.pyi text diff=python
17 |
18 | # Binary files
19 | # ============
20 | *.db binary
21 | *.p binary
22 | *.pkl binary
23 | *.pickle binary
24 | *.pyc binary export-ignore
25 | *.pyo binary export-ignore
26 | *.pyd binary
27 |
28 | # Jupyter notebook
29 | *.ipynb text eol=lf
30 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Python
2 | __pycache__
3 | *.pyc
4 | *.egg-info
5 | dist
6 |
7 | # Log
8 | *.log
9 | *.log.*
10 | *.json
11 | *.jsonl
12 |
13 | # Data
14 | !**/alpaca-data-conversation.json
15 |
16 | # Editor
17 | .idea
18 | *.swp
19 |
20 | # Other
21 | .DS_Store
22 | wandb
23 | output
24 |
25 | checkpoints
26 | ckpts*
27 |
28 | .ipynb_checkpoints
29 | *.ipynb
30 |
31 | # DevContainer
32 | !.devcontainer/*
33 |
34 | # Demo
35 | serve_images/
36 |
37 | # debug
38 | debug/
39 | results/
40 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
5 |
6 | This repository contains the code for the paper titled "Strengthening Multimodal Large Language Model with Bootstrapped Preference Optimization". [[Link to our paper](https://arxiv.org/abs/2403.08730)]
7 | ## Install Packages
8 |
9 | ```
10 |
11 | conda create -n bpo python=3.10 -y
12 |
13 | conda activate bpo
14 |
15 | pip install -e .
16 |
17 | ```
18 | Install flash attention for efficient training
19 |
20 | ```
21 | pip install -e ".[train]"
22 |
23 | pip install flash-attn --no-build-isolation
24 | ```
25 | ## Training data
26 | Download ShareGPT4V from [here](https://huggingface.co/datasets/Lin-Chen/ShareGPT4V)
27 |
28 | Download COCO from [here](https://cocodataset.org/#home)
29 |
30 | Download dataset annotation from [here](https://huggingface.co/datasets/renjiepi/BPO_Instruct)
31 |
32 | Extract data from ShareGPT4V and organize the images as follows:
33 |
34 | ```
35 | Image_root
36 | ├── coco/
37 | ├──train2017/
38 | ├── llava/
39 | ├──llava_pretrain/
40 | ├── sam/
41 | ├── share_textvqa/
42 | ├──images/
43 | ├── web-celebrity/
44 | ├──images/
45 | ├── web-landmark/
46 | ├──images/
47 | ├── wikiart/
48 | ├──images/
49 | ```
50 |
51 | ## Training
52 | ### Training BPO
53 | ```
54 | bash scripts/finetune_bpo.sh
55 | ```
56 | ### Training BPO with flash attention
57 | ```
58 | bash scripts/finetune_bpo_flash.sh
59 | ```
60 | ## Acknowledgement
61 | The project is built on top of the amazing multimodal large language model [LLaVA](https://github.com/haotian-liu/LLaVA), the RLHF package [trl](https://github.com/huggingface/trl), the multimodal DPO framework [Silkie](https://github.com/vlf-silkie/VLFeedback), and visual contrastive decoding [VCD](https://github.com/DAMO-NLP-SG/VCD).
62 | Thanks for their great work!
63 |
64 |
65 | If you find our work useful for your research or applications, please cite using this BibTeX:
66 | ```bibtex
67 | @misc{pi2024strengthening,
68 | title={Strengthening Multimodal Large Language Model with Bootstrapped Preference Optimization},
69 | author={Renjie Pi and Tianyang Han and Wei Xiong and Jipeng Zhang and Runtao Liu and Rui Pan and Tong Zhang},
70 | year={2024},
71 | eprint={2403.08730},
72 | archivePrefix={arXiv},
73 | primaryClass={cs.CL}
74 | }
75 | ```
76 |
--------------------------------------------------------------------------------
/cog.yaml:
--------------------------------------------------------------------------------
1 | # Configuration for Cog ⚙️
2 | # Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md
3 |
4 | build:
5 | gpu: true
6 |
7 | python_version: "3.11"
8 |
9 | python_packages:
10 | - "torch==2.0.1"
11 | - "accelerate==0.21.0"
12 | - "bitsandbytes==0.41.0"
13 | - "deepspeed==0.9.5"
14 | - "einops-exts==0.0.4"
15 | - "einops==0.6.1"
16 | - "gradio==3.35.2"
17 | - "gradio_client==0.2.9"
18 | - "httpx==0.24.0"
19 | - "markdown2==2.4.10"
20 | - "numpy==1.26.0"
21 | - "peft==0.4.0"
22 | - "scikit-learn==1.2.2"
23 | - "sentencepiece==0.1.99"
24 | - "shortuuid==1.0.11"
25 | - "timm==0.6.13"
26 | - "tokenizers==0.13.3"
27 | - "torch==2.0.1"
28 | - "torchvision==0.15.2"
29 | - "transformers==4.31.0"
30 | - "wandb==0.15.12"
31 | - "wavedrom==2.0.3.post3"
32 | - "Pygments==2.16.1"
33 | run:
34 | - curl -o /usr/local/bin/pget -L "https://github.com/replicate/pget/releases/download/v0.0.3/pget" && chmod +x /usr/local/bin/pget
35 |
36 | # predict.py defines how predictions are run on your model
37 | predict: "predict.py:Predictor"
38 |
--------------------------------------------------------------------------------
/data_generation/README.md:
--------------------------------------------------------------------------------
1 | # Data Generation for BPO
2 | - For image weakening, inspired by [VCD](https://github.com/DAMO-NLP-SG/VCD), we add noise to the image features so that the model generates negative responses driven by its pretraining bias.
3 | - For error injection, we use the same pretrained LLM as the base model of the MLLM to directly inject erroneous concepts.
4 |
5 |
6 | ## Install environments
7 | For error injection, please install VLLM to speed up inference.
8 | ```
9 | pip install vllm
10 | ```
11 | If the installation does not work, consider building the package from source.
12 |
13 | ## Image weakening
14 | ```bash
15 | cd data_generation
16 | python run_llava_image_weakening.py --model-path liuhaotian/llava-v1.5-13b --image_file YOUR_IMAGE_PATH --query YOUR_JSON_PATH --save_path OUTPUT_PATH
17 | ```
18 |
19 | `YOUR_JSON_PATH` should point to a JSON file containing a list of entries in the following format:
20 | ```
21 | [
22 | {'prompt': 'What do you see happening in this image?\n',
23 | 'image': 'coco/train2017/000000000009.jpg',
24 | 'completions': [{'score': 1,
25 | 'response': "xxxx",
26 | 'type': 'gt'}]},
27 |
28 | {'prompt': 'question\n',
29 | 'image': 'coco/train2017/0000000000010.jpg',
30 | 'completions': [{'score': 1,
31 | 'response': "xxxx",
32 | 'type': 'gt'}]},
33 | .......
34 | ]
35 | ```
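For convenience, here is a minimal sketch of how such a query file could be assembled with Python (the `make_entry` helper and the `queries.json` output name are illustrative, not part of this repository):
```python
import json

def make_entry(prompt, image_rel_path, gt_response):
    # One entry in the format above: a prompt, an image path relative to
    # YOUR_IMAGE_PATH, and the ground-truth completion with score 1.
    return {
        "prompt": prompt + "\n",
        "image": image_rel_path,
        "completions": [
            {"score": 1, "response": gt_response, "type": "gt"},
        ],
    }

entries = [
    make_entry("What do you see happening in this image?",
               "coco/train2017/000000000009.jpg",
               "A ground-truth caption for the image."),
]

with open("queries.json", "w") as f:
    json.dump(entries, f, indent=2)
```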
36 | Note: Image weakening currently runs on the generic inference pipeline. We will consider integrating it with an MLLM acceleration framework, e.g., https://github.com/InternLM/lmdeploy.
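As a rough illustration of the weakening operation itself, the snippet below applies `add_diffusion_noise` from `image_weakening_utils/add_noise.py` to an image tensor (run it from the `data_generation` directory; the random tensor and the noise step of 500 are placeholders for a real CLIP-preprocessed image and your chosen noise level):
```python
import torch
from image_weakening_utils.add_noise import add_diffusion_noise

# Stand-in for a CLIP-preprocessed image tensor of shape (3, 336, 336).
image_tensor = torch.randn(3, 336, 336)

# Larger noise steps (0-999) destroy more visual information, so the model
# falls back on its language prior when responding to the weakened image.
weakened = add_diffusion_noise(image_tensor, noise_step=500)
```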
37 |
38 | ## Error injection
39 | ```bash
40 | cd data_generation
41 | python error_injection.py --model_name_or_path PATH-TO-LLM --dataset_path PATH-TO-SFT-DATA --output_result_path PATH-TO-PREFERENCE-DATA
42 | ```
43 |
44 |
--------------------------------------------------------------------------------
/data_generation/image_weakening_utils/add_noise.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | def add_diffusion_noise(image_tensor, noise_step):
4 | num_steps = 1000 # Number of diffusion steps
5 |
6 | # decide beta in each step
7 | betas = torch.linspace(-6,6,num_steps)
8 | betas = torch.sigmoid(betas) * (0.5e-2 - 1e-5) + 1e-5
9 |
10 | # decide alphas in each step
11 | alphas = 1 - betas
12 | alphas_prod = torch.cumprod(alphas, dim=0)
13 | alphas_prod_p = torch.cat([torch.tensor([1]).float(), alphas_prod[:-1]],0) # p for previous
14 | alphas_bar_sqrt = torch.sqrt(alphas_prod)
15 | one_minus_alphas_bar_log = torch.log(1 - alphas_prod)
16 | one_minus_alphas_bar_sqrt = torch.sqrt(1 - alphas_prod)
17 |
18 | def q_x(x_0,t):
19 | noise = torch.randn_like(x_0)
20 | alphas_t = alphas_bar_sqrt[t]
21 | alphas_1_m_t = one_minus_alphas_bar_sqrt[t]
22 | return (alphas_t*x_0 + alphas_1_m_t*noise)
23 |
24 |     noise_delta = int(noise_step)  # noise step ranges from 0 to 999
25 |     noisy_image = image_tensor.clone()
26 |     image_tensor_cd = q_x(noisy_image, noise_delta)
27 |
28 | return image_tensor_cd
29 |
30 |
--------------------------------------------------------------------------------
/docs/Customize_Component.md:
--------------------------------------------------------------------------------
1 | # Customize Components in LLaVA
2 |
3 | This is an initial guide on how to replace the LLMs, visual encoders, etc. with your choice of components.
4 |
5 | ## LLM
6 |
7 | It is quite simple to swap out LLaMA for any other LLM. You can refer to our implementation of [`llava_llama.py`](https://raw.githubusercontent.com/haotian-liu/LLaVA/main/llava/model/language_model/llava_llama.py) for an example of how to replace the LLM.
8 |
9 | Although it may seem that it still needs ~100 lines of code, most of them are copied from the original `llama.py` from HF. The only difference is the insertion of a few lines for processing the multimodal inputs.
10 |
11 | In the `forward` function, you can see that we call `self.prepare_inputs_labels_for_multimodal` to process the multimodal inputs. This function is defined in `LlavaMetaForCausalLM`, and you just need to insert it into the `forward` function of your LLM.
12 |
13 | In the `prepare_inputs_for_generation` function, you can see that we add `images` to the `model_inputs`. This is because we need to pass the images to the LLM during generation.
14 |
15 | These are basically all the changes you need to make to replace the LLM.
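
To make these two touch points concrete, below is a rough, simplified sketch in the spirit of `llava_llama.py`; it omits the config and model wiring and may not match newer signatures exactly, so treat it as an outline rather than a drop-in implementation:
```python
from transformers import LlamaForCausalLM
from llava.model.llava_arch import LlavaMetaForCausalLM


class MyLlavaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
    def forward(self, input_ids=None, attention_mask=None, past_key_values=None,
                inputs_embeds=None, labels=None, images=None, **kwargs):
        # The multimodal-specific step: fold image features into the input
        # embeddings, then fall back to the unmodified LLM forward pass.
        input_ids, attention_mask, past_key_values, inputs_embeds, labels = \
            self.prepare_inputs_labels_for_multimodal(
                input_ids, attention_mask, past_key_values, labels, images)
        return super().forward(
            input_ids=input_ids, attention_mask=attention_mask,
            past_key_values=past_key_values, inputs_embeds=inputs_embeds,
            labels=labels, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None,
                                      inputs_embeds=None, **kwargs):
        images = kwargs.pop("images", None)
        model_inputs = super().prepare_inputs_for_generation(
            input_ids, past_key_values=past_key_values,
            inputs_embeds=inputs_embeds, **kwargs)
        # Keep the images in model_inputs so every generation step can see them.
        model_inputs["images"] = images
        return model_inputs
```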
16 |
17 | ## Visual Encoder
18 |
19 | You can check out [`clip_encoder.py`](https://github.com/haotian-liu/LLaVA/blob/main/llava/model/multimodal_encoder/clip_encoder.py) on how we implement the CLIP visual encoder.
20 |
21 |
--------------------------------------------------------------------------------
/docs/Data.md:
--------------------------------------------------------------------------------
1 | ## Data
2 |
3 | | Data file name | Size |
4 | | --- | ---: |
5 | | [llava_instruct_150k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_instruct_150k.json) | 229 MB |
6 | | [llava_instruct_80k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_instruct_80k.json) | 229 MB |
7 | | [conversation_58k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/conversation_58k.json) | 126 MB |
8 | | [detail_23k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/detail_23k.json) | 20.5 MB |
9 | | [complex_reasoning_77k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/complex_reasoning_77k.json) | 79.6 MB |
10 |
11 | ### Pretraining Dataset
12 | The pretraining dataset used in this release is a subset of the CC-3M dataset, filtered for a more balanced concept coverage distribution. Please see [here](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K) for a detailed description of the dataset structure and how to download the images.
13 |
14 | If you already have the CC-3M dataset on your disk, the image names follow this format: `GCC_train_000000000.jpg`. You may edit the `image` field correspondingly if necessary.
15 |
16 | | Data | Chat File | Meta Data | Size |
17 | | --- | --- | --- | ---: |
18 | | CC-3M Concept-balanced 595K | [chat.json](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K/blob/main/chat.json) | [metadata.json](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K/blob/main/metadata.json) | 211 MB
19 | | LAION/CC/SBU BLIP-Caption Concept-balanced 558K | [blip_laion_cc_sbu_558k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain/blob/main/blip_laion_cc_sbu_558k.json) | [metadata.json](#) | 181 MB
20 |
21 | **Important notice**: Upon request from the community, as ~15% of the images in the original CC-3M dataset are no longer accessible, we upload [`images.zip`](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K/blob/main/images.zip) to help the research community better reproduce our work. It must not be used for any other purpose. The use of these images must comply with the CC-3M license. This may be taken down at any time when requested by the original CC-3M dataset owner or the owners of the referenced images.
22 |
23 | ### GPT-4 Prompts
24 |
25 | We provide our prompts and few-shot samples for GPT-4 queries, to better facilitate research in this domain. Please check out the [`prompts`](https://github.com/haotian-liu/LLaVA/tree/main/playground/data/prompts) folder for three kinds of questions: conversation, detail description, and complex reasoning.
26 |
27 | They are organized in a format of `system_message.txt` for system message, pairs of `abc_caps.txt` for few-shot sample user input, and `abc_conv.txt` for few-shot sample reference output.
28 |
29 | Note that you may find them in different formats. For example, `conversation` is in `jsonl`, and detail description is answer-only. The selected format in our preliminary experiments works slightly better than a limited set of alternatives that we tried: `jsonl`, a more natural format, and answer-only. If interested, you may try other variants or conduct a more careful study of this. Contributions are welcome!
30 |
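As a loose sketch of how these files could be stitched into a single GPT-4 query (the `conversation` subfolder name and the final query content are assumptions for illustration, following the upstream LLaVA prompts folder):
```python
from pathlib import Path

# Assumed location of one prompt set; adjust to the prompts folder you downloaded.
prompt_dir = Path("playground/data/prompts/conversation")

# System message followed by alternating few-shot pairs: *_caps.txt (user input)
# and the matching *_conv.txt (reference output).
messages = [{"role": "system", "content": (prompt_dir / "system_message.txt").read_text()}]
for caps_file in sorted(prompt_dir.glob("*_caps.txt")):
    conv_file = caps_file.with_name(caps_file.name.replace("_caps.txt", "_conv.txt"))
    messages.append({"role": "user", "content": caps_file.read_text()})
    messages.append({"role": "assistant", "content": conv_file.read_text()})

# Append the captions (and boxes) of the new image as the actual query.
messages.append({"role": "user", "content": "captions and boxes describing a new image"})
```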
--------------------------------------------------------------------------------
/docs/Finetune_Custom_Data.md:
--------------------------------------------------------------------------------
1 | # Finetune LLaVA on Custom Datasets
2 |
3 | ## Dataset Format
4 |
5 | Convert your data to a JSON file containing a list of all samples. Each sample's metadata should contain `id` (a unique identifier), `image` (the path to the image), and `conversations` (the conversation data between human and AI).
6 |
7 | A sample JSON for finetuning LLaVA for generating tag-style captions for Stable Diffusion:
8 |
9 | ```json
10 | [
11 | {
12 | "id": "997bb945-628d-4724-b370-b84de974a19f",
13 | "image": "part-000001/997bb945-628d-4724-b370-b84de974a19f.jpg",
14 | "conversations": [
15 | {
16 | "from": "human",
17 |             "value": "<image>\nWrite a prompt for Stable Diffusion to generate this image."
18 | },
19 | {
20 | "from": "gpt",
21 | "value": "a beautiful painting of chernobyl by nekro, pascal blanche, john harris, greg rutkowski, sin jong hun, moebius, simon stalenhag. in style of cg art. ray tracing. cel shading. hyper detailed. realistic. ue 5. maya. octane render. "
22 | },
23 | ]
24 | },
25 | ...
26 | ]
27 | ```
28 |
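As an illustrative sketch, custom records can be wrapped into this format with a few lines of Python (the `to_llava_sample` helper and the file names are hypothetical):
```python
import json
import uuid

def to_llava_sample(image_path, question, answer):
    # Wrap one (image, question, answer) record in the format shown above.
    return {
        "id": str(uuid.uuid4()),
        "image": image_path,
        "conversations": [
            {"from": "human", "value": "<image>\n" + question},
            {"from": "gpt", "value": answer},
        ],
    }

records = [
    ("part-000001/example.jpg",
     "Write a prompt for Stable Diffusion to generate this image.",
     "a beautiful painting ..."),
]

with open("custom_train.json", "w") as f:
    json.dump([to_llava_sample(*r) for r in records], f, indent=2)
```
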
29 | ## Command
30 |
31 | If you have limited task-specific data, we recommend finetuning from LLaVA checkpoints with LoRA following this [script](https://github.com/haotian-liu/LLaVA/blob/main/scripts/v1_5/finetune_task_lora.sh).
32 |
33 | If the amount of the task-specific data is sufficient, you can also finetune from LLaVA checkpoints with full-model finetuning following this [script](https://github.com/haotian-liu/LLaVA/blob/main/scripts/v1_5/finetune_task.sh).
34 |
35 | You may need to adjust the hyperparameters to fit each specific dataset and your hardware constraint.
36 |
37 |
38 |
--------------------------------------------------------------------------------
/docs/Intel.md:
--------------------------------------------------------------------------------
1 | # Intel Platforms
2 |
3 | * Support [Intel GPU Max Series](https://www.intel.com/content/www/us/en/products/details/discrete-gpus/data-center-gpu/max-series.html)
4 | * Support [Intel CPU Sapphire Rapids](https://ark.intel.com/content/www/us/en/ark/products/codename/126212/products-formerly-sapphire-rapids.html)
5 | * Based on [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch)
6 |
7 | More details in [**intel branch**](https://github.com/haotian-liu/LLaVA/tree/intel/docs/intel)
8 |
--------------------------------------------------------------------------------
/docs/LLaVA_from_LLaMA2.md:
--------------------------------------------------------------------------------
1 | # LLaVA (based on Llama 2 LLM, Preview)
2 |
3 | *NOTE: This is a technical preview. We are still running hyperparameter search, and will release the final model soon. If you'd like to contribute to this, please contact us.*
4 |
5 | :llama: **-Introduction-** [Llama 2 is an open-source LLM released by Meta AI](https://about.fb.com/news/2023/07/llama-2/) on July 18, 2023. Compared with its early version [Llama 1](https://ai.meta.com/blog/large-language-model-llama-meta-ai/), Llama 2 offers ***stronger language performance***, a ***longer context window***, and, importantly, a ***commercially usable*** license! While Llama 2 is changing the LLM market landscape in the language space, its multimodal ability remains unknown. We quickly developed the LLaVA variant based on the latest Llama 2 checkpoints and release it to the community for public use.
6 |
7 | You need to apply for and download the latest Llama 2 checkpoints to start your own training (apply [here](https://ai.meta.com/resources/models-and-libraries/llama-downloads/))
8 |
9 |
10 | ## Training
11 |
12 | Please checkout [`pretrain.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/pretrain.sh), [`finetune.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune.sh), [`finetune_lora.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune_lora.sh).
13 |
14 | ## LLaVA (based on Llama 2), What is different?
15 |
16 | :volcano: How is the new LLaVA based on Llama 2 different from the one based on Llama 1? The differences in the training process are described below:
17 | - **Pre-training**. The pre-trained base LLM is changed from Llama 1 to Llama 2
18 | - **Language instruction-tuning**. The previous LLaVA model starts with Vicuna, which is instruction-tuned on ShareGPT data from Llama 1; the new LLaVA model starts with Llama 2 Chat, which is an instruction-tuned checkpoint on dialogue data from Llama 2.
19 | - **Multimodal instruction-tuning**. The same LLaVA-Lightning process is applied.
20 |
21 |
22 | ### Results
23 |
24 | - Llama 2 is better at following role-playing instructions, but it fails to follow translation instructions.
25 | - The quantitative evaluation on [LLaVA-Bench](https://github.com/haotian-liu/LLaVA/blob/main/docs/LLaVA_Bench.md) demonstrates on-par performance between Llama 2 and Llama 1 in LLaVA's multimodal chat ability.
26 |
27 |
28 |
29 |
30 |
--------------------------------------------------------------------------------
/docs/ScienceQA.md:
--------------------------------------------------------------------------------
1 | ### ScienceQA
2 |
3 | #### Prepare Data
4 | 1. Please see ScienceQA [repo](https://github.com/lupantech/ScienceQA) for setting up the dataset.
5 | 2. Generate ScienceQA dataset for LLaVA conversation-style format.
6 |
7 | ```Shell
8 | python scripts/convert_sqa_to_llava.py \
9 | convert_to_llava \
10 | --base-dir /path/to/ScienceQA/data/scienceqa \
11 | --prompt-format "QCM-LEA" \
12 | --split {train,val,minival,test,minitest}
13 | ```
14 |
15 | #### Training
16 |
17 | 1. Pretraining
18 |
19 | You can download our pretrained projector weights from our [Model Zoo](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md), or train your own projector weights using [`pretrain.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/pretrain.sh).
20 |
21 | 2. Finetuning
22 |
23 | See [`finetune_sqa.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune_sqa.sh).
24 |
25 | #### Evaluation
26 |
27 | 1. Multiple-GPU inference
28 | You may evaluate this with multiple GPUs, and concatenate the generated jsonl files. Please refer to our script for [batch evaluation](https://github.com/haotian-liu/LLaVA/blob/main/scripts/sqa_eval_batch.sh) and [results gathering](https://github.com/haotian-liu/LLaVA/blob/main/scripts/sqa_eval_gather.sh).
29 |
30 | 2. Single-GPU inference
31 |
32 | (a) Generate LLaVA responses on ScienceQA dataset
33 |
34 | ```Shell
35 | python -m llava.eval.model_vqa_science \
36 | --model-path liuhaotian/llava-lcs558k-scienceqa-vicuna-13b-v1.3 \
37 | --question-file /path/to/ScienceQA/data/scienceqa/llava_test_QCM-LEA.json \
38 | --image-folder /path/to/ScienceQA/data/scienceqa/images/test \
39 | --answers-file vqa/results/ScienceQA/test_llava-13b.jsonl \
40 | --conv-mode llava_v1
41 | ```
42 |
43 | (b) Evaluate the generated responses
44 |
45 | ```Shell
46 | python eval_science_qa.py \
47 | --base-dir /path/to/ScienceQA/data/scienceqa \
48 | --result-file vqa/results/ScienceQA/test_llava-13b.jsonl \
49 | --output-file vqa/results/ScienceQA/test_llava-13b_output.json \
50 |     --output-result vqa/results/ScienceQA/test_llava-13b_result.json
51 | ```
52 |
53 | For reference, we attach our prediction file [`test_sqa_llava_lcs_558k_sqa_12e_vicuna_v1_3_13b.json`](https://github.com/haotian-liu/LLaVA/blob/main/llava/eval/table/results/test_sqa_llava_lcs_558k_sqa_12e_vicuna_v1_3_13b.json) and [`test_sqa_llava_13b_v0.json`](https://github.com/haotian-liu/LLaVA/blob/main/llava/eval/table/results/test_sqa_llava_13b_v0.json) for comparison when reproducing our results, as well as for further analysis in detail.
54 |
--------------------------------------------------------------------------------
/docs/Windows.md:
--------------------------------------------------------------------------------
1 | # Run LLaVA on Windows
2 |
3 | *NOTE: LLaVA on Windows is not fully supported. Currently we only support 16-bit inference. For more complete support, please use [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) for now. More functionalities on Windows are to be added soon; stay tuned.*
4 |
5 | ## Installation
6 |
7 | 1. Clone this repository and navigate to LLaVA folder
8 | ```bash
9 | git clone https://github.com/haotian-liu/LLaVA.git
10 | cd LLaVA
11 | ```
12 |
13 | 2. Install Package
14 | ```Shell
15 | conda create -n llava python=3.10 -y
16 | conda activate llava
17 | python -mpip install --upgrade pip # enable PEP 660 support
18 | pip install torch==2.0.1+cu117 torchvision==0.15.2+cu117 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu117
19 | pip install -e .
20 | pip uninstall bitsandbytes
21 | ```
22 |
23 | ## Run demo
24 |
25 | See instructions [here](https://github.com/haotian-liu/LLaVA#demo).
26 |
27 | Note that quantization (4-bit, 8-bit) is *NOT* supported on Windows. Stay tuned for the 4-bit support on Windows!
28 |
--------------------------------------------------------------------------------
/docs/macOS.md:
--------------------------------------------------------------------------------
1 | # Run LLaVA on macOS
2 |
3 | *NOTE: LLaVA on macOS is not fully supported. Currently we only support 16-bit inference. More functionalities on macOS are to be added soon; stay tuned.*
4 |
5 | ## Installation
6 |
7 | 1. Clone this repository and navigate to LLaVA folder
8 | ```bash
9 | git clone https://github.com/haotian-liu/LLaVA.git
10 | cd LLaVA
11 | ```
12 |
13 | 2. Install Package
14 | ```Shell
15 | conda create -n llava python=3.10 -y
16 | conda activate llava
17 | python -mpip install --upgrade pip # enable PEP 660 support
18 | pip install -e .
19 | pip install torch==2.1.0 torchvision==0.16.0
20 | pip uninstall bitsandbytes
21 | ```
22 |
23 | ## Run demo
24 |
25 | Specify `--device mps` when launching model worker or CLI.
26 |
27 | See instructions [here](https://github.com/haotian-liu/LLaVA#demo).
28 |
29 | Note that quantization (4-bit, 8-bit) is *NOT* supported on macOS. Stay tuned for the 4-bit support on macOS!
30 |
--------------------------------------------------------------------------------
/images/demo_cli.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/images/demo_cli.gif
--------------------------------------------------------------------------------
/images/happy.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/images/happy.jpg
--------------------------------------------------------------------------------
/images/llava_example_cmp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/images/llava_example_cmp.png
--------------------------------------------------------------------------------
/images/llava_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/images/llava_logo.png
--------------------------------------------------------------------------------
/images/llava_v1_5_radar.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/images/llava_v1_5_radar.jpg
--------------------------------------------------------------------------------
/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/images/logo.png
--------------------------------------------------------------------------------
/llava/__init__.py:
--------------------------------------------------------------------------------
1 | from .model import LlavaLlamaForCausalLM, LlavaLlamaBPOForCausalLM
2 |
--------------------------------------------------------------------------------
/llava/constants.py:
--------------------------------------------------------------------------------
1 | CONTROLLER_HEART_BEAT_EXPIRATION = 30
2 | WORKER_HEART_BEAT_INTERVAL = 15
3 |
4 | LOGDIR = "."
5 |
6 | # Model Constants
7 | IGNORE_INDEX = -100
8 | IMAGE_TOKEN_INDEX = -200
9 | DEFAULT_IMAGE_TOKEN = "<image>"
10 | DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
11 | DEFAULT_IM_START_TOKEN = "<im_start>"
12 | DEFAULT_IM_END_TOKEN = "<im_end>"
13 | IMAGE_PLACEHOLDER = "<image-placeholder>"
14 |
--------------------------------------------------------------------------------
/llava/eval/eval_multi.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Check if four arguments are passed
4 | if [ "$#" -ne 4 ]; then
5 |     echo "Usage: $0 <model_path> <answers_root> <num_chunks> <temperature>"
6 | exit 1
7 | fi
8 |
9 | scenes=("01-Illegal_Activitiy" "02-HateSpeech" "03-Malware_Generation" "04-Physical_Harm" "05-EconomicHarm" "06-Fraud" "07-Sex" "08-Political_Lobbying" "09-Privacy_Violence" "10-Legal_Opinion" "11-Financial_Advice" "12-Health_Consultation" "13-Gov_Decision")
10 |
11 | # Assign the command line arguments to variables
12 | model_path=$1
13 | answers_root=$2
14 | N=$3
15 | temperature=$4
16 |
17 | # Check if the answers_root directory exists
18 | if [ ! -d "$answers_root" ]; then
19 | # Directory does not exist, so create it
20 | mkdir "$answers_root"
21 | fi
22 |
23 | for scene in "${scenes[@]}"; do
24 | answer_scene_path="${answers_root}/${scene}"
25 | if [ ! -d "$answer_scene_path" ]; then
26 | # Directory does not exist, so create it
27 | mkdir "$answer_scene_path"
28 | fi
29 |
30 | # Loop over each chunk/process
31 | for ((chunk_id = 0; chunk_id < N; chunk_id++)); do
32 | # Define the answer path for each chunk
33 | answer_path="${answer_scene_path}/${chunk_id}.json"
34 | if [ -f "$answer_path" ]; then
35 | rm "$answer_path"
36 | fi
37 |
38 | # Run the Python program in the background
39 | CUDA_VISIBLE_DEVICES="$chunk_id" python llava/eval/robustness_eval.py --model-path "$model_path" --scene "$scene" --answers_file "$answer_path" --num-chunks "$N" --chunk-idx "$chunk_id" --temperature "$temperature" &
40 |
41 | # Uncomment below if you need a slight delay between starting each process
42 | # sleep 0.1
43 | done
44 |
45 | # Wait for all background processes to finish
46 | wait
47 | cd $answer_scene_path
48 | merged_file="merged.json"
49 | if [ -f "$merged_file" ]; then
50 | rm "$merged_file"
51 | fi
52 |
53 | # Merge all the JSON files into one
54 | python ~/polite_llava/scripts/concatenate_json.py *.json
55 | cd ~/polite_llava
56 | # Remove the unmerged files
57 | for ((chunk_id = 0; chunk_id < N; chunk_id++)); do
58 | answer_path="${answer_scene_path}/${chunk_id}.json"
59 | if [ -f "$answer_path" ]; then
60 | rm "$answer_path"
61 | fi
62 | done
63 | done
64 |
--------------------------------------------------------------------------------
/llava/eval/eval_multi_safeguard.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Check if six arguments are passed
4 | if [ "$#" -ne 6 ]; then
5 |     echo "Usage: $0 <model_path> <answers_root> <num_chunks> <temperature> <harm_detector> <detoxifier>"
6 | exit 1
7 | fi
8 |
9 | #scenes=("01-Illegal_Activitiy" "02-HateSpeech" "03-Malware_Generation" "04-Physical_Harm" "05-EconomicHarm" "06-Fraud" "07-Sex" "08-Political_Lobbying" "09-Privacy_Violence" "10-Legal_Opinion" "11-Financial_Advice" "12-Health_Consultation" "13-Gov_Decision")
10 | scenes=("04-Physical_Harm" "05-EconomicHarm" "06-Fraud" "07-Sex" "08-Political_Lobbying" "09-Privacy_Violence" "10-Legal_Opinion" "11-Financial_Advice" "12-Health_Consultation" "13-Gov_Decision")
11 | #scenes=("07-Sex" "08-Political_Lobbying" "09-Privacy_Violence" "10-Legal_Opinion" "11-Financial_Advice" "12-Health_Consultation" "13-Gov_Decision")
12 |
13 | # Assign the command line arguments to variables
14 | model_path=$1
15 | answers_root=$2
16 | N=$3
17 | temperature=$4
18 | harm_detector=$5
19 | detoxifier=$6
20 |
21 | # Check if the answers_root directory exists
22 | if [ ! -d "$answers_root" ]; then
23 | # Directory does not exist, so create it
24 | mkdir "$answers_root"
25 | fi
26 |
27 | for scene in "${scenes[@]}"; do
28 | answer_scene_path="${answers_root}/${scene}"
29 | if [ ! -d "$answer_scene_path" ]; then
30 | # Directory does not exist, so create it
31 | mkdir "$answer_scene_path"
32 | fi
33 |
34 | # Loop over each chunk/process
35 | for ((chunk_id = 0; chunk_id < N; chunk_id++)); do
36 | # Define the answer path for each chunk
37 | answer_path="${answer_scene_path}/${chunk_id}.json"
38 | if [ -f "$answer_path" ]; then
39 | rm "$answer_path"
40 | fi
41 |
42 | # Run the Python program in the background
43 | CUDA_VISIBLE_DEVICES="$chunk_id" python llava/eval/robustness_eval.py --model-path "$model_path" --scene "$scene" --answers_file "$answer_path" --num-chunks "$N" --chunk-idx "$chunk_id" --temperature "$temperature" --harm_detector "$harm_detector" --detoxifier "$detoxifier"&
44 |
45 | # Uncomment below if you need a slight delay between starting each process
46 | # sleep 0.1
47 | done
48 |
49 | # Wait for all background processes to finish
50 | wait
51 | cd $answer_scene_path
52 | merged_file="merged.json"
53 | if [ -f "$merged_file" ]; then
54 | rm "$merged_file"
55 | fi
56 |
57 | # Merge all the JSON files into one
58 | python ~/polite_llava/scripts/concatenate_json.py *.json
59 | cd ~/polite_llava
60 | # Remove the unmerged files
61 | for ((chunk_id = 0; chunk_id < N; chunk_id++)); do
62 | answer_path="${answer_scene_path}/${chunk_id}.json"
63 | if [ -f "$answer_path" ]; then
64 | rm "$answer_path"
65 | fi
66 | done
67 | done
68 |
--------------------------------------------------------------------------------
/llava/eval/eval_pope.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import argparse
4 |
5 | def eval_pope(answers, label_file):
6 | label_list = [json.loads(q)['label'] for q in open(label_file, 'r')]
7 |
8 | for answer in answers:
9 | text = answer['text']
10 |
11 | # Only keep the first sentence
12 | if text.find('.') != -1:
13 | text = text.split('.')[0]
14 |
15 | text = text.replace(',', '')
16 | words = text.split(' ')
17 | if 'No' in words or 'not' in words or 'no' in words:
18 | answer['text'] = 'no'
19 | else:
20 | answer['text'] = 'yes'
21 |
22 | for i in range(len(label_list)):
23 | if label_list[i] == 'no':
24 | label_list[i] = 0
25 | else:
26 | label_list[i] = 1
27 |
28 | pred_list = []
29 | for answer in answers:
30 | if answer['text'] == 'no':
31 | pred_list.append(0)
32 | else:
33 | pred_list.append(1)
34 |
35 | pos = 1
36 | neg = 0
37 | yes_ratio = pred_list.count(1) / len(pred_list)
38 |
39 | TP, TN, FP, FN = 0, 0, 0, 0
40 | for pred, label in zip(pred_list, label_list):
41 | if pred == pos and label == pos:
42 | TP += 1
43 | elif pred == pos and label == neg:
44 | FP += 1
45 | elif pred == neg and label == neg:
46 | TN += 1
47 | elif pred == neg and label == pos:
48 | FN += 1
49 |
50 | print('TP\tFP\tTN\tFN\t')
51 | print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN))
52 |
53 | precision = float(TP) / float(TP + FP)
54 | recall = float(TP) / float(TP + FN)
55 | f1 = 2*precision*recall / (precision + recall)
56 | acc = (TP + TN) / (TP + TN + FP + FN)
57 | print('Accuracy: {}'.format(acc))
58 | print('Precision: {}'.format(precision))
59 | print('Recall: {}'.format(recall))
60 | print('F1 score: {}'.format(f1))
61 | print('Yes ratio: {}'.format(yes_ratio))
62 | print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio) )
63 |
64 | if __name__ == "__main__":
65 | parser = argparse.ArgumentParser()
66 | parser.add_argument("--annotation-dir", type=str)
67 | parser.add_argument("--question-file", type=str)
68 | parser.add_argument("--result-file", type=str)
69 | args = parser.parse_args()
70 |
71 | questions = [json.loads(line) for line in open(args.question_file)]
72 | questions = {question['question_id']: question for question in questions}
73 | answers = [json.loads(q) for q in open(args.result_file)]
74 | for file in os.listdir(args.annotation_dir):
75 | assert file.startswith('coco_pope_')
76 | assert file.endswith('.json')
77 | category = file[10:-5]
78 | cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category]
79 | print('Category: {}, # samples: {}'.format(category, len(cur_answers)))
80 | eval_pope(cur_answers, os.path.join(args.annotation_dir, file))
81 | print("====================================")
82 |
--------------------------------------------------------------------------------
/llava/eval/eval_textvqa.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import json
4 | import re
5 |
6 | from llava.eval.m4c_evaluator import TextVQAAccuracyEvaluator
7 |
8 |
9 | def get_args():
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument('--annotation-file', type=str)
12 | parser.add_argument('--result-file', type=str)
13 | parser.add_argument('--result-dir', type=str)
14 | return parser.parse_args()
15 |
16 |
17 | def prompt_processor(prompt):
18 | if prompt.startswith('OCR tokens: '):
19 | pattern = r"Question: (.*?) Short answer:"
20 | match = re.search(pattern, prompt, re.DOTALL)
21 | question = match.group(1)
22 | elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3:
23 | if prompt.startswith('Reference OCR token:'):
24 | question = prompt.split('\n')[1]
25 | else:
26 | question = prompt.split('\n')[0]
27 | elif len(prompt.split('\n')) == 2:
28 | question = prompt.split('\n')[0]
29 | else:
30 | assert False
31 |
32 | return question.lower()
33 |
34 |
35 | def eval_single(annotation_file, result_file):
36 | experiment_name = os.path.splitext(os.path.basename(result_file))[0]
37 | print(experiment_name)
38 | annotations = json.load(open(annotation_file))['data']
39 | annotations = {(annotation['image_id'], annotation['question'].lower()): annotation for annotation in annotations}
40 | results = [json.loads(line) for line in open(result_file)]
41 |
42 | pred_list = []
43 | for result in results:
44 | annotation = annotations[(result['question_id'], prompt_processor(result['prompt']))]
45 | pred_list.append({
46 | "pred_answer": result['text'],
47 | "gt_answers": annotation['answers'],
48 | })
49 |
50 | evaluator = TextVQAAccuracyEvaluator()
51 | print('Samples: {}\nAccuracy: {:.2f}%\n'.format(len(pred_list), 100. * evaluator.eval_pred_list(pred_list)))
52 |
53 |
54 | if __name__ == "__main__":
55 | args = get_args()
56 |
57 | if args.result_file is not None:
58 | eval_single(args.annotation_file, args.result_file)
59 |
60 | if args.result_dir is not None:
61 | for result_file in sorted(os.listdir(args.result_dir)):
62 | if not result_file.endswith('.jsonl'):
63 | print(f'Skipping {result_file}')
64 | continue
65 | eval_single(args.annotation_file, os.path.join(args.result_dir, result_file))
66 |
--------------------------------------------------------------------------------
/llava/eval/qa_baseline_gpt35.py:
--------------------------------------------------------------------------------
1 | """Generate answers with GPT-3.5"""
2 | # Note: you need to be using OpenAI Python v0.27.0 for the code below to work
3 | import argparse
4 | import json
5 | import os
6 | import time
7 | import concurrent.futures
8 |
9 | import openai
10 | import tqdm
11 | import shortuuid
12 |
13 | MODEL = 'gpt-3.5-turbo'
14 | MODEL_ID = 'gpt-3.5-turbo:20230327'
15 |
16 | def get_answer(question_id: int, question: str, max_tokens: int):
17 | ans = {
18 | 'answer_id': shortuuid.uuid(),
19 | 'question_id': question_id,
20 | 'model_id': MODEL_ID,
21 | }
22 | for _ in range(3):
23 | try:
24 | response = openai.ChatCompletion.create(
25 | model=MODEL,
26 | messages=[{
27 | 'role': 'system',
28 | 'content': 'You are a helpful assistant.'
29 | }, {
30 | 'role': 'user',
31 | 'content': question,
32 | }],
33 | max_tokens=max_tokens,
34 | )
35 | ans['text'] = response['choices'][0]['message']['content']
36 | return ans
37 | except Exception as e:
38 | print('[ERROR]', e)
39 | ans['text'] = '#ERROR#'
40 | time.sleep(1)
41 | return ans
42 |
43 |
44 | if __name__ == '__main__':
45 | parser = argparse.ArgumentParser(description='ChatGPT answer generation.')
46 | parser.add_argument('-q', '--question')
47 | parser.add_argument('-o', '--output')
48 | parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
49 | args = parser.parse_args()
50 |
51 | questions_dict = {}
52 | with open(os.path.expanduser(args.question)) as f:
53 | for line in f:
54 | if not line:
55 | continue
56 | q = json.loads(line)
57 | questions_dict[q['question_id']] = q['text']
58 |
59 | answers = []
60 |
61 | with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor:
62 | futures = []
63 | for qid, question in questions_dict.items():
64 | future = executor.submit(get_answer, qid, question, args.max_tokens)
65 | futures.append(future)
66 |
67 | for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
68 | answers.append(future.result())
69 |
70 | answers.sort(key=lambda x: x['question_id'])
71 |
72 | with open(os.path.expanduser(args.output), 'w') as f:
73 | table = [json.dumps(ans) for ans in answers]
74 | f.write('\n'.join(table))
75 |
--------------------------------------------------------------------------------
/llava/eval/summarize_gpt_review.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from collections import defaultdict
4 |
5 | import numpy as np
6 |
7 | import argparse
8 |
9 | def parse_args():
10 | parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
11 | parser.add_argument('-d', '--dir', default=None)
12 | parser.add_argument('-v', '--version', default=None)
13 | parser.add_argument('-s', '--select', nargs='*', default=None)
14 | parser.add_argument('-f', '--files', nargs='*', default=[])
15 | parser.add_argument('-i', '--ignore', nargs='*', default=[])
16 | return parser.parse_args()
17 |
18 |
19 | if __name__ == '__main__':
20 | args = parse_args()
21 |
22 | if args.ignore is not None:
23 | args.ignore = [int(x) for x in args.ignore]
24 |
25 | if len(args.files) > 0:
26 | review_files = args.files
27 | else:
28 | review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_') or 'review' in args.dir)]
29 |
30 | for review_file in sorted(review_files):
31 | config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '')
32 | if args.select is not None and any(x not in config for x in args.select):
33 | continue
34 | if '0613' in config:
35 | version = '0613'
36 | else:
37 | version = '0314'
38 | if args.version is not None and args.version != version:
39 | continue
40 | scores = defaultdict(list)
41 | print(config)
42 | with open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f:
43 | for review_str in f:
44 | review = json.loads(review_str)
45 | if review['question_id'] in args.ignore:
46 | continue
47 | if 'category' in review:
48 | scores[review['category']].append(review['tuple'])
49 | scores['all'].append(review['tuple'])
50 | else:
51 | if 'tuple' in review:
52 | scores['all'].append(review['tuple'])
53 | else:
54 | scores['all'].append(review['score'])
55 | for k, v in sorted(scores.items()):
56 | stats = np.asarray(v).mean(0).tolist()
57 | stats = [round(x, 3) for x in stats]
58 | # print(k, stats, round(stats[1]/stats[0]*100, 1))
59 | print(k, round(stats[1]/stats[0]*100, 1), round(stats[0] * 10, 1), round(stats[1] * 10, 1))
60 | print('=================================')
61 |
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/alpaca.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/llava/eval/webpage/figures/alpaca.png
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/bard.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/llava/eval/webpage/figures/bard.jpg
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/chatgpt.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/llama.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/llava/eval/webpage/figures/llama.jpg
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/swords_FILL0_wght300_GRAD0_opsz48.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/llava/eval/webpage/figures/vicuna.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/llava/eval/webpage/figures/vicuna.jpeg
--------------------------------------------------------------------------------
/llava/eval/webpage/styles.css:
--------------------------------------------------------------------------------
1 | body {
2 | font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
3 | background-color: #f8f9fa;
4 | }
5 |
6 | .navbar-dark .navbar-nav .nav-link {
7 | color: #f1cf68;
8 | font-size: 1.1rem;
9 | padding: 0.5rem 0.6rem;
10 | }
11 |
12 | .card-header {
13 | font-weight: bold;
14 | }
15 |
16 | .card {
17 | box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
18 | transition: 0.3s;
19 | }
20 |
21 | .card:hover {
22 | box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2);
23 | }
24 |
25 | button {
26 | transition: background-color 0.3s;
27 | }
28 |
29 | button:hover {
30 | background-color: #007bff;
31 | }
32 |
33 | @media (max-width: 767px) {
34 | .form-row .form-group {
35 | margin-bottom: 10px;
36 | }
37 | }
38 |
39 | /* Extra styles */
40 |
41 | .expandable-card .card-text-container {
42 | max-height: 200px;
43 | overflow-y: hidden;
44 | position: relative;
45 | }
46 |
47 | .expandable-card.expanded .card-text-container {
48 | max-height: none;
49 | }
50 |
51 | .expand-btn {
52 | position: relative;
53 | display: none;
54 | background-color: rgba(255, 255, 255, 0.8);
55 | color: #510c75;
56 | border-color: transparent;
57 | }
58 |
59 | .expand-btn:hover {
60 | background-color: rgba(200, 200, 200, 0.8);
61 | text-decoration: none;
62 | border-color: transparent;
63 | color: #510c75;
64 | }
65 |
66 | .expand-btn:focus {
67 | outline: none;
68 | text-decoration: none;
69 | }
70 |
71 | .expandable-card:not(.expanded) .card-text-container:after {
72 | content: "";
73 | position: absolute;
74 | bottom: 0;
75 | left: 0;
76 | width: 100%;
77 | height: 90px;
78 | background: linear-gradient(rgba(255, 255, 255, 0.2), rgba(255, 255, 255, 1));
79 | }
80 |
81 | .expandable-card:not(.expanded) .expand-btn {
82 | margin-top: -40px;
83 | }
84 |
85 | .card-body {
86 | padding-bottom: 5px;
87 | }
88 |
89 | .vertical-flex-layout {
90 | justify-content: center;
91 | align-items: center;
92 | height: 100%;
93 | display: flex;
94 | flex-direction: column;
95 | gap: 5px;
96 | }
97 |
98 | .figure-img {
99 | max-width: 100%;
100 | height: auto;
101 | }
102 |
103 | .adjustable-font-size {
104 | font-size: calc(0.5rem + 2vw);
105 | }
106 |
--------------------------------------------------------------------------------
/llava/model/__init__.py:
--------------------------------------------------------------------------------
1 | from .language_model.llava_llama import LlavaLlamaForCausalLM, LlavaLlamaBPOForCausalLM, LlavaConfig, LlavaLlamaForSequenceClassification, LlavaLlamaForSequenceClassificationSep #, LlavaLlamaForCausalLMAdapt
2 | # from .language_model.llava_mpt import LlavaMPTForCausalLM, LlavaMPTConfig
3 |
--------------------------------------------------------------------------------
/llava/model/apply_delta.py:
--------------------------------------------------------------------------------
1 | """
2 | Usage:
3 | python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta
4 | """
5 | import argparse
6 |
7 | import torch
8 | from tqdm import tqdm
9 | from transformers import AutoTokenizer, AutoModelForCausalLM
10 | from llava import LlavaLlamaForCausalLM
11 |
12 |
13 | def apply_delta(base_model_path, target_model_path, delta_path):
14 | print("Loading base model")
15 | base = AutoModelForCausalLM.from_pretrained(
16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
17 |
18 | print("Loading delta")
19 | delta = LlavaLlamaForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
20 | delta_tokenizer = AutoTokenizer.from_pretrained(delta_path)
21 |
22 | print("Applying delta")
23 | for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"):
24 | if name not in base.state_dict():
25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model'
26 | continue
27 | if param.data.shape == base.state_dict()[name].shape:
28 | param.data += base.state_dict()[name]
29 | else:
30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], \
31 | f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}'
32 | bparam = base.state_dict()[name]
33 | param.data[:bparam.shape[0], :bparam.shape[1]] += bparam
34 |
35 | print("Saving target model")
36 | delta.save_pretrained(target_model_path)
37 | delta_tokenizer.save_pretrained(target_model_path)
38 |
39 |
40 | if __name__ == "__main__":
41 | parser = argparse.ArgumentParser()
42 | parser.add_argument("--base-model-path", type=str, required=True)
43 | parser.add_argument("--target-model-path", type=str, required=True)
44 | parser.add_argument("--delta-path", type=str, required=True)
45 |
46 | args = parser.parse_args()
47 |
48 | apply_delta(args.base_model_path, args.target_model_path, args.delta_path)
49 |
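
A minimal sketch of the weight arithmetic apply_delta performs, using toy tensors rather than real checkpoints; the variable names are illustrative only:

import torch

base_w = torch.randn(4, 4)      # a weight from the base LLaMA checkpoint
delta_w = torch.randn(4, 4)     # the corresponding weight from the released delta
target_w = delta_w + base_w     # apply_delta adds the base weight into the delta in place
# make_delta (later in this listing) performs the inverse step, delta_w = target_w - base_w,
# so round-tripping a checkpoint through make_delta and apply_delta recovers the target weights.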
--------------------------------------------------------------------------------
/llava/model/consolidate.py:
--------------------------------------------------------------------------------
1 | """
2 | Usage:
3 | python3 -m llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate
4 | """
5 | import argparse
6 |
7 | import torch
8 | from transformers import AutoTokenizer, AutoModelForCausalLM
9 | from llava.model import *
10 | from llava.model.utils import auto_upgrade
11 |
12 |
13 | def consolidate_ckpt(src_path, dst_path):
14 | print("Loading model")
15 | auto_upgrade(src_path)
16 | src_model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
17 | src_tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False)
18 | src_model.save_pretrained(dst_path)
19 | src_tokenizer.save_pretrained(dst_path)
20 |
21 |
22 | if __name__ == "__main__":
23 | parser = argparse.ArgumentParser()
24 | parser.add_argument("--src", type=str, required=True)
25 | parser.add_argument("--dst", type=str, required=True)
26 |
27 | args = parser.parse_args()
28 |
29 | consolidate_ckpt(args.src, args.dst)
30 |
--------------------------------------------------------------------------------
/llava/model/language_model/mpt/adapt_tokenizer.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 | from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
3 | Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
4 | NUM_SENTINEL_TOKENS: int = 100
5 |
6 | def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
7 | """Adds sentinel tokens and padding token (if missing).
8 |
9 | Expands the tokenizer vocabulary to include sentinel tokens
10 | used in mixture-of-denoiser tasks as well as a padding token.
11 |
12 | All added tokens are added as special tokens. No tokens are
13 | added if sentinel tokens and padding token already exist.
14 | """
15 | sentinels_to_add = [f'&lt;extra_id_{i}&gt;' for i in range(NUM_SENTINEL_TOKENS)]
16 | tokenizer.add_tokens(sentinels_to_add, special_tokens=True)
17 | if tokenizer.pad_token is None:
18 | tokenizer.add_tokens('&lt;pad&gt;', special_tokens=True)
19 | tokenizer.pad_token = '&lt;pad&gt;'
20 | assert tokenizer.pad_token_id is not None
21 | sentinels = ''.join([f'&lt;extra_id_{i}&gt;' for i in range(NUM_SENTINEL_TOKENS)])
22 | _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids
23 | tokenizer.sentinel_token_ids = _sentinel_token_ids
24 |
25 | class AutoTokenizerForMOD(AutoTokenizer):
26 | """AutoTokenizer + Adaptation for MOD.
27 |
28 | A simple wrapper around AutoTokenizer to make instantiating
29 | an MOD-adapted tokenizer a bit easier.
30 |
31 | MOD-adapted tokenizers have sentinel tokens (e.g., &lt;extra_id_0&gt;),
32 | a padding token, and a property to get the token ids of the
33 | sentinel tokens.
34 | """
35 |
36 | @classmethod
37 | def from_pretrained(cls, *args, **kwargs):
38 | """See `AutoTokenizer.from_pretrained` docstring."""
39 | tokenizer = super().from_pretrained(*args, **kwargs)
40 | adapt_tokenizer_for_denoising(tokenizer)
41 | return tokenizer
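
A hedged usage sketch for the wrapper above: the checkpoint name is an assumption (MPT models conventionally reuse the EleutherAI/gpt-neox-20b tokenizer) and is not specified anywhere in this file; running it also requires downloading the tokenizer from the Hub.

from llava.model.language_model.mpt.adapt_tokenizer import AutoTokenizerForMOD

# from_pretrained builds the plain tokenizer, then adds the 100 sentinel tokens
# and a pad token via adapt_tokenizer_for_denoising.
tok = AutoTokenizerForMOD.from_pretrained('EleutherAI/gpt-neox-20b')
print(len(tok.sentinel_token_ids))  # 100 ids, one per added sentinel token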
--------------------------------------------------------------------------------
/llava/model/language_model/mpt/blocks.py:
--------------------------------------------------------------------------------
1 | """GPT Blocks used for the GPT Model."""
2 | from typing import Dict, Optional, Tuple
3 | import torch
4 | import torch.nn as nn
5 | from .attention import ATTN_CLASS_REGISTRY
6 | from .norm import NORM_CLASS_REGISTRY
7 |
8 | class MPTMLP(nn.Module):
9 |
10 | def __init__(self, d_model: int, expansion_ratio: int, device: Optional[str]=None):
11 | super().__init__()
12 | self.up_proj = nn.Linear(d_model, expansion_ratio * d_model, device=device)
13 | self.act = nn.GELU(approximate='none')
14 | self.down_proj = nn.Linear(expansion_ratio * d_model, d_model, device=device)
15 | self.down_proj._is_residual = True
16 |
17 | def forward(self, x):
18 | return self.down_proj(self.act(self.up_proj(x)))
19 |
20 | class MPTBlock(nn.Module):
21 |
22 | def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):
23 | del kwargs
24 | super().__init__()
25 | norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
26 | attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]
27 | self.norm_1 = norm_class(d_model, device=device)
28 | self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)
29 | self.norm_2 = norm_class(d_model, device=device)
30 | self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)
31 | self.resid_attn_dropout = nn.Dropout(resid_pdrop)
32 | self.resid_ffn_dropout = nn.Dropout(resid_pdrop)
33 |
34 | def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
35 | a = self.norm_1(x)
36 | (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)
37 | x = x + self.resid_attn_dropout(b)
38 | m = self.norm_2(x)
39 | n = self.ffn(m)
40 | x = x + self.resid_ffn_dropout(n)
41 | return (x, attn_weights, past_key_value)
--------------------------------------------------------------------------------
/llava/model/language_model/mpt/custom_embedding.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch import Tensor
5 |
6 | class SharedEmbedding(nn.Embedding):
7 |
8 | def forward(self, input: Tensor, unembed: bool=False) -> Tensor:
9 | if unembed:
10 | return F.linear(input, self.weight)
11 | return super().forward(input)
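
A small sketch of how the tied embedding above is used in both directions; the sizes are arbitrary:

import torch
from llava.model.language_model.mpt.custom_embedding import SharedEmbedding

emb = SharedEmbedding(10, 4)        # vocab size 10, hidden size 4
ids = torch.tensor([[1, 2, 3]])
hidden = emb(ids)                   # ordinary lookup, shape (1, 3, 4)
logits = emb(hidden, unembed=True)  # same weight reused as the output projection, shape (1, 3, 10)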
--------------------------------------------------------------------------------
/llava/model/language_model/mpt/norm.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | def _cast_if_autocast_enabled(tensor):
4 | if torch.is_autocast_enabled():
5 | if tensor.device.type == 'cuda':
6 | dtype = torch.get_autocast_gpu_dtype()
7 | elif tensor.device.type == 'cpu':
8 | dtype = torch.get_autocast_cpu_dtype()
9 | else:
10 | raise NotImplementedError()
11 | return tensor.to(dtype=dtype)
12 | return tensor
13 |
14 | class LPLayerNorm(torch.nn.LayerNorm):
15 |
16 | def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True, device=None, dtype=None):
17 | super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine, device=device, dtype=dtype)
18 |
19 | def forward(self, x):
20 | module_device = x.device
21 | downcast_x = _cast_if_autocast_enabled(x)
22 | downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
23 | downcast_bias = _cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
24 | with torch.autocast(enabled=False, device_type=module_device.type):
25 | return torch.nn.functional.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps)
26 |
27 | def rms_norm(x, weight=None, eps=1e-05):
28 | output = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
29 | if weight is not None:
30 | return output * weight
31 | return output
32 |
33 | class RMSNorm(torch.nn.Module):
34 |
35 | def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
36 | super().__init__()
37 | self.eps = eps
38 | if weight:
39 | self.weight = torch.nn.Parameter(torch.ones(normalized_shape, dtype=dtype, device=device))
40 | else:
41 | self.register_parameter('weight', None)
42 |
43 | def forward(self, x):
44 | return rms_norm(x.float(), self.weight, self.eps).to(dtype=x.dtype)
45 |
46 | class LPRMSNorm(RMSNorm):
47 |
48 | def __init__(self, normalized_shape, eps=1e-05, weight=True, dtype=None, device=None):
49 | super().__init__(normalized_shape=normalized_shape, eps=eps, weight=weight, dtype=dtype, device=device)
50 |
51 | def forward(self, x):
52 | downcast_x = _cast_if_autocast_enabled(x)
53 | downcast_weight = _cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
54 | with torch.autocast(enabled=False, device_type=x.device.type):
55 | return rms_norm(downcast_x, downcast_weight, self.eps).to(dtype=x.dtype)
56 | NORM_CLASS_REGISTRY = {'layernorm': torch.nn.LayerNorm, 'low_precision_layernorm': LPLayerNorm, 'rmsnorm': RMSNorm, 'low_precision_rmsnorm': LPRMSNorm}
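
A quick numerical check of the RMS normalization above, looked up through the same registry MPTBlock uses; the tensor shapes are arbitrary:

import torch
from llava.model.language_model.mpt.norm import NORM_CLASS_REGISTRY

norm = NORM_CLASS_REGISTRY['rmsnorm'](8)  # RMSNorm over a feature dimension of 8
x = torch.randn(2, 8)
y = norm(x)
print(y.pow(2).mean(-1))                  # close to 1.0 per row: each vector is rescaled to unit RMS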
--------------------------------------------------------------------------------
/llava/model/make_delta.py:
--------------------------------------------------------------------------------
1 | """
2 | Usage:
3 | python3 -m llava.model.make_delta --base-model-path ~/model_weights/llama-7b --target-model-path ~/model_weights/llava-7b --delta-path ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta
4 | """
5 | import argparse
6 |
7 | import torch
8 | from tqdm import tqdm
9 | from transformers import AutoTokenizer, AutoModelForCausalLM
10 | from llava.model.utils import auto_upgrade
11 |
12 |
13 | def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id):
14 | print("Loading base model")
15 | base = AutoModelForCausalLM.from_pretrained(
16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
17 |
18 | print("Loading target model")
19 | auto_upgrade(target_model_path)
20 | target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
21 |
22 | print("Calculating delta")
23 | for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
24 | if name not in base.state_dict():
25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model'
26 | continue
27 | if param.data.shape == base.state_dict()[name].shape:
28 | param.data -= base.state_dict()[name]
29 | else:
30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}'
31 | bparam = base.state_dict()[name]
32 | param.data[:bparam.shape[0], :bparam.shape[1]] -= bparam
33 |
34 | print("Saving delta")
35 | if hub_repo_id:
36 | kwargs = {"push_to_hub": True, "repo_id": hub_repo_id}
37 | else:
38 | kwargs = {}
39 | target.save_pretrained(delta_path, **kwargs)
40 | target_tokenizer = AutoTokenizer.from_pretrained(target_model_path)
41 | target_tokenizer.save_pretrained(delta_path, **kwargs)
42 |
43 |
44 | if __name__ == "__main__":
45 | parser = argparse.ArgumentParser()
46 | parser.add_argument("--base-model-path", type=str, required=True)
47 | parser.add_argument("--target-model-path", type=str, required=True)
48 | parser.add_argument("--delta-path", type=str, required=True)
49 | parser.add_argument("--hub-repo-id", type=str, default=None)
50 | args = parser.parse_args()
51 |
52 | make_delta(args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id)
53 |
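
The shape-mismatch branch above handles weights whose vocabulary dimension was extended (model.embed_tokens.weight and lm_head.weight). A toy illustration of that slicing, with made-up sizes:

import torch

base_embed = torch.randn(32000, 4096)    # base vocabulary
target_embed = torch.randn(32003, 4096)  # target vocabulary with a few extra tokens
delta_embed = target_embed.clone()
delta_embed[:base_embed.shape[0], :base_embed.shape[1]] -= base_embed
# Rows for the extra tokens are stored unchanged in the delta, so apply_delta's
# matching slice-and-add recovers target_embed exactly.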
--------------------------------------------------------------------------------
/llava/model/multimodal_encoder/builder.py:
--------------------------------------------------------------------------------
1 | import os
2 | from .clip_encoder import CLIPVisionTower
3 |
4 |
5 | def build_vision_tower(vision_tower_cfg, **kwargs):
6 | vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
7 | is_absolute_path_exists = os.path.exists(vision_tower)
8 | if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"):
9 | return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
10 |
11 | raise ValueError(f'Unknown vision tower: {vision_tower}')
12 |
--------------------------------------------------------------------------------
/llava/model/multimodal_encoder/clip_encoder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig
5 |
6 |
7 | class CLIPVisionTower(nn.Module):
8 | def __init__(self, vision_tower, args, delay_load=False):
9 | super().__init__()
10 |
11 | self.is_loaded = False
12 |
13 | self.vision_tower_name = vision_tower
14 | self.select_layer = args.mm_vision_select_layer
15 | self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch')
16 |
17 | if not delay_load:
18 | self.load_model()
19 | else:
20 | self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name)
21 |
22 | def load_model(self):
23 | self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name)
24 | self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name)
25 | self.vision_tower.requires_grad_(False)
26 |
27 | self.is_loaded = True
28 |
29 | def feature_select(self, image_forward_outs):
30 | image_features = image_forward_outs.hidden_states[self.select_layer]
31 | if self.select_feature == 'patch':
32 | image_features = image_features[:, 1:]
33 | elif self.select_feature == 'cls_patch':
34 | image_features = image_features
35 | else:
36 | raise ValueError(f'Unexpected select feature: {self.select_feature}')
37 | return image_features
38 |
39 | @torch.no_grad()
40 | def forward(self, images):
41 | if type(images) is list:
42 | image_features = []
43 | for image in images:
44 | image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)
45 | image_feature = self.feature_select(image_forward_out).to(image.dtype)
46 | image_features.append(image_feature)
47 | else:
48 | image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
49 | image_features = self.feature_select(image_forward_outs).to(images.dtype)
50 |
51 | return image_features
52 |
53 | @property
54 | def dummy_feature(self):
55 | return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
56 |
57 | @property
58 | def dtype(self):
59 | return self.vision_tower.dtype
60 |
61 | @property
62 | def device(self):
63 | return self.vision_tower.device
64 |
65 | @property
66 | def config(self):
67 | if self.is_loaded:
68 | return self.vision_tower.config
69 | else:
70 | return self.cfg_only
71 |
72 | @property
73 | def hidden_size(self):
74 | return self.config.hidden_size
75 |
76 | @property
77 | def num_patches(self):
78 | return (self.config.image_size // self.config.patch_size) ** 2
79 |
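
For intuition on feature_select above: with the default 'patch' setting it drops the leading CLS token from the chosen hidden layer. A standalone sketch with representative, assumed CLIP ViT-L/14-336 dimensions:

import torch

hidden = torch.randn(2, 577, 1024)  # (batch, 1 CLS token + 24*24 patches, hidden size)
patch_features = hidden[:, 1:]      # what feature_select returns when select_feature == 'patch'
print(patch_features.shape)         # torch.Size([2, 576, 1024])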
--------------------------------------------------------------------------------
/llava/model/multimodal_projector/builder.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import re
4 |
5 |
6 | class IdentityMap(nn.Module):
7 | def __init__(self):
8 | super().__init__()
9 |
10 | def forward(self, x, *args, **kwargs):
11 | return x
12 |
13 | @property
14 | def config(self):
15 | return {"mm_projector_type": 'identity'}
16 |
17 |
18 | class SimpleResBlock(nn.Module):
19 | def __init__(self, channels):
20 | super().__init__()
21 | self.pre_norm = nn.LayerNorm(channels)
22 |
23 | self.proj = nn.Sequential(
24 | nn.Linear(channels, channels),
25 | nn.GELU(),
26 | nn.Linear(channels, channels)
27 | )
28 | def forward(self, x):
29 | x = self.pre_norm(x)
30 | return x + self.proj(x)
31 |
32 |
33 | def build_vision_projector(config, delay_load=False, **kwargs):
34 | projector_type = getattr(config, 'mm_projector_type', 'linear')
35 |
36 | if projector_type == 'linear':
37 | return nn.Linear(config.mm_hidden_size, config.hidden_size)
38 |
39 | mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
40 | if mlp_gelu_match:
41 | mlp_depth = int(mlp_gelu_match.group(1))
42 | modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
43 | for _ in range(1, mlp_depth):
44 | modules.append(nn.GELU())
45 | modules.append(nn.Linear(config.hidden_size, config.hidden_size))
46 | return nn.Sequential(*modules)
47 |
48 | if projector_type == 'identity':
49 | return IdentityMap()
50 |
51 | raise ValueError(f'Unknown projector type: {projector_type}')
52 |
53 |
54 | def build_vision_projector_adap(config, delay_load=False, **kwargs):
55 | projector_type = getattr(config, 'mm_projector_type', 'linear')
56 |
57 | if projector_type == 'linear':
58 | return nn.Linear(config.mm_hidden_size, config.hidden_size)
59 |
60 | mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
61 | if mlp_gelu_match:
62 | mlp_depth = int(mlp_gelu_match.group(1))
63 | modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
64 | for _ in range(1, mlp_depth):
65 | modules.append(nn.GELU())
66 | modules.append(nn.Linear(config.hidden_size, config.hidden_size))
67 | return nn.Sequential(*modules)
68 |
69 | if projector_type == 'identity':
70 | return IdentityMap()
71 |
72 | raise ValueError(f'Unknown projector type: {projector_type}')
73 |
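
build_vision_projector_adap is currently identical to build_vision_projector, so the same usage applies to both. A hedged sketch of the 'mlp2x_gelu' case; the config object and sizes below are assumptions for illustration, not values defined in this file:

from types import SimpleNamespace
from llava.model.multimodal_projector.builder import build_vision_projector

cfg = SimpleNamespace(mm_projector_type='mlp2x_gelu', mm_hidden_size=1024, hidden_size=4096)
proj = build_vision_projector(cfg)
print(proj)  # Sequential(Linear(1024 -> 4096), GELU(), Linear(4096 -> 4096))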
--------------------------------------------------------------------------------
/llava/model/utils.py:
--------------------------------------------------------------------------------
1 | from transformers import AutoConfig
2 |
3 |
4 | def auto_upgrade(config):
5 | cfg = AutoConfig.from_pretrained(config)
6 | if 'llava' in config and 'llava' not in cfg.model_type:
7 | assert cfg.model_type == 'llama'
8 | print("You are using newer LLaVA code base, while the checkpoint of v0 is from older code base.")
9 | print("You must upgrade the checkpoint to the new code base (this can be done automatically).")
10 | confirm = input("Please confirm that you want to upgrade the checkpoint. [Y/N]")
11 | if confirm.lower() in ["y", "yes"]:
12 | print("Upgrading checkpoint...")
13 | assert len(cfg.architectures) == 1
14 | setattr(cfg.__class__, "model_type", "llava")
15 | cfg.architectures[0] = 'LlavaLlamaForCausalLM'
16 | cfg.save_pretrained(config)
17 | print("Checkpoint upgraded.")
18 | else:
19 | print("Checkpoint upgrade aborted.")
20 | exit(1)
21 |
--------------------------------------------------------------------------------
/llava/serve/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/llava/serve/__init__.py
--------------------------------------------------------------------------------
/llava/serve/examples/extreme_ironing.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/llava/serve/examples/extreme_ironing.jpg
--------------------------------------------------------------------------------
/llava/serve/examples/waterview.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/llava/serve/examples/waterview.jpg
--------------------------------------------------------------------------------
/llava/serve/register_worker.py:
--------------------------------------------------------------------------------
1 | """
2 | Manually register workers.
3 |
4 | Usage:
5 | python3 -m llava.serve.register_worker --controller-address http://localhost:21001 --worker-name http://localhost:21002
6 | """
7 |
8 | import argparse
9 |
10 | import requests
11 |
12 | if __name__ == "__main__":
13 | parser = argparse.ArgumentParser()
14 | parser.add_argument("--controller-address", type=str)
15 | parser.add_argument("--worker-name", type=str)
16 | parser.add_argument("--check-heart-beat", action="store_true")
17 | args = parser.parse_args()
18 |
19 | url = args.controller_address + "/register_worker"
20 | data = {
21 | "worker_name": args.worker_name,
22 | "check_heart_beat": args.check_heart_beat,
23 | "worker_status": None,
24 | }
25 | r = requests.post(url, json=data)
26 | assert r.status_code == 200
27 |
--------------------------------------------------------------------------------
/llava/serve/test_message.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 |
4 | import requests
5 |
6 | from llava.conversation import default_conversation
7 |
8 |
9 | def main():
10 | if args.worker_address:
11 | worker_addr = args.worker_address
12 | else:
13 | controller_addr = args.controller_address
14 | ret = requests.post(controller_addr + "/refresh_all_workers")
15 | ret = requests.post(controller_addr + "/list_models")
16 | models = ret.json()["models"]
17 | models.sort()
18 | print(f"Models: {models}")
19 |
20 | ret = requests.post(controller_addr + "/get_worker_address",
21 | json={"model": args.model_name})
22 | worker_addr = ret.json()["address"]
23 | print(f"worker_addr: {worker_addr}")
24 |
25 | if worker_addr == "":
26 | return
27 |
28 | conv = default_conversation.copy()
29 | conv.append_message(conv.roles[0], args.message)
30 | prompt = conv.get_prompt()
31 |
32 | headers = {"User-Agent": "LLaVA Client"}
33 | pload = {
34 | "model": args.model_name,
35 | "prompt": prompt,
36 | "max_new_tokens": args.max_new_tokens,
37 | "temperature": 0.7,
38 | "stop": conv.sep,
39 | }
40 | response = requests.post(worker_addr + "/worker_generate_stream", headers=headers,
41 | json=pload, stream=True)
42 |
43 | print(prompt.replace(conv.sep, "\n"), end="")
44 | for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
45 | if chunk:
46 | data = json.loads(chunk.decode("utf-8"))
47 | output = data["text"].split(conv.sep)[-1]
48 | print(output, end="\r")
49 | print("")
50 |
51 |
52 | if __name__ == "__main__":
53 | parser = argparse.ArgumentParser()
54 | parser.add_argument("--controller-address", type=str, default="http://localhost:21001")
55 | parser.add_argument("--worker-address", type=str)
56 | parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
57 | parser.add_argument("--max-new-tokens", type=int, default=32)
58 | parser.add_argument("--message", type=str, default=
59 | "Tell me a story with more than 1000 words.")
60 | args = parser.parse_args()
61 |
62 | main()
63 |
--------------------------------------------------------------------------------
/llava/train/train_mem.py:
--------------------------------------------------------------------------------
1 | # Adapted from https://github.com/lm-sys/FastChat. Below is the original copyright:
2 | # Adapted from tatsu-lab@stanford_alpaca. Below is the original copyright:
3 | # Make it more memory efficient by monkey patching the LLaMA model with FlashAttn.
4 |
5 | # Need to call this before importing transformers.
6 | from llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn
7 |
8 | replace_llama_attn_with_flash_attn()
9 |
10 | from llava.train.train import train
11 |
12 | if __name__ == "__main__":
13 | train()
14 |
--------------------------------------------------------------------------------
/llava/train/train_xformers.py:
--------------------------------------------------------------------------------
1 | # Make it more memory efficient by monkey patching the LLaMA model with xformers attention.
2 |
3 | # Need to call this before importing transformers.
4 | from llava.train.llama_xformers_attn_monkey_patch import (
5 | replace_llama_attn_with_xformers_attn,
6 | )
7 |
8 | replace_llama_attn_with_xformers_attn()
9 |
10 | from llava.train.train import train
11 |
12 | if __name__ == "__main__":
13 | train()
14 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/main.py
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_0.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_1.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_100.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_100.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_101.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_101.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_102.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_102.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_103.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_103.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_104.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_104.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_105.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_105.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_106.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_106.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_107.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_107.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_108.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_108.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_109.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_109.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_11.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_110.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_110.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_111.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_111.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_112.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_112.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_113.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_113.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_114.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_114.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_115.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_115.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_116.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_116.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_117.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_117.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_118.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_118.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_119.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_119.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_120.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_120.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_121.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_121.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_122.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_122.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_123.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_123.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_124.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_124.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_125.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_125.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_126.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_126.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_127.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_127.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_128.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_128.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_129.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_129.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_13.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_130.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_130.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_131.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_131.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_132.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_132.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_133.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_133.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_134.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_134.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_135.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_135.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_136.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_136.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_137.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_137.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_138.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_138.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_139.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_139.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_140.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_140.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_141.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_141.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_142.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_142.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_143.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_143.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_144.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_144.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_145.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_145.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_146.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_146.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_147.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_147.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_148.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_148.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_149.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_149.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_15.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_150.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_150.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_151.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_151.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_152.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_152.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_153.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_153.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_154.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_154.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_155.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_155.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_156.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_156.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_157.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_157.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_158.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_158.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_159.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_159.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_16.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_160.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_160.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_161.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_161.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_162.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_162.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_163.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_163.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_164.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_164.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_165.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_165.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_166.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_166.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_167.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_167.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_168.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_168.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_169.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_169.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_17.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_17.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_170.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_170.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_171.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_171.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_172.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_172.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_173.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_173.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_174.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_174.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_175.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_175.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_176.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_176.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_177.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_177.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_178.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_178.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_179.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_179.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_18.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_18.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_180.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_180.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_181.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_181.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_182.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_182.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_183.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_183.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_184.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_184.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_185.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_185.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_186.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_186.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_187.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_187.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_188.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_188.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_189.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_189.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_190.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_190.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_191.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_191.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_192.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_192.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_193.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_193.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_194.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_194.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_195.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_195.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_196.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_196.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_197.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_197.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_198.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_198.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_199.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_199.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_20.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_20.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_200.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_200.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_201.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_201.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_202.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_202.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_203.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_203.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_204.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_204.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_205.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_205.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_206.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_206.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_207.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_207.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_208.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_208.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_209.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_209.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_21.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_21.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_210.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_210.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_211.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_211.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_212.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_212.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_213.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_213.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_214.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_214.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_215.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_215.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_216.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_216.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_217.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_217.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_23.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_25.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_27.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_28.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_28.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_3.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_30.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_30.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_31.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_31.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_32.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_33.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_33.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_34.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_34.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_35.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_35.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_36.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_36.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_37.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_37.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_38.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_38.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_39.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_39.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_40.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_40.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_41.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_41.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_42.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_42.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_44.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_44.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_46.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_46.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_48.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_48.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_49.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_49.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_5.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_50.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_50.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_52.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_52.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_53.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_53.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_54.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_54.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_55.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_55.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_56.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_56.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_57.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_57.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_58.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_58.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_60.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_60.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_62.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_62.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_64.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_64.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_65.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_65.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_66.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_66.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_67.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_67.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_68.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_68.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_69.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_69.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_7.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_70.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_70.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_71.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_71.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_72.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_72.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_73.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_73.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_74.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_74.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_75.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_75.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_76.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_76.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_77.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_77.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_78.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_78.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_79.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_79.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_8.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_80.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_80.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_81.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_81.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_82.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_82.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_83.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_83.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_84.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_84.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_85.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_85.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_86.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_86.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_87.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_87.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_88.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_88.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_89.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_89.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_9.png
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_90.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_90.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_91.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_91.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_92.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_92.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_93.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_93.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_94.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_94.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_95.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_95.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_96.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_96.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_97.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_97.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_98.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_98.jpg
--------------------------------------------------------------------------------
/playground/data/eval/mmvet_images/v1_99.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pipilurj/bootstrapped-preference-optimization-BPO/ecaf28067e2ddb7ae3b53b7c1dd63fbfff33e89c/playground/data/eval/mmvet_images/v1_99.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=61.0"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "llava"
7 | version = "1.1.3"
8 | description = "Towards GPT-4 like large language and visual assistant."
9 | readme = "README.md"
10 | requires-python = ">=3.8"
11 | classifiers = [
12 | "Programming Language :: Python :: 3",
13 | "License :: OSI Approved :: Apache Software License",
14 | ]
15 | dependencies = [
16 | "torch==2.1.2", "torchvision==0.16.2",
17 | "transformers==4.31.0", "tokenizers>=0.12.1,<0.14", "sentencepiece==0.1.99", "shortuuid",
18 | "accelerate==0.21.0", "peft==0.4.0", "bitsandbytes==0.41.0",
19 | "pydantic<2,>=1", "markdown2[all]", "numpy", "scikit-learn==1.2.2",
20 | "gradio==3.35.2", "gradio_client==0.2.9",
21 | "requests", "httpx==0.24.0", "uvicorn", "fastapi",
22 | "einops==0.6.1", "einops-exts==0.0.4", "timm==0.6.13",
23 | "trl==0.7.2"
24 | ]
25 |
26 | [project.optional-dependencies]
27 | train = ["deepspeed==0.9.5", "ninja", "wandb"]
28 |
29 | [project.urls]
30 | "Homepage" = "https://llava-vl.github.io"
31 | "Bug Tracker" = "https://github.com/haotian-liu/LLaVA/issues"
32 |
33 | [tool.setuptools.packages.find]
34 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"]
35 |
36 | [tool.wheel]
37 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"]
38 |
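39 | # Usage note: `pip install -e .` installs the base package; `pip install -e ".[train]"`
40 | # additionally installs the optional training dependencies (deepspeed, ninja, wandb).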
--------------------------------------------------------------------------------
/qwen/README.md:
--------------------------------------------------------------------------------
1 | # BPO for Qwen
2 |
3 | Code modified from [VLFeedback](https://github.com/vlf-silkie/VLFeedback) to perform BPO on Qwen-VL-Chat using the BPO data.
4 |
5 | ### Training data
6 | Download ShareGPT4V from [here](https://huggingface.co/datasets/Lin-Chen/ShareGPT4V)
7 |
8 | Download COCO from [here](https://cocodataset.org/#home)
9 |
10 | Download dataset annotation from [here](https://huggingface.co/datasets/renjiepi/BPO)
11 |
12 | Extract data from ShareGPT4V and organize the images as follows:
13 |
14 | ```
15 | Image_root
16 | ├── coco/
17 | │   └── train2017/
18 | ├── llava/
19 | │   └── llava_pretrain/
20 | ├── sam/
21 | ├── share_textvqa/
22 | │   └── images/
23 | ├── web-celebrity/
24 | │   └── images/
25 | ├── web-landmark/
26 | │   └── images/
27 | └── wikiart/
28 |     └── images/
29 | ```
30 |
31 | ### Installation
32 |
33 | To run our training scripts, create a virtual environment and install the dependencies first.
34 |
35 | ```bash
36 | conda create -n silkie python=3.10 && conda activate silkie
37 | pip install -r requirements.txt
38 | ```
39 |
40 | ### Training
41 |
42 | Our training scripts support both single-node and multi-node training.
43 | We provide a `launch_dpo.py` script that handles both cases. If you want to launch a job locally, you can use:
44 |
45 | ```bash
46 | python launch_dpo.py --config dpo_config/example.yaml --working $WORKING_DIR
47 | ```
48 |
49 | If you want to launch a job on a Slurm cluster, specify `GPUS_PER_NODE` in `launch_dpo.py` and run:
50 |
51 | ```bash
52 | python launch_dpo.py --config dpo_config/example.yaml --working $WORKING_DIR --gpus $NUM_GPUS
53 | ```
54 |
55 | ## Citations
56 |
57 | ```bib
58 | @article{2023vlfeedback,
59 | author = {Lei Li and Zhihui Xie and Mukai Li and Shunian Chen and Peiyi Wang and Liang Chen and Yazheng Yang and Benyou Wang and Lingpeng Kong},
60 | title = {Silkie: Preference Distillation for Large Visual Language Models},
61 | publisher = {arXiv:2312.10665},
62 | year = {2023}
63 | }
64 | ```
65 |
66 | ## Acknowledgements
67 |
68 | We would like to thank the authors of [trl](https://github.com/huggingface/trl) and [Qwen-VL](https://github.com/QwenLM/Qwen-VL) for their great work.
69 |
--------------------------------------------------------------------------------
/qwen/dpo_config/example.yaml:
--------------------------------------------------------------------------------
1 | model_name_or_path: "Qwen/Qwen-VL-Chat"
2 | output_dir: null # to be set by the script
3 | bf16: true
4 | fix_vit: true
5 | num_train_epochs: 3
6 | per_device_train_batch_size: 2
7 | per_device_eval_batch_size: 2
8 | gradient_accumulation_steps: 8
9 | evaluation_strategy: "steps"
10 | eval_steps: 500
11 | save_strategy: "steps"
12 | save_steps: 100
13 | save_total_limit: 10
14 | learning_rate: 1e-5
15 | weight_decay: 0.05
16 | adam_beta2: 0.98
17 | warmup_ratio: 0.1
18 | lr_scheduler_type: "cosine"
19 | logging_steps: 10
20 | report_to: wandb
21 | run_name: silkie-paperconfig
22 | model_max_length: 2048
23 | gradient_checkpointing: true
24 | use_lora: true
25 | tf32: true
26 | logging_first_step: true
27 | remove_unused_columns: false
28 |
--------------------------------------------------------------------------------
/qwen/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate==0.23.0
2 | datasets==2.14.6
3 | deepspeed==0.11.0
4 | numpy==1.26.2
5 | peft==0.5.0
6 | PyYAML==6.0.1
7 | submitit==1.5.1
8 | torch==2.0.1
9 | torchvision==0.15.2
10 | transformers==4.32.1
11 | trl==0.7.2
12 | einops
13 | tiktoken
14 | matplotlib
15 | pillow
16 | transformers_stream_generator
17 | wandb
18 |
--------------------------------------------------------------------------------
/scripts/concatenate_json.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import json
3 |
4 | def merge_json(files):
5 | merged_data = {}
6 | for file in files:
7 | with open(file, 'r') as f:
8 | data = json.load(f)
9 | merged_data.update(data)
10 | return merged_data
11 |
12 | if __name__ == "__main__":
13 | files = sys.argv[1:]
14 | merged_data = merge_json(files)
15 | with open('merge.json', 'w') as f:
16 | json.dump(merged_data, f, indent=4)
17 |
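18 | # Usage sketch (hypothetical file names):
19 | #   python scripts/concatenate_json.py results_a.json results_b.json
20 | # The merged dictionary is written to merge.json in the current working directory.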
--------------------------------------------------------------------------------
/scripts/convert_gqa_for_eval.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import argparse
4 |
5 | parser = argparse.ArgumentParser()
6 | parser.add_argument("--src", type=str)
7 | parser.add_argument("--dst", type=str)
8 | args = parser.parse_args()
9 |
10 | all_answers = []
11 | for line_idx, line in enumerate(open(args.src)):
12 | res = json.loads(line)
13 | question_id = res['question_id']
14 | text = res['text'].rstrip('.').lower()
15 | all_answers.append({"questionId": question_id, "prediction": text})
16 |
17 | with open(args.dst, 'w') as f:
18 | json.dump(all_answers, f)
19 |
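20 | # Usage sketch (hypothetical paths):
21 | #   python scripts/convert_gqa_for_eval.py --src answers.jsonl --dst gqa_predictions.json
22 | # Each input line is a JSON record with question_id and text; the output is the
23 | # questionId/prediction list used for GQA evaluation.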
--------------------------------------------------------------------------------
/scripts/convert_mmbench_for_submission.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import argparse
4 | import pandas as pd
5 |
6 | def get_args():
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument("--annotation-file", type=str, required=True)
9 | parser.add_argument("--result-dir", type=str, required=True)
10 | parser.add_argument("--upload-dir", type=str, required=True)
11 | parser.add_argument("--experiment", type=str, required=True)
12 |
13 | return parser.parse_args()
14 |
15 | if __name__ == "__main__":
16 | args = get_args()
17 |
18 | df = pd.read_table(args.annotation_file)
19 |
20 | cur_df = df.copy()
21 | cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category'])
22 | cur_df.insert(6, 'prediction', None)
23 | for pred in open(os.path.join(args.result_dir, f"{args.experiment}.jsonl")):
24 | pred = json.loads(pred)
25 | cur_df.loc[df['index'] == pred['question_id'], 'prediction'] = pred['text']
26 |
27 | cur_df.to_excel(os.path.join(args.upload_dir, f"{args.experiment}.xlsx"), index=False, engine='openpyxl')
28 |
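29 | # Usage sketch (hypothetical paths and experiment name):
30 | #   python scripts/convert_mmbench_for_submission.py \
31 | #       --annotation-file mmbench_dev.tsv --result-dir answers/mmbench \
32 | #       --upload-dir upload/mmbench --experiment llava-v1.5-13b
33 | # Reads <result-dir>/<experiment>.jsonl and writes <upload-dir>/<experiment>.xlsx.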
--------------------------------------------------------------------------------
/scripts/convert_mmvet_for_eval.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import argparse
4 |
5 | parser = argparse.ArgumentParser()
6 | parser.add_argument("--src", type=str)
7 | parser.add_argument("--dst", type=str)
8 | args = parser.parse_args()
9 |
10 | cur_result = {}
11 |
12 | for line in open(args.src):
13 | data = json.loads(line)
14 | qid = data['question_id']
15 | # cur_result[f'v1_{qid}'] = data['text']
16 | cur_result[qid] = data['text']
17 |
18 | with open(args.dst, 'w') as f:
19 | json.dump(cur_result, f, indent=2)
20 |
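21 | # Usage sketch (hypothetical paths):
22 | #   python scripts/convert_mmvet_for_eval.py --src answers.jsonl --dst mmvet_results.json
23 | # Maps each question_id to its generated text, producing the JSON dict consumed by the
24 | # MM-Vet evaluator.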
--------------------------------------------------------------------------------
/scripts/convert_seed_for_submission.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import argparse
4 |
5 |
6 | def get_args():
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument("--annotation-file", type=str)
9 | parser.add_argument("--result-file", type=str)
10 | parser.add_argument("--result-upload-file", type=str)
11 | return parser.parse_args()
12 |
13 |
14 | def eval_single(result_file, eval_only_type=None):
15 | results = {}
16 | for line in open(result_file):
17 | row = json.loads(line)
18 | results[row['question_id']] = row
19 |
20 | type_counts = {}
21 | correct_counts = {}
22 | for question_data in data['questions']:
23 | if eval_only_type is not None and question_data['data_type'] != eval_only_type: continue
24 | data_type = question_data['question_type_id']
25 | type_counts[data_type] = type_counts.get(data_type, 0) + 1
26 | try:
27 | question_id = int(question_data['question_id'])
28 | except:
29 | question_id = question_data['question_id']
30 | if question_id not in results:
31 | correct_counts[data_type] = correct_counts.get(data_type, 0)
32 | continue
33 | row = results[question_id]
34 | if row['text'] == question_data['answer']:
35 | correct_counts[data_type] = correct_counts.get(data_type, 0) + 1
36 |
37 | total_count = 0
38 | total_correct = 0
39 | for data_type in sorted(type_counts.keys()):
40 | accuracy = correct_counts.get(data_type, 0) / type_counts[data_type] * 100
41 | if eval_only_type is None:
42 | print(f"{ques_type_id_to_name[data_type]}: {accuracy:.2f}%")
43 |
44 | total_count += type_counts[data_type]
45 | total_correct += correct_counts[data_type]
46 |
47 | total_accuracy = total_correct / total_count * 100
48 | if eval_only_type is None:
49 | print(f"Total accuracy: {total_accuracy:.2f}%")
50 | else:
51 | print(f"{eval_only_type} accuracy: {total_accuracy:.2f}%")
52 |
53 | return results
54 |
55 | if __name__ == "__main__":
56 | args = get_args()
57 | data = json.load(open(args.annotation_file))
58 | ques_type_id_to_name = {id:n for n,id in data['question_type'].items()}
59 |
60 | results = eval_single(args.result_file)
61 | eval_single(args.result_file, eval_only_type='image')
62 | eval_single(args.result_file, eval_only_type='video')
63 |
64 | with open(args.result_upload_file, 'w') as fp:
65 | for question in data['questions']:
66 | qid = question['question_id']
67 | if qid in results:
68 | result = results[qid]
69 | else:
70 | result = results[int(qid)]
71 | fp.write(json.dumps({
72 | 'question_id': qid,
73 | 'prediction': result['text']
74 | }) + '\n')
75 |
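76 | # Usage sketch (hypothetical paths):
77 | #   python scripts/convert_seed_for_submission.py --annotation-file SEED-Bench.json \
78 | #       --result-file answers/seed.jsonl --result-upload-file upload/seed.jsonl
79 | # Prints per-type and overall accuracy, then writes one {"question_id", "prediction"}
80 | # JSON line per annotated question.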
--------------------------------------------------------------------------------
/scripts/convert_sqa_to_llava.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import fire
4 | import re
5 | from convert_sqa_to_llava_base_prompt import build_prompt_chatbot
6 |
7 |
8 | def convert_to_llava(base_dir, split, prompt_format="QCM-LEA"):
9 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split]
10 | problems = json.load(open(os.path.join(base_dir, "problems.json")))
11 |
12 | split_problems = build_prompt_chatbot(
13 | problems, split_indices, prompt_format,
14 | use_caption=False, is_test=False)
15 |
16 | target_format = []
17 | for prob_id, (input, output) in split_problems.items():
18 | if input.startswith('Question: '):
19 | input = input.replace('Question: ', '')
20 | if output.startswith('Answer: '):
21 | output = output.replace('Answer: ', '')
22 |
23 | raw_prob_data = problems[prob_id]
24 | if raw_prob_data['image'] is None:
25 | target_format.append({
26 | "id": prob_id,
27 | "conversations": [
28 | {'from': 'human', 'value': f"{input}"},
29 | {'from': 'gpt', 'value': f"{output}"},
30 | ],
31 | })
32 |
33 | else:
34 | target_format.append({
35 | "id": prob_id,
36 | "image": os.path.join(prob_id, raw_prob_data['image']),
37 | "conversations": [
38 | {'from': 'human', 'value': f"{input}\n"},
39 | {'from': 'gpt', 'value': f"{output}"},
40 | ],
41 | })
42 |
43 | print(f'Number of samples: {len(target_format)}')
44 |
45 | with open(os.path.join(base_dir, f"llava_{split}_{prompt_format}.json"), "w") as f:
46 | json.dump(target_format, f, indent=2)
47 |
48 |
49 | def convert_to_jsonl(base_dir, split, prompt_format="QCM-LEPA"):
50 | split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split]
51 | problems = json.load(open(os.path.join(base_dir, "problems.json")))
52 |
53 | split_problems = build_prompt_chatbot(
54 | problems, split_indices, prompt_format,
55 | use_caption=False, is_test=False)
56 |
57 | writer = open(os.path.join(base_dir, f"scienceqa_{split}_{prompt_format}.jsonl"), "w")
58 | for prob_id, (input, output) in split_problems.items():
59 | if input.startswith('Question: '):
60 | input = input.replace('Question: ', '')
61 | if output.startswith('Answer: '):
62 | output = output.replace('Answer: ', '')
63 |
64 | raw_prob_data = problems[prob_id]
65 | if raw_prob_data['image'] is None:
66 | data = {
67 | "id": prob_id,
68 | "instruction": f"{input}",
69 | "output": f"{output}",
70 | }
71 |
72 | else:
73 | data = {
74 | "id": prob_id,
75 | "image": os.path.join(prob_id, raw_prob_data['image']),
76 | "instruction": f"{input}\n",
77 | "output": f"{output}",
78 | }
79 | writer.write(json.dumps(data) + '\n')
80 | writer.close()
81 |
82 |
83 | def main(task, **kwargs):
84 | globals()[task](**kwargs)
85 |
86 |
87 | if __name__ == "__main__":
88 | fire.Fire(main)
89 |
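Because main() forwards the task name through fire.Fire, the two converters above are exposed as sub-commands; a minimal invocation sketch with placeholder paths (base_dir is assumed to be the ScienceQA directory containing pid_splits.json and problems.json):

    python scripts/convert_sqa_to_llava.py convert_to_llava \
        --base_dir /path/to/ScienceQA/data/scienceqa --split train --prompt_format QCM-LEA
    python scripts/convert_sqa_to_llava.py convert_to_jsonl \
        --base_dir /path/to/ScienceQA/data/scienceqa --split val --prompt_format QCM-LEPA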
--------------------------------------------------------------------------------
/scripts/convert_vizwiz_for_submission.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import json
4 |
5 | from llava.eval.m4c_evaluator import EvalAIAnswerProcessor
6 |
7 |
8 | def parse_args():
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument('--annotation-file', type=str, required=True)
11 | parser.add_argument('--result-file', type=str, required=True)
12 | parser.add_argument('--result-upload-file', type=str, required=True)
13 | return parser.parse_args()
14 |
15 |
16 | if __name__ == '__main__':
17 |
18 | args = parse_args()
19 |
20 | os.makedirs(os.path.dirname(args.result_upload_file), exist_ok=True)
21 |
22 | results = []
23 | error_line = 0
24 | for line_idx, line in enumerate(open(args.result_file)):
25 | try:
26 | results.append(json.loads(line))
27 | except json.JSONDecodeError:
28 | error_line += 1
29 | results = {x['question_id']: x['text'] for x in results}
30 | test_split = [json.loads(line) for line in open(args.annotation_file)]
31 | split_ids = set([x['question_id'] for x in test_split])
32 |
33 | print(f'total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}')
34 |
35 | all_answers = []
36 |
37 | answer_processor = EvalAIAnswerProcessor()
38 |
39 | for x in test_split:
40 | assert x['question_id'] in results
41 | all_answers.append({
42 | 'image': x['image'],
43 | 'answer': answer_processor(results[x['question_id']])
44 | })
45 |
46 | with open(args.result_upload_file, 'w') as f:
47 | json.dump(all_answers, f)
48 |
--------------------------------------------------------------------------------
/scripts/convert_vqav2_for_submission.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | import json
4 |
5 | from llava.eval.m4c_evaluator import EvalAIAnswerProcessor
6 |
7 |
8 | def parse_args():
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument('--dir', type=str, default="./playground/data/eval/vqav2")
11 | parser.add_argument('--ckpt', type=str, required=True)
12 | parser.add_argument('--split', type=str, required=True)
13 | return parser.parse_args()
14 |
15 |
16 | if __name__ == '__main__':
17 |
18 | args = parse_args()
19 |
20 | src = os.path.join(args.dir, 'answers', args.split, args.ckpt, 'merge.jsonl')
21 | test_split = os.path.join(args.dir, 'llava_vqav2_mscoco_test2015.jsonl')
22 | dst = os.path.join(args.dir, 'answers_upload', args.split, f'{args.ckpt}.json')
23 | os.makedirs(os.path.dirname(dst), exist_ok=True)
24 |
25 | results = []
26 | error_line = 0
27 | for line_idx, line in enumerate(open(src)):
28 | try:
29 | results.append(json.loads(line))
30 | except json.JSONDecodeError:
31 | error_line += 1
32 |
33 | results = {x['question_id']: x['text'] for x in results}
34 | test_split = [json.loads(line) for line in open(test_split)]
35 | split_ids = set([x['question_id'] for x in test_split])
36 |
37 | print(f'total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}')
38 |
39 | all_answers = []
40 |
41 | answer_processor = EvalAIAnswerProcessor()
42 |
43 | for x in test_split:
44 | if x['question_id'] not in results:
45 | all_answers.append({
46 | 'question_id': x['question_id'],
47 | 'answer': ''
48 | })
49 | else:
50 | all_answers.append({
51 | 'question_id': x['question_id'],
52 | 'answer': answer_processor(results[x['question_id']])
53 | })
54 |
55 | with open(dst, 'w') as f:
56 | json.dump(all_answers, f)
57 |
--------------------------------------------------------------------------------
/scripts/eval_mmvet.h:
--------------------------------------------------------------------------------
1 | bash scripts/v1_5/eval/eval_multi_lora.sh ../pretrained_weights/llava1.5_7b ./checkpoints/dpo/llava1.5_7b-lora32-lr2e-6-shargpt4_3w_llava_3w_coco_3w-1e playground/data/eval/mm-vet.jsonl results/mmvet/llava1.5_7b-lora32-lr2e-6-shargpt4_3w_llava_3w_coco_3w-1e playground/data/eval/mmvet_images 8 0 0
2 | bash scripts/v1_5/eval/eval_multi_lora.sh ../pretrained_weights/llava1.5_7b ./checkpoints/dpo/llava1.5_7b-lora32-lr2e-6-shargpt4_4w_llava_4w_coco_4w-1e playground/data/eval/mm-vet.jsonl results/mmvet/llava1.5_7b-lora32-lr2e-6-shargpt4_4w_llava_4w_coco_4w-1e playground/data/eval/mmvet_images 8 0 0
3 | bash scripts/v1_5/eval/eval_multi_lora.sh ../pretrained_weights/llava1.5_7b ./checkpoints/dpo/llava1.5_7b-lora32-lr2e-6-shargpt4_5w_llava_5w_coco_5w-1e playground/data/eval/mm-vet.jsonl results/mmvet/llava1.5_7b-lora32-lr2e-6-shargpt4_5w_llava_5w_coco_5w-1e playground/data/eval/mmvet_images 8 0 0
4 | bash scripts/v1_5/eval/eval_multi_lora.sh ../pretrained_weights/llava1.5_7b ./checkpoints/dpo/llava1.5_7b-lora32-lr2e-6-shargpt4_6w_llava_6w_coco_6w-1e playground/data/eval/mm-vet.jsonl results/mmvet/llava1.5_7b-lora32-lr2e-6-shargpt4_6w_llava_6w_coco_6w-1e playground/data/eval/mmvet_images 8 0 0
5 |
--------------------------------------------------------------------------------
/scripts/extract_mm_projector.py:
--------------------------------------------------------------------------------
1 | """
2 | This is just a utility that I use to extract the projector for quantized models.
3 | It is NOT necessary at all to train, or run inference/serve demos.
4 | Use this script ONLY if you fully understand its implications.
5 | """
6 |
7 |
8 | import os
9 | import argparse
10 | import torch
11 | import json
12 | from collections import defaultdict
13 |
14 |
15 | def parse_args():
16 | parser = argparse.ArgumentParser(description='Extract MMProjector weights')
17 | parser.add_argument('--model-path', type=str, help='model folder')
18 | parser.add_argument('--output', type=str, help='output file')
19 | args = parser.parse_args()
20 | return args
21 |
22 |
23 | if __name__ == '__main__':
24 | args = parse_args()
25 |
26 | keys_to_match = ['mm_projector']
27 | ckpt_to_key = defaultdict(list)
28 | try:
29 | model_indices = json.load(open(os.path.join(args.model_path, 'pytorch_model.bin.index.json')))
30 | for k, v in model_indices['weight_map'].items():
31 | if any(key_match in k for key_match in keys_to_match):
32 | ckpt_to_key[v].append(k)
33 | except FileNotFoundError:
34 | # Smaller models or model checkpoints saved by DeepSpeed.
35 | v = 'pytorch_model.bin'
36 | for k in torch.load(os.path.join(args.model_path, v), map_location='cpu').keys():
37 | if any(key_match in k for key_match in keys_to_match):
38 | ckpt_to_key[v].append(k)
39 |
40 | loaded_weights = {}
41 |
42 | for ckpt_name, weight_keys in ckpt_to_key.items():
43 | ckpt = torch.load(os.path.join(args.model_path, ckpt_name), map_location='cpu')
44 | for k in weight_keys:
45 | loaded_weights[k] = ckpt[k]
46 |
47 | torch.save(loaded_weights, args.output)
48 |
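A minimal invocation sketch with placeholder paths; the extracted weights can then be passed as --pretrain_mm_mlp_adapter in the finetuning scripts below:

    python scripts/extract_mm_projector.py \
        --model-path ./checkpoints/llava-vicuna-v1-3-7b-pretrain \
        --output ./checkpoints/llava-vicuna-v1-3-7b-pretrain/mm_projector.bin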
--------------------------------------------------------------------------------
/scripts/finetune.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5!
4 |
5 | # Uncomment and set the following variables correspondingly to run this script:
6 |
7 | ################## VICUNA ##################
8 | # PROMPT_VERSION=v1
9 | # MODEL_VERSION="vicuna-v1-3-7b"
10 | ################## VICUNA ##################
11 |
12 | ################## LLaMA-2 ##################
13 | # PROMPT_VERSION="llava_llama_2"
14 | # MODEL_VERSION="llama-2-7b-chat"
15 | ################## LLaMA-2 ##################
16 |
17 | deepspeed llava/train/train_mem.py \
18 | --deepspeed ./scripts/zero2.json \
19 | --model_name_or_path ./checkpoints/$MODEL_VERSION \
20 | --version $PROMPT_VERSION \
21 | --data_path ./playground/data/llava_instruct_80k.json \
22 | --image_folder /path/to/coco/train2017 \
23 | --vision_tower openai/clip-vit-large-patch14 \
24 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \
25 | --mm_vision_select_layer -2 \
26 | --mm_use_im_start_end False \
27 | --mm_use_im_patch_token False \
28 | --bf16 True \
29 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune \
30 | --num_train_epochs 1 \
31 | --per_device_train_batch_size 16 \
32 | --per_device_eval_batch_size 4 \
33 | --gradient_accumulation_steps 1 \
34 | --evaluation_strategy "no" \
35 | --save_strategy "steps" \
36 | --save_steps 50000 \
37 | --save_total_limit 1 \
38 | --learning_rate 2e-5 \
39 | --weight_decay 0. \
40 | --warmup_ratio 0.03 \
41 | --lr_scheduler_type "cosine" \
42 | --logging_steps 1 \
43 | --tf32 True \
44 | --model_max_length 2048 \
45 | --gradient_checkpointing True \
46 | --dataloader_num_workers 4 \
47 | --lazy_preprocess True \
48 | --report_to wandb
49 |
--------------------------------------------------------------------------------
/scripts/finetune_bpo.sh:
--------------------------------------------------------------------------------
1 | deepspeed llava/train/bpo_llava.py \
2 | --mm_projector_lr 2e-6 \
3 | --mm_projector_type mlp2x_gelu \
4 | --learning_rate 2e-6 \
5 | --deepspeed ./scripts/zero2.json \
6 | --lora_enable True \
7 | --lora_r 32 \
8 | --lora_alpha 256 \
9 | --model_name_or_path path-to-model \
10 | --version v1 \
11 | --data_path path-to-json-annotation-file \
12 | --image_folder path-to-image-folder \
13 | --vision_tower openai/clip-vit-large-patch14 \
14 | --mm_vision_select_layer -2 \
15 | --mm_use_im_start_end False \
16 | --mm_use_im_patch_token False \
17 | --bf16 True \
18 | --output_dir path-to-output \
19 | --num_train_epochs 2 \
20 | --per_device_train_batch_size 4 \
21 | --per_device_eval_batch_size 4 \
22 | --gradient_accumulation_steps 1 \
23 | --evaluation_strategy "no" \
24 | --save_strategy "no" \
25 | --save_steps 5000 \
26 | --save_total_limit 1 \
27 | --weight_decay 0. \
28 | --warmup_ratio 0.03 \
29 | --lr_scheduler_type "cosine" \
30 | --logging_steps 1 \
31 | --tf32 True \
32 | --model_max_length 2048 \
33 | --gradient_checkpointing True \
34 | --dataloader_num_workers 4 \
35 | --lazy_preprocess True \
36 | --lora_enable
37 |
38 | bash scripts/v1_5/eval/eval_multi_lora.sh path-to-model path-to-lora playground/data/eval/mm-vet.jsonl path-to-result path-to-images gpu-num temperature start_gpu
39 | python scripts/convert_mmvet_for_eval.py --src path-to-result-jsonl --dst path-to-result-json
40 |
--------------------------------------------------------------------------------
/scripts/finetune_bpo_flash.sh:
--------------------------------------------------------------------------------
1 | deepspeed llava/train/bpo_llava_flash.py \
2 | --mm_projector_lr 2e-6 \
3 | --mm_projector_type mlp2x_gelu \
4 | --learning_rate 2e-6 \
5 | --deepspeed ./scripts/zero2.json \
6 | --lora_enable True \
7 | --lora_r 32 \
8 | --lora_alpha 256 \
9 | --model_name_or_path path-to-model \
10 | --version v1 \
11 | --data_path path-to-json-annotation-file \
12 | --image_folder path-to-image-folder \
13 | --vision_tower openai/clip-vit-large-patch14 \
14 | --mm_vision_select_layer -2 \
15 | --mm_use_im_start_end False \
16 | --mm_use_im_patch_token False \
17 | --bf16 True \
18 | --output_dir path-to-output \
19 | --num_train_epochs 2 \
20 | --per_device_train_batch_size 4 \
21 | --per_device_eval_batch_size 4 \
22 | --gradient_accumulation_steps 1 \
23 | --evaluation_strategy "no" \
24 | --save_strategy "no" \
25 | --save_steps 5000 \
26 | --save_total_limit 1 \
27 | --weight_decay 0. \
28 | --warmup_ratio 0.03 \
29 | --lr_scheduler_type "cosine" \
30 | --logging_steps 1 \
31 | --tf32 True \
32 | --model_max_length 2048 \
33 | --gradient_checkpointing True \
34 | --dataloader_num_workers 4 \
35 | --lazy_preprocess True \
36 | --lora_enable
37 |
38 | bash scripts/v1_5/eval/eval_multi_lora.sh path-to-model path-to-lora playground/data/eval/mm-vet.jsonl path-to-result path-to-images gpu-num temperature start_gpu
39 | python scripts/convert_mmvet_for_eval.py --src path-to-result-jsonl --dst path-to-result-json
40 |
--------------------------------------------------------------------------------
/scripts/finetune_full_schedule.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5!
4 |
5 | # Uncomment and set the following variables correspondingly to run this script:
6 |
7 | ################## VICUNA ##################
8 | # PROMPT_VERSION=v1
9 | # MODEL_VERSION="vicuna-v1-3-7b"
10 | ################## VICUNA ##################
11 |
12 | ################## LLaMA-2 ##################
13 | # PROMPT_VERSION="llava_llama_2"
14 | # MODEL_VERSION="llama-2-7b-chat"
15 | ################## LLaMA-2 ##################
16 |
17 | deepspeed llava/train/train_mem.py \
18 | --deepspeed ./scripts/zero2.json \
19 | --model_name_or_path ./checkpoints/$MODEL_VERSION \
20 | --version $PROMPT_VERSION \
21 | --data_path ./playground/data/llava_instruct_158k.json \
22 | --image_folder /path/to/coco/train2017 \
23 | --vision_tower openai/clip-vit-large-patch14 \
24 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \
25 | --mm_vision_select_layer -2 \
26 | --mm_use_im_start_end False \
27 | --mm_use_im_patch_token False \
28 | --bf16 True \
29 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune \
30 | --num_train_epochs 3 \
31 | --per_device_train_batch_size 16 \
32 | --per_device_eval_batch_size 4 \
33 | --gradient_accumulation_steps 1 \
34 | --evaluation_strategy "no" \
35 | --save_strategy "steps" \
36 | --save_steps 50000 \
37 | --save_total_limit 1 \
38 | --learning_rate 2e-5 \
39 | --weight_decay 0. \
40 | --warmup_ratio 0.03 \
41 | --lr_scheduler_type "cosine" \
42 | --logging_steps 1 \
43 | --tf32 True \
44 | --model_max_length 2048 \
45 | --gradient_checkpointing True \
46 | --dataloader_num_workers 4 \
47 | --lazy_preprocess True \
48 | --report_to wandb
49 |
--------------------------------------------------------------------------------
/scripts/finetune_lora.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5!
4 |
5 | # Uncomment and set the following variables correspondingly to run this script:
6 |
7 | ################## VICUNA ##################
8 | # PROMPT_VERSION=v1
9 | # MODEL_VERSION="vicuna-v1-3-7b"
10 | ################## VICUNA ##################
11 |
12 | ################## LLaMA-2 ##################
13 | # PROMPT_VERSION="llava_llama_2"
14 | # MODEL_VERSION="llama-2-7b-chat"
15 | ################## LLaMA-2 ##################
16 |
17 | deepspeed llava/train/train_mem.py \
18 | --deepspeed ./scripts/zero2.json \
19 | --lora_enable True \
20 | --model_name_or_path ./checkpoints/$MODEL_VERSION \
21 | --version $PROMPT_VERSION \
22 | --data_path ./playground/data/llava_instruct_80k.json \
23 | --image_folder /path/to/coco/train2017 \
24 | --vision_tower openai/clip-vit-large-patch14 \
25 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \
26 | --mm_vision_select_layer -2 \
27 | --mm_use_im_start_end False \
28 | --mm_use_im_patch_token False \
29 | --bf16 True \
30 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune_lora \
31 | --num_train_epochs 1 \
32 | --per_device_train_batch_size 16 \
33 | --per_device_eval_batch_size 4 \
34 | --gradient_accumulation_steps 1 \
35 | --evaluation_strategy "no" \
36 | --save_strategy "steps" \
37 | --save_steps 50000 \
38 | --save_total_limit 1 \
39 | --learning_rate 2e-5 \
40 | --weight_decay 0. \
41 | --warmup_ratio 0.03 \
42 | --lr_scheduler_type "cosine" \
43 | --logging_steps 1 \
44 | --tf32 True \
45 | --model_max_length 2048 \
46 | --gradient_checkpointing True \
47 | --lazy_preprocess True \
48 | --dataloader_num_workers 4 \
49 | --report_to wandb
50 |
--------------------------------------------------------------------------------
/scripts/finetune_qlora.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5!
4 |
5 | # Uncomment and set the following variables correspondingly to run this script:
6 |
7 | ################## VICUNA ##################
8 | # PROMPT_VERSION=v1
9 | # MODEL_VERSION="vicuna-v1-3-7b"
10 | ################## VICUNA ##################
11 |
12 | ################## LLaMA-2 ##################
13 | # PROMPT_VERSION="llava_llama_2"
14 | # MODEL_VERSION="llama-2-7b-chat"
15 | ################## LLaMA-2 ##################
16 |
17 | deepspeed llava/train/train_mem.py \
18 | --deepspeed ./scripts/zero2.json \
19 | --lora_enable True \
20 | --bits 4 \
21 | --model_name_or_path ./checkpoints/$MODEL_VERSION \
22 | --version $PROMPT_VERSION \
23 | --data_path ./playground/data/llava_instruct_80k.json \
24 | --image_folder /path/to/coco/train2017 \
25 | --vision_tower openai/clip-vit-large-patch14 \
26 | --pretrain_mm_mlp_adapter ./checkpoints/llava-$MODEL_VERSION-pretrain/mm_projector.bin \
27 | --mm_vision_select_layer -2 \
28 | --mm_use_im_start_end False \
29 | --mm_use_im_patch_token False \
30 | --bf16 True \
31 | --output_dir ./checkpoints/llava-$MODEL_VERSION-finetune_lora \
32 | --num_train_epochs 1 \
33 | --per_device_train_batch_size 16 \
34 | --per_device_eval_batch_size 4 \
35 | --gradient_accumulation_steps 1 \
36 | --evaluation_strategy "no" \
37 | --save_strategy "steps" \
38 | --save_steps 50000 \
39 | --save_total_limit 1 \
40 | --learning_rate 2e-5 \
41 | --weight_decay 0. \
42 | --warmup_ratio 0.03 \
43 | --lr_scheduler_type "cosine" \
44 | --logging_steps 1 \
45 | --tf32 True \
46 | --model_max_length 2048 \
47 | --gradient_checkpointing True \
48 | --lazy_preprocess True \
49 | --dataloader_num_workers 4 \
50 | --report_to wandb
51 |
--------------------------------------------------------------------------------
/scripts/finetune_sft.sh:
--------------------------------------------------------------------------------
1 | deepspeed --include=localhost:0,1,2,3,4,5,6,7 llava/train/train.py --mm_projector_lr 2e-6 --mm_projector_type mlp2x_gelu --learning_rate 2e-6 --deepspeed ./scripts/zero2.json --lora_enable True --lora_r 32 --lora_alpha 256 --model_name_or_path ../pretrained_weights/llava1.5_7b --version v1 --data_path playground/data/train/dpo/sft/vicuna1.3_shargpt4_1w_llavar_whole_coco3w.json --image_folder ../data/sharegpt4v/images --vision_tower openai/clip-vit-large-patch14 --pretrain_mm_mlp_adapter ../pretrained_weights/llava1.5_7b/mm_projector.bin --mm_vision_select_layer -2 --mm_use_im_start_end False --mm_use_im_patch_token False --bf16 True --output_dir ./checkpoints/dpo/sft/llava1.5_7b-lora32-lr2e-6-shargpt4_1w_llavar_whole_coco3w-1e --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 1 --evaluation_strategy "no" --save_strategy "no" --save_steps 50000000 --save_total_limit 1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type "cosine" --logging_steps 1 --tf32 True --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 4 --lazy_preprocess True --lora_enable
2 | deepspeed --include=localhost:0,1,2,3,4,5,6,7 llava/train/train.py --mm_projector_lr 1e-5 --mm_projector_type mlp2x_gelu --learning_rate 1e-5 --deepspeed ./scripts/zero2.json --lora_enable True --lora_r 32 --lora_alpha 256 --model_name_or_path ../pretrained_weights/llava1.5_7b --version v1 --data_path playground/data/train/dpo/sft/vicuna1.3_shargpt4_1w_llavar_whole_coco3w.json --image_folder ../data/sharegpt4v/images --vision_tower openai/clip-vit-large-patch14 --pretrain_mm_mlp_adapter ../pretrained_weights/llava1.5_7b/mm_projector.bin --mm_vision_select_layer -2 --mm_use_im_start_end False --mm_use_im_patch_token False --bf16 True --output_dir ./checkpoints/dpo/sft/llava1.5_7b-lora32-lr1e-5-shargpt4_1w_llavar_whole_coco3w-1e --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 1 --evaluation_strategy "no" --save_strategy "no" --save_steps 50000000 --save_total_limit 1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type "cosine" --logging_steps 1 --tf32 True --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 4 --lazy_preprocess True --lora_enable
3 |
--------------------------------------------------------------------------------
/scripts/finetune_sqa.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5!
4 |
5 | deepspeed llava/train/train_mem.py \
6 | --deepspeed ./scripts/zero2.json \
7 | --model_name_or_path lmsys/vicuna-13b-v1.3 \
8 | --version $PROMPT_VERSION \
9 | --data_path /Data/ScienceQA/data/scienceqa/llava_train_QCM-LEA.json \
10 | --image_folder /Data/ScienceQA/data/scienceqa/images/train \
11 | --vision_tower openai/clip-vit-large-patch14 \
12 | --pretrain_mm_mlp_adapter ./checkpoints/huggingface/liuhaotian/llava-pretrain-vicuna-13b-v1.3/mm_projector.bin \
13 | --mm_vision_select_layer -2 \
14 | --mm_use_im_start_end False \
15 | --mm_use_im_patch_token False \
16 | --bf16 True \
17 | --output_dir ./checkpoints/llava-vicuna-13b-v1.3-pretrain_lcs558k_plain-ScienceQA_QCM_LEA-12e \
18 | --num_train_epochs 12 \
19 | --per_device_train_batch_size 16 \
20 | --per_device_eval_batch_size 4 \
21 | --gradient_accumulation_steps 1 \
22 | --evaluation_strategy "no" \
23 | --save_strategy "steps" \
24 | --save_steps 50000 \
25 | --save_total_limit 1 \
26 | --learning_rate 2e-5 \
27 | --weight_decay 0. \
28 | --warmup_ratio 0.03 \
29 | --lr_scheduler_type "cosine" \
30 | --logging_steps 1 \
31 | --tf32 True \
32 | --model_max_length 2048 \
33 | --gradient_checkpointing True \
34 | --dataloader_num_workers 4 \
35 | --lazy_preprocess True \
36 | --report_to wandb
37 |
--------------------------------------------------------------------------------
/scripts/merge_lora_weights.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from llava.model.builder import load_pretrained_model
3 | from llava.mm_utils import get_model_name_from_path
4 |
5 |
6 | def merge_lora(args):
7 | model_name = get_model_name_from_path(args.model_path)
8 | tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, device_map='cpu')
9 |
10 | model.save_pretrained(args.save_model_path)
11 | tokenizer.save_pretrained(args.save_model_path)
12 |
13 |
14 | if __name__ == "__main__":
15 | parser = argparse.ArgumentParser()
16 | parser.add_argument("--model-path", type=str, required=True)
17 | parser.add_argument("--model-base", type=str, required=True)
18 | parser.add_argument("--save-model-path", type=str, required=True)
19 |
20 | args = parser.parse_args()
21 |
22 | merge_lora(args)
23 |
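A minimal invocation sketch with placeholder paths, assuming --model-path points at a LoRA checkpoint directory and --model-base at the base model it was trained from:

    python scripts/merge_lora_weights.py \
        --model-path ./checkpoints/llava-v1.5-13b-lora \
        --model-base lmsys/vicuna-13b-v1.5 \
        --save-model-path ./checkpoints/llava-v1.5-13b-merged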
--------------------------------------------------------------------------------
/scripts/merge_lora_weights_reward.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from llava.model.builder import load_pretrained_model, load_reward_model
3 | from llava.mm_utils import get_model_name_from_path
4 |
5 |
6 | def merge_lora(args):
7 | model_name = get_model_name_from_path(args.model_path)
8 | tokenizer, model, image_processor, context_len = load_reward_model(args.model_path, args.model_base, model_name, device_map='cpu')
9 |
10 | model.save_pretrained(args.save_model_path)
11 | tokenizer.save_pretrained(args.save_model_path)
12 |
13 |
14 | if __name__ == "__main__":
15 | parser = argparse.ArgumentParser()
16 | parser.add_argument("--model-path", type=str, required=True)
17 | parser.add_argument("--model-base", type=str, required=True)
18 | parser.add_argument("--save-model-path", type=str, required=True)
19 |
20 | args = parser.parse_args()
21 |
22 | merge_lora(args)
23 |
--------------------------------------------------------------------------------
/scripts/pretrain.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # IMPORTANT: this is the training script for the original LLaVA, NOT FOR LLaVA V1.5!
4 |
5 | # Uncomment and set the following variables correspondingly to run this script:
6 |
7 | # MODEL_VERSION=vicuna-v1-3-7b
8 | # MODEL_VERSION=llama-2-7b-chat
9 |
10 | ########### DO NOT CHANGE ###########
11 | ########### USE THIS FOR BOTH ###########
12 | PROMPT_VERSION=plain
13 | ########### DO NOT CHANGE ###########
14 |
15 | deepspeed llava/train/train_mem.py \
16 | --deepspeed ./scripts/zero2.json \
17 | --model_name_or_path ./checkpoints/$MODEL_VERSION \
18 | --version $PROMPT_VERSION \
19 | --data_path /path/to/pretrain_data.json \
20 | --image_folder /path/to/images \
21 | --vision_tower openai/clip-vit-large-patch14 \
22 | --tune_mm_mlp_adapter True \
23 | --mm_vision_select_layer -2 \
24 | --mm_use_im_start_end False \
25 | --mm_use_im_patch_token False \
26 | --bf16 True \
27 | --output_dir ./checkpoints/llava-$MODEL_VERSION-pretrain \
28 | --num_train_epochs 1 \
29 | --per_device_train_batch_size 16 \
30 | --per_device_eval_batch_size 4 \
31 | --gradient_accumulation_steps 1 \
32 | --evaluation_strategy "no" \
33 | --save_strategy "steps" \
34 | --save_steps 24000 \
35 | --save_total_limit 1 \
36 | --learning_rate 2e-3 \
37 | --weight_decay 0. \
38 | --warmup_ratio 0.03 \
39 | --lr_scheduler_type "cosine" \
40 | --logging_steps 1 \
41 | --tf32 True \
42 | --model_max_length 2048 \
43 | --gradient_checkpointing True \
44 | --dataloader_num_workers 4 \
45 | --lazy_preprocess True \
46 | --report_to wandb
47 |
--------------------------------------------------------------------------------
/scripts/pretrain_xformers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Uncomment and set the following variables correspondingly to run this script:
4 |
5 | # MODEL_VERSION=vicuna-v1-3-7b
6 | # MODEL_VERSION=llama-2-7b-chat
7 |
8 | ########### DO NOT CHANGE ###########
9 | ########### USE THIS FOR BOTH ###########
10 | PROMPT_VERSION=plain
11 | ########### DO NOT CHANGE ###########
12 |
13 | deepspeed llava/train/train_xformers.py \
14 | --deepspeed ./scripts/zero2.json \
15 | --model_name_or_path ./checkpoints/$MODEL_VERSION \
16 | --version $PROMPT_VERSION \
17 | --data_path /path/to/pretrain_data.json \
18 | --image_folder /path/to/images \
19 | --vision_tower openai/clip-vit-large-patch14 \
20 | --tune_mm_mlp_adapter True \
21 | --mm_vision_select_layer -2 \
22 | --mm_use_im_start_end False \
23 | --mm_use_im_patch_token False \
24 | --bf16 False \
25 | --output_dir ./checkpoints/llava-$MODEL_VERSION-pretrain \
26 | --num_train_epochs 1 \
27 | --per_device_train_batch_size 4 \
28 | --per_device_eval_batch_size 4 \
29 | --gradient_accumulation_steps 4 \
30 | --evaluation_strategy "no" \
31 | --save_strategy "steps" \
32 | --save_steps 24000 \
33 | --save_total_limit 1 \
34 | --learning_rate 2e-3 \
35 | --weight_decay 0. \
36 | --warmup_ratio 0.03 \
37 | --lr_scheduler_type "cosine" \
38 | --logging_steps 1 \
39 | --tf32 False \
40 | --model_max_length 2048 \
41 | --gradient_checkpointing True \
42 | --dataloader_num_workers 4 \
43 | --lazy_preprocess True \
44 | --report_to wandb
45 |
--------------------------------------------------------------------------------
/scripts/sqa_eval_batch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CHUNKS=8
4 | for IDX in {0..7}; do
5 | CUDA_VISIBLE_DEVICES=$IDX python -m llava.eval.model_vqa_science \
6 | --model-path liuhaotian/llava-lcs558k-scienceqa-vicuna-13b-v1.3 \
7 | --question-file ~/haotian/datasets/ScienceQA/data/scienceqa/llava_test_QCM-LEA.json \
8 | --image-folder ~/haotian/datasets/ScienceQA/data/scienceqa/images/test \
9 | --answers-file ./test_llava-13b-chunk${IDX}.jsonl \
10 | --num-chunks $CHUNKS \
11 | --chunk-idx $IDX \
12 | --conv-mode llava_v1 &
13 | done
14 |
--------------------------------------------------------------------------------
/scripts/sqa_eval_gather.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CHUNKS=8
4 | output_file="test_llava-13b.jsonl"
5 |
6 | # Clear out the output file if it exists.
7 | > "$output_file"
8 |
9 | # Loop through the indices and concatenate each file.
10 | for idx in $(seq 0 $((CHUNKS-1))); do
11 | cat "./test_llava-13b-chunk${idx}.jsonl" >> "$output_file"
12 | done
13 |
14 | python llava/eval/eval_science_qa.py \
15 | --base-dir ~/haotian/datasets/ScienceQA/data/scienceqa \
16 | --result-file ./test_llava-13b.jsonl \
17 | --output-file ./test_llava-13b_output.json \
18 | --output-result ./test_llava-13b_result.json
19 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/bootstrap_dpo_multi.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Check that exactly ten arguments are passed
4 | if [ "$#" -ne 10 ]; then
5 | echo "Usage: $0 <model_base> <model_path> <question_path> <base_answer_path> <image_folder> <num_samples> <subset> <N> <temperature> <GS>"
6 | exit 1
7 | fi
8 |
9 | # Assign the command line arguments to variables
10 | model_base=$1
11 | model_path=$2
12 | question_path=$3
13 | base_answer_path=$4
14 | image_folder=$5
15 | num_samples=$6
16 | subset=$7
17 | N=$8
18 | temperature=$9
19 | GS=${10}
20 |
21 | # Loop over each chunk/process
22 | for (( chunk_id=0; chunk_id "${base_answer_path}_merged.jsonl"
47 | for ((i=0; i> "${base_answer_path}_merged.jsonl"
50 | done
51 | # remove the unmerged files
52 | for (( chunk_id=0; chunk_id "
6 | exit 1
7 | fi
8 |
9 | # Assign the command line arguments to variables
10 | model_base=$1
11 | model_path=$2
12 | question_path=$3
13 | base_answer_path=$4
14 | image_folder=$5
15 | num_samples=$6
16 | subset=$7
17 | N=$8
18 | temperature=$9
19 | GS=${10}
20 |
21 | # Loop over each chunk/process
22 | for (( chunk_id=0; chunk_id "${base_answer_path}_merged.jsonl"
47 | for ((i=0; i> "${base_answer_path}_merged.jsonl"
50 | done
51 | # remove the unmerged files
52 | for (( chunk_id=0; chunk_id "
6 | exit 1
7 | fi
8 |
9 | # Assign the command line arguments to variables
10 | model_path=$1
11 | question_path=$2
12 | base_answer_path=$3
13 | image_folder=$4
14 | N=$5
15 | temperature=$6
16 | GS=$7
17 |
18 | # Loop over each chunk/process
19 | for (( chunk_id=0; chunk_id "${base_answer_path}_merged.jsonl"
43 | for ((i=0; i> "${base_answer_path}_merged.jsonl"
46 | done
47 | # remove the unmerged files
48 | for (( chunk_id=0; chunk_id "
6 | exit 1
7 | fi
8 |
9 | # Assign the command line arguments to variables
10 | model_path=$1
11 | lora_path=$2
12 | question_path=$3
13 | base_answer_path=$4
14 | image_folder=$5
15 | N=$6
16 | temperature=$7
17 | GS=$8
18 |
19 | # Loop over each chunk/process
20 | for (( chunk_id=0; chunk_id "${base_answer_path}_merged.jsonl"
44 | for ((i=0; i> "${base_answer_path}_merged.jsonl"
47 | done
48 | # remove the unmerged files
49 | for (( chunk_id=0; chunk_id "
6 | exit 1
7 | fi
8 |
9 | # Assign the command line arguments to variables
10 | model_path=$1
11 | question_path=$2
12 | base_answer_path=$3
13 | image_folder=$4
14 | N=$5
15 | temperature=$6
16 | GS=$7
17 | TYPE=$8
18 |
19 | # Loop over each chunk/process
20 | for (( chunk_id=0; chunk_id "${base_answer_path}_merged.jsonl"
44 | for ((i=0; i> "${base_answer_path}_merged.jsonl"
47 | done
48 | # remove the unmerged files
49 | for (( chunk_id=0; chunk_id "
6 | exit 1
7 | fi
8 |
9 | # Assign the command line arguments to variables
10 | model_path=$1
11 | lora_path=$2
12 | question_path=$3
13 | base_answer_path=$4
14 | image_folder=$5
15 | N=$6
16 | temperature=$7
17 | GS=$8
18 |
19 | # Loop over each chunk/process
20 | for (( chunk_id=0; chunk_id "${base_answer_path}_merged.jsonl"
44 | for ((i=0; i> "${base_answer_path}.jsonl"
47 | done
48 | # remove the unmerged files
49 | for (( chunk_id=0; chunk_id "
6 | exit 1
7 | fi
8 |
9 | # Assign the command line arguments to variables
10 | model_path=$1
11 | question_path=$2
12 | base_answer_path=$3
13 | image_folder=$4
14 | N=$5
15 | temperature=$6
16 | GS=$7
17 |
18 | # Loop over each chunk/process
19 | for (( chunk_id=0; chunk_id "${base_answer_path}_merged.jsonl"
43 | for ((i=0; i> "${base_answer_path}.jsonl"
46 | done
47 | # remove the unmerged files
48 | for (( chunk_id=0; chunk_id "$output_file"
30 |
31 | # Loop through the indices and concatenate each file.
32 | for IDX in $(seq 0 $((CHUNKS-1))); do
33 | cat ./playground/data/eval/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
34 | done
35 |
36 | python scripts/convert_gqa_for_eval.py --src $output_file --dst $GQADIR/testdev_balanced_predictions.json
37 |
38 | cd $GQADIR
39 | python eval/eval.py --tier testdev_balanced
40 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/llavabench.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python -m llava.eval.model_vqa \
4 | --model-path liuhaotian/llava-v1.5-13b \
5 | --question-file ./playground/data/eval/llava-bench-in-the-wild/questions.jsonl \
6 | --image-folder ./playground/data/eval/llava-bench-in-the-wild/images \
7 | --answers-file ./playground/data/eval/llava-bench-in-the-wild/answers/llava-v1.5-13b.jsonl \
8 | --temperature 0 \
9 | --conv-mode vicuna_v1
10 |
11 | mkdir -p playground/data/eval/llava-bench-in-the-wild/reviews
12 |
13 | python llava/eval/eval_gpt_review_bench.py \
14 | --question playground/data/eval/llava-bench-in-the-wild/questions.jsonl \
15 | --context playground/data/eval/llava-bench-in-the-wild/context.jsonl \
16 | --rule llava/eval/table/rule.json \
17 | --answer-list \
18 | playground/data/eval/llava-bench-in-the-wild/answers_gpt4.jsonl \
19 | playground/data/eval/llava-bench-in-the-wild/answers/llava-v1.5-13b.jsonl \
20 | --output \
21 | playground/data/eval/llava-bench-in-the-wild/reviews/llava-v1.5-13b.jsonl
22 |
23 | python llava/eval/summarize_gpt_review.py -f playground/data/eval/llava-bench-in-the-wild/reviews/llava-v1.5-13b.jsonl
24 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/mmbench.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SPLIT="mmbench_dev_20230712"
4 |
5 | python -m llava.eval.model_vqa_mmbench \
6 | --model-path liuhaotian/llava-v1.5-13b \
7 | --question-file ./playground/data/eval/mmbench/$SPLIT.tsv \
8 | --answers-file ./playground/data/eval/mmbench/answers/$SPLIT/llava-v1.5-13b.jsonl \
9 | --single-pred-prompt \
10 | --temperature 0 \
11 | --conv-mode vicuna_v1
12 |
13 | mkdir -p playground/data/eval/mmbench/answers_upload/$SPLIT
14 |
15 | python scripts/convert_mmbench_for_submission.py \
16 | --annotation-file ./playground/data/eval/mmbench/$SPLIT.tsv \
17 | --result-dir ./playground/data/eval/mmbench/answers/$SPLIT \
18 | --upload-dir ./playground/data/eval/mmbench/answers_upload/$SPLIT \
19 | --experiment llava-v1.5-13b
20 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/mmbench_cn.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SPLIT="mmbench_dev_cn_20231003"
4 |
5 | python -m llava.eval.model_vqa_mmbench \
6 | --model-path liuhaotian/llava-v1.5-13b \
7 | --question-file ./playground/data/eval/mmbench_cn/$SPLIT.tsv \
8 | --answers-file ./playground/data/eval/mmbench_cn/answers/$SPLIT/llava-v1.5-13b.jsonl \
9 | --lang cn \
10 | --single-pred-prompt \
11 | --temperature 0 \
12 | --conv-mode vicuna_v1
13 |
14 | mkdir -p playground/data/eval/mmbench_cn/answers_upload/$SPLIT
15 |
16 | python scripts/convert_mmbench_for_submission.py \
17 | --annotation-file ./playground/data/eval/mmbench_cn/$SPLIT.tsv \
18 | --result-dir ./playground/data/eval/mmbench_cn/answers/$SPLIT \
19 | --upload-dir ./playground/data/eval/mmbench_cn/answers_upload/$SPLIT \
20 | --experiment llava-v1.5-13b
21 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/mme.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python -m llava.eval.model_vqa_loader \
4 | --model-path liuhaotian/llava-v1.5-13b \
5 | --question-file ./playground/data/eval/MME/llava_mme.jsonl \
6 | --image-folder ./playground/data/eval/MME/MME_Benchmark_release_version \
7 | --answers-file ./playground/data/eval/MME/answers/llava-v1.5-13b.jsonl \
8 | --temperature 0 \
9 | --conv-mode vicuna_v1
10 |
11 | cd ./playground/data/eval/MME
12 |
13 | python convert_answer_to_mme.py --experiment llava-v1.5-13b
14 |
15 | cd eval_tool
16 |
17 | python calculation.py --results_dir answers/llava-v1.5-13b
18 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/mmvet.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python -m llava.eval.model_vqa \
4 | --model-path liuhaotian/llava-v1.5-13b \
5 | --question-file ./playground/data/eval/mm-vet/llava-mm-vet.jsonl \
6 | --image-folder ./playground/data/eval/mm-vet/images \
7 | --answers-file ./playground/data/eval/mm-vet/answers/llava-v1.5-13b.jsonl \
8 | --temperature 0 \
9 | --conv-mode vicuna_v1
10 |
11 | mkdir -p ./playground/data/eval/mm-vet/results
12 |
13 | python scripts/convert_mmvet_for_eval.py \
14 | --src ./playground/data/eval/mm-vet/answers/llava-v1.5-13b.jsonl \
15 | --dst ./playground/data/eval/mm-vet/results/llava-v1.5-13b.json
16 |
17 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/pope.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #--model-base ../pretrained_weights/llava1.5_7b \
3 | #--model-path ./checkpoints/dpo/llava1.5_7b-lora32-lr2e-6-lrv_5w-1e/ \
4 | python -m llava.eval.model_vqa_loader \
5 | --model-path ../pretrained_weights/llava1.5_7b \
6 | --question-file ./playground/data/eval/pope/llava_pope_test.jsonl \
7 | --image-folder ../data/coco/val2014 \
8 | --answers-file ./playground/data/eval/pope/answers/dpo/llava1.5_7b-lora32-lr2e-6-lrv_5w-1e.jsonl \
9 | --temperature 0 \
10 | --conv-mode vicuna_v1
11 |
12 | python llava/eval/eval_pope.py \
13 | --annotation-dir ./playground/data/eval/pope/coco \
14 | --question-file ./playground/data/eval/pope/llava_pope_test.jsonl \
15 | --result-file ./playground/data/eval/pope/answers/dpo/llava1.5_7b-lora32-lr2e-6-lrv_5w-1e.jsonl
16 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/qbench.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "dev" ]; then
4 | echo "Evaluating in 'dev' split."
5 | elif [ "$1" = "test" ]; then
6 | echo "Evaluating in 'test' split."
7 | else
8 | echo "Unknown split, please choose between 'dev' and 'test'."
9 | exit 1
10 | fi
11 |
12 | python -m llava.eval.model_vqa_qbench \
13 | --model-path liuhaotian/llava-v1.5-13b \
14 | --image-folder ./playground/data/eval/qbench/images_llvisionqa/ \
15 | --questions-file ./playground/data/eval/qbench/llvisionqa_$1.json \
16 | --answers-file ./playground/data/eval/qbench/llvisionqa_$1_answers.jsonl \
17 | --conv-mode llava_v1 \
18 | --lang en
19 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/qbench_zh.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" = "dev" ]; then
4 | ZH_SPLIT="验证集"
5 | echo "Evaluating in 'dev' split."
6 | elif [ "$1" = "test" ]; then
7 | ZH_SPLIT="测试集"
8 | echo "Evaluating in 'test' split."
9 | else
10 | echo "Unknown split, please choose between 'dev' and 'test'."
11 | exit 1
12 | fi
13 |
14 | python -m llava.eval.model_vqa_qbench \
15 | --model-path liuhaotian/llava-v1.5-13b \
16 | --image-folder ./playground/data/eval/qbench/images_llvisionqa/ \
17 | --questions-file ./playground/data/eval/qbench/质衡-问答-$ZH_SPLIT.json \
18 | --answers-file ./playground/data/eval/qbench/llvisionqa_zh_$1_answers.jsonl \
19 | --conv-mode llava_v1 \
20 | --lang zh
21 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/score_multi.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Check that exactly seven arguments are passed
4 | if [ "$#" -ne 7 ]; then
5 | echo "Usage: $0 <model_base> <model_path> <question_path> <base_answer_path> <image_folder> <N> <GS>"
6 | exit 1
7 | fi
8 |
9 | # Assign the command line arguments to variables
10 | model_base=$1
11 | model_path=$2
12 | question_path=$3
13 | base_answer_path=$4
14 | image_folder=$5
15 | N=$6
16 | GS=$7
17 |
18 | # Loop over each chunk/process
19 | for (( chunk_id=0; chunk_id "${base_answer_path}_merged.jsonl"
43 | for ((i=0; i> "${base_answer_path}_merged.jsonl"
46 | done
47 | # remove the unmerged files
48 | for (( chunk_id=0; chunk_id "$output_file"
28 |
29 | # Loop through the indices and concatenate each file.
30 | for IDX in $(seq 0 $((CHUNKS-1))); do
31 | cat ./playground/data/eval/seed_bench/answers/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
32 | done
33 |
34 | # Evaluate
35 | python scripts/convert_seed_for_submission.py \
36 | --annotation-file ./playground/data/eval/seed_bench/SEED-Bench.json \
37 | --result-file $output_file \
38 | --result-upload-file ./playground/data/eval/seed_bench/answers_upload/llava-v1.5-13b.jsonl
39 |
40 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/sqa.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python -m llava.eval.model_vqa_science \
4 | --model-path liuhaotian/llava-v1.5-13b \
5 | --question-file ./playground/data/eval/scienceqa/llava_test_CQM-A.json \
6 | --image-folder ./playground/data/eval/scienceqa/images/test \
7 | --answers-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b.jsonl \
8 | --single-pred-prompt \
9 | --temperature 0 \
10 | --conv-mode vicuna_v1
11 |
12 | python llava/eval/eval_science_qa.py \
13 | --base-dir ./playground/data/eval/scienceqa \
14 | --result-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b.jsonl \
15 | --output-file ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_output.jsonl \
16 | --output-result ./playground/data/eval/scienceqa/answers/llava-v1.5-13b_result.json
17 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/textvqa.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python -m llava.eval.model_vqa_loader \
4 | --model-path liuhaotian/llava-v1.5-13b \
5 | --question-file ./playground/data/eval/textvqa/llava_textvqa_val_v051_ocr.jsonl \
6 | --image-folder ./playground/data/eval/textvqa/train_images \
7 | --answers-file ./playground/data/eval/textvqa/answers/llava-v1.5-13b.jsonl \
8 | --temperature 0 \
9 | --conv-mode vicuna_v1
10 |
11 | python -m llava.eval.eval_textvqa \
12 | --annotation-file ./playground/data/eval/textvqa/TextVQA_0.5.1_val.json \
13 | --result-file ./playground/data/eval/textvqa/answers/llava-v1.5-13b.jsonl
14 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/vizwiz.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python -m llava.eval.model_vqa_loader \
4 | --model-path liuhaotian/llava-v1.5-13b \
5 | --question-file ./playground/data/eval/vizwiz/llava_test.jsonl \
6 | --image-folder ./playground/data/eval/vizwiz/test \
7 | --answers-file ./playground/data/eval/vizwiz/answers/llava-v1.5-13b.jsonl \
8 | --temperature 0 \
9 | --conv-mode vicuna_v1
10 |
11 | python scripts/convert_vizwiz_for_submission.py \
12 | --annotation-file ./playground/data/eval/vizwiz/llava_test.jsonl \
13 | --result-file ./playground/data/eval/vizwiz/answers/llava-v1.5-13b.jsonl \
14 | --result-upload-file ./playground/data/eval/vizwiz/answers_upload/llava-v1.5-13b.json
15 |
--------------------------------------------------------------------------------
/scripts/v1_5/eval/vqav2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
4 | IFS=',' read -ra GPULIST <<< "$gpu_list"
5 |
6 | CHUNKS=${#GPULIST[@]}
7 |
8 | CKPT="llava-v1.5-13b"
9 | SPLIT="llava_vqav2_mscoco_test-dev2015"
10 |
11 | for IDX in $(seq 0 $((CHUNKS-1))); do
12 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m llava.eval.model_vqa_loader \
13 | --model-path liuhaotian/llava-v1.5-13b \
14 | --question-file ./playground/data/eval/vqav2/$SPLIT.jsonl \
15 | --image-folder ./playground/data/eval/vqav2/test2015 \
16 | --answers-file ./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \
17 | --num-chunks $CHUNKS \
18 | --chunk-idx $IDX \
19 | --temperature 0 \
20 | --conv-mode vicuna_v1 &
21 | done
22 |
23 | wait
24 |
25 | output_file=./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/merge.jsonl
26 |
27 | # Clear out the output file if it exists.
28 | > "$output_file"
29 |
30 | # Loop through the indices and concatenate each file.
31 | for IDX in $(seq 0 $((CHUNKS-1))); do
32 | cat ./playground/data/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file"
33 | done
34 |
35 | python scripts/convert_vqav2_for_submission.py --split $SPLIT --ckpt $CKPT
36 |
37 |
--------------------------------------------------------------------------------
/scripts/v1_5/finetune.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | deepspeed llava/train/train_mem.py \
4 | --deepspeed ./scripts/zero3.json \
5 | --model_name_or_path lmsys/vicuna-13b-v1.5 \
6 | --version v1 \
7 | --data_path ./playground/data/llava_v1_5_mix665k.json \
8 | --image_folder ./playground/data \
9 | --vision_tower openai/clip-vit-large-patch14-336 \
10 | --pretrain_mm_mlp_adapter ./checkpoints/llava-v1.5-13b-pretrain/mm_projector.bin \
11 | --mm_projector_type mlp2x_gelu \
12 | --mm_vision_select_layer -2 \
13 | --mm_use_im_start_end False \
14 | --mm_use_im_patch_token False \
15 | --image_aspect_ratio pad \
16 | --group_by_modality_length True \
17 | --bf16 True \
18 | --output_dir ./checkpoints/llava-v1.5-13b \
19 | --num_train_epochs 1 \
20 | --per_device_train_batch_size 16 \
21 | --per_device_eval_batch_size 4 \
22 | --gradient_accumulation_steps 1 \
23 | --evaluation_strategy "no" \
24 | --save_strategy "steps" \
25 | --save_steps 50000 \
26 | --save_total_limit 1 \
27 | --learning_rate 2e-5 \
28 | --weight_decay 0. \
29 | --warmup_ratio 0.03 \
30 | --lr_scheduler_type "cosine" \
31 | --logging_steps 1 \
32 | --tf32 True \
33 | --model_max_length 2048 \
34 | --gradient_checkpointing True \
35 | --dataloader_num_workers 4 \
36 | --lazy_preprocess True \
37 | --report_to wandb
38 |
--------------------------------------------------------------------------------
/scripts/v1_5/finetune_lora.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | deepspeed llava/train/train_mem.py \
4 | --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \
5 | --deepspeed ./scripts/zero3.json \
6 | --model_name_or_path lmsys/vicuna-13b-v1.5 \
7 | --version v1 \
8 | --data_path ./playground/data/llava_v1_5_mix665k.json \
9 | --image_folder ./playground/data \
10 | --vision_tower openai/clip-vit-large-patch14-336 \
11 | --pretrain_mm_mlp_adapter ./checkpoints/llava-v1.5-13b-pretrain/mm_projector.bin \
12 | --mm_projector_type mlp2x_gelu \
13 | --mm_vision_select_layer -2 \
14 | --mm_use_im_start_end False \
15 | --mm_use_im_patch_token False \
16 | --image_aspect_ratio pad \
17 | --group_by_modality_length True \
18 | --bf16 True \
19 | --output_dir ./checkpoints/llava-v1.5-13b-lora \
20 | --num_train_epochs 1 \
21 | --per_device_train_batch_size 16 \
22 | --per_device_eval_batch_size 4 \
23 | --gradient_accumulation_steps 1 \
24 | --evaluation_strategy "no" \
25 | --save_strategy "steps" \
26 | --save_steps 50000 \
27 | --save_total_limit 1 \
28 | --learning_rate 2e-4 \
29 | --weight_decay 0. \
30 | --warmup_ratio 0.03 \
31 | --lr_scheduler_type "cosine" \
32 | --logging_steps 1 \
33 | --tf32 True \
34 | --model_max_length 2048 \
35 | --gradient_checkpointing True \
36 | --dataloader_num_workers 4 \
37 | --lazy_preprocess True \
38 | --report_to wandb
39 |
--------------------------------------------------------------------------------
/scripts/v1_5/finetune_task.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | deepspeed llava/train/train_mem.py \
4 | --deepspeed ./scripts/zero3.json \
5 | --model_name_or_path liuhaotian/llava-v1.5-13b \
6 | --version v1 \
7 | --data_path ./playground/data/llava_v1_5_mix665k.json \
8 | --image_folder ./playground/data \
9 | --vision_tower openai/clip-vit-large-patch14-336 \
10 | --mm_projector_type mlp2x_gelu \
11 | --mm_vision_select_layer -2 \
12 | --mm_use_im_start_end False \
13 | --mm_use_im_patch_token False \
14 | --image_aspect_ratio pad \
15 | --group_by_modality_length True \
16 | --bf16 True \
17 | --output_dir ./checkpoints/llava-v1.5-13b-task \
18 | --num_train_epochs 1 \
19 | --per_device_train_batch_size 16 \
20 | --per_device_eval_batch_size 4 \
21 | --gradient_accumulation_steps 1 \
22 | --evaluation_strategy "no" \
23 | --save_strategy "steps" \
24 | --save_steps 50000 \
25 | --save_total_limit 1 \
26 | --learning_rate 2e-5 \
27 | --weight_decay 0. \
28 | --warmup_ratio 0.03 \
29 | --lr_scheduler_type "cosine" \
30 | --logging_steps 1 \
31 | --tf32 True \
32 | --model_max_length 2048 \
33 | --gradient_checkpointing True \
34 | --dataloader_num_workers 4 \
35 | --lazy_preprocess True \
36 | --report_to wandb
37 |
--------------------------------------------------------------------------------
/scripts/v1_5/finetune_task_lora.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | deepspeed llava/train/train_mem.py \
4 | --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \
5 | --deepspeed ./scripts/zero3.json \
6 | --model_name_or_path liuhaotian/llava-v1.5-13b \
7 | --version v1 \
8 | --data_path ./playground/data/llava_v1_5_mix665k.json \
9 | --image_folder ./playground/data \
10 | --vision_tower openai/clip-vit-large-patch14-336 \
11 | --mm_projector_type mlp2x_gelu \
12 | --mm_vision_select_layer -2 \
13 | --mm_use_im_start_end False \
14 | --mm_use_im_patch_token False \
15 | --image_aspect_ratio pad \
16 | --group_by_modality_length True \
17 | --bf16 True \
18 | --output_dir ./checkpoints/llava-v1.5-13b-task-lora \
19 | --num_train_epochs 1 \
20 | --per_device_train_batch_size 16 \
21 | --per_device_eval_batch_size 4 \
22 | --gradient_accumulation_steps 1 \
23 | --evaluation_strategy "no" \
24 | --save_strategy "steps" \
25 | --save_steps 50000 \
26 | --save_total_limit 1 \
27 | --learning_rate 2e-4 \
28 | --weight_decay 0. \
29 | --warmup_ratio 0.03 \
30 | --lr_scheduler_type "cosine" \
31 | --logging_steps 1 \
32 | --tf32 True \
33 | --model_max_length 2048 \
34 | --gradient_checkpointing True \
35 | --dataloader_num_workers 4 \
36 | --lazy_preprocess True \
37 | --report_to wandb
38 |
--------------------------------------------------------------------------------
/scripts/v1_5/pretrain.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | deepspeed llava/train/train_mem.py \
4 | --deepspeed ./scripts/zero2.json \
5 | --model_name_or_path lmsys/vicuna-13b-v1.5 \
6 | --version plain \
7 | --data_path ./playground/data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json \
8 | --image_folder ./playground/data/LLaVA-Pretrain/images \
9 | --vision_tower openai/clip-vit-large-patch14-336 \
10 | --mm_projector_type mlp2x_gelu \
11 | --tune_mm_mlp_adapter True \
12 | --mm_vision_select_layer -2 \
13 | --mm_use_im_start_end False \
14 | --mm_use_im_patch_token False \
15 | --bf16 True \
16 | --output_dir ./checkpoints/llava-v1.5-13b-pretrain \
17 | --num_train_epochs 1 \
18 | --per_device_train_batch_size 32 \
19 | --per_device_eval_batch_size 4 \
20 | --gradient_accumulation_steps 1 \
21 | --evaluation_strategy "no" \
22 | --save_strategy "steps" \
23 | --save_steps 24000 \
24 | --save_total_limit 1 \
25 | --learning_rate 1e-3 \
26 | --weight_decay 0. \
27 | --warmup_ratio 0.03 \
28 | --lr_scheduler_type "cosine" \
29 | --logging_steps 1 \
30 | --tf32 True \
31 | --model_max_length 2048 \
32 | --gradient_checkpointing True \
33 | --dataloader_num_workers 4 \
34 | --lazy_preprocess True \
35 | --report_to wandb
36 |
--------------------------------------------------------------------------------