├── .gitignore ├── LICENSE ├── README.md ├── docs ├── COST.md ├── Data.md └── Getting_Started.md ├── images ├── VCoder-COST.svg ├── demo1.png ├── demo2.png ├── demo3.png ├── demo4.png ├── demo5.png ├── demo6.png ├── eval.svg ├── features.svg ├── logo.png └── vcoder.svg ├── playground └── data │ ├── eval │ └── gqa │ │ └── data │ │ └── eval │ │ └── eval.py │ └── prompts │ ├── complex_reasoning │ ├── 000_caps.txt │ ├── 000_conv.txt │ ├── 001_caps.txt │ ├── 001_conv.txt │ ├── 002_caps.txt │ ├── 002_conv.txt │ └── system_message.txt │ ├── conversation │ ├── 000_caps.txt │ ├── 000_conv.txt │ ├── 001_caps.txt │ ├── 001_conv.txt │ └── system_message.txt │ └── detail_description │ ├── 000_caps.txt │ ├── 000_conv.txt │ ├── 001_caps.txt │ ├── 001_conv.txt │ ├── 002_caps.txt │ ├── 002_conv.txt │ └── system_message.txt ├── pyproject.toml ├── scripts ├── convert_gqa_for_eval.py ├── convert_mmbench_for_submission.py ├── convert_vizwiz_for_submission.py ├── convert_vqav2_for_submission.py ├── merge_lora_weights.py ├── v1_5 │ ├── eval │ │ ├── cost.sh │ │ ├── cost_depth.sh │ │ ├── gqa.sh │ │ ├── mmbench.sh │ │ ├── mme.sh │ │ ├── pope.sh │ │ ├── vizwiz.sh │ │ └── vqav2.sh │ ├── finetune.sh │ ├── finetune_lora.sh │ ├── pretrain.sh │ ├── vcoder_ds_train.sh │ ├── vcoder_it.sh │ ├── vcoder_it_lora.sh │ └── vcoder_train.sh ├── zero2.json ├── zero3.json └── zero3_offload.json └── vcoder_llava ├── __init__.py ├── constants.py ├── data_utils.py ├── eval ├── eval_depth_accuracy.py ├── eval_pope.py ├── eval_seg_accuracy.py ├── eval_seg_accuracy_gpt4.py ├── gpt4_query.py ├── m4c_evaluator.py ├── model_depth_loader.py ├── model_seg_loader.py ├── model_vqa_loader.py ├── model_vqa_mmbench.py ├── model_vqa_mme.py └── synonyms.txt ├── mm_utils.py ├── model ├── __init__.py ├── apply_delta.py ├── builder.py ├── consolidate.py ├── language_model │ ├── llava_llama.py │ ├── vcoder_ds_llava_llama.py │ ├── vcoder_it_llava_llama.py │ └── vcoder_llava_llama.py ├── llava_arch.py ├── make_delta.py ├── multimodal_adapter │ └── builder.py ├── multimodal_depth_adapter │ └── builder.py ├── multimodal_encoder │ ├── builder.py │ └── clip_encoder.py ├── multimodal_projector │ └── builder.py ├── utils.py ├── vcoder_ds_llava_arch.py ├── vcoder_it_llava_arch.py └── vcoder_llava_arch.py ├── questions.py ├── serve ├── __init__.py ├── chat.py ├── cli.py ├── examples │ ├── corgi.jpg │ ├── corgi_pan.png │ ├── depth.jpeg │ ├── depth_depth.png │ ├── depth_pan.png │ ├── friends.jpg │ ├── friends_pan.png │ ├── people.jpg │ ├── people_pan.png │ ├── suits.jpg │ ├── suits_depth.jpeg │ ├── suits_ins.png │ └── suits_pan.png └── gradio_app.py ├── train ├── llama_flash_attn_monkey_patch.py ├── llava_trainer.py ├── train.py ├── train_mem.py ├── vcoder_ds_llava_trainer.py ├── vcoder_ds_train.py ├── vcoder_ds_train_mem.py ├── vcoder_it.py ├── vcoder_it_mem.py ├── vcoder_llava_trainer.py ├── vcoder_train.py └── vcoder_train_mem.py ├── utils.py └── vcoder_conversation.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Python 2 | __pycache__ 3 | *.pyc 4 | *.egg-info 5 | dist 6 | 7 | # Log 8 | *.log 9 | *.log.* 10 | *.jsonl 11 | 12 | # Data 13 | !**/alpaca-data-conversation.json 14 | 15 | # Editor 16 | .idea 17 | *.swp 18 | 19 | # Other 20 | .DS_Store 21 | wandb 22 | output 23 | 24 | checkpoints 25 | ckpts* 26 | 27 | .ipynb_checkpoints 28 | *.ipynb 29 | visualize_results/ 30 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # ✌️ VCoder: Versatile Vision Encoders for Multimodal Large Language Models 2 | 3 | [![Framework: PyTorch](https://img.shields.io/badge/Framework-PyTorch-orange.svg)](https://pytorch.org/) [![HuggingFace space](https://img.shields.io/badge/🤗-HuggingFace%20Space-cyan.svg)](https://huggingface.co/spaces/shi-labs/VCoder) [![YouTube](https://badges.aleen42.com/src/youtube.svg)](https://youtu.be/go493IGgVWo) 4 | 5 | [Jitesh Jain](https://praeclarumjj3.github.io/), [Jianwei Yang](https://jwyang.github.io/), [Humphrey Shi](https://www.humphreyshi.com/home) 6 | 7 | [[`Project Page`](https://praeclarumjj3.github.io/vcoder/)] [[`COST Dataset`](https://huggingface.co/datasets/shi-labs/COST)] [[`arXiv`](https://arxiv.org/abs/2312.14233)] [[`pdf`](https://arxiv.org/pdf/2312.14233.pdf)] [[`Video`](https://drive.google.com/file/d/1o48-1PDeGsjHcgcStjvqKpsReR3stdOe/view?usp=sharing)] [[`BibTeX`](#citation)] 8 | 9 | This repo contains the code for our paper **VCoder: Versatile Vision Encoders for Multimodal Large Language Models**. 10 | 11 |

12 | 13 |

14 | 15 |

16 | 17 |

18 |
19 | ## Contents
20 |
21 | 1. [Installation Instructions](#installation-instructions)
22 | 2. [Demo](#demo)
23 | 3. [Dataset Preparation](docs/Data.md)
24 | 4. [Getting Started](#getting-started)
25 | 5. [Results](#results)
26 | 6. [Citation](#citation)
27 |
28 | ## News
29 |
30 | - **[December 29, 2023]**: Our demo is now available on [HuggingFace Spaces](https://huggingface.co/spaces/shi-labs/VCoder). Thanks to the HF team for their support! 🤗
31 | - **[December 21, 2023]**: [**Project Page**](https://praeclarumjj3.github.io/vcoder/), [**Dataset**](https://huggingface.co/datasets/shi-labs/COST), [**ArXiv Preprint**](https://arxiv.org/abs/2312.14233) and [**GitHub Repo**](https://github.com/SHI-Labs/VCoder) are public! 🚀
32 | - 🎯 VCoder is an adapter for improving MLLMs at object-level perception tasks with the aid of auxiliary perception modalities as control inputs.
33 | - 🎁 We also release the [COST](https://huggingface.co/datasets/shi-labs/COST) dataset to train and evaluate MLLMs at object-level perception tasks!
34 | - 🥁 VCoder LLaVA-1.5 and VCoder-DS LLaVA-1.5 checkpoints are available on the [HuggingFace Hub](https://huggingface.co/models?search=vcoder)!
35 | - 👨🏻‍💻 **[COMING SOON]** VCoder (IT) LLaVA-1.5 trained on a mix of instruction-tuning data and the COST dataset!
36 |
37 | ## Installation Instructions
38 |
39 | We use Python 3.10 and PyTorch 2.0.1 (CUDA 11.7 build) on Ubuntu 20.04.3 LTS.
40 |
41 | - Clone this repository.
42 |
43 | ```bash
44 | git clone https://github.com/SHI-Labs/VCoder
45 | cd VCoder
46 | ```
47 |
48 | - Set up the conda environment.
49 |
50 | ```bash
51 | conda create -n vcoder python=3.10 -y
52 | conda activate vcoder
53 | pip install --upgrade pip
54 | conda install -c "nvidia/label/cuda-11.7.0" cuda-toolkit
55 | conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.7 -c pytorch -c nvidia
56 | pip install -e .
57 | pip install ninja
58 | pip install flash-attn --no-build-isolation
59 | ```
60 |
61 | - Install additional packages for evaluation.
62 |
63 | ```bash
64 | python -m spacy download en_core_web_sm
65 | pip install --user -U nltk
66 | ```
67 |
68 | ## Demo
69 |
70 | [![HuggingFace space](https://img.shields.io/badge/🤗-HuggingFace%20Space-cyan.svg)](https://huggingface.co/spaces/shi-labs/VCoder)
71 |
72 | You can use either the CLI or the Gradio interface to interact with VCoder LLaVA-1.5 locally.
73 |
74 | > Note: You can obtain the segmentation map from the [OneFormer Demo](https://huggingface.co/spaces/shi-labs/OneFormer) and the depth map from [DINOv2](https://github.com/facebookresearch/dinov2/blob/main/notebooks/depth_estimation.ipynb).
75 |
76 | ### Gradio Interface
77 |
78 | Run the following command:
79 |
80 | ```bash
81 | CUDA_VISIBLE_DEVICES=0 python -m vcoder_llava.serve.gradio_app --model-path shi-labs/vcoder_ds_llava-v1.5-13b
82 | ```
83 |
84 | ### CLI Inference
85 |
86 | Run the following command. `--seg-image-file` and `--depth-image-file` are optional, but a segmentation map is required whenever you pass a depth map; `--load-4bit` (or `--load-8bit`) enables quantized inference:
87 |
88 | ```bash
89 | CUDA_VISIBLE_DEVICES=0 python -m vcoder_llava.serve.cli \
90 |     --model-path shi-labs/vcoder_ds_llava-v1.5-13b \
91 |     --image-file "vcoder_llava/serve/examples/suits.jpg" \
92 |     --seg-image-file "vcoder_llava/serve/examples/suits_pan.png" \
93 |     --depth-image-file "vcoder_llava/serve/examples/suits_depth.jpeg" \
94 |     --load-4bit
95 | ```
96 |
97 | ## Getting Started
98 |
99 | Please see [Getting Started with VCoder](docs/Getting_Started.md) for training and evaluation commands.
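Beyond the CLI and Gradio entry points above, you can also load a checkpoint programmatically. The snippet below is only a rough sketch that mirrors the loader call used in `scripts/merge_lora_weights.py`; the full prompt construction and generation loop live in `vcoder_llava/serve/cli.py`, and the two ignored return values are assumed here to be the segmentation/depth image processors.

```python
# Rough sketch: load a VCoder LLaVA-1.5 checkpoint for inspection.
# Mirrors the call in scripts/merge_lora_weights.py; see vcoder_llava/serve/cli.py
# for the actual chat/generation pipeline.
from vcoder_llava.model.builder import load_pretrained_model
from vcoder_llava.mm_utils import get_model_name_from_path

model_path = "shi-labs/vcoder_ds_llava-v1.5-13b"
model_name = get_model_name_from_path(model_path)

# model_base=None assumes a fully merged (non-LoRA) checkpoint; the two ignored
# values are presumably the seg/depth image processors returned by the builder.
tokenizer, model, image_processor, _, _, context_len = load_pretrained_model(
    model_path, None, model_name, device_map="cpu"
)
print(f"Loaded {model_name} with context length {context_len}")
```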
100 | 101 | ## Results 102 | 103 | Note that we do not finetune any parameters in the original LLaVA-1.5 models, so VCoder's performance on general question answering benchmarks is the same as [LLaVA-1.5](https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md#llava-v15) . 104 | 105 | ### Benchmarking on COST 106 | 107 | | **Model** | **Semantic** | **Instance** | **Panoptic** | **Depth** | **Checkpoint** | 108 | |---------|:-------------:|:-------------:|:-------------:|:-------------:|:-------------:| 109 | | |**CS(↑)/HS(↓)**|**CS(↑)/HS(↓)**|**CS(↑)/HS(↓)**|**DS(↓)**| | 110 | | VCoder LLaVA-1.5-7b | 88.6/10.4 | 71.1/26.9 | 86.0/12.8 | - | [HF Hub](https://huggingface.co/shi-labs/vcoder_llava-v1.5-7b) | 111 | | VCoder LLaVA-1.5-13b | 89.0/10.0 | 73.3/25.0 | 87.2/11.6 | - | [HF Hub](https://huggingface.co/shi-labs/vcoder_llava-v1.5-13b) | 112 | | VCoder-DS LLaVA-1.5-7b | 87.8/11.5 | 69.9/28.5 | 86.8/12.4 | 65.9 | [HF Hub](https://huggingface.co/shi-labs/vcoder_ds_llava-v1.5-7b) | 113 | | VCoder-DS LLaVA-1.5-13b | 88.5/10.9 | 71.7/26.3 | 88.5/10.8 | 63.3 | [HF Hub](https://huggingface.co/shi-labs/vcoder_ds_llava-v1.5-13b) | 114 | 115 | > We release the model responses used for benchmarking [here](https://drive.google.com/drive/folders/1R9meaFRneo76YIsSxIPRWPnKqgaDI-t3?usp=sharing). 116 | 117 | ## Citation 118 | 119 | If you found VCoder useful in your research, please consider starring ⭐ us on GitHub and citing 📚 us in your research! 120 | 121 | ```bibtex 122 | @article{jain2023vcoder, 123 | title={{VCoder: Versatile Vision Encoders for Multimodal Large Language Models}}, 124 | author={Jitesh Jain and Jianwei Yang and Humphrey Shi}, 125 | journal={arXiv}, 126 | year={2023} 127 | } 128 | ``` 129 | 130 | ## Acknowledgement 131 | 132 | We thank the authors of [LLaVA](https://github.com/haotian-liu/LLaVA), [OneFormer](https://github.com/SHI-Labs/OneFormer), and [DINOv2](https://github.com/facebookresearch/dinov2) for open-sourcing their codebase and checkpoints. We are also grateful to the authors of [CHAIR](https://github.com/LisaAnne/Hallucination) for releasing their synonym word mapping. 133 | -------------------------------------------------------------------------------- /docs/COST.md: -------------------------------------------------------------------------------- 1 | # COST Dataset 2 | 3 | The COST dataset includes the following components for training and evaluating MLLMs on object-level perception tasks: 4 | 5 | - **RGB Images** obtained from the [COCO-2017](https://cocodataset.org/#download) dataset. 6 | - **Segmentation Maps** for semantic, instance, and panoptic segmentation tasks, obtained using the publicly available [DiNAT-L OneFormer](https://github.com/SHI-Labs/OneFormer#coco) model trained on the COCO dataset. 7 | - **Questions** obtained by prompting [GPT-4](https://chat.openai.com/) for object identification and object order perception tasks. You can find the questions in [questions.py](vcoder_llava/questions.py). 8 | - **Depth Maps** obtained using the publicly available ViT-L/14 distilled variant of [DINOv2 DPT](https://github.com/facebookresearch/dinov2#pretrained-heads---depth-estimation) model trained on the NYUd dataset. 9 | 10 | We represent the information from the segmentation maps and depth maps in text form to obtain the final question-answer pairs. Please refer to Sec 3.1 in our paper for more details. 11 | 12 |

13 | 14 |

15 |
16 | We provide different splits of the COST dataset for training and evaluation.
17 |
18 | | **split** | **Number of Images** | **Number of QnA pairs** | **splits from COCO** |
19 | | :-------: | :------------------: | :---------------------: | :------------------: |
20 | | train | 280k | 280k | train2017, test2017, unlabeled2017 |
21 | | val | 5k | 5k | val2017 |
22 |
23 | ## File Structure
24 |
25 | ```text
26 | coco_segm_text
27 | ├── depth
28 | │   └── test
29 | │   │   └── ...
30 | │   └── train
31 | │   │   └── depth # contains depth maps for the train2017 split
32 | │   │   └── panoptic_order.txt # contains answers for the object order perception task on images in the train2017 split
33 | │   └── unlabeled
34 | │   │   └── ...
35 | │   └── val
36 | │   │   └── ...
37 | ├── test
38 | │   └── ...
39 | ├── train
40 | │   └── instance_inference # contains instance masks for the train2017 split
41 | │   └── instance.txt # contains answers for the instance object identification task on images in the train2017 split
42 | │   └── panoptic_inference # contains panoptic masks for the train2017 split
43 | │   └── panoptic.txt # contains answers for the panoptic object identification task on images in the train2017 split
44 | │   └── semantic_inference # contains semantic masks for the train2017 split
45 | │   └── semantic.txt # contains answers for the semantic object identification task on images in the train2017 split
46 | ├── unlabeled
47 | │   └── ...
48 | ├── val
49 | │   └── ...
50 | ```
51 |
52 | ## Citation
53 |
54 | If you use the COST dataset, please consider starring ⭐ us on [GitHub](https://github.com/SHI-Labs/VCoder) and citing 📚 us in your research!
55 |
56 | ```bibtex
57 | @article{jain2023vcoder,
58 |   title={{VCoder: Versatile Vision Encoders for Multimodal Large Language Models}},
59 |   author={Jitesh Jain and Jianwei Yang and Humphrey Shi},
60 |   journal={arXiv},
61 |   year={2023}
62 | }
63 | ```
64 |
--------------------------------------------------------------------------------
/docs/Data.md:
--------------------------------------------------------------------------------
1 | # Dataset Preparation
2 |
3 | While training our VCoder LLaVA-1.5 framework, we use datasets focused on two sets of tasks: **Object Perception** and **General Question Answering**. Note that we only use the General Question Answering datasets for regularization during training.
4 |
5 | ```text
6 | playground/data
7 | ├── coco
8 | │   └── train2017
9 | │   └── val2017
10 | │   └── test2017
11 | │   └── unlabeled2017
12 | │── coco_segm_text
13 | │   └── depth
14 | │   └── train
15 | │   └── val
16 | │   └── test
17 | │   └── unlabeled
18 | ├── gqa
19 | │   └── images
20 | │   └── seg_images
21 | ├── ocr_vqa
22 | │   └── images
23 | │   └── seg_images
24 | ├── textvqa
25 | │   └── train_images
26 | │   └── seg_images
27 | └── vg
28 |     ├── VG_100K
29 |     └── VG_100K_2
30 |     └── vg
31 |     └── SEG_VG_100K
32 |     └── SEG_VG_100K_2
33 | ```
34 |
35 | ## Object Perception
36 |
37 |

38 | 39 |

40 |
41 | We use our COCO Segmentation Text (**[COST](https://huggingface.co/datasets/shi-labs/COST)**) dataset to improve VCoder's performance at predicting objects, their counts, and their depth order in a given image. It also contains segmentation maps (obtained from [OneFormer](https://github.com/SHI-Labs/OneFormer)) and depth maps (obtained from [DINOv2 DPT](https://github.com/facebookresearch/dinov2)) corresponding to all images. For more information, please see [COST.md](COST.md).
42 |
43 | - Download and unzip COCO images:
44 |
45 | ```bash
46 | cd playground/data
47 | mkdir coco
48 | cd coco
49 | wget http://images.cocodataset.org/zips/train2017.zip
50 | wget http://images.cocodataset.org/zips/val2017.zip
51 | wget http://images.cocodataset.org/zips/test2017.zip
52 | wget http://images.cocodataset.org/zips/unlabeled2017.zip
53 | unzip train2017.zip && unzip val2017.zip && unzip test2017.zip && unzip unlabeled2017.zip
54 | ```
55 |
56 | - Download and unzip the COST dataset:
57 |
58 | ```bash
59 | cd playground/data
60 | wget https://huggingface.co/datasets/shi-labs/COST/resolve/main/coco_segm_text.zip
61 |
62 | # unzip object identification data
63 | unzip coco_segm_text.zip
64 | ```
65 |
66 | ## General Question Answering
67 |
68 | **Note that you only need to download the following datasets to train the VCoder-DS LLaVA-1.5 models**. We use the same datasets as [LLaVA](https://github.com/haotian-liu/LLaVA).
69 |
70 | - Download the Instruction Tuning [JSON](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_v1_5_mix665k.json) file:
71 |
72 | ```bash
73 | cd playground/data
74 | wget https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/resolve/main/llava_v1_5_mix665k.json
75 | ```
76 |
77 | - Download and unzip GQA images:
78 |
79 | ```bash
80 | cd playground/data
81 | mkdir gqa
82 | cd gqa
83 | wget https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip
84 | unzip images.zip
85 |
86 | # download segmentation maps
87 | wget https://huggingface.co/datasets/shi-labs/COST/resolve/main/seg_gqa.zip
88 | unzip seg_gqa.zip
89 | ```
90 |
91 | - Download and unzip OCR-VQA images:
92 |
93 | ```bash
94 | cd playground/data
95 | mkdir ocr_vqa
96 | cd ocr_vqa
97 |
98 | # script to download OCR-VQA images
99 | gdown https://drive.google.com/uc?id=1r0tyZUwGCc4wIG4RkiglCGNL_nFJjR6Q
100 | gdown https://drive.google.com/uc?id=16eqkNbgARX1aLM4q0l5WBiPPSFbK0Elp
101 | python loadDataset.py
102 |
103 | # download segmentation maps
104 | wget https://huggingface.co/datasets/shi-labs/COST/resolve/main/seg_ocr_vqa.zip
105 | unzip seg_ocr_vqa.zip
106 | ```
107 |
108 | - Download and unzip TextVQA images:
109 |
110 | ```bash
111 | cd playground/data
112 | mkdir textvqa
113 | cd textvqa
114 | wget https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip
115 | unzip train_val_images.zip
116 |
117 | # download segmentation maps
118 | wget https://huggingface.co/datasets/shi-labs/COST/resolve/main/textvqa_seg.zip
119 | unzip textvqa_seg.zip
120 | ```
121 |
122 | - Download and unzip Visual Genome images:
123 |
124 | ```bash
125 | cd playground/data
126 | mkdir vg
127 | cd vg
128 | wget https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip
129 | wget https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip
130 | unzip images2.zip && unzip images.zip
131 |
132 | # download segmentation maps
133 | wget https://huggingface.co/datasets/shi-labs/COST/resolve/main/seg_vg.zip
134 | unzip seg_vg.zip
135 | ```
136 |
--------------------------------------------------------------------------------
/docs/Getting_Started.md:
--------------------------------------------------------------------------------
1 | # Getting Started with VCoder
2 |
3 | This document provides a brief introduction to using VCoder LLaVA-1.5. Our code builds on the original [LLaVA](https://github.com/haotian-liu/LLaVA) codebase; please check out their repo for more information.
4 |
5 | ## Training
6 |
7 | ### Download LLaVA-1.5 checkpoints
8 |
9 | We add our VCoder to a pretrained LLaVA-1.5 model and train it on the COST dataset.
10 |
11 |
12 | LLaVA-1.5-7b 13 | 14 | [MLP Projector](https://huggingface.co/liuhaotian/llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5) | [MLLM](https://huggingface.co/liuhaotian/llava-v1.5-7b) 15 | 16 | ```bash 17 | # Download the Projector weights store them inside outputs folder 18 | git lfs install 19 | mkdir outputs 20 | cd outputs 21 | git clone https://huggingface.co/liuhaotian/llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5 22 | ``` 23 | 24 |
25 | 26 |
27 | LLaVA-1.5-13b 28 | 29 | [MLP Projector](https://huggingface.co/liuhaotian/llava-v1.5-mlp2x-336px-pretrain-vicuna-13b-v1.5) | [MLLM](https://huggingface.co/liuhaotian/llava-v1.5-13b) 30 | 31 | ```bash 32 | # Download the Projector weights store them inside outputs folder 33 | git lfs install 34 | mkdir outputs 35 | cd outputs 36 | git clone https://huggingface.co/liuhaotian/llava-v1.5-mlp2x-336px-pretrain-vicuna-13b-v1.5 37 | ``` 38 | 39 |
40 |
41 | We provide training code for two variants of VCoder. We train all our models on 8 A100 GPUs.
42 |
43 | ### Only Trained for Object Identification and Counting
44 |
45 | - Run `bash scripts/v1_5/vcoder_train.sh` to train either of the following variants on the COST dataset:
46 |
47 |   - **VCoder LLaVA-1.5-7b**: We train the model for 2 epochs. The training time is ~8 hours.
48 |   - **VCoder LLaVA-1.5-13b**: We train the model for 2 epochs. The training time is ~14 hours.
49 |
50 | - Remember to set the model variant in [scripts/v1_5/vcoder_train.sh](../scripts/v1_5/vcoder_train.sh) before training.
51 |
52 | ### Trained for Object Identification, Counting and Depth Order Prediction
53 |
54 | > Note: These are the models we use in our demo.
55 |
56 | - Run `bash scripts/v1_5/vcoder_ds_train.sh` to train either of the following variants on the combination of the COST dataset and the General Question Answering datasets (used for regularization):
57 |
58 |   - **VCoder-DS LLaVA-1.5-7b**: We train the model for 1 epoch. The training time is ~17 hours.
59 |   - **VCoder-DS LLaVA-1.5-13b**: We train the model for 1 epoch. The training time is ~30 hours.
60 |
61 | - Remember to set the model variant in [scripts/v1_5/vcoder_ds_train.sh](../scripts/v1_5/vcoder_ds_train.sh) before training.
62 |
63 | ## Evaluation
64 |
65 | We evaluate our models on the COST val dataset. We have written our own [evaluators](../vcoder_llava/eval) for these tasks.
66 |
67 |
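Each evaluation script below shards inference across the GPUs listed in `CUDA_VISIBLE_DEVICES`, concatenates the per-chunk outputs, and then calls the matching evaluator on the merged predictions. For reference, the final scoring steps (copied from `scripts/v1_5/eval/cost.sh` and `scripts/v1_5/eval/cost_depth.sh`, with `$CKPT` set to the checkpoint name) look like this:

```bash
# Scoring step at the end of scripts/v1_5/eval/cost.sh
python -m vcoder_llava.eval.eval_seg_accuracy \
    --gt_path "./playground/data/coco_segm_text/val/" \
    --pred_path "./playground/data/eval/seg/$CKPT/"

# Scoring step at the end of scripts/v1_5/eval/cost_depth.sh
python -m vcoder_llava.eval.eval_depth_accuracy \
    --gt_path "./playground/data/coco_segm_text/depth/val/panoptic_order.txt" \
    --pred_path "./playground/data/eval/depth/$CKPT/output_depth.txt"
```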

68 | 69 |

70 | 71 | ### Object Identification and Counting 72 | 73 | We evaluate on the semantic, instance and panoptic object perception tasks. 74 | 75 | ```bash 76 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/v1_5/eval/cost.sh 77 | ``` 78 | 79 | Remember to set the model variant in [scripts/v1_5/eval/cost.sh](../scripts/v1_5/eval/cost.sh) before evaluating. 80 | 81 | ### Depth Order Identification for Objects 82 | 83 | We evaluate on the depth object perception tasks. 84 | 85 | ```bash 86 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/v1_5/eval/cost_depth.sh 87 | ``` 88 | 89 | Remember to set the model variant in [scripts/v1_5/eval/cost_depth.sh](../scripts/v1_5/eval/cost_depth.sh) before evaluating. 90 | 91 | ### General Question-Answering 92 | 93 | - We follow the same evaluation setting from [LLaVA-1.5](https://github.com/haotian-liu/LLaVA). 94 | - Download and unzip the eval files from [google drive](https://drive.google.com/file/d/1atZSBBrAX54yYpxtVVW33zFvcnaHeFPy/view?usp=sharing) to `./playground/data/eval`. This also provides a general structure for all datasets. 95 | 96 | ```bash 97 | # pip3 install gdown 98 | cd playground/data/eval 99 | gdown https://drive.google.com/uc?id=1atZSBBrAX54yYpxtVVW33zFvcnaHeFPy 100 | unzip eval.zip 101 | ``` 102 | 103 | #### VQAv2 104 | 105 | - Download [`test2015`](http://images.cocodataset.org/zips/test2015.zip) and put it under `./playground/data/eval/vqav2`. 106 | - Multi-GPU inference. 107 | 108 | ```bash 109 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/v1_5/eval/vqav2.sh 110 | ``` 111 | 112 | - Submit the results to the [evaluation server](https://eval.ai/web/challenges/challenge-page/830/my-submission). 113 | 114 | #### GQA 115 | 116 | - Download the [data](https://cs.stanford.edu/people/dorarad/gqa/download.html) and [evaluation scripts](https://cs.stanford.edu/people/dorarad/gqa/evaluate.html) following the official instructions and put under `./playground/data/eval/gqa/data`. 117 | - Multi-GPU inference. 118 | 119 | ```bash 120 | CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/v1_5/eval/gqa.sh 121 | ``` 122 | 123 | #### VisWiz 124 | 125 | - Download [`test.json`](https://vizwiz.cs.colorado.edu/VizWiz_final/vqa_data/Annotations.zip) and extract [`test.zip`](https://vizwiz.cs.colorado.edu/VizWiz_final/images/test.zip) to `test`. Put them under `./playground/data/eval/vizwiz`. 126 | - Single-GPU inference. 127 | 128 | ```bash 129 | CUDA_VISIBLE_DEVICES=0 bash scripts/v1_5/eval/vizwiz.sh 130 | ``` 131 | 132 | - Submit the results to the [evaluation server](https://eval.ai/web/challenges/challenge-page/1911/my-submission). 133 | 134 | #### POPE 135 | 136 | - Download `coco` from [POPE](https://github.com/AoiDragon/POPE/tree/e3e39262c85a6a83f26cf5094022a782cb0df58d/output/coco) and put under `./playground/data/eval/pope`. 137 | - Single-GPU inference and evaluate. 138 | 139 | ```bash 140 | CUDA_VISIBLE_DEVICES=0 bash scripts/v1_5/eval/pope.sh 141 | ``` 142 | 143 | ### MME 144 | 145 | - Download the data following the official instructions [here](https://github.com/BradyFU/Awesome-Multimodal-Large-Language-Models/tree/Evaluation). 146 | - Downloaded images to `MME_Benchmark_release_version`. 147 | - put the official `eval_tool` and `MME_Benchmark_release_version` under `./playground/data/eval/MME`. 148 | - Single-GPU inference and evaluate. 
149 | 150 | ```bash 151 | CUDA_VISIBLE_DEVICES=0 bash scripts/v1_5/eval/mme.sh 152 | ``` 153 | 154 | ### MMBench 155 | 156 | - Download [`mmbench_dev_20230712.tsv`](https://download.openmmlab.com/mmclassification/datasets/mmbench/mmbench_dev_20230712.tsv) and put under `./playground/data/eval/mmbench`. 157 | - Single-GPU inference. 158 | 159 | ```bash 160 | CUDA_VISIBLE_DEVICES=0 bash scripts/v1_5/eval/mmbench.sh 161 | ``` 162 | 163 | - Submit the results to the [evaluation server](https://opencompass.org.cn/leaderboard-multimodal). 164 | -------------------------------------------------------------------------------- /images/demo1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/images/demo1.png -------------------------------------------------------------------------------- /images/demo2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/images/demo2.png -------------------------------------------------------------------------------- /images/demo3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/images/demo3.png -------------------------------------------------------------------------------- /images/demo4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/images/demo4.png -------------------------------------------------------------------------------- /images/demo5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/images/demo5.png -------------------------------------------------------------------------------- /images/demo6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/images/demo6.png -------------------------------------------------------------------------------- /images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/images/logo.png -------------------------------------------------------------------------------- /playground/data/prompts/complex_reasoning/000_caps.txt: -------------------------------------------------------------------------------- 1 | A man wearing multiple neck ties making a goofy face. 2 | A man in a white shirt wearing very many ties. 3 | a man with ties on poses for a picture 4 | A man wearing multiple ties on his neck. 5 | A young man smiles while wearing several ties. 
6 | 7 | tie: [0.574, 0.298, 0.752, 0.704] 8 | tie: [0.464, 0.339, 0.639, 0.789] 9 | tie: [0.349, 0.363, 0.563, 0.732] 10 | tie: [0.259, 0.255, 0.668, 0.805] 11 | person: [0.019, 0.065, 0.962, 0.988] 12 | person: [0.0, 0.24, 0.214, 1.0] 13 | tie: [0.316, 0.778, 0.443, 0.867] 14 | tie: [0.386, 0.707, 0.496, 0.801] 15 | tie: [0.251, 0.354, 0.402, 0.747] 16 | tie: [0.44, 0.318, 0.63, 0.397] 17 | tie: [0.566, 0.33, 0.655, 0.543] 18 | tie: [0.25, 0.409, 0.359, 0.556] -------------------------------------------------------------------------------- /playground/data/prompts/complex_reasoning/000_conv.txt: -------------------------------------------------------------------------------- 1 | Question: 2 | What is unusual about this photo? 3 | === 4 | Answer: 5 | In the photo, the man is wearing a total of ten ties around his neck. This is considered unusual because it is not common for someone to wear multiple ties at once. In a typical setting, a person would wear only one tie. The man's goofy facial expression also adds to the unconventional and humorous nature of the image. -------------------------------------------------------------------------------- /playground/data/prompts/complex_reasoning/001_caps.txt: -------------------------------------------------------------------------------- 1 | A group of people standing outside of a black vehicle with various luggage. 2 | Luggage surrounds a vehicle in an underground parking area 3 | People try to fit all of their luggage in an SUV. 4 | The sport utility vehicle is parked in the public garage, being packed for a trip 5 | Some people with luggage near a van that is transporting it. 6 | 7 | person: [0.681, 0.242, 0.774, 0.694] 8 | person: [0.63, 0.222, 0.686, 0.516] 9 | person: [0.444, 0.233, 0.487, 0.34] 10 | backpack: [0.384, 0.696, 0.485, 0.914] 11 | backpack: [0.755, 0.413, 0.846, 0.692] 12 | suitcase: [0.758, 0.413, 0.845, 0.69] 13 | suitcase: [0.1, 0.497, 0.173, 0.579] 14 | bicycle: [0.282, 0.363, 0.327, 0.442] 15 | car: [0.786, 0.25, 0.848, 0.322] 16 | car: [0.783, 0.27, 0.827, 0.335] 17 | car: [0.86, 0.254, 0.891, 0.3] 18 | car: [0.261, 0.101, 0.787, 0.626] -------------------------------------------------------------------------------- /playground/data/prompts/complex_reasoning/001_conv.txt: -------------------------------------------------------------------------------- 1 | Question: 2 | What challenges do these people face? 3 | === 4 | Answer: 5 | In the image, a group of people is standing outside a black SUV in a parking area, surrounded by various pieces of luggage, including suitcases and backpacks. They are facing the challenge of fitting all their luggage into the black SUV. There are multiple suitcases and backpacks to be packed, which suggests that the group has a significant amount of belongings to accommodate. They might have to strategize and arrange the luggage efficiently to ensure that everything fits properly into the vehicle. Additionally, they need to consider the comfort of the passengers and visibility while driving, so the placement of the luggage must not obstruct the driver's view or make the passengers uncomfortable during the trip. -------------------------------------------------------------------------------- /playground/data/prompts/complex_reasoning/002_caps.txt: -------------------------------------------------------------------------------- 1 | There is a movie theater that displays the show times above the doors. 2 | A red fire hydrant is deep in the snow. 
3 | The fire hydrant is in the snow near a recently plowed sidewalk. 4 | This city has had a very hard winter with snow. 5 | A hotel for dogs in the snow in winter. 6 | 7 | fire hydrant: [0.326, 0.612, 0.426, 0.72] -------------------------------------------------------------------------------- /playground/data/prompts/complex_reasoning/002_conv.txt: -------------------------------------------------------------------------------- 1 | Question: 2 | What challenges might this city face? 3 | === 4 | Answer: 5 | The city faces challenges due to the harsh winter conditions and heavy snowfall. In the image, a red fire hydrant is almost buried deep in the snow, which indicates the significant amount of snow the city has experienced. This can lead to various challenges such as difficulties in transportation, increased risk of accidents, and disruptions to daily life. For example, the recently plowed sidewalk near the fire hydrant shows that the city has to constantly clear snow from roads and sidewalks to maintain access and safety for pedestrians and vehicles. Moreover, emergency services, like firefighters, might face challenges accessing crucial equipment, such as fire hydrants, during emergencies due to the snow accumulation. This highlights the importance of effective snow management strategies and preparedness in such cities to minimize the impact of harsh winter conditions on residents and essential services. -------------------------------------------------------------------------------- /playground/data/prompts/complex_reasoning/system_message.txt: -------------------------------------------------------------------------------- 1 | You are an AI visual assistant that can analyze a single image. You receive five sentences, each describing the same image you are observing. In addition, specific object locations within the image are given, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. 2 | 3 | The task is to use the provided caption and bounding box information, create a plausible question about the image, and provide the answer in detail. 4 | 5 | Create complex questions beyond describing the scene. 6 | To answer such questions, one should require first understanding the visual content, then based on the background knowledge or reasoning, either explain why the things are happening that way, or provide guides and help to user's request. Make the question challenging by not including the visual content details in the question so that the user needs to reason about that first. 7 | 8 | Instead of directly mentioning the bounding box coordinates, utilize this data to explain the scene using natural language. Include details like object counts, position of the objects, relative position between the objects. 9 | 10 | When using the information from the caption and coordinates, directly explain the scene, and do not mention that the information source is the caption or the bounding box. Always answer as if you are directly looking at the image. -------------------------------------------------------------------------------- /playground/data/prompts/conversation/000_caps.txt: -------------------------------------------------------------------------------- 1 | There is a movie theater that displays the show times above the doors. 2 | A red fire hydrant is deep in the snow. 
3 | The fire hydrant is in the snow near a recently plowed sidewalk. 4 | This city has had a very hard winter with snow. 5 | A hotel for dogs in the snow in winter. -------------------------------------------------------------------------------- /playground/data/prompts/conversation/000_conv.txt: -------------------------------------------------------------------------------- 1 | Question: 2 | What color is the fire hydrant in the image? 3 | === 4 | Answer: 5 | The fire hydrant in the image is red. 6 | === 7 | Question: 8 | Is the fire hydrant fully covered by snow or partially visible? 9 | === 10 | Answer: 11 | The fire hydrant is partially visible, with a significant portion of it covered by the deep snow. 12 | === 13 | Question: 14 | What is the condition of the sidewalk near the fire hydrant? 15 | === 16 | Answer: 17 | The sidewalk near the fire hydrant has been recently plowed, so it is relatively clear of snow compared to the surrounding area. 18 | === 19 | Question: 20 | What does the image suggest about the winter season in the city? 21 | === 22 | Answer: 23 | The image suggests that the winter season in the city has been quite harsh and challenging. The significant snow accumulation seen in the image indicates that the city has experienced heavy snowfall, which can lead to various difficulties and disruptions for its residents. 24 | 25 | For instance, the deep snow covering the fire hydrant could potentially hinder its accessibility in case of emergencies, which poses safety concerns. The recently plowed sidewalk implies that the city's maintenance crews have been actively working to keep the walkways clear and safe for pedestrians, but the sheer amount of snowfall might make it difficult to keep up with the continuous snow removal efforts. 26 | 27 | Furthermore, such extreme winter conditions can impact transportation, leading to traffic issues, delays in public transit, and increased risks of accidents due to slippery roads. It can also cause problems for businesses and schools, as people might face challenges commuting to work or attending classes. Additionally, the heavy snow can put extra strain on infrastructure, such as roofs and power lines, increasing the likelihood of structural damage or power outages. 28 | 29 | In conclusion, the image of the red fire hydrant deep in the snow and the recently plowed sidewalk suggest that the city has faced a particularly severe winter season, with substantial snowfall that has likely caused various challenges and disruptions for its residents and infrastructure. -------------------------------------------------------------------------------- /playground/data/prompts/conversation/001_caps.txt: -------------------------------------------------------------------------------- 1 | A man is skiing in the open snow covered hills 2 | A skier is making his way into the snow. 3 | A skier on the foothills of a huge mountain range. 4 | A skier looks at mountains while standing near a trail sign. 5 | a single person skiing by an area with a lot of bushes -------------------------------------------------------------------------------- /playground/data/prompts/conversation/001_conv.txt: -------------------------------------------------------------------------------- 1 | Question: 2 | What is the skier doing? 3 | === 4 | Answer: 5 | The skier is skiing in the snow-covered hills, making his way through the snow and enjoying the beautiful mountain scenery. They are also standing near a trail sign, which indicates their location on the mountain. 
6 | === 7 | Question: 8 | Are there any other people or skiers visible in the image? 9 | === 10 | Answer: 11 | No, there is only a single person skiing in the image, and they are surrounded by an area with a lot of bushes. 12 | === 13 | Question: 14 | What kind of skiing is the person doing? Cross-country or downhill? 15 | === 16 | Answer: 17 | Based on what I can see in the photo, it is not possible to definitively determine whether the skier is doing cross-country or downhill skiing. However, given the presence of a trail sign and the fact that they are skiing in the foothills, it is more likely that the skier is engaged in cross-country skiing. Downhill skiing typically takes place on more steep slopes and involves ski lifts, whereas cross-country skiing often occurs on relatively flatter terrain with marked trails. 18 | === 19 | Question: 20 | Can you explain cross-country and downhill skiing a bit more? 21 | === 22 | Answer: 23 | Cross-country skiing and downhill skiing are two distinct types of skiing, each with its own techniques, equipment, and terrain preferences. 24 | 25 | 1. Cross-Country Skiing: 26 | Also known as Nordic skiing, cross-country skiing involves traversing relatively flat terrain or gentle slopes using skis and poles. It is a popular recreational and competitive sport that focuses on endurance, balance, and aerobic fitness. Cross-country skiers use their own locomotion to move across snow-covered terrain, rather than relying on ski lifts or other forms of assistance. 27 | There are two main techniques in cross-country skiing: 28 | - Classic: This technique involves skiing in parallel tracks, using a forward-and-backward gliding motion while keeping the skis parallel to each other. The skier uses their poles to propel themselves forward. 29 | - Skate skiing: This technique is similar to ice skating, where the skier pushes off one ski at an angle and glides on the other. It requires a smooth, groomed surface and is typically faster than the classic technique. 30 | Cross-country ski equipment consists of lightweight skis, boots, bindings, and poles. The boots are more flexible than those used in downhill skiing, allowing for better ankle movement and control. 31 | 32 | 2. Downhill Skiing: 33 | Also known as alpine skiing, downhill skiing involves descending slopes at high speeds using skis and poles for balance and control. This sport is more focused on speed, technique, and navigating challenging terrain, including steep slopes, moguls, and even jumps. 34 | Downhill skiing can be further categorized into several disciplines, such as slalom, giant slalom, super-G, and downhill racing. Each discipline has its own set of rules, courses, and ski equipment. 35 | Downhill ski equipment includes heavier and stiffer skis, boots, bindings, and poles than those used in cross-country skiing. The boots are more rigid to provide better support and control during high-speed descents and sharp turns. 36 | 37 | In summary, cross-country skiing is an endurance-based sport that involves traveling across flat or gently sloping terrain, while downhill skiing is focused on speed and technique as skiers navigate steeper slopes and challenging terrain. Both sports require specialized equipment and techniques, but they offer different experiences and challenges to participants. 
-------------------------------------------------------------------------------- /playground/data/prompts/conversation/system_message.txt: -------------------------------------------------------------------------------- 1 | You are an AI visual assistant, and you are seeing a single image. What you see are provided with five sentences, describing the same image you are looking at. Answer all questions as you are seeing the image. 2 | 3 | Design a conversation between you and a person asking about this photo. The answers should be in a tone that a visual AI assistant is seeing the image and answering the question. 4 | Ask diverse questions and give corresponding answers. 5 | 6 | Include questions asking about the visual content of the image, including the object types, counting the objects, object actions, object locations, relative positions between objects, etc. Only include questions that have definite answers: 7 | (1) one can see the content in the image that the question asks about and can answer confidently; 8 | (2) one can determine confidently from the image that it is not in the image. 9 | Do not ask any question that cannot be answered confidently. 10 | 11 | Also include complex questions that are relevant to the content in the image, for example, asking about background knowledge of the objects in the image, asking to discuss about events happening in the image, etc. Again, do not ask about uncertain details. 12 | Provide detailed answers when answering complex questions. For example, give detailed examples or reasoning steps to make the content more convincing and well-organized. You can include multiple paragraphs if necessary. -------------------------------------------------------------------------------- /playground/data/prompts/detail_description/000_caps.txt: -------------------------------------------------------------------------------- 1 | A harbor filled with lots of boats next to a building. 2 | A bicycle parked in front of several boats at a dock. 3 | A red bicycle in front of a line of docked white yachts 4 | A bike sits before boats which sit before a long building. 5 | A bicycle is a convenient means of land transportation when you live on a boat. 6 | 7 | bicycle: [0.287, 0.641, 0.507, 0.874] 8 | bicycle: [0.566, 0.667, 0.63, 0.731] 9 | boat: [0.318, 0.579, 0.575, 0.724] 10 | boat: [0.704, 0.607, 0.818, 0.727] 11 | boat: [0.818, 0.601, 0.942, 0.744] 12 | boat: [0.002, 0.53, 0.243, 0.71] 13 | boat: [0.541, 0.611, 0.668, 0.731] 14 | person: [0.778, 0.527, 0.797, 0.57] 15 | cup: [0.708, 0.733, 0.724, 0.758] 16 | boat: [0.236, 0.532, 0.404, 0.64] 17 | boat: [0.81, 0.632, 0.836, 0.676] 18 | boat: [0.957, 0.526, 1.0, 0.752] -------------------------------------------------------------------------------- /playground/data/prompts/detail_description/000_conv.txt: -------------------------------------------------------------------------------- 1 | It is a harbor filled with numerous boats of various sizes docked next to a long building. Among the boats, there are a few white yachts lined up, standing out from the rest. There is a red bicycle prominently parked in front of the line of docked boats, serving as a convenient means of land transportation for those living on the boats. Another bicycle can be seen further back in the scene, near the middle of the harbor. 2 | 3 | A person is visible near the right side of the harbor, possibly enjoying the view or attending to their boat. Additionally, there is a cup placed on a surface near the middle of the scene. 
-------------------------------------------------------------------------------- /playground/data/prompts/detail_description/001_caps.txt: -------------------------------------------------------------------------------- 1 | A group of people standing outside of a black vehicle with various luggage. 2 | Luggage surrounds a vehicle in an underground parking area 3 | People try to fit all of their luggage in an SUV. 4 | The sport utility vehicle is parked in the public garage, being packed for a trip 5 | Some people with luggage near a van that is transporting it. 6 | 7 | person: [0.681, 0.242, 0.774, 0.694] 8 | person: [0.63, 0.222, 0.686, 0.516] 9 | person: [0.444, 0.233, 0.487, 0.34] 10 | backpack: [0.384, 0.696, 0.485, 0.914] 11 | backpack: [0.755, 0.413, 0.846, 0.692] 12 | suitcase: [0.758, 0.413, 0.845, 0.69] 13 | suitcase: [0.1, 0.497, 0.173, 0.579] 14 | bicycle: [0.282, 0.363, 0.327, 0.442] 15 | car: [0.786, 0.25, 0.848, 0.322] 16 | car: [0.783, 0.27, 0.827, 0.335] 17 | car: [0.86, 0.254, 0.891, 0.3] 18 | car: [0.261, 0.101, 0.787, 0.626] -------------------------------------------------------------------------------- /playground/data/prompts/detail_description/001_conv.txt: -------------------------------------------------------------------------------- 1 | The image is an underground parking area with a black sport utility vehicle (SUV) parked. There are three people in the scene, with one person standing closer to the left side of the vehicle, another person in the middle, and the third person on the right side. They are all working together to pack their luggage into the SUV for a trip. 2 | 3 | In the parking area, various luggage items are scattered around the vehicle. There are two backpacks, one located near the left rear wheel and the other closer to the right side of the vehicle. Additionally, there are two suitcases, one on the right side of the car and another further away near the center of the parking area. A bicycle can also be seen on the left side of the vehicle. 4 | 5 | Other cars are parked around the main SUV, with one car positioned behind it and slightly to the left, another behind and slightly to the right, and the third car further behind on the right side. -------------------------------------------------------------------------------- /playground/data/prompts/detail_description/002_caps.txt: -------------------------------------------------------------------------------- 1 | A man holds a Wii-mote above his head while another looks on. 2 | A guy and his friend are playing Nintendo Wii. 3 | A young man is holding a video game remote over his head. 4 | two men standing in a room while one plays with a wii mote 5 | Some guys standing and playing a video game. 6 | 7 | couch: [0.697, 0.759, 0.995, 1.0] 8 | dining table: [0.426, 0.755, 1.0, 0.987] 9 | person: [0.082, 0.252, 0.342, 1.0] 10 | person: [0.399, 0.085, 0.742, 0.982] 11 | remote: [0.477, 0.135, 0.516, 0.187] 12 | sink: [0.016, 0.501, 0.063, 0.52] 13 | potted plant: [0.798, 0.384, 0.888, 0.645] 14 | refrigerator: [0.305, 0.389, 0.414, 0.547] 15 | chair: [0.72, 0.509, 0.858, 0.725] -------------------------------------------------------------------------------- /playground/data/prompts/detail_description/002_conv.txt: -------------------------------------------------------------------------------- 1 | The image shows two men standing in a room, engaged in playing a video game on a Nintendo Wii console. 
One of the men is holding a Wii remote above his head with enthusiasm, while the other man looks on, likely enjoying the friendly competition. 2 | 3 | The room appears to be a living space with a couch located in the background and a dining table nearby. A potted plant can be seen placed close to the couch, and a chair is situated in the middle of the room. The room also features a kitchen area with a sink and a refrigerator visible in the background. -------------------------------------------------------------------------------- /playground/data/prompts/detail_description/system_message.txt: -------------------------------------------------------------------------------- 1 | You are an AI visual assistant that can analyze a single image. You receive five sentences, each describing the same image you are observing. In addition, specific object locations within the image are given, along with detailed coordinates. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. 2 | 3 | Using the provided caption and bounding box information, describe the scene in a detailed manner. 4 | 5 | Instead of directly mentioning the bounding box coordinates, utilize this data to explain the scene using natural language. Include details like object counts, position of the objects, relative position between the objects. 6 | 7 | When using the information from the caption and coordinates, directly explain the scene, and do not mention that the information source is the caption or the bounding box. Always answer as if you are directly looking at the image. -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "vcoder_llava" 7 | version = "1.1.1" 8 | description = "Towards GPT-4 like large language and visual assistant." 
9 | readme = "README.md" 10 | requires-python = ">=3.8" 11 | classifiers = [ 12 | "Programming Language :: Python :: 3", 13 | "License :: OSI Approved :: Apache Software License", 14 | ] 15 | dependencies = [ 16 | "einops", "fastapi", "gradio==3.35.2", "markdown2[all]", "numpy", 17 | "requests", "sentencepiece", "tokenizers>=0.12.1", 18 | "uvicorn", "wandb", "chardet", 19 | "shortuuid", "httpx==0.24.0", 20 | "deepspeed==0.9.5", "word2number", 21 | "spacy", "inflect", "openpyxl", 22 | "peft==0.4.0", "num2words", 23 | "transformers==4.31.0", 24 | "accelerate==0.21.0", 25 | "bitsandbytes==0.41.0", 26 | "scikit-learn==1.2.2", 27 | "sentencepiece==0.1.99", 28 | "einops==0.6.1", "einops-exts==0.0.4", "timm==0.6.13", 29 | "gradio_client==0.2.9" 30 | ] 31 | 32 | [project.urls] 33 | "Homepage" = "https://praeclarumjj3.github.io/vcoder/" 34 | "Bug Tracker" = "https://github.com/SHI-Labs/VCoder/issues" 35 | 36 | [tool.setuptools.packages.find] 37 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"] 38 | 39 | [tool.wheel] 40 | exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"] 41 | -------------------------------------------------------------------------------- /scripts/convert_gqa_for_eval.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | parser = argparse.ArgumentParser() 6 | parser.add_argument("--src", type=str) 7 | parser.add_argument("--dst", type=str) 8 | args = parser.parse_args() 9 | 10 | all_answers = [] 11 | for line_idx, line in enumerate(open(args.src)): 12 | res = json.loads(line) 13 | question_id = res['question_id'] 14 | text = res['text'].rstrip('.').lower() 15 | all_answers.append({"questionId": question_id, "prediction": text}) 16 | 17 | with open(args.dst, 'w') as f: 18 | json.dump(all_answers, f) 19 | -------------------------------------------------------------------------------- /scripts/convert_mmbench_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | import pandas as pd 5 | 6 | def get_args(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("--annotation-file", type=str, required=True) 9 | parser.add_argument("--result-dir", type=str, required=True) 10 | parser.add_argument("--upload-dir", type=str, required=True) 11 | parser.add_argument("--experiment", type=str, required=True) 12 | 13 | return parser.parse_args() 14 | 15 | if __name__ == "__main__": 16 | args = get_args() 17 | 18 | df = pd.read_table(args.annotation_file) 19 | 20 | cur_df = df.copy() 21 | cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category']) 22 | cur_df.insert(6, 'prediction', None) 23 | for pred in open(os.path.join(args.result_dir, f"{args.experiment}.jsonl")): 24 | pred = json.loads(pred) 25 | cur_df.loc[df['index'] == pred['question_id'], 'prediction'] = pred['text'] 26 | 27 | cur_df.to_excel(os.path.join(args.upload_dir, f"{args.experiment}.xlsx"), index=False, engine='openpyxl') 28 | -------------------------------------------------------------------------------- /scripts/convert_vizwiz_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import json 4 | 5 | from vcoder_llava.eval.m4c_evaluator import EvalAIAnswerProcessor 6 | 7 | 8 | def parse_args(): 9 | parser = argparse.ArgumentParser() 10 
| parser.add_argument('--annotation-file', type=str, required=True) 11 | parser.add_argument('--result-file', type=str, required=True) 12 | parser.add_argument('--result-upload-file', type=str, required=True) 13 | return parser.parse_args() 14 | 15 | 16 | if __name__ == '__main__': 17 | 18 | args = parse_args() 19 | 20 | os.makedirs(os.path.dirname(args.result_upload_file), exist_ok=True) 21 | 22 | results = [] 23 | error_line = 0 24 | for line_idx, line in enumerate(open(args.result_file)): 25 | try: 26 | results.append(json.loads(line)) 27 | except: 28 | error_line += 1 29 | results = {x['question_id']: x['text'] for x in results} 30 | test_split = [json.loads(line) for line in open(args.annotation_file)] 31 | split_ids = set([x['question_id'] for x in test_split]) 32 | 33 | print(f'total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}') 34 | 35 | all_answers = [] 36 | 37 | answer_processor = EvalAIAnswerProcessor() 38 | 39 | for x in test_split: 40 | assert x['question_id'] in results 41 | all_answers.append({ 42 | 'image': x['image'], 43 | 'answer': answer_processor(results[x['question_id']]) 44 | }) 45 | 46 | with open(args.result_upload_file, 'w') as f: 47 | json.dump(all_answers, f) 48 | -------------------------------------------------------------------------------- /scripts/convert_vqav2_for_submission.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import json 4 | 5 | from vcoder_llava.eval.m4c_evaluator import EvalAIAnswerProcessor 6 | 7 | 8 | def parse_args(): 9 | parser = argparse.ArgumentParser() 10 | parser.add_argument('--dir', type=str, default="./playground/data/eval/vqav2") 11 | parser.add_argument('--ckpt', type=str, required=True) 12 | parser.add_argument('--split', type=str, required=True) 13 | return parser.parse_args() 14 | 15 | 16 | if __name__ == '__main__': 17 | 18 | args = parse_args() 19 | 20 | src = os.path.join(args.dir, 'answers', args.split, args.ckpt, 'merge.jsonl') 21 | test_split = os.path.join(args.dir, 'llava_vqav2_mscoco_test2015.jsonl') 22 | dst = os.path.join(args.dir, 'answers_upload', args.split, f'{args.ckpt}.json') 23 | os.makedirs(os.path.dirname(dst), exist_ok=True) 24 | 25 | results = [] 26 | error_line = 0 27 | for line_idx, line in enumerate(open(src)): 28 | try: 29 | results.append(json.loads(line)) 30 | except: 31 | error_line += 1 32 | 33 | results = {x['question_id']: x['text'] for x in results} 34 | test_split = [json.loads(line) for line in open(test_split)] 35 | split_ids = set([x['question_id'] for x in test_split]) 36 | 37 | print(f'total results: {len(results)}, total split: {len(test_split)}, error_line: {error_line}') 38 | 39 | all_answers = [] 40 | 41 | answer_processor = EvalAIAnswerProcessor() 42 | 43 | for x in test_split: 44 | if x['question_id'] not in results: 45 | all_answers.append({ 46 | 'question_id': x['question_id'], 47 | 'answer': '' 48 | }) 49 | else: 50 | all_answers.append({ 51 | 'question_id': x['question_id'], 52 | 'answer': answer_processor(results[x['question_id']]) 53 | }) 54 | 55 | with open(dst, 'w') as f: 56 | json.dump(all_answers, open(dst, 'w')) 57 | -------------------------------------------------------------------------------- /scripts/merge_lora_weights.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from vcoder_llava.model.builder import load_pretrained_model 3 | from vcoder_llava.mm_utils import get_model_name_from_path 4 | 5 | 
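# Usage sketch (hypothetical paths) -- merge LoRA weights back into the base model:
#   python scripts/merge_lora_weights.py \
#       --model-path ./checkpoints/vcoder_llava-v1.5-7b-lora \
#       --model-base liuhaotian/llava-v1.5-7b \
#       --save-model-path ./checkpoints/vcoder_llava-v1.5-7b-merged
# The merged model weights and tokenizer are written to --save-model-path.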
6 | def merge_lora(args): 7 | model_name = get_model_name_from_path(args.model_path) 8 | tokenizer, model, image_processor, _, _, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, device_map='cpu') 9 | 10 | model.save_pretrained(args.save_model_path) 11 | tokenizer.save_pretrained(args.save_model_path) 12 | 13 | 14 | if __name__ == "__main__": 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("--model-path", type=str, required=True) 17 | parser.add_argument("--model-base", type=str, required=True) 18 | parser.add_argument("--save-model-path", type=str, required=True) 19 | 20 | args = parser.parse_args() 21 | 22 | merge_lora(args) 23 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/cost.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="vcoder_llava-v1.5-7b" 9 | 10 | for IDX in $(seq 0 $((CHUNKS-1))); do 11 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m vcoder_llava.eval.model_seg_loader \ 12 | --model-path shi-labs/$CKPT \ 13 | --image-folder ./playground/data/coco/val2017 \ 14 | --seg-image-folder ./playground/data/coco_segm_text/val \ 15 | --output-file ./playground/data/eval/seg/$CKPT/output \ 16 | --num-chunks $CHUNKS \ 17 | --chunk-idx $IDX \ 18 | --temperature 0 \ 19 | --conv-mode vicuna_v1 \ 20 | --use_seg & 21 | done 22 | 23 | wait 24 | 25 | semantic_output_file=./playground/data/eval/seg/$CKPT/output_semantic.txt 26 | instance_output_file=./playground/data/eval/seg/$CKPT/output_instance.txt 27 | panoptic_output_file=./playground/data/eval/seg/$CKPT/output_panoptic.txt 28 | 29 | # Clear out the output files if it exists. 30 | > "$semantic_output_file" 31 | > "$instance_output_file" 32 | > "$panoptic_output_file" 33 | 34 | # Loop through the indices and concatenate each file. 
35 | for IDX in $(seq 0 $((CHUNKS-1))); do 36 | cat ./playground/data/eval/seg/$CKPT/output_semantic_${CHUNKS}_${IDX}.txt >> "$semantic_output_file" 37 | cat ./playground/data/eval/seg/$CKPT/output_instance_${CHUNKS}_${IDX}.txt >> "$instance_output_file" 38 | cat ./playground/data/eval/seg/$CKPT/output_panoptic_${CHUNKS}_${IDX}.txt >> "$panoptic_output_file" 39 | done 40 | 41 | python -m vcoder_llava.eval.eval_seg_accuracy \ 42 | --gt_path "./playground/data/coco_segm_text/val/" \ 43 | --pred_path "./playground/data/eval/seg/$CKPT/" 44 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/cost_depth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="vcoder_ds_llava-v1.5-7b" 9 | 10 | for IDX in $(seq 0 $((CHUNKS-1))); do 11 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m vcoder_llava.eval.model_depth_loader \ 12 | --model-path shi-labs/$CKPT \ 13 | --image-folder ./playground/data/coco/val2017 \ 14 | --seg-image-folder ./playground/data/coco_segm_text/val/ \ 15 | --depth_image-folder ./playground/data/coco_segm_text/depth/val/depth \ 16 | --output-file ./playground/data/eval/depth/$CKPT/output_depth \ 17 | --num-chunks $CHUNKS \ 18 | --chunk-idx $IDX \ 19 | --temperature 0 \ 20 | --conv-mode vicuna_v1 \ 21 | --use_depth_seg & 22 | done 23 | 24 | wait 25 | 26 | output_file=./playground/data/eval/depth/$CKPT/output_depth.txt 27 | 28 | # Clear out the output files if it exists. 29 | > "$output_file" 30 | 31 | # Loop through the indices and concatenate each file. 32 | for IDX in $(seq 0 $((CHUNKS-1))); do 33 | cat ./playground/data/eval/depth/$CKPT/output_depth_${CHUNKS}_${IDX}.txt >> "$output_file" 34 | done 35 | 36 | python -m vcoder_llava.eval.eval_depth_accuracy \ 37 | --gt_path "./playground/data/coco_segm_text/depth/val/panoptic_order.txt" \ 38 | --pred_path "./playground/data/eval/depth/$CKPT/output_depth.txt" 39 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/gqa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="llava-v1.5-7b-lora" 9 | SPLIT="llava_gqa_testdev_balanced" 10 | GQADIR="./playground/data/eval/gqa/data" 11 | 12 | for IDX in $(seq 0 $((CHUNKS-1))); do 13 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m vcoder_llava.eval.model_vqa_loader \ 14 | --model-path liuhaotian/$CKPT \ 15 | --question-file ./playground/data/eval/gqa/$SPLIT.jsonl \ 16 | --image-folder ./playground/data/eval/gqa/data/images \ 17 | --answers-file ./playground/data/eval/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \ 18 | --num-chunks $CHUNKS \ 19 | --chunk-idx $IDX \ 20 | --temperature 0 \ 21 | --conv-mode vicuna_v1 & 22 | done 23 | 24 | wait 25 | 26 | output_file=./playground/data/eval/gqa/answers/$SPLIT/$CKPT/merge.jsonl 27 | 28 | # Clear out the output file if it exists. 29 | > "$output_file" 30 | 31 | # Loop through the indices and concatenate each file. 
32 | for IDX in $(seq 0 $((CHUNKS-1))); do 33 | cat ./playground/data/eval/gqa/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 34 | done 35 | 36 | python scripts/convert_gqa_for_eval.py --src $output_file --dst $GQADIR/testdev_balanced_predictions.json 37 | 38 | cd $GQADIR 39 | python eval/eval.py --tier testdev_balanced 40 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/mmbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SPLIT="mmbench_dev_20230712" 4 | 5 | python -m vcoder_llava.eval.model_vqa_mmbench \ 6 | --model-path liuhaotian/llava-v1.5-7b-lora \ 7 | --question-file ./playground/lmm_datasets/eval/mmbench/$SPLIT.tsv \ 8 | --answers-file ./playground/lmm_datasets/eval/mmbench/answers/$SPLIT/llava-v1.5-7b-lora.jsonl \ 9 | --single-pred-prompt \ 10 | --temperature 0 \ 11 | --conv-mode vicuna_v1 12 | 13 | mkdir -p playground/lmm_datasets/eval/mmbench/answers_upload/$SPLIT 14 | 15 | python scripts/convert_mmbench_for_submission.py \ 16 | --annotation-file ./playground/lmm_datasets/eval/mmbench/$SPLIT.tsv \ 17 | --result-dir ./playground/lmm_datasets/eval/mmbench/answers/$SPLIT \ 18 | --upload-dir ./playground/lmm_datasets/eval/mmbench/answers_upload/$SPLIT \ 19 | --experiment llava-v1.5-7b-lora 20 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/mme.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m vcoder_llava.eval.model_vqa_mme \ 4 | --model-path liuhaotian/llava-v1.5-7b-lora \ 5 | --question-file ./playground/lmm_datasets/eval/MME/llava_mme.jsonl \ 6 | --image-folder ./playground/lmm_datasets/eval/MME/MME_Benchmark_release_version \ 7 | --answers-file ./playground/lmm_datasets/eval/MME/answers/llava-v1.5-7b-lora.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | cd ./playground/lmm_datasets/eval/MME 12 | 13 | python convert_answer_to_mme.py --experiment llava-v1.5-7b-lora 14 | 15 | cd eval_tool 16 | 17 | python calculation.py --results_dir answers/llava-v1.5-7b-lora 18 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/pope.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m vcoder_llava.eval.model_vqa_loader \ 4 | --model-path liuhaotian/llava-v1.5-7b-lora \ 5 | --question-file ./playground/data/eval/pope/llava_pope_test.jsonl \ 6 | --image-folder ./playground/data/eval/pope/val2014 \ 7 | --answers-file ./playground/data/eval/pope/answers/vcoder_it_llava-v1.5-7b-lora.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | python vcoder_llava/eval/eval_pope.py \ 12 | --annotation-dir ./playground/data/eval/pope/coco \ 13 | --question-file ./playground/data/eval/pope/llava_pope_test.jsonl \ 14 | --result-file ./playground/data/eval/pope/answers/vcoder_it_llava-v1.5-7b-lora.jsonl 15 | -------------------------------------------------------------------------------- /scripts/v1_5/eval/vizwiz.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python -m vcoder_llava.eval.model_vqa_loader \ 4 | --model-path liuhaotian/llava-v1.5-7b-lora \ 5 | --question-file ./playground/lmm_datasets/eval/vizwiz/llava_test.jsonl \ 6 | --image-folder ./playground/lmm_datasets/eval/vizwiz/test \ 7 | --answers-file
./playground/lmm_datasets/eval/vizwiz/answers/llava-v1.5-7b-lora.jsonl \ 8 | --temperature 0 \ 9 | --conv-mode vicuna_v1 10 | 11 | python scripts/convert_vizwiz_for_submission.py \ 12 | --annotation-file ./playground/lmm_datasets/eval/vizwiz/llava_test.jsonl \ 13 | --result-file ./playground/lmm_datasets/eval/vizwiz/answers/llava-v1.5-7b-lora.jsonl \ 14 | --result-upload-file ./playground/lmm_datasets/eval/vizwiz/answers_upload/llava-v1.5-7b-lora.json -------------------------------------------------------------------------------- /scripts/v1_5/eval/vqav2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_list="${CUDA_VISIBLE_DEVICES:-0}" 4 | IFS=',' read -ra GPULIST <<< "$gpu_list" 5 | 6 | CHUNKS=${#GPULIST[@]} 7 | 8 | CKPT="llava-v1.5-7b-lora" 9 | SPLIT="llava_vqav2_mscoco_test-dev2015" 10 | 11 | for IDX in $(seq 0 $((CHUNKS-1))); do 12 | CUDA_VISIBLE_DEVICES=${GPULIST[$IDX]} python -m vcoder_llava.eval.model_vqa_loader \ 13 | --model-path liuhaotian/llava-v1.5-7b-lora \ 14 | --question-file ./playground/lmm_datasets/eval/vqav2/$SPLIT.jsonl \ 15 | --image-folder ./playground/lmm_datasets/eval/vqav2/test2015 \ 16 | --answers-file ./playground/lmm_datasets/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl \ 17 | --num-chunks $CHUNKS \ 18 | --chunk-idx $IDX \ 19 | --temperature 0 \ 20 | --conv-mode vicuna_v1 & 21 | done 22 | 23 | wait 24 | 25 | output_file=./playground/lmm_datasets/eval/vqav2/answers/$SPLIT/$CKPT/merge.jsonl 26 | 27 | # Clear out the output file if it exists. 28 | > "$output_file" 29 | 30 | # Loop through the indices and concatenate each file. 31 | for IDX in $(seq 0 $((CHUNKS-1))); do 32 | cat ./playground/lmm_datasets/eval/vqav2/answers/$SPLIT/$CKPT/${CHUNKS}_${IDX}.jsonl >> "$output_file" 33 | done 34 | 35 | python scripts/convert_vqav2_for_submission.py --split $SPLIT --ckpt $CKPT 36 | 37 | -------------------------------------------------------------------------------- /scripts/v1_5/finetune.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed vcoder_llava/train/train_mem.py \ 4 | --deepspeed ./scripts/zero3.json \ 5 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 6 | --version v1 \ 7 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 8 | --image_folder ./playground/data \ 9 | --vision_tower openai/clip-vit-large-patch14-336 \ 10 | --pretrain_mm_mlp_adapter ./checkpoints/llava-v1.5-13b-pretrain/mm_projector.bin \ 11 | --mm_projector_type mlp2x_gelu \ 12 | --mm_vision_select_layer -2 \ 13 | --mm_use_im_start_end False \ 14 | --mm_use_im_patch_token False \ 15 | --image_aspect_ratio pad \ 16 | --group_by_modality_length True \ 17 | --bf16 True \ 18 | --output_dir ./checkpoints/llava-v1.5-13b \ 19 | --num_train_epochs 1 \ 20 | --per_device_train_batch_size 16 \ 21 | --per_device_eval_batch_size 4 \ 22 | --gradient_accumulation_steps 1 \ 23 | --evaluation_strategy "no" \ 24 | --save_strategy "steps" \ 25 | --save_steps 50000 \ 26 | --save_total_limit 1 \ 27 | --learning_rate 2e-5 \ 28 | --weight_decay 0. 
\ 29 | --warmup_ratio 0.03 \ 30 | --lr_scheduler_type "cosine" \ 31 | --logging_steps 1 \ 32 | --tf32 True \ 33 | --model_max_length 2048 \ 34 | --gradient_checkpointing True \ 35 | --dataloader_num_workers 4 \ 36 | --lazy_preprocess True \ 37 | --report_to wandb -------------------------------------------------------------------------------- /scripts/v1_5/finetune_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed vcoder_llava/train/train_mem.py \ 4 | --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \ 5 | --deepspeed ./scripts/zero3.json \ 6 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 7 | --version v1 \ 8 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 9 | --image_folder ./playground/data \ 10 | --vision_tower openai/clip-vit-large-patch14-336 \ 11 | --pretrain_mm_mlp_adapter ./checkpoints/llava-v1.5-13b-pretrain/mm_projector.bin \ 12 | --mm_projector_type mlp2x_gelu \ 13 | --mm_vision_select_layer -2 \ 14 | --mm_use_im_start_end False \ 15 | --mm_use_im_patch_token False \ 16 | --image_aspect_ratio pad \ 17 | --group_by_modality_length True \ 18 | --bf16 True \ 19 | --output_dir ./checkpoints/llava-v1.5-13b-lora \ 20 | --num_train_epochs 1 \ 21 | --per_device_train_batch_size 16 \ 22 | --per_device_eval_batch_size 4 \ 23 | --gradient_accumulation_steps 1 \ 24 | --evaluation_strategy "no" \ 25 | --save_strategy "steps" \ 26 | --save_steps 50000 \ 27 | --save_total_limit 1 \ 28 | --learning_rate 2e-4 \ 29 | --weight_decay 0. \ 30 | --warmup_ratio 0.03 \ 31 | --lr_scheduler_type "cosine" \ 32 | --logging_steps 1 \ 33 | --tf32 True \ 34 | --model_max_length 2048 \ 35 | --gradient_checkpointing True \ 36 | --dataloader_num_workers 4 \ 37 | --lazy_preprocess True \ 38 | --report_to wandb -------------------------------------------------------------------------------- /scripts/v1_5/pretrain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deepspeed vcoder_llava/train/train_mem.py \ 4 | --deepspeed ./scripts/zero2.json \ 5 | --model_name_or_path lmsys/vicuna-13b-v1.5 \ 6 | --version plain \ 7 | --data_path ./playground/data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json \ 8 | --image_folder ./playground/data/LLaVA-Pretrain/images \ 9 | --vision_tower openai/clip-vit-large-patch14-336 \ 10 | --mm_projector_type mlp2x_gelu \ 11 | --tune_mm_mlp_adapter True \ 12 | --mm_vision_select_layer -2 \ 13 | --mm_use_im_start_end False \ 14 | --mm_use_im_patch_token False \ 15 | --bf16 True \ 16 | --output_dir ./checkpoints/llava-v1.5-13b-pretrain \ 17 | --num_train_epochs 1 \ 18 | --per_device_train_batch_size 32 \ 19 | --per_device_eval_batch_size 4 \ 20 | --gradient_accumulation_steps 1 \ 21 | --evaluation_strategy "no" \ 22 | --save_strategy "steps" \ 23 | --save_steps 24000 \ 24 | --save_total_limit 1 \ 25 | --learning_rate 1e-3 \ 26 | --weight_decay 0. 
\ 27 | --warmup_ratio 0.03 \ 28 | --lr_scheduler_type "cosine" \ 29 | --logging_steps 1 \ 30 | --tf32 True \ 31 | --model_max_length 2048 \ 32 | --gradient_checkpointing True \ 33 | --dataloader_num_workers 4 \ 34 | --lazy_preprocess True \ 35 | --report_to wandb -------------------------------------------------------------------------------- /scripts/v1_5/vcoder_ds_train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export WANDB_PROJECT="vcoder" 4 | deepspeed vcoder_llava/train/vcoder_ds_train_mem.py \ 5 | --deepspeed ./scripts/zero3.json \ 6 | --model_name_or_path liuhaotian/llava-v1.5-7b \ 7 | --version v1 \ 8 | --data_path /data/storage/jj/data/llava_v1_5_mix665k.json \ 9 | --depth_data_path ./playground/data \ 10 | --image_folder ./playground/data \ 11 | --seg_image_folder ./playground/data \ 12 | --depth_image_folder ./playground/data \ 13 | --use_mm2_proj True \ 14 | --pretrain_mm2_mlp_adapter llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5/mm_projector.bin \ 15 | --tune_mm_mlp_adapter False \ 16 | --freeze_mm_mlp_adapter True \ 17 | --freeze_llm True \ 18 | --seg_tune_adapter True \ 19 | --depth_tune_adapter True \ 20 | --mm_projector_type mlp2x_gelu \ 21 | --depth_mm_projector_type mlp2x_gelu \ 22 | --seg_mm_projector_type mlp2x_gelu \ 23 | --mm_vision_select_layer -2 \ 24 | --image_aspect_ratio pad \ 25 | --group_by_modality_length True \ 26 | --bf16 True \ 27 | --output_dir ./outputs/vcoder_ds_llava-v1.5-7b \ 28 | --num_train_epochs 1 \ 29 | --per_device_train_batch_size 32 \ 30 | --per_device_eval_batch_size 4 \ 31 | --gradient_accumulation_steps 1 \ 32 | --evaluation_strategy "no" \ 33 | --save_strategy "steps" \ 34 | --save_steps 1100 \ 35 | --save_total_limit 1 \ 36 | --learning_rate 1e-3 \ 37 | --weight_decay 0. \ 38 | --warmup_ratio 0.03 \ 39 | --lr_scheduler_type "cosine" \ 40 | --logging_steps 1 \ 41 | --tf32 True \ 42 | --model_max_length 2048 \ 43 | --gradient_checkpointing True \ 44 | --dataloader_num_workers 4 \ 45 | --lazy_preprocess True \ 46 | --report_to wandb -------------------------------------------------------------------------------- /scripts/v1_5/vcoder_it.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export WANDB_PROJECT="vcoder" 4 | deepspeed vcoder_llava/train/vcoder_it_mem.py \ 5 | --deepspeed ./scripts/zero3.json \ 6 | --model_name_or_path lmsys/vicuna-7b-v1.5 \ 7 | --version v1 \ 8 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 9 | --seg_data_path ./playground/data \ 10 | --seg_image_folder ./playground/data \ 11 | --image_folder ./playground/data \ 12 | --vision_tower openai/clip-vit-large-patch14-336 \ 13 | --pretrain_mm_mlp_adapter llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5/mm_projector.bin \ 14 | --seg_tune_adapter True \ 15 | --mm_projector_type mlp2x_gelu \ 16 | --seg_mm_projector_type mlp2x_gelu \ 17 | --mm_vision_select_layer -2 \ 18 | --image_aspect_ratio pad \ 19 | --group_by_modality_length True \ 20 | --bf16 True \ 21 | --output_dir ./outputs/vcoder_it_llava-v1.5-7b \ 22 | --num_train_epochs 1 \ 23 | --per_device_train_batch_size 16 \ 24 | --per_device_eval_batch_size 4 \ 25 | --gradient_accumulation_steps 1 \ 26 | --evaluation_strategy "no" \ 27 | --save_strategy "steps" \ 28 | --save_steps 5000 \ 29 | --save_total_limit 1 \ 30 | --learning_rate 2e-5 \ 31 | --weight_decay 0.
\ 32 | --warmup_ratio 0.03 \ 33 | --lr_scheduler_type "cosine" \ 34 | --logging_steps 1 \ 35 | --tf32 True \ 36 | --model_max_length 2048 \ 37 | --gradient_checkpointing True \ 38 | --dataloader_num_workers 4 \ 39 | --lazy_preprocess True \ 40 | --report_to wandb -------------------------------------------------------------------------------- /scripts/v1_5/vcoder_it_lora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export WANDB_PROJECT="vcoder" 4 | deepspeed vcoder_llava/train/vcoder_it_mem.py \ 5 | --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \ 6 | --deepspeed ./scripts/zero3.json \ 7 | --model_name_or_path lmsys/vicuna-7b-v1.5 \ 8 | --version v1 \ 9 | --data_path ./playground/data/llava_v1_5_mix665k.json \ 10 | --seg_data_path ./playground/data \ 11 | --seg_image_folder ./playground/data \ 12 | --image_folder ./playground/data \ 13 | --vision_tower openai/clip-vit-large-patch14-336 \ 14 | --pretrain_mm_mlp_adapter llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5/mm_projector.bin \ 15 | --seg_tune_adapter True \ 16 | --mm_projector_type mlp2x_gelu \ 17 | --seg_mm_projector_type mlp2x_gelu \ 18 | --mm_vision_select_layer -2 \ 19 | --image_aspect_ratio pad \ 20 | --group_by_modality_length True \ 21 | --bf16 True \ 22 | --output_dir ./outputs/vcoder_it_llava-v1.5-7b-lora \ 23 | --num_train_epochs 1 \ 24 | --per_device_train_batch_size 16 \ 25 | --per_device_eval_batch_size 4 \ 26 | --gradient_accumulation_steps 1 \ 27 | --evaluation_strategy "no" \ 28 | --save_strategy "steps" \ 29 | --save_steps 5000 \ 30 | --save_total_limit 1 \ 31 | --learning_rate 2e-4 \ 32 | --weight_decay 0. \ 33 | --warmup_ratio 0.03 \ 34 | --lr_scheduler_type "cosine" \ 35 | --logging_steps 1 \ 36 | --tf32 True \ 37 | --model_max_length 2048 \ 38 | --gradient_checkpointing True \ 39 | --dataloader_num_workers 4 \ 40 | --lazy_preprocess True \ 41 | --report_to wandb -------------------------------------------------------------------------------- /scripts/v1_5/vcoder_train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export WANDB_PROJECT="vcoder" 4 | deepspeed vcoder_llava/train/vcoder_train_mem.py \ 5 | --deepspeed ./scripts/zero3.json \ 6 | --model_name_or_path liuhaotian/llava-v1.5-7b \ 7 | --version v1 \ 8 | --seg_data_path ./playground/data \ 9 | --seg_image_folder ./playground/data \ 10 | --image_folder ./playground/data \ 11 | --use_mm2_proj True \ 12 | --pretrain_mm2_mlp_adapter llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5/mm_projector.bin \ 13 | --tune_mm_mlp_adapter False \ 14 | --freeze_mm_mlp_adapter True \ 15 | --freeze_llm True \ 16 | --seg_tune_adapter True \ 17 | --mm_projector_type mlp2x_gelu \ 18 | --seg_mm_projector_type mlp2x_gelu \ 19 | --mm_vision_select_layer -2 \ 20 | --image_aspect_ratio pad \ 21 | --group_by_modality_length True \ 22 | --bf16 True \ 23 | --output_dir ./outputs/vcoder_llava-v1.5-7b \ 24 | --num_train_epochs 2 \ 25 | --per_device_train_batch_size 32 \ 26 | --per_device_eval_batch_size 4 \ 27 | --gradient_accumulation_steps 1 \ 28 | --evaluation_strategy "no" \ 29 | --save_strategy "steps" \ 30 | --save_steps 1100 \ 31 | --save_total_limit 1 \ 32 | --learning_rate 1e-3 \ 33 | --weight_decay 0.
\ 34 | --warmup_ratio 0.03 \ 35 | --lr_scheduler_type "cosine" \ 36 | --logging_steps 1 \ 37 | --tf32 True \ 38 | --model_max_length 2048 \ 39 | --gradient_checkpointing True \ 40 | --dataloader_num_workers 4 \ 41 | --lazy_preprocess True \ 42 | --report_to wandb -------------------------------------------------------------------------------- /scripts/zero2.json: -------------------------------------------------------------------------------- 1 | { 2 | "fp16": { 3 | "enabled": "auto", 4 | "loss_scale": 0, 5 | "loss_scale_window": 1000, 6 | "initial_scale_power": 16, 7 | "hysteresis": 2, 8 | "min_loss_scale": 1 9 | }, 10 | "bf16": { 11 | "enabled": "auto" 12 | }, 13 | "train_micro_batch_size_per_gpu": "auto", 14 | "train_batch_size": "auto", 15 | "gradient_accumulation_steps": "auto", 16 | "zero_optimization": { 17 | "stage": 2, 18 | "overlap_comm": true, 19 | "contiguous_gradients": true, 20 | "sub_group_size": 1e9, 21 | "reduce_bucket_size": "auto" 22 | } 23 | } -------------------------------------------------------------------------------- /scripts/zero3.json: -------------------------------------------------------------------------------- 1 | { 2 | "fp16": { 3 | "enabled": "auto", 4 | "loss_scale": 0, 5 | "loss_scale_window": 1000, 6 | "initial_scale_power": 16, 7 | "hysteresis": 2, 8 | "min_loss_scale": 1 9 | }, 10 | "bf16": { 11 | "enabled": "auto" 12 | }, 13 | "train_micro_batch_size_per_gpu": "auto", 14 | "train_batch_size": "auto", 15 | "gradient_accumulation_steps": "auto", 16 | "zero_optimization": { 17 | "stage": 3, 18 | "overlap_comm": true, 19 | "contiguous_gradients": true, 20 | "sub_group_size": 1e9, 21 | "reduce_bucket_size": "auto", 22 | "stage3_prefetch_bucket_size": "auto", 23 | "stage3_param_persistence_threshold": "auto", 24 | "stage3_max_live_parameters": 1e9, 25 | "stage3_max_reuse_distance": 1e9, 26 | "stage3_gather_16bit_weights_on_model_save": true 27 | } 28 | } -------------------------------------------------------------------------------- /scripts/zero3_offload.json: -------------------------------------------------------------------------------- 1 | { 2 | "fp16": { 3 | "enabled": "auto", 4 | "loss_scale": 0, 5 | "loss_scale_window": 1000, 6 | "initial_scale_power": 16, 7 | "hysteresis": 2, 8 | "min_loss_scale": 1 9 | }, 10 | "bf16": { 11 | "enabled": "auto" 12 | }, 13 | "optimizer": { 14 | "type": "AdamW", 15 | "params": { 16 | "lr": "auto", 17 | "betas": "auto", 18 | "eps": "auto", 19 | "weight_decay": "auto" 20 | } 21 | }, 22 | "scheduler": { 23 | "type": "WarmupLR", 24 | "params": { 25 | "warmup_min_lr": "auto", 26 | "warmup_max_lr": "auto", 27 | "warmup_num_steps": "auto" 28 | } 29 | }, 30 | "zero_optimization": { 31 | "stage": 3, 32 | "offload_optimizer": { 33 | "device": "cpu", 34 | "pin_memory": true 35 | }, 36 | "offload_param": { 37 | "device": "cpu", 38 | "pin_memory": true 39 | }, 40 | "overlap_comm": true, 41 | "contiguous_gradients": true, 42 | "sub_group_size": 1e9, 43 | "reduce_bucket_size": "auto", 44 | "stage3_prefetch_bucket_size": "auto", 45 | "stage3_param_persistence_threshold": "auto", 46 | "stage3_max_live_parameters": 1e9, 47 | "stage3_max_reuse_distance": 1e9, 48 | "gather_16bit_weights_on_model_save": true 49 | }, 50 | "gradient_accumulation_steps": "auto", 51 | "gradient_clipping": "auto", 52 | "train_batch_size": "auto", 53 | "train_micro_batch_size_per_gpu": "auto", 54 | "steps_per_print": 1e5, 55 | "wall_clock_breakdown": false 56 | } -------------------------------------------------------------------------------- 
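Note (illustrative, not a file from this repository): the three DeepSpeed configs above (scripts/zero2.json, scripts/zero3.json, scripts/zero3_offload.json) leave most values as "auto"; those placeholders are filled in by the DeepSpeed/HuggingFace Trainer integration at launch time from the arguments passed by the training scripts in scripts/v1_5 (batch size, precision, optimizer settings). The short Python sketch below only loads one of these JSON files and reports its ZeRO stage and offload devices, which can be a quick sanity check before starting a long run; the file paths are the ones shown above, and the function name is an assumption of this sketch.

import json

def summarize_deepspeed_config(path):
    # Read one of the configs shipped in scripts/ (e.g. scripts/zero3.json).
    with open(path) as f:
        cfg = json.load(f)
    zero = cfg.get("zero_optimization", {})
    return {
        "stage": zero.get("stage"),
        # zero3_offload.json additionally moves optimizer and parameter state to CPU.
        "offload_optimizer": zero.get("offload_optimizer", {}).get("device", "none"),
        "offload_param": zero.get("offload_param", {}).get("device", "none"),
        # Top-level "auto" entries are the ones resolved by the launcher/trainer at runtime.
        "auto_keys": [k for k, v in cfg.items() if v == "auto"],
    }

if __name__ == "__main__":
    for name in ("scripts/zero2.json", "scripts/zero3.json", "scripts/zero3_offload.json"):
        print(name, summarize_deepspeed_config(name))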
/vcoder_llava/__init__.py: -------------------------------------------------------------------------------- 1 | from .model import LlavaLlamaForCausalLM, VCoderLlavaLlamaForCausalLM, VCoderDSLlavaLlamaForCausalLM 2 | -------------------------------------------------------------------------------- /vcoder_llava/constants.py: -------------------------------------------------------------------------------- 1 | LOGDIR = "." 2 | 3 | # Model Constants 4 | IGNORE_INDEX = -100 5 | IMAGE_TOKEN_INDEX = -200 6 | DEFAULT_IMAGE_TOKEN = "" 7 | 8 | SEG_TOKEN_INDEX = -300 9 | DEFAULT_SEG_TOKEN = "" 10 | 11 | DEPTH_TOKEN_INDEX = -400 12 | DEFAULT_DEPTH_TOKEN = "" 13 | -------------------------------------------------------------------------------- /vcoder_llava/data_utils.py: -------------------------------------------------------------------------------- 1 | import nltk 2 | import spacy 3 | from word2number import w2n 4 | import inflect 5 | from num2words import num2words 6 | p = inflect.engine() 7 | import numpy as np 8 | import random 9 | 10 | nltk.download('punkt') 11 | nltk.download('averaged_perceptron_tagger') 12 | nlp = spacy.load('en_core_web_sm') 13 | 14 | # object names with two words 15 | SPECIAL_WORDS = ['baseball bat', 16 | 'baseball glove', 17 | 'cell phone', 18 | 'dining table', 19 | 'fire hydrant', 20 | 'french fries', 21 | 'hair drier', 22 | 'hot dog', 23 | 'parking meter', 24 | 'potted plant', 25 | 'soccer ball', 26 | 'soccer player', 27 | 'sports ball', 28 | 'stop sign', 29 | 'teddy bear', 30 | 'tennis racket', 31 | 'toy figure', 32 | 'traffic light', 33 | 'wine glass'] 34 | 35 | def _get_nouns(lines): 36 | # function to test if something is a noun 37 | present_words = [] 38 | for s in SPECIAL_WORDS: 39 | if s in lines: 40 | present_words.append(s) 41 | 42 | for w in present_words: 43 | lines = lines.replace(w, "") 44 | 45 | is_noun = lambda pos: pos[:2] == 'NN' or pos[:2] == 'NNP' 46 | # do the nlp stuff 47 | tokenized = nltk.word_tokenize(lines) 48 | nouns = [word for (word, pos) in nltk.pos_tag(tokenized) if is_noun(pos)] 49 | noun_dict = {} 50 | if "objects" in nouns: 51 | nouns.remove("objects") 52 | if "image" in nouns: 53 | nouns.remove("image") 54 | 55 | for n in nouns: 56 | if n not in noun_dict.keys(): 57 | noun_dict[n] = 1 58 | else: 59 | noun_dict[n] += 1 60 | nouns = {} 61 | for k, v in noun_dict.items(): 62 | if not (k == "bus" or k == "skis"): 63 | if v == 1: 64 | if p.singular_noun(k): 65 | k = p.singular_noun(k) 66 | else: 67 | if not p.singular_noun(k): 68 | k = p.plural(k) 69 | try: 70 | w2n.word_to_num(k) 71 | except: 72 | if len(k) >= 3: 73 | if k == "ski": 74 | k = "skis" 75 | elif k == "gras": 76 | k = "grass" 77 | nouns[k] = v 78 | for w in present_words: 79 | nouns[w] = 1 80 | return nouns 81 | 82 | def _get_num_nouns(lines): 83 | lines = lines.replace(":", "").replace(".", "") 84 | doc = nlp(lines) 85 | num_nouns = [chunk.text for chunk in doc.noun_chunks if any(token.pos_ == 'NUM' for token in chunk)] 86 | 87 | num_noun_dict = {} 88 | for n in num_nouns: 89 | nums = n.split(", ") 90 | for n in nums: 91 | try: 92 | w = " ".join(n.split(' ')[1:]) 93 | if w == "ski": 94 | w = "skis" 95 | num_noun_dict[w] = w2n.word_to_num(n.split(' ')[0]) 96 | except: 97 | pass 98 | 99 | return num_noun_dict 100 | 101 | 102 | def _obtain_nouns(gt): 103 | gt = gt.replace("hair dryer", "hair drier").lower() 104 | nouns_gt = _get_nouns(gt) 105 | 106 | num_nouns_gt = _get_num_nouns(gt) 107 | 108 | com_keys = [] 109 | for k in nouns_gt.keys(): 110 | if p.plural(k) in 
num_nouns_gt.keys(): 111 | com_keys.append(k) 112 | for k in com_keys: 113 | del nouns_gt[k] 114 | 115 | num_nouns_gt = {**num_nouns_gt, **nouns_gt} 116 | 117 | return num_nouns_gt 118 | 119 | def generate_qa_pairs(text): 120 | num_nouns = _obtain_nouns(text) 121 | qa_pairs = [] 122 | 123 | for obj, count in num_nouns.items(): 124 | # Count question 125 | if count == 1: 126 | plural_obj = p.plural(obj) 127 | else: 128 | plural_obj = obj 129 | count_question = f"How many {plural_obj} are there in the image?" 130 | count_answer = f"There {'is' if count == 1 else 'are'} {num2words(count)} {obj} in the image." 131 | qa_pairs.append((count_question, count_answer)) 132 | 133 | prob_positive = np.random.uniform(0,1.) 134 | 135 | if prob_positive > 0.7 or count == 1: 136 | numeric_presence_question = f"{'Is' if count == 1 else 'Are'} there {num2words(count)} {obj} in the image?" 137 | numeric_presence_answer = "Yes." 138 | elif count > 1: 139 | numbers = [i for i in range(2, count + 6) if i != count] 140 | # Select a random number from the range 141 | cnt = random.choice(numbers) 142 | numeric_presence_question = f"{'Is' if cnt == 1 else 'Are'} there {num2words(cnt)} {obj} in the image?" 143 | numeric_presence_answer = "No." 144 | 145 | qa_pairs.append((numeric_presence_question, numeric_presence_answer)) 146 | random.shuffle(qa_pairs) 147 | 148 | return random.sample(qa_pairs, min(len(qa_pairs), random.choice([1, 2, 3, 4, 5, 6]))) 149 | 150 | if __name__ == "__main__": 151 | 152 | text = "The objects present in the image are: wall, ceiling, shelf, cabinet, counter, dining table, two people, eighteen bottles, two wine glasses, refrigerator, tv, bowl" 153 | 154 | qa = generate_qa_pairs(text) 155 | from icecream import ic 156 | ic(qa) 157 | 158 | -------------------------------------------------------------------------------- /vcoder_llava/eval/eval_depth_accuracy.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from tqdm import tqdm 3 | import nltk 4 | import spacy 5 | 6 | nltk.download('punkt') 7 | nltk.download('averaged_perceptron_tagger') 8 | nlp = spacy.load('en_core_web_sm') 9 | 10 | synonyms = open('vcoder_llava/eval/synonyms.txt').readlines() 11 | synonyms = [s.strip().split(', ') for s in synonyms] 12 | WORD_TO_COM = {} 13 | for synonym in synonyms: 14 | for s in synonym: 15 | WORD_TO_COM[s] = synonym[0] 16 | 17 | def parse_args(): 18 | parser = argparse.ArgumentParser(description="LLaVA Inference") 19 | parser.add_argument("--gt_path", type=str, default="path to gt txt files") 20 | parser.add_argument("--pred_path", type=str, default="path to pred txt files") 21 | args = parser.parse_args() 22 | return args 23 | 24 | def _obtain_seg_texts(file_path): 25 | with open(file_path) as f: 26 | lines = f.readlines() 27 | 28 | seg_labels = {} 29 | for line in lines: 30 | key = line.split("")[1].strip("\n") 31 | label = line.split("")[2].strip("\n") 32 | seg_labels[key] = label 33 | return seg_labels 34 | 35 | def extract_conversations(file_path): 36 | with open(file_path) as f: 37 | lines = f.readlines() 38 | seg_preds = {} 39 | for line in lines: 40 | if "--------" in line or line.startswith("<>"): 41 | continue 42 | elif line.startswith("Image: "): 43 | key = line.split("Image: ")[1].strip("\n") 44 | seg_preds[key] = "" 45 | else: 46 | seg_preds[key] = line.strip("<>: ").strip("\n").split("")[0] 47 | return seg_preds 48 | 49 | def _get_order(lines): 50 | if len(lines.split(":")) == 1: 51 | return {}, 0 52 | lines = lines.split(":")[1] 
53 | doc = nlp(lines) 54 | nouns = [chunk.text for chunk in doc.noun_chunks] 55 | order_num = 1 56 | positions = {} 57 | for noun in nouns: 58 | object = noun.split("-")[0].strip() 59 | if object in WORD_TO_COM.keys(): 60 | object = WORD_TO_COM[object] 61 | if object not in positions.keys(): 62 | positions[object] = [order_num] 63 | else: 64 | positions[object].append(order_num) 65 | order_num += 1 66 | return positions, order_num - 1 67 | 68 | def _obtain_object_order(gt, pred): 69 | gt = gt.replace("hair dryer", "hair drier").lower() 70 | pred = pred.replace("hair dryer", "hair drier").lower() 71 | 72 | position_gt, order_num = _get_order(gt) 73 | position_pred, _ = _get_order(pred) 74 | 75 | return position_gt, position_pred, order_num 76 | 77 | def calculate_depth_score(gt_path, pred_path): 78 | gt_labels = _obtain_seg_texts(gt_path) 79 | preds = extract_conversations(pred_path) 80 | 81 | assert all([k in gt_labels.keys() for k in preds.keys()]), "GT and Predicted files don't match!" 82 | 83 | acc_depth_scores = [] 84 | 85 | for k in tqdm(gt_labels.keys(), total=len(gt_labels.keys())): 86 | gt = gt_labels[k] 87 | pred = preds[k] 88 | 89 | position_gt, position_pred, order_num = _obtain_object_order(gt, pred) 90 | 91 | depth_distance = [] 92 | 93 | for k in position_gt.keys(): 94 | if position_pred is not None and k in position_pred.keys(): 95 | order_pred = position_pred[k] 96 | order_gt = position_gt[k] 97 | if len(order_gt) < len(order_pred): 98 | order_gt.extend([100] * (len(order_pred) - len(order_gt))) 99 | elif len(order_pred) < len(order_gt): 100 | order_pred.extend([100] * (len(order_gt) - len(order_pred))) 101 | 102 | for i, j in zip(order_gt, order_pred): 103 | if i == 100 and j == 100: 104 | continue 105 | depth_distance.append(abs(i - j)) 106 | else: 107 | depth_distance.append(100) 108 | 109 | if len(depth_distance) > 0: 110 | acc_depth_scores.append(sum(depth_distance) / order_num) 111 | 112 | return acc_depth_scores 113 | 114 | 115 | if __name__ == "__main__": 116 | args = parse_args() 117 | acc_depth_scores = calculate_depth_score(args.gt_path, args.pred_path) 118 | 119 | print("Average Depth Score is: {}".format(round((sum(acc_depth_scores) / len(acc_depth_scores)), 2))) -------------------------------------------------------------------------------- /vcoder_llava/eval/eval_pope.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import argparse 4 | 5 | def eval_pope(answers, label_file): 6 | label_list = [json.loads(q)['label'] for q in open(label_file, 'r')] 7 | 8 | for answer in answers: 9 | text = answer['text'] 10 | 11 | # Only keep the first sentence 12 | if text.find('.') != -1: 13 | text = text.split('.')[0] 14 | 15 | text = text.replace(',', '') 16 | words = text.split(' ') 17 | if 'No' in words or 'not' in words or 'no' in words: 18 | answer['text'] = 'no' 19 | else: 20 | answer['text'] = 'yes' 21 | 22 | for i in range(len(label_list)): 23 | if label_list[i] == 'no': 24 | label_list[i] = 0 25 | else: 26 | label_list[i] = 1 27 | 28 | pred_list = [] 29 | for answer in answers: 30 | if answer['text'] == 'no': 31 | pred_list.append(0) 32 | else: 33 | pred_list.append(1) 34 | 35 | pos = 1 36 | neg = 0 37 | yes_ratio = pred_list.count(1) / len(pred_list) 38 | 39 | TP, TN, FP, FN = 0, 0, 0, 0 40 | for pred, label in zip(pred_list, label_list): 41 | if pred == pos and label == pos: 42 | TP += 1 43 | elif pred == pos and label == neg: 44 | FP += 1 45 | elif pred == neg and label == neg: 46 | TN += 1 47 
| elif pred == neg and label == pos: 48 | FN += 1 49 | 50 | print('TP\tFP\tTN\tFN\t') 51 | print('{}\t{}\t{}\t{}'.format(TP, FP, TN, FN)) 52 | 53 | precision = float(TP) / float(TP + FP) 54 | recall = float(TP) / float(TP + FN) 55 | f1 = 2*precision*recall / (precision + recall) 56 | acc = (TP + TN) / (TP + TN + FP + FN) 57 | print('Accuracy: {}'.format(acc)) 58 | print('Precision: {}'.format(precision)) 59 | print('Recall: {}'.format(recall)) 60 | print('F1 score: {}'.format(f1)) 61 | print('Yes ratio: {}'.format(yes_ratio)) 62 | print('%.3f, %.3f, %.3f, %.3f, %.3f' % (f1, acc, precision, recall, yes_ratio) ) 63 | 64 | if __name__ == "__main__": 65 | parser = argparse.ArgumentParser() 66 | parser.add_argument("--annotation-dir", type=str) 67 | parser.add_argument("--question-file", type=str) 68 | parser.add_argument("--result-file", type=str) 69 | args = parser.parse_args() 70 | 71 | questions = [json.loads(line) for line in open(args.question_file)] 72 | questions = {question['question_id']: question for question in questions} 73 | answers = [json.loads(q) for q in open(args.result_file)] 74 | for file in os.listdir(args.annotation_dir): 75 | assert file.startswith('coco_pope_') 76 | assert file.endswith('.json') 77 | category = file[10:-5] 78 | cur_answers = [x for x in answers if questions[x['question_id']]['category'] == category] 79 | print('Category: {}, # samples: {}'.format(category, len(cur_answers))) 80 | eval_pope(cur_answers, os.path.join(args.annotation_dir, file)) 81 | print("====================================") 82 | -------------------------------------------------------------------------------- /vcoder_llava/eval/gpt4_query.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import requests 3 | import os 4 | import argparse 5 | from vcoder_llava.questions import QUESTIONS 6 | import random 7 | import glob 8 | from tqdm import tqdm 9 | import time 10 | 11 | # OpenAI API Key 12 | api_key = os.getenv("OPENAI_API_KEY") 13 | headers = { 14 | "Content-Type": "application/json", 15 | "Authorization": f"Bearer {api_key}" 16 | } 17 | 18 | # Function to encode the image 19 | def encode_image(image_path): 20 | with open(image_path, "rb") as image_file: 21 | return base64.b64encode(image_file.read()).decode('utf-8') 22 | 23 | 24 | def query_gpt4(image_path): 25 | # Getting the base64 string 26 | base64_image = encode_image(image_path) 27 | ques = "What entities can be seen in the image? Your answer should be in the format: 'The objects present in the image are: ...' and then just list the objects with their counts (in words) before them in paragraph format." \ 28 | "For example if there are 14 people, two dogs, and three chairs in an image, you should respond: The objects present in are: fourteen people, two dogs, three chairs." 
29 | 30 | payload = { 31 | "model": "gpt-4-vision-preview", 32 | "messages": [ 33 | { 34 | "role": "user", 35 | "content": [ 36 | { 37 | "type": "text", 38 | "text": ques, 39 | }, 40 | { 41 | "type": "image_url", 42 | "image_url": { 43 | "url": f"data:image/jpeg;base64,{base64_image}" 44 | } 45 | } 46 | ] 47 | } 48 | ], 49 | "max_tokens": 300 50 | } 51 | 52 | response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload) 53 | return response.json() 54 | 55 | 56 | if __name__ == "__main__": 57 | parser = argparse.ArgumentParser() 58 | parser.add_argument("--image-folder", type=str, default="") 59 | parser.add_argument("--output-file", type=str, default="output") 60 | args = parser.parse_args() 61 | 62 | if os.path.exists("done_ims.txt"): 63 | with open("done_ims.txt", 'r') as f: 64 | ims = f.readlines() 65 | else: 66 | ims = [] 67 | done_ims = [i.strip("\n") for i in ims] 68 | print(done_ims) 69 | 70 | os.makedirs(os.path.dirname(args.output_file), exist_ok=True) 71 | images = glob.glob(os.path.join(args.image_folder, "*.jpg")) 72 | error_imgs = [] 73 | 74 | for image in tqdm(images, total=len(images)): 75 | skip = False 76 | fail = True 77 | if image in done_ims: 78 | continue 79 | print("Running image %s" % image) 80 | while fail: 81 | try: 82 | answer = query_gpt4(image) 83 | answer = answer["choices"][0]["message"]["content"] 84 | with open(f'done_ims.txt', 'a') as f: 85 | f.write(f'{image}\n') 86 | fail = False 87 | except: 88 | fail = True 89 | print(answer) 90 | if answer['error']['message'] == "Your input image may contain content that is not allowed by our safety system.": 91 | break 92 | skip = True 93 | else: 94 | time.sleep(900) 95 | if skip: 96 | continue 97 | with open(f'{args.output_file}', 'a') as f: 98 | f.write(f'Image: {image.split("/")[-1]}\n') 99 | f.write(f'<>: {answer}\n') 100 | f.write('-------------------------------------------------------\n') 101 | 102 | 103 | print(f"Error images: {error_imgs}") 104 | -------------------------------------------------------------------------------- /vcoder_llava/eval/model_seg_loader.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import os 4 | import json 5 | from tqdm import tqdm 6 | import shortuuid 7 | import random 8 | import glob 9 | 10 | from vcoder_llava.constants import ( 11 | IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, 12 | SEG_TOKEN_INDEX, DEFAULT_SEG_TOKEN, 13 | ) 14 | from vcoder_llava.vcoder_conversation import conv_templates, SeparatorStyle 15 | from vcoder_llava.model.builder import load_pretrained_model 16 | from vcoder_llava.utils import disable_torch_init 17 | from vcoder_llava.mm_utils import process_images, tokenizer_seg_token, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria 18 | from torch.utils.data import Dataset, DataLoader 19 | from vcoder_llava.questions import QUESTIONS 20 | 21 | import math 22 | from PIL import Image 23 | 24 | def split_list(lst, n): 25 | """Split a list into n (roughly) equal-sized chunks""" 26 | chunk_size = math.ceil(len(lst) / n) # integer division 27 | return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] 28 | 29 | 30 | def get_chunk(lst, n, k): 31 | chunks = split_list(lst, n) 32 | return chunks[k] 33 | 34 | # Custom dataset class 35 | class CustomDataset(Dataset): 36 | def __init__(self, questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model_config): 37 | self.questions = questions 38 | 
self.image_folder = args.image_folder 39 | self.seg_image_folder = seg_image_folder 40 | 41 | self.images = glob.glob(os.path.join(args.image_folder, '*.jpg')) 42 | self.images = get_chunk(self.images, args.num_chunks, args.chunk_idx) 43 | 44 | if seg_image_folder is not None: 45 | self.seg_images = glob.glob(os.path.join(seg_image_folder, '*.jpg')) 46 | self.seg_images = get_chunk(self.seg_images, args.num_chunks, args.chunk_idx) 47 | assert len(self.images) == len(self.seg_images), f"Number of images ({len(self.images)}) and seg images ({len(self.seg_images)}) must be the same" 48 | else: 49 | self.seg_images = None 50 | self.tokenizer = tokenizer 51 | self.image_processor = image_processor 52 | self.seg_image_processor = seg_image_processor 53 | self.model_config = model_config 54 | 55 | def __getitem__(self, index): 56 | image_file = self.images[index] 57 | if self.seg_images is not None: 58 | seg_image_file = self.seg_images[index] 59 | else: 60 | seg_image_file = None 61 | ques = random.choice(self.questions) 62 | qs = DEFAULT_IMAGE_TOKEN + '\n' + ques 63 | 64 | image = Image.open(os.path.join(image_file)).convert('RGB') 65 | image_tensor = process_images([image], self.image_processor, self.model_config)[0] 66 | 67 | if seg_image_file is not None: 68 | seg_image = Image.open(os.path.join(seg_image_file)).convert('RGB') 69 | seg_image_tensor = process_images([seg_image], self.seg_image_processor, self.model_config)[0] 70 | qs = DEFAULT_SEG_TOKEN + '\n' + qs 71 | else: 72 | seg_image_tensor = image_tensor 73 | qs = qs + " Return the answer in the paragraph format: 'The objects present in the image are: ...' and then list the objects with their count in word format (if greater than 1) in front of them, like 'two people'." 74 | 75 | conv = conv_templates[args.conv_mode].copy() 76 | conv.append_message(conv.roles[0], qs) 77 | conv.append_message(conv.roles[1], None) 78 | prompt = conv.get_prompt() 79 | 80 | if seg_image_file is None: 81 | input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt') 82 | else: 83 | input_ids = tokenizer_seg_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, return_tensors='pt') 84 | 85 | return input_ids, image_tensor, seg_image_tensor, image_file.split("/")[-1], ques 86 | 87 | def __len__(self): 88 | return len(self.images) 89 | 90 | 91 | # DataLoader 92 | def create_data_loader(questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model_config, batch_size=1, num_workers=4): 93 | assert batch_size == 1, "batch_size must be 1" 94 | dataset = CustomDataset(questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model_config) 95 | data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False) 96 | return data_loader 97 | 98 | 99 | def eval_model(args, task): 100 | # Model 101 | disable_torch_init() 102 | model_path = os.path.expanduser(args.model_path) 103 | model_name = get_model_name_from_path(model_path) 104 | tokenizer, model, image_processor, seg_image_processor, _, context_len = load_pretrained_model(model_path, args.model_base, model_name) 105 | 106 | questions = QUESTIONS[task] 107 | answers_file = os.path.expanduser(args.output_file) 108 | os.makedirs(os.path.dirname(answers_file), exist_ok=True) 109 | answers_file = answers_file + f'_{task}_{args.num_chunks}_{args.chunk_idx}.txt' 110 | 111 | if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode: 112 | 
args.conv_mode = args.conv_mode + '_mmtag' 113 | print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.') 114 | 115 | if not args.use_seg: 116 | seg_image_folder = None 117 | else: 118 | seg_image_folder = os.path.join(args.seg_image_folder, f'{task}_inference') 119 | 120 | data_loader = create_data_loader(questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model.config) 121 | 122 | for input_ids, image_tensor, seg_image_tensor, image_file, ques in tqdm(data_loader, total=len(data_loader), desc=f'Generating {task} answers...'): 123 | 124 | stop_str = conv_templates[args.conv_mode].sep if conv_templates[args.conv_mode].sep_style != SeparatorStyle.TWO else conv_templates[args.conv_mode].sep2 125 | input_ids = input_ids.to(device='cuda', non_blocking=True) 126 | 127 | with torch.inference_mode(): 128 | if "vcoder" in args.model_path: 129 | output_ids = model.generate( 130 | input_ids, 131 | images=image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True), 132 | segs=seg_image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True), 133 | depths=None, 134 | do_sample=True if args.temperature > 0 else False, 135 | temperature=args.temperature, 136 | top_p=args.top_p, 137 | num_beams=args.num_beams, 138 | max_new_tokens=512, 139 | use_cache=True) 140 | else: 141 | output_ids = model.generate( 142 | input_ids, 143 | images=image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True), 144 | do_sample=True if args.temperature > 0 else False, 145 | temperature=args.temperature, 146 | top_p=args.top_p, 147 | num_beams=args.num_beams, 148 | max_new_tokens=512, 149 | use_cache=True) 150 | 151 | input_token_len = input_ids.shape[1] 152 | n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() 153 | if n_diff_input_output > 0: 154 | print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') 155 | outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] 156 | outputs = outputs.strip() 157 | if outputs.endswith(stop_str): 158 | outputs = outputs[:-len(stop_str)] 159 | outputs = outputs.strip() 160 | outputs = outputs.strip('\n') 161 | 162 | with open(f'{answers_file}', 'a') as f: 163 | f.write(f'Image: {image_file[0]}\n') 164 | f.write(f'<>: {ques[0]}\n') 165 | f.write(f'<>: {outputs}\n') 166 | f.write('-------------------------------------------------------\n') 167 | 168 | if __name__ == "__main__": 169 | parser = argparse.ArgumentParser() 170 | parser.add_argument("--model-path", type=str, default="facebook/opt-350m") 171 | parser.add_argument("--model-base", type=str, default=None) 172 | parser.add_argument("--image-folder", type=str, default="") 173 | parser.add_argument("--use_seg", action="store_true") 174 | parser.add_argument("--seg-image-folder", type=str, default="") 175 | parser.add_argument("--output-file", type=str, default="output") 176 | parser.add_argument("--conv-mode", type=str, default="llava_v1") 177 | parser.add_argument("--num-chunks", type=int, default=1) 178 | parser.add_argument("--chunk-idx", type=int, default=0) 179 | parser.add_argument("--temperature", type=float, default=0.2) 180 | parser.add_argument("--top_p", type=float, default=None) 181 | parser.add_argument("--num_beams", type=int, default=1) 182 | args = parser.parse_args() 183 | 184 | for task in ["semantic", "instance", "panoptic"]: 185 | eval_model(args, task) 186 | 
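Note (illustrative sketch, not part of the repository): model_seg_loader.py above appends one block per image to its output .txt file — an "Image:" line, the sampled question, the generated answer, and a dashed separator — and cost.sh then concatenates the per-chunk files and passes them to vcoder_llava.eval.eval_seg_accuracy. The helper below is only a minimal standalone parser for that same format, assuming the tagged "<>: " prefixes match the f.write calls shown above; it is not the repository's evaluator, and the function name is an assumption of this sketch.

def parse_seg_outputs(path):
    """Collect {image_name: {"question": ..., "answer": ...}} from a model_seg_loader output file."""
    preds, key, fields = {}, None, []
    with open(path) as f:
        for raw in f:
            line = raw.rstrip("\n")
            if line.startswith("Image: "):
                # Start of a new per-image record.
                key, fields = line[len("Image: "):], []
            elif line.startswith("----"):
                # End of the record: the first tagged line is the question, the second the answer.
                if key is not None and len(fields) >= 2:
                    preds[key] = {"question": fields[0], "answer": fields[1]}
                key, fields = None, []
            elif key is not None and ": " in line:
                # Drop the "<>: " style prefix written by the loader.
                fields.append(line.split(": ", 1)[1])
    return preds

# Example, using the concatenated file produced by cost.sh above:
# preds = parse_seg_outputs("playground/data/eval/seg/vcoder_llava-v1.5-7b/output_panoptic.txt")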
-------------------------------------------------------------------------------- /vcoder_llava/eval/model_vqa_loader.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import os 4 | import json 5 | from tqdm import tqdm 6 | import shortuuid 7 | 8 | from vcoder_llava.constants import ( 9 | IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, 10 | ) 11 | from vcoder_llava.vcoder_conversation import conv_templates, SeparatorStyle 12 | from vcoder_llava.model.builder import load_pretrained_model 13 | from vcoder_llava.utils import disable_torch_init 14 | from vcoder_llava.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria 15 | from torch.utils.data import Dataset, DataLoader 16 | 17 | from PIL import Image 18 | import math 19 | 20 | 21 | def split_list(lst, n): 22 | """Split a list into n (roughly) equal-sized chunks""" 23 | chunk_size = math.ceil(len(lst) / n) # integer division 24 | return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] 25 | 26 | 27 | def get_chunk(lst, n, k): 28 | chunks = split_list(lst, n) 29 | return chunks[k] 30 | 31 | 32 | # Custom dataset class 33 | class CustomDataset(Dataset): 34 | def __init__(self, questions, image_folder, tokenizer, image_processor, model_config): 35 | self.questions = questions 36 | self.image_folder = image_folder 37 | self.tokenizer = tokenizer 38 | self.image_processor = image_processor 39 | self.model_config = model_config 40 | 41 | def __getitem__(self, index): 42 | line = self.questions[index] 43 | image_file = line["image"] 44 | qs = line["text"] 45 | qs = DEFAULT_IMAGE_TOKEN + '\n' + qs 46 | 47 | conv = conv_templates[args.conv_mode].copy() 48 | conv.append_message(conv.roles[0], qs) 49 | conv.append_message(conv.roles[1], None) 50 | prompt = conv.get_prompt() 51 | 52 | image = Image.open(os.path.join(self.image_folder, image_file)).convert('RGB') 53 | image_tensor = process_images([image], self.image_processor, self.model_config)[0] 54 | 55 | input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt') 56 | 57 | return input_ids, image_tensor 58 | 59 | def __len__(self): 60 | return len(self.questions) 61 | 62 | 63 | # DataLoader 64 | def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=4): 65 | assert batch_size == 1, "batch_size must be 1" 66 | dataset = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config) 67 | data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False) 68 | return data_loader 69 | 70 | 71 | def eval_model(args): 72 | # Model 73 | disable_torch_init() 74 | model_path = os.path.expanduser(args.model_path) 75 | model_name = get_model_name_from_path(model_path) 76 | tokenizer, model, image_processor, _, _, context_len = load_pretrained_model(model_path, args.model_base, model_name) 77 | 78 | questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")] 79 | questions = get_chunk(questions, args.num_chunks, args.chunk_idx) 80 | answers_file = os.path.expanduser(args.answers_file) 81 | os.makedirs(os.path.dirname(answers_file), exist_ok=True) 82 | ans_file = open(answers_file, "w") 83 | 84 | if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode: 85 | args.conv_mode = args.conv_mode + '_mmtag' 86 | print(f'It seems that this is a plain model, but it is not using a 
mmtag prompt, auto switching to {args.conv_mode}.') 87 | 88 | data_loader = create_data_loader(questions, args.image_folder, tokenizer, image_processor, model.config) 89 | 90 | for (input_ids, image_tensor), line in tqdm(zip(data_loader, questions), total=len(questions)): 91 | idx = line["question_id"] 92 | cur_prompt = line["text"] 93 | 94 | stop_str = conv_templates[args.conv_mode].sep if conv_templates[args.conv_mode].sep_style != SeparatorStyle.TWO else conv_templates[args.conv_mode].sep2 95 | input_ids = input_ids.to(device='cuda', non_blocking=True) 96 | 97 | with torch.inference_mode(): 98 | output_ids = model.generate( 99 | input_ids, 100 | images=image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True), 101 | do_sample=True if args.temperature > 0 else False, 102 | temperature=args.temperature, 103 | top_p=args.top_p, 104 | num_beams=args.num_beams, 105 | max_new_tokens=128, 106 | use_cache=True) 107 | 108 | input_token_len = input_ids.shape[1] 109 | n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() 110 | if n_diff_input_output > 0: 111 | print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') 112 | outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] 113 | outputs = outputs.strip() 114 | if outputs.endswith(stop_str): 115 | outputs = outputs[:-len(stop_str)] 116 | outputs = outputs.strip() 117 | 118 | ans_id = shortuuid.uuid() 119 | ans_file.write(json.dumps({"question_id": idx, 120 | "prompt": cur_prompt, 121 | "text": outputs, 122 | "answer_id": ans_id, 123 | "model_id": model_name, 124 | "metadata": {}}) + "\n") 125 | # ans_file.flush() 126 | ans_file.close() 127 | 128 | if __name__ == "__main__": 129 | parser = argparse.ArgumentParser() 130 | parser.add_argument("--model-path", type=str, default="facebook/opt-350m") 131 | parser.add_argument("--model-base", type=str, default=None) 132 | parser.add_argument("--image-folder", type=str, default="") 133 | parser.add_argument("--question-file", type=str, default="tables/question.jsonl") 134 | parser.add_argument("--answers-file", type=str, default="answer.jsonl") 135 | parser.add_argument("--conv-mode", type=str, default="llava_v1") 136 | parser.add_argument("--num-chunks", type=int, default=1) 137 | parser.add_argument("--chunk-idx", type=int, default=0) 138 | parser.add_argument("--temperature", type=float, default=0.2) 139 | parser.add_argument("--top_p", type=float, default=None) 140 | parser.add_argument("--num_beams", type=int, default=1) 141 | args = parser.parse_args() 142 | 143 | eval_model(args) 144 | -------------------------------------------------------------------------------- /vcoder_llava/eval/model_vqa_mmbench.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import os 4 | import json 5 | import pandas as pd 6 | from tqdm import tqdm 7 | import shortuuid 8 | 9 | from vcoder_llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN 10 | from vcoder_llava.vcoder_conversation import conv_templates, SeparatorStyle 11 | from vcoder_llava.model.builder import load_pretrained_model 12 | from vcoder_llava.utils import disable_torch_init 13 | from vcoder_llava.mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path 14 | 15 | from PIL import Image 16 | import math 17 | 18 | 19 | all_options = ['A', 'B', 'C', 'D'] 20 | 21 | 22 | def split_list(lst, n): 23 | """Split a list into n 
(roughly) equal-sized chunks""" 24 | chunk_size = math.ceil(len(lst) / n) # integer division 25 | return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] 26 | 27 | 28 | def get_chunk(lst, n, k): 29 | chunks = split_list(lst, n) 30 | return chunks[k] 31 | 32 | 33 | def is_none(value): 34 | if value is None: 35 | return True 36 | if type(value) is float and math.isnan(value): 37 | return True 38 | if type(value) is str and value.lower() == 'nan': 39 | return True 40 | if type(value) is str and value.lower() == 'none': 41 | return True 42 | return False 43 | 44 | def get_options(row, options): 45 | parsed_options = [] 46 | for option in options: 47 | option_value = row[option] 48 | if is_none(option_value): 49 | break 50 | parsed_options.append(option_value) 51 | return parsed_options 52 | 53 | 54 | def eval_model(args): 55 | # Model 56 | disable_torch_init() 57 | model_path = os.path.expanduser(args.model_path) 58 | model_name = get_model_name_from_path(model_path) 59 | tokenizer, model, image_processor, _, _, context_len = load_pretrained_model(model_path, args.model_base, model_name) 60 | 61 | questions = pd.read_table(os.path.expanduser(args.question_file)) 62 | questions = get_chunk(questions, args.num_chunks, args.chunk_idx) 63 | answers_file = os.path.expanduser(args.answers_file) 64 | os.makedirs(os.path.dirname(answers_file), exist_ok=True) 65 | ans_file = open(answers_file, "w") 66 | 67 | if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode: 68 | args.conv_mode = args.conv_mode + '_mmtag' 69 | print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.') 70 | 71 | for index, row in tqdm(questions.iterrows(), total=len(questions)): 72 | options = get_options(row, all_options) 73 | cur_option_char = all_options[:len(options)] 74 | 75 | if args.all_rounds: 76 | num_rounds = len(options) 77 | else: 78 | num_rounds = 1 79 | 80 | for round_idx in range(num_rounds): 81 | idx = row['index'] 82 | question = row['question'] 83 | hint = row['hint'] 84 | image = load_image_from_base64(row['image']) 85 | if not is_none(hint): 86 | question = hint + '\n' + question 87 | for option_char, option in zip(all_options[:len(options)], options): 88 | question = question + '\n' + option_char + '. ' + option 89 | qs = cur_prompt = question 90 | qs = DEFAULT_IMAGE_TOKEN + '\n' + qs 91 | 92 | if args.single_pred_prompt: 93 | if args.lang == 'cn': 94 | qs = qs + '\n' + "请直接回答选项字母。" 95 | else: 96 | qs = qs + '\n' + "Answer with the option's letter from the given choices directly." 
97 | 98 | conv = conv_templates[args.conv_mode].copy() 99 | conv.append_message(conv.roles[0], qs) 100 | conv.append_message(conv.roles[1], None) 101 | prompt = conv.get_prompt() 102 | 103 | input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() 104 | 105 | image_tensor = process_images([image], image_processor, model.config)[0] 106 | # image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] 107 | 108 | stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 109 | 110 | with torch.inference_mode(): 111 | output_ids = model.generate( 112 | input_ids, 113 | images=image_tensor.unsqueeze(0).half().cuda(), 114 | do_sample=True if args.temperature > 0 else False, 115 | temperature=args.temperature, 116 | top_p=args.top_p, 117 | num_beams=args.num_beams, 118 | # no_repeat_ngram_size=3, 119 | max_new_tokens=1024, 120 | use_cache=True) 121 | 122 | input_token_len = input_ids.shape[1] 123 | n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() 124 | if n_diff_input_output > 0: 125 | print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') 126 | outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] 127 | outputs = outputs.strip() 128 | if outputs.endswith(stop_str): 129 | outputs = outputs[:-len(stop_str)] 130 | outputs = outputs.strip() 131 | 132 | ans_id = shortuuid.uuid() 133 | ans_file.write(json.dumps({"question_id": idx, 134 | "round_id": round_idx, 135 | "prompt": cur_prompt, 136 | "text": outputs, 137 | "options": options, 138 | "option_char": cur_option_char, 139 | "answer_id": ans_id, 140 | "model_id": model_name, 141 | "metadata": {}}) + "\n") 142 | ans_file.flush() 143 | 144 | # rotate options 145 | options = options[1:] + options[:1] 146 | cur_option_char = cur_option_char[1:] + cur_option_char[:1] 147 | ans_file.close() 148 | 149 | if __name__ == "__main__": 150 | parser = argparse.ArgumentParser() 151 | parser.add_argument("--model-path", type=str, default="facebook/opt-350m") 152 | parser.add_argument("--model-base", type=str, default=None) 153 | parser.add_argument("--image-folder", type=str, default="") 154 | parser.add_argument("--question-file", type=str, default="tables/question.jsonl") 155 | parser.add_argument("--answers-file", type=str, default="answer.jsonl") 156 | parser.add_argument("--conv-mode", type=str, default="llava_v1") 157 | parser.add_argument("--num-chunks", type=int, default=1) 158 | parser.add_argument("--chunk-idx", type=int, default=0) 159 | parser.add_argument("--temperature", type=float, default=0.2) 160 | parser.add_argument("--top_p", type=float, default=None) 161 | parser.add_argument("--num_beams", type=int, default=1) 162 | parser.add_argument("--all-rounds", action="store_true") 163 | parser.add_argument("--single-pred-prompt", action="store_true") 164 | parser.add_argument("--lang", type=str, default="en") 165 | args = parser.parse_args() 166 | 167 | eval_model(args) 168 | -------------------------------------------------------------------------------- /vcoder_llava/eval/model_vqa_mme.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | import os 4 | import json 5 | from tqdm import tqdm 6 | import shortuuid 7 | 8 | from vcoder_llava.constants import ( 9 | IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, 10 | ) 11 | from vcoder_llava.vcoder_conversation import conv_templates, 
SeparatorStyle 12 | from vcoder_llava.model.builder import load_pretrained_model 13 | from vcoder_llava.utils import disable_torch_init 14 | from vcoder_llava.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria 15 | from torch.utils.data import Dataset, DataLoader 16 | 17 | from PIL import Image 18 | import math 19 | 20 | 21 | def split_list(lst, n): 22 | """Split a list into n (roughly) equal-sized chunks""" 23 | chunk_size = math.ceil(len(lst) / n) # integer division 24 | return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)] 25 | 26 | 27 | def get_chunk(lst, n, k): 28 | chunks = split_list(lst, n) 29 | return chunks[k] 30 | 31 | 32 | # Custom dataset class 33 | class CustomDataset(Dataset): 34 | def __init__(self, questions, image_folder, tokenizer, image_processor, model_config): 35 | self.questions = questions 36 | self.image_folder = image_folder 37 | self.tokenizer = tokenizer 38 | self.image_processor = image_processor 39 | self.model_config = model_config 40 | 41 | def __getitem__(self, index): 42 | line = self.questions[index] 43 | image_file = line["image"] 44 | qs = line["text"] 45 | qs = DEFAULT_IMAGE_TOKEN + '\n' + qs 46 | 47 | conv = conv_templates[args.conv_mode].copy() 48 | conv.append_message(conv.roles[0], qs) 49 | conv.append_message(conv.roles[1], None) 50 | prompt = conv.get_prompt() 51 | 52 | image = Image.open(os.path.join(self.image_folder, image_file)).convert('RGB') 53 | image_tensor = process_images([image], self.image_processor, self.model_config)[0] 54 | 55 | input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt') 56 | 57 | return input_ids, image_tensor 58 | 59 | def __len__(self): 60 | return len(self.questions) 61 | 62 | 63 | # DataLoader 64 | def create_data_loader(questions, image_folder, tokenizer, image_processor, model_config, batch_size=1, num_workers=4): 65 | assert batch_size == 1, "batch_size must be 1" 66 | dataset = CustomDataset(questions, image_folder, tokenizer, image_processor, model_config) 67 | data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False) 68 | return data_loader 69 | 70 | 71 | def eval_model(args): 72 | # Model 73 | disable_torch_init() 74 | model_path = os.path.expanduser(args.model_path) 75 | model_name = get_model_name_from_path(model_path) 76 | tokenizer, model, image_processor, _, _, context_len = load_pretrained_model(model_path, args.model_base, model_name) 77 | 78 | questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")] 79 | ques = get_chunk(questions, args.num_chunks, args.chunk_idx) 80 | questions = [] 81 | for q in ques: 82 | if 'count' in q["image"] or "existence" in q["image"]: 83 | questions.append(q) 84 | answers_file = os.path.expanduser(args.answers_file) 85 | os.makedirs(os.path.dirname(answers_file), exist_ok=True) 86 | ans_file = open(answers_file, "w") 87 | 88 | if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode: 89 | args.conv_mode = args.conv_mode + '_mmtag' 90 | print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.') 91 | 92 | data_loader = create_data_loader(questions, args.image_folder, tokenizer, image_processor, model.config) 93 | 94 | for (input_ids, image_tensor), line in tqdm(zip(data_loader, questions), total=len(questions)): 95 | idx = line["question_id"] 96 | cur_prompt = line["text"] 97 | 98 | stop_str = 
conv_templates[args.conv_mode].sep if conv_templates[args.conv_mode].sep_style != SeparatorStyle.TWO else conv_templates[args.conv_mode].sep2 99 | input_ids = input_ids.to(device='cuda', non_blocking=True) 100 | 101 | with torch.inference_mode(): 102 | output_ids = model.generate( 103 | input_ids, 104 | images=image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True), 105 | do_sample=True if args.temperature > 0 else False, 106 | temperature=args.temperature, 107 | top_p=args.top_p, 108 | num_beams=args.num_beams, 109 | max_new_tokens=128, 110 | use_cache=True) 111 | 112 | input_token_len = input_ids.shape[1] 113 | n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item() 114 | if n_diff_input_output > 0: 115 | print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids') 116 | outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0] 117 | outputs = outputs.strip() 118 | if outputs.endswith(stop_str): 119 | outputs = outputs[:-len(stop_str)] 120 | outputs = outputs.strip() 121 | 122 | ans_id = shortuuid.uuid() 123 | ans_file.write(json.dumps({"question_id": idx, 124 | "prompt": cur_prompt, 125 | "text": outputs, 126 | "answer_id": ans_id, 127 | "model_id": model_name, 128 | "metadata": {}}) + "\n") 129 | # ans_file.flush() 130 | ans_file.close() 131 | 132 | if __name__ == "__main__": 133 | parser = argparse.ArgumentParser() 134 | parser.add_argument("--model-path", type=str, default="facebook/opt-350m") 135 | parser.add_argument("--model-base", type=str, default=None) 136 | parser.add_argument("--image-folder", type=str, default="") 137 | parser.add_argument("--question-file", type=str, default="tables/question.jsonl") 138 | parser.add_argument("--answers-file", type=str, default="answer.jsonl") 139 | parser.add_argument("--conv-mode", type=str, default="llava_v1") 140 | parser.add_argument("--num-chunks", type=int, default=1) 141 | parser.add_argument("--chunk-idx", type=int, default=0) 142 | parser.add_argument("--temperature", type=float, default=0.2) 143 | parser.add_argument("--top_p", type=float, default=None) 144 | parser.add_argument("--num_beams", type=int, default=1) 145 | args = parser.parse_args() 146 | 147 | eval_model(args) 148 | -------------------------------------------------------------------------------- /vcoder_llava/eval/synonyms.txt: -------------------------------------------------------------------------------- 1 | person, girl, boy, man, woman, kid, child, chef, baker, people, adult, rider, children, baby, worker, passenger, sister, biker, policeman, cop, officer, lady, cowboy, bride, groom, male, female, guy, traveler, mother, father, gentleman, pitcher, player, skier, snowboarder, skater, skateboarder, person, woman, guy, foreigner, child, gentleman, caller, offender, coworker, trespasser, patient, politician, soldier, grandchild, serviceman, walker, drinker, doctor, bicyclist, thief, buyer, teenager, student, camper, driver, solider, hunter, shopper, villager 2 | bicycle, bike, bicycle, bike, unicycle, minibike, trike 3 | car, automobile, van, minivan, sedan, suv, hatchback, cab, jeep, coupe, taxicab, limo, taxi 4 | motorcycle, scooter, motor bike, motor cycle, motorbike, scooter, moped 5 | airplane, jetliner, plane, air plane, monoplane, aircraft, jet, jetliner, airbus, biplane, seaplane 6 | bus, minibus, trolley 7 | train, locomotive, tramway, caboose 8 | truck, pickup, lorry, hauler, firetruck 9 | boat, ship, liner, sailboat, motorboat, dinghy, powerboat, 
speedboat, canoe, skiff, yacht, kayak, catamaran, pontoon, houseboat, vessel, rowboat, trawler, ferryboat, watercraft, tugboat, schooner, barge, ferry, sailboard, paddleboat, lifeboat, freighter, steamboat, riverboat, battleship, steamship 10 | traffic light, street light, traffic signal, stop light, streetlight, stoplight 11 | fire hydrant, hydrant 12 | stop sign 13 | parking meter 14 | bench, pew 15 | bird, ostrich, owl, seagull, goose, duck, parakeet, falcon, robin, pelican, waterfowl, heron, hummingbird, mallard, finch, pigeon, sparrow, seabird, osprey, blackbird, fowl, shorebird, woodpecker, egret, chickadee, quail, bluebird, kingfisher, buzzard, willet, gull, swan, bluejay, flamingo, cormorant, parrot, loon, gosling, waterbird, pheasant, rooster, sandpiper, crow, raven, turkey, oriole, cowbird, warbler, magpie, peacock, cockatiel, lorikeet, puffin, vulture, condor, macaw, peafowl, cockatoo, songbird 16 | cat, kitten, feline, tabby 17 | dog, puppy, beagle, pup, chihuahua, schnauzer, dachshund, rottweiler, canine, pitbull, collie, pug, terrier, poodle, labrador, doggie, doberman, mutt, doggy, spaniel, bulldog, sheepdog, weimaraner, corgi, cocker, greyhound, retriever, brindle, hound, whippet, husky 18 | horse, colt, pony, racehorse, stallion, equine, mare, foal, palomino, mustang, clydesdale, bronc, bronco 19 | sheep, lamb, ram, lamb, goat, ewe 20 | cow, cattle, oxen, ox, calf, cattle, holstein, heifer, buffalo, bull, zebu, bison 21 | elephant 22 | bear, panda 23 | zebra 24 | giraffe 25 | backpack, knapsack 26 | umbrella 27 | handbag, wallet, purse, briefcase 28 | tie, bow, bow tie 29 | suitcase, suit case, luggage 30 | frisbee 31 | skis, ski 32 | snowboard 33 | sports ball, ball 34 | kite 35 | baseball bat 36 | baseball glove 37 | skateboard 38 | surfboard, longboard, skimboard, shortboard, wakeboard 39 | tennis racket, racket 40 | bottle 41 | wine glass 42 | cup 43 | fork 44 | knife, pocketknife, knive 45 | spoon 46 | bowl, container 47 | banana 48 | apple 49 | sandwich, burger, sub, cheeseburger, hamburger 50 | orange 51 | broccoli 52 | carrot 53 | hot dog 54 | pizza 55 | donut, doughnut, bagel 56 | cake, cheesecake, cupcake, shortcake, coffeecake, pancake 57 | chair, seat, stool 58 | couch, sofa, recliner, futon, loveseat, settee, chesterfield 59 | potted plant, houseplant 60 | bed 61 | dining table, table, desk 62 | toilet, urinal, commode, toilet, lavatory, potty 63 | tv, monitor, televison, television 64 | laptop, computer, notebook, netbook, lenovo, macbook, laptop computer 65 | mouse 66 | remote 67 | keyboard 68 | cell phone, mobile phone, phone, cellphone, telephone, phon, smartphone, iPhone 69 | microwave 70 | oven, stovetop, stove, stove top oven 71 | toaster 72 | sink 73 | refrigerator, fridge, fridge, freezer 74 | book 75 | clock 76 | vase 77 | scissors 78 | teddy bear, teddybear 79 | hair drier, hairdryer 80 | toothbrush -------------------------------------------------------------------------------- /vcoder_llava/mm_utils.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | from io import BytesIO 3 | import base64 4 | 5 | import torch 6 | from transformers import StoppingCriteria 7 | from vcoder_llava.constants import IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, DEPTH_TOKEN_INDEX 8 | 9 | 10 | def load_image_from_base64(image): 11 | return Image.open(BytesIO(base64.b64decode(image))) 12 | 13 | 14 | def expand2square(pil_img, background_color): 15 | width, height = pil_img.size 16 | if width == height: 17 | return 
pil_img 18 | elif width > height: 19 | result = Image.new(pil_img.mode, (width, width), background_color) 20 | result.paste(pil_img, (0, (width - height) // 2)) 21 | return result 22 | else: 23 | result = Image.new(pil_img.mode, (height, height), background_color) 24 | result.paste(pil_img, ((height - width) // 2, 0)) 25 | return result 26 | 27 | 28 | def process_images(images, image_processor, model_cfg): 29 | image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None) 30 | new_images = [] 31 | if image_aspect_ratio == 'pad': 32 | for image in images: 33 | image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean)) 34 | image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] 35 | new_images.append(image) 36 | else: 37 | return image_processor(images, return_tensors='pt')['pixel_values'] 38 | if all(x.shape == new_images[0].shape for x in new_images): 39 | new_images = torch.stack(new_images, dim=0) 40 | return new_images 41 | 42 | 43 | def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None): 44 | prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('')] 45 | 46 | def insert_separator(X, sep): 47 | return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] 48 | 49 | input_ids = [] 50 | offset = 0 51 | if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: 52 | offset = 1 53 | input_ids.append(prompt_chunks[0][0]) 54 | 55 | for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)): 56 | input_ids.extend(x[offset:]) 57 | 58 | if return_tensors is not None: 59 | if return_tensors == 'pt': 60 | return torch.tensor(input_ids, dtype=torch.long) 61 | raise ValueError(f'Unsupported tensor type: {return_tensors}') 62 | return input_ids 63 | 64 | 65 | def tokenizer_seg_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, seg_token_index=SEG_TOKEN_INDEX, return_tensors=None): 66 | prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('\n')] 67 | 68 | def insert_separator(X, sep): 69 | return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] 70 | 71 | input_ids = [] 72 | offset = 0 73 | if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: 74 | offset = 1 75 | input_ids.append(prompt_chunks[0][0]) 76 | 77 | for x in insert_separator(prompt_chunks, [seg_token_index, image_token_index] * (offset + 1)): 78 | if seg_token_index in x: 79 | input_ids.extend(x[offset:-1]) 80 | else: 81 | input_ids.extend(x[offset:]) 82 | 83 | if return_tensors is not None: 84 | if return_tensors == 'pt': 85 | return torch.tensor(input_ids, dtype=torch.long) 86 | raise ValueError(f'Unsupported tensor type: {return_tensors}') 87 | return input_ids 88 | 89 | def _tokenizer_depth_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, seg_token_index=SEG_TOKEN_INDEX, depth_token_index=DEPTH_TOKEN_INDEX, return_tensors=None): 90 | prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('\n\n')] 91 | 92 | def insert_separator(X, sep): 93 | return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] 94 | 95 | input_ids = [] 96 | offset = 0 97 | if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: 98 | offset = 1 99 | input_ids.append(prompt_chunks[0][0]) 100 | 101 | for x in insert_separator(prompt_chunks, [image_token_index, depth_token_index, 
seg_token_index] * (offset + 1)): 102 | if depth_token_index in x and seg_token_index in x: 103 | input_ids.extend(x[:3]) 104 | else: 105 | input_ids.extend(x[offset:]) 106 | 107 | if return_tensors is not None: 108 | if return_tensors == 'pt': 109 | return torch.tensor(input_ids, dtype=torch.long) 110 | raise ValueError(f'Unsupported tensor type: {return_tensors}') 111 | return input_ids 112 | 113 | def tokenizer_depth_seg_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, seg_token_index=SEG_TOKEN_INDEX, depth_token_index=DEPTH_TOKEN_INDEX, return_tensors=None): 114 | if "" in prompt: 115 | return _tokenizer_depth_token(prompt, tokenizer, image_token_index, seg_token_index, depth_token_index, return_tensors) 116 | else: 117 | return tokenizer_seg_token(prompt, tokenizer, image_token_index, seg_token_index, return_tensors) 118 | 119 | 120 | def get_model_name_from_path(model_path): 121 | model_path = model_path.strip("/") 122 | model_paths = model_path.split("/") 123 | if model_paths[-1].startswith('checkpoint-'): 124 | return model_paths[-2] + "_" + model_paths[-1] 125 | else: 126 | return model_paths[-1] 127 | 128 | class KeywordsStoppingCriteria(StoppingCriteria): 129 | def __init__(self, keywords, tokenizer, input_ids): 130 | self.keywords = keywords 131 | self.keyword_ids = [] 132 | for keyword in keywords: 133 | cur_keyword_ids = tokenizer(keyword).input_ids 134 | if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id: 135 | cur_keyword_ids = cur_keyword_ids[1:] 136 | self.keyword_ids.append(torch.tensor(cur_keyword_ids)) 137 | self.tokenizer = tokenizer 138 | self.start_len = input_ids.shape[1] 139 | 140 | def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: 141 | assert output_ids.shape[0] == 1, "Only support batch size 1 (yet)" # TODO 142 | offset = min(output_ids.shape[1] - self.start_len, 3) 143 | self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids] 144 | for keyword_id in self.keyword_ids: 145 | if output_ids[0, -keyword_id.shape[0]:] == keyword_id: 146 | return True 147 | outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0] 148 | for keyword in self.keywords: 149 | if keyword in outputs: 150 | return True 151 | return False 152 | -------------------------------------------------------------------------------- /vcoder_llava/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .language_model.llava_llama import LlavaLlamaForCausalLM, LlavaConfig 2 | from .language_model.vcoder_llava_llama import VCoderLlavaLlamaForCausalLM, VCoderLlavaConfig 3 | from .language_model.vcoder_ds_llava_llama import VCoderDSLlavaLlamaForCausalLM, VCoderDSLlavaConfig 4 | from .language_model.vcoder_it_llava_llama import VCoderITLlavaLlamaForCausalLM, VCoderITLlavaConfig 5 | -------------------------------------------------------------------------------- /vcoder_llava/model/apply_delta.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python3 -m fastchat.model.apply_delta --base ~/model_weights/llama-7b --target ~/model_weights/vicuna-7b --delta lmsys/vicuna-7b-delta 4 | """ 5 | import argparse 6 | 7 | import torch 8 | from tqdm import tqdm 9 | from transformers import AutoTokenizer, AutoModelForCausalLM 10 | from vcoder_llava import LlavaLlamaForCausalLM 11 | 12 | 13 | def apply_delta(base_model_path, target_model_path, delta_path): 14 | 
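# Delta checkpoints store (target - base) for every tensor the two models
# share, so applying one is element-wise addition: target = delta + base.
# Keys that exist only in the delta (e.g. model.mm_projector.*) are kept as-is.
# For resized tensors such as model.embed_tokens.weight and lm_head.weight,
# only the overlapping [:rows, :cols] slice of the base is added, so rows for
# newly added tokens keep their delta values. For example (hypothetical sizes),
# a 32000x4096 base embedding combined with a 32003x4096 delta yields a
# 32003x4096 target whose last three rows come from the delta alone.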
print("Loading base model") 15 | base = AutoModelForCausalLM.from_pretrained( 16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 17 | 18 | print("Loading delta") 19 | delta = LlavaLlamaForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 20 | delta_tokenizer = AutoTokenizer.from_pretrained(delta_path) 21 | 22 | print("Applying delta") 23 | for name, param in tqdm(delta.state_dict().items(), desc="Applying delta"): 24 | if name not in base.state_dict(): 25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model' 26 | continue 27 | if param.data.shape == base.state_dict()[name].shape: 28 | param.data += base.state_dict()[name] 29 | else: 30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], \ 31 | f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}' 32 | bparam = base.state_dict()[name] 33 | param.data[:bparam.shape[0], :bparam.shape[1]] += bparam 34 | 35 | print("Saving target model") 36 | delta.save_pretrained(target_model_path) 37 | delta_tokenizer.save_pretrained(target_model_path) 38 | 39 | 40 | if __name__ == "__main__": 41 | parser = argparse.ArgumentParser() 42 | parser.add_argument("--base-model-path", type=str, required=True) 43 | parser.add_argument("--target-model-path", type=str, required=True) 44 | parser.add_argument("--delta-path", type=str, required=True) 45 | 46 | args = parser.parse_args() 47 | 48 | apply_delta(args.base_model_path, args.target_model_path, args.delta_path) 49 | -------------------------------------------------------------------------------- /vcoder_llava/model/builder.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Haotian Liu 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | import os 17 | import warnings 18 | import shutil 19 | 20 | from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig 21 | import torch 22 | from vcoder_llava.model import * 23 | 24 | 25 | def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda"): 26 | kwargs = {"device_map": device_map} 27 | 28 | if load_8bit: 29 | kwargs['load_in_8bit'] = True 30 | elif load_4bit: 31 | kwargs['load_in_4bit'] = True 32 | kwargs['quantization_config'] = BitsAndBytesConfig( 33 | load_in_4bit=True, 34 | bnb_4bit_compute_dtype=torch.float16, 35 | bnb_4bit_use_double_quant=True, 36 | bnb_4bit_quant_type='nf4' 37 | ) 38 | else: 39 | kwargs['torch_dtype'] = torch.float16 40 | if 'llava' in model_name.lower(): 41 | # Load LLaVA model 42 | if 'lora' in model_name.lower() and model_base is None: 43 | warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. 
Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.') 44 | if 'lora' in model_name.lower() and model_base is not None: 45 | lora_cfg_pretrained = AutoConfig.from_pretrained(model_path) 46 | tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) 47 | if 'vcoder_it' in model_name.lower(): 48 | print('Loading VCoder LLaVA from base model...') 49 | model = VCoderITLlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs) 50 | else: 51 | print('Loading LLaVA from base model...') 52 | model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs) 53 | token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features 54 | if model.lm_head.weight.shape[0] != token_num: 55 | model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype)) 56 | model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype)) 57 | 58 | print('Loading additional weights...') 59 | if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')): 60 | non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu') 61 | else: 62 | # this is probably from HF Hub 63 | from huggingface_hub import hf_hub_download 64 | def load_from_hf(repo_id, filename, subfolder=None): 65 | cache_file = hf_hub_download( 66 | repo_id=repo_id, 67 | filename=filename, 68 | subfolder=subfolder) 69 | return torch.load(cache_file, map_location='cpu') 70 | non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin') 71 | non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()} 72 | if any(k.startswith('model.model.') for k in non_lora_trainables): 73 | non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()} 74 | model.load_state_dict(non_lora_trainables, strict=False) 75 | 76 | from peft import PeftModel 77 | print('Loading LoRA weights...') 78 | model = PeftModel.from_pretrained(model, model_path) 79 | print('Merging LoRA weights...') 80 | model = model.merge_and_unload() 81 | print('Model is loaded...') 82 | elif model_base is not None: 83 | # this may be mm projector only 84 | print('Loading LLaVA from base model...') 85 | tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) 86 | cfg_pretrained = AutoConfig.from_pretrained(model_path) 87 | model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs) 88 | 89 | mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu') 90 | mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()} 91 | model.load_state_dict(mm_projector_weights, strict=False) 92 | else: 93 | if 'vcoder_it_llava' in model_name.lower(): 94 | print('Loading VCoder LLaVA from base model...') 95 | tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) 96 | model = VCoderITLlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) 97 | elif 'vcoder_ds_llava' in model_name.lower(): 98 | print('Loading VCoder LLaVA from base model...') 99 | tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) 100 | model = VCoderDSLlavaLlamaForCausalLM.from_pretrained(model_path, 
low_cpu_mem_usage=True, **kwargs) 101 | elif 'vcoder_llava' in model_name.lower(): 102 | print('Loading VCoder LLaVA from base model...') 103 | tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) 104 | model = VCoderLlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) 105 | else: 106 | print('Loading LLaVA from base model...') 107 | tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) 108 | model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) 109 | else: 110 | # Load language model 111 | if model_base is not None: 112 | # PEFT model 113 | from peft import PeftModel 114 | tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False) 115 | model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto") 116 | print(f"Loading LoRA weights from {model_path}") 117 | model = PeftModel.from_pretrained(model, model_path) 118 | print(f"Merging weights") 119 | model = model.merge_and_unload() 120 | print('Convert to FP16...') 121 | model.to(torch.float16) 122 | else: 123 | use_fast = False 124 | if 'mpt' in model_name.lower(): 125 | tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True) 126 | model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs) 127 | else: 128 | tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) 129 | model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) 130 | 131 | image_processor = None 132 | 133 | if hasattr(model.config, "max_sequence_length"): 134 | context_len = model.config.max_sequence_length 135 | else: 136 | context_len = 2048 137 | 138 | if 'llava' in model_name.lower(): 139 | vision_tower = model.get_vision_tower() 140 | if not vision_tower.is_loaded: 141 | vision_tower.load_model() 142 | vision_tower.to(device=device, dtype=torch.float16) 143 | image_processor = vision_tower.image_processor 144 | 145 | seg_image_processor = None 146 | if 'vcoder' in model_name.lower(): 147 | seg_image_processor = image_processor 148 | 149 | depth_image_processor = None 150 | if "ds" in model_name.lower(): 151 | depth_image_processor = image_processor 152 | 153 | model.requires_grad_(False) 154 | return tokenizer, model, image_processor, seg_image_processor, depth_image_processor, context_len 155 | -------------------------------------------------------------------------------- /vcoder_llava/model/consolidate.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python3 -m vcoder_llava.model.consolidate --src ~/model_weights/llava-7b --dst ~/model_weights/llava-7b_consolidate 4 | """ 5 | import argparse 6 | 7 | import torch 8 | from transformers import AutoTokenizer, AutoModelForCausalLM 9 | from vcoder_llava.model import * 10 | from vcoder_llava.model.utils import auto_upgrade 11 | 12 | 13 | def consolidate_ckpt(src_path, dst_path): 14 | print("Loading model") 15 | auto_upgrade(src_path) 16 | src_model = AutoModelForCausalLM.from_pretrained(src_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 17 | src_tokenizer = AutoTokenizer.from_pretrained(src_path, use_fast=False) 18 | src_model.save_pretrained(dst_path) 19 | src_tokenizer.save_pretrained(dst_path) 20 | 21 | 22 | if __name__ == "__main__": 23 | parser = argparse.ArgumentParser() 24 | parser.add_argument("--src", type=str, required=True) 25 | parser.add_argument("--dst", 
type=str, required=True) 26 | 27 | args = parser.parse_args() 28 | 29 | consolidate_ckpt(args.src, args.dst) 30 | -------------------------------------------------------------------------------- /vcoder_llava/model/language_model/llava_llama.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Haotian Liu 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | from typing import List, Optional, Tuple, Union 17 | 18 | import torch 19 | import torch.nn as nn 20 | from torch.nn import CrossEntropyLoss 21 | 22 | from transformers import AutoConfig, AutoModelForCausalLM, \ 23 | LlamaConfig, LlamaModel, LlamaForCausalLM 24 | 25 | from transformers.modeling_outputs import CausalLMOutputWithPast 26 | 27 | from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM 28 | 29 | 30 | class LlavaConfig(LlamaConfig): 31 | model_type = "llava" 32 | 33 | 34 | class LlavaLlamaModel(LlavaMetaModel, LlamaModel): 35 | config_class = LlavaConfig 36 | 37 | def __init__(self, config: LlamaConfig): 38 | super(LlavaLlamaModel, self).__init__(config) 39 | 40 | 41 | class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): 42 | config_class = LlavaConfig 43 | 44 | def __init__(self, config): 45 | super(LlamaForCausalLM, self).__init__(config) 46 | self.model = LlavaLlamaModel(config) 47 | 48 | self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) 49 | 50 | # Initialize weights and apply final processing 51 | self.post_init() 52 | 53 | def get_model(self): 54 | return self.model 55 | 56 | def forward( 57 | self, 58 | input_ids: torch.LongTensor = None, 59 | attention_mask: Optional[torch.Tensor] = None, 60 | past_key_values: Optional[List[torch.FloatTensor]] = None, 61 | inputs_embeds: Optional[torch.FloatTensor] = None, 62 | labels: Optional[torch.LongTensor] = None, 63 | use_cache: Optional[bool] = None, 64 | output_attentions: Optional[bool] = None, 65 | output_hidden_states: Optional[bool] = None, 66 | images: Optional[torch.FloatTensor] = None, 67 | return_dict: Optional[bool] = None, 68 | ) -> Union[Tuple, CausalLMOutputWithPast]: 69 | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions 70 | output_hidden_states = ( 71 | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states 72 | ) 73 | return_dict = return_dict if return_dict is not None else self.config.use_return_dict 74 | 75 | input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) 76 | 77 | # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) 78 | outputs = self.model( 79 | input_ids=input_ids, 80 | attention_mask=attention_mask, 81 | past_key_values=past_key_values, 82 | inputs_embeds=inputs_embeds, 83 | use_cache=use_cache, 84 | output_attentions=output_attentions, 85 | 
output_hidden_states=output_hidden_states, 86 | return_dict=return_dict 87 | ) 88 | 89 | hidden_states = outputs[0] 90 | logits = self.lm_head(hidden_states) 91 | 92 | loss = None 93 | if labels is not None: 94 | # Shift so that tokens < n predict n 95 | shift_logits = logits[..., :-1, :].contiguous() 96 | shift_labels = labels[..., 1:].contiguous() 97 | # Flatten the tokens 98 | loss_fct = CrossEntropyLoss() 99 | shift_logits = shift_logits.view(-1, self.config.vocab_size) 100 | shift_labels = shift_labels.view(-1) 101 | # Enable model/pipeline parallelism 102 | shift_labels = shift_labels.to(shift_logits.device) 103 | loss = loss_fct(shift_logits, shift_labels) 104 | 105 | if not return_dict: 106 | output = (logits,) + outputs[1:] 107 | return (loss,) + output if loss is not None else output 108 | 109 | return CausalLMOutputWithPast( 110 | loss=loss, 111 | logits=logits, 112 | past_key_values=outputs.past_key_values, 113 | hidden_states=outputs.hidden_states, 114 | attentions=outputs.attentions, 115 | ) 116 | 117 | def prepare_inputs_for_generation( 118 | self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs 119 | ): 120 | if past_key_values: 121 | input_ids = input_ids[:, -1:] 122 | 123 | # if `inputs_embeds` are passed, we only want to use them in the 1st generation step 124 | if inputs_embeds is not None and past_key_values is None: 125 | model_inputs = {"inputs_embeds": inputs_embeds} 126 | else: 127 | model_inputs = {"input_ids": input_ids} 128 | 129 | model_inputs.update( 130 | { 131 | "past_key_values": past_key_values, 132 | "use_cache": kwargs.get("use_cache"), 133 | "attention_mask": attention_mask, 134 | "images": kwargs.get("images", None), 135 | } 136 | ) 137 | return model_inputs 138 | 139 | AutoConfig.register("llava", LlavaConfig) 140 | AutoModelForCausalLM.register(LlavaConfig, LlavaLlamaForCausalLM) 141 | -------------------------------------------------------------------------------- /vcoder_llava/model/language_model/vcoder_ds_llava_llama.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Haotian Liu 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
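"""
Usage sketch (illustrative only; the checkpoint path is a placeholder and the
input tensors are assumed to be preprocessed elsewhere): the DS ("depth +
segmentation") variant forwards `segs` and `depths` to forward() through
prepare_inputs_for_generation(), so both are passed as extra generate() kwargs:

    from vcoder_llava.model.builder import load_pretrained_model

    tokenizer, model, image_processor, seg_processor, depth_processor, _ = \
        load_pretrained_model("/path/to/vcoder_ds_llava-7b", None, "vcoder_ds_llava-7b")
    output_ids = model.generate(
        input_ids,
        images=image_tensor.half().cuda(),
        segs=seg_tensor.half().cuda(),
        depths=depth_tensor.half().cuda(),
        max_new_tokens=128,
        use_cache=True,
    )
"""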
14 | 15 | 16 | from typing import List, Optional, Tuple, Union 17 | 18 | import torch 19 | import torch.nn as nn 20 | from torch.nn import CrossEntropyLoss 21 | 22 | from transformers import AutoConfig, AutoModelForCausalLM, \ 23 | LlamaConfig, LlamaModel, LlamaForCausalLM 24 | 25 | from transformers.modeling_outputs import CausalLMOutputWithPast 26 | 27 | from ..vcoder_ds_llava_arch import VCoderDSLlavaMetaModel, VCoderDSLlavaMetaForCausalLM 28 | 29 | 30 | class VCoderDSLlavaConfig(LlamaConfig): 31 | model_type = "vcoder_ds_llava" 32 | 33 | 34 | class VCoderDSLlavaLlamaModel(VCoderDSLlavaMetaModel, LlamaModel): 35 | config_class = VCoderDSLlavaConfig 36 | 37 | def __init__(self, config: LlamaConfig): 38 | super(VCoderDSLlavaLlamaModel, self).__init__(config) 39 | 40 | 41 | class VCoderDSLlavaLlamaForCausalLM(LlamaForCausalLM, VCoderDSLlavaMetaForCausalLM): 42 | config_class = VCoderDSLlavaConfig 43 | 44 | def __init__(self, config): 45 | super(LlamaForCausalLM, self).__init__(config) 46 | self.model = VCoderDSLlavaLlamaModel(config) 47 | 48 | self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) 49 | 50 | 51 | # Initialize weights and apply final processing 52 | self.post_init() 53 | 54 | def get_model(self): 55 | return self.model 56 | 57 | def forward( 58 | self, 59 | input_ids: torch.LongTensor = None, 60 | attention_mask: Optional[torch.Tensor] = None, 61 | past_key_values: Optional[List[torch.FloatTensor]] = None, 62 | inputs_embeds: Optional[torch.FloatTensor] = None, 63 | labels: Optional[torch.LongTensor] = None, 64 | use_cache: Optional[bool] = None, 65 | output_attentions: Optional[bool] = None, 66 | output_hidden_states: Optional[bool] = None, 67 | images: Optional[torch.FloatTensor] = None, 68 | segs: Optional[torch.FloatTensor] = None, 69 | depths: Optional[torch.FloatTensor] = None, 70 | return_dict: Optional[bool] = None, 71 | ) -> Union[Tuple, CausalLMOutputWithPast]: 72 | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions 73 | output_hidden_states = ( 74 | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states 75 | ) 76 | return_dict = return_dict if return_dict is not None else self.config.use_return_dict 77 | 78 | input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images, segs, depths) 79 | 80 | # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) 81 | outputs = self.model( 82 | input_ids=input_ids, 83 | attention_mask=attention_mask, 84 | past_key_values=past_key_values, 85 | inputs_embeds=inputs_embeds, 86 | use_cache=use_cache, 87 | output_attentions=output_attentions, 88 | output_hidden_states=output_hidden_states, 89 | return_dict=return_dict 90 | ) 91 | 92 | hidden_states = outputs[0] 93 | logits = self.lm_head(hidden_states) 94 | 95 | loss = None 96 | if labels is not None: 97 | # Shift so that tokens < n predict n 98 | shift_logits = logits[..., :-1, :].contiguous() 99 | shift_labels = labels[..., 1:].contiguous() 100 | # Flatten the tokens 101 | loss_fct = CrossEntropyLoss() 102 | shift_logits = shift_logits.view(-1, self.config.vocab_size) 103 | shift_labels = shift_labels.view(-1) 104 | # Enable model/pipeline parallelism 105 | shift_labels = shift_labels.to(shift_logits.device) 106 | loss = loss_fct(shift_logits, shift_labels) 107 | 108 | if not return_dict: 109 | output = (logits,) + outputs[1:] 110 | 
return (loss,) + output if loss is not None else output 111 | 112 | return CausalLMOutputWithPast( 113 | loss=loss, 114 | logits=logits, 115 | past_key_values=outputs.past_key_values, 116 | hidden_states=outputs.hidden_states, 117 | attentions=outputs.attentions, 118 | ) 119 | 120 | def prepare_inputs_for_generation( 121 | self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs 122 | ): 123 | if past_key_values: 124 | input_ids = input_ids[:, -1:] 125 | 126 | # if `inputs_embeds` are passed, we only want to use them in the 1st generation step 127 | if inputs_embeds is not None and past_key_values is None: 128 | model_inputs = {"inputs_embeds": inputs_embeds} 129 | else: 130 | model_inputs = {"input_ids": input_ids} 131 | 132 | model_inputs.update( 133 | { 134 | "past_key_values": past_key_values, 135 | "use_cache": kwargs.get("use_cache"), 136 | "attention_mask": attention_mask, 137 | "images": kwargs.get("images", None), 138 | "segs": kwargs.get("segs", None), 139 | "depths": kwargs.get("depths", None), 140 | } 141 | ) 142 | return model_inputs 143 | 144 | AutoConfig.register("vcoder_ds_llava", VCoderDSLlavaConfig) 145 | AutoModelForCausalLM.register(VCoderDSLlavaConfig, VCoderDSLlavaLlamaForCausalLM) 146 | -------------------------------------------------------------------------------- /vcoder_llava/model/language_model/vcoder_it_llava_llama.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Haotian Liu 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
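"""
Note on model registration (illustrative; the checkpoint path is a
placeholder): because this file registers VCoderITLlavaConfig with AutoConfig
and AutoModelForCausalLM at the bottom, a checkpoint whose config.json declares
"model_type": "vcoder_it_llava" can also be loaded through the generic auto
classes once the package has been imported:

    from transformers import AutoModelForCausalLM
    import vcoder_llava.model  # executes the register() calls below

    model = AutoModelForCausalLM.from_pretrained("/path/to/vcoder_it_llava-7b")
"""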
14 | 15 | 16 | from typing import List, Optional, Tuple, Union 17 | 18 | import torch 19 | import torch.nn as nn 20 | from torch.nn import CrossEntropyLoss 21 | 22 | from transformers import AutoConfig, AutoModelForCausalLM, \ 23 | LlamaConfig, LlamaModel, LlamaForCausalLM 24 | 25 | from transformers.modeling_outputs import CausalLMOutputWithPast 26 | 27 | from ..vcoder_it_llava_arch import VCoderITLlavaMetaModel, VCoderITLlavaMetaForCausalLM 28 | 29 | 30 | class VCoderITLlavaConfig(LlamaConfig): 31 | model_type = "vcoder_it_llava" 32 | 33 | 34 | class VCoderITLlavaLlamaModel(VCoderITLlavaMetaModel, LlamaModel): 35 | config_class = VCoderITLlavaConfig 36 | 37 | def __init__(self, config: LlamaConfig): 38 | super(VCoderITLlavaLlamaModel, self).__init__(config) 39 | 40 | 41 | class VCoderITLlavaLlamaForCausalLM(LlamaForCausalLM, VCoderITLlavaMetaForCausalLM): 42 | config_class = VCoderITLlavaConfig 43 | 44 | def __init__(self, config): 45 | super(LlamaForCausalLM, self).__init__(config) 46 | self.model = VCoderITLlavaLlamaModel(config) 47 | 48 | self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) 49 | 50 | # Initialize weights and apply final processing 51 | self.post_init() 52 | 53 | def get_model(self): 54 | return self.model 55 | 56 | def forward( 57 | self, 58 | input_ids: torch.LongTensor = None, 59 | attention_mask: Optional[torch.Tensor] = None, 60 | past_key_values: Optional[List[torch.FloatTensor]] = None, 61 | inputs_embeds: Optional[torch.FloatTensor] = None, 62 | labels: Optional[torch.LongTensor] = None, 63 | use_cache: Optional[bool] = None, 64 | output_attentions: Optional[bool] = None, 65 | output_hidden_states: Optional[bool] = None, 66 | images: Optional[torch.FloatTensor] = None, 67 | segs: Optional[torch.FloatTensor] = None, 68 | return_dict: Optional[bool] = None, 69 | ) -> Union[Tuple, CausalLMOutputWithPast]: 70 | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions 71 | output_hidden_states = ( 72 | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states 73 | ) 74 | return_dict = return_dict if return_dict is not None else self.config.use_return_dict 75 | 76 | input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images, segs) 77 | 78 | # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) 79 | outputs = self.model( 80 | input_ids=input_ids, 81 | attention_mask=attention_mask, 82 | past_key_values=past_key_values, 83 | inputs_embeds=inputs_embeds, 84 | use_cache=use_cache, 85 | output_attentions=output_attentions, 86 | output_hidden_states=output_hidden_states, 87 | return_dict=return_dict 88 | ) 89 | 90 | hidden_states = outputs[0] 91 | logits = self.lm_head(hidden_states) 92 | 93 | loss = None 94 | if labels is not None: 95 | # Shift so that tokens < n predict n 96 | shift_logits = logits[..., :-1, :].contiguous() 97 | shift_labels = labels[..., 1:].contiguous() 98 | # Flatten the tokens 99 | loss_fct = CrossEntropyLoss() 100 | shift_logits = shift_logits.view(-1, self.config.vocab_size) 101 | shift_labels = shift_labels.view(-1) 102 | # Enable model/pipeline parallelism 103 | shift_labels = shift_labels.to(shift_logits.device) 104 | loss = loss_fct(shift_logits, shift_labels) 105 | 106 | if not return_dict: 107 | output = (logits,) + outputs[1:] 108 | return (loss,) + output if loss is not None else output 109 | 110 | 
return CausalLMOutputWithPast( 111 | loss=loss, 112 | logits=logits, 113 | past_key_values=outputs.past_key_values, 114 | hidden_states=outputs.hidden_states, 115 | attentions=outputs.attentions, 116 | ) 117 | 118 | def prepare_inputs_for_generation( 119 | self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs 120 | ): 121 | if past_key_values: 122 | input_ids = input_ids[:, -1:] 123 | 124 | # if `inputs_embeds` are passed, we only want to use them in the 1st generation step 125 | if inputs_embeds is not None and past_key_values is None: 126 | model_inputs = {"inputs_embeds": inputs_embeds} 127 | else: 128 | model_inputs = {"input_ids": input_ids} 129 | 130 | model_inputs.update( 131 | { 132 | "past_key_values": past_key_values, 133 | "use_cache": kwargs.get("use_cache"), 134 | "attention_mask": attention_mask, 135 | "images": kwargs.get("images", None), 136 | "segs": kwargs.get("segs", None), 137 | } 138 | ) 139 | return model_inputs 140 | 141 | AutoConfig.register("vcoder_it_llava", VCoderITLlavaConfig) 142 | AutoModelForCausalLM.register(VCoderITLlavaConfig, VCoderITLlavaLlamaForCausalLM) 143 | -------------------------------------------------------------------------------- /vcoder_llava/model/language_model/vcoder_llava_llama.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Haotian Liu 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
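"""
Training-style forward pass (illustrative sketch; all tensors are placeholders
prepared by the training pipeline, e.g. vcoder_train.py):

    out = model(
        input_ids=input_ids,          # (B, L) prompt ids with image/seg placeholder tokens
        attention_mask=attention_mask,
        images=images,                # (B, 3, H, W) CLIP-preprocessed RGB images
        segs=segs,                    # (B, 3, H, W) segmentation maps rendered as images
        labels=labels,                # (B, L); prompt positions masked out for the loss
    )
    loss = out.loss                   # shifted next-token cross-entropy on answer tokens
"""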
14 | 15 | 16 | from typing import List, Optional, Tuple, Union 17 | 18 | import torch 19 | import torch.nn as nn 20 | from torch.nn import CrossEntropyLoss 21 | 22 | from transformers import AutoConfig, AutoModelForCausalLM, \ 23 | LlamaConfig, LlamaModel, LlamaForCausalLM 24 | 25 | from transformers.modeling_outputs import CausalLMOutputWithPast 26 | 27 | from ..vcoder_llava_arch import VCoderLlavaMetaModel, VCoderLlavaMetaForCausalLM 28 | 29 | 30 | class VCoderLlavaConfig(LlamaConfig): 31 | model_type = "vcoder_llava" 32 | 33 | 34 | class VCoderLlavaLlamaModel(VCoderLlavaMetaModel, LlamaModel): 35 | config_class = VCoderLlavaConfig 36 | 37 | def __init__(self, config: LlamaConfig): 38 | super(VCoderLlavaLlamaModel, self).__init__(config) 39 | 40 | 41 | class VCoderLlavaLlamaForCausalLM(LlamaForCausalLM, VCoderLlavaMetaForCausalLM): 42 | config_class = VCoderLlavaConfig 43 | 44 | def __init__(self, config): 45 | super(LlamaForCausalLM, self).__init__(config) 46 | self.model = VCoderLlavaLlamaModel(config) 47 | 48 | self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) 49 | 50 | # Initialize weights and apply final processing 51 | self.post_init() 52 | 53 | def get_model(self): 54 | return self.model 55 | 56 | def forward( 57 | self, 58 | input_ids: torch.LongTensor = None, 59 | attention_mask: Optional[torch.Tensor] = None, 60 | past_key_values: Optional[List[torch.FloatTensor]] = None, 61 | inputs_embeds: Optional[torch.FloatTensor] = None, 62 | labels: Optional[torch.LongTensor] = None, 63 | use_cache: Optional[bool] = None, 64 | output_attentions: Optional[bool] = None, 65 | output_hidden_states: Optional[bool] = None, 66 | images: Optional[torch.FloatTensor] = None, 67 | segs: Optional[torch.FloatTensor] = None, 68 | return_dict: Optional[bool] = None, 69 | ) -> Union[Tuple, CausalLMOutputWithPast]: 70 | output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions 71 | output_hidden_states = ( 72 | output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states 73 | ) 74 | return_dict = return_dict if return_dict is not None else self.config.use_return_dict 75 | 76 | input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images, segs) 77 | 78 | # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) 79 | outputs = self.model( 80 | input_ids=input_ids, 81 | attention_mask=attention_mask, 82 | past_key_values=past_key_values, 83 | inputs_embeds=inputs_embeds, 84 | use_cache=use_cache, 85 | output_attentions=output_attentions, 86 | output_hidden_states=output_hidden_states, 87 | return_dict=return_dict 88 | ) 89 | 90 | hidden_states = outputs[0] 91 | logits = self.lm_head(hidden_states) 92 | 93 | loss = None 94 | if labels is not None: 95 | # Shift so that tokens < n predict n 96 | shift_logits = logits[..., :-1, :].contiguous() 97 | shift_labels = labels[..., 1:].contiguous() 98 | # Flatten the tokens 99 | loss_fct = CrossEntropyLoss() 100 | shift_logits = shift_logits.view(-1, self.config.vocab_size) 101 | shift_labels = shift_labels.view(-1) 102 | # Enable model/pipeline parallelism 103 | shift_labels = shift_labels.to(shift_logits.device) 104 | loss = loss_fct(shift_logits, shift_labels) 105 | 106 | if not return_dict: 107 | output = (logits,) + outputs[1:] 108 | return (loss,) + output if loss is not None else output 109 | 110 | return 
CausalLMOutputWithPast( 111 | loss=loss, 112 | logits=logits, 113 | past_key_values=outputs.past_key_values, 114 | hidden_states=outputs.hidden_states, 115 | attentions=outputs.attentions, 116 | ) 117 | 118 | def prepare_inputs_for_generation( 119 | self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs 120 | ): 121 | if past_key_values: 122 | input_ids = input_ids[:, -1:] 123 | 124 | # if `inputs_embeds` are passed, we only want to use them in the 1st generation step 125 | if inputs_embeds is not None and past_key_values is None: 126 | model_inputs = {"inputs_embeds": inputs_embeds} 127 | else: 128 | model_inputs = {"input_ids": input_ids} 129 | 130 | model_inputs.update( 131 | { 132 | "past_key_values": past_key_values, 133 | "use_cache": kwargs.get("use_cache"), 134 | "attention_mask": attention_mask, 135 | "images": kwargs.get("images", None), 136 | "segs": kwargs.get("segs", None), 137 | } 138 | ) 139 | return model_inputs 140 | 141 | AutoConfig.register("vcoder_llava", VCoderLlavaConfig) 142 | AutoModelForCausalLM.register(VCoderLlavaConfig, VCoderLlavaLlamaForCausalLM) 143 | -------------------------------------------------------------------------------- /vcoder_llava/model/make_delta.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python3 -m vcoder_llava.model.make_delta --base ~/model_weights/llama-7b --target ~/model_weights/llava-7b --delta ~/model_weights/llava-7b-delta --hub-repo-id liuhaotian/llava-7b-delta 4 | """ 5 | import argparse 6 | 7 | import torch 8 | from tqdm import tqdm 9 | from transformers import AutoTokenizer, AutoModelForCausalLM 10 | from vcoder_llava.model.utils import auto_upgrade 11 | 12 | 13 | def make_delta(base_model_path, target_model_path, delta_path, hub_repo_id): 14 | print("Loading base model") 15 | base = AutoModelForCausalLM.from_pretrained( 16 | base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 17 | 18 | print("Loading target model") 19 | auto_upgrade(target_model_path) 20 | target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) 21 | 22 | print("Calculating delta") 23 | for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"): 24 | if name not in base.state_dict(): 25 | assert name in ['model.mm_projector.weight', 'model.mm_projector.bias'], f'{name} not in base model' 26 | continue 27 | if param.data.shape == base.state_dict()[name].shape: 28 | param.data -= base.state_dict()[name] 29 | else: 30 | assert name in ['model.embed_tokens.weight', 'lm_head.weight'], f'{name} dimension mismatch: {param.data.shape} vs {base.state_dict()[name].shape}' 31 | bparam = base.state_dict()[name] 32 | param.data[:bparam.shape[0], :bparam.shape[1]] -= bparam 33 | 34 | print("Saving delta") 35 | if hub_repo_id: 36 | kwargs = {"push_to_hub": True, "repo_id": hub_repo_id} 37 | else: 38 | kwargs = {} 39 | target.save_pretrained(delta_path, **kwargs) 40 | target_tokenizer = AutoTokenizer.from_pretrained(target_model_path) 41 | target_tokenizer.save_pretrained(delta_path, **kwargs) 42 | 43 | 44 | if __name__ == "__main__": 45 | parser = argparse.ArgumentParser() 46 | parser.add_argument("--base-model-path", type=str, required=True) 47 | parser.add_argument("--target-model-path", type=str, required=True) 48 | parser.add_argument("--delta-path", type=str, required=True) 49 | parser.add_argument("--hub-repo-id", type=str, default=None) 50 | args = parser.parse_args() 51 | 52 | 
make_delta(args.base_model_path, args.target_model_path, args.delta_path, args.hub_repo_id) 53 | -------------------------------------------------------------------------------- /vcoder_llava/model/multimodal_adapter/builder.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import re 3 | 4 | class IdentityMap(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | 8 | def forward(self, x, *args, **kwargs): 9 | return x 10 | 11 | @property 12 | def config(self): 13 | return {"seg_mm_projector_type": 'identity'} 14 | 15 | 16 | class SimpleResBlock(nn.Module): 17 | def __init__(self, channels): 18 | super().__init__() 19 | self.pre_norm = nn.LayerNorm(channels) 20 | 21 | self.proj = nn.Sequential( 22 | nn.Linear(channels, channels), 23 | nn.GELU(), 24 | nn.Linear(channels, channels) 25 | ) 26 | def forward(self, x): 27 | x = self.pre_norm(x) 28 | return x + self.proj(x) 29 | 30 | 31 | def build_seg_projector(config, delay_load=False, **kwargs): 32 | projector_type = getattr(config, 'seg_mm_projector_type', 'linear') 33 | 34 | if projector_type == 'linear': 35 | return nn.Linear(config.seg_mm_hidden_size, config.hidden_size) 36 | 37 | mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) 38 | if mlp_gelu_match: 39 | mlp_depth = int(mlp_gelu_match.group(1)) 40 | modules = [nn.Linear(config.seg_mm_hidden_size, config.hidden_size)] 41 | for _ in range(1, mlp_depth): 42 | modules.append(nn.GELU()) 43 | modules.append(nn.Linear(config.hidden_size, config.hidden_size)) 44 | return nn.Sequential(*modules) 45 | 46 | if projector_type == 'identity': 47 | return IdentityMap() 48 | 49 | raise ValueError(f'Unknown seg projector type: {projector_type}') -------------------------------------------------------------------------------- /vcoder_llava/model/multimodal_depth_adapter/builder.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import re 3 | 4 | class IdentityMap(nn.Module): 5 | def __init__(self): 6 | super().__init__() 7 | 8 | def forward(self, x, *args, **kwargs): 9 | return x 10 | 11 | @property 12 | def config(self): 13 | return {"depth_mm_projector_type": 'identity'} 14 | 15 | 16 | 17 | class SimpleResBlock(nn.Module): 18 | def __init__(self, channels): 19 | super().__init__() 20 | self.pre_norm = nn.LayerNorm(channels) 21 | 22 | self.proj = nn.Sequential( 23 | nn.Linear(channels, channels), 24 | nn.GELU(), 25 | nn.Linear(channels, channels) 26 | ) 27 | def forward(self, x): 28 | x = self.pre_norm(x) 29 | return x + self.proj(x) 30 | 31 | 32 | def build_depth_projector(config, delay_load=False, **kwargs): 33 | projector_type = getattr(config, 'depth_mm_projector_type', 'linear') 34 | 35 | if projector_type == 'linear': 36 | return nn.Linear(config.depth_mm_hidden_size, config.hidden_size) 37 | 38 | mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) 39 | if mlp_gelu_match: 40 | mlp_depth = int(mlp_gelu_match.group(1)) 41 | modules = [nn.Linear(config.depth_mm_hidden_size, config.hidden_size)] 42 | for _ in range(1, mlp_depth): 43 | modules.append(nn.GELU()) 44 | modules.append(nn.Linear(config.hidden_size, config.hidden_size)) 45 | return nn.Sequential(*modules) 46 | 47 | if projector_type == 'identity': 48 | return IdentityMap() 49 | 50 | raise ValueError(f'Unknown depth projector type: {projector_type}') -------------------------------------------------------------------------------- /vcoder_llava/model/multimodal_encoder/builder.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | from .clip_encoder import CLIPVisionTower 3 | 4 | 5 | def build_vision_tower(vision_tower_cfg, **kwargs): 6 | vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None)) 7 | is_absolute_path_exists = os.path.exists(vision_tower) 8 | if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"): 9 | return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs) 10 | 11 | raise ValueError(f'Unknown vision tower: {vision_tower}') 12 | -------------------------------------------------------------------------------- /vcoder_llava/model/multimodal_encoder/clip_encoder.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig 5 | 6 | 7 | class CLIPVisionTower(nn.Module): 8 | def __init__(self, vision_tower, args, delay_load=False): 9 | super().__init__() 10 | 11 | self.is_loaded = False 12 | 13 | self.vision_tower_name = vision_tower 14 | self.select_layer = args.mm_vision_select_layer 15 | self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch') 16 | 17 | if not delay_load: 18 | self.load_model() 19 | else: 20 | self.cfg_only = CLIPVisionConfig.from_pretrained(self.vision_tower_name) 21 | 22 | def load_model(self): 23 | self.image_processor = CLIPImageProcessor.from_pretrained(self.vision_tower_name) 24 | self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name) 25 | self.vision_tower.requires_grad_(False) 26 | 27 | self.is_loaded = True 28 | 29 | def feature_select(self, image_forward_outs): 30 | image_features = image_forward_outs.hidden_states[self.select_layer] 31 | if self.select_feature == 'patch': 32 | image_features = image_features[:, 1:] 33 | elif self.select_feature == 'cls_patch': 34 | image_features = image_features 35 | else: 36 | raise ValueError(f'Unexpected select feature: {self.select_feature}') 37 | return image_features 38 | 39 | @torch.no_grad() 40 | def forward(self, images): 41 | if type(images) is list: 42 | image_features = [] 43 | for image in images: 44 | image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True) 45 | image_feature = self.feature_select(image_forward_out).to(image.dtype) 46 | image_features.append(image_feature) 47 | else: 48 | image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True) 49 | image_features = self.feature_select(image_forward_outs).to(images.dtype) 50 | 51 | return image_features 52 | 53 | @property 54 | def dummy_feature(self): 55 | return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype) 56 | 57 | @property 58 | def dtype(self): 59 | return self.vision_tower.dtype 60 | 61 | @property 62 | def device(self): 63 | return self.vision_tower.device 64 | 65 | @property 66 | def config(self): 67 | if self.is_loaded: 68 | return self.vision_tower.config 69 | else: 70 | return self.cfg_only 71 | 72 | @property 73 | def hidden_size(self): 74 | return self.config.hidden_size 75 | 76 | @property 77 | def num_patches(self): 78 | return (self.config.image_size // self.config.patch_size) ** 2 -------------------------------------------------------------------------------- /vcoder_llava/model/multimodal_projector/builder.py: 
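For orientation, `build_vision_tower` above accepts either a local path or a name starting with `openai`/`laion`, and `CLIPVisionTower.feature_select` drops the CLS token when `mm_vision_select_feature` is `'patch'`. A minimal construction sketch follows; the checkpoint name and `mm_vision_select_layer=-2` are common LLaVA-style choices assumed here for illustration, not values read from this repository's configs.

```python
# Minimal sketch, assuming network access to fetch the CLIP config; the checkpoint
# name and select layer are illustrative assumptions, not verified VCoder settings.
from types import SimpleNamespace

from vcoder_llava.model.multimodal_encoder.builder import build_vision_tower

args = SimpleNamespace(
    mm_vision_tower="openai/clip-vit-large-patch14-336",  # assumed checkpoint name
    mm_vision_select_layer=-2,          # take the second-to-last hidden state
    mm_vision_select_feature="patch",   # keep the patch tokens, drop the CLS token
)

tower = build_vision_tower(args, delay_load=True)  # delay_load=True only fetches the config
print(tower.hidden_size, tower.num_patches)        # e.g. 1024 and (336 // 14) ** 2 = 576
```

Calling `tower.load_model()` (or constructing without `delay_load`) pulls the full weights and freezes them via `requires_grad_(False)`.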
-------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import re 4 | 5 | 6 | class IdentityMap(nn.Module): 7 | def __init__(self): 8 | super().__init__() 9 | 10 | def forward(self, x, *args, **kwargs): 11 | return x 12 | 13 | @property 14 | def config(self): 15 | return {"mm_projector_type": 'identity'} 16 | 17 | 18 | class SimpleResBlock(nn.Module): 19 | def __init__(self, channels): 20 | super().__init__() 21 | self.pre_norm = nn.LayerNorm(channels) 22 | 23 | self.proj = nn.Sequential( 24 | nn.Linear(channels, channels), 25 | nn.GELU(), 26 | nn.Linear(channels, channels) 27 | ) 28 | def forward(self, x): 29 | x = self.pre_norm(x) 30 | return x + self.proj(x) 31 | 32 | 33 | def build_vision_projector(config, delay_load=False, **kwargs): 34 | projector_type = getattr(config, 'mm_projector_type', 'linear') 35 | 36 | if projector_type == 'linear': 37 | return nn.Linear(config.mm_hidden_size, config.hidden_size) 38 | 39 | mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type) 40 | if mlp_gelu_match: 41 | mlp_depth = int(mlp_gelu_match.group(1)) 42 | modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)] 43 | for _ in range(1, mlp_depth): 44 | modules.append(nn.GELU()) 45 | modules.append(nn.Linear(config.hidden_size, config.hidden_size)) 46 | return nn.Sequential(*modules) 47 | 48 | if projector_type == 'identity': 49 | return IdentityMap() 50 | 51 | raise ValueError(f'Unknown projector type: {projector_type}') 52 | -------------------------------------------------------------------------------- /vcoder_llava/model/utils.py: -------------------------------------------------------------------------------- 1 | from transformers import AutoConfig 2 | 3 | 4 | def auto_upgrade(config): 5 | cfg = AutoConfig.from_pretrained(config) 6 | if 'llava' in config and 'llava' not in cfg.model_type: 7 | assert cfg.model_type == 'llama' 8 | print("You are using newer LLaVA code base, while the checkpoint of v0 is from older code base.") 9 | print("You must upgrade the checkpoint to the new code base (this can be done automatically).") 10 | confirm = input("Please confirm that you want to upgrade the checkpoint. 
[Y/N]") 11 | if confirm.lower() in ["y", "yes"]: 12 | print("Upgrading checkpoint...") 13 | assert len(cfg.architectures) == 1 14 | setattr(cfg.__class__, "model_type", "llava") 15 | cfg.architectures[0] = 'LlavaLlamaForCausalLM' 16 | cfg.save_pretrained(config) 17 | print("Checkpoint upgraded.") 18 | else: 19 | print("Checkpoint upgrade aborted.") 20 | exit(1) 21 | -------------------------------------------------------------------------------- /vcoder_llava/serve/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/__init__.py -------------------------------------------------------------------------------- /vcoder_llava/serve/cli.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import torch 3 | from vcoder_llava.vcoder_conversation import conv_templates, SeparatorStyle 4 | from vcoder_llava.model.builder import load_pretrained_model 5 | from vcoder_llava.utils import disable_torch_init 6 | from vcoder_llava.mm_utils import process_images, tokenizer_image_token, tokenizer_depth_seg_token, get_model_name_from_path, KeywordsStoppingCriteria 7 | from vcoder_llava.constants import ( 8 | IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, 9 | SEG_TOKEN_INDEX, DEFAULT_SEG_TOKEN, 10 | DEPTH_TOKEN_INDEX, DEFAULT_DEPTH_TOKEN 11 | ) 12 | 13 | from PIL import Image 14 | 15 | import requests 16 | from PIL import Image 17 | from io import BytesIO 18 | from transformers import TextStreamer 19 | 20 | 21 | def load_image(image_file): 22 | if image_file.startswith('http://') or image_file.startswith('https://'): 23 | response = requests.get(image_file) 24 | image = Image.open(BytesIO(response.content)).convert('RGB') 25 | else: 26 | image = Image.open(image_file).convert('RGB') 27 | return image 28 | 29 | 30 | def main(args): 31 | # Model 32 | disable_torch_init() 33 | 34 | model_name = get_model_name_from_path(args.model_path) 35 | tokenizer, model, image_processor, seg_image_processor, depth_image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device) 36 | 37 | conv_mode = "llava_v1" 38 | 39 | if args.conv_mode is not None and conv_mode != args.conv_mode: 40 | print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode)) 41 | else: 42 | args.conv_mode = conv_mode 43 | 44 | conv = conv_templates[args.conv_mode].copy() 45 | roles = conv.roles 46 | 47 | image = load_image(args.image_file) 48 | # Similar operation in model_worker.py 49 | image_tensor = process_images([image], image_processor, args) 50 | if type(image_tensor) is list: 51 | image_tensor = [image.to(model.device, dtype=torch.float16) for image in image_tensor] 52 | else: 53 | image_tensor = image_tensor.to(model.device, dtype=torch.float16) 54 | 55 | # Segmentation 56 | seg_image_tensor = None 57 | if args.seg_file is not None: 58 | seg_image = load_image(args.seg_file) 59 | seg_image_tensor = process_images([seg_image], seg_image_processor, args) 60 | if type(seg_image_tensor) is list: 61 | seg_image_tensor = [image.to(model.device, dtype=torch.float16) for image in seg_image_tensor] 62 | else: 63 | seg_image_tensor = seg_image_tensor.to(model.device, dtype=torch.float16) 64 | else: 65 | seg_image = None 66 | 67 | # Depth 68 | depth_image_tensor = None 69 | if 
args.depth_file is not None: 70 | depth_image = load_image(args.depth_file) 71 | depth_image_tensor = process_images([depth_image], depth_image_processor, args) 72 | if type(depth_image_tensor) is list: 73 | depth_image_tensor = [image.to(model.device, dtype=torch.float16) for image in depth_image_tensor] 74 | else: 75 | depth_image_tensor = depth_image_tensor.to(model.device, dtype=torch.float16) 76 | else: 77 | depth_image = None 78 | 79 | 80 | while True: 81 | try: 82 | inp = input(f"{roles[0]}: ") 83 | except EOFError: 84 | inp = "" 85 | if not inp: 86 | print("exit...") 87 | break 88 | 89 | print(f"{roles[1]}: ", end="") 90 | 91 | if image is not None: 92 | # first message 93 | inp = DEFAULT_IMAGE_TOKEN + '\n' + inp 94 | image = None 95 | 96 | if seg_image is not None: 97 | # first message 98 | inp = DEFAULT_SEG_TOKEN + '\n' + inp 99 | seg_image = None 100 | 101 | if depth_image is not None: 102 | # first message 103 | inp = DEFAULT_DEPTH_TOKEN + '\n' + inp 104 | depth_image = None 105 | conv.append_message(conv.roles[0], inp) 106 | else: 107 | # later messages 108 | conv.append_message(conv.roles[0], inp) 109 | conv.append_message(conv.roles[1], None) 110 | prompt = conv.get_prompt() 111 | 112 | if "<seg>" not in prompt: 113 | input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() 114 | else: 115 | input_ids = tokenizer_depth_seg_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, DEPTH_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda() 116 | stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 117 | keywords = [stop_str] 118 | stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) 119 | streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) 120 | 121 | with torch.inference_mode(): 122 | output_ids = model.generate( 123 | input_ids, 124 | images=image_tensor, 125 | segs=seg_image_tensor, 126 | depths=depth_image_tensor, 127 | do_sample=True, 128 | temperature=args.temperature, 129 | max_new_tokens=args.max_new_tokens, 130 | streamer=streamer, 131 | use_cache=True, 132 | stopping_criteria=[stopping_criteria]) 133 | 134 | 135 | outputs = tokenizer.decode(output_ids[0, input_ids.shape[1]:]).strip() 136 | conv.messages[-1][-1] = outputs 137 | 138 | if args.debug: 139 | print("\n", {"prompt": prompt, "outputs": outputs}, "\n") 140 | 141 | 142 | if __name__ == "__main__": 143 | parser = argparse.ArgumentParser() 144 | parser.add_argument("--model-path", type=str, default="shi-labs/vcoder_ds_llava-v1.5-13b") 145 | parser.add_argument("--model-base", type=str, default=None) 146 | parser.add_argument("--image-file", type=str, required=True) 147 | parser.add_argument("--seg-file", type=str, default=None) 148 | parser.add_argument("--depth-file", type=str, default=None) 149 | parser.add_argument("--device", type=str, default="cuda") 150 | parser.add_argument("--conv-mode", type=str, default=None) 151 | parser.add_argument("--temperature", type=float, default=0.2) 152 | parser.add_argument("--max-new-tokens", type=int, default=512) 153 | parser.add_argument("--load-8bit", action="store_true") 154 | parser.add_argument("--load-4bit", action="store_true") 155 | parser.add_argument("--debug", action="store_true") 156 | parser.add_argument("--image-aspect-ratio", type=str, default='pad') 157 | args = parser.parse_args() 158 | main(args) -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/corgi.jpg:
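`serve/cli.py` above feeds the optional segmentation and depth maps into `model.generate` through the extra `segs` and `depths` tensors. As a usage illustration, the snippet below drives the same `main()` entry point from Python using the bundled example images; the attribute names simply mirror the argparse flags, and actually running it requires the checkpoint weights and a CUDA device.

```python
# Usage sketch: a programmatic equivalent of the command-line entry point above.
# Attribute names mirror the argparse flags; the example images ship with the repo.
from types import SimpleNamespace

from vcoder_llava.serve import cli

args = SimpleNamespace(
    model_path="shi-labs/vcoder_ds_llava-v1.5-13b",
    model_base=None,
    image_file="vcoder_llava/serve/examples/people.jpg",
    seg_file="vcoder_llava/serve/examples/people_pan.png",  # panoptic segmentation map
    depth_file=None,                                        # e.g. examples/depth_depth.png for depth
    device="cuda",
    conv_mode=None,          # falls back to "llava_v1"
    temperature=0.2,
    max_new_tokens=512,
    load_8bit=False,
    load_4bit=False,
    debug=False,
    image_aspect_ratio="pad",
)

cli.main(args)  # starts the interactive chat loop on stdin
```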
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/corgi.jpg -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/corgi_pan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/corgi_pan.png -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/depth.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/depth.jpeg -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/depth_depth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/depth_depth.png -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/depth_pan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/depth_pan.png -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/friends.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/friends.jpg -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/friends_pan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/friends_pan.png -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/people.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/people.jpg -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/people_pan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/people_pan.png -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/suits.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/suits.jpg -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/suits_depth.jpeg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/suits_depth.jpeg -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/suits_ins.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/suits_ins.png -------------------------------------------------------------------------------- /vcoder_llava/serve/examples/suits_pan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SHI-Labs/VCoder/4e85acef896cee7843257d5d844ec945407efb13/vcoder_llava/serve/examples/suits_pan.png -------------------------------------------------------------------------------- /vcoder_llava/train/llama_flash_attn_monkey_patch.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Tuple 2 | import warnings 3 | 4 | import torch 5 | 6 | import transformers 7 | from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv 8 | 9 | try: 10 | from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func 11 | except ImportError: 12 | from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func 13 | from flash_attn.bert_padding import unpad_input, pad_input 14 | 15 | 16 | def forward( 17 | self, 18 | hidden_states: torch.Tensor, 19 | attention_mask: Optional[torch.Tensor] = None, 20 | position_ids: Optional[torch.Tensor] = None, 21 | past_key_value: Optional[Tuple[torch.Tensor]] = None, 22 | output_attentions: bool = False, 23 | use_cache: bool = False, 24 | ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: 25 | if output_attentions: 26 | warnings.warn( 27 | "Output attentions is not supported for patched `LlamaAttention`, returning `None` instead." 
28 | ) 29 | 30 | bsz, q_len, _ = hidden_states.size() 31 | 32 | query_states = ( 33 | self.q_proj(hidden_states) 34 | .view(bsz, q_len, self.num_heads, self.head_dim) 35 | .transpose(1, 2) 36 | ) 37 | key_states = ( 38 | self.k_proj(hidden_states) 39 | .view(bsz, q_len, self.num_key_value_heads, self.head_dim) 40 | .transpose(1, 2) 41 | ) 42 | value_states = ( 43 | self.v_proj(hidden_states) 44 | .view(bsz, q_len, self.num_key_value_heads, self.head_dim) 45 | .transpose(1, 2) 46 | ) # shape: (b, num_heads, s, head_dim) 47 | 48 | kv_seq_len = key_states.shape[-2] 49 | if past_key_value is not None: 50 | kv_seq_len += past_key_value[0].shape[-2] 51 | 52 | cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) 53 | query_states, key_states = apply_rotary_pos_emb( 54 | query_states, key_states, cos, sin, position_ids 55 | ) 56 | 57 | if past_key_value is not None: 58 | # reuse k, v 59 | key_states = torch.cat([past_key_value[0], key_states], dim=2) 60 | value_states = torch.cat([past_key_value[1], value_states], dim=2) 61 | 62 | past_key_value = (key_states, value_states) if use_cache else None 63 | 64 | # repeat k/v heads if n_kv_heads < n_heads 65 | key_states = repeat_kv(key_states, self.num_key_value_groups) 66 | value_states = repeat_kv(value_states, self.num_key_value_groups) 67 | 68 | # Transform the data into the format required by flash attention 69 | qkv = torch.stack([query_states, key_states, value_states], dim=2) 70 | qkv = qkv.transpose(1, 3) # shape: [b, s, 3, num_heads, head_dim] 71 | key_padding_mask = attention_mask 72 | 73 | if key_padding_mask is None: 74 | qkv = qkv.reshape(-1, 3, self.num_heads, self.head_dim) 75 | cu_q_lens = torch.arange( 76 | 0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device 77 | ) 78 | max_s = q_len 79 | output = flash_attn_unpadded_qkvpacked_func( 80 | qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True 81 | ) 82 | output = output.view(bsz, q_len, -1) 83 | else: 84 | qkv = qkv.reshape(bsz, q_len, -1) 85 | qkv, indices, cu_q_lens, max_s = unpad_input(qkv, key_padding_mask) 86 | qkv = qkv.view(-1, 3, self.num_heads, self.head_dim) 87 | output_unpad = flash_attn_unpadded_qkvpacked_func( 88 | qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True 89 | ) 90 | output_unpad = output_unpad.reshape(-1, self.num_heads * self.head_dim) 91 | output = pad_input(output_unpad, indices, bsz, q_len) 92 | 93 | return self.o_proj(output), None, past_key_value 94 | 95 | 96 | # Disable the transformation of the attention mask in LlamaModel as the flash attention 97 | # requires the attention mask to be the same as the key_padding_mask 98 | def _prepare_decoder_attention_mask( 99 | self, attention_mask, input_shape, inputs_embeds, past_key_values_length 100 | ): 101 | # [bsz, seq_len] 102 | return attention_mask 103 | 104 | 105 | def replace_llama_attn_with_flash_attn(): 106 | cuda_major, cuda_minor = torch.cuda.get_device_capability() 107 | if cuda_major < 8: 108 | warnings.warn( 109 | "Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward." 
110 | "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593" 111 | ) 112 | transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = ( 113 | _prepare_decoder_attention_mask 114 | ) 115 | transformers.models.llama.modeling_llama.LlamaAttention.forward = forward 116 | -------------------------------------------------------------------------------- /vcoder_llava/train/train_mem.py: -------------------------------------------------------------------------------- 1 | # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright: 2 | # Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright: 3 | # Make it more memory efficient by monkey patching the LLaMA model with FlashAttn. 4 | 5 | # Need to call this before importing transformers. 6 | from vcoder_llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn 7 | 8 | replace_llama_attn_with_flash_attn() 9 | 10 | from vcoder_llava.train.train import train 11 | 12 | if __name__ == "__main__": 13 | train() 14 | -------------------------------------------------------------------------------- /vcoder_llava/train/vcoder_ds_llava_trainer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import torch.nn as nn 4 | from torch.utils.data import Sampler 5 | 6 | from transformers import Trainer 7 | from transformers.trainer import ( 8 | is_sagemaker_mp_enabled, 9 | get_parameter_names, 10 | has_length, 11 | ALL_LAYERNORM_LAYERS, 12 | ShardedDDPOption, 13 | logger, 14 | ) 15 | from typing import List, Optional 16 | 17 | 18 | def maybe_zero_3(param, ignore_status=False, name=None): 19 | from deepspeed import zero 20 | from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus 21 | if hasattr(param, "ds_id"): 22 | if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: 23 | if not ignore_status: 24 | print(name, 'no ignore status') 25 | with zero.GatheredParameters([param]): 26 | param = param.data.detach().cpu().clone() 27 | else: 28 | param = param.detach().cpu().clone() 29 | return param 30 | 31 | 32 | def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match): 33 | to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)} 34 | to_return = {k: maybe_zero_3(v, ignore_status=True, name=k).cpu() for k, v in to_return.items()} 35 | return to_return 36 | 37 | 38 | def split_to_even_chunks(indices, lengths, num_chunks): 39 | """ 40 | Split a list of indices into `chunks` chunks of roughly equal lengths. 41 | """ 42 | 43 | if len(indices) % num_chunks != 0: 44 | return [indices[i::num_chunks] for i in range(num_chunks)] 45 | 46 | num_indices_per_chunk = len(indices) // num_chunks 47 | 48 | chunks = [[] for _ in range(num_chunks)] 49 | chunks_lengths = [0 for _ in range(num_chunks)] 50 | for index in indices: 51 | shortest_chunk = chunks_lengths.index(min(chunks_lengths)) 52 | chunks[shortest_chunk].append(index) 53 | chunks_lengths[shortest_chunk] += lengths[index] 54 | if len(chunks[shortest_chunk]) == num_indices_per_chunk: 55 | chunks_lengths[shortest_chunk] = float("inf") 56 | 57 | return chunks 58 | 59 | 60 | def get_modality_length_grouped_indices(lengths, batch_size, world_size, generator=None): 61 | # We need to use torch for the random part as a distributed sampler will set the random seed for torch. 62 | assert all(l != 0 for l in lengths), "Should not have zero length." 
63 | mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0]) 64 | 65 | assert len(mm_indices) > 0, "Should have at least one multimodal sample." 66 | 67 | mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices(mm_lengths, batch_size, world_size, generator=None)] 68 | megabatch_size = world_size * batch_size 69 | mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)] 70 | 71 | last_mm = mm_megabatches[-1] 72 | additional_batch = last_mm 73 | megabatches = mm_megabatches[:-1] 74 | megabatch_indices = torch.randperm(len(megabatches), generator=generator) 75 | megabatches = [megabatches[i] for i in megabatch_indices] 76 | 77 | if len(additional_batch) > 0: 78 | megabatches.append(sorted(additional_batch)) 79 | 80 | return [i for megabatch in megabatches for i in megabatch] 81 | 82 | 83 | def get_length_grouped_indices(lengths, batch_size, world_size, generator=None, merge=True): 84 | # We need to use torch for the random part as a distributed sampler will set the random seed for torch. 85 | indices = torch.randperm(len(lengths), generator=generator) 86 | megabatch_size = world_size * batch_size 87 | megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)] 88 | megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches] 89 | megabatches = [split_to_even_chunks(megabatch, lengths, world_size) for megabatch in megabatches] 90 | 91 | return [i for megabatch in megabatches for batch in megabatch for i in batch] 92 | 93 | 94 | class LengthGroupedSampler(Sampler): 95 | r""" 96 | Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while 97 | keeping a bit of randomness. 98 | """ 99 | 100 | def __init__( 101 | self, 102 | batch_size: int, 103 | world_size: int, 104 | lengths: Optional[List[int]] = None, 105 | generator=None, 106 | group_by_modality: bool = False, 107 | ): 108 | if lengths is None: 109 | raise ValueError("Lengths must be provided.") 110 | 111 | self.batch_size = batch_size 112 | self.world_size = world_size 113 | self.lengths = lengths 114 | self.generator = generator 115 | self.group_by_modality = group_by_modality 116 | 117 | def __len__(self): 118 | return len(self.lengths) 119 | 120 | def __iter__(self): 121 | if self.group_by_modality: 122 | indices = get_modality_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator) 123 | else: 124 | indices = get_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator) 125 | return iter(indices) 126 | 127 | 128 | class VCoderDSLLaVATrainer(Trainer): 129 | 130 | def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: 131 | if self.train_dataset is None or not has_length(self.train_dataset): 132 | return None 133 | 134 | if self.args.group_by_modality_length: 135 | lengths = self.train_dataset.modality_lengths 136 | return LengthGroupedSampler( 137 | # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps 138 | self.args.train_batch_size, 139 | world_size=self.args.world_size, 140 | lengths=lengths, 141 | group_by_modality=True, 142 | ) 143 | else: 144 | return super()._get_train_sampler() 145 | 146 | def create_optimizer(self): 147 | """ 148 | Setup the optimizer. 149 | 150 | We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the 151 | Trainer's init through `optimizers`, or subclass and override this method in a subclass. 152 | """ 153 | if is_sagemaker_mp_enabled(): 154 | return super().create_optimizer() 155 | if self.sharded_ddp == ShardedDDPOption.SIMPLE: 156 | return super().create_optimizer() 157 | 158 | opt_model = self.model 159 | 160 | if self.optimizer is None: 161 | decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS) 162 | decay_parameters = [name for name in decay_parameters if "bias" not in name] 163 | optimizer_grouped_parameters = [ 164 | { 165 | "params": [ 166 | p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad) 167 | ], 168 | "weight_decay": self.args.weight_decay, 169 | }, 170 | { 171 | "params": [ 172 | p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad) 173 | ], 174 | "weight_decay": 0.0, 175 | }, 176 | ] 177 | 178 | optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args) 179 | 180 | if self.sharded_ddp == ShardedDDPOption.SIMPLE: 181 | self.optimizer = OSS( 182 | params=optimizer_grouped_parameters, 183 | optim=optimizer_cls, 184 | **optimizer_kwargs, 185 | ) 186 | else: 187 | self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) 188 | if optimizer_cls.__name__ == "Adam8bit": 189 | import bitsandbytes 190 | 191 | manager = bitsandbytes.optim.GlobalOptimManager.get_instance() 192 | 193 | skipped = 0 194 | for module in opt_model.modules(): 195 | if isinstance(module, nn.Embedding): 196 | skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) 197 | logger.info(f"skipped {module}: {skipped/2**20}M params") 198 | manager.register_module_override(module, "weight", {"optim_bits": 32}) 199 | logger.debug(f"bitsandbytes: will optimize {module} in fp32") 200 | logger.info(f"skipped: {skipped/2**20}M params") 201 | 202 | return self.optimizer 203 | 204 | def _save_checkpoint(self, model, trial, metrics=None): 205 | super(VCoderDSLLaVATrainer, self)._save_checkpoint(model, trial, metrics) 206 | 207 | def _save(self, output_dir: Optional[str] = None, state_dict=None): 208 | super(VCoderDSLLaVATrainer, self)._save(output_dir, state_dict) 209 | -------------------------------------------------------------------------------- /vcoder_llava/train/vcoder_ds_train_mem.py: -------------------------------------------------------------------------------- 1 | # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright: 2 | # Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright: 3 | # Make it more memory efficient by monkey patching the LLaMA model with FlashAttn. 4 | 5 | # Need to call this before importing transformers. 6 | from vcoder_llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn 7 | 8 | replace_llama_attn_with_flash_attn() 9 | 10 | from vcoder_llava.train.vcoder_ds_train import vcoder_ds_train 11 | import warnings 12 | warnings.filterwarnings("ignore") 13 | 14 | if __name__ == "__main__": 15 | vcoder_ds_train() 16 | -------------------------------------------------------------------------------- /vcoder_llava/train/vcoder_it_mem.py: -------------------------------------------------------------------------------- 1 | # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright: 2 | # Adopted from tatsu-lab@stanford_alpaca. 
Below is the original copyright: 3 | # Make it more memory efficient by monkey patching the LLaMA model with FlashAttn. 4 | 5 | # Need to call this before importing transformers. 6 | from vcoder_llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn 7 | 8 | replace_llama_attn_with_flash_attn() 9 | 10 | from vcoder_llava.train.vcoder_it import vcoder_it 11 | import warnings 12 | warnings.filterwarnings("ignore") 13 | 14 | if __name__ == "__main__": 15 | vcoder_it() 16 | -------------------------------------------------------------------------------- /vcoder_llava/train/vcoder_llava_trainer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import torch.nn as nn 4 | from torch.utils.data import Sampler 5 | 6 | from transformers import Trainer 7 | from transformers.trainer import ( 8 | is_sagemaker_mp_enabled, 9 | get_parameter_names, 10 | has_length, 11 | ALL_LAYERNORM_LAYERS, 12 | ShardedDDPOption, 13 | logger, 14 | ) 15 | from typing import List, Optional 16 | 17 | 18 | def maybe_zero_3(param, ignore_status=False, name=None): 19 | from deepspeed import zero 20 | from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus 21 | if hasattr(param, "ds_id"): 22 | if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: 23 | if not ignore_status: 24 | print(name, 'no ignore status') 25 | with zero.GatheredParameters([param]): 26 | param = param.data.detach().cpu().clone() 27 | else: 28 | param = param.detach().cpu().clone() 29 | return param 30 | 31 | 32 | def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match): 33 | to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)} 34 | to_return = {k: maybe_zero_3(v, ignore_status=True, name=k).cpu() for k, v in to_return.items()} 35 | return to_return 36 | 37 | 38 | def split_to_even_chunks(indices, lengths, num_chunks): 39 | """ 40 | Split a list of indices into `chunks` chunks of roughly equal lengths. 41 | """ 42 | 43 | if len(indices) % num_chunks != 0: 44 | return [indices[i::num_chunks] for i in range(num_chunks)] 45 | 46 | num_indices_per_chunk = len(indices) // num_chunks 47 | 48 | chunks = [[] for _ in range(num_chunks)] 49 | chunks_lengths = [0 for _ in range(num_chunks)] 50 | for index in indices: 51 | shortest_chunk = chunks_lengths.index(min(chunks_lengths)) 52 | chunks[shortest_chunk].append(index) 53 | chunks_lengths[shortest_chunk] += lengths[index] 54 | if len(chunks[shortest_chunk]) == num_indices_per_chunk: 55 | chunks_lengths[shortest_chunk] = float("inf") 56 | 57 | return chunks 58 | 59 | 60 | def get_modality_length_grouped_indices(lengths, batch_size, world_size, generator=None): 61 | # We need to use torch for the random part as a distributed sampler will set the random seed for torch. 62 | assert all(l != 0 for l in lengths), "Should not have zero length." 63 | mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0]) 64 | 65 | assert len(mm_indices) > 0, "Should have at least one multimodal sample." 
66 | 67 | mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices(mm_lengths, batch_size, world_size, generator=None)] 68 | megabatch_size = world_size * batch_size 69 | mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)] 70 | 71 | last_mm = mm_megabatches[-1] 72 | additional_batch = last_mm 73 | megabatches = mm_megabatches[:-1] 74 | megabatch_indices = torch.randperm(len(megabatches), generator=generator) 75 | megabatches = [megabatches[i] for i in megabatch_indices] 76 | 77 | # if len(additional_batch) >= megabatch_size: 78 | # megabatches = [additional_batch[:megabatch_size]] + megabatches 79 | # additional_batch = additional_batch[megabatch_size:] 80 | 81 | if len(additional_batch) > 0: 82 | megabatches.append(sorted(additional_batch)) 83 | 84 | return [i for megabatch in megabatches for i in megabatch] 85 | 86 | 87 | def get_length_grouped_indices(lengths, batch_size, world_size, generator=None, merge=True): 88 | # We need to use torch for the random part as a distributed sampler will set the random seed for torch. 89 | indices = torch.randperm(len(lengths), generator=generator) 90 | megabatch_size = world_size * batch_size 91 | megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)] 92 | megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches] 93 | megabatches = [split_to_even_chunks(megabatch, lengths, world_size) for megabatch in megabatches] 94 | 95 | return [i for megabatch in megabatches for batch in megabatch for i in batch] 96 | 97 | 98 | class LengthGroupedSampler(Sampler): 99 | r""" 100 | Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while 101 | keeping a bit of randomness. 102 | """ 103 | 104 | def __init__( 105 | self, 106 | batch_size: int, 107 | world_size: int, 108 | lengths: Optional[List[int]] = None, 109 | generator=None, 110 | group_by_modality: bool = False, 111 | ): 112 | if lengths is None: 113 | raise ValueError("Lengths must be provided.") 114 | 115 | self.batch_size = batch_size 116 | self.world_size = world_size 117 | self.lengths = lengths 118 | self.generator = generator 119 | self.group_by_modality = group_by_modality 120 | 121 | def __len__(self): 122 | return len(self.lengths) 123 | 124 | def __iter__(self): 125 | if self.group_by_modality: 126 | indices = get_modality_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator) 127 | else: 128 | indices = get_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator) 129 | return iter(indices) 130 | 131 | 132 | class VCoderLLaVATrainer(Trainer): 133 | 134 | def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: 135 | if self.train_dataset is None or not has_length(self.train_dataset): 136 | return None 137 | 138 | if self.args.group_by_modality_length: 139 | lengths = self.train_dataset.modality_lengths 140 | return LengthGroupedSampler( 141 | # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps 142 | self.args.train_batch_size, 143 | world_size=self.args.world_size, 144 | lengths=lengths, 145 | group_by_modality=True, 146 | ) 147 | else: 148 | return super()._get_train_sampler() 149 | 150 | def create_optimizer(self): 151 | """ 152 | Setup the optimizer. 153 | 154 | We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the 155 | Trainer's init through `optimizers`, or subclass and override this method in a subclass. 156 | """ 157 | if is_sagemaker_mp_enabled(): 158 | return super().create_optimizer() 159 | if self.sharded_ddp == ShardedDDPOption.SIMPLE: 160 | return super().create_optimizer() 161 | 162 | opt_model = self.model 163 | 164 | if self.optimizer is None: 165 | decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS) 166 | decay_parameters = [name for name in decay_parameters if "bias" not in name] 167 | optimizer_grouped_parameters = [ 168 | { 169 | "params": [ 170 | p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad) 171 | ], 172 | "weight_decay": self.args.weight_decay, 173 | }, 174 | { 175 | "params": [ 176 | p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad) 177 | ], 178 | "weight_decay": 0.0, 179 | }, 180 | ] 181 | 182 | optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args) 183 | 184 | if self.sharded_ddp == ShardedDDPOption.SIMPLE: 185 | self.optimizer = OSS( 186 | params=optimizer_grouped_parameters, 187 | optim=optimizer_cls, 188 | **optimizer_kwargs, 189 | ) 190 | else: 191 | self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) 192 | if optimizer_cls.__name__ == "Adam8bit": 193 | import bitsandbytes 194 | 195 | manager = bitsandbytes.optim.GlobalOptimManager.get_instance() 196 | 197 | skipped = 0 198 | for module in opt_model.modules(): 199 | if isinstance(module, nn.Embedding): 200 | skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) 201 | logger.info(f"skipped {module}: {skipped/2**20}M params") 202 | manager.register_module_override(module, "weight", {"optim_bits": 32}) 203 | logger.debug(f"bitsandbytes: will optimize {module} in fp32") 204 | logger.info(f"skipped: {skipped/2**20}M params") 205 | 206 | return self.optimizer 207 | 208 | def _save_checkpoint(self, model, trial, metrics=None): 209 | super(VCoderLLaVATrainer, self)._save_checkpoint(model, trial, metrics) 210 | 211 | def _save(self, output_dir: Optional[str] = None, state_dict=None): 212 | super(VCoderLLaVATrainer, self)._save(output_dir, state_dict) 213 | -------------------------------------------------------------------------------- /vcoder_llava/train/vcoder_train_mem.py: -------------------------------------------------------------------------------- 1 | # Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright: 2 | # Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright: 3 | # Make it more memory efficient by monkey patching the LLaMA model with FlashAttn. 4 | 5 | # Need to call this before importing transformers. 
6 | from vcoder_llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn 7 | 8 | replace_llama_attn_with_flash_attn() 9 | 10 | from vcoder_llava.train.vcoder_train import vcoder_train 11 | import warnings 12 | warnings.filterwarnings("ignore") 13 | 14 | if __name__ == "__main__": 15 | vcoder_train() 16 | -------------------------------------------------------------------------------- /vcoder_llava/utils.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | import logging.handlers 4 | import os 5 | import sys 6 | 7 | import requests 8 | 9 | from vcoder_llava.constants import LOGDIR 10 | 11 | server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" 12 | moderation_msg = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE TRY AGAIN." 13 | 14 | handler = None 15 | 16 | 17 | def build_logger(logger_name, logger_filename): 18 | global handler 19 | 20 | formatter = logging.Formatter( 21 | fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s", 22 | datefmt="%Y-%m-%d %H:%M:%S", 23 | ) 24 | 25 | # Set the format of root handlers 26 | if not logging.getLogger().handlers: 27 | logging.basicConfig(level=logging.INFO) 28 | logging.getLogger().handlers[0].setFormatter(formatter) 29 | 30 | # Redirect stdout and stderr to loggers 31 | stdout_logger = logging.getLogger("stdout") 32 | stdout_logger.setLevel(logging.INFO) 33 | sl = StreamToLogger(stdout_logger, logging.INFO) 34 | sys.stdout = sl 35 | 36 | stderr_logger = logging.getLogger("stderr") 37 | stderr_logger.setLevel(logging.ERROR) 38 | sl = StreamToLogger(stderr_logger, logging.ERROR) 39 | sys.stderr = sl 40 | 41 | # Get logger 42 | logger = logging.getLogger(logger_name) 43 | logger.setLevel(logging.INFO) 44 | 45 | # Add a file handler for all loggers 46 | if handler is None: 47 | os.makedirs(LOGDIR, exist_ok=True) 48 | filename = os.path.join(LOGDIR, logger_filename) 49 | handler = logging.handlers.TimedRotatingFileHandler( 50 | filename, when='D', utc=True) 51 | handler.setFormatter(formatter) 52 | 53 | for name, item in logging.root.manager.loggerDict.items(): 54 | if isinstance(item, logging.Logger): 55 | item.addHandler(handler) 56 | 57 | return logger 58 | 59 | 60 | class StreamToLogger(object): 61 | """ 62 | Fake file-like stream object that redirects writes to a logger instance. 63 | """ 64 | def __init__(self, logger, log_level=logging.INFO): 65 | self.terminal = sys.stdout 66 | self.logger = logger 67 | self.log_level = log_level 68 | self.linebuf = '' 69 | 70 | def __getattr__(self, attr): 71 | return getattr(self.terminal, attr) 72 | 73 | def write(self, buf): 74 | temp_linebuf = self.linebuf + buf 75 | self.linebuf = '' 76 | for line in temp_linebuf.splitlines(True): 77 | # From the io.TextIOWrapper docs: 78 | # On output, if newline is None, any '\n' characters written 79 | # are translated to the system default line separator. 80 | # By default sys.stdout.write() expects '\n' newlines and then 81 | # translates them so this is still cross platform. 82 | if line[-1] == '\n': 83 | self.logger.log(self.log_level, line.rstrip()) 84 | else: 85 | self.linebuf += line 86 | 87 | def flush(self): 88 | if self.linebuf != '': 89 | self.logger.log(self.log_level, self.linebuf.rstrip()) 90 | self.linebuf = '' 91 | 92 | 93 | def disable_torch_init(): 94 | """ 95 | Disable the redundant torch default initialization to accelerate model creation. 
96 | """ 97 | import torch 98 | setattr(torch.nn.Linear, "reset_parameters", lambda self: None) 99 | setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None) 100 | 101 | 102 | def violates_moderation(text): 103 | """ 104 | Check whether the text violates OpenAI moderation API. 105 | """ 106 | url = "https://api.openai.com/v1/moderations" 107 | headers = {"Content-Type": "application/json", 108 | "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]} 109 | text = text.replace("\n", "") 110 | data = "{" + '"input": ' + f'"{text}"' + "}" 111 | data = data.encode("utf-8") 112 | try: 113 | ret = requests.post(url, headers=headers, data=data, timeout=5) 114 | flagged = ret.json()["results"][0]["flagged"] 115 | except requests.exceptions.RequestException as e: 116 | flagged = False 117 | except KeyError as e: 118 | flagged = False 119 | 120 | return flagged 121 | 122 | 123 | def pretty_print_semaphore(semaphore): 124 | if semaphore is None: 125 | return "None" 126 | return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})" 127 | --------------------------------------------------------------------------------