├── README.md
├── assets
│   └── pipeline.png
├── inference.py
├── llava.egg-info
│   ├── PKG-INFO
│   ├── SOURCES.txt
│   ├── dependency_links.txt
│   ├── requires.txt
│   └── top_level.txt
├── llava
│   ├── __init__.py
│   ├── __pycache__            (cpython-310/-311 bytecode caches: __init__, constants, conversation, mm_utils, oryx_mm_utils, utils)
│   ├── constants.py
│   ├── conversation.py
│   ├── mm_utils.py
│   ├── model
│   │   ├── __init__.py
│   │   ├── __pycache__        (cpython-310/-311: __init__, builder, compressor, llava_arch, llava_arch_zip)
│   │   ├── apply_delta.py
│   │   ├── builder.py
│   │   ├── compressor.py
│   │   ├── consolidate.py
│   │   ├── language_model
│   │   │   ├── __pycache__    (cpython-310/-311: llava_llama, llava_mistral, llava_mixtral, llava_qwen, llava_qwen_zip)
│   │   │   ├── llava_gemma.py
│   │   │   ├── llava_llama.py
│   │   │   ├── llava_mistral.py
│   │   │   ├── llava_mixtral.py
│   │   │   ├── llava_mpt.py
│   │   │   ├── llava_qwen.py
│   │   │   ├── llava_qwen_moe.py
│   │   │   ├── llava_qwen_zip.py
│   │   │   └── modeling_llama.py
│   │   ├── llava_arch.py
│   │   ├── llava_arch_zip.py
│   │   ├── multimodal_encoder
│   │   │   ├── __pycache__    (cpython-310/-311: builder, clip_encoder, hf_vision, imagebind, open_clip_encoder, siglip_encoder, siglip_encoder_zip)
│   │   │   ├── builder.py
│   │   │   ├── clip_encoder.py
│   │   │   ├── dev_eva_clip
│   │   │   │   ├── eva_clip
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── bpe_simple_vocab_16e6.txt.gz
│   │   │   │   │   ├── constants.py
│   │   │   │   │   ├── eva_vit_model.py
│   │   │   │   │   ├── factory.py
│   │   │   │   │   ├── hf_configs.py
│   │   │   │   │   ├── hf_model.py
│   │   │   │   │   ├── loss.py
│   │   │   │   │   ├── model.py
│   │   │   │   │   ├── model_configs
│   │   │   │   │   │   ├── EVA-CLIP-18B.json
│   │   │   │   │   │   ├── EVA-CLIP-8B-plus.json
│   │   │   │   │   │   ├── EVA-CLIP-8B.json
│   │   │   │   │   │   ├── EVA01-CLIP-B-16.json
│   │   │   │   │   │   ├── EVA01-CLIP-g-14-plus.json
│   │   │   │   │   │   ├── EVA01-CLIP-g-14.json
│   │   │   │   │   │   ├── EVA02-CLIP-B-16.json
│   │   │   │   │   │   ├── EVA02-CLIP-L-14-336.json
│   │   │   │   │   │   ├── EVA02-CLIP-L-14.json
│   │   │   │   │   │   ├── EVA02-CLIP-bigE-14-plus.json
│   │   │   │   │   │   ├── EVA02-CLIP-bigE-14.json
│   │   │   │   │   │   ├── Internal-EVA02-CLIP-10B-14-448.json
│   │   │   │   │   │   └── Internal-EVA02-CLIP-10B-14.json
│   │   │   │   │   ├── modified_resnet.py
│   │   │   │   │   ├── openai.py
│   │   │   │   │   ├── pretrained.py
│   │   │   │   │   ├── rope.py
│   │   │   │   │   ├── timm_model.py
│   │   │   │   │   ├── tokenizer.py
│   │   │   │   │   ├── transform.py
│   │   │   │   │   ├── transformer.py
│   │   │   │   │   └── utils.py
│   │   │   │   └── eva_vit.py
│   │   │   ├── eva_clip
│   │   │   │   ├── eva_clip_encoder.py
│   │   │   │   ├── eva_clip_processors.py
│   │   │   │   ├── eva_vit.py
│   │   │   │   ├── factory.py
│   │   │   │   └── model_configs
│   │   │   │       ├── EVA-CLIP-18B.json
│   │   │   │       ├── EVA-CLIP-8B-plus.json
│   │   │   │       ├── EVA-CLIP-8B.json
│   │   │   │       ├── EVA01-CLIP-B-16.json
│   │   │   │       ├── EVA01-CLIP-g-14-plus.json
│   │   │   │       ├── EVA01-CLIP-g-14.json
│   │   │   │       ├── EVA02-CLIP-B-16.json
│   │   │   │       ├── EVA02-CLIP-L-14-336.json
│   │   │   │       ├── EVA02-CLIP-L-14.json
│   │   │   │       ├── EVA02-CLIP-bigE-14-plus.json
│   │   │   │       ├── EVA02-CLIP-bigE-14.json
│   │   │   │       ├── Internal-EVA02-CLIP-10B-14-448.json
│   │   │   │       └── Internal-EVA02-CLIP-10B-14.json
│   │   │   ├── hf_vision.py
│   │   │   ├── imagebind.py
│   │   │   ├── open_clip_encoder.py
│   │   │   └── siglip_encoder.py
│   │   ├── multimodal_projector
│   │   │   ├── __pycache__    (cpython-310/-311: builder, pooler_projector)
│   │   │   ├── builder.py
│   │   │   └── pooler_projector.py
│   │   ├── multimodal_resampler
│   │   │   ├── __pycache__    (cpython-310/-311: builder, masked_drop, perceiver, qformer, spatial_pool)
│   │   │   ├── builder.py
│   │   │   ├── masked_drop.py
│   │   │   ├── perceiver.py
│   │   │   ├── qformer.py
│   │   │   └── spatial_pool.py
│   │   └── utils.py
│   ├── train
│   │   ├── __pycache__        (cpython-310/-311: llava_trainer, train; cpython-311 only: train_zip)
│   │   ├── llama_flash_attn_monkey_patch.py
│   │   ├── llava_trainer.py
│   │   ├── llava_trainer_eval.py
│   │   ├── train.py
│   │   ├── train_dpo.py
│   │   ├── train_mem.py
│   │   ├── train_zip.py
│   │   └── train_zip_entry.py
│   └── utils.py
├── pyproject.toml
├── requirements.txt
└── scripts
    ├── eval
    │   ├── egoschema_submit.py
    │   ├── eval_egoschema.sh
    │   └── eval_lmms.sh
    ├── zero2.json
    ├── zero2_fused_adamw.json
    ├── zero2_offload.json
    ├── zero3.json
    ├── zero3_offload.json
    └── zero3pp.json
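The package layout mirrors the LLaVA-NeXT codebase: llava/model/builder.py assembles the language model (llava/model/language_model/*), the vision tower (llava/model/multimodal_encoder/*) and the projector; llava/constants.py, llava/conversation.py and llava/mm_utils.py handle prompt and image preprocessing; inference.py is the generation entry point; and the *_zip modules (llava_arch_zip.py, llava_qwen_zip.py, train_zip.py) together with compressor.py appear to hold the token-compression path added by LLaVA-Scissor. For rough orientation, a load-and-generate loop in the upstream LLaVA style is sketched below; the function names, conversation-template key, and checkpoint path are assumptions, so check inference.py and llava/model/builder.py for the actual interfaces.

```python
# Minimal sketch of a LLaVA-style load-and-generate loop.
# Assumptions: the upstream LLaVA/LLaVA-NeXT API names below also exist in this fork,
# and the checkpoint path and template key are placeholders, not real repository values.
import torch
from PIL import Image

from llava.constants import DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX
from llava.conversation import conv_templates
from llava.mm_utils import get_model_name_from_path, tokenizer_image_token
from llava.model.builder import load_pretrained_model

model_path = "/path/to/llava-scissor-checkpoint"  # placeholder path
tokenizer, model, image_processor, context_len = load_pretrained_model(
    model_path, None, get_model_name_from_path(model_path)
)

# Preprocess one frame with the vision tower's image processor.
image = Image.open("frame.jpg").convert("RGB")
pixel_values = image_processor(images=image, return_tensors="pt")["pixel_values"]
pixel_values = pixel_values.to(model.device, dtype=torch.float16)

# Build a single-turn prompt that contains the image placeholder token.
conv = conv_templates["qwen_1_5"].copy()  # assumed key; use the template matching the checkpoint
conv.append_message(conv.roles[0], DEFAULT_IMAGE_TOKEN + "\nDescribe what is happening.")
conv.append_message(conv.roles[1], None)
input_ids = tokenizer_image_token(
    conv.get_prompt(), tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt"
).unsqueeze(0).to(model.device)

with torch.inference_mode():
    output_ids = model.generate(
        input_ids, images=pixel_values, do_sample=False, max_new_tokens=256
    )
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])
```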
--------------------------------------------------------------------------------
File contents
--------------------------------------------------------------------------------
Individual file bodies are not reproduced below; each path in the tree resolves to
https://raw.githubusercontent.com/HumanMLLM/LLaVA-Scissor/HEAD/<path>. Only three small
files are shown inline:

/llava.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
(empty)

/llava.egg-info/top_level.txt:
--------------------------------------------------------------------------------
llava
trl

/llava/__init__.py:
--------------------------------------------------------------------------------
from .model import LlavaLlamaForCausalLM
--------------------------------------------------------------------------------