├── .gitattributes
├── .github
├── ISSUE_TEMPLATE
│ ├── api-feedback.md
│ ├── blank-template.md
│ ├── bug-report.md
│ └── feature-request.md
└── workflows
│ └── documentation.yaml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── assets
├── Cockatoo3b.png
├── Cockatoo7b.png
├── Hymba_loss.png
├── Parakeets.png
├── colab-shell-chatbot-demo.png
├── features.png
├── logo.png
├── multimodal-chatbot-demo.gif
├── robin13b.png
├── robin13b_.jpg
├── robin33b.png
├── robin33b_.png
├── robin65b.png
├── robin65b_.png
├── robin7b.jpg
└── robin7b_.png
├── configs
├── accelerate_dsz0_config.yaml
├── accelerate_dsz2_config.yaml
├── accelerate_dsz3_config.yaml
├── accelerator_multigpu_config.yaml
├── accelerator_singlegpu_config.yaml
├── ds_config_chatbot.json
├── ds_config_eval.json
├── ds_config_multimodal.json
├── ds_config_vis_chatbot.json
├── ds_config_zero0_no_offload.json
├── ds_config_zero2.json
├── ds_config_zero2_no_offload.json
├── ds_config_zero3.json
├── ds_config_zero3_for_eval.json
├── ds_config_zero3_no_offload.json
└── iterative_dpo.yaml
├── contrib
├── README.md
├── langchain
│ ├── README.md
│ └── retrieval_chatbot.py
├── long-context
│ ├── hf_sft_full_finetune.sh
│ ├── hf_sft_lora_flashattn.sh
│ └── sft_summarizer.py
├── rlhflow
│ ├── reward_modeling.py
│ └── run_reward_modeling.sh
├── text2image
│ ├── README.md
│ ├── accelerate_t2i_config.yaml
│ ├── diffuser_args.py
│ ├── diffuser_finetuner.py
│ ├── finetune_t2i.py
│ ├── finetune_t2i.sh
│ ├── requirements.txt
│ └── t2i_dataset.py
└── tool-finetune
│ ├── README.md
│ ├── function_call_finetune.py
│ └── run_function_call_finetune.sh
├── data
└── download.sh
├── docker
├── Dockerfile
└── README.md
├── docs
├── requirements.txt
└── source
│ ├── _static
│ ├── IT_sample1.png
│ ├── IT_sample2.png
│ ├── IT_sample3.png
│ ├── IT_sample4.png
│ ├── IT_sample5.png
│ ├── IT_sample6.png
│ ├── IT_sample7.png
│ ├── benchmark-1.png
│ ├── benchmark-2.png
│ ├── check_before_after_lora_tuning.jsonl
│ ├── eq.png
│ ├── logo.png
│ ├── logo.svg
│ ├── logo2.svg
│ ├── logo3.svg
│ ├── logo4.svg
│ ├── logo5.svg
│ ├── logo6.svg
│ ├── nll.png
│ ├── ppl.png
│ ├── raft-demo-examples.png
│ ├── raft.png
│ ├── raft_idea.PNG
│ └── raft_reward.PNG
│ ├── about
│ ├── authors.md
│ ├── changelog.md
│ └── index.md
│ ├── blogs
│ ├── benchmark.md
│ └── index.md
│ ├── conf.py
│ ├── examples
│ ├── DATASETS.md
│ ├── TASK_GUIDE.md
│ ├── checkpoints.md
│ ├── customize_conversation_template.md
│ ├── finetuning.md
│ ├── index.md
│ ├── medical_finetune.md
│ ├── raft.md
│ ├── reward_modeling.md
│ └── supported_conversation_template.md
│ └── index.md
├── examples
├── benchmarking.py
├── chatbot.py
├── chatbot_gradio.py
├── detail_memory.py
├── dpo_train.py
├── dpov2_train.py
├── ds_config.json
├── evaluation.py
├── finetune.py
├── finetune_multi_modal.py
├── inference.py
├── iterative_dpo_train.py
├── merge_lora.py
├── multistage_finetune.py
├── raft_align.py
├── reward_modeling.py
├── rm_inference.py
├── speculative_inference.py
├── tool_inference.py
├── vis_chatbot.py
├── vis_chatbot_gradio.py
└── vllm_inference.py
├── experimental
├── Hymba
│ ├── README.md
│ └── run_finetune_hymba.sh
├── LISA-diffusion
│ ├── README.md
│ ├── diffusion_dpo
│ │ ├── train_diffusion_dpo.py
│ │ └── train_diffusion_dpo_lisa.py
│ ├── docs
│ │ ├── instruct_lisa_lake.png
│ │ ├── instruct_lora_lake.png
│ │ ├── lcm_lisa_mountain.png
│ │ └── lcm_lora_mountain.png
│ ├── instruct_pix2pix
│ │ ├── test_instruct_pix2pix.py
│ │ └── train_instruct_pix2pix_lisa.py
│ ├── latent_consistency_model
│ │ ├── train_lcm_distill_sd_wds_lisa.py
│ │ └── train_lcm_distill_sd_wds_lora.py
│ ├── requirement.txt
│ └── single_lisa.py
└── RAFT-diffusion
│ ├── README.md
│ ├── SD256-RAFT.ipynb
│ ├── requirements.txt
│ └── train_text_to_image_lora.py
├── output_models
└── download.sh
├── pyproject.toml
├── readme
├── Position_Interpolation.md
├── README_es.md
├── README_hindi.md
├── README_jp.md
├── README_ko.md
├── README_zh-hans.md
├── flash_attn2.md
└── multi_node.md
├── requirements.txt
├── scripts
├── bash.sh
├── convert_llama_weights_to_hf.py
├── data_preprocess
│ ├── add_end_mark.py
│ ├── add_prompt.py
│ ├── concat.py
│ ├── concat_shuffle_split.py
│ ├── count.py
│ ├── merge.py
│ ├── raw2textonly.py
│ ├── run_data_preprocess.sh
│ ├── sample.py
│ └── shuffle.py
├── export_llama_state_dict_checkpoint.py
├── multimodal
│ ├── README.md
│ ├── run_finetune_multi_modal_stage1.sh
│ ├── run_finetune_multi_modal_stage2.sh
│ ├── run_vis_chatbot_blip2.sh
│ ├── run_vis_chatbot_gradio_minigpt4.sh
│ ├── run_vis_chatbot_llava.sh
│ └── run_vis_chatbot_minigpt4.sh
├── run_all_benchmark.sh
├── run_app.sh
├── run_benchmark.sh
├── run_chatbot.sh
├── run_chatbot_chatglm.sh
├── run_chatbot_cpu.sh
├── run_detail_gpu_memory.sh
├── run_dpo_align.sh
├── run_dpov2_align.sh
├── run_evaluation.sh
├── run_evaluation_accelerator.sh
├── run_evaluation_with_lora.sh
├── run_finetune.sh
├── run_finetune_with_custom_optim.sh
├── run_finetune_with_lisa.sh
├── run_finetune_with_lora.sh
├── run_finetune_with_qlora.sh
├── run_inference.sh
├── run_inference_multimodal_model.sh
├── run_iterative_dpo.sh
├── run_merge_lora.sh
├── run_multistage_finetune.sh
├── run_raft_align.sh
├── run_reward_modeling.sh
├── run_reward_modeling_with_lisa.sh
├── run_reward_modeling_with_lora.sh
├── run_rm_inference.sh
├── run_tool.sh
├── run_unittest.sh
├── run_vllm_inference.sh
├── speculative_decoding
│ └── README.md
├── tools
│ └── print_model_architecture.py
└── vocab_extension
│ ├── README.md
│ ├── convert_json_to_txt.sh
│ ├── merge_tokenizer.sh
│ ├── train_merge_tokenizer.sh
│ └── train_tokenizer.sh
├── service
├── app.py
├── static
│ ├── assets
│ │ ├── background.png
│ │ └── logo.png
│ └── utils
│ │ └── vue-spinner.js
└── templates
│ └── index.html
├── setup.py
├── src
└── lmflow
│ ├── __init__.py
│ ├── args.py
│ ├── datasets
│ ├── __init__.py
│ ├── dataset.py
│ └── multi_modal_dataset.py
│ ├── models
│ ├── __init__.py
│ ├── auto_model.py
│ ├── base_model.py
│ ├── decoder_model.py
│ ├── encoder_decoder_model.py
│ ├── hf_decoder_model.py
│ ├── hf_encoder_decoder_model.py
│ ├── hf_model_mixin.py
│ ├── hf_text_regression_model.py
│ ├── interfaces
│ │ ├── __init__.py
│ │ └── tunable.py
│ ├── regression_model.py
│ ├── text_regression_model.py
│ ├── vision2seq_model.py
│ └── vision_encoder
│ │ ├── __init__.py
│ │ └── clip_encoder.py
│ ├── optim
│ ├── __init__.py
│ ├── adabelief.py
│ ├── adabound.py
│ ├── adadelta.py
│ ├── adagrad.py
│ ├── adam.py
│ ├── adamax.py
│ ├── adamp.py
│ ├── adamw_schedule_free.py
│ ├── adan.py
│ ├── dummy.py
│ ├── lamb.py
│ ├── lars.py
│ ├── muon.py
│ ├── nadam.py
│ ├── novograd.py
│ ├── optimizers.py
│ ├── radam.py
│ ├── sgd_schedule_free.py
│ ├── sgdp.py
│ ├── sophia.py
│ └── yogi.py
│ ├── pipeline
│ ├── __init__.py
│ ├── auto_pipeline.py
│ ├── base_aligner.py
│ ├── base_pipeline.py
│ ├── base_tuner.py
│ ├── dpo_aligner.py
│ ├── dpov2_aligner.py
│ ├── evaluator.py
│ ├── finetuner.py
│ ├── inferencer.py
│ ├── iterative_dpo_aligner.py
│ ├── raft_aligner.py
│ ├── rm_inferencer.py
│ ├── rm_tuner.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── dpov2_dataprocessor.py
│ │ ├── dpov2_trainer.py
│ │ ├── memory_safe_dpov2_align.py
│ │ ├── memory_safe_vllm_inference.py
│ │ ├── peft_trainer.py
│ │ ├── raft_trainer.py
│ │ ├── rm_dataprocessor.py
│ │ └── rm_trainer.py
│ └── vllm_inferencer.py
│ ├── tokenization
│ ├── __init__.py
│ ├── hf_decoder_model.py
│ └── hf_text_regression_model.py
│ ├── utils
│ ├── __init__.py
│ ├── common.py
│ ├── constants.py
│ ├── conversation_template
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── chatglm.py
│ │ ├── chatml.py
│ │ ├── deepseek.py
│ │ ├── gemma.py
│ │ ├── hymba.py
│ │ ├── internlm.py
│ │ ├── llama.py
│ │ ├── phi.py
│ │ ├── qwen.py
│ │ ├── yi.py
│ │ └── zephyr.py
│ ├── data_utils.py
│ ├── debug
│ │ └── profiler.py
│ ├── flash_attention
│ │ ├── __init__.py
│ │ ├── bloom_flash_attention.py
│ │ ├── gpt2_flash_attention.py
│ │ ├── gpt_neo_flash_attention.py
│ │ ├── llama_flash_attention.py
│ │ └── triton_flash_attention.py
│ ├── llava_conversation_lib.py
│ ├── model.py
│ ├── multimodal.py
│ ├── position_interpolation
│ │ ├── __init__.py
│ │ └── llama_rope_scaled_monkey_patch.py
│ └── versioning.py
│ └── version.py
├── tests
├── __init__.py
├── conftest.py
├── datasets
│ ├── __init__.py
│ └── test_dataset.py
├── models
│ ├── __init__.py
│ ├── test_auto_model.py
│ ├── test_hf_decoder_model.py
│ └── test_tool_inferencer.py
├── pipeline
│ ├── test_auto_pipeline.py
│ └── test_memory_safe_vllm_inferencer.py
└── utils
│ ├── __init__.py
│ ├── test_conversation_formatter.py
│ ├── test_conversation_template.py
│ └── test_data_utils.py
└── utils
├── apply_delta.py
├── convert_json_to_txt.py
├── convert_minigpt4_checkpoints.py
├── download_hf_file.py
├── lm_evaluator.py
├── make_delta.py
├── merge_tokenizer.py
├── preprocess_multimodal_data.py
└── train_tokenizer.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.html linguist-detectable=false
2 | *.js linguist-detectable=false
3 | *.ipynb linguist-detectable=false
4 | *RAFT.pdf filter=lfs diff=lfs merge=lfs -text
5 | *.gif filter=lfs diff=lfs merge=lfs -text
6 | assets/*.gif filter=lfs diff=lfs merge=lfs -text
7 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/api-feedback.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: API Feedback
3 | about: Provide feedback regarding the current design of the API.
4 | title: "[API Design]"
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/blank-template.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Blank Template
3 | about: Other issues
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug Report
3 | about: Create a report to help us improve
4 | title: "[BUG]"
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Smartphone (please complete the following information):**
32 | - Device: [e.g. iPhone6]
33 | - OS: [e.g. iOS8.1]
34 | - Browser [e.g. stock browser, safari]
35 | - Version [e.g. 22]
36 |
37 | **Additional context**
38 | Add any other context about the problem here.
39 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | about: Suggest an idea for this project
4 | title: "[New Feature]"
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/workflows/documentation.yaml:
--------------------------------------------------------------------------------
1 | name: Docs
2 | on: [push, pull_request, workflow_dispatch]
3 | jobs:
4 | docs:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - uses: actions/checkout@v3
8 | - name: python environment setup
9 | uses: actions/setup-python@v5.1.0
10 | with:
11 | python-version: "3.11"
12 | - name: Install dependencies
13 | run: |
14 | pip install -r ./docs/requirements.txt
15 | - name: Sphinx build
16 | run: |
17 | sphinx-build docs/source _build
18 | - name: Deploy
19 | uses: peaceiris/actions-gh-pages@v3
20 | with:
21 | publish_branch: gh-pages
22 | github_token: ${{ secrets.GITHUB_TOKEN }}
23 | publish_dir: _build/
24 | force_orphan: true
25 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # LMFlow
2 |
3 | We welcome contributions from the open-source community with open arms! We value and appreciate all types of participation, not just code. Whether you're answering questions, offering help, improving the documentation, or simply reaching out, your contributions are immensely valuable to us. So, if you're interested, don't hesitate to get involved!
4 |
5 | To start, we encourage everyone to say hello in our public Discord channel. Here, we discuss the latest trends in Large Foundation models, showcase personal projects, help each other with contributions, or just hang out over a cup of coffee. Join us on Discord!
6 |
7 | No matter how you choose to contribute, we strive to maintain an open, welcoming, and kind community. We ask that you read our code of conduct and be respectful during your interactions. It's also essential that you become familiar with the ethical guidelines that guide our project and adhere to the same principles of transparency and responsibility.
8 |
9 | We highly value feedback from the community, so please don't hesitate to speak up if you have any valuable feedback that can help improve the library. We read and consider every message, comment, issue, and pull request (PR).
10 |
--------------------------------------------------------------------------------
/assets/Cockatoo3b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/Cockatoo3b.png
--------------------------------------------------------------------------------
/assets/Cockatoo7b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/Cockatoo7b.png
--------------------------------------------------------------------------------
/assets/Hymba_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/Hymba_loss.png
--------------------------------------------------------------------------------
/assets/Parakeets.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/Parakeets.png
--------------------------------------------------------------------------------
/assets/colab-shell-chatbot-demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/colab-shell-chatbot-demo.png
--------------------------------------------------------------------------------
/assets/features.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/features.png
--------------------------------------------------------------------------------
/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/logo.png
--------------------------------------------------------------------------------
/assets/multimodal-chatbot-demo.gif:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:206296519e7892d65cacc48c7e98c6743301b74c29401d57e325197bd6e41cac
3 | size 79864304
4 |
--------------------------------------------------------------------------------
/assets/robin13b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/robin13b.png
--------------------------------------------------------------------------------
/assets/robin13b_.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/robin13b_.jpg
--------------------------------------------------------------------------------
/assets/robin33b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/robin33b.png
--------------------------------------------------------------------------------
/assets/robin33b_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/robin33b_.png
--------------------------------------------------------------------------------
/assets/robin65b.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/robin65b.png
--------------------------------------------------------------------------------
/assets/robin65b_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/robin65b_.png
--------------------------------------------------------------------------------
/assets/robin7b.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/robin7b.jpg
--------------------------------------------------------------------------------
/assets/robin7b_.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/assets/robin7b_.png
--------------------------------------------------------------------------------
/configs/accelerate_dsz0_config.yaml:
--------------------------------------------------------------------------------
1 | compute_environment: LOCAL_MACHINE
2 | debug: false
3 | deepspeed_config:
4 | gradient_accumulation_steps: 16
5 | zero3_init_flag: false
6 | zero_stage: 0
7 | distributed_type: DEEPSPEED
8 | downcast_bf16: 'no'
9 | machine_rank: 0
10 | main_training_function: main
11 | mixed_precision: bf16
12 | num_machines: 1
13 | num_processes: 8
14 | gpu_ids:
15 | rdzv_backend: static
16 | same_network: true
17 | tpu_env: []
18 | tpu_use_cluster: false
19 | tpu_use_sudo: false
20 | use_cpu: false
21 | main_process_port: 12580
--------------------------------------------------------------------------------
/configs/accelerate_dsz2_config.yaml:
--------------------------------------------------------------------------------
1 | compute_environment: LOCAL_MACHINE
2 | debug: false
3 | deepspeed_config:
4 | offload_optimizer_device: none
5 | offload_param_device: none
6 | zero3_init_flag: false
7 | zero_stage: 2
8 | distributed_type: DEEPSPEED
9 | downcast_bf16: 'no'
10 | machine_rank: 0
11 | main_training_function: main
12 | mixed_precision: bf16
13 | num_machines: 1
14 | num_processes: 8
15 | gpu_ids:
16 | rdzv_backend: static
17 | same_network: true
18 | tpu_env: []
19 | tpu_use_cluster: false
20 | tpu_use_sudo: false
21 | use_cpu: false
22 | main_process_port: 12580
--------------------------------------------------------------------------------
/configs/accelerate_dsz3_config.yaml:
--------------------------------------------------------------------------------
1 | compute_environment: LOCAL_MACHINE
2 | debug: false
3 | deepspeed_config:
4 | deepspeed_multinode_launcher: standard
5 | offload_optimizer_device: none
6 | offload_param_device: none
7 | zero3_init_flag: true
8 | zero3_save_16bit_model: true
9 | zero_stage: 3
10 | distributed_type: DEEPSPEED
11 | downcast_bf16: 'no'
12 | machine_rank: 0
13 | main_training_function: main
14 | mixed_precision: bf16
15 | num_machines: 1
16 | num_processes: 8
17 | gpu_ids:
18 | rdzv_backend: static
19 | same_network: true
20 | tpu_env: []
21 | tpu_use_cluster: false
22 | tpu_use_sudo: false
23 | use_cpu: false
24 | main_process_port: 12580
--------------------------------------------------------------------------------
/configs/accelerator_multigpu_config.yaml:
--------------------------------------------------------------------------------
1 | compute_environment: LOCAL_MACHINE
2 | distributed_type: MULTI_GPU
3 | downcast_bf16: 'no'
4 | dynamo_config:
5 | dynamo_backend: INDUCTOR
6 | gpu_ids:
7 | machine_rank: 0
8 | main_training_function: main
9 | mixed_precision: bf16
10 | num_machines: 1
11 | num_processes: 1
12 | rdzv_backend: static
13 | same_network: true
14 | tpu_env: []
15 | tpu_use_cluster: false
16 | tpu_use_sudo: false
17 | use_cpu: false
18 | main_process_port: 11002
19 |
--------------------------------------------------------------------------------
/configs/accelerator_singlegpu_config.yaml:
--------------------------------------------------------------------------------
1 | compute_environment: LOCAL_MACHINE
2 | distributed_type: 'NO'
3 | downcast_bf16: 'no'
4 | dynamo_config:
5 | dynamo_backend: INDUCTOR
6 | gpu_ids:
7 | machine_rank: 0
8 | main_training_function: main
9 | mixed_precision: bf16
10 | num_machines: 1
11 | num_processes: 1
12 | rdzv_backend: static
13 | same_network: true
14 | tpu_env: []
15 | tpu_use_cluster: false
16 | tpu_use_sudo: false
17 | use_cpu: false
18 |
--------------------------------------------------------------------------------
/configs/ds_config_chatbot.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": false
4 | },
5 | "bf16": {
6 | "enabled": true
7 | },
8 | "comms_logger": {
9 | "enabled": false,
10 | "verbose": false,
11 | "prof_all": false,
12 | "debug": false
13 | },
14 | "steps_per_print": 20000000000000000,
15 | "train_micro_batch_size_per_gpu": 1,
16 | "wall_clock_breakdown": false
17 | }
18 |
--------------------------------------------------------------------------------
/configs/ds_config_eval.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": false
4 | },
5 | "bf16": {
6 | "enabled": false
7 | },
8 | "steps_per_print": 2000,
9 | "train_micro_batch_size_per_gpu": 1,
10 | "wall_clock_breakdown": false
11 | }
12 |
--------------------------------------------------------------------------------
/configs/ds_config_multimodal.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": "auto",
4 | "loss_scale": 0,
5 | "loss_scale_window": 1000,
6 | "initial_scale_power": 16,
7 | "hysteresis": 2,
8 | "min_loss_scale": 1
9 | },
10 | "bf16": {
11 | "enabled": "auto"
12 | },
13 | "train_micro_batch_size_per_gpu": "auto",
14 | "train_batch_size": "auto",
15 | "gradient_accumulation_steps": "auto",
16 | "zero_optimization": {
17 | "stage": 2,
18 | "overlap_comm": true,
19 | "contiguous_gradients": true,
20 | "sub_group_size": 1e9,
21 | "reduce_bucket_size": "auto"
22 | }
23 | }
--------------------------------------------------------------------------------
/configs/ds_config_vis_chatbot.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": false
4 | },
5 | "bf16": {
6 | "enabled": false
7 | },
8 | "comms_logger": {
9 | "enabled": false,
10 | "verbose": false,
11 | "prof_all": false,
12 | "debug": false
13 | },
14 | "steps_per_print": 20000000000000000,
15 | "train_micro_batch_size_per_gpu": 1,
16 | "wall_clock_breakdown": false
17 | }
--------------------------------------------------------------------------------
/configs/ds_config_zero0_no_offload.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": "auto",
4 | "loss_scale": 0,
5 | "loss_scale_window": 1000,
6 | "initial_scale_power": 16,
7 | "hysteresis": 2,
8 | "min_loss_scale": 1
9 | },
10 |
11 | "bf16": {
12 | "enabled": "auto"
13 | },
14 |
15 | "zero_optimization": {
16 | "stage": 0,
17 | "allgather_partitions": true,
18 | "allgather_bucket_size": 2e8,
19 | "overlap_comm": true,
20 | "reduce_scatter": true,
21 | "reduce_bucket_size": 2e8,
22 | "contiguous_gradients": true
23 | },
24 |
25 | "gradient_accumulation_steps": "auto",
26 | "gradient_clipping": "auto",
27 | "steps_per_print": 2000,
28 | "train_batch_size": "auto",
29 | "train_micro_batch_size_per_gpu": "auto",
30 | "wall_clock_breakdown": false
31 | }
32 |
--------------------------------------------------------------------------------
/configs/ds_config_zero2.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": "auto",
4 | "loss_scale": 0,
5 | "loss_scale_window": 1000,
6 | "initial_scale_power": 16,
7 | "hysteresis": 2,
8 | "min_loss_scale": 1
9 | },
10 |
11 | "bf16": {
12 | "enabled": "auto"
13 | },
14 |
15 | "zero_optimization": {
16 | "stage": 2,
17 | "offload_optimizer": {
18 | "device": "cpu",
19 | "pin_memory": true
20 | },
21 | "allgather_partitions": true,
22 | "allgather_bucket_size": 2e8,
23 | "overlap_comm": true,
24 | "reduce_scatter": true,
25 | "reduce_bucket_size": 2e8,
26 | "contiguous_gradients": true
27 | },
28 |
29 | "gradient_accumulation_steps": "auto",
30 | "gradient_clipping": "auto",
31 | "steps_per_print": 2000,
32 | "train_batch_size": "auto",
33 | "train_micro_batch_size_per_gpu": "auto",
34 | "wall_clock_breakdown": false
35 | }
--------------------------------------------------------------------------------
/configs/ds_config_zero2_no_offload.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": "auto",
4 | "loss_scale": 0,
5 | "loss_scale_window": 1000,
6 | "initial_scale_power": 16,
7 | "hysteresis": 2,
8 | "min_loss_scale": 1
9 | },
10 |
11 | "bf16": {
12 | "enabled": "auto"
13 | },
14 |
15 | "zero_optimization": {
16 | "stage": 2,
17 | "allgather_partitions": true,
18 | "allgather_bucket_size": 2e8,
19 | "overlap_comm": true,
20 | "reduce_scatter": true,
21 | "reduce_bucket_size": 2e8,
22 | "contiguous_gradients": true
23 | },
24 |
25 | "gradient_accumulation_steps": "auto",
26 | "gradient_clipping": "auto",
27 | "steps_per_print": 2000,
28 | "train_batch_size": "auto",
29 | "train_micro_batch_size_per_gpu": "auto",
30 | "wall_clock_breakdown": false
31 | }
32 |
--------------------------------------------------------------------------------
/configs/ds_config_zero3.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": "auto",
4 | "loss_scale": 0,
5 | "loss_scale_window": 1000,
6 | "initial_scale_power": 16,
7 | "hysteresis": 2,
8 | "min_loss_scale": 1
9 | },
10 |
11 | "bf16": {
12 | "enabled": "auto"
13 | },
14 |
15 | "zero_optimization": {
16 | "stage": 3,
17 | "offload_optimizer": {
18 | "device": "cpu"
19 | },
20 | "overlap_comm": true,
21 | "contiguous_gradients": true,
22 | "sub_group_size": 1e9,
23 | "reduce_bucket_size": "auto",
24 | "stage3_prefetch_bucket_size": "auto",
25 | "stage3_param_persistence_threshold": "auto",
26 | "stage3_max_live_parameters": 2e10,
27 | "stage3_max_reuse_distance": 2e10,
28 | "stage3_gather_16bit_weights_on_model_save": true
29 | },
30 |
31 | "gradient_accumulation_steps": "auto",
32 | "gradient_clipping": "auto",
33 | "steps_per_print": 2000,
34 | "train_batch_size": "auto",
35 | "train_micro_batch_size_per_gpu": "auto",
36 | "wall_clock_breakdown": false
37 | }
38 |
--------------------------------------------------------------------------------
/configs/ds_config_zero3_for_eval.json:
--------------------------------------------------------------------------------
1 | {
2 | "bf16": {
3 | "enabled": true
4 | },
5 | "zero_optimization": {
6 | "stage": 3,
7 | "offload_optimizer": {
8 | "device": "cpu",
9 | "pin_memory": true
10 | },
11 | "offload_param": {
12 | "device": "cpu",
13 | "pin_memory": true
14 | },
15 | "overlap_comm": true,
16 | "contiguous_gradients": true,
17 | "sub_group_size": 1e9,
18 | "reduce_bucket_size": "auto",
19 | "stage3_prefetch_bucket_size": "auto",
20 | "stage3_param_persistence_threshold": "auto",
21 | "stage3_max_live_parameters": 1e9,
22 | "stage3_max_reuse_distance": 1e9,
23 | "stage3_gather_16bit_weights_on_model_save": true
24 | },
25 |
26 | "steps_per_print": 2000,
27 | "train_micro_batch_size_per_gpu": 1,
28 | "wall_clock_breakdown": false
29 | }
30 |
--------------------------------------------------------------------------------
/configs/ds_config_zero3_no_offload.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": "auto",
4 | "loss_scale": 0,
5 | "loss_scale_window": 1000,
6 | "initial_scale_power": 16,
7 | "hysteresis": 2,
8 | "min_loss_scale": 1
9 | },
10 |
11 | "bf16": {
12 | "enabled": "auto"
13 | },
14 |
15 | "zero_optimization": {
16 | "stage": 3,
17 | "overlap_comm": true,
18 | "contiguous_gradients": true,
19 | "sub_group_size": 1e9,
20 | "reduce_bucket_size": "auto",
21 | "stage3_prefetch_bucket_size": "auto",
22 | "stage3_param_persistence_threshold": "auto",
23 | "stage3_max_live_parameters": 2e10,
24 | "stage3_max_reuse_distance": 2e10,
25 | "stage3_gather_16bit_weights_on_model_save": true
26 | },
27 |
28 | "gradient_accumulation_steps": "auto",
29 | "gradient_clipping": "auto",
30 | "steps_per_print": 2000,
31 | "train_batch_size": "auto",
32 | "train_micro_batch_size_per_gpu": "auto",
33 | "wall_clock_breakdown": false
34 | }
35 |
--------------------------------------------------------------------------------
/configs/iterative_dpo.yaml:
--------------------------------------------------------------------------------
1 | # general
2 | ## model
3 | model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
4 | reference_model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
5 | reward_model_name_or_path: sfairXC/FsfairX-LLaMA3-RM-v0.1
6 | trust_remote_code: True
7 |
8 | ## data
9 | dataset_path_list:
10 | - data/iterative-prompt-3it/iter1
11 | - data/iterative-prompt-3it/iter2
12 | - data/iterative-prompt-3it/iter3
13 | conversation_template: llama3
14 | preprocessing_num_workers: 16
15 |
16 | ## pipeline
17 | output_dir: ./output_models/iterative_dpo
18 | run_name: iterative_dpo
19 | random_seed: 42
20 | use_accelerator: True
21 | enable_distributed_inference: True
22 | distributed_inference_num_instances: 8
23 | initial_iter_idx: 0 # 0 refers to the first dataset in dataset_path_list
24 | do_response_generation: True
25 | do_scoring: True
26 | do_dpo_align: True
27 |
28 |
29 | # inference phase
30 | ## general
31 | apply_chat_template: True
32 | num_output_sequences: 8
33 | use_beam_search: False
34 | temperature: 1.0
35 | top_p: 1.0
36 | max_new_tokens: 2048
37 | enable_decode_inference_result: True
38 |
39 | ## vllm
40 | use_vllm: True
41 | vllm_gpu_memory_utilization: 0.95
42 | vllm_tensor_parallel_size: 1
43 | vllm_inference_batch_size: 16
44 |
45 |
46 | # reward model scoring phase
47 | reward_arch_type: text_regression
48 | reward_torch_dtype: bf16
49 | reward_use_flash_attention: True
50 | reward_model_inference_block_size: 2048
51 | overwrite_cache: True
52 | reward_model_inference_batch_size: 10 # the actual batch size for rm forward will be reward_model_inference_batch_size * num_output_sequences
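# e.g., with the settings above (num_output_sequences: 8), each reward-model forward pass scores 10 * 8 = 80 generated responses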
53 |
54 |
55 | # dpo phase
56 | ## model
57 | do_train: True
58 | use_flash_attention: True
59 |
60 | ## data
61 | sampling_paired_method: max_min
62 | margin_scale: 1.0
63 | length_penalty: 0
64 | max_prompt_length: 1000
65 | mask_prompt: False
66 |
67 | ## pipeline
68 | ### training
69 | accelerate_config_file: configs/accelerate_dsz2_config.yaml
70 | bf16: True
71 | num_train_epochs: 2
72 | max_steps: 1200
73 | learning_rate: 5.0e-7
74 | warmup_steps: 100
75 | per_device_train_batch_size: 1
76 | per_device_eval_batch_size: 1
77 | gradient_accumulation_steps: 16
78 | gradient_checkpointing: True
79 | loss_type: sigmoid
80 | lr_scheduler_type: cosine
81 | optim: paged_adamw_32bit
82 |
83 | ### logging
84 | logging_steps: 2
85 | save_strategy: steps
86 | save_steps: 500
87 | evaluation_strategy: steps
88 | eval_steps: 500
89 | report_to: wandb
--------------------------------------------------------------------------------
/contrib/README.md:
--------------------------------------------------------------------------------
1 | # Contributing to LMFlow
2 |
 3 | Thanks for your interest in LMFlow! The LMFlow team appreciates contributions in any form:
4 |
5 | * issues
6 | * documentation improvements
7 | * new features
8 | * bug fixes
9 | * and particularly, runnable examples with SOTA models or techniques.
10 |
11 | For details of the contribution guidelines, please refer to the following sections.
12 |
13 | ## How to Contribute
14 |
15 | ### How to create Pull Requests (PR)
16 |
17 | For general pull request instructions, one may refer to this guideline: [GitHub Pull Request Examples](https://gist.github.com/Chaser324/ce0505fbed06b947d962).
18 |
19 | In short, every PR involves the following steps (a minimal command-line sketch follows the list):
20 |
21 | 1. Fork the repository under your own account.
22 | 2. Clone and install the repository to your local machine.
23 | 3. Add your own modifications.
24 | 4. Run tests and make sure everything is working.
25 | 5. Push to your own remote repository.
26 | 6. Check the instructions in the [guidebook](https://gist.github.com/Chaser324/ce0505fbed06b947d962) and make sure your remote branch is up-to-date with LMFlow's main branch.
27 | 7. If not, go back to Step 3 and resolve the conflicts.
28 | 8. If it is, create your PR. We will review the code and merge the changes into main once the review is finished :smile:
29 |
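For reference, the steps above can be sketched with plain git commands as follows (the fork URL, branch name, and test command below are illustrative placeholders; adapt them to your setup):

```
# Steps 1-2: fork LMFlow on GitHub, then clone and install your fork (YOUR_USERNAME is a placeholder)
git clone https://github.com/YOUR_USERNAME/LMFlow.git
cd LMFlow
pip install -e .

# Step 3: add your modifications on a feature branch (branch name is illustrative)
git checkout -b my-feature

# Step 4: run the tests
./scripts/run_unittest.sh

# Step 5: push to your own remote repository
git push origin my-feature

# Step 6: make sure your branch is up-to-date with LMFlow's main branch before opening the PR
git remote add upstream https://github.com/OptimalScale/LMFlow.git
git fetch upstream
git rebase upstream/main
```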
30 | Currently, we enthusiastically welcome contributions of documentation and runnable examples. Runnable examples are collected under `contrib/{YOUR_NAME}` and can be used by everyone! :rocket:
31 |
32 | ## Style Guidelines
33 |
34 | ### Code Style
35 |
36 | LMFlow adopts the [Google coding style](https://google.github.io/styleguide/) in principle. We encourage every contribution to follow the same style.
37 |
38 | ### Git Commits
39 |
40 | We would appreciate commits that follow the principles below (a brief example follows the list):
41 |
42 | * Describe concisely what the commit does
43 | * Write the message in the imperative mood, starting with a capitalized verb, e.g., "Fix typo in README" or "Add LISA support for model parallelism".
44 | * Squash commits so that each commit describes a whole fix/feature
45 |
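As a quick illustration of the last point (the commands and commit count are illustrative, not a required workflow), commits can be squashed locally before pushing:

```
# Squash the last 3 commits into one; mark the extra commits as "squash" in the editor
git rebase -i HEAD~3
# Update the already-pushed branch on your fork
git push --force-with-lease origin my-feature
```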
46 | Thank you for your interest in LMFlow! Any suggestions and contributions would be greatly appreciated.
47 |
--------------------------------------------------------------------------------
/contrib/langchain/README.md:
--------------------------------------------------------------------------------
1 | ## Langchain
2 |
3 | ### Setup
4 |
5 | ```
6 | pip install langchain
7 | pip install langchain-openai langchain-anthropic langchain-google-genai langchain-chroma langchain-community bs4
8 | ```
9 |
10 | ### Run Chatbot
11 |
12 | To run the script, go to the root of this repo and use the following command:
13 |
14 | ```
15 | python contrib/langchain/retrieval_chatbot.py [options]
16 | ```
17 |
18 | ### Command-Line Arguments
19 | - `--model-name-or-path` - Specifies the name or path of the model used for generating responses.
20 | - `--provider` - Supports the following providers: `openai`, `anthropic`, `google`, and `huggingface`.
21 | - `--set-url` - Retrieves content from a specified URL if enabled.
22 | - `--set-txt` - Retrieves content from a local txt file if enabled.
23 | - `--session-id` - Session ID of this chat; default: `demo`.
24 | - `--save-history` - Saves the chat history if enabled.
25 | - `--save-dir` - Directory to store chat history; default: `tmp/chat_history`.
26 |
27 | ### Example Usage
28 |
29 | - Inference with `gpt-4o`, a specified URL, and a txt file
30 | ```
31 | cd data && ./download.sh example_doc_for_retrieval.txt && cd -
32 | python contrib/langchain/retrieval_chatbot.py --provider "openai" --model-name-or-path "gpt-4o" --set-url --set-txt
33 | ```
34 | - Then set the URL and txt file as follows:
35 | ```
36 | Please enter the url: https://optimalscale.github.io/LMFlow/index.html
37 | Please enter the text file path: data/example_doc_for_retrieval.txt
38 | ```
--------------------------------------------------------------------------------
/contrib/long-context/hf_sft_full_finetune.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # accelerate launch --main_process_port 0 ...
3 |
4 | # Finetune
5 | python sft_summarizer.py \
6 | --model_name_or_path microsoft/Phi-3-vision-128k-instruct \
7 | --learning_rate 1e-3 \
8 | --per_device_train_batch_size 1 \
9 | --per_device_eval_batch_size 1 \
10 | --gradient_accumulation_steps 1 \
11 | --trust_remote_code \
12 | --output_dir output_models/finetuned_Phi3 \
13 | --logging_steps 1 \
14 | --num_train_epochs 1 \
15 | --save_strategy "steps" \
16 | --save_total_limit 2 \
17 | --lr_scheduler_type "constant" \
18 | --max_steps -1 \
19 | --torch_dtype 'bfloat16' \
20 | --gradient_checkpointing \
21 | --logging_strategy "epoch" \
22 | --do_eval True \
23 | --evaluation_strategy 'epoch' \
24 | --bf16 \
25 | --bf16_full_eval True \
26 | --max_seq_length 10000 \
27 | --eval_accumulation_steps 4 \
28 | --use_peft False\
29 | --save_only_model True \
30 | --overwrite_output_dir True
--------------------------------------------------------------------------------
/contrib/long-context/hf_sft_lora_flashattn.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # accelerate launch --main_process_port 0 ...
3 |
4 | # Finetune
5 | python sft_summarizer.py \
6 | --model_name_or_path microsoft/Phi-3-vision-128k-instruct \
7 | --learning_rate 1e-3 \
8 | --per_device_train_batch_size 1 \
9 | --per_device_eval_batch_size 1 \
10 | --gradient_accumulation_steps 1 \
11 | --trust_remote_code \
12 | --output_dir output_models/finetuned_Phi3 \
13 | --logging_steps 1 \
14 | --num_train_epochs 1 \
15 | --save_strategy "steps" \
16 | --save_total_limit 2 \
17 | --lr_scheduler_type "constant" \
18 | --max_steps -1 \
19 | --torch_dtype 'bfloat16' \
20 | --gradient_checkpointing \
21 | --logging_strategy "epoch" \
22 | --do_eval True \
23 | --evaluation_strategy 'epoch' \
24 | --bf16 \
25 | --bf16_full_eval True \
26 | --max_seq_length 10000 \
27 | --attn_implementation 'flash_attention_2' \
28 | --eval_accumulation_steps 4 \
29 | --use_peft False\
30 | --lora_r 16 \
31 | --lora_alpha 16 \
32 | --save_only_model True \
33 | --overwrite_output_dir True
--------------------------------------------------------------------------------
/contrib/rlhflow/run_reward_modeling.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under ${project_id} in project directory of
3 | # https://github.com/shizhediao/llm-ft
4 | # COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4
5 |
6 | deepspeed_args="--master_port=11000" # Default argument
7 | if [ $# -ge 1 ]; then
8 | deepspeed_args="$1"
9 | fi
10 |
11 | exp_id=rm
12 | project_dir=$(cd "$(dirname $0)"/..; pwd)
13 | output_dir=${project_dir}/output_models/${exp_id}
14 | log_dir=${project_dir}/log/${exp_id}
15 |
16 | dataset_path=${project_dir}/data/hh_rlhf/rm/hh_rlhf_rm_training.json
17 | if [ ! -d data/hh_rlhf ]; then
18 | cd data && ./download.sh hh_rlhf && cd -
19 | fi
20 |
21 | mkdir -p ${output_dir} ${log_dir}
22 |
23 | deepspeed ${deepspeed_args} \
24 | contrib/rlhflow/reward_modeling.py \
25 | --model_name_or_path gpt2 \
26 | --dataset_path ${dataset_path} \
27 | --output_dir ${output_dir} --overwrite_output_dir \
28 | --num_train_epochs 1 \
29 | --learning_rate 3e-5 \
30 | --block_size 512 \
31 | --per_device_train_batch_size 1 \
32 | --per_device_eval_batch_size 1\
33 | --deepspeed configs/ds_config_zero2.json \
34 | --bf16 \
35 | --run_name rm_test \
36 | --validation_split_percentage 10 \
37 | --logging_steps 10 \
38 | --do_train \
39 | --ddp_timeout 72000 \
40 | --save_steps 999999 \
41 | --evaluation_strategy steps\
42 | --eval_steps 100\
43 | --weight_decay 0.001\
44 | --dataloader_num_workers 1 \
45 | | tee ${log_dir}/train.log \
46 | 2> ${log_dir}/train.err
47 |
--------------------------------------------------------------------------------
/contrib/text2image/accelerate_t2i_config.yaml:
--------------------------------------------------------------------------------
1 | compute_environment: LOCAL_MACHINE
2 | debug: false
3 | distributed_type: MULTI_GPU
4 | downcast_bf16: 'no'
5 | enable_cpu_affinity: false
6 | gpu_ids: all
7 | machine_rank: 0
8 | main_training_function: main
9 | mixed_precision: fp16
10 | num_machines: 1
11 | num_processes: 4
12 | rdzv_backend: static
13 | same_network: true
14 | tpu_env: []
15 | tpu_use_cluster: false
16 | tpu_use_sudo: false
17 | use_cpu: false
18 |
--------------------------------------------------------------------------------
/contrib/text2image/finetune_t2i.sh:
--------------------------------------------------------------------------------
1 | # Parses arguments
2 | model_name_or_path=stabilityai/stable-diffusion-2-1
3 | model_type="unet"
4 | dataset_path=data/example
5 | output_dir=output
6 | main_port=29500
7 | img_size=768
8 |
9 | while [[ $# -ge 1 ]]; do
10 | key="$1"
11 | case ${key} in
12 | -m|--model_name_or_path)
13 | model_name_or_path="$2"
14 | shift
15 | ;;
16 | -t|--model_type)
17 | model_type="$2"
18 | shift
19 | ;;
20 | -d|--dataset_path)
21 | dataset_path="$2"
22 | shift
23 | ;;
24 | -o|--output_dir)
25 | output_dir="$2"
26 | shift
27 | ;;
28 | -p|--main_port)
29 | main_port="$2"
30 | shift
31 | ;;
32 | -i|--img_size)
33 | img_size="$2"
34 | shift
35 | ;;
36 | *)
37 | echo "error: unknown option \"${key}\"" 1>&2
38 | exit 1
39 | esac
40 | shift
41 | done
42 |
43 | echo "model_name_or_path: ${model_name_or_path}"
44 | echo "model_type: ${model_type}"
45 | echo "dataset_path: ${dataset_path}"
46 | echo "output_dir: ${output_dir}"
47 | echo "main_port: ${main_port}"
48 | echo "img_size: ${img_size}"
49 |
50 |
51 | accelerate launch \
52 | --config_file=./accelerate_t2i_config.yaml \
53 | --main_port=${main_port} \
54 | finetune_t2i.py \
55 | --model_name_or_path=${model_name_or_path} \
56 | --model_type=${model_type} \
57 | --use_lora=True \
58 | --lora_target_module "to_k" "to_q" "to_v" "to_out.0" "add_k_proj" "add_v_proj" \
59 | --dataset_path=${dataset_path} \
60 | --image_folder="img" \
61 | --image_size=${img_size} \
62 | --train_file="train.json" \
63 | --validation_file="valid.json" \
64 | --test_file="test.json" \
65 | --output_dir=${output_dir} \
66 | --logging_dir="logs" \
67 | --overwrite_output_dir=True \
68 | --mixed_precision="fp16" \
69 | --num_train_epochs=100 \
70 | --train_batch_size=1 \
71 | --learning_rate=1e-4 \
72 | --valid_steps=50
73 |
--------------------------------------------------------------------------------
/contrib/text2image/requirements.txt:
--------------------------------------------------------------------------------
1 | diffusers>=0.29.2
--------------------------------------------------------------------------------
/contrib/tool-finetune/README.md:
--------------------------------------------------------------------------------
1 | ## Function-call Finetune
2 |
3 | ### Pip Dependencies
4 |
5 | ```
6 | bitsandbytes==0.40.0
7 | deepspeed==0.12.0
8 | flash-attn==2.5.7
9 | peft==0.10.0
10 | torch==2.1.2+cu118
11 | transformers==4.40.1
12 | vllm==0.5.2
13 | xformers==0.0.27
14 | ```
15 |
16 | ### Conversation Template
17 | ```
18 | {
19 | "type": "conversation",
20 | "instances": [
21 | {
22 | "system": "You are a helpful assistant with access to the following functions. Use them if required - ",
23 | "tools": ["{\"name\": \"", \"description\": \"", \"parameters\": {\"type\": \"object\", \"properties\": {\"property_1\": {\"type\": \"xxx\", \"description\": \"\"}, \"property_2\": {\"type\": \"xxx\", \"description\": \"\"}}, \"required\": [\"required_1\", \"property_n\"]}}",]",
24 | "messages": [
25 | {
26 | "role": "user",
27 | "content": ""
28 | },
29 | {
30 | "role": "function",
31 | "content": ""
32 | },
33 | {
34 | "role": "observation",
35 | "content": ""
36 | },
37 | {
38 | "role": "assistant",
39 | "content": ""
40 | }
41 | ]
42 | },
43 | {
44 | "system": "You are a helpful assistant, with no access to external functions.",
45 | "tools": [],
46 | "messages": [
47 | {
48 | "role": "user",
49 | "content": ""
50 | },
51 | {
52 | "role": "assistant",
53 | "content": ""
54 | }
55 | ]
56 | }
57 | ]
58 | }
59 | ```
60 |
61 | ### Run Function-call Finetune Example
62 | ```
63 | ./contrib/tool-finetune/run_function_call_finetune.sh \
64 | --model_name_or_path meta-llama/Meta-Llama-3-8B \
65 | --trust_remote_code True \
66 | --conversation_template llama3_for_tool \
67 | --dataset_path /home/wenhesun/LMFlow/data/glaive-function-calling-v2 \
68 | --output_model_path /home/wenhesun/LMFlow/output_models/function-call-finetuned-llama
69 | ```
70 |
71 | ### Command-Line Arguments
72 | - `--model_name_or_path` - Specifies the name or path of the model to be finetuned.
73 | - `--conversation_template` - Currently supports the following choices: `llama3_for_tool`, `qwen2_for_tool`.
74 | - `--dataset_path` - The path to the dataset that has been converted to the format specified above.
75 | - `--output_model_path` - Directory to store the finetuned model and logs.
--------------------------------------------------------------------------------
/contrib/tool-finetune/run_function_call_finetune.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under ${project_id} in project directory of
3 | # https://github.com/shizhediao/llm-ft
4 | # COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4
5 |
6 | # Parses arguments
7 | model_name_or_path=gpt2
8 | dataset_path=data/alpaca/train_conversation
9 | output_dir=output_models/finetune
10 | deepspeed_args="--master_port=12000"
11 | conversation_template=llama2
12 |
13 | # Safety related arguments
14 | trust_remote_code=0
15 |
16 | while [[ $# -ge 1 ]]; do
17 | key="$1"
18 | case ${key} in
19 | -m|--model_name_or_path)
20 | model_name_or_path="$2"
21 | shift
22 | ;;
23 | -d|--dataset_path)
24 | dataset_path="$2"
25 | shift
26 | ;;
27 | -o|--output_model_path)
28 | output_dir="$2"
29 | shift
30 | ;;
31 | --conversation_template)
32 | conversation_template="$2"
33 | shift
34 | ;;
35 | --deepspeed_args)
36 | deepspeed_args="$2"
37 | shift
38 | ;;
39 | --trust_remote_code)
40 | trust_remote_code="$2"
41 | shift
42 | ;;
43 | *)
44 | echo "error: unknown option \"${key}\"" 1>&2
45 | exit 1
46 | esac
47 | shift
48 | done
49 |
50 | # Finetune
51 | exp_id=finetune
52 | project_dir=$(cd "$(dirname $0)"/..; pwd)
53 | log_dir=${project_dir}/log/${exp_id}
54 | mkdir -p ${output_dir} ${log_dir}
55 |
56 | deepspeed ${deepspeed_args} \
57 | contrib/tool-finetune/function_call_finetune.py \
58 | --model_name_or_path ${model_name_or_path} \
59 | --trust_remote_code ${trust_remote_code} \
60 | --dataset_path ${dataset_path} \
61 | --output_dir ${output_dir} --overwrite_output_dir \
62 | --conversation_template ${conversation_template} \
63 | --num_train_epochs 0.01 \
64 | --learning_rate 2e-5 \
65 | --disable_group_texts 1 \
66 | --block_size 1024 \
67 | --per_device_train_batch_size 1 \
68 | --deepspeed configs/ds_config_zero3.json \
69 | --fp16 \
70 | --run_name finetune \
71 | --validation_split_percentage 0 \
72 | --logging_steps 20 \
73 | --do_train \
74 | --ddp_timeout 72000 \
75 | --save_steps 5000 \
76 | --dataloader_num_workers 1 \
77 | > >(tee ${log_dir}/train.log) \
78 | 2> >(tee ${log_dir}/train.err >&2)
79 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:11.3.0-cudnn8-devel-ubuntu20.04
2 |
3 | ENV TZ=Etc/UTC
4 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
5 |
6 | RUN apt-get update --fix-missing && apt-get install -y fontconfig --fix-missing
7 | RUN apt-get install -y libopenmpi-dev
8 | RUN apt-get install -y git python3.9 python3.9-dev python3.9-venv
9 | RUN python3.9 -m venv /venv
10 | ENV PATH=/venv/bin:$PATH
11 | RUN pip install mpi4py
12 |
13 | ARG SRCDIR
14 |
15 | RUN mkdir /LMFlow/
16 | WORKDIR /LMFlow/
17 |
18 | COPY $SRCDIR/ /LMFlow/
19 |
20 | RUN pip install wheel
21 | RUN pip install -e .
22 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | pydata-sphinx-theme
3 | sphinx_design
4 | myst-parser
5 | sphinx-autoapi
6 | matplotlib
7 | numpydoc
--------------------------------------------------------------------------------
/docs/source/_static/IT_sample1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/IT_sample1.png
--------------------------------------------------------------------------------
/docs/source/_static/IT_sample2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/IT_sample2.png
--------------------------------------------------------------------------------
/docs/source/_static/IT_sample3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/IT_sample3.png
--------------------------------------------------------------------------------
/docs/source/_static/IT_sample4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/IT_sample4.png
--------------------------------------------------------------------------------
/docs/source/_static/IT_sample5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/IT_sample5.png
--------------------------------------------------------------------------------
/docs/source/_static/IT_sample6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/IT_sample6.png
--------------------------------------------------------------------------------
/docs/source/_static/IT_sample7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/IT_sample7.png
--------------------------------------------------------------------------------
/docs/source/_static/benchmark-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/benchmark-1.png
--------------------------------------------------------------------------------
/docs/source/_static/benchmark-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/benchmark-2.png
--------------------------------------------------------------------------------
/docs/source/_static/eq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/eq.png
--------------------------------------------------------------------------------
/docs/source/_static/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/logo.png
--------------------------------------------------------------------------------
/docs/source/_static/logo4.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/source/_static/logo5.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/source/_static/nll.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/nll.png
--------------------------------------------------------------------------------
/docs/source/_static/ppl.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/ppl.png
--------------------------------------------------------------------------------
/docs/source/_static/raft-demo-examples.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/raft-demo-examples.png
--------------------------------------------------------------------------------
/docs/source/_static/raft.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/raft.png
--------------------------------------------------------------------------------
/docs/source/_static/raft_idea.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/raft_idea.PNG
--------------------------------------------------------------------------------
/docs/source/_static/raft_reward.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/docs/source/_static/raft_reward.PNG
--------------------------------------------------------------------------------
/docs/source/about/authors.md:
--------------------------------------------------------------------------------
1 | # Contributors
2 |
3 |
4 | Shizhe Diao, Rui Pan, Hanze Dong, Ka Shun Shum, Jipeng Zhang, Wei Xiong, Tong Zhang
5 |
--------------------------------------------------------------------------------
/docs/source/about/changelog.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 |
4 | ## Version 0.0.1 (Mar 28, 2023)
5 |
6 | The first public version.
7 |
8 | Support for task tuning and instruction tuning on user-defined datasets.
9 |
10 | A simple and extensible API for developers.
11 |
12 | Efficient finetuning with LoRA.
13 |
14 | Simplified model inference framework.
15 |
16 |
--------------------------------------------------------------------------------
/docs/source/about/index.md:
--------------------------------------------------------------------------------
1 | # About
2 |
3 |
4 | ```{toctree}
5 | :maxdepth: 2
6 |
7 | changelog
8 | ```
9 |
10 |
11 | ```{toctree}
12 | :maxdepth: 2
13 |
14 | authors
15 | ```
16 |
--------------------------------------------------------------------------------
/docs/source/blogs/index.md:
--------------------------------------------------------------------------------
1 | # Blogs
2 |
3 | ## 2023
4 |
5 |
6 | ```{toctree}
7 | :maxdepth: 1
8 |
9 | benchmark
10 | ```
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # For the full list of built-in configuration values, see the documentation:
4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
5 |
6 | # -- Project information -----------------------------------------------------
7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
8 |
9 | project = 'LMFlow'
10 | copyright = 'LMFlow 2024'
11 | author = 'The LMFlow Team'
12 |
13 | import sys
14 | import os
15 | sys.path.insert(0,os.path.abspath('../..'))
16 |
17 |
18 | # -- General configuration ---------------------------------------------------
19 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
20 |
21 |
22 | templates_path = ['_templates']
23 | exclude_patterns = []
24 |
25 | extensions = [
26 | "sphinx.ext.autodoc",
27 | "sphinx.ext.autosummary",
28 | "sphinx.ext.todo",
29 | "sphinx.ext.viewcode",
30 | 'myst_parser',
31 | 'autoapi.extension',
32 | #"sphinxext.rediraffe",
33 | "sphinx_design",
34 | #"sphinx_copybutton",
35 | # For extension examples and demos
36 | #"ablog",
37 | "matplotlib.sphinxext.plot_directive",
38 | #"myst_nb",
39 | # "nbsphinx", # Uncomment and comment-out MyST-NB for local testing purposes.
40 | "numpydoc",
41 | #"sphinx_togglebutton",
42 | #"sphinx_favicon",
43 | ]
44 |
45 | autosummary_generate = True
46 |
47 | autoapi_type = 'python'
48 | autoapi_dirs = ['../../src']
49 |
50 | source_suffix = {
51 | '.rst': 'restructuredtext',
52 | '.md': 'markdown',
53 | }
54 |
55 |
56 | # -- Options for HTML output -------------------------------------------------
57 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
58 |
59 | html_theme = "pydata_sphinx_theme"
60 | html_static_path = ['_static']
61 | html_css_files = []
62 | # html_logo = "_static/logo.png"
63 | html_theme_options = {
64 | "announcement": "We've released our memory-efficient finetuning algorithm LISA, check out [Paper][User Guide] for more details!",
65 | "back_to_top_button": False,
66 | "header_links_before_dropdown": 4,
67 | "icon_links": [
68 | {
69 | "name": "LMFlow",
70 | "url": "https://github.com/OptimalScale/LMFlow",
71 | "icon": "_static/logo5.svg",
72 | "type": "local",
73 | "attributes": {"target": "_blank"},
74 | },
75 | ],
76 | "logo": {
77 | "text": "LMFlow",
78 | "image_dark": "_static/logo5.svg",
79 | "alt_text": "LMFlow",
80 | },
81 | }
82 |
83 |
--------------------------------------------------------------------------------
/docs/source/examples/checkpoints.md:
--------------------------------------------------------------------------------
1 | # Checkpoints
2 |
3 | In general, you can directly load from checkpoints by using `--model_name_or_path`. However, the LLaMA case is slightly different due to the copyright issue.
4 |
5 |
6 | ## LLaMA Checkpoint
7 |
8 | 1. First, you need to get access to the LLaMA model from [facebookresearch/llama](https://github.com/facebookresearch/llama). Download the official checkpoints and save them to `${llama-path}`.
9 |
10 | 2. Second, convert the official checkpoints `${llama-path}` to HuggingFace-supported checkpoints `${llama-hf-path}` by running
11 |
12 | `python ./scripts/convert_llama_weights_to_hf.py --input_dir ${llama-path} --model_size 7B --output_dir ${llama-hf-path}/llama-7b-hf`
13 |
14 | 3. Then you are good to go by setting the checkpoint path to `${llama-hf-path}/llama-7b-hf`. Enjoy it!
15 |
16 | 4. (optional) Now you have the original llama-7b-hf pretrained model. With
17 | ```sh
18 | cd output_models && ./download.sh all && cd -
19 | ```
20 | you can obtain the model weight differences finetuned by us. Then, in a way similar to `./scripts/run_evaluation_with_lora.sh`, run
21 | ```sh
22 | CUDA_VISIBLE_DEVICES=0 \
23 | deepspeed examples/evaluation.py \
24 | --answer_type text \
25 | --model_name_or_path ${llama-hf-path}/llama-7b-hf \
26 | --lora_model_path output_models/${llama-model-diff-path} \
27 | --dataset_path data/alpaca/test \
28 | --prompt_structure "Input: {input}" \
29 | --deepspeed examples/ds_config.json
30 | ```
31 | You can now evaluate with the finetuned llama model.
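32 | 
33 | As a quick sanity check that the checkpoint conversion in step 2 succeeded, the converted folder should load directly with the standard `transformers` API (a generic snippet, not an LMFlow API; replace the path placeholder as above):
34 | ```python
35 | from transformers import AutoModelForCausalLM, AutoTokenizer
36 | 
37 | path = "${llama-hf-path}/llama-7b-hf"  # the converted checkpoint from step 2
38 | tokenizer = AutoTokenizer.from_pretrained(path)
39 | model = AutoModelForCausalLM.from_pretrained(path)
40 | print(model.config.model_type)  # expected: "llama"
41 | ```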
--------------------------------------------------------------------------------
/docs/source/examples/index.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | We provide several examples to show how to use our package for your own problems.
4 |
5 | ## Data preparation
6 |
7 | ```{toctree}
8 | :maxdepth: 3
9 |
10 | DATASETS
11 | ```
12 |
13 | ```{toctree}
14 | :maxdepth: 3
15 |
16 | checkpoints
17 | ```
18 |
19 | ## Finetuning
20 |
21 | For SFT,
22 |
23 | ```{toctree}
24 | :maxdepth: 3
25 |
26 | finetuning
27 | ```
28 |
29 |
30 | For the alignment process,
31 |
32 | ```{toctree}
33 | :maxdepth: 3
34 |
35 | reward_modeling
36 | ```
37 |
38 |
39 | ```{toctree}
40 | :maxdepth: 3
41 |
42 | raft
43 | ```
44 |
45 | ## Inference
46 |
47 | Refer to [examples](https://github.com/OptimalScale/LMFlow/blob/main/examples).
48 |
49 | ## Evaluation
50 |
51 | ```{toctree}
52 | :maxdepth: 3
53 |
54 | TASK_GUIDE
55 | ```
56 |
57 |
58 |
--------------------------------------------------------------------------------
/docs/source/examples/medical_finetune.md:
--------------------------------------------------------------------------------
1 | # Finetune
2 |
3 | ```python
4 | import os
5 | import sys
6 | from transformers import HfArgumentParser
7 |
8 | from lmflow.args import (
9 | ModelArguments,
10 | DatasetArguments,
11 | AutoArguments,
12 | )
13 |
14 | from lmflow.datasets.dataset import Dataset
15 | from lmflow.models.tunable_models import TunableModel
16 | from lmflow.pipeline.auto_pipeline import AutoPipeline
17 |
18 |
19 | def main():
20 | # Parses arguments
21 | pipeline_name = "finetuner"
22 | PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
23 |
24 | parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments))
25 | if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
26 | # If we pass only one argument to the script and it's the path to a json file,
27 | # let's parse it to get our arguments.
28 | model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
29 | else:
30 | model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
31 |
32 | # TODO: deepspeed config initialization
33 |
34 | # Initialization
35 | finetuner = AutoPipeline.get_pipeline(
36 | pipeline_name=pipeline_name,
37 | model_args=model_args,
38 | data_args=data_args,
39 | pipeline_args=pipeline_args,
40 | )
41 | dataset = Dataset(data_args)
42 | model = TunableModel(model_args)
43 |
44 | # Tokenization and text grouping must be done in the main process
45 | with pipeline_args.main_process_first(desc="dataset map tokenization"):
46 | tokenized_dataset = model.tokenize(dataset)
47 | lm_dataset = finetuner.group_text(
48 | tokenized_dataset,
49 | model_max_length=model.get_max_length(),
50 | )
51 |
52 | # Finetuning
53 | tuned_model = finetuner.tune(model=model, lm_dataset=lm_dataset)
54 |
55 | ```
56 |
--------------------------------------------------------------------------------
/examples/dpo_train.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # @Time : 7/4/2024 20:31
4 | # @Author : Yu Li
5 | # @Site :
6 | # @File : dpo_train.py
7 | # 0. imports
8 | import logging
9 | import os
10 | import sys
11 | sys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0])))
12 | from dataclasses import dataclass, field
13 | from typing import Optional
14 |
15 | import torch
16 | from transformers import HfArgumentParser, pipeline, AutoTokenizer
17 |
18 | from lmflow.args import (
19 | ModelArguments,
20 | DatasetArguments,
21 | AutoArguments,
22 | )
23 | from lmflow.datasets.dataset import Dataset
24 | from lmflow.models.auto_model import AutoModel
25 | from lmflow.pipeline.auto_pipeline import AutoPipeline
26 |
27 | if __name__ == "__main__":
28 | # Parses arguments
29 | pipeline_name = "dpo_aligner"
30 | PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
31 | parser = HfArgumentParser((
32 | ModelArguments,
33 | DatasetArguments,
34 | PipelineArguments,
35 | ))
36 |
37 | model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
38 |
39 | # Initializes pipeline, dataset and model for reward training
40 | aligner = AutoPipeline.get_pipeline(
41 | pipeline_name=pipeline_name,
42 | model_args=model_args,
43 | data_args=data_args,
44 | pipeline_args=pipeline_args,
45 | )
46 | model = AutoModel.get_model(model_args)
47 |
48 | # Aligns model with rewards
49 | aligned_model = aligner.align(
50 | model=model,
51 | dataset=None,
52 | reward_model=None
53 | )
--------------------------------------------------------------------------------
/examples/dpov2_train.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | import logging
5 | import os
6 | import sys
7 | import copy
8 |
9 | from transformers import (
10 | HfArgumentParser
11 | )
12 |
13 | from lmflow.datasets import Dataset
14 | from lmflow.models.auto_model import AutoModel
15 | from lmflow.pipeline.auto_pipeline import AutoPipeline
16 | from lmflow.args import (
17 | ModelArguments,
18 | DatasetArguments,
19 | AutoArguments,
20 | )
21 | from lmflow.utils.common import remove_dataclass_attr_prefix, create_copied_dataclass
22 |
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 |
27 | ReferenceModelArguments = create_copied_dataclass(
28 | original_dataclass=ModelArguments,
29 | field_prefix="reference_",
30 | class_prefix="Reference"
31 | )
32 |
33 |
34 | def main():
35 | # Parses arguments
36 | pipeline_name = "dpov2_aligner"
37 | PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
38 |
39 | parser = HfArgumentParser((
40 | ModelArguments,
41 | ReferenceModelArguments,
42 | DatasetArguments,
43 | PipelineArguments
44 | ))
45 | if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
46 | # If we pass only one argument to the script and it's the path to a json file,
47 | # let's parse it to get our arguments.
48 | model_args, ref_model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
49 | else:
50 | model_args, ref_model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
51 |
52 | ref_model_args_dict = remove_dataclass_attr_prefix(ref_model_args, "reference_")
53 | ref_model_args = ModelArguments(**ref_model_args_dict)
54 |
55 | train_dataset = Dataset(data_args)
56 | eval_data_args = copy.deepcopy(data_args)
57 | eval_data_args.dataset_path = pipeline_args.eval_dataset_path
58 | eval_dataset = Dataset(eval_data_args)
59 | model = AutoModel.get_model(model_args)
60 | ref_model = AutoModel.get_model(ref_model_args)
61 | aligner = AutoPipeline.get_pipeline(
62 | pipeline_name=pipeline_name,
63 | model_args=model_args,
64 | data_args=data_args,
65 | pipeline_args=pipeline_args,
66 | ref_model_args=ref_model_args,
67 | )
68 |
69 | res = aligner.align(
70 | model=model,
71 | ref_model=ref_model,
72 | train_dataset=train_dataset,
73 | eval_dataset=eval_dataset,
74 | )
75 |
76 |
77 | if __name__ == "__main__":
78 | main()
--------------------------------------------------------------------------------
/examples/ds_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "fp16": {
3 | "enabled": false
4 | },
5 | "bf16": {
6 | "enabled": true
7 | },
8 | "steps_per_print": 2000,
9 | "train_micro_batch_size_per_gpu": 1,
10 | "wall_clock_breakdown": false
11 | }
12 |
--------------------------------------------------------------------------------
/examples/evaluation.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved.
4 | """A one-line summary of the module or program, terminated by a period.
5 |
6 | Leave one blank line. The rest of this docstring should contain an
7 | overall description of the module or program. Optionally, it may also
8 | contain a brief description of exported classes and functions and/or usage
9 | examples.
10 |
11 | Typical usage example:
12 |
13 | foo = ClassFoo()
14 | bar = foo.FunctionBar()
15 | """
16 | import json
17 | import os
18 | import sys
19 | sys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0])))
20 | from transformers import HfArgumentParser
21 |
22 | from lmflow.datasets.dataset import Dataset
23 | from lmflow.pipeline.auto_pipeline import AutoPipeline
24 | from lmflow.models.auto_model import AutoModel
25 | from lmflow.args import ModelArguments, DatasetArguments, AutoArguments
26 |
27 |
28 | pipeline_name = "evaluator"
29 | PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
30 |
31 | parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments))
32 | model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
33 |
34 | with open (pipeline_args.deepspeed, "r") as f:
35 | ds_config = json.load(f)
36 |
37 | model = AutoModel.get_model(
38 | model_args,
39 | tune_strategy='none',
40 | ds_config=ds_config,
41 | use_accelerator=pipeline_args.use_accelerator_for_evaluator
42 | )
43 | dataset = Dataset(data_args)
44 |
45 | evaluator = AutoPipeline.get_pipeline(
46 | pipeline_name=pipeline_name,
47 | model_args=model_args,
48 | data_args=data_args,
49 | pipeline_args=pipeline_args,
50 | )
51 | evaluator.evaluate(model=model, dataset=dataset, metric=pipeline_args.metric)
52 |
--------------------------------------------------------------------------------
/examples/finetune.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved.
4 | """A one-line summary of the module or program, terminated by a period.
5 |
6 | Leave one blank line. The rest of this docstring should contain an
7 | overall description of the module or program. Optionally, it may also
8 | contain a brief description of exported classes and functions and/or usage
9 | examples.
10 |
11 | Typical usage example:
12 |
13 | foo = ClassFoo()
14 | bar = foo.FunctionBar()
15 | """
16 |
17 | import sys
18 | import os
19 | sys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0])))
20 | from transformers import HfArgumentParser
21 |
22 | from lmflow.args import (
23 | ModelArguments,
24 | DatasetArguments,
25 | AutoArguments,
26 | )
27 |
28 | from lmflow.datasets.dataset import Dataset
29 | from lmflow.models.auto_model import AutoModel
30 | from lmflow.pipeline.auto_pipeline import AutoPipeline
31 |
32 |
33 | def main():
34 | # Parses arguments
35 | pipeline_name = "finetuner"
36 | PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
37 |
38 | parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments))
39 | if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
40 | # If we pass only one argument to the script and it's the path to a json file,
41 | # let's parse it to get our arguments.
42 | model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
43 | else:
44 | model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
45 |
46 | # Initialization
47 | finetuner = AutoPipeline.get_pipeline(
48 | pipeline_name=pipeline_name,
49 | model_args=model_args,
50 | data_args=data_args,
51 | pipeline_args=pipeline_args,
52 | )
53 | dataset = Dataset(data_args)
54 | model = AutoModel.get_model(model_args)
55 |
56 | # Finetuning
57 | tuned_model = finetuner.tune(model=model, dataset=dataset)
58 |
59 |
60 | if __name__ == '__main__':
61 | main()
62 |
--------------------------------------------------------------------------------
/examples/merge_lora.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved.
4 | """
5 | Merge base model and lora model into a full model.
6 | """
7 |
8 | import sys
9 | import os
10 | sys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0])))
11 |
12 | from dataclasses import dataclass, field
13 | from transformers import HfArgumentParser
14 | from typing import Optional
15 |
16 | from lmflow.args import (
17 | ModelArguments,
18 | AutoArguments,
19 | )
20 |
21 | from lmflow.models.auto_model import AutoModel
22 |
23 |
24 | @dataclass
25 | class MergeLoraArguments:
26 | device: str = field(
27 | default='cpu',
28 | metadata={
29 | "help": "device to merge model on",
30 | },
31 | )
32 | ds_config: str = field(
33 | default='configs/ds_config_eval.json',
34 | metadata={
35 | "help": "deepspeed config file path",
36 | },
37 | )
38 | output_model_path: Optional[str] = field(
39 | default=None,
40 | metadata={
41 | "help": "output merged full model path"
42 | },
43 | )
44 | local_rank: Optional[int] = field(
45 | default=-1,
46 | metadata={
47 | "help": "local rank for deepspeed",
48 | },
49 | )
50 |
51 |
52 | def main():
53 | parser = HfArgumentParser((ModelArguments, MergeLoraArguments))
54 | if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
55 | model_args, merge_lora_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
56 | else:
57 | model_args, merge_lora_args = parser.parse_args_into_dataclasses()
58 |
59 | if merge_lora_args.device == 'gpu':
60 | raise NotImplementedError('Merging LoRA weight using GPU not supported yet. Please use cpu.')
61 |
62 | model_args.use_lora = True
63 | model = AutoModel.get_model(
64 | model_args,
65 | tune_strategy='none',
66 | device=merge_lora_args.device,
67 | ds_config=merge_lora_args.ds_config
68 | )
69 | model.activate_model_for_inference()
70 | model.merge_lora_weights()
71 | model.save(merge_lora_args.output_model_path, save_full_model=True)
72 |
73 |
74 | if __name__ == '__main__':
75 | main()
76 |
--------------------------------------------------------------------------------
/examples/reward_modeling.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | import logging
5 | import sys
6 | import os
7 | sys.path.remove(os.path.abspath(os.path.dirname(sys.argv[0])))
8 |
9 | import torch
10 | from transformers import (
11 | HfArgumentParser
12 | )
13 |
14 | from lmflow.args import (
15 | ModelArguments,
16 | DatasetArguments,
17 | AutoArguments,
18 | )
19 | from lmflow.datasets.dataset import Dataset
20 | from lmflow.models.auto_model import AutoModel
21 | from lmflow.pipeline.auto_pipeline import AutoPipeline
22 |
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 |
27 | def main():
28 | # Parses arguments
29 | pipeline_name = "rm_tuner"
30 | PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
31 |
32 | parser = HfArgumentParser((ModelArguments, DatasetArguments, PipelineArguments))
33 | if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
34 | # If we pass only one argument to the script and it's the path to a json file,
35 | # let's parse it to get our arguments.
36 | model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
37 | else:
38 | model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
39 |
40 | # Initialization
41 | finetuner = AutoPipeline.get_pipeline(
42 | pipeline_name=pipeline_name,
43 | model_args=model_args,
44 | data_args=data_args,
45 | pipeline_args=pipeline_args,
46 | )
47 | dataset = Dataset(data_args)
48 | model = AutoModel.get_model(model_args)
49 |
50 | # Finetuning
51 | tuned_model = finetuner.tune(model=model, dataset=dataset)
52 |
53 |
54 | if __name__ == '__main__':
55 | main()
56 |
--------------------------------------------------------------------------------
/examples/rm_inference.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | import logging
5 | import os
6 | import sys
7 |
8 | from transformers import (
9 | HfArgumentParser
10 | )
11 |
12 | from lmflow.datasets import Dataset
13 | from lmflow.models.auto_model import AutoModel
14 | from lmflow.pipeline.auto_pipeline import AutoPipeline
15 | from lmflow.args import (
16 | ModelArguments,
17 | DatasetArguments,
18 | AutoArguments,
19 | )
20 |
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 |
25 | def main():
26 | # Parses arguments
27 | pipeline_name = "rm_inferencer"
28 | PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
29 |
30 | parser = HfArgumentParser((
31 | ModelArguments,
32 | DatasetArguments,
33 | PipelineArguments
34 | ))
35 | if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
36 | # If we pass only one argument to the script and it's the path to a json file,
37 | # let's parse it to get our arguments.
38 | model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
39 | else:
40 | model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
41 |
42 | dataset = Dataset(data_args)
43 | model = AutoModel.get_model(model_args, tune_strategy='none', use_accelerator=pipeline_args.use_accelerator)
44 | inferencer = AutoPipeline.get_pipeline(
45 | pipeline_name=pipeline_name,
46 | model_args=model_args,
47 | data_args=data_args,
48 | pipeline_args=pipeline_args
49 | )
50 |
51 | res = inferencer.inference(
52 | model,
53 | dataset,
54 | )
55 |
56 | if pipeline_args.save_results:
57 | res.save(pipeline_args.results_path)
58 |
59 |
60 | if __name__ == "__main__":
61 | main()
--------------------------------------------------------------------------------
/examples/speculative_inference.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 |
4 | if __name__ == '__main__':
5 | parser = argparse.ArgumentParser()
6 | parser.add_argument('--gpu', type=str, default='0',
7 | help='gpu id, currently speculative inference only support single gpu')
8 | parser.add_argument('--model', type=str, default='gpt2-xl',
9 | help='target model name or path (i.e., the large model you want to accelerate), \
10 | currently only supports huggingface decoder only models')
11 | parser.add_argument('--draft_model', type=str, default='gpt2',
12 | help='draft model name or path, currently only supports huggingface decoder only models')
13 | parser.add_argument('--gamma', type=int, default=5,
14 | help='number of tokens that the draft model will generate at each step')
15 | parser.add_argument('--max_new_tokens', type=int, default=512,
16 | help='maximum number of tokens that the speculative inference will generate')
17 | parser.add_argument('--temperature', type=float, default=0.3,
18 | help='temperature for sampling')
19 |
20 | params = parser.parse_args()
21 |
22 | os.environ["CUDA_VISIBLE_DEVICES"] = params.gpu
23 | from lmflow.args import InferencerArguments
24 | from lmflow.args import ModelArguments
25 | from lmflow.args import DatasetArguments
26 | from lmflow.models import hf_decoder_model
27 | from lmflow.pipeline.inferencer import SpeculativeInferencer
28 |
29 |
30 | model_args = ModelArguments(model_name_or_path=params.model)
31 | model = hf_decoder_model.HFDecoderModel(model_args)
32 | draft_model_args = ModelArguments(model_name_or_path=params.draft_model)
33 | draft_model = hf_decoder_model.HFDecoderModel(draft_model_args)
34 | inferencer_args = InferencerArguments()
35 | data_args = DatasetArguments()
36 |
37 | specinf = SpeculativeInferencer(model_args, draft_model_args, data_args, inferencer_args)
38 |
39 | while True:
40 | try:
41 | text = input("Speculative Inference: ")
42 | specinf_res = specinf.inference(model,
43 | draft_model,
44 | text,
45 | gamma=params.gamma,
46 | max_new_tokens=params.max_new_tokens,
47 | temperature=params.temperature)
48 | print(specinf_res)
49 | print('\n\n')
50 |
51 | except EOFError:
52 | break
53 |
--------------------------------------------------------------------------------
/examples/tool_inference.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | from lmflow.args import InferencerArguments
4 | from lmflow.args import ModelArguments
5 | from lmflow.args import DatasetArguments
6 | from lmflow.models import hf_decoder_model
7 | from lmflow.pipeline.inferencer import ToolInferencer
8 | def main():
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument('--gpu', type=str, default='0',
11 | help='gpu id, currently speculative inference only support single gpu')
12 | parser.add_argument('--model', type=str, default='codellama/CodeLlama-7b-instruct-hf',
13 | help='target code generation model name or path you \
14 | currently only supports huggingface decoder only models')
15 | params = parser.parse_args()
16 | os.environ["CUDA_VISIBLE_DEVICES"] = params.gpu
17 |
18 | model_args = ModelArguments(model_name_or_path=params.model)
19 | model = hf_decoder_model.HFDecoderModel(model_args)
20 | inferencer_args = InferencerArguments()
21 | data_args = DatasetArguments()
22 |
23 | toolinf = ToolInferencer(model_args, data_args, inferencer_args)
24 |
25 | while True:
26 | try:
27 | text = input("Tool Inference: ")
28 | toolinf_res = toolinf.inference(model, text)
29 | toolinf_res = toolinf_res.replace("","")
30 | toolinf_res = toolinf_res.replace("","")
31 | print('\n\nResult:')
32 | print(toolinf_res)
33 | print('\n\n')
34 | run_code = input("Run code? (y/n): ")
35 | if run_code == 'y':
36 | toolinf.code_exec(toolinf_res)
37 | if run_code == 'n':
38 | continue
39 |
40 |
41 | except EOFError:
42 | break
43 |
44 | if __name__ == '__main__':
45 | main()
--------------------------------------------------------------------------------
/examples/vllm_inference.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | import logging
5 | import os
6 | import sys
7 |
8 | from transformers import (
9 | HfArgumentParser
10 | )
11 |
12 | from lmflow.datasets import Dataset
13 | from lmflow.models.auto_model import AutoModel
14 | from lmflow.pipeline.auto_pipeline import AutoPipeline
15 | from lmflow.args import (
16 | ModelArguments,
17 | DatasetArguments,
18 | AutoArguments,
19 | )
20 |
21 |
22 | logger = logging.getLogger(__name__)
23 |
24 |
25 | def main():
26 | # Parses arguments
27 | pipeline_name = "vllm_inferencer"
28 | PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
29 |
30 | parser = HfArgumentParser((
31 | ModelArguments,
32 | DatasetArguments,
33 | PipelineArguments
34 | ))
35 | if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
36 | # If we pass only one argument to the script and it's the path to a json file,
37 | # let's parse it to get our arguments.
38 | model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
39 | else:
40 | model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
41 |
42 | dataset = Dataset(data_args)
43 | model = AutoModel.get_model(model_args, tune_strategy='none')
44 | inferencer = AutoPipeline.get_pipeline(
45 | pipeline_name=pipeline_name,
46 | model_args=model_args,
47 | data_args=data_args,
48 | pipeline_args=pipeline_args
49 | )
50 |
51 | res = inferencer.inference(
52 | model,
53 | dataset,
54 | release_gpu=False,
55 | enable_decode_inference_result=pipeline_args.enable_decode_inference_result,
56 | enable_distributed_vllm_inference=pipeline_args.enable_distributed_vllm_inference,
57 | )
58 |
59 |
60 | if __name__ == "__main__":
61 | main()
--------------------------------------------------------------------------------
/experimental/Hymba/README.md:
--------------------------------------------------------------------------------
1 | # Training Hymba with LMFlow
2 |
3 | ## Hymba
4 | [GITHUB](https://github.com/NVlabs/hymba/tree/main)
5 | Hymba is a family of small language models (SLMs) featuring a hybrid-head parallel architecture that integrates transformer attention mechanisms with SSMs to achieve the best of both worlds: enhanced efficiency and improved performance. In Hymba, attention heads provide high-resolution recall, while SSM heads enable efficient context summarization.
6 |
7 | ## Preparing the environment
8 |
9 | - Using Docker
10 |
11 | ```bash
12 | docker pull ghcr.io/tilmto/hymba:v1
13 | docker run --gpus all -v /home/$USER:/home/$USER -it ghcr.io/tilmto/hymba:v1 bash
14 | ```
15 |
16 | - Install LMFlow in the docker container
17 |
18 | ```bash
19 | git clone https://github.com/OptimalScale/LMFlow.git
20 | cd LMFlow
21 | conda create -n lmflow python=3.9 -y
22 | conda activate lmflow
23 | conda install mpi4py
24 | pip install -e .
25 | ```
26 |
27 | - Tips
28 |
29 | For training the Hymba model, please add the arguments below to the `run_finetune.sh` script:
30 |
31 | ```bash
32 | --trust_remote_code True
33 | --bf16
34 | ```
35 |
36 | Demo script: [run_finetune_hymba.sh](./run_finetune_hymba.sh)
37 |
38 | Recommended GPUs: A100, H100, A40.
39 |
40 |
41 | ## Training Loss
42 | The training loss curve for `nvidia/Hymba-1.5B-Instruct`, fine-tuned on the `MedMCQA/train` dataset with a learning rate of $5e-5$ over 100 steps using SFT, LoRA, LISA, and DoRA, is shown below:
43 | ![Training loss curve](../../assets/Hymba_loss.png)
--------------------------------------------------------------------------------
/experimental/Hymba/run_finetune_hymba.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under ${project_id} in project directory of
3 | # https://github.com/shizhediao/llm-ft
4 | # COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4
5 |
6 | # Parses arguments
7 | model_name_or_path=nvidia/Hymba-1.5B-Instruct
8 | dataset_path=MedMCQA/train
9 | output_dir=output_models/finetune
10 | deepspeed_args="--master_port=11000"
11 | conversation_template=llama2
12 |
13 | # Safety related arguments
14 | trust_remote_code=0
15 |
16 | while [[ $# -ge 1 ]]; do
17 | key="$1"
18 | case ${key} in
19 | -m|--model_name_or_path)
20 | model_name_or_path="$2"
21 | shift
22 | ;;
23 | -d|--dataset_path)
24 | dataset_path="$2"
25 | shift
26 | ;;
27 | -o|--output_model_path)
28 | output_dir="$2"
29 | shift
30 | ;;
31 | --conversation_template)
32 | conversation_template="$2"
33 | shift
34 | ;;
35 | --deepspeed_args)
36 | deepspeed_args="$2"
37 | shift
38 | ;;
39 | --trust_remote_code)
40 | trust_remote_code="$2"
41 | shift
42 | ;;
43 | *)
44 | echo "error: unknown option \"${key}\"" 1>&2
45 | exit 1
46 | esac
47 | shift
48 | done
49 |
50 | # Finetune
51 | exp_id=finetune
52 | project_dir=$(cd "$(dirname $0)"/..; pwd)
53 | log_dir=${project_dir}/log/${exp_id}
54 | mkdir -p ${output_dir} ${log_dir}
55 |
56 | deepspeed ${deepspeed_args} \
57 | examples/finetune.py \
58 | --model_name_or_path ${model_name_or_path} \
59 | --trust_remote_code ${trust_remote_code} \
60 | --dataset_path ${dataset_path} \
61 | --output_dir ${output_dir} --overwrite_output_dir \
62 | --conversation_template ${conversation_template} \
63 | --num_train_epochs 0.01 \
64 | --learning_rate 5e-5 \
65 | --disable_group_texts 1 \
66 | --block_size 256 \
67 | --trust_remote_code True \
68 | --per_device_train_batch_size 1 \
69 | --deepspeed configs/ds_config_zero2_no_offload.json \
70 | --bf16 \
71 | --run_name hymba_finetune \
72 | --validation_split_percentage 0 \
73 | --logging_steps 1 \
74 | --do_train \
75 | --gradient_checkpointing 1 \
76 | --use_flash_attention 1 \
77 | --ddp_timeout 72000 \
78 | --save_steps 5000 \
79 | --dataloader_num_workers 1 \
80 | > >(tee ${log_dir}/train.log) \
81 | 2> >(tee ${log_dir}/train.err >&2)
82 |
83 |
84 |
--------------------------------------------------------------------------------
/experimental/LISA-diffusion/docs/instruct_lisa_lake.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/experimental/LISA-diffusion/docs/instruct_lisa_lake.png
--------------------------------------------------------------------------------
/experimental/LISA-diffusion/docs/instruct_lora_lake.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/experimental/LISA-diffusion/docs/instruct_lora_lake.png
--------------------------------------------------------------------------------
/experimental/LISA-diffusion/docs/lcm_lisa_mountain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/experimental/LISA-diffusion/docs/lcm_lisa_mountain.png
--------------------------------------------------------------------------------
/experimental/LISA-diffusion/docs/lcm_lora_mountain.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/experimental/LISA-diffusion/docs/lcm_lora_mountain.png
--------------------------------------------------------------------------------
/experimental/LISA-diffusion/requirement.txt:
--------------------------------------------------------------------------------
1 | accelerate>=0.16.0
2 | torchvision
3 | transformers>=4.25.1
4 | ftfy
5 | peft
6 | wandb
7 | tensorboard
8 | Jinja2
9 | webdataset
10 | datasets
--------------------------------------------------------------------------------
/experimental/RAFT-diffusion/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate==0.18.0
2 | asttokens==2.2.1
3 | backcall==0.2.0
4 | bitsandbytes==0.37.2
5 | certifi==2022.12.7
6 | charset-normalizer==3.1.0
7 | clip==1.0
8 | cmake==3.26.1
9 | comm==0.1.3
10 | contourpy==1.0.7
11 | cycler==0.11.0
12 | debugpy==1.6.7
13 | decorator==5.1.1
14 | diffusers==0.14.0
15 | executing==1.2.0
16 | filelock==3.11.0
17 | fonttools==4.39.3
18 | ftfy==6.1.1
19 | huggingface-hub==0.13.4
20 | idna==3.4
21 | importlib-metadata==6.2.0
22 | importlib-resources==5.12.0
23 | ipykernel==6.22.0
24 | ipython==8.12.0
25 | jedi==0.18.2
26 | Jinja2==3.1.2
27 | jupyter_client==8.1.0
28 | jupyter_core==5.3.0
29 | kiwisolver==1.4.4
30 | lit==16.0.0
31 | MarkupSafe==2.1.2
32 | matplotlib==3.7.1
33 | matplotlib-inline==0.1.6
34 | mpmath==1.3.0
35 | mypy-extensions==1.0.0
36 | nest-asyncio==1.5.6
37 | networkx==3.1
38 | numpy==1.24.2
39 | nvidia-cublas-cu11==11.10.3.66
40 | nvidia-cuda-cupti-cu11==11.7.101
41 | nvidia-cuda-nvrtc-cu11==11.7.99
42 | nvidia-cuda-runtime-cu11==11.7.99
43 | nvidia-cudnn-cu11==8.5.0.96
44 | nvidia-cufft-cu11==10.9.0.58
45 | nvidia-curand-cu11==10.2.10.91
46 | nvidia-cusolver-cu11==11.4.0.1
47 | nvidia-cusparse-cu11==11.7.4.91
48 | nvidia-nccl-cu11==2.14.3
49 | nvidia-nvtx-cu11==11.7.91
50 | open-clip-torch==2.16.0
51 | packaging==23.0
52 | pandas==2.0.0
53 | parso==0.8.3
54 | pexpect==4.8.0
55 | pickleshare==0.7.5
56 | Pillow==9.5.0
57 | pip==23.0.1
58 | platformdirs==3.2.0
59 | prompt-toolkit==3.0.38
60 | protobuf==3.20.3
61 | psutil==5.9.4
62 | ptyprocess==0.7.0
63 | pure-eval==0.2.2
64 | Pygments==2.14.0
65 | pyparsing==3.0.9
66 | pyre-extensions==0.0.23
67 | python-dateutil==2.8.2
68 | pytz==2023.3
69 | PyYAML==6.0
70 | pyzmq==25.0.2
71 | regex==2023.3.23
72 | requests==2.28.2
73 | sentencepiece==0.1.97
74 | setuptools==65.6.3
75 | six==1.16.0
76 | stack-data==0.6.2
77 | sympy==1.11.1
78 | timm==0.6.13
79 | tokenizers==0.13.3
80 | torch==2.0.0
81 | torchvision==0.15.1
82 | tornado==6.2
83 | tqdm==4.65.0
84 | traitlets==5.9.0
85 | transformers==4.27.4
86 | triton==2.0.0
87 | typing_extensions==4.5.0
88 | typing-inspect==0.8.0
89 | tzdata==2023.3
90 | urllib3==1.26.15
91 | wcwidth==0.2.6
92 | wheel==0.38.4
93 | xformers==0.0.18
94 | zipp==3.15.0
95 |
96 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools >= 64"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [tool.ruff]
6 | target-version = "py39"
7 | indent-width = 4
8 |
9 | [tool.ruff.lint.isort]
10 | lines-after-imports = 2
11 | known-first-party = ["lmflow"]
12 |
13 | [tool.ruff.format]
14 | quote-style = "double"
15 | indent-style = "space"
16 | docstring-code-format = true
17 | skip-magic-trailing-comma = false
18 | line-ending = "auto"
--------------------------------------------------------------------------------
/readme/Position_Interpolation.md:
--------------------------------------------------------------------------------
1 | # Position Interpolation
2 | LMFlow now supports the latest Linear & NTK (Neural Tangent Kernel) scaling techniques for LLaMA models. \
3 | For more details on these techniques, check out the links below:
4 | * Linear scaling: \
5 | https://arxiv.org/abs/2306.15595
6 | * NTK scaling: \
7 | https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
8 | ## Usage
9 | To use the Position Interpolation Techniques, you need to set the following options:
10 | ```
11 | --truncate_to_model_max_length False
12 | --do_rope_scaling True
13 | ```
14 | For linear scaling, set the extending ratio by:
15 | ```
16 | --rope_pi_ratio 4
17 | ```
18 | For NTK scaling, set the extending ratio by:
19 | ```
20 | --rope_ntk_ratio 4
21 | ```
22 | Here is an example of evaluation bash code:
23 | ```
24 | #!/bin/bash
25 |
26 | CUDA_VISIBLE_DEVICES=0 \
27 | deepspeed examples/evaluation.py \
28 | --answer_type text \
29 | --model_name_or_path pinkmanlove/llama-7b-hf \
30 | --dataset_path data/wiki_en_eval \
31 | --deepspeed examples/ds_config.json \
32 | --inference_batch_size_per_device 1 \
33 | --truncate_to_model_max_length False \
34 | --block_size 4096 \
35 | --use_flash_attention True \
36 | --do_rope_scaling True \
37 | --rope_pi_ratio 2 \
38 | --rope_ntk_ratio 4 \
39 | --metric ppl
40 | ```
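41 | 
42 | For intuition, the sketch below (a simplified illustration, not LMFlow's internal implementation) shows what the two ratios do to a standard rotary position embedding: `--rope_pi_ratio` squeezes the position indices back into the original context range, while `--rope_ntk_ratio` enlarges the rotary base so that low-frequency dimensions are stretched more than high-frequency ones:
43 | ```python
44 | import torch
45 | 
46 | def rope_angles(positions, dim, base=10000.0, pi_ratio=1.0, ntk_ratio=1.0):
47 |     # NTK scaling: enlarge the rotary base.
48 |     base = base * ntk_ratio ** (dim / (dim - 2))
49 |     inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
50 |     # Linear scaling (position interpolation): squeeze position indices by the ratio.
51 |     positions = positions.float() / pi_ratio
52 |     return torch.outer(positions, inv_freq)  # rotation angles, shape (seq_len, dim // 2)
53 | 
54 | # 4x linear scaling: positions 0..8191 are mapped into the original 0..2047 range.
55 | angles = rope_angles(torch.arange(8192), dim=128, pi_ratio=4.0)
56 | ```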
--------------------------------------------------------------------------------
/readme/flash_attn2.md:
--------------------------------------------------------------------------------
1 | # FlashAttention-2
2 | We're thrilled to announce that LMFlow now supports training and inference using **FlashAttention-2**! This cutting-edge feature will take your language modeling to the next level. To use it, simply add ``` --use_flash_attention True ``` to the corresponding bash script.
3 | Here is an example of how to use it:
4 | ```
5 | #!/bin/bash
6 | pip install flash_attn==2.0.2
7 |
8 | deepspeed --master_port=11000 \
9 | examples/chatbot.py \
10 | --deepspeed configs/ds_config_chatbot.json \
11 | --model_name_or_path LMFlow/Full-Robin-7b-v2 \
12 | --max_new_tokens 1024 \
13 | --prompt_structure "###Human: {input_text}###Assistant:" \
14 | --end_string "#" \
15 | --use_flash_attention True
16 | ```
17 |
18 | Upgrade to LMFlow now and experience the future of language modeling!
19 |
20 |
21 | ## Known Issues
22 | ### 1. `undefined symbol` error
23 | When importing the flash attention module, you may encounter `ImportError` saying `undefined symbol`:
24 | ```bash
25 | >>> import flash_attn
26 | Traceback (most recent call last):
27 | File "", line 1, in
28 | File ".../anaconda3/envs/lmflow/lib/python3.9/site-packages/flash_attn/__init__.py", line 3, in
29 | from flash_attn.flash_attn_interface import flash_attn_func
30 | File ".../anaconda3/envs/lmflow/lib/python3.9/site-packages/flash_attn/flash_attn_interface.py", line 4, in
31 | import flash_attn_2_cuda as flash_attn_cuda
32 | ImportError: .../anaconda3/envs/lmflow/lib/python3.9/site-packages/flash_attn_2_cuda.cpython-39-x86_64-linux-gnu.so: undefined symbol: _ZN2at4_ops9_pad_enum4callERKNS_6TensorEN3c108ArrayRefINS5_6SymIntEEElNS5_8optionalIdEE
33 | ```
34 | This MAY be due to an incompatibility between the PyTorch version and the flash-attention module, or to the way flash-attention was compiled. We've tested several approaches: either downgrading PyTorch or upgrading the flash-attention module works. If you still encounter this issue, please refer to [this issue](https://github.com/Dao-AILab/flash-attention/issues/451).
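35 | 
36 | Before deciding whether to downgrade PyTorch or upgrade flash-attn, it helps to confirm which versions are actually installed. The snippet below is a generic diagnostic (not an LMFlow utility); it reads the package metadata without importing `flash_attn`, since the import itself may fail with the error above:
37 | ```python
38 | from importlib.metadata import PackageNotFoundError, version
39 | 
40 | # Print installed versions; try both spellings of the flash-attn distribution name.
41 | for pkg in ("torch", "flash-attn", "flash_attn"):
42 |     try:
43 |         print(pkg, version(pkg))
44 |     except PackageNotFoundError:
45 |         pass
46 | ```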
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | packaging
2 | numpy
3 | datasets==2.14.6
4 | tokenizers>=0.13.3
5 | peft>=0.10.0
6 | torch>=2.0.1
7 | wandb
8 | deepspeed>=0.14.4
9 | sentencepiece
10 | transformers>=4.31.0
11 | cpm_kernels==1.0.11
12 | evaluate==0.4.0
13 | bitsandbytes>=0.40.0
14 | pydantic
15 | accelerate>=0.27.2
16 | einops>=0.6.1
--------------------------------------------------------------------------------
/scripts/bash.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Shell and Python scripts go here
--------------------------------------------------------------------------------
/scripts/data_preprocess/add_prompt.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved.
4 | """
5 | Adds prompt structure to a text2text dataset.
6 | """
7 | from __future__ import absolute_import
8 |
9 | import argparse
10 | import json
11 | import textwrap
12 | import sys
13 |
14 | def parse_argument(sys_argv):
15 | """Parses arguments from command line.
16 | Args:
17 | sys_argv: the list of arguments (strings) from command line.
18 | Returns:
19 | A struct whose member corresponds to the required (optional) variable.
20 | For example,
21 | ```
22 | args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
23 | args.input # 'a.txt'
24 | args.num # 10
25 | ```
26 | """
27 | parser = argparse.ArgumentParser(
28 | formatter_class=argparse.RawTextHelpFormatter)
29 |
30 | # Training parameters
31 | parser.add_argument(
32 | "--dataset_path", type=str,
33 | default=None,
34 | help=textwrap.dedent("input dataset path, reads from stdin by default")
35 | )
36 | parser.add_argument(
37 | "--output_path", type=str,
38 | default=None,
39 | help=textwrap.dedent("output dataset path, writes to stdout by default")
40 | )
41 | parser.add_argument(
42 | "--prompt_structure", type=str,
43 | default="{input}",
44 | help=textwrap.dedent("prompt structure to augment input")
45 | )
46 |
47 | # Parses from commandline
48 | args = parser.parse_args(sys_argv[1:])
49 |
50 | return args
51 |
52 |
53 | def main():
54 | args = parse_argument(sys.argv)
55 | if args.dataset_path is not None:
56 | with open(args.dataset_path, "r") as fin:
57 | data_dict = json.load(fin)
58 | else:
59 | data_dict = json.load(sys.stdin)
60 |
61 | if data_dict["type"] != "text2text":
62 | raise NotImplementedError(
63 | "only support text2text prompt augmentation"
64 | )
65 |
66 | data_dict["instances"] = [
67 | {
68 | "input": args.prompt_structure.format(input=instance["input"]),
69 | "output": instance["output"],
70 | }
71 | for instance in data_dict["instances"]
72 | ]
73 | if args.output_path is not None:
74 | with open(args.output_path, "w") as fout:
75 | json.dump(data_dict, fout, indent=4, ensure_ascii=False)
76 | else:
77 | json.dump(data_dict, sys.stdout, indent=4, ensure_ascii=False)
78 |
79 |
80 | if __name__ == "__main__":
81 | main()
82 |
--------------------------------------------------------------------------------
/scripts/data_preprocess/concat.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved.
4 | """
5 | Merges an extra dataset into current dataset.
6 | """
7 | from __future__ import absolute_import
8 |
9 | import argparse
10 | import json
11 | import textwrap
12 | import sys
13 |
14 | def parse_argument(sys_argv):
15 | """Parses arguments from command line.
16 | Args:
17 | sys_argv: the list of arguments (strings) from command line.
18 | Returns:
19 | A struct whose member corresponds to the required (optional) variable.
20 | For example,
21 | ```
22 | args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
23 | args.input # 'a.txt'
24 | args.num # 10
25 | ```
26 | """
27 | parser = argparse.ArgumentParser(
28 | formatter_class=argparse.RawTextHelpFormatter)
29 |
30 | # Training parameters
31 | parser.add_argument(
32 | "--output_path", type=str,
33 | default=None,
34 | help=textwrap.dedent("output dataset path, writes to stdout by default")
35 | )
36 | parser.add_argument(
37 | "--merge_from_path", type=str,
38 | nargs="+",
39 | help=textwrap.dedent(
40 | "dataset path of the extra dataset that will be merged"
41 | " into input dataset"
42 | )
43 | )
44 |
45 | # Parses from commandline
46 | args = parser.parse_args(sys_argv[1:])
47 |
48 | return args
49 |
50 |
51 | def main():
52 | args = parse_argument(sys.argv)
53 |
54 | if args.merge_from_path is not None:
55 | for i in range(0, len(args.merge_from_path)):
56 | with open(args.merge_from_path[i], "r") as fin:
57 | extra_data_dict = json.load(fin)
58 | if i == 0:
59 | data_dict = extra_data_dict
60 | else:
61 | if data_dict["type"] != extra_data_dict["type"]:
62 | raise ValueError(
63 | 'two dataset have different types:'
64 | f' input dataset: "{data_dict["type"]}";'
65 | f' merge from dataset: "{extra_data_dict["type"]}"'
66 | )
67 | data_dict["instances"].extend(extra_data_dict["instances"])
68 | else:
69 | raise ValueError("No merge files specified")
70 |
71 | if args.output_path is not None:
72 | with open(args.output_path, "w") as fout:
73 | json.dump(data_dict, fout, indent=4, ensure_ascii=False)
74 | else:
75 | json.dump(data_dict, sys.stdout, indent=4, ensure_ascii=False)
76 |
77 |
78 | if __name__ == "__main__":
79 | main()
80 |
--------------------------------------------------------------------------------
/scripts/data_preprocess/count.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved.
4 | """
5 | Counts number of instances in a dataset.
6 | """
7 | from __future__ import absolute_import
8 |
9 | import argparse
10 | import json
11 | import random
12 | import sys
13 | import textwrap
14 |
15 | def parse_argument(sys_argv):
16 | """Parses arguments from command line.
17 | Args:
18 | sys_argv: the list of arguments (strings) from command line.
19 | Returns:
20 | A struct whose member corresponds to the required (optional) variable.
21 | For example,
22 | ```
23 | args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
24 | args.input # 'a.txt'
25 | args.num # 10
26 | ```
27 | """
28 | parser = argparse.ArgumentParser(
29 | formatter_class=argparse.RawTextHelpFormatter)
30 |
31 | # Training parameters
32 | parser.add_argument(
33 | "--dataset_path", type=str,
34 | default=None,
35 | help="input dataset path, reads from stdin by default"
36 | )
37 |
38 | # Parses from commandline
39 | args = parser.parse_args(sys_argv[1:])
40 |
41 | return args
42 |
43 |
44 | def main():
45 | args = parse_argument(sys.argv)
46 | if args.dataset_path is not None:
47 | with open(args.dataset_path, "r") as fin:
48 | data_dict = json.load(fin)
49 | else:
50 | data_dict = json.load(sys.stdin)
51 |
52 | num_instances = len(data_dict["instances"])
53 | print(num_instances)
54 |
55 |
56 | if __name__ == "__main__":
57 | main()
58 |
--------------------------------------------------------------------------------
/scripts/data_preprocess/raw2textonly.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved.
4 | """
5 | Converts a raw text file, separated by lines, into a "text-only" formatted json.
6 | """
7 | from __future__ import absolute_import
8 |
9 | import argparse
10 | import json
11 | import textwrap
12 | import sys
13 |
14 | def parse_argument(sys_argv):
15 | """Parses arguments from command line.
16 | Args:
17 | sys_argv: the list of arguments (strings) from command line.
18 | Returns:
19 | A struct whose member corresponds to the required (optional) variable.
20 | For example,
21 | ```
22 | args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
23 | args.input # 'a.txt'
24 | args.num # 10
25 | ```
26 | """
27 | parser = argparse.ArgumentParser(
28 | formatter_class=argparse.RawTextHelpFormatter)
29 |
30 | # Training parameters
31 | parser.add_argument(
32 | "--dataset_path", type=str,
33 | default=None,
34 | help=textwrap.dedent("input dataset path, reads from stdin by default")
35 | )
36 | parser.add_argument(
37 | "--output_path", type=str,
38 | default=None,
39 | help=textwrap.dedent("output dataset path, writes to stdout by default")
40 | )
41 |
42 | # Parses from commandline
43 | args = parser.parse_args(sys_argv[1:])
44 |
45 | return args
46 |
47 |
48 | def raw2textonly(fin):
49 | """
50 | Converts raw text to text-only format.
51 |
52 | Args:
53 | fin: the input file description of the raw text file.
54 | Returns:
55 | a dict with "text-only" format.
56 | """
57 | data_dict = {
58 | "type": "text_only",
59 | "instances": [ { "text": line.strip() } for line in fin ],
60 | }
61 | return data_dict
62 |
63 |
64 | def main():
65 | args = parse_argument(sys.argv)
66 |
67 | if args.dataset_path is not None:
68 | with open(args.dataset_path, "r") as fin:
69 | data_dict = raw2textonly(fin)
70 | else:
71 | data_dict = raw2textonly(sys.stdin)
72 |
73 | if args.output_path is not None:
74 | with open(args.output_path, "w") as fout:
75 | json.dump(data_dict, fout, indent=4, ensure_ascii=False)
76 | else:
77 | json.dump(data_dict, sys.stdout, indent=4, ensure_ascii=False)
78 |
79 |
80 | if __name__ == "__main__":
81 | main()
82 |
--------------------------------------------------------------------------------
/scripts/data_preprocess/run_data_preprocess.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Run this shell script under project directory
3 |
4 | # For sample.py
5 | python scripts/data_preprocess/sample.py \
6 | --dataset_path ./data/example_dataset/train/train_50.json \
7 | --output_path ./data/example_dataset/train/train_50_sample.json \
8 | --ratio 0.5
9 |
10 | # For shuffle.py
11 | python scripts/data_preprocess/shuffle.py \
12 | --dataset_path ./data/example_dataset/train/train_50_sample.json \
13 | --output_path ./data/example_dataset/train/train_50_sample_shuffle.json
14 |
15 | # For merge.py : you can specify multiple files to merge
16 | python scripts/data_preprocess/merge.py \
17 | --dataset_path ./data/example_dataset/train/train_50.json \
18 | --merge_from_path ./data/example_dataset/train/train_50_sample_shuffle.json \
19 | ./data/example_dataset/train/train_50_sample.json \
20 | --output_path ./data/example_dataset/train/train_merge.json \
21 |
22 | # For concat.py: if you simply want to merge multiple files or a directory, use following.
23 | # You can also specify multiple files after --merge_from_path
24 | python scripts/data_preprocess/concat.py \
25 | --merge_from_path ./data/example_dataset/train/*.json \
26 | --output_path ./data/example_dataset/train/train_merge.json \
27 |
28 | # For concat_shuffle_split.py: if you simply want to merge multiple files or a directory, use following.
29 | python scripts/data_preprocess/concat_shuffle_split.py \
30 | --merge_from_path ./data/example_dataset/train/*.json \
31 | --output_path ./data/processed_dataset/ \
--------------------------------------------------------------------------------
/scripts/data_preprocess/sample.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved.
4 | """
5 | Samples a certain ratio of instances from a dataset.
6 | """
7 | from __future__ import absolute_import
8 |
9 | import argparse
10 | import json
11 | import random
12 | import sys
13 | import textwrap
14 |
15 | def parse_argument(sys_argv):
16 | """Parses arguments from command line.
17 | Args:
18 | sys_argv: the list of arguments (strings) from command line.
19 | Returns:
20 | A struct whose member corresponds to the required (optional) variable.
21 | For example,
22 | ```
23 | args = parse_argument(['main.py' '--input', 'a.txt', '--num', '10'])
24 | args.input # 'a.txt'
25 | args.num # 10
26 | ```
27 | """
28 | parser = argparse.ArgumentParser(
29 | formatter_class=argparse.RawTextHelpFormatter)
30 |
31 | # Training parameters
32 | parser.add_argument(
33 | "--dataset_path", type=str,
34 | default=None,
35 | help="input dataset path, reads from stdin by default"
36 | )
37 | parser.add_argument(
38 | "--output_path", type=str,
39 | default=None,
40 | help="output dataset path, writes to stdout by default"
41 | )
42 | parser.add_argument(
43 | "--ratio", type=float, required=True,
44 | help="sample ratio, will be floored if number of samples is not a int"
45 | )
46 | parser.add_argument(
47 | "--seed", type=int, default=42,
48 | help="pseudorandom seed"
49 | )
50 |
51 | # Parses from commandline
52 | args = parser.parse_args(sys_argv[1:])
53 |
54 | return args
55 |
56 |
57 | def main():
58 | args = parse_argument(sys.argv)
59 | if args.dataset_path is not None:
60 | with open(args.dataset_path, "r") as fin:
61 | data_dict = json.load(fin)
62 | else:
63 | data_dict = json.load(sys.stdin)
64 |
65 | random.seed(args.seed)
66 | num_instances = len(data_dict["instances"])
67 | num_sample = int(num_instances * args.ratio)
68 |
69 | data_dict["instances"] = random.sample(data_dict["instances"], num_sample)
70 |
71 | if args.output_path is not None:
72 | with open(args.output_path, "w") as fout:
73 | json.dump(data_dict, fout, indent=4, ensure_ascii=False)
74 | else:
75 | json.dump(data_dict, sys.stdout, indent=4, ensure_ascii=False)
76 |
77 |
78 | if __name__ == "__main__":
79 | main()
80 |
--------------------------------------------------------------------------------
/scripts/data_preprocess/shuffle.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2023 Statistics and Machine Learning Research Group at HKUST. All rights reserved.
4 | """
5 | Shuffles the instances in a dataset.
6 | """
7 | from __future__ import absolute_import
8 |
9 | import argparse
10 | import json
11 | import random
12 | import sys
13 | import textwrap
14 |
15 | def parse_argument(sys_argv):
16 | """Parses arguments from command line.
17 | Args:
18 | sys_argv: the list of arguments (strings) from command line.
19 | Returns:
20 | A struct whose member corresponds to the required (optional) variable.
21 | For example,
22 | ```
23 |         args = parse_argument(['main.py', '--input', 'a.txt', '--num', '10'])
24 | args.input # 'a.txt'
25 | args.num # 10
26 | ```
27 | """
28 | parser = argparse.ArgumentParser(
29 | formatter_class=argparse.RawTextHelpFormatter)
30 |
31 | # Training parameters
32 | parser.add_argument(
33 | "--dataset_path", type=str,
34 | default=None,
35 | help="input dataset path, reads from stdin by default"
36 | )
37 | parser.add_argument(
38 | "--output_path", type=str,
39 | default=None,
40 | help="output dataset path, writes to stdout by default"
41 | )
42 | parser.add_argument(
43 | "--seed", type=int, default=42,
44 | help="pseudorandom seed"
45 | )
46 |
47 | # Parses from commandline
48 | args = parser.parse_args(sys_argv[1:])
49 |
50 | return args
51 |
52 |
53 | def main():
54 | args = parse_argument(sys.argv)
55 | if args.dataset_path is not None:
56 | with open(args.dataset_path, "r") as fin:
57 | data_dict = json.load(fin)
58 | else:
59 | data_dict = json.load(sys.stdin)
60 |
61 | random.seed(args.seed)
62 | random.shuffle(data_dict["instances"])
63 |
64 | if args.output_path is not None:
65 | with open(args.output_path, "w") as fout:
66 | json.dump(data_dict, fout, indent=4, ensure_ascii=False)
67 | else:
68 | json.dump(data_dict, sys.stdout, indent=4, ensure_ascii=False)
69 |
70 |
71 | if __name__ == "__main__":
72 | main()
73 |
--------------------------------------------------------------------------------
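Because both `shuffle.py` and `sample.py` read from stdin and write to stdout when `--dataset_path` / `--output_path` are omitted, they can also be chained in a pipeline; a sketch with illustrative paths:

```bash
# Shuffle, then keep 10% of the instances, in a single pipeline.
cat ./data/example_dataset/train/train_merge.json \
    | python scripts/data_preprocess/shuffle.py --seed 42 \
    | python scripts/data_preprocess/sample.py --ratio 0.1 \
    > ./data/example_dataset/train/train_shuffled_sample.json
```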
/scripts/multimodal/README.md:
--------------------------------------------------------------------------------
1 | # MultiModal Conversation
2 | ## Download dataset
3 | We use the dataset from LLaVA to demonstrate multi-modality training.
4 | Please first download the [pretrain dataset](https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K) for pre-training. Then download [COCO 2017](https://cocodataset.org/) and the [conversation file](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K/blob/main/llava_instruct_80k.json) for finetuning.
5 | After downloading, modify the data paths in the training scripts to your own paths.
6 | ## Pretrain
7 | Run the following script for pretraining:
8 | ```
9 | bash scripts/multimodal/run_finetune_multi_modal_stage1.sh
10 | ```
11 |
12 | ## Finetune
13 | Modify the paths of the dataset and the pretrained language projection model, then run the following script:
14 | ```
15 | bash scripts/multimodal/run_finetune_multi_modal_stage2.sh
16 | ```
17 |
18 | ## Inference on CLI
19 | Run the following script for LLaVA:
20 | ```
21 | bash scripts/multimodal/run_vis_chatbot_llava.sh
22 |
23 | ```
24 |
25 | Run the following script for MiniGPT-4:
26 | ```
27 | bash scripts/multimodal/run_vis_chatbot_minigpt4.sh
28 | ```
29 |
30 | ## Inference on Gradio
31 | Run the following script for MiniGPT-4:
32 | ```
33 | bash scripts/multimodal/run_vis_chatbot_gradio_minigpt4.sh
34 | ```
--------------------------------------------------------------------------------
/scripts/multimodal/run_finetune_multi_modal_stage1.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under ${project_id} in project directory of
3 | # https://github.com/shizhediao/llm-ft
4 | # COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4
5 |
6 | # Parses arguments
7 | model_name_or_path=Salesforce/blip2-flan-t5-xxl
8 | # please download the dataset from
9 | # https://huggingface.co/datasets/liuhaotian/LLaVA-CC3M-Pretrain-595K
10 | dataset_path=./data/llava_cc3m_pretrain_595k/chat.json
11 | image_folder=./data/llava_cc3m_pretrain_595k/images
12 | output_dir=output_models/finetune_llava-336px-vicuna-7b-v1.3_stage1
13 |
14 | deepspeed_args="--master_port=12000"
15 |
16 | while [[ $# -ge 1 ]]; do
17 | key="$1"
18 | case ${key} in
19 | -m|--model_name_or_path)
20 | model_name_or_path="$2"
21 | shift
22 | ;;
23 | -d|--dataset_path)
24 | dataset_path="$2"
25 | shift
26 | ;;
27 | -o|--output_model_path)
28 | output_dir="$2"
29 | shift
30 | ;;
31 | --deepspeed_args)
32 | deepspeed_args="$2"
33 | shift
34 | ;;
35 | *)
36 | echo "error: unknown option \"${key}\"" 1>&2
37 | exit 1
38 | esac
39 | shift
40 | done
41 |
42 | if [ ! -d data/llava_cc3m_pretrain_595k ]; then
43 | cd data && ./download.sh llava_cc3m_pretrain_595k && cd -
44 | fi
45 |
46 | # Finetune
47 | exp_id=finetune
48 | project_dir=$(cd "$(dirname $0)"/..; pwd)
49 | log_dir=${project_dir}/log/${exp_id}
50 | mkdir -p ${output_dir} ${log_dir}
51 |
52 | deepspeed ${deepspeed_args} \
53 | examples/finetune_multi_modal.py \
54 | --deepspeed configs/ds_config_multimodal.json \
55 | --arch_type vision_encoder_decoder \
56 | --llava_loading True \
57 | --model_name_or_path ${model_name_or_path} \
58 | --image_encoder_name_or_path openai/clip-vit-large-patch14 \
59 | --dataset_path ${dataset_path} \
60 | --output_dir ${output_dir} --overwrite_output_dir \
61 | --image_folder ${image_folder} \
62 | --custom_vision_model True \
63 | --llm_model_name_or_path lmsys/vicuna-7b-v1.5 \
64 | --image_aspect_ratio None \
65 | --fp16 True \
66 | --gradient_accumulation_steps 4 \
67 | --per_device_train_batch_size 8 \
68 | --learning_rate 2e-3 \
69 | --weight_decay 0. \
70 | --warmup_ratio 0.03 \
71 | --lr_scheduler_type "cosine" \
72 | --run_name finetune \
73 | --validation_split_percentage 0 \
74 | --logging_steps 20 \
75 | --do_train \
76 | --ddp_timeout 72000 \
77 | --save_steps 5000 \
78 | --dataloader_num_workers 1 \
79 | --num_train_epochs 1 \
80 | --save_language_projection True \
81 | | tee ${log_dir}/train.log \
82 | 2> ${log_dir}/train.err
83 |
--------------------------------------------------------------------------------
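The stage-1 script above parses a few flags (`-m/--model_name_or_path`, `-d/--dataset_path`, `-o/--output_model_path`, `--deepspeed_args`), so the defaults can be overridden without editing the file; a sketch with illustrative values (note that `image_folder` is hardcoded in the script):

```bash
bash scripts/multimodal/run_finetune_multi_modal_stage1.sh \
    -m Salesforce/blip2-flan-t5-xxl \
    -d ./data/llava_cc3m_pretrain_595k/chat.json \
    -o output_models/my_stage1_run \
    --deepspeed_args "--master_port=12001 --include localhost:0,1"
```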
/scripts/multimodal/run_vis_chatbot_blip2.sh:
--------------------------------------------------------------------------------
1 | model=Salesforce/blip2-opt-2.7b
2 | deepspeed examples/vis_chatbot.py --model_name_or_path ${model} \
3 | --deepspeed configs/ds_config_vis_chatbot.json \
4 | --arch_type vision_encoder_decoder \
5 | --task vqa \
6 | ${@:1}
7 |
--------------------------------------------------------------------------------
/scripts/multimodal/run_vis_chatbot_gradio_minigpt4.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | model=Salesforce/blip2-flan-t5-xxl
4 |
5 | # if [ ! -f output_models/pretrained_minigpt4_7b.pth ]; then
6 | # cd output_models && ./download.sh minigpt4_7b && cd -
7 | # fi
8 | #
9 | # if [ ! -f output_models/pretrained_minigpt4_7b_converted.pth ]; then
10 | # python utils/convert_minigpt4_checkpoints.py \
11 | # --model_path output_models/pretrained_minigpt4_7b.pth \
12 | # --save_path output_models/pretrained_minigpt4_7b_converted.pth
13 | # fi
14 | #
15 | # deepspeed --master_port=11005 examples/vis_chatbot_gradio.py \
16 | # --model_name_or_path ${model} \
17 | # --deepspeed configs/ds_config_multimodal.json \
18 | # --arch_type vision_encoder_decoder \
19 | # --task vqa \
20 | # --custom_model \
21 | # --chatbot_format mini_gpt \
22 | # --prompt_structure "###Human: {input_text}###Assistant:" \
23 | # --llm_model_name_or_path LMFlow/Full-Robin-7b-v2 \
24 | # --checkpoint_path output_models/pretrained_minigpt4_7b_converted.pth \
25 | # --low_resource True \
26 | # --max_new_tokens 1024
27 |
28 | if [ ! -f output_models/pretrained_minigpt4_13b.pth ]; then
29 | cd output_models && ./download.sh minigpt4_13b && cd -
30 | fi
31 |
32 | if [ ! -f output_models/pretrained_minigpt4_13b_converted.pth ]; then
33 | python utils/convert_minigpt4_checkpoints.py \
34 | --model_path output_models/pretrained_minigpt4_13b.pth \
35 | --save_path output_models/pretrained_minigpt4_13b_converted.pth
36 | fi
37 |
38 | deepspeed --master_port=11005 examples/vis_chatbot_gradio.py \
39 | --model_name_or_path ${model} \
40 | --deepspeed configs/ds_config_vis_chatbot.json \
41 | --arch_type vision_encoder_decoder \
42 | --task vqa \
43 | --custom_model \
44 | --chatbot_type mini_gpt \
45 | --prompt_structure "###Human: {input_text}###Assistant:" \
46 | --llm_model_name_or_path LMFlow/Full-Robin-13b-v2 \
47 | --pretrained_language_projection_path output_models/pretrained_minigpt4_13b_converted.pth \
48 | --low_resource True \
49 | --max_new_tokens 1024
50 |
--------------------------------------------------------------------------------
/scripts/multimodal/run_vis_chatbot_llava.sh:
--------------------------------------------------------------------------------
1 | # Only works with GPU memory > 25 GB; 4-bit and 8-bit inference are not supported.
2 | model_name_or_path=Salesforce/blip2-flan-t5-xxl
3 | llava_pretrain_model_path="output_models/llava-v1-0719-336px-lora-merge-vicuna-13b-v1.3/"
4 | deepspeed_args="--master_port=12000"
5 |
6 | if [ ! -f ${llava_pretrain_model_path}"pytorch_model-00001-of-00003.bin" ]; then
7 | cd output_models && ./download.sh llava_vicuna13b_model_01 && cd -
8 | fi
9 |
10 | if [ ! -f ${llava_pretrain_model_path}"pytorch_model-00002-of-00003.bin" ]; then
11 | cd output_models && ./download.sh llava_vicuna13b_model_02 && cd -
12 | fi
13 |
14 | if [ ! -f ${llava_pretrain_model_path}"pytorch_model-00003-of-00003.bin" ]; then
15 | cd output_models && ./download.sh llava_vicuna13b_model_03 && cd -
16 | fi
17 |
18 |
19 | deepspeed ${deepspeed_args} \
20 | examples/vis_chatbot.py \
21 | --deepspeed configs/ds_config_vis_chatbot.json \
22 | --arch_type vision_encoder_decoder \
23 | --task vqa \
24 | --custom_model True \
25 | --chatbot_type llava \
26 | --prompt_structure '{input_text} ASSISTANT:' \
27 | --llava_loading True \
28 | --model_name_or_path ${model_name_or_path} \
29 | --image_encoder_name_or_path openai/clip-vit-large-patch14-336 \
30 | --custom_vision_model True \
31 | --llm_model_name_or_path lmsys/vicuna-13b-v1.5 \
32 | --llava_pretrain_model_path ${llava_pretrain_model_path}"*.bin" \
33 | --with_deepspeed False \
34 | --save_pretrain_model_path "output_models/lmflow_llava-v1-0719-336px-lora-merge-vicuna-13b-v1.3" \
35 | ${@:1}
36 |
37 |
--------------------------------------------------------------------------------
/scripts/multimodal/run_vis_chatbot_minigpt4.sh:
--------------------------------------------------------------------------------
1 | model=Salesforce/blip2-flan-t5-xxl
2 | llm_model_name_or_path=lmsys/vicuna-7b-v1.3
3 | deepspeed_args="--master_port=12000 --num_gpus=1"
4 |
5 | if [ ! -f output_models/pretrained_minigpt4_7b.pth ]; then
6 | cd output_models && ./download.sh minigpt4_7b && cd -
7 | fi
8 |
9 | if [ ! -f output_models/pretrained_minigpt4_7b_converted.pth ]; then
10 | python utils/convert_minigpt4_checkpoints.py \
11 | --model_path output_models/pretrained_minigpt4_7b.pth \
12 | --save_path output_models/pretrained_minigpt4_7b_converted.pth
13 | fi
14 |
15 | deepspeed ${deepspeed_args} examples/vis_chatbot.py --model_name_or_path ${model} --deepspeed configs/ds_config_vis_chatbot.json --arch_type vision_encoder_decoder --task vqa --custom_model \
16 | --chatbot_type mini_gpt \
17 | --prompt_structure "{input_text}###Assistant:" \
18 | --pretrained_language_projection_path output_models/pretrained_minigpt4_7b_converted.pth \
19 | --llm_model_name_or_path ${llm_model_name_or_path} \
20 | --low_resource True \
21 | ${@:1}
22 |
23 |
--------------------------------------------------------------------------------
/scripts/run_all_benchmark.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | help_message="./$(basename $0)"
4 | help_message+=" --model_name_or_path MODEL_NAME_OR_PATH"
5 |
6 | if [ $# -ge 1 ]; then
7 | extra_args="$@"
8 | fi
9 |
10 | model_name_or_path=""
11 | while [[ $# -ge 1 ]]; do
12 | key="$1"
13 | case ${key} in
14 | -h|--help)
15 | printf "${help_message}" 1>&2
16 |             exit 0
17 | ;;
18 | --model_name_or_path)
19 | model_name_or_path="$2"
20 | shift
21 | ;;
22 | *)
23 | # Ignores unknown options
24 | esac
25 | shift
26 | done
27 |
28 | model_name=$(echo "${model_name_or_path}" | sed "s/\//--/g")
29 | echo ${model_name}
30 |
31 | if [[ "${model_name}" = "" ]]; then
32 | echo "no model name specified" 1>&2
33 | exit 1
34 | fi
35 |
36 | log_dir=output_dir/${model_name}_lmflow_chat_nll_eval
37 | mkdir -p ${log_dir}
38 | echo "[Evaluating] Evaluate on LMFlow_chat"
39 | ./scripts/run_benchmark.sh ${extra_args} --dataset_name lmflow_chat_nll_eval | tee ${log_dir}/benchmark.log 2> ${log_dir}/benchmark.err
40 |
41 | log_dir=output_dir/${model_name}_all_nll_eval
42 | mkdir -p ${log_dir}
43 | echo "[Evaluating] Evaluate on [commonsense, wiki, instruction_following (gpt4) ] nll evaluation"
44 | ./scripts/run_benchmark.sh ${extra_args} --dataset_name all_nll_eval | tee ${log_dir}/benchmark.log 2> ${log_dir}/benchmark.err
45 |
46 | log_dir=output_dir/${model_name}_commonsense_qa_eval
47 | mkdir -p ${log_dir}
48 | echo "[Evaluating] Evaluate on commonsense QA Accuracy evaluation"
49 | ./scripts/run_benchmark.sh ${extra_args} --dataset_name commonsense_qa_eval | tee ${log_dir}/benchmark.log 2> ${log_dir}/benchmark.err
--------------------------------------------------------------------------------
/scripts/run_app.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CUDA_VISIBLE_DEVICES=0 accelerate launch --config_file configs/accelerator_singlegpu_config.yaml service/app.py \
4 | --model_name_or_path gpt2 \
5 | --torch_dtype bfloat16 \
6 | --max_new_tokens 200
--------------------------------------------------------------------------------
/scripts/run_benchmark.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$1" == "-h" -o "$1" == "--help" ]; then
4 | help_message="./$(basename $0)"
5 | help_message+=" --dataset_name DATASET_NAME"
6 | help_message+=" --model_name_or_path MODEL_NAME_OR_PATH"
7 | echo ${help_message} 1>&2
8 | exit 1
9 | fi
10 |
11 | extra_args="--dataset_name gpt4_en_eval --model_name_or_path gpt2"
12 | if [ $# -ge 1 ]; then
13 | extra_args="$@"
14 | fi
15 |
16 |
17 | CUDA_VISIBLE_DEVICES=0 \
18 | deepspeed --master_port 11001 examples/benchmarking.py \
19 | --use_ram_optimized_load 0 \
20 | --deepspeed examples/ds_config.json \
21 | --metric nll \
22 | --prompt_structure "###Human: {input}###Assistant:" \
23 | ${extra_args}
--------------------------------------------------------------------------------
/scripts/run_chatbot.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # A simple chatbot script. The chatbot's conversation memory is bounded by the
3 | # model's maximum context length, e.g. 4k tokens for Llama-2.
4 |
5 | model=gpt2
6 | lora_args=""
7 | if [ $# -ge 1 ]; then
8 | model=$1
9 | fi
10 | if [ $# -ge 2 ]; then
11 | lora_args="--lora_model_path $2"
12 | fi
13 |
14 | # --temperature 0.7 \
15 | accelerate launch --config_file configs/accelerator_multigpu_config.yaml \
16 | examples/chatbot.py \
17 | --deepspeed configs/ds_config_chatbot.json \
18 | --model_name_or_path ${model} \
19 | --use_accelerator True \
20 | --max_new_tokens 256 \
21 | --temperature 1.0 \
22 | --end_string "#" \
23 | ${lora_args}
24 |
--------------------------------------------------------------------------------
/scripts/run_chatbot_chatglm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | model=THUDM/chatglm-6b
4 | lora_args=""
5 | if [ $# -ge 1 ]; then
6 | model=$1
7 | fi
8 | if [ $# -ge 2 ]; then
9 | lora_args="--lora_model_path $2"
10 | fi
11 |
12 | CUDA_VISIBLE_DEVICES=0 \
13 | deepspeed examples/chatbot.py \
14 | --arch_type encoder_decoder \
15 | --deepspeed configs/ds_config_chatbot.json \
16 | --model_name_or_path ${model} \
17 | ${lora_args}
--------------------------------------------------------------------------------
/scripts/run_chatbot_cpu.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | model=gpt2
4 | lora_args=""
5 | if [ $# -ge 1 ]; then
6 | model=$1
7 | fi
8 | if [ $# -ge 2 ]; then
9 | lora_args="--lora_model_path $2"
10 | fi
11 |
12 | CUDA_VISIBLE_DEVICES="" \
13 | python examples/chatbot.py \
14 | --deepspeed configs/ds_config_chatbot.json \
15 | --model_name_or_path ${model} \
16 | --device "cpu" \
17 | ${lora_args}
18 |
--------------------------------------------------------------------------------
/scripts/run_detail_gpu_memory.sh:
--------------------------------------------------------------------------------
1 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 10 0 0 128 # base
2 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 10 0 1 128 # Lora
3 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 10 0 1 256 # Lora
4 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 10 1 0 128 # LISA
5 |
6 |
7 |
8 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 512 0 0 128 # base
9 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 512 0 1 128 # Lora
10 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 512 0 1 256 # Lora
11 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 512 1 0 128 # LISA
12 |
13 |
14 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 1024 0 0 128 # base
15 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 1024 0 1 128 # Lora
16 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 1024 0 1 256 # Lora
17 | python ./examples/detail_memory.py meta-llama/Llama-2-7b-hf 1024 1 0 128 # LISA
18 |
19 |
20 |
--------------------------------------------------------------------------------
/scripts/run_dpo_align.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under the project root directory.
3 |
4 | # Parses arguments
5 | model_name_or_path=meta-llama/Llama-2-7b-hf
6 | dataset_path=data/dpo-mix-7k
7 | output_dir=output_models/dpo
8 | deepspeed_args="--master_port=11000"
9 | # specify gpus/single gpu here by
10 | # `--include localhost:0,1` or `--include localhost:0`
11 |
12 | while [[ $# -ge 1 ]]; do
13 | key="$1"
14 | case ${key} in
15 | -m|--model_name_or_path)
16 | model_name_or_path="$2"
17 | shift
18 | ;;
19 | -d|--dataset_path)
20 | dataset_path="$2"
21 | shift
22 | ;;
23 | -o|--output_dir)
24 | output_dir="$2"
25 | shift
26 | ;;
27 | --deepspeed_args)
28 | deepspeed_args="$2"
29 | shift
30 | ;;
31 | *)
32 | echo "error: unknown option \"${key}\"" 1>&2
33 | exit 1
34 | esac
35 | shift
36 | done
37 | exp_id=dpo
38 | project_dir=$(cd "$(dirname $0)"/..; pwd)
39 | log_dir=${project_dir}/log/${exp_id}
40 | mkdir -p ${output_dir} ${log_dir}
41 |
42 | deepspeed ${deepspeed_args} \
43 | examples/dpo_train.py \
44 | --model_name_or_path ${model_name_or_path} \
45 | --dataset_path ${dataset_path} \
46 | --output_dir ${output_dir} \
47 | --run_name dpo \
48 | --max_steps 200 \
49 | --learning_rate 1e-6 \
50 | --use_lora 1 \
51 | --lora_r 8 \
52 | --sanity_check True \
53 |     --save_aggregated_lora 0 \
54 | --logging_steps 20 \
55 | | tee ${log_dir}/train.log \
56 | 2> ${log_dir}/train.err
57 |
--------------------------------------------------------------------------------
/scripts/run_dpov2_align.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Parses arguments
4 | run_name=dpov2_align
5 | model_name_or_path=meta-llama/Meta-Llama-3-8B-Instruct
6 | reference_model_name_or_path=meta-llama/Meta-Llama-3-8B-Instruct
7 | dataset_path=data/iterative-prompt/train
8 | eval_dataset_path=data/iterative-prompt/eval
9 | output_dir=output_models/${run_name}
10 |
11 | while [[ $# -ge 1 ]]; do
12 | key="$1"
13 | case ${key} in
14 | -r|--run_name)
15 | run_name="$2"
16 | shift
17 | ;;
18 | --model_name_or_path)
19 | model_name_or_path="$2"
20 | shift
21 | ;;
22 | --reference_model_name_or_path)
23 | reference_model_name_or_path="$2"
24 | shift
25 | ;;
26 | --dataset_path)
27 | dataset_path="$2"
28 | shift
29 | ;;
30 | --eval_dataset_path)
31 | eval_dataset_path="$2"
32 | shift
33 | ;;
34 | -o|--output_dir)
35 | output_dir="$2"
36 | shift
37 | ;;
38 | *)
39 | echo "error: unknown option \"${key}\"" 1>&2
40 | exit 1
41 | esac
42 | shift
43 | done
44 |
45 | project_dir=$(cd "$(dirname $0)"/..; pwd)
46 | log_dir=${project_dir}/log/${run_name}
47 | mkdir -p ${output_dir} ${log_dir}
48 |
49 | accelerate launch --config_file configs/accelerate_dsz3_config.yaml \
50 | examples/dpov2_train.py \
51 | --model_name_or_path ${model_name_or_path} \
52 | --reference_model_name_or_path ${reference_model_name_or_path} \
53 | --do_train True \
54 | --dataset_path ${dataset_path} \
55 | --eval_dataset_path ${eval_dataset_path} \
56 | --bf16 True \
57 | --learning_rate 5e-7 \
58 | --lr_scheduler_type cosine \
59 | --warmup_steps 100 \
60 | --optim paged_adamw_32bit \
61 | --per_device_train_batch_size 1 \
62 | --per_device_eval_batch_size 1 \
63 | --gradient_accumulation_steps 16 \
64 | --gradient_checkpointing True \
65 | --margin_scale 1.0 \
66 | --max_prompt_length 1000 \
67 | --num_train_epochs 2 \
68 | --logging_steps 2 \
69 | --save_strategy epoch \
70 | --save_steps 5000 \
71 | --evaluation_strategy steps \
72 | --eval_steps 100 \
73 | --loss_type sigmoid \
74 | --output_dir ${output_dir} \
75 | --run_name ${run_name} \
76 | --sampling_paired_method max_min \
77 | --report_to wandb \
78 | --mask_prompt True \
79 | --length_penalty 0 \
80 | | tee ${log_dir}/train.log \
81 | 2> ${log_dir}/train.err
--------------------------------------------------------------------------------
/scripts/run_evaluation.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ! -d data/MedQA-USMLE ]; then
4 | cd data && ./download.sh MedQA-USMLE && cd -
5 | fi
6 |
7 | CUDA_VISIBLE_DEVICES=0 \
8 | deepspeed examples/evaluation.py \
9 | --answer_type medmcqa \
10 | --model_name_or_path gpt2-large \
11 | --dataset_path data/MedQA-USMLE/validation \
12 | --deepspeed examples/ds_config.json \
13 | --inference_batch_size_per_device 1 \
14 | --metric accuracy
15 |
--------------------------------------------------------------------------------
/scripts/run_evaluation_accelerator.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ ! -d data/MedQA-USMLE ]; then
4 | cd data && ./download.sh MedQA-USMLE && cd -
5 | fi
6 |
7 | CUDA_VISIBLE_DEVICES=0 accelerate launch --config_file configs/accelerator_singlegpu_config.yaml examples/evaluation.py \
8 | --answer_type usmle \
9 | --model_name_or_path gpt2-large \
10 | --dataset_path data/MedQA-USMLE/validation \
11 | --use_ram_optimized_load True \
12 | --deepspeed examples/ds_config.json \
13 | --metric accuracy \
14 | --output_dir output_dir/accelerator_1_card \
15 | --inference_batch_size_per_device 1 \
16 | --use_accelerator_for_evaluator True \
17 | --torch_dtype bfloat16
18 |
--------------------------------------------------------------------------------
/scripts/run_evaluation_with_lora.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # --model_name_or_path specifies the original huggingface model
4 | # --lora_model_path specifies the model difference introduced by finetuning,
5 | # i.e. the one saved by ./scripts/run_finetune_with_lora.sh
6 |
7 | if [ ! -d data/alpaca ]; then
8 | cd data && ./download.sh alpaca && cd -
9 | fi
10 |
11 | CUDA_VISIBLE_DEVICES=0 \
12 | deepspeed examples/evaluation.py \
13 | --answer_type text \
14 | --model_name_or_path facebook/galactica-1.3b \
15 | --lora_model_path output_models/finetune_with_lora \
16 | --dataset_path data/alpaca/test \
17 | --prompt_structure "Input: {input}" \
18 | --deepspeed examples/ds_config.json \
19 | --inference_batch_size_per_device 1 \
20 | --metric accuracy
21 |
--------------------------------------------------------------------------------
/scripts/run_finetune.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under ${project_id} in project directory of
3 | # https://github.com/shizhediao/llm-ft
4 | # COMMIT: d5fecf30ba8011067b10cf51fede53a5ab6574e4
5 |
6 | # Parses arguments
7 | model_name_or_path=gpt2
8 | dataset_path=data/alpaca/train_conversation
9 | output_dir=output_models/finetune
10 | deepspeed_args="--master_port=11000"
11 | conversation_template=llama2
12 |
13 | # Safety related arguments
14 | trust_remote_code=0
15 |
16 | while [[ $# -ge 1 ]]; do
17 | key="$1"
18 | case ${key} in
19 | -m|--model_name_or_path)
20 | model_name_or_path="$2"
21 | shift
22 | ;;
23 | -d|--dataset_path)
24 | dataset_path="$2"
25 | shift
26 | ;;
27 | -o|--output_model_path)
28 | output_dir="$2"
29 | shift
30 | ;;
31 | --conversation_template)
32 | conversation_template="$2"
33 | shift
34 | ;;
35 | --deepspeed_args)
36 | deepspeed_args="$2"
37 | shift
38 | ;;
39 | --trust_remote_code)
40 | trust_remote_code="$2"
41 | shift
42 | ;;
43 | *)
44 | echo "error: unknown option \"${key}\"" 1>&2
45 | exit 1
46 | esac
47 | shift
48 | done
49 |
50 | # Finetune
51 | exp_id=finetune
52 | project_dir=$(cd "$(dirname $0)"/..; pwd)
53 | log_dir=${project_dir}/log/${exp_id}
54 | mkdir -p ${output_dir} ${log_dir}
55 |
56 | deepspeed ${deepspeed_args} \
57 | examples/finetune.py \
58 | --model_name_or_path ${model_name_or_path} \
59 | --trust_remote_code ${trust_remote_code} \
60 | --dataset_path ${dataset_path} \
61 | --output_dir ${output_dir} --overwrite_output_dir \
62 | --conversation_template ${conversation_template} \
63 | --num_train_epochs 0.01 \
64 | --learning_rate 2e-5 \
65 | --disable_group_texts 1 \
66 | --block_size 256 \
67 | --per_device_train_batch_size 1 \
68 | --deepspeed configs/ds_config_zero3.json \
69 | --fp16 \
70 | --run_name finetune \
71 | --validation_split_percentage 0 \
72 | --logging_steps 20 \
73 | --do_train \
74 | --ddp_timeout 72000 \
75 | --save_steps 5000 \
76 | --dataloader_num_workers 1 \
77 | > >(tee ${log_dir}/train.log) \
78 | 2> >(tee ${log_dir}/train.err >&2)
79 |
--------------------------------------------------------------------------------
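A typical full-finetuning invocation, overriding the defaults above via the parsed flags (model, dataset, and output paths are illustrative):

```bash
bash scripts/run_finetune.sh \
    -m meta-llama/Llama-2-7b-hf \
    -d data/alpaca/train_conversation \
    -o output_models/finetune_llama2 \
    --conversation_template llama2 \
    --deepspeed_args "--master_port=11000 --include localhost:0"
```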
/scripts/run_finetune_with_lora.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under the project root directory.
3 |
4 | # Parses arguments
5 | model_name_or_path=gpt2
6 | dataset_path=data/alpaca/train_conversation
7 | conversation_template=llama2
8 | output_dir=output_models/finetune
9 | deepspeed_args="--master_port=11000"
10 |
11 | # Safety related arguments
12 | trust_remote_code=0
13 |
14 | while [[ $# -ge 1 ]]; do
15 | key="$1"
16 | case ${key} in
17 | -m|--model_name_or_path)
18 | model_name_or_path="$2"
19 | shift
20 | ;;
21 | -d|--dataset_path)
22 | dataset_path="$2"
23 | shift
24 | ;;
25 | --conversation_template)
26 | conversation_template="$2"
27 | shift
28 | ;;
29 | -o|--output_lora_path)
30 | output_dir="$2"
31 | shift
32 | ;;
33 | --deepspeed_args)
34 | deepspeed_args="$2"
35 | shift
36 | ;;
37 | --trust_remote_code)
38 | trust_remote_code="$2"
39 | shift
40 | ;;
41 | *)
42 | echo "error: unknown option \"${key}\"" 1>&2
43 | exit 1
44 | esac
45 | shift
46 | done
47 |
48 | # Finetune
49 | exp_id=finetune_with_lora
50 | project_dir=$(cd "$(dirname $0)"/..; pwd)
51 | log_dir=${project_dir}/log/${exp_id}
52 | mkdir -p ${output_dir} ${log_dir}
53 |
54 | deepspeed ${deepspeed_args} \
55 | examples/finetune.py \
56 | --model_name_or_path ${model_name_or_path} \
57 | --trust_remote_code ${trust_remote_code} \
58 | --dataset_path ${dataset_path} \
59 | --conversation_template ${conversation_template} \
60 | --output_dir ${output_dir} --overwrite_output_dir \
61 | --num_train_epochs 0.01 \
62 | --learning_rate 1e-4 \
63 | --block_size 512 \
64 | --per_device_train_batch_size 1 \
65 | --use_lora 1 \
66 | --lora_r 8 \
67 |     --save_aggregated_lora 0 \
68 | --deepspeed configs/ds_config_zero2.json \
69 | --fp16 \
70 | --run_name ${exp_id} \
71 | --validation_split_percentage 0 \
72 | --logging_steps 20 \
73 | --do_train \
74 | --ddp_timeout 72000 \
75 | --save_steps 5000 \
76 | --dataloader_num_workers 1 \
77 | | tee ${log_dir}/train.log \
78 | 2> ${log_dir}/train.err
79 |
--------------------------------------------------------------------------------
/scripts/run_finetune_with_qlora.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under the project root directory.
3 |
4 | # Parses arguments
5 | model_name_or_path=meta-llama/Llama-2-13b-hf
6 | dataset_path=data/alpaca/train_conversation
7 | conversation_template=llama2
8 | output_dir=output_models/finetune
9 | deepspeed_args="--master_port=11000"
10 |
11 | # Safety related arguments
12 | trust_remote_code=0
13 |
14 | while [[ $# -ge 1 ]]; do
15 | key="$1"
16 | case ${key} in
17 | -m|--model_name_or_path)
18 | model_name_or_path="$2"
19 | shift
20 | ;;
21 | -d|--dataset_path)
22 | dataset_path="$2"
23 | shift
24 | ;;
25 | --conversation_template)
26 | conversation_template="$2"
27 | shift
28 | ;;
29 | -o|--output_model_path)
30 | output_dir="$2"
31 | shift
32 | ;;
33 | --deepspeed_args)
34 | deepspeed_args="$2"
35 | shift
36 | ;;
37 | --trust_remote_code)
38 | trust_remote_code="$2"
39 | shift
40 | ;;
41 | *)
42 | echo "error: unknown option \"${key}\"" 1>&2
43 | exit 1
44 | esac
45 | shift
46 | done
47 |
48 | # Finetune
49 | exp_id=finetune_with_lora
50 | project_dir=$(cd "$(dirname $0)"/..; pwd)
51 | log_dir=${project_dir}/log/${exp_id}
52 | mkdir -p ${output_dir} ${log_dir}
53 |
54 | deepspeed ${deepspeed_args} \
55 | examples/finetune.py \
56 | --model_name_or_path ${model_name_or_path} \
57 | --trust_remote_code ${trust_remote_code} \
58 | --dataset_path ${dataset_path} \
59 | --conversation_template ${conversation_template} \
60 | --output_dir ${output_dir} --overwrite_output_dir \
61 | --num_train_epochs 0.01 \
62 | --learning_rate 1e-4 \
63 | --block_size 512 \
64 | --per_device_train_batch_size 1 \
65 | --use_qlora 1 \
66 | --save_aggregated_lora 0 \
67 | --deepspeed configs/ds_config_zero2.json \
68 | --fp16 \
69 | --run_name ${exp_id} \
70 | --validation_split_percentage 0 \
71 | --logging_steps 20 \
72 | --do_train \
73 | --ddp_timeout 72000 \
74 | --save_steps 5000 \
75 | --dataloader_num_workers 1 \
76 | | tee ${log_dir}/train.log \
77 | 2> ${log_dir}/train.err
--------------------------------------------------------------------------------
/scripts/run_inference.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # An interactive inference script without context history, i.e. the chatbot
3 | # won't have conversation memory.
4 |
5 | model=gpt2
6 | lora_args=""
7 | if [ $# -ge 1 ]; then
8 | model=$1
9 | fi
10 | if [ $# -ge 2 ]; then
11 | lora_args="--lora_model_path $2"
12 | fi
13 |
14 | accelerate launch --config_file configs/accelerator_multigpu_config.yaml \
15 | examples/inference.py \
16 | --deepspeed configs/ds_config_chatbot.json \
17 | --model_name_or_path ${model} \
18 | --use_accelerator True \
19 | --max_new_tokens 256 \
20 | --temperature 1.0 \
21 | ${lora_args}
22 |
--------------------------------------------------------------------------------
/scripts/run_inference_multimodal_model.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | model="Salesforce/blip-image-captioning-base"
4 | lora_args=""
5 | if [ $# -ge 1 ]; then
6 | model=$1
7 | fi
8 | if [ $# -ge 2 ]; then
9 | lora_args="--lora_model_path $2"
10 | fi
11 |
12 | CUDA_VISIBLE_DEVICES=0 \
13 | deepspeed examples/inference.py \
14 | --deepspeed configs/ds_config_multimodal.json \
15 | --model_name_or_path ${model} \
16 | --arch_type vision_encoder_decoder \
17 | ${lora_args}
18 |
--------------------------------------------------------------------------------
/scripts/run_iterative_dpo.sh:
--------------------------------------------------------------------------------
1 | python examples/iterative_dpo_train.py configs/iterative_dpo.yaml
--------------------------------------------------------------------------------
/scripts/run_merge_lora.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Parses arguments
4 | model_name_or_path=gpt2
5 | lora_model_path=output_models/lora
6 | output_model_path=output_models/merge_lora
7 | device=cpu
8 |
9 | # DeepSpeed arguments (only relevant when --device is gpu)
10 | deepspeed_args="--master_port=11000"
11 |
12 | while [[ $# -ge 1 ]]; do
13 | key="$1"
14 | case ${key} in
15 | --model_name_or_path)
16 | model_name_or_path="$2"
17 | shift
18 | ;;
19 | --lora_model_path)
20 | lora_model_path="$2"
21 | shift
22 | ;;
23 | --output_model_path)
24 | output_model_path="$2"
25 | shift
26 | ;;
27 | --device)
28 | device="$2"
29 | shift
30 | ;;
31 | --deepspeed_args)
32 | deepspeed_args="$2"
33 | shift
34 | ;;
35 | *)
36 | echo "error: unknown option \"${key}\"" 1>&2
37 | exit 1
38 | esac
39 | shift
40 | done
41 |
42 |
43 | if [ ${device} == "cpu" ]; then
44 | python examples/merge_lora.py \
45 | --model_name_or_path ${model_name_or_path} \
46 | --lora_model_path ${lora_model_path} \
47 | --output_model_path ${output_model_path} \
48 | --device ${device} \
49 | --ds_config configs/ds_config_eval.json
50 | elif [ ${device} == "gpu" ]; then
51 | echo "Error: Merging LoRA weights using gpu not supported yet. Please use cpu."
52 | else
53 | echo "Error: Unknown device \"${device}\"" 1>&2
54 | exit 1
55 | fi
56 |
--------------------------------------------------------------------------------
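For example, merging the adapter produced by `run_finetune_with_lora.sh` back into its base model (CPU is currently the only supported device; paths are illustrative):

```bash
bash scripts/run_merge_lora.sh \
    --model_name_or_path gpt2 \
    --lora_model_path output_models/finetune_with_lora \
    --output_model_path output_models/merge_lora \
    --device cpu
```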
/scripts/run_multistage_finetune.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under the project root directory.
3 |
4 | deepspeed_args="--master_port=11000" # Default argument
5 | if [ $# -ge 1 ]; then
6 | deepspeed_args="$1"
7 | fi
8 |
9 | exp_id=multistage_finetune
10 | project_dir=$(cd "$(dirname $0)"/..; pwd)
11 | output_dir=${project_dir}/output_models/${exp_id}
12 | log_dir=${project_dir}/log/${exp_id}
13 | dataset_path="${project_dir}/data/example_dataset/train"
14 | if [ ! -d ${dataset_path} ]; then
15 | cd data && ./download.sh example_dataset && cd -
16 | fi
17 |
18 | mkdir -p ${output_dir} ${log_dir}
19 |
20 | deepspeed ${deepspeed_args} \
21 | examples/multistage_finetune.py \
22 | --num_stages_per_epoch 1 \
23 | --run_name ${exp_id} \
24 | --model_name_or_path facebook/galactica-1.3b \
25 | --dataset_path ${dataset_path} \
26 | --output_dir ${output_dir} --overwrite_output_dir \
27 | --num_train_epochs 3 \
28 | --learning_rate 1e-3 \
29 | --block_size 512 \
30 | --per_device_train_batch_size 2 \
31 | --use_lora 1 \
32 | --lora_r 8 \
33 | --save_aggregated_lora 1 \
34 | --deepspeed configs/ds_config_zero2.json \
35 | --bf16 \
36 | --run_name finetune_with_lora \
37 | --validation_split_percentage 0 \
38 | --logging_steps 20 \
39 | --do_train \
40 | --ddp_timeout 72000 \
41 | --save_steps 5000 \
42 | --dataloader_num_workers 1 \
43 | | tee ${log_dir}/train.log \
44 | 2> ${log_dir}/train.err
45 |
--------------------------------------------------------------------------------
/scripts/run_raft_align.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Please run this script under project directory.
3 |
4 | deepspeed_args="--master_port=11110" # Default argument
5 | if [ $# -ge 1 ]; then
6 | deepspeed_args="$1"
7 | fi
8 |
9 | exp_id=raft_align
10 | project_dir=$(cd "$(dirname $0)"/..; pwd)
11 | output_dir=${project_dir}/output_models/${exp_id}
12 | log_dir=${project_dir}/log/${exp_id}
13 |
14 | if [ ! -d data/hh_rlhf ]; then
15 | cd data && ./download.sh hh_rlhf && cd -
16 | fi
17 |
18 | mkdir -p ${output_dir} ${log_dir}
19 |
20 | export PYTHONPATH=.
21 | deepspeed ${deepspeed_args} \
22 | examples/raft_align.py \
23 | --model_name_or_path gpt2 \
24 | --num_raft_iteration 20 \
25 | --learning_rate 2e-5 \
26 | --lr_scheduler_type "constant" \
27 | --bf16 \
28 | --deepspeed configs/ds_config_zero2.json \
29 | --dataset_path ${project_dir}/data/hh_rlhf/rlhf_prompt \
30 | --output_reward_path ${project_dir}/tmp/raft_aligner/reward.txt \
31 | --output_dir ${output_dir} --overwrite_output_dir \
32 | --run_name ${exp_id} \
33 | --num_train_epochs 4 \
34 | --per_device_train_batch_size 1 \
35 | --per_device_eval_batch_size 1 \
36 | --validation_split_percentage 0 \
37 | --logging_steps 1 \
38 | --do_train \
39 | --ddp_timeout 72000 \
40 | --save_steps 7777 \
41 | --dataloader_num_workers 1 \
42 | --preprocessing_num_workers 12 \
43 | --inference_batch_size_per_device 1 \
44 | --collection_strategy "local" \
45 | --raft_batch_size 1024 \
46 | --output_min_length 96 \
47 | --top_reward_percentage 0.125 \
48 | | tee ${log_dir}/raft_align.log \
49 | 2> ${log_dir}/raft_align.err
50 |
--------------------------------------------------------------------------------
/scripts/run_rm_inference.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
3 |
4 | # Parses arguments
5 | run_name=rm_inference
6 | model_name_or_path=sfairXC/FsfairX-LLaMA3-RM-v0.1
7 | dataset_path=data/alpaca/test
8 | output_dir=data/rm_inference_results
9 | output_file_name=results.json
10 | conversation_template=llama3
11 |
12 | # Safety related arguments
13 | trust_remote_code=0
14 |
15 | while [[ $# -ge 1 ]]; do
16 | key="$1"
17 | case ${key} in
18 | -r|--run_name)
19 | run_name="$2"
20 | shift
21 | ;;
22 | -m|--model_name_or_path)
23 | model_name_or_path="$2"
24 | shift
25 | ;;
26 | -d|--dataset_path)
27 | dataset_path="$2"
28 | shift
29 | ;;
30 | --conversation_template)
31 | conversation_template="$2"
32 | shift
33 | ;;
34 | --output_dir)
35 | output_dir="$2"
36 | shift
37 | ;;
38 | --output_file_name)
39 | output_file_name="$2"
40 | shift
41 | ;;
42 | --trust_remote_code)
43 | trust_remote_code="$2"
44 | shift
45 | ;;
46 | *)
47 | echo "error: unknown option \"${key}\"" 1>&2
48 | exit 1
49 | esac
50 | shift
51 | done
52 |
53 | # inference
54 | project_dir=$(cd "$(dirname $0)"/..; pwd)
55 | log_dir=${project_dir}/log/${run_name}
56 | output_file_path=${output_dir}/${run_name}/${output_file_name}
57 | mkdir -p ${output_dir}/${run_name} ${log_dir}
58 |
59 | accelerate launch --config_file configs/accelerator_multigpu_config.yaml \
60 | examples/rm_inference.py \
61 | --trust_remote_code ${trust_remote_code} \
62 | --model_name_or_path ${model_name_or_path} \
63 | --arch_type text_regression \
64 | --use_accelerator True \
65 | --block_size 4096 \
66 | --inference_batch_size 16 \
67 | --dataset_path ${dataset_path} \
68 | --overwrite_cache True \
69 | --conversation_template ${conversation_template} \
70 | --preprocessing_num_workers 16 \
71 | --save_results True \
72 | --results_path ${output_file_path} \
73 | 2>&1 | tee ${log_dir}/rm_inference.log
--------------------------------------------------------------------------------
/scripts/run_tool.sh:
--------------------------------------------------------------------------------
1 | model="gorilla-llm/gorilla-7b-hf-delta-v1"
2 | python examples/tool_inference.py \
3 |     --model ${model}
--------------------------------------------------------------------------------
/scripts/run_unittest.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python -m unittest discover
4 |
--------------------------------------------------------------------------------
/scripts/run_vllm_inference.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
3 |
4 | # Parses arguments
5 | run_name=vllm_inference
6 | model_name_or_path='Qwen/Qwen2-0.5B'
7 | dataset_path=data/alpaca/test_conversation
8 | output_dir=data/inference_results
9 | output_file_name=results.json
10 | apply_chat_template=True
11 |
12 | # Safety related arguments
13 | trust_remote_code=0
14 |
15 | while [[ $# -ge 1 ]]; do
16 | key="$1"
17 | case ${key} in
18 | -r|--run_name)
19 | run_name="$2"
20 | shift
21 | ;;
22 | -m|--model_name_or_path)
23 | model_name_or_path="$2"
24 | shift
25 | ;;
26 | -d|--dataset_path)
27 | dataset_path="$2"
28 | shift
29 | ;;
30 | --output_dir)
31 | output_dir="$2"
32 | shift
33 | ;;
34 | --output_file_name)
35 | output_file_name="$2"
36 | shift
37 | ;;
38 | --apply_chat_template)
39 | apply_chat_template="$2"
40 | shift
41 | ;;
42 | --trust_remote_code)
43 | trust_remote_code="$2"
44 | shift
45 | ;;
46 | *)
47 | echo "error: unknown option \"${key}\"" 1>&2
48 | exit 1
49 | esac
50 | shift
51 | done
52 |
53 | # inference
54 | project_dir=$(cd "$(dirname $0)"/..; pwd)
55 | log_dir=${project_dir}/log/${run_name}
56 | output_file_path=${output_dir}/${run_name}/${output_file_name}
57 | mkdir -p ${output_dir}/${run_name} ${log_dir}
58 |
59 | python examples/vllm_inference.py \
60 | --use_vllm True \
61 | --trust_remote_code ${trust_remote_code} \
62 | --model_name_or_path ${model_name_or_path} \
63 | --dataset_path ${dataset_path} \
64 | --preprocessing_num_workers 16 \
65 | --random_seed 42 \
66 | --apply_chat_template ${apply_chat_template} \
67 | --num_output_sequences 2 \
68 | --use_beam_search False \
69 | --temperature 1.0 \
70 | --top_p 0.9 \
71 | --max_new_tokens 1024 \
72 | --save_results True \
73 | --results_path ${output_file_path} \
74 | --enable_decode_inference_result False \
75 | --vllm_gpu_memory_utilization 0.95 \
76 | --vllm_tensor_parallel_size 2 \
77 | --enable_distributed_vllm_inference False \
78 | 2>&1 | tee ${log_dir}/vllm_inference.log
--------------------------------------------------------------------------------
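A typical invocation overriding the default model and output location via the parsed flags (values are illustrative; note the script defaults to `--vllm_tensor_parallel_size 2`, i.e. two GPUs):

```bash
bash scripts/run_vllm_inference.sh \
    -m Qwen/Qwen2-0.5B \
    -d data/alpaca/test_conversation \
    --output_dir data/inference_results \
    --output_file_name qwen2_results.json \
    --apply_chat_template True
```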
/scripts/speculative_decoding/README.md:
--------------------------------------------------------------------------------
1 | # Speculative Decoding
2 | ## Introduction
3 | [Speculative Decoding (Ref: arXiv:2211.17192v2)](https://arxiv.org/abs/2211.17192) is now available and can be tried via:
4 | ```bash
5 | python ./examples/speculative_inference.py \
6 |     --model <your_model_name_or_path> \
7 |     --draft_model <your_draft_model_name_or_path> \
8 |     --temperature <your_temperature> \
9 |     --gamma <your_gamma> \
10 |     --max_new_tokens <your_max_new_tokens> \
11 |     --gpu <your_gpu_id>
12 | ```
13 | For example,
14 | ```bash
15 | python ./examples/speculative_inference.py \
16 |     --model gpt2-xl \
17 |     --draft_model gpt2 \
18 |     --temperature 0.3 \
19 |     --gamma 5 \
20 |     --max_new_tokens 512 \
21 |     --gpu 0
22 | ```
23 | Another example,
24 | ```bash
25 | python ./examples/speculative_inference.py \
26 |     --model /home/eric/Documents/models/gpt2-xl \
27 |     --draft_model /home/eric/Documents/models/gpt2 \
28 |     --temperature 0 \
29 |     --gamma 3 \
30 |     --max_new_tokens 1024 \
31 |     --gpu 7
32 | ```
33 | ## Parameter Instruction
34 | `model`, `draft_model`
35 | - Hugging Face model name or locally cached model path.
36 | - Currently only supports Hugging Face decoder-only models.
37 | - `model` refers to the target model (i.e., the large model you want to accelerate) in the paper.
38 | - `draft_model` refers to the draft model in the paper.
39 |
40 | `temperature`
41 | - Temperature for sampling. When temperature <= 1e-6, argmax (greedy) sampling is used.
42 |
43 | `gamma`
44 | - Number of tokens that the draft model will generate at each step. See the paper for more details.
45 |
46 | `max_new_tokens`
47 | - Maximum number of tokens that the speculative inference will generate.
48 | - TODO: currently the speculative decoding will always generate `max_new_tokens` tokens. We will add a `stop_token` in the future.
49 |
50 | `gpu`
51 | - GPU id; currently speculative inference only supports a single GPU.
52 |
53 | ## Experiments
54 | We tested speculative inference using the first 100 inputs from the Alpaca test dataset as prompts. With `model=gpt2-xl`, `draft_model=gpt2`, `temperature=0.`, and `max_new_tokens=512`, we observed the following acceleration:
55 |
56 | |gamma|speedup (inference time)|speedup (number of forwards)|
57 | |--|--|--|
58 | |1|1.75x|1.96x|
59 | |2|2.29x|2.89x|
60 | |3|2.71x|3.77x|
61 | |4|3.06x|4.63x|
62 | |5|3.35x|5.44x|
63 | |6|3.65x|6.23x|
64 | |7|3.82x|6.94x|
65 | |8|3.96x|7.64x|
66 | |9|4.05x|8.33x|
67 | |10|4.14x|9.00x|
68 |
69 | Note that the speedup may be overestimated. When `temperature=0`, `gpt2-xl` and `gpt2` tend to generate duplicated tokens as the number of tokens generated increases, thus making the target model more likely to accept the draft model's output.
--------------------------------------------------------------------------------
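For context on why the speedup grows with `gamma`: in the cited paper (arXiv:2211.17192), each draft token $x \sim q(\cdot)$ proposed by the draft model is accepted with probability

$$
\min\!\left(1, \frac{p(x)}{q(x)}\right),
$$

and on the first rejection a replacement token is drawn from the normalized residual $\mathrm{norm}\big(\max(0,\, p(x) - q(x))\big)$, which keeps the output distribution identical to sampling from the target model $p$ alone. This is background from the referenced paper, not code in this repository.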
/scripts/tools/print_model_architecture.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #coding=utf-8
3 | import argparse
4 | import sys
5 | from transformers import AutoModel
6 |
7 | def parse_argument(sys_argv):
8 | parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
9 | parser.add_argument("--model_name_or_path", type=str, default='gpt2')
10 | args = parser.parse_args(sys_argv[1:])
11 | return args
12 |
13 | def main():
14 | args = parse_argument(sys.argv)
15 | model_name = args.model_name_or_path
16 | model = AutoModel.from_pretrained(model_name)
17 |
18 | print(model.config)
19 | print(model)
20 |
21 | if __name__ == "__main__":
22 | main()
23 |
--------------------------------------------------------------------------------
/scripts/vocab_extension/README.md:
--------------------------------------------------------------------------------
1 | # Vocab Extension
2 | ## Train & Merge Tokenizer
3 | To automatically convert data, train a SentencePiece tokenizer, and merge the tokenizer, you can run the following script:
4 | ```
5 | bash scripts/vocab_extension/train_merge_tokenizer.sh
6 | ```
7 | Alternatively, you can run each of the three steps separately:
8 |
9 | ## Convert JSON Data to TXT
10 | To convert JSON data to TXT for sentencepiece tokenizer training, run:
11 | ```
12 | bash scripts/vocab_extension/convert_json_to_txt.sh
13 | ```
14 | ## Train SentencePiece Tokenizer
15 | To train a SentencePiece tokenizer, run:
16 | ```
17 | bash scripts/vocab_extension/train_tokenizer.sh
18 | ```
19 | ## Merge the New Tokenizer with the Original One
20 | To merge a new tokenizer with the original one, run:
21 | ```
22 | bash scripts/vocab_extension/merge_tokenizer.sh
23 | ```
--------------------------------------------------------------------------------
/scripts/vocab_extension/convert_json_to_txt.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cd data && bash download.sh wiki_zh_eval && cd -
4 |
5 | python utils/convert_json_to_txt.py --dataset_path ./data/wiki_zh_eval \
6 | --output_path ./data/wiki_zh_eval/converted_data.txt \
7 | --overwrite True
--------------------------------------------------------------------------------
/scripts/vocab_extension/merge_tokenizer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | mkdir -p ./output_models/merged_tokenizer
3 | python utils/merge_tokenizer.py --tokenizer_dir openlm-research/open_llama_3b \
4 | --chinese_sp_model_file ./output_models/new_tokenizer/example.model \
5 |   --output_dir ./output_models/merged_tokenizer
--------------------------------------------------------------------------------
/scripts/vocab_extension/train_merge_tokenizer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # download data
4 | cd data && bash download.sh wiki_zh_eval && cd -
5 |
6 | # convert json to txt for sentencepiece
7 | python utils/convert_json_to_txt.py --dataset_path ./data/wiki_zh_eval \
8 | --output_path ./data/wiki_zh_eval/converted_data.txt \
9 | --overwrite True
10 |
11 | # train a new tokenizer
12 | mkdir -p ./output_models/new_tokenizer
13 | python utils/train_tokenizer.py --dataset_path ./data/wiki_zh_eval/converted_data.txt \
14 | --model_type bpe \
15 | --output_dir ./output_models/new_tokenizer \
16 | --user_defined_symbols 0,1,2,3,4,5,6,7,8,9,% \
17 | --vocab_size 20000 \
18 | --max_sentencepiece_length 4
19 |
20 | # merge the new tokenizer with the old one
21 | mkdir -p ./output_models/merged_tokenizer
22 | python utils/merge_tokenizer.py --chinese_sp_model_file ./output_models/new_tokenizer/example.model \
23 | --tokenizer_dir openlm-research/open_llama_3b \
24 | --output_dir ./output_models/merged_tokenizer
--------------------------------------------------------------------------------
/scripts/vocab_extension/train_tokenizer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | mkdir -p ./output_models/new_tokenizer
3 | python utils/train_tokenizer.py --dataset_path ./data/wiki_zh_eval/converted_data.txt \
4 | --model_type bpe \
5 | --output_dir ./output_models/new_tokenizer \
6 | --user_defined_symbols 0,1,2,3,4,5,6,7,8,9,% \
7 | --vocab_size 20000 \
8 | --max_sentencepiece_length 4
--------------------------------------------------------------------------------
/service/static/assets/background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/service/static/assets/background.png
--------------------------------------------------------------------------------
/service/static/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/service/static/assets/logo.png
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | from setuptools import find_packages
3 | from setuptools import setup
4 |
5 | folder = os.path.dirname(__file__)
6 | version_path = os.path.join(folder, "src", "lmflow", "version.py")
7 |
8 | __version__ = None
9 | with open(version_path) as f:
10 | exec(f.read(), globals())
11 |
12 | req_path = os.path.join(folder, "requirements.txt")
13 | install_requires = []
14 | if os.path.exists(req_path):
15 | with open(req_path) as fp:
16 | install_requires = [line.strip() for line in fp]
17 |
18 | extra_require = {
19 | "multimodal": ["Pillow"],
20 | "vllm": ["vllm>=0.4.3"],
21 | "ray": ["ray>=2.22.0"],
22 | "gradio": ["gradio"],
23 | "flask": ["flask", "flask_cors"],
24 | "flash_attn": ["flash-attn>=2.0.2"],
25 | "trl": ["trl==0.8.0"]
26 | }
27 |
28 | readme_path = os.path.join(folder, "README.md")
29 | readme_contents = ""
30 | if os.path.exists(readme_path):
31 | with open(readme_path, encoding="utf-8") as fp:
32 | readme_contents = fp.read().strip()
33 |
34 | setup(
35 | name="lmflow",
36 | version=__version__,
37 | description="LMFlow: Large Model Flow.",
38 | author="The LMFlow Team",
39 | long_description=readme_contents,
40 | long_description_content_type="text/markdown",
41 | package_dir={"": "src"},
42 | packages=find_packages("src"),
43 | package_data={},
44 | install_requires=install_requires,
45 | extras_require=extra_require,
46 | classifiers=[
47 |         "Intended Audience :: Science/Research",
48 | "Topic :: Scientific/Engineering :: Artificial Intelligence",
49 | "Programming Language :: Python :: 3.9",
50 | "Programming Language :: Python :: 3.10",
51 | ],
52 |     python_requires=">=3.9",
53 | )
54 |
55 | # optionals
56 | # lm-eval==0.3.0
--------------------------------------------------------------------------------
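Given the `extras_require` groups defined above, optional dependencies can be pulled in at install time; for example, with an editable install from the repository root:

```bash
# Core package only
pip install -e .
# With optional dependency groups from extras_require
pip install -e ".[vllm,flash_attn]"
```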
/src/lmflow/__init__.py:
--------------------------------------------------------------------------------
1 | from .version import __version__ as internal_version
2 |
3 | __version__ = internal_version
4 |
5 | from transformers.utils import check_min_version
6 | from transformers.utils.versions import require_version
7 |
8 | from lmflow import args, datasets, models, pipeline, utils
9 |
10 | # Will raise an error if the minimal version of Transformers is not installed. Remove at your own risk.
11 | check_min_version("4.27.0.dev0")
12 |
13 | require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
--------------------------------------------------------------------------------
/src/lmflow/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | """This Python code defines a class Dataset with methods for initializing, loading,
2 | and manipulating datasets from different backends such as Hugging Face and JSON.
3 |
4 | The `Dataset` class includes methods for loading datasets from a dictionary and a Hugging
5 | Face dataset, mapping datasets, and retrieving the backend dataset and arguments.
6 | """
7 | from lmflow.utils.versioning import is_multimodal_available
8 |
9 |
10 | from lmflow.datasets.dataset import Dataset
11 | if is_multimodal_available():
12 | from lmflow.datasets.multi_modal_dataset import CustomMultiModalDataset
13 |
--------------------------------------------------------------------------------
/src/lmflow/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/src/lmflow/models/__init__.py
--------------------------------------------------------------------------------
/src/lmflow/models/auto_model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """Automatically get correct model type.
4 | """
5 |
6 | from lmflow.models.hf_decoder_model import HFDecoderModel
7 | from lmflow.models.hf_text_regression_model import HFTextRegressionModel
8 | from lmflow.models.hf_encoder_decoder_model import HFEncoderDecoderModel
9 |
10 | class AutoModel:
11 |
12 | @classmethod
13 |     def get_model(cls, model_args, *args, **kwargs):
14 | arch_type = model_args.arch_type
15 | if arch_type == "decoder_only":
16 | return HFDecoderModel(model_args, *args, **kwargs)
17 | elif arch_type == "text_regression":
18 | return HFTextRegressionModel(model_args, *args, **kwargs)
19 | elif arch_type == "encoder_decoder" or \
20 | arch_type == "vision_encoder_decoder":
21 | return HFEncoderDecoderModel(model_args, *args, **kwargs)
22 | else:
23 | raise NotImplementedError(
24 | f"model architecture type \"{arch_type}\" is not supported"
25 | )
26 |
--------------------------------------------------------------------------------
/src/lmflow/models/base_model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """Base model class.
4 | """
5 |
6 | from abc import ABC
7 |
8 |
9 | class BaseModel(ABC):
10 |
11 | def __init__(self, *args, **kwargs):
12 | pass
13 |
--------------------------------------------------------------------------------
/src/lmflow/models/decoder_model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """A one-line summary of the module or program, terminated by a period.
4 |
5 | Leave one blank line. The rest of this docstring should contain an
6 | overall description of the module or program. Optionally, it may also
7 | contain a brief description of exported classes and functions and/or usage
8 | examples.
9 |
10 | Typical usage example:
11 |
12 | foo = ClassFoo()
13 | bar = foo.FunctionBar()
14 | """
15 |
16 | from lmflow.models.base_model import BaseModel
17 |
18 |
19 | class DecoderModel(BaseModel):
20 |
21 | def __init__(self, *args, **kwargs):
22 | pass
23 |
--------------------------------------------------------------------------------
/src/lmflow/models/encoder_decoder_model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """Base class for encoder-decoder models."""
4 |
5 | from lmflow.models.base_model import BaseModel
6 |
7 |
8 | class EncoderDecoderModel(BaseModel):
9 |
10 |     def __init__(self, *args, **kwargs):
11 |         pass
--------------------------------------------------------------------------------
/src/lmflow/models/interfaces/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/src/lmflow/models/interfaces/__init__.py
--------------------------------------------------------------------------------
/src/lmflow/models/interfaces/tunable.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """Tunable class
4 | """
5 |
6 | from abc import ABC
7 |
8 |
9 | class Tunable(ABC):
10 | pass
11 |
--------------------------------------------------------------------------------
/src/lmflow/models/regression_model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """General regression model."""
4 |
5 | from lmflow.models.base_model import BaseModel
6 |
7 |
8 | class RegressionModel(BaseModel):
9 |
10 | def __init__(self, *args, **kwargs):
11 | pass
12 |
--------------------------------------------------------------------------------
/src/lmflow/models/text_regression_model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """
4 | A model maps "text_only" data to float.
5 | """
6 |
7 | from lmflow.models.regression_model import RegressionModel
8 | from lmflow.datasets.dataset import Dataset
9 |
10 |
11 | class TextRegressionModel(RegressionModel):
12 | r"""
13 | Initializes a TextRegressionModel instance.
14 |
15 | Parameters
16 | ------------
17 |
18 | model_args :
19 | Model arguments such as model name, path, revision, etc.
20 |
21 | args : Optional.
22 | Positional arguments.
23 |
24 | kwargs : Optional.
25 | Keyword arguments.
26 | """
27 |
28 | def __init__(
29 | self,
30 | model_args,
31 | *args,
32 | **kwargs
33 | ):
34 | """
35 | Initializes a TextRegressionModel instance.
36 | :param model_args: dictionary with model arguments such as model name, path, revision, etc.
37 | """
38 | self.inference_func = None
39 |
40 |
41 | def register_inference_function(self, inference_func):
42 | """
43 |         Registers an inference function for the regression model.
44 | """
45 | self.inference_func = inference_func
46 |
47 |
48 | def inference(self, inputs: Dataset):
49 | """
50 | Gets regression results of a given dataset.
51 |
52 |         :param inputs: Dataset object; only the "text_only" type is accepted.
53 | """
54 | if self.inference_func is not None:
55 | return self.inference_func(inputs)
56 | else:
57 | pass
58 |
--------------------------------------------------------------------------------
/src/lmflow/models/vision_encoder/__init__.py:
--------------------------------------------------------------------------------
1 | from .clip_encoder import build_vision_tower
--------------------------------------------------------------------------------
/src/lmflow/optim/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/src/lmflow/optim/__init__.py
--------------------------------------------------------------------------------
/src/lmflow/optim/adadelta.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import torch
5 | from torch.optim.optimizer import Optimizer
6 |
7 | class Adadelta(Optimizer):
8 | def __init__(self, params, lr=1.0, rho=0.95, eps=1e-6):
9 | defaults = dict(lr=lr, rho=rho, eps=eps)
10 | super(Adadelta, self).__init__(params, defaults)
11 |
12 | def step(self, closure=None):
13 | loss = None
14 | if closure is not None:
15 | loss = closure()
16 |
17 | for group in self.param_groups:
18 | for p in group['params']:
19 | if p.grad is None:
20 | continue
21 | grad = p.grad.data
22 | state = self.state[p]
23 |
24 | if len(state) == 0:
25 | state['step'] = 0
26 | state['square_avg'] = torch.zeros_like(p.data)
27 | state['acc_delta'] = torch.zeros_like(p.data)
28 |
29 | square_avg, acc_delta = state['square_avg'], state['acc_delta']
30 | rho, eps = group['rho'], group['eps']
31 |
32 | state['step'] += 1
33 |
34 |                 square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho)
35 |
36 | std = square_avg.add(eps).sqrt_()
37 | delta = acc_delta.add(eps).sqrt_().div_(std).mul_(grad)
38 |
39 | p.data.add_(-delta)
40 |
41 |                 acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho)
42 |
43 | return loss
--------------------------------------------------------------------------------
/src/lmflow/optim/adagrad.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import torch
5 | from torch.optim.optimizer import Optimizer
6 |
7 | class AdaGrad(Optimizer):
8 | def __init__(self, params, lr=0.001, eps=1e-8, weight_decay=0):
9 | defaults = dict(lr=lr, eps=eps, weight_decay=weight_decay)
10 | super(AdaGrad, self).__init__(params, defaults)
11 |
12 | def step(self, closure=None):
13 | loss = None
14 | if closure is not None:
15 | loss = closure()
16 |
17 | for group in self.param_groups:
18 | for p in group['params']:
19 | if p.grad is None:
20 | continue
21 | grad = p.grad.data
22 | if group['weight_decay'] != 0:
23 |                     grad = grad.add(p.data, alpha=group['weight_decay'])
24 |
25 | state = self.state[p]
26 |
27 | if len(state) == 0:
28 | state['sum'] = torch.zeros_like(p.data)
29 |
30 |                 state_sum = state['sum']
31 |                 state_sum.addcmul_(grad, grad, value=1)
32 |                 std = state_sum.sqrt().add_(group['eps'])
33 |                 p.data.addcdiv_(grad, std, value=-group['lr'])
34 |
35 | return loss
--------------------------------------------------------------------------------
/src/lmflow/optim/adam.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | import torch
5 | from torch.optim.optimizer import Optimizer
6 |
7 | class Adam(Optimizer):
8 | def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-8):
9 | defaults = dict(lr=lr, betas=betas, eps=eps)
10 | super(Adam, self).__init__(params, defaults)
11 |
12 | def step(self, closure=None):
13 | loss = None
14 | if closure is not None:
15 | loss = closure()
16 |
17 | for group in self.param_groups:
18 | for p in group['params']:
19 | if p.grad is None:
20 | continue
21 | grad = p.grad.data
22 |
23 | state = self.state[p]
24 |
25 | if len(state) == 0:
26 | state['step'] = 0
27 | state['exp_avg'] = torch.zeros_like(p.data)
28 | state['exp_avg_sq'] = torch.zeros_like(p.data)
29 |
30 | exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
31 | beta1, beta2 = group['betas']
32 |
33 | state['step'] += 1
34 |
35 |                 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
36 |
37 |                 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
38 |
39 | bias_correction1 = 1 - beta1 ** state['step']
40 | bias_correction2 = 1 - beta2 ** state['step']
41 |
42 | step_size = group['lr'] * (bias_correction2 ** 0.5) / bias_correction1
43 | denom = exp_avg_sq.sqrt().add_(group['eps'])
44 |                 p.data.addcdiv_(exp_avg, denom, value=-step_size)
45 |
46 | return loss
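
Since these classes subclass torch.optim.Optimizer, they plug into an ordinary PyTorch training step. A hedged sketch with a toy model (the model and data below are illustrative, not part of LMFlow):

    import torch
    from lmflow.optim.adam import Adam

    model = torch.nn.Linear(4, 1)
    optimizer = Adam(model.parameters(), lr=1e-3)

    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()       # applies the bias-corrected Adam update defined above
    optimizer.zero_grad()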
--------------------------------------------------------------------------------
/src/lmflow/optim/optimizers.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """All optimizers.
4 | """
5 | from lmflow.optim.dummy import Dummy
6 | from lmflow.optim.adabelief import AdaBelief
7 | from lmflow.optim.adabound import AdaBound
8 | from lmflow.optim.lars import LARS
9 | from lmflow.optim.lamb import Lamb
10 | from lmflow.optim.adamax import Adamax
11 | from lmflow.optim.nadam import NAdam
12 | from lmflow.optim.radam import RAdam
13 | from lmflow.optim.adamp import AdamP
14 | from lmflow.optim.sgdp import SGDP
15 | from lmflow.optim.yogi import Yogi
16 | from lmflow.optim.sophia import SophiaG
17 | from lmflow.optim.adan import Adan
18 | from lmflow.optim.novograd import NovoGrad
19 | from lmflow.optim.adam import Adam
20 | from lmflow.optim.adadelta import Adadelta
21 | from lmflow.optim.adagrad import AdaGrad
22 | from lmflow.optim.muon import Muon
23 | from lmflow.optim.adamw_schedule_free import AdamWScheduleFree
24 | from lmflow.optim.sgd_schedule_free import SGDScheduleFree
25 |
--------------------------------------------------------------------------------
/src/lmflow/pipeline/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/src/lmflow/pipeline/__init__.py
--------------------------------------------------------------------------------
/src/lmflow/pipeline/base_aligner.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """ BaseTuner: a subclass of BasePipeline.
4 | """
5 |
6 | from lmflow.pipeline.base_pipeline import BasePipeline
7 |
8 |
9 | class BaseAligner(BasePipeline):
10 | """ A subclass of BasePipeline which is alignable.
11 | """
12 | def __init__(self, *args, **kwargs):
13 | pass
14 |
15 | def _check_if_alignable(self, model, dataset, reward_model):
16 | # TODO: check if the model is alignable and dataset is compatible
17 | # TODO: add reward_model
18 | pass
19 |
20 | def align(self, model, dataset, reward_model):
21 | raise NotImplementedError(".align is not implemented")
22 |
--------------------------------------------------------------------------------
/src/lmflow/pipeline/base_pipeline.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """ BasePipeline.
4 | """
5 |
6 | from abc import ABC # abstract class
7 |
8 | class BasePipeline(ABC):
9 | pass
10 |
--------------------------------------------------------------------------------
/src/lmflow/pipeline/base_tuner.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | """ BaseTuner: a subclass of BasePipeline.
4 | """
5 |
6 | from lmflow.pipeline.base_pipeline import BasePipeline
7 |
8 |
9 | class BaseTuner(BasePipeline):
10 | """ A subclass of BasePipeline which is tunable.
11 | """
12 | def __init__(self, *args, **kwargs):
13 | pass
14 |
15 | def _check_if_tunable(self, model, dataset):
16 | # TODO: check if the model is tunable and dataset is compatible
17 | pass
18 |
19 | def tune(self, model, dataset):
20 | raise NotImplementedError(".tune is not implemented")
21 |
--------------------------------------------------------------------------------
/src/lmflow/pipeline/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/src/lmflow/pipeline/utils/__init__.py
--------------------------------------------------------------------------------
/src/lmflow/pipeline/utils/memory_safe_dpov2_align.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | import logging
5 | import os
6 | import sys
7 | import copy
8 |
9 | from transformers import (
10 | HfArgumentParser
11 | )
12 |
13 | from lmflow.datasets import Dataset
14 | from lmflow.models.hf_decoder_model import HFDecoderModel
15 | from lmflow.pipeline.dpov2_aligner import DPOv2Aligner
16 | from lmflow.args import (
17 | ModelArguments,
18 | DatasetArguments,
19 | DPOv2AlignerArguments,
20 | )
21 | from lmflow.utils.common import remove_dataclass_attr_prefix, create_copied_dataclass
22 |
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 |
27 | ReferenceModelArguments: ModelArguments = create_copied_dataclass(
28 | original_dataclass=ModelArguments,
29 | field_prefix="reference_",
30 | class_prefix="Reference"
31 | )
32 |
33 |
34 | def main():
35 | # Parses arguments
36 | parser = HfArgumentParser((
37 | ModelArguments,
38 | ReferenceModelArguments,
39 | DatasetArguments,
40 | DPOv2AlignerArguments,
41 | ))
42 | target_model_args, ref_model_args, data_args, aligner_args = parser.parse_args_into_dataclasses()
43 |
44 | ref_model_args_dict = remove_dataclass_attr_prefix(ref_model_args, "reference_")
45 | ref_model_args = ModelArguments(**ref_model_args_dict)
46 |
47 | target_model = HFDecoderModel(target_model_args)
48 | ref_model = HFDecoderModel(ref_model_args)
49 | train_dataset = Dataset(data_args)
50 | eval_dataset = copy.deepcopy(train_dataset.sample(
51 | n=100,
52 | seed=aligner_args.random_seed
53 | ))
54 |
55 | aligner = DPOv2Aligner(
56 | model_args=target_model_args,
57 | data_args=train_dataset.data_args,
58 | aligner_args=aligner_args,
59 | ref_model_args=ref_model.model_args,
60 | )
61 | aligner.align(
62 | model=target_model,
63 | ref_model=ref_model,
64 | train_dataset=train_dataset,
65 | eval_dataset=eval_dataset,
66 | )
67 |
68 |
69 | if __name__ == "__main__":
70 | main()
--------------------------------------------------------------------------------
/src/lmflow/pipeline/utils/memory_safe_vllm_inference.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 |
5 | # Note: this script is only a workaround, since the vLLM inference engine
6 | # currently cannot release GPU memory properly. For details, please see this GitHub
7 | # [issue](https://github.com/vllm-project/vllm/issues/1908).
8 |
9 | import logging
10 | import sys
11 | import os
12 | from typing import Dict
13 |
14 | from transformers import (
15 | HfArgumentParser
16 | )
17 |
18 | from lmflow.datasets import Dataset
19 | from lmflow.models.auto_model import AutoModel
20 | from lmflow.pipeline.vllm_inferencer import VLLMInferencer
21 | from lmflow.args import (
22 | ModelArguments,
23 | DatasetArguments,
24 | AutoArguments,
25 | )
26 | from lmflow.utils.constants import MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG
27 |
28 |
29 | logger = logging.getLogger(__name__)
30 |
31 |
32 | def main():
33 | # Parses arguments
34 | pipeline_name = "vllm_inferencer"
35 | PipelineArguments = AutoArguments.get_pipeline_args_class(pipeline_name)
36 |
37 | parser = HfArgumentParser((
38 | ModelArguments,
39 | DatasetArguments,
40 | PipelineArguments
41 | ))
42 | if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
43 | # If we pass only one argument to the script and it's the path to a json file,
44 | # let's parse it to get our arguments.
45 | model_args, data_args, pipeline_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
46 | else:
47 | model_args, data_args, pipeline_args = parser.parse_args_into_dataclasses()
48 |
49 | dataset = Dataset(data_args)
50 | model = AutoModel.get_model(model_args, tune_strategy='none')
51 | inferencer = VLLMInferencer(model_args, data_args, pipeline_args)
52 |
53 | res = inferencer.inference(
54 | model,
55 | dataset,
56 | release_gpu=False,
57 | enable_decode_inference_result=pipeline_args.enable_decode_inference_result,
58 | enable_distributed_inference=pipeline_args.enable_distributed_inference,
59 | distributed_inference_num_instances=pipeline_args.distributed_inference_num_instances,
60 | inference_batch_size=pipeline_args.vllm_inference_batch_size,
61 | )
62 |
63 | # use this as a flag, stdout will be captured by the pipeline
64 | print(MEMORY_SAFE_VLLM_INFERENCE_FINISH_FLAG)
65 |
66 |
67 | if __name__ == "__main__":
68 | main()
--------------------------------------------------------------------------------
/src/lmflow/pipeline/utils/rm_dataprocessor.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from dataclasses import dataclass
3 | from typing import Any, Dict, List, Optional, Union
4 |
5 | from datasets import load_dataset
6 | from transformers import AutoTokenizer
7 | from transformers.utils import PaddingStrategy
8 |
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
13 | @dataclass
14 | class RewardDataCollatorWithPadding:
15 | tokenizer: AutoTokenizer
16 | padding: Union[bool, str, PaddingStrategy] = True
17 | max_length: Optional[int] = None
18 | pad_to_multiple_of: Optional[int] = None
19 | return_tensors: str = "pt"
20 |
21 | def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
22 | merged_features = []
23 | for feature in features:
24 | merged_features.append(
25 | {
26 | "input_ids": feature["input_ids_chosen"],
27 | "attention_mask": feature["attention_mask_chosen"],
28 | }
29 | )
30 | merged_features.append(
31 | {
32 | "input_ids": feature["input_ids_rejected"],
33 | "attention_mask": feature["attention_mask_rejected"],
34 | }
35 | )
36 | logger.debug(f"Chosen: {self.tokenizer.decode(feature['input_ids_chosen'])}")
37 | logger.debug(f"Rejected: {self.tokenizer.decode(feature['input_ids_rejected'])}")
38 | batch = self.tokenizer.pad(
39 | merged_features,
40 | padding=self.padding,
41 | max_length=self.max_length,
42 | pad_to_multiple_of=self.pad_to_multiple_of,
43 | return_tensors=self.return_tensors,
44 | )
45 | batch = {
46 | "input_ids": batch["input_ids"],
47 | "attention_mask": batch["attention_mask"],
48 | "return_loss": True,
49 | }
50 | return batch
51 |
--------------------------------------------------------------------------------
/src/lmflow/pipeline/utils/rm_trainer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 | from transformers import Trainer
5 |
6 | from .peft_trainer import PeftTrainer
7 |
8 |
9 | def compute_metrics(eval_pred):
10 | result = {}
11 | pos_predictions_scores = eval_pred.predictions[0]
12 | neg_predictions_scores = eval_pred.predictions[1]
13 | # We assume that the first sample is preferred by default in groundtruth
14 | result['accuracy'] = np.sum(
15 | pos_predictions_scores > neg_predictions_scores) / len(pos_predictions_scores)
16 | return result
17 |
18 |
19 | def rm_loss(model, inputs, return_outputs=False):
20 | rewards = model(
21 | input_ids=inputs["input_ids"],
22 | attention_mask=inputs["attention_mask"]
23 | )[0]
24 | bsz = rewards.size(0)
25 | jidx = torch.arange(0, bsz, 2)
26 | kidx = jidx + 1
27 | rewards_j = rewards[jidx]
28 | rewards_k = rewards[kidx]
29 | loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean()
30 | if return_outputs:
31 | return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k}
32 | return loss
33 |
34 |
35 | class RewardTrainer(Trainer):
36 |     def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
37 | return rm_loss(model, inputs, return_outputs)
38 |
39 |
40 | class PeftRewardTrainer(PeftTrainer):
41 |     def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
42 | return rm_loss(model, inputs, return_outputs)
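
rm_loss relies on the interleaved [chosen, rejected, chosen, rejected, ...] batch layout produced by RewardDataCollatorWithPadding above: even indices hold the chosen responses and odd indices the rejected ones. A toy illustration of that indexing (plain tensors, not LMFlow code):

    import torch

    rewards = torch.tensor([0.9, 0.2, 0.7, 0.4])  # [chosen_0, rejected_0, chosen_1, rejected_1]
    jidx = torch.arange(0, rewards.size(0), 2)    # chosen positions
    kidx = jidx + 1                               # rejected positions
    loss = -torch.nn.functional.logsigmoid(rewards[jidx] - rewards[kidx]).mean()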
--------------------------------------------------------------------------------
/src/lmflow/tokenization/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
--------------------------------------------------------------------------------
/src/lmflow/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/src/lmflow/utils/__init__.py
--------------------------------------------------------------------------------
/src/lmflow/utils/conversation_template/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | import logging
5 |
6 | from lmflow.utils.versioning import is_package_version_at_least
7 |
8 | from .base import EMPTY_TEMPLATE, EMPTY_NO_SPECIAL_TOKENS_TEMPLATE, ConversationTemplate, ConversationTemplateForTool
9 | from .chatglm import CHATGLM3_TEMPLATE
10 | from .chatml import CHATML_TEMPLATE
11 | from .deepseek import (
12 | DEEPSEEK_V2_TEMPLATE,
13 | DEEPSEEK_V3_TEMPLATE,
14 | DEEPSEEK_R1_TEMPLATE,
15 | DEEPSEEK_R1_DISTILL_TEMPLATE
16 | )
17 | from .gemma import GEMMA_TEMPLATE
18 | from .hymba import HYMBA_TEMPLATE
19 | from .internlm import INTERNLM2_TEMPLATE
20 | from .llama import LLAMA2_TEMPLATE, LLAMA3_TEMPLATE, LLAMA3_TEMPLATE_FOR_TOOL
21 | from .phi import PHI3_TEMPLATE
22 | from .qwen import (
23 | QWEN2_TEMPLATE,
24 | QWEN2_TEMPLATE_FOR_TOOL,
25 | QWEN2_5_TEMPLATE,
26 | QWEN2_5_1M_TEMPLATE,
27 | QWEN2_5_MATH_TEMPLATE,
28 | QWEN_QWQ_TEMPLATE
29 | )
30 | from .yi import YI1_5_TEMPLATE
31 | from .zephyr import ZEPHYR_TEMPLATE
32 |
33 |
34 | logger = logging.getLogger(__name__)
35 |
36 |
37 | PRESET_TEMPLATES = {
38 | 'chatglm3': CHATGLM3_TEMPLATE,
39 | 'chatml': CHATML_TEMPLATE,
40 | 'deepseek': DEEPSEEK_V2_TEMPLATE,
41 | 'deepseek_v2': DEEPSEEK_V2_TEMPLATE,
42 | 'disable': EMPTY_TEMPLATE,
43 | 'empty': EMPTY_TEMPLATE,
44 | 'empty_no_special_tokens': EMPTY_NO_SPECIAL_TOKENS_TEMPLATE,
45 | 'gemma': GEMMA_TEMPLATE,
46 | 'hymba': HYMBA_TEMPLATE,
47 | 'internlm2': INTERNLM2_TEMPLATE,
48 | 'llama2': LLAMA2_TEMPLATE,
49 | 'llama3': LLAMA3_TEMPLATE,
50 | 'llama3_for_tool': LLAMA3_TEMPLATE_FOR_TOOL,
51 | 'phi3': PHI3_TEMPLATE,
52 | 'qwen2': QWEN2_TEMPLATE,
53 | 'qwen2_for_tool': QWEN2_TEMPLATE_FOR_TOOL,
54 | 'yi': CHATML_TEMPLATE,
55 | 'yi1_5': YI1_5_TEMPLATE,
56 | 'zephyr': ZEPHYR_TEMPLATE
57 | }
58 |
59 | JINJA_TEMPLATES = {
60 | 'deepseek_r1': DEEPSEEK_R1_TEMPLATE,
61 | 'deepseek_r1_distill': DEEPSEEK_R1_DISTILL_TEMPLATE,
62 | 'deepseek_v3': DEEPSEEK_V3_TEMPLATE,
63 | 'qwen2_5': QWEN2_5_TEMPLATE,
64 | 'qwen2_5_1m': QWEN2_5_1M_TEMPLATE,
65 | 'qwen2_5_math': QWEN2_5_MATH_TEMPLATE,
66 | 'qwen_qwq': QWEN_QWQ_TEMPLATE,
67 | }
68 |
69 | if is_package_version_at_least("transformers", "4.43.0"):
70 | for template_name, template in JINJA_TEMPLATES.items():
71 | PRESET_TEMPLATES[template_name] = template
72 | else:
73 | logger.warning(
74 | f"The following conversation templates require transformers>=4.43.0: {JINJA_TEMPLATES.keys()}. "
75 | f"Please upgrade `transformers` to use them."
76 | )
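
A hedged sketch of how a preset is looked up by name (PRESET_TEMPLATES and the 'chatml' key are defined above; how a particular pipeline consumes the returned template is not shown here):

    from lmflow.utils.conversation_template import PRESET_TEMPLATES

    template = PRESET_TEMPLATES['chatml']   # ConversationTemplate defined in chatml.py
    print(template.template_name)           # -> 'chatml'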
--------------------------------------------------------------------------------
/src/lmflow/utils/conversation_template/chatglm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | from .base import StringFormatter, TemplateComponent, ConversationTemplate
5 |
6 |
7 | CHATGLM3_TEMPLATE = ConversationTemplate(
8 | template_name='chatglm3',
9 | user_formatter=StringFormatter(
10 | template=[
11 | TemplateComponent(type='string', content='<|user|>\n {{content}}')
12 | ]
13 | ),
14 | assistant_formatter=StringFormatter(
15 | template=[
16 | TemplateComponent(type='string', content='<|assistant|>\n {{content}}')
17 | ]
18 | ),
19 | system_formatter=StringFormatter(
20 | template=[
21 | TemplateComponent(type='string', content='<|system|>\n {{content}}')
22 | ]
23 | ),
24 | special_starter=TemplateComponent(type='string', content='[gMASK]sop')
25 | )
--------------------------------------------------------------------------------
/src/lmflow/utils/conversation_template/chatml.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | from .base import StringFormatter, TemplateComponent, ConversationTemplate
5 |
6 |
7 | CHATML_TEMPLATE = ConversationTemplate(
8 | template_name='chatml',
9 | user_formatter=StringFormatter(
10 | template=[
11 | TemplateComponent(type='string', content='<|im_start|>user\n{{content}}<|im_end|>\n')
12 | ]
13 | ),
14 | assistant_formatter=StringFormatter(
15 | template=[
16 | TemplateComponent(type='string', content='<|im_start|>assistant\n{{content}}<|im_end|>\n')
17 | ]
18 | ),
19 | system_formatter=StringFormatter(
20 | template=[
21 | TemplateComponent(type='string', content='<|im_start|>system\n{{content}}<|im_end|>\n')
22 | ]
23 | )
24 | )
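
A hedged sketch of what the user formatter above produces, following the StringFormatter.format usage shown in tests/utils/test_conversation_formatter.py at the end of this dump:

    from lmflow.utils.conversation_template import CHATML_TEMPLATE

    components = CHATML_TEMPLATE.user_formatter.format(content='Who are you?')
    # -> [TemplateComponent(type='string',
    #     content='<|im_start|>user\nWho are you?<|im_end|>\n')]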
--------------------------------------------------------------------------------
/src/lmflow/utils/conversation_template/gemma.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | import logging
5 | from dataclasses import dataclass
6 |
7 | from .base import StringFormatter, TemplateComponent, ConversationTemplate
8 |
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
13 | @dataclass
14 | class GemmaConversationTemplate(ConversationTemplate):
15 | def encode_conversation(self, *args, **kwargs):
16 | if kwargs.get('system'):
17 | logger.warning(
18 | 'As of now, Gemma does not support system messages officially. '
19 | 'ConversationTemplate will add your system messages right after '
20 | 'the bos token and before the user message without any special formatting. '
21 | 'For more details, please refer to the [official template]'
22 | '(https://huggingface.co/google/gemma-1.1-2b-it/blob/bf4924f313df5166dee1467161e886e55f2eb4d4/tokenizer_config.json#L1507).'
23 | )
24 | return super().encode_conversation(*args, **kwargs)
25 |
26 |
27 | GEMMA_TEMPLATE = GemmaConversationTemplate(
28 | template_name='gemma',
29 | user_formatter=StringFormatter(
30 | template=[
31 | TemplateComponent(type='string', content='user\n{{content}}\n')
32 | ]
33 | ),
34 | assistant_formatter=StringFormatter(
35 | template=[
36 | TemplateComponent(type='string', content='model\n{{content}}\n')
37 | ]
38 | ),
39 | system_formatter=StringFormatter(
40 | template=[
41 | TemplateComponent(type='string', content='{{content}}')
42 | ]
43 | ),
44 | special_starter=TemplateComponent(type='token', content='bos_token')
45 | )
--------------------------------------------------------------------------------
/src/lmflow/utils/conversation_template/internlm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | from .base import StringFormatter, TemplateComponent, ConversationTemplate
5 |
6 |
7 | INTERNLM2_TEMPLATE = ConversationTemplate(
8 | template_name='internlm2',
9 | user_formatter=StringFormatter(
10 | template=[
11 | TemplateComponent(type='string', content='<|im_start|>user\n{{content}}<|im_end|>\n')
12 | ]
13 | ),
14 | assistant_formatter=StringFormatter(
15 | template=[
16 | TemplateComponent(type='string', content='<|im_start|>assistant\n{{content}}<|im_end|>\n')
17 | ]
18 | ),
19 | system_formatter=StringFormatter(
20 | template=[
21 | TemplateComponent(type='string', content='<|im_start|>system\n{{content}}<|im_end|>\n')
22 | ]
23 | ),
24 | special_starter=TemplateComponent(type='token', content='bos_token')
25 | )
--------------------------------------------------------------------------------
/src/lmflow/utils/conversation_template/phi.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | from .base import StringFormatter, TemplateComponent, ConversationTemplate
5 |
6 |
7 | PHI3_TEMPLATE = ConversationTemplate(
8 | template_name='phi3',
9 | user_formatter=StringFormatter(
10 | template=[
11 | TemplateComponent(type='string', content='<|user|>\n{{content}}<|end|>\n')
12 | ]
13 | ),
14 | assistant_formatter=StringFormatter(
15 | template=[
16 | TemplateComponent(type='string', content='<|assistant|>\n{{content}}<|end|>\n')
17 | ]
18 | ),
19 | system_formatter=StringFormatter(
20 | template=[
21 | TemplateComponent(type='string', content='<|system|>\n{{content}}<|end|>\n')
22 | ]
23 | ),
24 | special_starter=TemplateComponent(type='token', content='bos_token'),
25 | special_stopper=TemplateComponent(type='token', content='eos_token')
26 | )
--------------------------------------------------------------------------------
/src/lmflow/utils/conversation_template/yi.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | from .base import StringFormatter, TemplateComponent, ConversationTemplate
5 |
6 |
7 | YI1_5_TEMPLATE = ConversationTemplate(
8 | template_name='yi1_5',
9 | user_formatter=StringFormatter(
10 | template=[
11 | TemplateComponent(type='string', content='<|im_start|>user\n{{content}}<|im_end|>\n')
12 | ]
13 | ),
14 | assistant_formatter=StringFormatter(
15 | template=[
16 | TemplateComponent(type='string', content='<|im_start|>assistant\n{{content}}<|im_end|>\n')
17 | ]
18 | ),
19 | system_formatter=StringFormatter(
20 | template=[
21 | TemplateComponent(type='string', content='{{content}}')
22 | ]
23 | )
24 | )
--------------------------------------------------------------------------------
/src/lmflow/utils/debug/profiler.py:
--------------------------------------------------------------------------------
1 | import time
2 | import pprint
3 |
4 |
5 | class Timer:
6 | def __init__(self, name):
7 | self.name = name
8 | self.runtimes = {}
9 | self.runtimes_readable = {}
10 |
11 | def start(self, tag):
12 | self.runtimes[tag] = {"start": time.time()}
13 |
14 | def end(self, tag):
15 | self.runtimes[tag]["end"] = time.time()
16 | self.runtimes[tag]["elapsed"] = self.runtimes[tag]["end"] - self.runtimes[tag]["start"]
17 |
18 | def get_runtime(self, tag):
19 | return self.runtimes[tag]["elapsed"]
20 |
21 | def show(self):
22 | self._to_readable()
23 | pprint.pprint(self.runtimes_readable)
24 |
25 | def _to_readable(self):
26 | for tag, runtime in self.runtimes.items():
27 | self.runtimes_readable[tag] = {"start": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(runtime["start"]))}
28 | self.runtimes_readable[tag]["end"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(runtime["end"]))
29 | self.runtimes_readable[tag]["elapsed"] = round(runtime["elapsed"], 5)
30 |
31 |
32 | if __name__ == "__main__":
33 | timer = Timer("profiler")
34 | timer.start("main")
35 | time.sleep(1)
36 | timer.end("main")
37 | timer.show()
--------------------------------------------------------------------------------
/src/lmflow/utils/flash_attention/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/src/lmflow/utils/flash_attention/__init__.py
--------------------------------------------------------------------------------
/src/lmflow/utils/model.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 | # Copyright 2024 Statistics and Machine Learning Research Group. All rights reserved.
4 | import logging
5 | from typing import Dict, Any, List, Tuple, Union
6 |
7 | from transformers import AutoTokenizer
8 |
9 | from lmflow.args import ModelArguments
10 |
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 |
15 | def check_homogeneity(model_args_list: List[ModelArguments]) -> bool:
16 | assert all(isinstance(model_args, ModelArguments) for model_args in model_args_list), \
17 | "model_args_list should be a list of ModelArguments objects."
18 | assert len(model_args_list) > 1, "model_args_list should have at least two elements."
19 |
20 | tokenizer_names = []
21 | for model_args in model_args_list:
22 | tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, use_fast=False)
23 | tokenizer_names.append(tokenizer.__class__.__name__)
24 |
25 | return len(set(tokenizer_names)) == 1
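
A hedged usage sketch for check_homogeneity (the model names are placeholders; the helper loads each tokenizer and compares the tokenizer class names):

    from lmflow.args import ModelArguments
    from lmflow.utils.model import check_homogeneity

    model_args_list = [
        ModelArguments(model_name_or_path="org/model-a"),  # hypothetical checkpoints
        ModelArguments(model_name_or_path="org/model-b"),
    ]
    assert check_homogeneity(model_args_list), "models must share a tokenizer class"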
--------------------------------------------------------------------------------
/src/lmflow/utils/multimodal.py:
--------------------------------------------------------------------------------
1 | import glob
2 | import torch
3 | from transformers import LlamaConfig
4 | from tqdm import tqdm
5 |
6 |
7 | def update_custom_config(config, model_args):
8 | if model_args.llm_model_name_or_path is not None:
9 | text_config = LlamaConfig.from_pretrained(
10 | model_args.llm_model_name_or_path)
11 | config.text_config = text_config
12 | config.with_qformer = model_args.with_qformer
13 | config.custom_vision_model = model_args.custom_vision_model
14 | if model_args.custom_vision_model:
15 | # config.vision_model_args = model_args
16 | config.image_encoder_name_or_path = \
17 | model_args.image_encoder_name_or_path
18 | config.vision_select_layer = model_args.vision_select_layer
19 | if getattr(model_args, "vision_select_feature", None) is not None:
20 | config.vision_select_feature = model_args.vision_select_feature
21 | return config
22 |
23 |
24 | def load_llava_pretrain_model(model, checkpoint_path):
25 | checkpoint_path = glob.glob(checkpoint_path)
26 | for path in tqdm(checkpoint_path):
27 | state_dict = torch.load(path, map_location="cpu")
28 | new_state_dict = adapt_llava_model_to_lmflow_type(state_dict)
29 |         # report any converted keys that are missing from the
30 |         # LMFlow model's state dict before loading
31 | lmflow_keys = model.state_dict().keys()
32 | for key in new_state_dict.keys():
33 | if key not in lmflow_keys:
34 | print("key not in lmflow_keys: ", key)
35 | model.load_state_dict(new_state_dict, strict=False)
36 | return model
37 |
38 | def adapt_llava_model_to_lmflow_type(state_dict):
39 | new_state_dict = {}
40 | for key, item in state_dict.items():
41 | key = key.replace("model.layers", "language_model.model.layers")
42 | key = key.replace("model.embed_tokens",
43 | "language_model.model.embed_tokens")
44 | key = key.replace("model.mm_projector", "language_projection")
45 | key = key.replace("lm_head", "language_model.lm_head")
46 | key = key.replace("model.norm", "language_model.model.norm")
47 | if "vision_tower" in key:
48 | continue
49 | new_state_dict[key] = item
50 | return new_state_dict
--------------------------------------------------------------------------------
/src/lmflow/utils/position_interpolation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/src/lmflow/utils/position_interpolation/__init__.py
--------------------------------------------------------------------------------
/src/lmflow/utils/versioning.py:
--------------------------------------------------------------------------------
1 | import importlib
2 | import sys
3 | import logging
4 | from typing import Tuple, List, Union
5 | from importlib.metadata import version, PackageNotFoundError
6 |
7 | import pkg_resources
8 |
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
13 | def get_python_version():
14 | return sys.version_info
15 |
16 |
17 | def _is_package_available(package_name: str, skippable: bool = False):
18 | assert isinstance(package_name, str), f"Invalid type of package_name: {type(package_name)}"
19 | try:
20 | importlib.import_module(package_name)
21 | return True
22 | except Exception as e:
23 | if e.__class__ == ModuleNotFoundError:
24 | return False
25 | else:
26 | if skippable:
27 | logger.warning(f'An error occurred when importing {package_name}:\n{e}\n{package_name} is disabled.')
28 | return False
29 | else:
30 | raise e
31 |
32 |
33 | def _is_packages_available(packages: Union[List[str], List[Tuple[str, bool]]]):
34 | if isinstance(packages[0], str):
35 | return all([_is_package_available(package) for package in packages])
36 | elif isinstance(packages[0], tuple):
37 | return all([_is_package_available(package, skippable) for package, skippable in packages])
38 | else:
39 | raise ValueError(f"Invalid type of packages: {type(packages[0])}")
40 |
41 |
42 | def is_package_version_at_least(package_name, min_version):
43 | try:
44 | package_version = pkg_resources.get_distribution(package_name).version
45 | if (pkg_resources.parse_version(package_version)
46 | < pkg_resources.parse_version(min_version)):
47 | return False
48 | except pkg_resources.DistributionNotFound:
49 | return False
50 | return True
51 |
52 |
53 | def is_gradio_available():
54 | return _is_package_available("gradio")
55 |
56 |
57 | def is_ray_available():
58 | return _is_package_available("ray")
59 |
60 |
61 | def is_vllm_available():
62 | return _is_package_available("vllm")
63 |
64 |
65 | def is_flash_attn_available():
66 | return _is_package_available("flash_attn", skippable=True)
67 |
68 |
69 | def is_flask_available():
70 | return _is_packages_available(["flask", "flask_cors"])
71 |
72 |
73 | def is_trl_available():
74 | return _is_package_available("trl")
75 |
76 |
77 | def is_multimodal_available():
78 | return _is_packages_available(["PIL"])
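
A hedged sketch of how these guards are typically used, mirroring the transformers>=4.43.0 check in lmflow/utils/conversation_template/__init__.py above:

    from lmflow.utils.versioning import (
        is_flash_attn_available,
        is_package_version_at_least,
    )

    if is_flash_attn_available():
        pass  # enable flash-attention code paths
    if not is_package_version_at_least("transformers", "4.43.0"):
        pass  # skip Jinja-based templates, as done above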
--------------------------------------------------------------------------------
/src/lmflow/version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.0.9"
2 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | def pytest_addoption(parser):
2 | parser.addoption("--cpu-only", action="store_true", help="run tests that only requires cpu")
3 | parser.addoption("--skip-slow", action="store_true", help="skip slow tests")
4 |
5 | def pytest_collection_modifyitems(config, items):
6 | new_items = []
7 | for item in items:
8 | func = item.function
9 | if config.getoption("--cpu-only"):
10 | if not (func.__doc__ and "#cpu" in func.__doc__.lower()):
11 | continue
12 | if config.getoption("--skip-slow"):
13 | if func.__doc__ and "#slow" in func.__doc__.lower():
14 | continue
15 | new_items.append(item)
16 | items[:] = new_items
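
The collection hook above keys off "#cpu" and "#slow" markers embedded in test docstrings, so a hedged example of a test kept by --cpu-only but dropped by --skip-slow would look like (hypothetical test, for illustration only):

    def test_tokenize_small_sample():
        """Smoke test that runs on CPU but takes a while. #cpu #slow"""
        assert True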
--------------------------------------------------------------------------------
/tests/datasets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/tests/datasets/__init__.py
--------------------------------------------------------------------------------
/tests/datasets/test_dataset.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # coding=utf-8
3 | """Unit tests for lmflow.datasets.dataset.Dataset.
4 |
5 | These tests cover three behaviors of the Dataset class:
6 |
7 |     * loading a dataset from disk with the HuggingFace backend and
8 |       comparing it against the raw JSON file,
9 |     * creating a dataset from an in-memory dictionary via
10 |       Dataset.create_from_dict,
11 |     * raising ValueError when the dictionary declares an
12 |       unsupported dataset type.
13 | """
14 |
15 | from __future__ import absolute_import
16 | import unittest
17 |
18 | import json
19 | import os
20 | from pathlib import Path
21 |
22 | from lmflow.args import DatasetArguments
23 | from lmflow.datasets.dataset import Dataset
24 |
25 |
26 | class DatasetTest(unittest.TestCase):
27 |
28 | def test_init(self):
29 | dataset_dir = 'data/example_dataset/train'
30 | data_args = DatasetArguments(
31 | dataset_path=dataset_dir
32 | )
33 | dataset = Dataset(data_args, backend='huggingface')
34 | hf_dataset = dataset.get_backend_dataset()
35 |
36 | with open(os.path.join(Path(dataset_dir), 'train_50.json'), 'r') as fin:
37 | json_obj = json.load(fin)
38 | for i in range(len(hf_dataset)):
39 | self.assertEqual(json_obj['instances'][i], hf_dataset[i])
40 |
41 |
42 | def test_create_from_dict(self):
43 | data_dict = {
44 | "type": "text2text",
45 | "instances": [
46 | { "input": "INPUT 1", "output": "OUTPUT 1" },
47 | { "input": "INPUT 2", "output": "OUTPUT 2" },
48 | ]
49 | }
50 | dataset = Dataset.create_from_dict(data_dict)
51 | self.assertEqual(dataset.to_dict(), data_dict)
52 |
53 |
54 | def test_create_from_dict_bad_type(self):
55 | data_dict = {
56 | "type": "non-supported",
57 | "instances": [
58 | { "input": "INPUT 1", "output": "OUTPUT 1" },
59 | { "input": "INPUT 2", "output": "OUTPUT 2" },
60 | ]
61 | }
62 | with self.assertRaises(ValueError):
63 | dataset = Dataset.create_from_dict(data_dict)
64 |
--------------------------------------------------------------------------------
/tests/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OptimalScale/LMFlow/397f00d9f33b8b781d39376ab1ff84859415f623/tests/models/__init__.py
--------------------------------------------------------------------------------
/tests/models/test_auto_model.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from lmflow.args import ModelArguments
4 | from lmflow.models.auto_model import AutoModel
5 | from lmflow.models.hf_decoder_model import HFDecoderModel
6 | from lmflow.models.text_regression_model import TextRegressionModel
7 | from lmflow.models.hf_encoder_decoder_model import HFEncoderDecoderModel
8 |
9 | MODEL_NAME = "gpt2"
10 |
11 |
12 | class AutoModelTest(unittest.TestCase):
13 |
14 | def test_get_decoder_model(self):
15 | model_args = ModelArguments(
16 | arch_type="decoder_only", model_name_or_path=MODEL_NAME)
17 | model = AutoModel.get_model(model_args)
18 | self.assertTrue(isinstance(model, HFDecoderModel))
19 |
20 |
21 | # This unit test is commented out since the text regression model has not been fully implemented
22 | '''
23 | def test_get_text_regression_model(self):
24 | model_args = ModelArguments(
25 | arch_type="text_regression", model_name_or_path=MODEL_NAME)
26 | model = AutoModel.get_model(model_args)
27 | self.assertTrue(isinstance(model, TextRegressionModel))
28 | '''
29 |
30 |
31 | # This unit test is commented out since the encoder decoder model has not been fully implemented
32 | '''
33 | def test_get_encoder_decoder(self):
34 | model_args = ModelArguments(
35 | arch_type="encoder_decoder", model_name_or_path=MODEL_NAME)
36 | model = AutoModel.get_model(model_args)
37 | self.assertTrue(isinstance(model, HFEncoderDecoderModel))
38 | '''
39 |
40 |
41 | def test_get_unsupported_model(self):
42 | model_args = ModelArguments(
43 | arch_type="unsupported model", model_name_or_path=MODEL_NAME)
44 | with self.assertRaises(NotImplementedError):
45 | model = AutoModel.get_model(model_args)
46 |
--------------------------------------------------------------------------------
/tests/models/test_tool_inferencer.py:
--------------------------------------------------------------------------------
1 | from lmflow.pipeline.inferencer import ToolInferencer
2 | import unittest
3 | from lmflow.args import InferencerArguments
4 | from lmflow.args import ModelArguments
5 | from lmflow.args import DatasetArguments
6 | from lmflow.models import hf_decoder_model
7 |
8 | CODE_1 = "print(\"hello world\")"
9 | RES_1 = "hello world\n"
10 | CODE_2 = "b=a+1\nprint(b)"
11 | RES_2 = """Traceback (most recent call last):
12 | File "", line 1, in
13 | NameError: name 'a' is not defined
14 | """
15 |
16 | class ToolInferencerTest(unittest.TestCase):
17 |     def setUp(self):
18 | model_args = ModelArguments(model_name_or_path="codellama/CodeLlama-7b-instruct-hf")
19 | model = hf_decoder_model.HFDecoderModel(model_args)
20 | inferencer_args = InferencerArguments()
21 | data_args = DatasetArguments()
22 | self.toolinf = ToolInferencer(model_args, data_args, inferencer_args)
23 |
24 | def test_code_exec_1(self,code=CODE_1, expected_output=RES_1):
25 |
26 | toolinf_res = self.toolinf.code_exec(code)
27 | self.assertEqual(toolinf_res, expected_output)
28 |
29 | def test_code_exec_2(self,code=CODE_2):
30 | toolinf_res = self.toolinf.code_exec(code)
31 | self.assertNotEqual(toolinf_res.returncode, 0)
32 |
33 | if __name__ == "__main__":
34 |     unittest.main()
35 |
36 |
--------------------------------------------------------------------------------
/tests/pipeline/test_auto_pipeline.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from lmflow.args import DatasetArguments
4 | from lmflow.args import EvaluatorArguments
5 | from lmflow.args import FinetunerArguments
6 | from lmflow.args import InferencerArguments
7 | from lmflow.args import ModelArguments
8 | from lmflow.args import RaftAlignerArguments
9 | from lmflow.pipeline.auto_pipeline import AutoPipeline
10 | from lmflow.pipeline.evaluator import Evaluator
11 | from lmflow.pipeline.finetuner import Finetuner
12 | from lmflow.pipeline.inferencer import Inferencer
13 | from lmflow.pipeline.raft_aligner import RaftAligner
14 |
15 | MODEL_NAME = "gpt2"
16 |
17 |
18 | class AutoPipelineTest(unittest.TestCase):
19 |
20 | def test_get_evaluator_pipeline(self):
21 | model_args = ModelArguments(model_name_or_path=MODEL_NAME)
22 | dataset_args = DatasetArguments()
23 | evaluator_args = EvaluatorArguments()
24 | pipeline = AutoPipeline.get_pipeline(
25 | "evaluator", model_args, dataset_args, evaluator_args)
26 |
27 | self.assertTrue(isinstance(pipeline, Evaluator))
28 |
29 | def test_get_finetuner_pipeline(self):
30 | model_args = ModelArguments(model_name_or_path=MODEL_NAME)
31 | dataset_args = DatasetArguments()
32 | finetuner_args = FinetunerArguments(output_dir="~/tmp")
33 | pipeline = AutoPipeline.get_pipeline(
34 | "finetuner", model_args, dataset_args, finetuner_args)
35 |
36 | self.assertTrue(isinstance(pipeline, Finetuner))
37 |
38 | def test_get_inferencer_pipeline(self):
39 | model_args = ModelArguments(model_name_or_path=MODEL_NAME)
40 | dataset_args = DatasetArguments()
41 | inferencer_args = InferencerArguments()
42 | pipeline = AutoPipeline.get_pipeline(
43 | "inferencer", model_args, dataset_args, inferencer_args)
44 |
45 | self.assertTrue(isinstance(pipeline, Inferencer))
46 |
47 | def test_get_raft_aligner_pipeline(self):
48 | model_args = ModelArguments(model_name_or_path=MODEL_NAME)
49 | dataset_args = DatasetArguments()
50 | raft_aligner_args = RaftAlignerArguments(output_dir="~/tmp")
51 | pipeline = AutoPipeline.get_pipeline(
52 | "raft_aligner", model_args, dataset_args, raft_aligner_args)
53 |
54 | self.assertTrue(isinstance(pipeline, RaftAligner))
55 |
56 | def test_get_unsupported_pipeline(self):
57 | model_args = ModelArguments(model_name_or_path=MODEL_NAME)
58 | dataset_args = DatasetArguments()
59 |
60 | with self.assertRaisesRegex(NotImplementedError, "Pipeline \"unsupported\" is not supported"):
61 | pipeline = AutoPipeline.get_pipeline(
62 | "unsupported", model_args, dataset_args, None)
63 |
--------------------------------------------------------------------------------
/tests/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/tests/utils/test_conversation_formatter.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from lmflow.utils.conversation_template.base import StringFormatter, TemplateComponent
3 |
4 |
5 | class StringFormatterTest(unittest.TestCase):
6 |
7 | def test_format_string_component(self):
8 | formatter = StringFormatter(
9 | template=[
10 | TemplateComponent(type='token', content='bos_token'),
11 | TemplateComponent(type='string', content='[INST] {{content}} [/INST]'),
12 | TemplateComponent(type='token', content='eos_token')
13 | ]
14 | )
15 | formatted_components = formatter.format(content='Who are you?')
16 | expected_components = [
17 | TemplateComponent(type='token', content='bos_token'),
18 | TemplateComponent(type='string', content='[INST] Who are you? [/INST]'),
19 | TemplateComponent(type='token', content='eos_token')
20 | ]
21 | self.assertEqual(formatted_components, expected_components)
22 |
23 |
24 | if __name__ == '__main__':
25 | unittest.main()
--------------------------------------------------------------------------------
/utils/convert_json_to_txt.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | import argparse
5 | import logging
6 |
7 | import json
8 | from pathlib import Path
9 |
10 | logging.basicConfig(level=logging.WARNING)
11 |
12 | if __name__ == '__main__':
13 |
14 | parser = argparse.ArgumentParser()
15 | parser.add_argument('--dataset_path', default='./data/wiki_zh_eval', type=str, required=False)
16 | parser.add_argument('--output_path', default='./data/wiki_zh_eval/converted_data.txt', type=str, required=False)
17 | parser.add_argument('--overwrite', default=False, type=bool, required=False)
18 | args = parser.parse_args()
19 |
20 | dataset_path = args.dataset_path
21 | outputfile = args.output_path
22 |
23 | outputs_list = []
24 | data_files = [
25 | x.absolute().as_posix()
26 | for x in Path(dataset_path).glob("*.json")
27 | ]
28 |
29 | for file_name in data_files:
30 | with open(file_name) as fin:
31 | json_data = json.load(fin)
32 | type = json_data["type"]
33 | for line in json_data["instances"]:
34 | outputs_list.append(line["text"])
35 |
36 |
37 | if Path(outputfile).exists() and not args.overwrite:
38 |         logging.warning("File %s exists, will not overwrite.", outputfile)
39 | else:
40 | with open(outputfile, "w") as f:
41 | for line in outputs_list:
42 | f.write(line)
43 |
44 |
--------------------------------------------------------------------------------
/utils/convert_minigpt4_checkpoints.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os.path as osp
3 | import torch
4 |
5 | def parse_args():
6 | parser = argparse.ArgumentParser(description="Convert checkpoint from MiniGPT4")
7 | parser.add_argument("--model_path", type=str, help="the model path for the to convert checkpoint")
8 | parser.add_argument("--save_path", default=None, type=str, help="the save path for converted checkpoint")
9 | args = parser.parse_args()
10 | return args
11 |
12 |
13 |
14 |
15 |
16 | if __name__ == "__main__":
17 | args = parse_args()
18 | model = torch.load(args.model_path)
19 | model = model['model']
20 | new_model = {}
21 | for key, item in model.items():
22 | key = key.replace("Qformer", "qformer")
23 | key = key.replace("llama_proj", "language_projection")
24 | key = key.replace("llama_model.model", "language_model.model")
25 | new_model[key] = item
26 | if args.save_path is None:
27 |         # derive the save path from the original checkpoint path
28 | save_path = osp.dirname(args.model_path) + "/" + \
29 | osp.basename(args.model_path).replace(".pth", "") + \
30 | "-converted" + osp.splitext(args.model_path)[-1]
31 | else:
32 | save_path = args.save_path
33 | print("save_path: {}".format(save_path))
34 |
35 | torch.save(new_model, save_path)
36 |
--------------------------------------------------------------------------------
/utils/download_hf_file.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | from huggingface_hub import hf_hub_download
3 | import os
4 | import shutil
5 |
6 |
7 | def parse_args():
8 | parser = argparse.ArgumentParser(description="Download huggingface file")
9 | parser.add_argument("--repo_id", type=str, help="the repo id")
10 | parser.add_argument("--filename", default=None, type=str, help="the file name for the download file")
11 | parser.add_argument("--target_path", default="./", type=str, help="the target path for the download file")
12 | parser.add_argument("--repo_type", default="dataset", type=str, help="the repo type")
13 | args = parser.parse_args()
14 | return args
15 |
16 |
17 |
18 | if __name__ == "__main__":
19 | args = parse_args()
20 | print("Start downloading repo {} filename: {}".format(
21 | args.repo_id, args.filename))
22 | args.target_path = os.path.abspath(args.target_path)
23 | source_path = hf_hub_download(repo_id=args.repo_id, filename=args.filename, repo_type=args.repo_type)
24 | os.makedirs(args.target_path, exist_ok=True)
25 | target_path = os.path.join(args.target_path, args.filename)
26 | shutil.copyfile(source_path, target_path)
27 | print("Finish downloading repo {} filename: {}".format(
28 | args.repo_id, args.filename))
29 |
--------------------------------------------------------------------------------
/utils/make_delta.py:
--------------------------------------------------------------------------------
1 | """
2 | Make the delta weights by subtracting base weights.
3 |
4 | Usage:
5 | python3 -m fastchat.model.make_delta --base ~/model_weights/llama-13b --target ~/model_weights/vicuna-13b --delta ~/model_weights/vicuna-13b-delta --hub-repo-id lmsys/vicuna-13b-delta-v1.1
6 | """
7 | import argparse
8 |
9 | import torch
10 | from tqdm import tqdm
11 | from transformers import AutoTokenizer, AutoModelForCausalLM
12 |
13 |
14 | def make_delta(base_model_path, target_model_path, delta_path):
15 | print(f"Loading the base model from {base_model_path}")
16 | base = AutoModelForCausalLM.from_pretrained(
17 | base_model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True
18 | )
19 |
20 | print(f"Loading the target model from {target_model_path}")
21 | target = AutoModelForCausalLM.from_pretrained(
22 | target_model_path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True
23 | )
24 | target_tokenizer = AutoTokenizer.from_pretrained(target_model_path, use_fast=False)
25 |
26 | print("Calculating the delta")
27 | for name, param in tqdm(target.state_dict().items(), desc="Calculating delta"):
28 | assert name in base.state_dict()
29 | param.data -= base.state_dict()[name]
30 |
31 | print(f"Saving the delta to {delta_path}")
32 | if args.hub_repo_id:
33 | kwargs = {"push_to_hub": True, "repo_id": args.hub_repo_id}
34 | else:
35 | kwargs = {}
36 | target.save_pretrained(delta_path, **kwargs)
37 | target_tokenizer.save_pretrained(delta_path, **kwargs)
38 |
39 |
40 | if __name__ == "__main__":
41 | parser = argparse.ArgumentParser()
42 | parser.add_argument("--base-model-path", type=str, required=True)
43 | parser.add_argument("--target-model-path", type=str, required=True)
44 | parser.add_argument("--delta-path", type=str, required=True)
45 | parser.add_argument("--hub-repo-id", type=str)
46 | args = parser.parse_args()
47 |
48 | make_delta(args.base_model_path, args.target_model_path, args.delta_path)
49 |
--------------------------------------------------------------------------------
/utils/preprocess_multimodal_data.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os.path as osp
3 | import torch
4 | import json
5 |
6 | def parse_args():
7 |     parser = argparse.ArgumentParser(description="Truncate overlong conversation values in multimodal data")
8 |     parser.add_argument("--data_path", type=str, help="the path of the multimodal data json file to process")
9 |     parser.add_argument("--save_path", default=None, type=str, help="the save path for the processed json file")
10 |     parser.add_argument("--max_length", default=1000, type=int, help="the max length of each conversation value")
11 | args = parser.parse_args()
12 | return args
13 |
14 |
15 | if __name__ == "__main__":
16 | args = parse_args()
17 | data = json.load(open(args.data_path))
18 | for data_idx in data:
19 | for item in data_idx['conversations']:
20 | if len(item["value"]) > args.max_length:
21 | item["value"] = item["value"][:args.max_length]
22 | with open(args.save_path, 'w') as f:
23 | json.dump(data, f)
24 | print("finish processing the data.")
25 |
--------------------------------------------------------------------------------
/utils/train_tokenizer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | import argparse
5 | import os
6 | import sentencepiece as spm
7 |
8 | if __name__ == '__main__':
9 |
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument('--dataset_path', default='./data/wiki_zh_eval/converted_data.txt', type=str, required=False)
12 | parser.add_argument('--output_dir', default='./output_models/new_tokenizer', type=str, required=False)
13 | parser.add_argument('--vocab_size', default=20000, type=int, required=False)
14 | parser.add_argument('--model_type', default='bpe', type=str, required=False)
15 | parser.add_argument('--user_defined_symbols', default='0,1,2,3,4,5,6,7,8,9,%', type=str, required=False)
16 | parser.add_argument('--max_sentencepiece_length', default=4, type=int, required=False)
17 | args = parser.parse_args()
18 |
19 | dataset_path = args.dataset_path
20 | output_dir = args.output_dir
21 | vocab_size = args.vocab_size
22 | model_type = args.model_type
23 | user_defined_symbols = args.user_defined_symbols
24 |     max_sentencepiece_length = args.max_sentencepiece_length
25 |
26 | def mkdir(path):
27 | if not os.path.exists(path):
28 | os.makedirs(path)
29 | mkdir(output_dir)
30 |
31 | spm.SentencePieceTrainer.train(
32 | f'--input={dataset_path}'
33 | f' --model_prefix={output_dir}/example'
34 | f' --model_type={model_type}'
35 | f' --vocab_size={vocab_size}'
36 | f' --user_defined_symbols={user_defined_symbols}'
37 | f' --max_sentencepiece_length={max_sentencepiece_length}'
38 | f' --minloglevel=1'
39 | )
--------------------------------------------------------------------------------