├── verl
├── version
│ └── version
├── utils
│ ├── reward_score
│ │ ├── livecodebench
│ │ │ ├── lcb_runner
│ │ │ │ ├── __init__.py
│ │ │ │ ├── utils
│ │ │ │ │ ├── scenarios.py
│ │ │ │ │ └── path_utils.py
│ │ │ │ ├── prompts
│ │ │ │ │ └── __init__.py
│ │ │ │ ├── evaluation
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── compute_code_execution_metrics.py
│ │ │ │ │ ├── old_results_check.py
│ │ │ │ │ └── pass_k_utils.py
│ │ │ │ ├── benchmarks
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── code_execution.py
│ │ │ │ └── runner
│ │ │ │ │ ├── claude_runner.py
│ │ │ │ │ ├── mistral_runner.py
│ │ │ │ │ ├── cohere_runner.py
│ │ │ │ │ └── claude3_runner.py
│ │ │ └── __init__.py
│ │ ├── deepscaler_math
│ │ │ ├── globals.py
│ │ │ ├── __init__.py
│ │ │ └── utils
│ │ │ │ └── __init__.py
│ │ └── __init__.py
│ ├── checkpoint
│ │ └── __init__.py
│ ├── logger
│ │ ├── __init__.py
│ │ └── aggregate_logger.py
│ ├── megatron
│ │ ├── __init__.py
│ │ ├── memory.py
│ │ ├── sequence_parallel.py
│ │ └── pipeline_parallel.py
│ ├── rendezvous
│ │ └── __init__.py
│ ├── debug
│ │ ├── __init__.py
│ │ └── performance.py
│ ├── __init__.py
│ ├── dataset
│ │ ├── __init__.py
│ │ └── README.md
│ ├── config.py
│ ├── logging_utils.py
│ ├── distributed.py
│ ├── import_utils.py
│ ├── ray_utils.py
│ └── py_functional.py
├── trainer
│ ├── runtime_env.yaml
│ ├── config
│ │ ├── evaluation.yaml
│ │ ├── sft_trainer.yaml
│ │ └── generation.yaml
│ ├── __init__.py
│ └── ppo
│ │ └── __init__.py
├── models
│ ├── __init__.py
│ ├── llama
│ │ ├── __init__.py
│ │ └── megatron
│ │ │ ├── checkpoint_utils
│ │ │ │ └── __init__.py
│ │ │ ├── layers
│ │ │ │ ├── __init__.py
│ │ │ │ └── parallel_rmsnorm.py
│ │ │ └── __init__.py
│ ├── transformers
│ │ └── __init__.py
│ ├── weight_loader_registry.py
│ └── README.md
├── workers
│ ├── __init__.py
│ ├── reward_model
│ │ ├── __init__.py
│ │ ├── megatron
│ │ │ └── __init__.py
│ │ └── base.py
│ ├── rollout
│ │ ├── naive
│ │ │ └── __init__.py
│ │ ├── vllm_rollout
│ │ │ └── __init__.py
│ │ ├── __init__.py
│ │ └── base.py
│ ├── reward_manager
│ │ └── __init__.py
│ ├── actor
│ │ ├── __init__.py
│ │ └── base.py
│ ├── critic
│ │ ├── __init__.py
│ │ └── base.py
│ └── sharding_manager
│ │ ├── base.py
│ │ └── __init__.py
├── third_party
│ ├── __init__.py
│ └── vllm
│ │ ├── vllm_v_0_3_1
│ │ │ └── __init__.py
│ │ ├── vllm_v_0_4_2
│ │ │ └── __init__.py
│ │ ├── vllm_v_0_5_4
│ │ │ ├── __init__.py
│ │ │ └── hf_weight_loader.py
│ │ ├── vllm_v_0_6_3
│ │ │ ├── __init__.py
│ │ │ ├── tokenizer.py
│ │ │ └── hf_weight_loader.py
│ │ └── __init__.py
├── single_controller
│ ├── base
│ │ ├── megatron
│ │ │ ├── __init__.py
│ │ │ ├── worker.py
│ │ │ └── worker_group.py
│ │ ├── register_center
│ │ │ ├── __init__.py
│ │ │ └── ray.py
│ │ └── __init__.py
│ ├── ray
│ │ └── __init__.py
│ └── __init__.py
└── __init__.py
├── Notice.txt
├── assets
├── 7b_eval.jpg
├── 32b_eval.jpg
├── 32b_perf.jpg
└── skywork-or1-math-7b-multi-stage.png
├── docs
├── _static
│ └── logo.png
├── requirements-docs.txt
├── README.md
├── advance
│ ├── placement.rst
│ └── megatron_extension.rst
├── Makefile
└── faq
│ └── faq.rst
├── or1_data
└── eval
│ ├── aime24.parquet
│ └── aime25.parquet
├── .style.yapf
├── scripts
└── format.sh
├── tests
├── ray
│ ├── detached_worker
│ │ ├── run.sh
│ │ ├── README.md
│ │ └── client.py
│ ├── test_check_worker_alive.py
│ ├── test_rvdz.py
│ ├── test_ray_local_envs.py
│ └── check_worker_alive
│ │ └── main.py
├── e2e
│ ├── arithmetic_sequence
│ │ ├── data
│ │ │ ├── test.parquet
│ │ │ ├── train.parquet
│ │ │ └── create_dataset.py
│ │ ├── model
│ │ │ └── model.safetensors
│ │ └── rl
│ │ │ └── README.md
│ ├── __init__.py
│ ├── run_ray_trainer_rmpad.sh
│ ├── envs
│ │ ├── __init__.py
│ │ └── digit_completion
│ │ │ └── __init__.py
│ ├── run_qwen_gsm8k_function_rm_grpo.sh
│ ├── run_qwen_gsm8k_function_rm_remax.sh
│ ├── run_ray_trainer.sh
│ ├── check_results.py
│ ├── run_deepseek_megatron.sh
│ ├── run_qwen_gsm8k_function_rm.sh
│ ├── run_qwen_gsm8k_function_rm_no_rmpad.sh
│ └── run_qwen_gsm8k_model_rm.sh
├── __init__.py
├── sft
│ ├── run_sft.sh
│ ├── run_sft_sp_loss_match.sh
│ ├── run_sft_qwen05_sp2_liger.sh
│ └── run_sft_qwen05_peft.sh
├── sanity
│ ├── test_import.py
│ └── check_license.py
├── verl
│ └── utils
│ │ └── dataset
│ │ │ ├── test_rm_dataset.py
│ │ │ └── test_rl_dataset.py
└── gpu_utility
│ └── test_ops.py
├── requirements.txt
├── .readthedocs.yaml
├── examples
├── generation
│ └── run_deepseek_v2_lite_math.sh
├── sft
│ └── gsm8k
│ │ ├── run_gemma_7b.sh
│ │ ├── run_gemma_2b.sh
│ │ ├── run_deepseek_6b7.sh
│ │ ├── run_qwen_05_sp2.sh
│ │ ├── run_qwen_05_sp2_liger.sh
│ │ └── run_qwen_05_peft.sh
├── split_placement
│ └── run_deepseek7b_llm.sh
├── ppo_trainer
│ ├── run_deepseek_math_gsm8k_megatron.sh
│ ├── run_deepseek_full_hh_rlhf.sh
│ ├── run_gemma.sh
│ ├── run_deepseek7b_llm.sh
│ ├── run_deepseek_megatron.sh
│ ├── run_deepseek7b_llm_sp2.sh
│ ├── run_qwen2-7b.sh
│ └── run_qwen2.5-32b.sh
└── remax_trainer
│ ├── run_qwen2.5-3b_seq_balance.sh
│ └── run_qwen2.5-7b_seq_balance.sh
├── .github
└── workflows
│ ├── dataset.yml
│ ├── sandbox.yml
│ ├── sanity.yml
│ ├── vllm.yml
│ ├── ray_test.yml
│ ├── e2e_digit_completion.yml
│ ├── e2e_lora.yml
│ ├── yapf_format.yml
│ ├── e2e_gsm8k_megatron.yml
│ ├── model.yml
│ └── e2e_sft.yml
├── docker
├── Dockerfile.ngc.vllm
└── Dockerfile.vemlp.vllm.te
├── setup.py
├── .gitignore
└── or1_scripts
└── eval
│ ├── eval_7b.sh
│ └── eval_32b.sh
/verl/version/version:
--------------------------------------------------------------------------------
1 | 0.1
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Notice.txt:
--------------------------------------------------------------------------------
1 | Copyright 2023-2024 Bytedance Ltd. and/or its affiliates
--------------------------------------------------------------------------------
/assets/7b_eval.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/assets/7b_eval.jpg
--------------------------------------------------------------------------------
/assets/32b_eval.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/assets/32b_eval.jpg
--------------------------------------------------------------------------------
/assets/32b_perf.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/assets/32b_perf.jpg
--------------------------------------------------------------------------------
/docs/_static/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/docs/_static/logo.png
--------------------------------------------------------------------------------
/or1_data/eval/aime24.parquet:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/or1_data/eval/aime24.parquet
--------------------------------------------------------------------------------
/or1_data/eval/aime25.parquet:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/or1_data/eval/aime25.parquet
--------------------------------------------------------------------------------
/.style.yapf:
--------------------------------------------------------------------------------
1 | [style]
2 | based_on_style = google
3 | column_limit = 120
4 | indent_width = 4
5 | split_arguments_when_comma_terminated: true
--------------------------------------------------------------------------------
/scripts/format.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | pip3 install --upgrade yapf
3 | yapf -ir -vv --style ./.style.yapf verl tests single_controller examples
--------------------------------------------------------------------------------
/assets/skywork-or1-math-7b-multi-stage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/assets/skywork-or1-math-7b-multi-stage.png
--------------------------------------------------------------------------------
/tests/ray/detached_worker/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ray start --head --port=6379
3 | python3 server.py
4 | python3 client.py
5 | ray stop --force
--------------------------------------------------------------------------------
/tests/e2e/arithmetic_sequence/data/test.parquet:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/tests/e2e/arithmetic_sequence/data/test.parquet
--------------------------------------------------------------------------------
/tests/e2e/arithmetic_sequence/data/train.parquet:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/tests/e2e/arithmetic_sequence/data/train.parquet
--------------------------------------------------------------------------------
/tests/e2e/arithmetic_sequence/model/model.safetensors:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SkyworkAI/Skywork-OR1/HEAD/tests/e2e/arithmetic_sequence/model/model.safetensors
--------------------------------------------------------------------------------
/verl/trainer/runtime_env.yaml:
--------------------------------------------------------------------------------
1 | working_dir: ./
2 | excludes: ["/.git/"]
3 | env_vars:
4 | TORCH_NCCL_AVOID_RECORD_STREAMS: "1"
5 | VLLM_ATTENTION_BACKEND: "XFORMERS"
--------------------------------------------------------------------------------
/docs/requirements-docs.txt:
--------------------------------------------------------------------------------
1 | # markdown support
2 | recommonmark
3 | # markdown table support
4 | sphinx-markdown-tables
5 |
6 | # theme default rtd
7 |
8 | # crate-docs-theme
9 | sphinx-rtd-theme
--------------------------------------------------------------------------------
/verl/trainer/config/evaluation.yaml:
--------------------------------------------------------------------------------
1 | data:
2 | path: /tmp/math_Qwen2-7B-Instruct.parquet
3 | prompt_key: prompt
4 | response_key: responses
5 | data_source_key: data_source
6 | reward_model_key: reward_model
--------------------------------------------------------------------------------
/verl/utils/reward_score/deepscaler_math/globals.py:
--------------------------------------------------------------------------------
1 | """
2 | Global variables for Deepscaler repo.
3 | """
4 |
5 | # Reward function constants
6 | THOUGHT_DELIMITER_START = "<think>"
7 | THOUGHT_DELIMITER_END = "</think>"
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/__init__.py:
--------------------------------------------------------------------------------
1 | from verl.utils.reward_score.livecodebench.unit_test import lcb_compute_score, prepare_unit_test_data
2 |
3 | from verl.utils.reward_score.livecodebench.compute_score import compute_score
4 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/deepscaler_math/__init__.py:
--------------------------------------------------------------------------------
1 | """Import reward-related classes and types from the reward module."""
2 |
3 | from .reward_types import RewardConfig, RewardFn, RewardInput, RewardOutput, RewardType
4 |
5 | __all__ = ['RewardFn', 'RewardInput', 'RewardOutput', 'RewardType']
6 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/utils/scenarios.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 |
3 |
4 | class Scenario(Enum):
5 | codegeneration = "codegeneration"
6 | selfrepair = "selfrepair"
7 | testoutputprediction = "testoutputprediction"
8 | codeexecution = "codeexecution"
9 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | accelerate
2 | codetiming
3 | datasets
4 | dill
5 | flash-attn
6 | hydra-core
7 | numpy
8 | pandas
9 | peft
10 | pyarrow>=15.0.0
11 | pybind11
12 | ray>=2.38
13 | tensordict<0.6
14 | transformers<4.48
15 | vllm==0.6.3
16 | wandb
17 | liger-kernel
18 | pylatexenc
19 | pyext
20 | anthropic
21 | math_verify
--------------------------------------------------------------------------------
/tests/ray/detached_worker/README.md:
--------------------------------------------------------------------------------
1 | # Detached Worker
2 | ## How to run (Only on a single node)
3 | - Start a local ray cluster:
4 | ```bash
5 | ray start --head --port=6379
6 | ```
7 | - Run the server
8 | ```bash
9 | python3 server.py
10 | ```
11 | - On another terminal, run the client
12 | ```bash
13 | python3 client.py
14 | ```
15 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 |
4 | version: 2
5 |
6 | build:
7 | os: ubuntu-22.04
8 | tools:
9 | python: "3.8"
10 |
11 | sphinx:
12 | configuration: docs/conf.py
13 |
14 | python:
15 | install:
16 | - requirements: docs/requirements-docs.txt
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # verl documentation
2 |
3 | ## Build the docs
4 |
5 | ```bash
6 | # Install dependencies.
7 | pip install -r requirements-docs.txt
8 |
9 | # Build the docs.
10 | make clean
11 | make html
12 | ```
13 |
14 | ## Open the docs with your browser
15 |
16 | ```bash
17 | python -m http.server -d _build/html/
18 | ```
19 | Launch your browser and open localhost:8000.
--------------------------------------------------------------------------------
/verl/utils/reward_score/deepscaler_math/utils/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides utility functions for grading mathematical answers and extracting answers from LaTeX formatted strings.
3 | """
4 |
5 | from verl.utils.reward_score.deepscaler_math.utils.utils import (
6 | extract_answer,
7 | grade_answer_sympy,
8 | grade_answer_mathd,
9 | )
10 |
11 | __all__ = [
12 | "extract_answer",
13 | "grade_answer_sympy",
14 | "grade_answer_mathd"
15 | ]
16 |
--------------------------------------------------------------------------------
/docs/advance/placement.rst:
--------------------------------------------------------------------------------
1 | Ray API Design Tutorial
2 | =======================================
3 |
4 | We provide a tutorial for our Ray API design, including:
5 |
6 | - Ray basic concepts
7 | - Resource Pool and RayWorkerGroup
8 | - Data Dispatch, Execution and Collection
9 | - Initialize the RayWorkerGroup and execute the distributed computation in the given Resource Pool
10 |
11 | See details in `tutorial.ipynb `_.
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/prompts/__init__.py:
--------------------------------------------------------------------------------
1 | from verl.utils.reward_score.livecodebench.lcb_runner.prompts.code_execution import format_prompt_execution, format_prompt_execution_cot
2 | from verl.utils.reward_score.livecodebench.lcb_runner.prompts.code_generation import format_prompt_generation
3 | from verl.utils.reward_score.livecodebench.lcb_runner.prompts.test_output_prediction import format_prompt_test_output
4 | from verl.utils.reward_score.livecodebench.lcb_runner.prompts.self_repair import format_prompt_self_repair
5 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | from verl.utils.reward_score.livecodebench.lcb_runner.evaluation.compute_code_generation_metrics import codegen_metrics
2 | from verl.utils.reward_score.livecodebench.lcb_runner.evaluation.compute_code_execution_metrics import code_execution_metrics
3 | from verl.utils.reward_score.livecodebench.lcb_runner.evaluation.compute_test_output_prediction_metrics import (
4 | test_output_metrics,
5 | )
6 | from verl.utils.reward_score.livecodebench.lcb_runner.evaluation.pass_k_utils import extract_instance_results
7 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/benchmarks/__init__.py:
--------------------------------------------------------------------------------
1 | from verl.utils.reward_score.livecodebench.lcb_runner.benchmarks.code_generation import (
2 | CodeGenerationProblem,
3 | load_code_generation_dataset,
4 | load_code_generation_dataset_not_fast,
5 | )
6 | from verl.utils.reward_score.livecodebench.lcb_runner.benchmarks.test_output_prediction import (
7 | TestOutputPredictionProblem,
8 | load_test_prediction_dataset,
9 | )
10 | from verl.utils.reward_score.livecodebench.lcb_runner.benchmarks.code_execution import (
11 | CodeExecutionProblem,
12 | load_code_execution_dataset,
13 | )
14 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
--------------------------------------------------------------------------------
/tests/e2e/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/trainer/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/utils/checkpoint/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
--------------------------------------------------------------------------------
/verl/workers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/tests/e2e/run_ray_trainer_rmpad.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e -x
4 |
5 | python3 tests/e2e/arithmetic_sequence/rl/main_trainer.py \
6 | data.train_files=tests/e2e/arithmetic_sequence/data/train.parquet \
7 | data.val_files=tests/e2e/arithmetic_sequence/data/test.parquet \
8 | actor_rollout_ref.model.path=tests/e2e/arithmetic_sequence/model \
9 | actor_rollout_ref.rollout.name=vllm \
10 | actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
11 | actor_rollout_ref.model.tokenizer_path=tests/e2e/arithmetic_sequence/model \
12 | critic.model.path=Qwen/Qwen2.5-0.5B \
13 | critic.model.use_remove_padding=True \
14 | trainer.total_epochs=1
--------------------------------------------------------------------------------
/verl/models/llama/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/third_party/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/trainer/ppo/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/utils/logger/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/utils/megatron/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/models/transformers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/utils/rendezvous/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/examples/generation/run_deepseek_v2_lite_math.sh:
--------------------------------------------------------------------------------
1 | python3 -m verl.trainer.main_generation \
2 | trainer.nnodes=1 \
3 | trainer.n_gpus_per_node=8 \
4 | data.path=~/data/rlhf/gsm8k/test.parquet \
5 | data.prompt_key=prompt \
6 | data.n_samples=1 \
7 | data.output_path=~/data/rlhf/math/deepseek_v2_lite_gen_test.parquet \
8 | model.path=deepseek-ai/deepseek-llm-7b-chat \
9 | +model.trust_remote_code=True \
10 | rollout.temperature=1.0 \
11 | rollout.top_k=50 \
12 | rollout.top_p=0.7 \
13 | rollout.prompt_length=2048 \
14 | rollout.response_length=1024 \
15 | rollout.tensor_model_parallel_size=2 \
16 | rollout.gpu_memory_utilization=0.8
17 |
--------------------------------------------------------------------------------
/verl/third_party/vllm/vllm_v_0_3_1/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/third_party/vllm/vllm_v_0_4_2/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/third_party/vllm/vllm_v_0_5_4/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/third_party/vllm/vllm_v_0_6_3/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = verl
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/verl/single_controller/base/megatron/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/models/llama/megatron/checkpoint_utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/single_controller/base/register_center/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/verl/utils/debug/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .performance import log_gpu_memory_usage
--------------------------------------------------------------------------------
/verl/workers/reward_model/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .base import BasePPORewardModel
16 |
--------------------------------------------------------------------------------
/verl/workers/rollout/naive/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .naive_rollout import NaiveRollout
16 |
--------------------------------------------------------------------------------
/verl/workers/rollout/vllm_rollout/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .vllm_rollout import vLLMRollout
--------------------------------------------------------------------------------
/verl/workers/reward_model/megatron/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .reward_model import MegatronRewardModel
16 |
--------------------------------------------------------------------------------
/tests/e2e/envs/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .digit_completion import DigitCompletion
16 |
17 | __all__ = ['DigitCompletion']
--------------------------------------------------------------------------------
/verl/utils/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from . import tokenizer
16 | from .tokenizer import *
17 |
18 | __all__ = tokenizer.__all__
--------------------------------------------------------------------------------
/verl/single_controller/base/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .worker import Worker
16 | from .worker_group import WorkerGroup, ClassWithInitArgs, ResourcePool
17 |
--------------------------------------------------------------------------------
/verl/utils/dataset/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .rl_dataset import RLHFDataset
16 | from .rm_dataset import RMDataset
17 | from .sft_dataset import SFTDataset
18 |
--------------------------------------------------------------------------------
/verl/workers/reward_manager/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 PRIME team and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .naive import NaiveRewardManager
16 | from .prime import PrimeRewardManager
17 | from .yr_code import YRRewardManager
--------------------------------------------------------------------------------
/verl/workers/actor/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .base import BasePPOActor
16 | from .dp_actor import DataParallelPPOActor
17 |
18 | __all__ = ["BasePPOActor", "DataParallelPPOActor"]
19 |
--------------------------------------------------------------------------------
/verl/workers/critic/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .base import BasePPOCritic
16 | from .dp_critic import DataParallelPPOCritic
17 |
18 | __all__ = ["BasePPOCritic", "DataParallelPPOCritic"]
19 |
--------------------------------------------------------------------------------
/tests/sft/run_sft.sh:
--------------------------------------------------------------------------------
1 | # Tested with 2 & 4 GPUs
2 |
3 | set -x
4 |
5 | torchrun --standalone --nnodes=1 --nproc_per_node=8 \
6 | -m verl.trainer.fsdp_sft_trainer \
7 | data.train_files=$HOME/data/gsm8k/train.parquet \
8 | data.val_files=$HOME/data/gsm8k/test.parquet \
9 | data.prompt_key=extra_info \
10 | data.response_key=extra_info \
11 | +data.prompt_dict_keys=['question'] \
12 | +data.response_dict_keys=['answer'] \
13 | data.micro_batch_size_per_gpu=32 \
14 | model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \
15 | trainer.default_local_dir=$HOME/ckpts/ \
16 | trainer.project_name=qwen2.5-sft \
17 | trainer.experiment_name=gsm8k-sft-gemma-2b-it \
18 | trainer.total_training_steps=1 \
19 | trainer.logger=['console'] \
20 | trainer.default_hdfs_dir=null $@
21 |
22 | rm -rf $HOME/ckpts/
--------------------------------------------------------------------------------
/verl/workers/rollout/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .base import BaseRollout
16 | from .naive import NaiveRollout
17 | from .hf_rollout import HFRollout
18 |
19 | __all__ = ["BaseRollout", "NaiveRollout", "HFRollout"]
20 |
--------------------------------------------------------------------------------
/verl/single_controller/ray/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .base import RayResourcePool, RayClassWithInitArgs, RayWorkerGroup, create_colocated_worker_cls
16 | from .megatron import (MegatronRayWorkerGroup, DistRankInfo, DistGlobalInfo)
--------------------------------------------------------------------------------
/verl/utils/dataset/README.md:
--------------------------------------------------------------------------------
1 | # Dataset Format
2 | ## RLHF dataset
3 | We combine all the data sources into a single parquet file. We organize the prompts directly in the chat format so that multi-turn chats can be easily incorporated. In the prompt, we may add instruction-following text to guide the model to output the answers in a particular format so that we can extract them.
4 |
5 | Math problems
6 | ```json
7 | {
8 | "data_source": "openai/gsm8k",
9 | "prompt": [{"role": "user", "content": "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May? Let's think step by step and output the final answer after \"####\""}],
10 | "ability": "math",
11 | "reward_model": {
12 | "style": "rule",
13 | "ground_truth": ["72"]
14 | }
15 | }
16 | ```
17 |
--------------------------------------------------------------------------------
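For illustration, a minimal sketch of writing one record in the format above to parquet with pandas (assumes pyarrow is installed; the output filename is hypothetical):

```python
# Build one RLHF-format record (fields follow the README example above)
# and write it to a single-row parquet file.
import pandas as pd

record = {
    "data_source": "openai/gsm8k",
    "prompt": [{
        "role": "user",
        "content": "Natalia sold clips to 48 of her friends in April, and then she sold "
                   "half as many clips in May. How many clips did Natalia sell altogether "
                   "in April and May? Let's think step by step and output the final "
                   'answer after "####"',
    }],
    "ability": "math",
    "reward_model": {"style": "rule", "ground_truth": ["72"]},
}

# Nested fields become parquet struct/list columns via the pyarrow engine.
pd.DataFrame([record]).to_parquet("rlhf_example.parquet")
```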
/tests/sanity/test_import.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | def test_import():
17 | import verl
18 | print(verl.__version__)
19 |
20 |
21 | def test_single_controller_import():
22 | import verl.single_controller
23 | print(verl.single_controller.__version__)
24 |
--------------------------------------------------------------------------------
/verl/single_controller/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import os
16 |
17 | version_folder = os.path.dirname(os.path.join(os.path.abspath(__file__)))
18 |
19 | with open(os.path.join(os.path.join(version_folder, os.pardir), 'version/version')) as f:
20 | __version__ = f.read().strip()
21 |
--------------------------------------------------------------------------------
/tests/sft/run_sft_sp_loss_match.sh:
--------------------------------------------------------------------------------
1 | # Tested with 2 & 4 GPUs
2 |
3 | set -x
4 |
5 | torchrun --standalone --nnodes=1 --nproc_per_node=8 \
6 | tests/sft/test_sp_loss_match.py \
7 | data.train_files=$HOME/data/gsm8k/train.parquet \
8 | data.val_files=$HOME/data/gsm8k/test.parquet \
9 | data.prompt_key=extra_info \
10 | data.response_key=extra_info \
11 | +data.prompt_dict_keys=['question'] \
12 | +data.response_dict_keys=['answer'] \
13 | data.micro_batch_size=32 \
14 | model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \
15 | ulysses_sequence_parallel_size=2 \
16 | use_remove_padding=True \
17 | trainer.default_local_dir=$HOME/ckpts/ \
18 | trainer.project_name=qwen2.5-sft \
19 | trainer.experiment_name=gsm8k-sft-gemma-2b-it \
20 | trainer.total_training_steps=1 \
21 | trainer.logger=['console'] \
22 | trainer.default_hdfs_dir=null $@
23 |
24 | rm -rf $HOME/ckpts/
25 |
--------------------------------------------------------------------------------
/verl/models/llama/megatron/layers/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .parallel_attention import ParallelLlamaAttention
16 | from .parallel_decoder import ParallelLlamaDecoderLayer, ParallelLlamaDecoderLayerRmPad
17 | from .parallel_mlp import ParallelLlamaMLP
18 | from .parallel_rmsnorm import ParallelLlamaRMSNorm
19 |
--------------------------------------------------------------------------------
/verl/utils/config.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from typing import Dict
16 |
17 | from omegaconf import DictConfig
18 |
19 |
20 | def update_dict_with_config(dictionary: Dict, config: DictConfig):
21 | for key in dictionary:
22 | if hasattr(config, key):
23 | dictionary[key] = getattr(config, key)
24 |
--------------------------------------------------------------------------------
/verl/utils/logging_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import logging
16 |
17 |
18 | def set_basic_config(level):
19 | """
20 |     This function sets the global logging format and level. It will be called when verl is imported.
21 | """
22 | logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s', level=level)
23 |
--------------------------------------------------------------------------------
/examples/sft/gsm8k/run_gemma_7b.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | if [ "$#" -lt 2 ]; then
4 |     echo "Usage: run_gemma_7b.sh <nproc_per_node> <save_path> [other_configs...]"
5 | exit 1
6 | fi
7 |
8 | nproc_per_node=$1
9 | save_path=$2
10 |
11 | # Shift the arguments so $@ refers to the rest
12 | shift 2
13 |
14 | torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \
15 | -m verl.trainer.fsdp_sft_trainer \
16 | data.train_files=$HOME/data/gsm8k/train.parquet \
17 | data.val_files=$HOME/data/gsm8k/test.parquet \
18 | data.prompt_key=prompt \
19 | data.response_key=answer \
20 | data.micro_batch_size_per_gpu=4 \
21 | model.partial_pretrain=google/gemma-1.1-7b-it \
22 | trainer.default_local_dir=$save_path \
23 | trainer.project_name=gsm8k-sft \
24 | trainer.experiment_name=gsm8k-sft-gemma-1.1-7b-it \
25 | trainer.total_epochs=4 \
26 | trainer.logger=['console','wandb'] \
27 | trainer.default_hdfs_dir=null $@
--------------------------------------------------------------------------------
/tests/e2e/envs/digit_completion/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .task import DigitCompletion, generate_ground_truth_response
16 | from .tokenizer import CharTokenizer
17 |
18 | from transformers import AutoTokenizer, LlamaConfig
19 |
20 | AutoTokenizer.register(LlamaConfig, CharTokenizer, exist_ok=True)
21 |
22 | __all__ = ['DigitCompletion', 'generate_ground_truth_response', 'CharTokenizer']
--------------------------------------------------------------------------------
/verl/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import os
16 |
17 | version_folder = os.path.dirname(os.path.abspath(__file__))
18 |
19 | with open(os.path.join(version_folder, 'version/version')) as f:
20 | __version__ = f.read().strip()
21 |
22 | from .protocol import DataProto
23 |
24 | from .utils.logging_utils import set_basic_config
25 | import logging
26 |
27 | set_basic_config(level=logging.WARNING)
28 |
--------------------------------------------------------------------------------
/docs/faq/faq.rst:
--------------------------------------------------------------------------------
1 | Frequently Asked Questions
2 | ====================================
3 |
4 | Ray related
5 | ------------
6 |
7 | How to add a breakpoint for debugging with distributed Ray?
8 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 |
10 | Please check out the official debugging guide from Ray: https://docs.ray.io/en/latest/ray-observability/ray-distributed-debugger.html
11 |
12 |
13 | Distributed training
14 | ------------------------
15 |
16 | How to run multi-node post-training with Ray?
17 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
18 |
19 | You can start a Ray cluster and submit a Ray job by following the official guide from Ray: https://docs.ray.io/en/latest/ray-core/starting-ray.html
20 |
21 | If your cluster is managed by Slurm, please refer to the guide for deploying Ray on Slurm: https://docs.ray.io/en/latest/cluster/vms/user-guides/community/slurm.html
22 |
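23 | Once every node has joined the cluster (``ray start --head`` on the head node and
24 | ``ray start --address=<head_node_ip>:6379`` on each worker node), a minimal sanity check in plain Ray
25 | (not specific to verl), such as the sketch below, can confirm that all GPUs are visible before you submit
26 | the training job:
27 | 
28 | .. code-block:: python
29 | 
30 |     import ray
31 | 
32 |     # Attach to the already-running cluster instead of starting a local one.
33 |     ray.init(address="auto")
34 | 
35 |     # Every node's CPUs and GPUs should be listed here; if a node is missing,
36 |     # re-check its `ray start --address=...` command and the network/firewall setup.
37 |     print(ray.cluster_resources())
38 | 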
--------------------------------------------------------------------------------
/verl/models/llama/megatron/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from .modeling_llama_megatron import (
16 | # original model with megatron
17 | ParallelLlamaModel,
18 | ParallelLlamaForCausalLM,
19 | # rmpad with megatron
20 | ParallelLlamaForCausalLMRmPad,
21 | ParallelLlamaForValueRmPad,
22 | # rmpad with megatron and pipeline parallelism
23 | ParallelLlamaForCausalLMRmPadPP,
24 | ParallelLlamaForValueRmPadPP)
25 |
--------------------------------------------------------------------------------
/examples/sft/gsm8k/run_gemma_2b.sh:
--------------------------------------------------------------------------------
1 | # Tested with 2 & 4 GPUs
2 |
3 | set -x
4 |
5 | if [ "$#" -lt 2 ]; then
6 |     echo "Usage: run_gemma_2b.sh <nproc_per_node> <save_path> [other_configs...]"
7 | exit 1
8 | fi
9 |
10 | nproc_per_node=$1
11 | save_path=$2
12 |
13 | # Shift the arguments so $@ refers to the rest
14 | shift 2
15 |
16 | torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \
17 | -m verl.trainer.fsdp_sft_trainer \
18 | data.train_files=$HOME/data/gsm8k/train.parquet \
19 | data.val_files=$HOME/data/gsm8k/test.parquet \
20 | data.prompt_key=extra_info \
21 | data.response_key=extra_info \
22 | +data.prompt_dict_keys=['question'] \
23 | +data.response_dict_keys=['answer'] \
24 | data.micro_batch_size_per_gpu=4 \
25 | model.partial_pretrain=google/gemma-2b-it \
26 | trainer.default_local_dir=$save_path \
27 | trainer.project_name=gsm8k-sft \
28 | trainer.experiment_name=gsm8k-sft-gemma-2b-it \
29 | trainer.total_epochs=2 \
30 | trainer.logger=['console','wandb'] \
31 | trainer.default_hdfs_dir=null $@
--------------------------------------------------------------------------------
/examples/sft/gsm8k/run_deepseek_6b7.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | if [ "$#" -lt 2 ]; then
4 |     echo "Usage: run_deepseek_6b7.sh <nproc_per_node> <save_path> [other_configs...]"
5 | exit 1
6 | fi
7 |
8 | nproc_per_node=$1
9 | save_path=$2
10 |
11 | # Shift the arguments so $@ refers to the rest
12 | shift 2
13 |
14 | torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \
15 | -m verl.trainer.fsdp_sft_trainer \
16 | data.train_files=$HOME/data/gsm8k/train.parquet \
17 | data.val_files=$HOME/data/gsm8k/test.parquet \
18 | data.prompt_key=extra_info \
19 | data.response_key=extra_info \
20 | +data.prompt_dict_keys=['question'] \
21 | +data.response_dict_keys=['answer'] \
22 | data.micro_batch_size_per_gpu=4 \
23 | model.partial_pretrain=deepseek-ai/deepseek-coder-6.7b-instruct \
24 | trainer.default_local_dir=$save_path \
25 | trainer.project_name=gsm8k-sft \
26 | trainer.experiment_name=gsm8k-sft-deepseek-coder-6.7b-instruct \
27 | trainer.total_epochs=4 \
28 | trainer.logger=['console','wandb'] \
29 | trainer.default_hdfs_dir=null $@
--------------------------------------------------------------------------------
/verl/single_controller/base/register_center/ray.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import ray
16 |
17 |
18 | @ray.remote
19 | class WorkerGroupRegisterCenter:
20 |
21 | def __init__(self, rank_zero_info):
22 | self.rank_zero_info = rank_zero_info
23 |
24 | def get_rank_zero_info(self):
25 | return self.rank_zero_info
26 |
27 |
28 | def create_worker_group_register_center(name, info):
29 | return WorkerGroupRegisterCenter.options(name=name).remote(info)
30 |
--------------------------------------------------------------------------------
/.github/workflows/dataset.yml:
--------------------------------------------------------------------------------
1 | name: dataset
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/dataset.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/dataset.yml
18 |
19 |
20 |
21 | jobs:
22 | ray:
23 | runs-on: [self-hosted, gpu]
24 | steps:
25 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
26 | with:
27 | fetch-depth: 0
28 | - name: Install the current repository
29 | run: |
30 | pip install -e .[test] --user
31 | - name: Running dataset tests
32 | run: |
33 | [ ! -d "$HOME/verl-data" ] && git clone --depth 1 https://github.com/eric-haibin-lin/verl-data ~/verl-data
34 | pytest -s -x tests/verl
35 | - name: Running ray test using cupy (move it to L20 when dockerfile ready)
36 | run: |
37 | cd tests/ray
38 | pytest -s -x test_rvdz.py
--------------------------------------------------------------------------------
/verl/workers/sharding_manager/base.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | Sharding manager to implement HybridEngine
16 | """
17 |
18 | from verl import DataProto
19 |
20 |
21 | class BaseShardingManager:
22 |
23 | def __enter__(self):
24 | pass
25 |
26 | def __exit__(self, exc_type, exc_value, traceback):
27 | pass
28 |
29 | def preprocess_data(self, data: DataProto) -> DataProto:
30 | return data
31 |
32 | def postprocess_data(self, data: DataProto) -> DataProto:
33 | return data
34 |
--------------------------------------------------------------------------------
/examples/sft/gsm8k/run_qwen_05_sp2.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | if [ "$#" -lt 2 ]; then
4 |     echo "Usage: run_qwen_05_sp2.sh <nproc_per_node> <save_path> [other_configs...]"
5 | exit 1
6 | fi
7 |
8 | nproc_per_node=$1
9 | save_path=$2
10 |
11 | # Shift the arguments so $@ refers to the rest
12 | shift 2
13 |
14 | torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \
15 | -m verl.trainer.fsdp_sft_trainer \
16 | data.train_files=$HOME/data/gsm8k/train.parquet \
17 | data.val_files=$HOME/data/gsm8k/test.parquet \
18 | data.prompt_key=extra_info \
19 | data.response_key=extra_info \
20 | optim.lr=1e-4 \
21 | +data.prompt_dict_keys=['question'] \
22 | +data.response_dict_keys=['answer'] \
23 | data.micro_batch_size=4 \
24 | model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \
25 | trainer.default_local_dir=$save_path \
26 | trainer.project_name=gsm8k-sft \
27 | trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct-sp2 \
28 | trainer.logger=['console'] \
29 | trainer.total_training_steps=1 \
30 | trainer.default_hdfs_dir=null $@ \
31 | ulysses_sequence_parallel_size=2 \
32 | use_remove_padding=true
33 |
--------------------------------------------------------------------------------
/examples/sft/gsm8k/run_qwen_05_sp2_liger.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | if [ "$#" -lt 2 ]; then
4 |     echo "Usage: run_qwen_05_sp2_liger.sh <nproc_per_node> <save_path> [other_configs...]"
5 | exit 1
6 | fi
7 |
8 | nproc_per_node=$1
9 | save_path=$2
10 |
11 | # Shift the arguments so $@ refers to the rest
12 | shift 2
13 |
14 | torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \
15 | -m verl.trainer.fsdp_sft_trainer \
16 | data.train_files=$HOME/data/gsm8k/train.parquet \
17 | data.val_files=$HOME/data/gsm8k/test.parquet \
18 | data.prompt_key=extra_info \
19 | data.response_key=extra_info \
20 | optim.lr=1e-4 \
21 | +data.prompt_dict_keys=['question'] \
22 | +data.response_dict_keys=['answer'] \
23 | data.micro_batch_size=4 \
24 | model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \
25 | model.use_liger=True \
26 | trainer.default_local_dir=$save_path \
27 | trainer.project_name=gsm8k-sft \
28 | trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct-sp2-liger \
29 | trainer.logger=['console'] \
30 | trainer.default_hdfs_dir=null $@ \
31 | ulysses_sequence_parallel_size=2 \
32 | use_remove_padding=true
33 |
--------------------------------------------------------------------------------
/tests/sft/run_sft_qwen05_sp2_liger.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | if [ "$#" -lt 2 ]; then
4 |     echo "Usage: run_sft_qwen05_sp2_liger.sh <nproc_per_node> <save_path> [other_configs...]"
5 | exit 1
6 | fi
7 |
8 | nproc_per_node=$1
9 | save_path=$2
10 |
11 | # Shift the arguments so $@ refers to the rest
12 | shift 2
13 |
14 | torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \
15 | -m verl.trainer.fsdp_sft_trainer \
16 | data.train_files=$HOME/data/gsm8k/train.parquet \
17 | data.val_files=$HOME/data/gsm8k/test.parquet \
18 | data.prompt_key=extra_info \
19 | data.response_key=extra_info \
20 | optim.lr=1e-4 \
21 | +data.prompt_dict_keys=['question'] \
22 | +data.response_dict_keys=['answer'] \
23 | data.micro_batch_size=4 \
24 | model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \
25 | model.use_liger=True \
26 | trainer.default_local_dir=$save_path \
27 | trainer.project_name=gsm8k-sft \
28 | trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct-sp2-liger \
29 | trainer.logger=['console'] \
30 | trainer.total_training_steps=1 \
31 | trainer.default_hdfs_dir=null $@ \
32 | ulysses_sequence_parallel_size=2 \
33 | use_remove_padding=true
--------------------------------------------------------------------------------
/verl/models/weight_loader_registry.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | def get_weight_loader(arch: str):
17 | from verl.models.llama.megatron.checkpoint_utils.llama_loader import load_state_dict_to_megatron_llama
18 | _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY = {'LlamaForCausalLM': load_state_dict_to_megatron_llama}
19 |
20 | if arch in _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY:
21 | return _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY[arch]
22 |     raise ValueError(f"Model architecture {arch} is not supported for now. "
23 |                      f"Supported architectures: {list(_MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY.keys())}")
24 |
--------------------------------------------------------------------------------
/verl/utils/distributed.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Utilities for distributed training."""
15 | import os
16 |
17 |
18 | def initialize_global_process_group(timeout_second=36000):
19 | import torch.distributed
20 | from datetime import timedelta
21 | torch.distributed.init_process_group('nccl', timeout=timedelta(seconds=timeout_second))
22 | local_rank = int(os.environ["LOCAL_RANK"])
23 | rank = int(os.environ["RANK"])
24 | world_size = int(os.environ["WORLD_SIZE"])
25 |
26 | if torch.distributed.is_initialized():
27 | torch.cuda.set_device(local_rank)
28 | return local_rank, rank, world_size
29 |
--------------------------------------------------------------------------------
/verl/workers/critic/base.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | Base class for a critic
16 | """
17 | from abc import ABC, abstractmethod
18 |
19 | import torch
20 |
21 | from verl import DataProto
22 |
23 | __all__ = ['BasePPOCritic']
24 |
25 |
26 | class BasePPOCritic(ABC):
27 |
28 | def __init__(self, config):
29 | super().__init__()
30 | self.config = config
31 |
32 | @abstractmethod
33 | def compute_values(self, data: DataProto) -> torch.Tensor:
34 | """Compute values"""
35 | pass
36 |
37 | @abstractmethod
38 | def update_critic(self, data: DataProto):
39 | """Update the critic"""
40 | pass
41 |
--------------------------------------------------------------------------------
/.github/workflows/sandbox.yml:
--------------------------------------------------------------------------------
1 | name: sandbox
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/sandbox.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/sandbox.yml
18 |
19 | jobs:
20 | sandbox:
21 | runs-on: [self-hosted, l20-0]
22 | env:
23 | HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
24 | HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
25 | NO_PROXY: "localhost,127.0.0.1"
26 | HF_HUB_ENABLE_HF_TRANSFER: 1
27 | container:
28 | image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
29 | options: --gpus all --shm-size=10g
30 | steps:
31 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
32 | with:
33 | fetch-depth: 0
34 | - name: Install the current repository
35 | run: |
36 | pip3 install hf_transfer
37 | pip3 install -e .[test]
38 | pip3 install vllm==0.5.4
39 | - name: Running sandbox tests on 8 L20 GPUs
40 | run: |
41 | cd tests/sandbox
42 | pytest -s -x .
43 |
--------------------------------------------------------------------------------
/.github/workflows/sanity.yml:
--------------------------------------------------------------------------------
1 | name: sanity
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/sanity.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/sanity.yml
18 |
19 | jobs:
20 | sanity:
21 | runs-on: ubuntu-latest
22 | strategy:
23 | matrix:
24 | python-version: ["3.10"]
25 | steps:
26 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
27 | - name: Set up Python ${{ matrix.python-version }}
28 | uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
29 | with:
30 | python-version: ${{ matrix.python-version }}
31 | - name: Install the current repository
32 | run: |
33 | pip install -e .[test]
34 | - name: Run sanity test
35 | run: |
36 | pytest -s -x tests/sanity
37 |       - name: Run utility test
38 | run: |
39 | pytest -s -x tests/utility
40 | - name: Run license test
41 | run: |
42 | python3 tests/sanity/check_license.py --directory .
43 |
--------------------------------------------------------------------------------
/examples/sft/gsm8k/run_qwen_05_peft.sh:
--------------------------------------------------------------------------------
1 | # Tested with 2 & 4 GPUs
2 |
3 | set -x
4 |
5 | if [ "$#" -lt 2 ]; then
6 |     echo "Usage: run_qwen_05_peft.sh <nproc_per_node> <save_path> [other_configs...]"
7 | exit 1
8 | fi
9 |
10 | nproc_per_node=$1
11 | save_path=$2
12 |
13 | # Shift the arguments so $@ refers to the rest
14 | shift 2
15 |
16 | torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \
17 | -m verl.trainer.fsdp_sft_trainer \
18 | data.train_files=$HOME/data/gsm8k/train.parquet \
19 | data.val_files=$HOME/data/gsm8k/test.parquet \
20 | data.prompt_key=extra_info \
21 | data.response_key=extra_info \
22 | optim.lr=1e-4 \
23 | +data.prompt_dict_keys=['question'] \
24 | +data.response_dict_keys=['answer'] \
25 | data.micro_batch_size_per_gpu=4 \
26 | model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \
27 | trainer.default_local_dir=$save_path \
28 | trainer.project_name=gsm8k-sft \
29 | trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct \
30 | trainer.logger=['console'] \
31 | trainer.total_epochs=1 \
32 | trainer.default_hdfs_dir=null $@ \
33 |     model.lora_rank=32 \
34 | model.lora_alpha=16 \
35 | model.target_modules=all-linear
36 |
37 | # Or you can do this:
38 | # model.target_modules=[q_proj,v_proj] \
39 |
--------------------------------------------------------------------------------
/verl/workers/rollout/base.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from abc import ABC, abstractmethod
16 | from typing import Iterable, Union
17 |
18 | from verl import DataProto
19 |
20 | __all__ = ['BaseRollout']
21 |
22 |
23 | class BaseRollout(ABC):
24 |
25 | def __init__(self):
26 |         """Base class for rollout workers.
27 | 
28 |         Note:
29 |             Concrete rollouts typically wrap a dataloader (an Iterable of TensorDict) that consistently
30 |             generates prompts and handles stopping when training stops; this base constructor takes no arguments.
31 |         """
32 | super().__init__()
33 |
34 | @abstractmethod
35 | def generate_sequences(self, prompts: DataProto) -> DataProto:
36 | """Generate sequences"""
37 | pass
38 |
--------------------------------------------------------------------------------
/tests/sft/run_sft_qwen05_peft.sh:
--------------------------------------------------------------------------------
1 | # Tested with 2 & 4 GPUs
2 |
3 | set -x
4 |
5 | if [ "$#" -lt 2 ]; then
6 |     echo "Usage: run_sft_qwen05_peft.sh <nproc_per_node> <save_path> [other_configs...]"
7 | exit 1
8 | fi
9 |
10 | nproc_per_node=$1
11 | save_path=$2
12 |
13 | # Shift the arguments so $@ refers to the rest
14 | shift 2
15 |
16 | torchrun --standalone --nnodes=1 --nproc_per_node=$nproc_per_node \
17 | -m verl.trainer.fsdp_sft_trainer \
18 | data.train_files=$HOME/data/gsm8k/train.parquet \
19 | data.val_files=$HOME/data/gsm8k/test.parquet \
20 | data.prompt_key=extra_info \
21 | data.response_key=extra_info \
22 | optim.lr=1e-4 \
23 | +data.prompt_dict_keys=['question'] \
24 | +data.response_dict_keys=['answer'] \
25 | data.micro_batch_size_per_gpu=4 \
26 | model.partial_pretrain=Qwen/Qwen2.5-0.5B-Instruct \
27 | trainer.default_local_dir=$save_path \
28 | trainer.project_name=gsm8k-sft \
29 | trainer.experiment_name=gsm8k-sft-qwen-2.5-0.5b-instruct \
30 | trainer.logger=['console'] \
31 | trainer.total_training_steps=1 \
32 | trainer.default_hdfs_dir=null $@ \
33 |     model.lora_rank=32 \
34 | model.lora_alpha=16 \
35 | model.target_modules=all-linear
36 |
37 | # Or you can do this:
38 | # model.target_modules=[q_proj,v_proj] \
39 |
--------------------------------------------------------------------------------
/verl/workers/sharding_manager/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from verl.utils.import_utils import is_vllm_available, is_megatron_core_available
16 |
17 | from .base import BaseShardingManager
18 | from .fsdp_ulysses import FSDPUlyssesShardingManager
19 |
20 | # Default to None; these are overridden below when the optional backends are available.
21 | AllGatherPPModel = None
22 | MegatronVLLMShardingManager = None
23 | 
24 | if is_megatron_core_available() and is_vllm_available():
25 |     from .megatron_vllm import AllGatherPPModel, MegatronVLLMShardingManager
29 |
30 | if is_vllm_available():
31 | from .fsdp_vllm import FSDPVLLMShardingManager
32 | else:
33 | FSDPVLLMShardingManager = None
34 |
--------------------------------------------------------------------------------
/.github/workflows/vllm.yml:
--------------------------------------------------------------------------------
1 | name: vllm
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/vllm.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/vllm.yml
18 |
19 | jobs:
20 | vllm:
21 | runs-on: [self-hosted, l20-0]
22 | env:
23 | HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
24 | HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
25 | NO_PROXY: "localhost,127.0.0.1"
26 | HF_HUB_ENABLE_HF_TRANSFER: 1
27 | container:
28 | image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
29 | options: --gpus all --shm-size=10g
30 | steps:
31 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
32 | with:
33 | fetch-depth: 0
34 | - name: Install the current repository
35 | run: |
36 | pip3 install hf_transfer
37 | pip3 install -e .[test]
38 | pip3 install vllm==0.5.4
39 | - name: Running vllm tests on 8 L20 GPUs
40 | run: |
41 | cd tests/rollout
42 | torchrun --standalone --nnodes=1 --nproc_per_node=8 $(which pytest) -s test_vllm_hf_loader.py
43 |
--------------------------------------------------------------------------------
/.github/workflows/ray_test.yml:
--------------------------------------------------------------------------------
1 | name: ray
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/ray_test.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/ray_test.yml
18 |
19 |
20 |
21 | jobs:
22 | ray:
23 | runs-on: [self-hosted, l20-0]
24 | env:
25 | HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
26 | HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
27 | NO_PROXY: "localhost,127.0.0.1"
28 | HF_HUB_ENABLE_HF_TRANSFER: 1
29 | container:
30 | image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
31 | options: --gpus all --shm-size=10g
32 | steps:
33 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
34 | with:
35 | fetch-depth: 0
36 | - name: Install the current repository
37 | run: |
38 | pip install hf_transfer
39 | pip install -e .[test]
40 | pip install --upgrade "ray>=2.40.0"
41 | - name: Running ray tests that need 8 GPUs
42 | run: |
43 | cd tests/ray
44 | pytest -s -x --ignore=test_check_worker_alive.py --ignore=test_rvdz.py .
45 |
--------------------------------------------------------------------------------
/verl/utils/debug/performance.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import torch
16 | import torch.distributed as dist
17 | import logging
18 |
19 |
20 | def log_gpu_memory_usage(head: str, logger: logging.Logger = None, level=logging.DEBUG, rank: int = 0):
21 | if (not dist.is_initialized()) or (rank is None) or (dist.get_rank() == rank):
22 | memory_allocated = torch.cuda.memory_allocated() / 1024**3
23 | memory_reserved = torch.cuda.memory_reserved() / 1024**3
24 |
25 | message = f'{head}, memory allocated (GB): {memory_allocated}, memory reserved (GB): {memory_reserved}'
26 |
27 | if logger is None:
28 | print(message)
29 | else:
30 | logger.log(msg=message, level=level)
31 |
--------------------------------------------------------------------------------
/.github/workflows/e2e_digit_completion.yml:
--------------------------------------------------------------------------------
1 | name: e2e_digit_completion
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/e2e_digit_completion.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/e2e_digit_completion.yml
18 | - "tests/e2e/*.sh"
19 |
20 |
21 |
22 | jobs:
23 | e2e_digit_completion:
24 | runs-on: [self-hosted, l20-0]
25 | env:
26 | HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
27 | HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
28 | NO_PROXY: "localhost,127.0.0.1"
29 | HF_HUB_ENABLE_HF_TRANSFER: 1
30 | container:
31 | image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
32 | options: --gpus all --shm-size=10g
33 | steps:
34 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
35 | with:
36 | fetch-depth: 0
37 | - name: Install the current repository
38 | run: |
39 | pip3 install hf_transfer
40 | pip3 install -e .[test]
41 |       - name: Running digit completion e2e training tests on 8 L20 GPUs
42 | run: |
43 | ray stop --force
44 | bash tests/e2e/run_ray_trainer.sh
45 |
--------------------------------------------------------------------------------
/.github/workflows/e2e_lora.yml:
--------------------------------------------------------------------------------
1 | name: e2e_lora
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/e2e_lora.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/e2e_lora.yml
18 | - "tests/e2e/*.sh"
19 |
20 |
21 |
22 | jobs:
23 | e2e_lora:
24 | runs-on: [self-hosted, l20-1]
25 | env:
26 | HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
27 | HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
28 | NO_PROXY: "localhost,127.0.0.1"
29 | HF_HUB_ENABLE_HF_TRANSFER: 1
30 | container:
31 | image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
32 | options: --gpus all --shm-size=10g
33 | steps:
34 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
35 | with:
36 | fetch-depth: 0
37 | - name: Install the current repository
38 | run: |
39 | pip3 install hf_transfer peft
40 | pip3 install -e .[test]
41 | - name: Prepare gsm8k dataset
42 | run: |
43 | ray stop --force
44 | python3 examples/data_preprocess/gsm8k.py
45 | - name: Running gsm8k e2e training tests with LoRA
46 | run: |
47 | ray stop --force
48 | bash tests/sft/run_sft_qwen05_peft.sh 8 $HOME/ckpts/
--------------------------------------------------------------------------------
/tests/e2e/arithmetic_sequence/rl/README.md:
--------------------------------------------------------------------------------
1 | # Digit completion
2 |
3 | This is an example of solving a digit completion problem. The problem is defined as follows:
4 | 
5 | The prompt is a sequence of numbers with a fixed difference. The agent's goal is to complete the next N numbers.
6 | If the max number is reached, the sequence wraps around to 0 (i.e., values are taken modulo max_number + 1).
7 |
8 | For example,
9 | - prompt = [1, 2, 3]
10 | - N = 5
11 | - max_number = 6
12 |
13 | The response should be [4, 5, 6, 7 % 7, 8 % 7] = [4, 5, 6, 0, 1].
14 |
15 | # Environment definition
16 |
17 | The core task logic is defined in verl/envs/digit_completion/task.py.
18 |
19 | It is highly recommended to take a look at it for better understanding.
20 |
21 |
22 |
23 | # Run experiments
24 |
25 | Users are required to specify the config path and config name (and the model config path relative to the current working directory).
26 |
27 | ```bash
28 | # cd examples/arithmetic_sequence/rl
29 |
30 | # Specify the config path and config name (current working dir)
31 | python3 -m verl.trainer.ppo.ray_megatron_train_synchronous --config-path=$(pwd)/config --config-name='ray_megatron'
32 |
33 | # The default relative path of the model config is 'config/model_config'. To change it, either rewrite it in ray_megatron.yaml or override it on the command line:
34 | python3 -m verl.trainer.ppo.ray_megatron_train_synchronous --config-path=$(pwd)/config --config-name='ray_megatron' ++model.base_path=config/model_config
35 |
36 | ```
37 |
38 |
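39 | # Ground-truth sketch (illustrative)
40 | 
41 | The authoritative implementation is `generate_ground_truth_response` in task.py; the snippet below is only a
42 | minimal sketch of how the ground-truth response in the example above can be computed, assuming values wrap
43 | around modulo max_number + 1 (the helper name `complete_sequence` is made up for illustration):
44 | 
45 | ```python
46 | def complete_sequence(prompt, n, max_number):
47 |     """Continue an arithmetic sequence for n steps, wrapping past max_number."""
48 |     diff = prompt[1] - prompt[0]  # fixed difference of the prompt
49 |     last = prompt[-1]
50 |     response = []
51 |     for _ in range(n):
52 |         last = (last + diff) % (max_number + 1)  # wrap back to 0 after max_number
53 |         response.append(last)
54 |     return response
55 | 
56 | 
57 | assert complete_sequence([1, 2, 3], n=5, max_number=6) == [4, 5, 6, 0, 1]
58 | ```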
--------------------------------------------------------------------------------
/.github/workflows/yapf_format.yml:
--------------------------------------------------------------------------------
1 | name: yapf
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/yapf_format.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/yapf_format.yml
18 |
19 | jobs:
20 | yapf:
21 | runs-on: ubuntu-latest
22 | strategy:
23 | matrix:
24 | python-version: ["3.12"]
25 | steps:
26 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
27 | # - name: checkout
28 | # run: |
29 | # commits=${{ github.event.pull_request.commits }}
30 | # if [[ -n "$commits" ]]; then
31 | # # Prepare enough depth for diffs with main
32 | # git fetch --depth="$(( commits + 1 ))"
33 | # fi
34 | - name: Set up Python ${{ matrix.python-version }}
35 | uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
36 | with:
37 | python-version: ${{ matrix.python-version }}
38 | - name: Install dependencies
39 | run: |
40 | python -m pip install --upgrade pip
41 | pip install --upgrade yapf
42 | pip install toml==0.10.2
43 | - name: Running yapf
44 | run: |
45 | yapf -r -vv -d --style=./.style.yapf verl tests examples
46 |
--------------------------------------------------------------------------------
/verl/trainer/config/sft_trainer.yaml:
--------------------------------------------------------------------------------
1 | data:
2 | train_batch_size: 256
3 | micro_batch_size: null # will be deprecated, use micro_batch_size_per_gpu
4 | micro_batch_size_per_gpu: 4 # this is also val batch size
5 | train_files: ~/data/gsm8k/train.parquet
6 | val_files: ~/data/gsm8k/test.parquet
7 | prompt_key: question
8 | response_key: answer
9 | max_length: 1024
10 | truncation: error
11 | balance_dp_token: False
12 | chat_template: null
13 | model:
14 | partial_pretrain: ~/models/gemma-1.1-7b-it
15 | fsdp_config:
16 | wrap_policy:
17 | min_num_params: 0
18 | cpu_offload: False
19 | offload_params: False
20 | external_lib: null
21 | enable_gradient_checkpointing: False
22 | trust_remote_code: False
23 | lora_rank: 0 # Set to positive value to enable LoRA (e.g., 32)
24 | lora_alpha: 16 # LoRA scaling factor
25 | target_modules: all-linear # Target modules for LoRA adaptation
26 | use_liger: False
27 | optim:
28 | lr: 1e-5
29 | betas: [0.9, 0.95]
30 | weight_decay: 0.01
31 | warmup_steps_ratio: 0.1
32 | clip_grad: 1.0
33 | ulysses_sequence_parallel_size: 1
34 | use_remove_padding: False
35 | trainer:
36 | default_local_dir: /tmp/sft_model
37 | default_hdfs_dir: hdfs://tmp/experiments/gsm8k/gemma-1.1-7b-it/ # change the hdfs path here
38 | resume_path: null
39 | project_name: gsm8k-sft
40 | experiment_name: test
41 | total_epochs: 4
42 | total_training_steps: null
43 | logger: ['console']
44 | seed: 1
45 |
46 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/utils/path_utils.py:
--------------------------------------------------------------------------------
1 | import pathlib
2 |
3 | from verl.utils.reward_score.livecodebench.lcb_runner.lm_styles import LanguageModel, LMStyle
4 | from verl.utils.reward_score.livecodebench.lcb_runner.utils.scenarios import Scenario
5 |
6 |
7 | def ensure_dir(path: str, is_file=True):
8 | if is_file:
9 | pathlib.Path(path).parent.mkdir(parents=True, exist_ok=True)
10 | else:
11 | pathlib.Path(path).mkdir(parents=True, exist_ok=True)
12 | return
13 |
14 |
15 | def get_cache_path(model_repr: str, args) -> str:
16 | scenario: Scenario = args.scenario
17 | n = args.n
18 | temperature = args.temperature
19 | path = f"cache/{model_repr}/{scenario}_{n}_{temperature}.json"
20 | ensure_dir(path)
21 | return path
22 |
23 |
24 | def get_output_path(model_repr: str, args) -> str:
25 | scenario: Scenario = args.scenario
26 | n = args.n
27 | temperature = args.temperature
28 | cot_suffix = "_cot" if args.cot_code_execution else ""
29 | path = f"output/{model_repr}/{scenario}_{n}_{temperature}{cot_suffix}.json"
30 | ensure_dir(path)
31 | return path
32 |
33 |
34 | def get_eval_all_output_path(model_repr: str, args) -> str:
35 | scenario: Scenario = args.scenario
36 | n = args.n
37 | temperature = args.temperature
38 | cot_suffix = "_cot" if args.cot_code_execution else ""
39 | path = f"output/{model_repr}/{scenario}_{n}_{temperature}{cot_suffix}_eval_all.json"
40 | return path
41 |
--------------------------------------------------------------------------------
/verl/utils/import_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | Utilities to check if packages are available.
16 | We assume package availability won't change during runtime.
17 | """
18 |
19 | from functools import cache
20 | from typing import List
21 |
22 |
23 | @cache
24 | def is_megatron_core_available():
25 | try:
26 | from megatron.core import parallel_state as mpu
27 | return True
28 | except ImportError:
29 | return False
30 |
31 |
32 | @cache
33 | def is_vllm_available():
34 | try:
35 | import vllm
36 | return True
37 | except ImportError:
38 | return False
39 |
40 |
41 | def import_external_libs(external_libs=None):
42 | if external_libs is None:
43 | return
44 | if not isinstance(external_libs, List):
45 | external_libs = [external_libs]
46 | import importlib
47 | for external_lib in external_libs:
48 | importlib.import_module(external_lib)
49 |
--------------------------------------------------------------------------------
/verl/utils/logger/aggregate_logger.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | A Ray logger will receive logging info from different processes.
16 | """
17 | import numbers
18 | from typing import Dict
19 |
20 |
21 | def concat_dict_to_str(dict: Dict, step):
22 | output = [f'step:{step}']
23 | for k, v in dict.items():
24 | if isinstance(v, numbers.Number):
25 | output.append(f'{k}:{v:.3f}')
26 | output_str = ' - '.join(output)
27 | return output_str
28 |
29 |
30 | class LocalLogger:
31 |
32 | def __init__(self, remote_logger=None, enable_wandb=False, print_to_console=False):
33 | self.print_to_console = print_to_console
34 | if print_to_console:
35 |             print('Using LocalLogger is deprecated. The constructor API will change in a future release.')
36 |
37 | def flush(self):
38 | pass
39 |
40 | def log(self, data, step):
41 | if self.print_to_console:
42 | print(concat_dict_to_str(data, step=step), flush=True)
--------------------------------------------------------------------------------
/verl/utils/ray_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | Contains commonly used utilities for ray
16 | """
17 |
18 | import ray
19 |
20 | import concurrent.futures
21 |
22 |
23 | def parallel_put(data_list, max_workers=None):
24 |
25 | def put_data(index, data):
26 | return index, ray.put(data)
27 |
28 | if max_workers is None:
29 | max_workers = min(len(data_list), 16)
30 |
31 | with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
32 | data_list_f = [executor.submit(put_data, i, data) for i, data in enumerate(data_list)]
33 | res_lst = []
34 | for future in concurrent.futures.as_completed(data_list_f):
35 | res_lst.append(future.result())
36 |
37 | # reorder based on index
38 | output = [None for _ in range(len(data_list))]
39 | for res in res_lst:
40 | index, data_ref = res
41 | output[index] = data_ref
42 |
43 | return output
44 |
--------------------------------------------------------------------------------
/tests/verl/utils/dataset/test_rm_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import os
15 | import urllib.request
16 | 
17 | from verl.utils import hf_tokenizer
18 | from verl.utils.dataset.rm_dataset import RMDataset
19 | 
20 | 
21 | def get_rm_data():
22 |     # prepare test dataset; download it if it is not present (CI already clones verl-data beforehand)
23 |     url = "https://github.com/eric-haibin-lin/verl-data/raw/refs/heads/main/full_hh_rlhf/rm/test.parquet"
24 |     local_folder = os.path.expanduser('~/verl-data/full_hh_rlhf/rm/')
25 |     local_path = os.path.join(local_folder, 'test.parquet')
26 |     os.makedirs(local_folder, exist_ok=True)
27 |     if not os.path.exists(local_path):
28 |         urllib.request.urlretrieve(url, local_path)
29 |     return local_path
30 | 
31 | 
32 | def test_rm_dataset():
33 |     tokenizer = hf_tokenizer("facebook/opt-1.3b")
34 |     local_path = get_rm_data()
35 |     dataset = RMDataset(parquet_files=local_path, tokenizer=tokenizer, max_length=512)
36 |     data = dataset[0]['input_ids']
37 |     output = tokenizer.batch_decode(data)
38 |     assert len(output) > 1
39 |     assert isinstance(output[0], str)
40 | 
--------------------------------------------------------------------------------
/tests/sanity/check_license.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | license_head_bytedance = "Copyright 2024 Bytedance Ltd. and/or its affiliates"
16 | license_head2_prime = "Copyright 2024 PRIME team and/or its affiliates"
17 |
18 | from pathlib import Path
19 | from argparse import ArgumentParser
20 |
21 | if __name__ == '__main__':
22 | parser = ArgumentParser()
23 | parser.add_argument('--directory', '-d', required=True, type=str)
24 | args = parser.parse_args()
25 | directory_in_str = args.directory
26 |
27 | pathlist = Path(directory_in_str).glob('**/*.py')
28 | for path in pathlist:
29 | # because path is object not string
30 | path_in_str = str(path.absolute())
31 | print(path_in_str)
32 | with open(path_in_str, 'r', encoding='utf-8') as f:
33 | file_content = f.read()
34 |
35 | assert license_head_bytedance in file_content or \
36 | license_head2_prime in file_content, f'file {path_in_str} does not contain license'
37 |
--------------------------------------------------------------------------------
/tests/e2e/run_qwen_gsm8k_function_rm_grpo.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | export VLLM_ATTENTION_BACKEND=XFORMERS
4 |
5 | python3 -m verl.trainer.main_ppo \
6 | data.train_files=$HOME/data/gsm8k/train.parquet \
7 | data.val_files=$HOME/data/gsm8k/test.parquet \
8 | data.train_batch_size=1024 \
9 | data.val_batch_size=1312 \
10 | data.max_prompt_length=512 \
11 | data.max_response_length=512 \
12 | actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
13 | actor_rollout_ref.actor.optim.lr=1e-6 \
14 | actor_rollout_ref.model.use_remove_padding=True \
15 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
16 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
17 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
18 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
19 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
20 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
21 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
22 | actor_rollout_ref.rollout.name=vllm \
23 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
24 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
25 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
26 | algorithm.kl_ctrl.kl_coef=0.001 \
27 | algorithm.adv_estimator=grpo \
28 | trainer.critic_warmup=0 \
29 | trainer.logger=['console'] \
30 | trainer.project_name='verl_example_gsm8k' \
31 | trainer.experiment_name='qwen_e2e_ci_function_rm' \
32 | trainer.n_gpus_per_node=8 \
33 | trainer.nnodes=1 \
34 | trainer.save_freq=-1 \
35 | trainer.total_training_steps=1 $@
36 |
--------------------------------------------------------------------------------
/tests/e2e/run_qwen_gsm8k_function_rm_remax.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | export VLLM_ATTENTION_BACKEND=XFORMERS
4 |
5 | python3 -m verl.trainer.main_ppo \
6 | data.train_files=$HOME/data/gsm8k/train.parquet \
7 | data.val_files=$HOME/data/gsm8k/test.parquet \
8 | data.train_batch_size=1024 \
9 | data.val_batch_size=1312 \
10 | data.max_prompt_length=512 \
11 | data.max_response_length=512 \
12 | actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
13 | actor_rollout_ref.actor.optim.lr=1e-6 \
14 | actor_rollout_ref.model.use_remove_padding=True \
15 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
16 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
17 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
18 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
19 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
20 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
21 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
22 | actor_rollout_ref.rollout.name=vllm \
23 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
24 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
25 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
26 | algorithm.kl_ctrl.kl_coef=0.001 \
27 | algorithm.adv_estimator=remax \
28 | trainer.critic_warmup=0 \
29 | trainer.logger=['console'] \
30 | trainer.project_name='verl_example_gsm8k' \
31 | trainer.experiment_name='qwen_e2e_ci_function_rm' \
32 | trainer.n_gpus_per_node=8 \
33 | trainer.nnodes=1 \
34 | trainer.save_freq=-1 \
35 | trainer.total_training_steps=1 $@
36 |
--------------------------------------------------------------------------------
/docker/Dockerfile.ngc.vllm:
--------------------------------------------------------------------------------
1 | FROM nvcr.io/nvidia/pytorch:24.05-py3
2 |
3 | # uninstall nv-pytorch fork
4 | RUN pip3 uninstall pytorch-quantization \
5 | pytorch-triton \
6 | torch \
7 | torch-tensorrt \
8 | torchvision \
9 | xgboost transformer_engine flash_attn \
10 | apex megatron-core -y
11 |
12 | RUN pip3 install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu124
13 |
14 | # make sure torch version is kept
15 | RUN pip3 install --no-cache-dir \
16 | "torch==2.4.0" \
17 | accelerate \
18 | codetiming \
19 | datasets \
20 | dill \
21 | hydra-core \
22 | numpy \
23 | pybind11 \
24 | tensordict \
25 | "transformers<=4.46.0"
26 |
27 | # ray is installed via vllm
28 | RUN pip3 install --no-cache-dir vllm==0.6.3
29 |
30 | # we choose flash-attn v2.7.0 or v2.7.2 which contain pre-built wheels
31 | RUN pip3 install --no-cache-dir --no-build-isolation flash-attn==2.7.0.post2
32 |
33 | # install apex, set MAX_JOBS to avoid OOMs
34 | RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
35 | --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
36 | git+https://github.com/NVIDIA/apex
37 |
38 | # install Transformer Engine, which requires FA 2.5.8
39 | RUN MAX_JOBS=4 NINJA_FLAGS="-j4" pip3 install flash-attn==2.5.8 --no-cache-dir --no-build-isolation
40 | RUN MAX_JOBS=4 NINJA_FLAGS="-j4" pip3 install git+https://github.com/NVIDIA/TransformerEngine.git@v1.7
41 |
42 | # Pin wandb to v0.18 since v0.19.1 was released with an ImportError
43 | RUN pip3 install wandb==0.18.7 py-spy
44 |
--------------------------------------------------------------------------------
/tests/e2e/run_ray_trainer.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e -x
4 |
5 | OUTPUT_FILE="/tmp/output_ray_trainer.txt"
6 |
7 | export PATH=$PATH:~/.local/bin
8 |
9 | rm -rf $OUTPUT_FILE
10 | python3 tests/e2e/arithmetic_sequence/rl/main_trainer.py \
11 | data.train_files=tests/e2e/arithmetic_sequence/data/train.parquet \
12 | data.val_files=tests/e2e/arithmetic_sequence/data/test.parquet \
13 | data.train_batch_size=800 \
14 | data.val_batch_size=200 \
15 | data.max_prompt_length=16 \
16 | data.max_response_length=32 \
17 | data.return_raw_input_ids=True \
18 | actor_rollout_ref.model.path=tests/e2e/arithmetic_sequence/model \
19 | actor_rollout_ref.model.external_lib=tests.e2e.envs.digit_completion \
20 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=200 \
21 | actor_rollout_ref.actor.entropy_coeff=0 \
22 | actor_rollout_ref.actor.optim.lr=1e-4 \
23 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=200 \
24 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=200 \
25 | actor_rollout_ref.rollout.name=hf \
26 | actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
27 | critic.ppo_micro_batch_size_per_gpu=200 \
28 | critic.model.path=tests/e2e/arithmetic_sequence/model \
29 | critic.optim.lr=1e-3 \
30 | algorithm.kl_ctrl.kl_coef=0.005 \
31 | trainer.total_epochs=200 \
32 | trainer.experiment_name=arithmetic_sequences \
33 | trainer.logger=['console'] \
34 | trainer.n_gpus_per_node=1 \
35 | trainer.test_freq=1 \
36 | trainer.save_freq=110 | tee $OUTPUT_FILE;
37 |
38 | python3 tests/e2e/check_results.py --output_file=$OUTPUT_FILE
39 | rm -rf $OUTPUT_FILE
40 |
--------------------------------------------------------------------------------
/.github/workflows/e2e_gsm8k_megatron.yml:
--------------------------------------------------------------------------------
1 | name: e2e_gsm8k_megatron
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/e2e_gsm8k_megatron.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/e2e_gsm8k_megatron.yml
18 | - "tests/e2e/*.sh"
19 |
20 |
21 |
22 | jobs:
23 | e2e_gsm8k_megatron:
24 | runs-on: [self-hosted, l20-0]
25 | env:
26 | HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
27 | HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
28 | NO_PROXY: "localhost,127.0.0.1"
29 | HF_HUB_ENABLE_HF_TRANSFER: 1
30 | container:
31 | image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
32 | options: --gpus all --shm-size=10g
33 | steps:
34 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
35 | with:
36 | fetch-depth: 0
37 | - name: Install the current repository
38 | run: |
39 | pip3 install hf_transfer
40 | pip3 install -e .[test]
41 | - name: Prepare gsm8k dataset
42 | run: |
43 | python3 examples/data_preprocess/gsm8k.py
44 | - name: Running gsm8k e2e training tests on 8 L20 GPUs with Megatron
45 | run: |
46 | ray stop --force
47 | [ ! -d "$HOME/Megatron-LM" ] && git clone -b core_v0.4.0_verl https://github.com/eric-haibin-lin/Megatron-LM $HOME/Megatron-LM
48 | export PYTHONPATH=$PYTHONPATH:$HOME/Megatron-LM
49 | bash tests/e2e/run_deepseek_megatron.sh
--------------------------------------------------------------------------------
/verl/utils/megatron/memory.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import torch
16 |
17 |
18 | class MemoryBuffer:
19 |
20 | def __init__(self, numel, numel_padded, dtype):
21 | self.numel = numel
22 | self.numel_padded = numel_padded
23 | self.dtype = dtype
24 | self.data = torch.zeros(self.numel_padded,
25 | dtype=self.dtype,
26 | device=torch.cuda.current_device(),
27 | requires_grad=False)
28 |
29 | def zero(self):
30 | """Reset the buffer to zero."""
31 | self.data.zero_()
32 |
33 | def get(self, shape, start_index):
34 | """Return a tensor with the input `shape` as a view into the
35 | 1-D data starting at `start_index`."""
36 | end_index = start_index + shape.numel()
37 | assert end_index <= self.numel, \
38 | 'requested tensor is out of the buffer range.'
39 | buffer_tensor = self.data[start_index:end_index]
40 | buffer_tensor = buffer_tensor.view(shape)
41 | return buffer_tensor
42 |
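43 | # Usage sketch (illustrative only, not part of the training code): allocate one flat
44 | # buffer and carve parameter-shaped views out of it without extra allocations.
45 | #
46 | #   buf = MemoryBuffer(numel=40, numel_padded=64, dtype=torch.float32)
47 | #   weight = buf.get(torch.Size([4, 8]), start_index=0)   # view over elements [0, 32)
48 | #   bias = buf.get(torch.Size([8]), start_index=32)       # view over elements [32, 40)
49 | #   buf.zero()  # zeroes the underlying storage, and therefore both views, in place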
--------------------------------------------------------------------------------
/verl/trainer/config/generation.yaml:
--------------------------------------------------------------------------------
1 |
2 | trainer:
3 | nnodes: 1
4 | n_gpus_per_node: 8
5 |
6 | data:
7 | path: ~/data/rlhf/math/test.parquet
8 | prompt_key: prompt
9 | response_key: responses
10 | data_source_key: data_source
11 | reward_model_key: reward_model
12 | n_samples: 1
13 | output_path: /opt/tiger/math_Qwen2-7B-Instruct.parquet
14 | batch_size: 2048
15 |
16 | model:
17 | path: ~/models/Qwen2-7B-Instruct
18 | external_lib: null
19 |
20 | rollout:
21 | name: vllm
22 | temperature: 0.6
23 | top_k: -1 # 0 for hf rollout, -1 for vllm rollout
24 | top_p: 1
25 | prompt_length: 2048
26 | response_length: 32768
27 | dtype: bfloat16
28 | gpu_memory_utilization: 0.9
29 | ignore_eos: False
30 | micro_batch_size: 256
31 | enforce_eager: True
32 | free_cache_engine: True
33 | load_format: dummy_dtensor
34 | tensor_model_parallel_size: 1
35 | max_num_batched_tokens: 8192
36 | max_num_seqs: 1024
37 | log_prob_micro_batch_size: 8
38 | log_prob_micro_batch_size_per_gpu: 1
39 | do_sample: True
40 | n: 1
41 | n_val: 1
42 | enable_chunked_prefill: True
43 | disable_log_stats: True
44 |
45 | actor:
46 | strategy: fsdp # This is for backward-compatibility
47 | ulysses_sequence_parallel_size: 1 # sp size
48 | fsdp_config:
49 | wrap_policy:
50 | min_num_params: 0
51 | param_offload: False
52 | grad_offload: False
53 | optimizer_offload: False
54 | fsdp_size: -1
55 | optim:
56 | lr: 1e-6
57 | lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime
58 | min_lr_ratio: null # only useful for warmup with cosine
59 | warmup_style: constant # select from constant/cosine
60 |     total_training_steps: -1 # must be overridden by the program
61 |
--------------------------------------------------------------------------------
/.github/workflows/model.yml:
--------------------------------------------------------------------------------
1 | name: model_rmpad
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/model.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/model.yml
18 |
19 |
20 |
21 | jobs:
22 | model_rmpad:
23 | runs-on: [self-hosted, l20-1]
24 | env:
25 | HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
26 | HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
27 | NO_PROXY: "localhost,127.0.0.1"
28 | HF_HUB_ENABLE_HF_TRANSFER: 1
29 | container:
30 | image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
31 | options: --gpus all --shm-size=10g
32 | steps:
33 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
34 | with:
35 | fetch-depth: 0
36 | - name: Install the current repository and upgrade to latest transformers/flash_attn
37 | run: |
38 | pip3 install -e .[test]
39 | pip3 install --upgrade transformers
40 | - name: Running rmpad model tests on 8 L20 GPUs + flash_attn 2.5.8
41 | run: |
42 | pytest -s tests/model/test_transformer.py
43 | - name: Running rmpad model tests on 8 L20 GPUs + latest flash_attn
44 | run: |
45 | pip3 install --upgrade flash_attn --no-build-isolation
46 | pytest -s tests/model/test_transformer.py
47 |       - name: Running FSDP checkpoint tests on 8 L20 GPUs
48 | run: |
49 | pip3 install hf_transfer
50 | torchrun --nproc_per_node=8 tests/checkpoint/test_fsdp_ckpt.py
51 |
--------------------------------------------------------------------------------
/tests/ray/test_check_worker_alive.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import time
16 | import os
17 | import subprocess
18 |
19 |
20 | def test():
21 | wait_time = 10
22 |
23 | my_env = os.environ.copy()
24 | my_env["WAIT_TIME"] = str(wait_time)
25 |
26 | p = subprocess.Popen(["python3", "-u", "./check_worker_alive/main.py"], env=my_env, stdout=subprocess.PIPE)
27 |
28 | count = 0
29 | while b"foo started" not in p.stdout.read():
30 | time.sleep(1)
31 | count += 1
32 | if count > 40:
33 | raise RuntimeError("timeout for start foo in check_worker_alive/main.py")
34 |
35 |     print(
36 |         time.time(),
37 |         f"wait 1.5x the wait time ({wait_time * 1.5}s) so the signal can reach the process without exceeding its wait time")
38 | time.sleep(wait_time * 1.5)
39 |     print(time.time(), "start checking")
40 | assert p.poll() is not None, f"process {p} still alive, expecting signal raised abort"
41 | assert p.returncode != 0, f"process {p} exit with code 0, expecting not-zero exit code"
42 |     print("test passed")
43 |
44 |
45 | if __name__ == "__main__":
46 | test()
47 |
--------------------------------------------------------------------------------
/verl/single_controller/base/megatron/worker.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import os
16 | from dataclasses import dataclass
17 | from verl.single_controller.base.worker import Worker, DistRankInfo, DistGlobalInfo
18 |
19 |
20 | class MegatronWorker(Worker):
21 |
22 | def __init__(self, cuda_visible_devices=None) -> None:
23 | super().__init__(cuda_visible_devices)
24 |
25 | def get_megatron_global_info(self):
26 | from megatron.core import parallel_state as mpu
27 | tp_size = mpu.get_tensor_model_parallel_world_size()
28 | dp_size = mpu.get_data_parallel_world_size()
29 | pp_size = mpu.get_pipeline_model_parallel_world_size()
30 | info = DistGlobalInfo(tp_size=tp_size, dp_size=dp_size, pp_size=pp_size)
31 | return info
32 |
33 | def get_megatron_rank_info(self):
34 | from megatron.core import parallel_state as mpu
35 | tp_rank = mpu.get_tensor_model_parallel_rank()
36 | dp_rank = mpu.get_data_parallel_rank()
37 | pp_rank = mpu.get_pipeline_model_parallel_rank()
38 | info = DistRankInfo(tp_rank=tp_rank, dp_rank=dp_rank, pp_rank=pp_rank)
39 | return info
--------------------------------------------------------------------------------
/docs/advance/megatron_extension.rst:
--------------------------------------------------------------------------------
1 | Add models with the Megatron-LM backend
2 | =========================================
3 |
4 | Model
5 | -----------
6 |
7 | The most challenging aspect of using the Megatron-LM backend is implementing
8 | the models for training. Currently, we implement a Llama model that
9 | supports data parallelism, tensor parallelism, pipeline parallelism (including
10 | vPP) and sequence parallelism. We also implement remove padding (sequence packing) for the Llama
11 | model, which can be found in `modeling_llama_megatron.py `_.
12 | 
13 | To support other models, users are required to:
14 | 
15 | 1. Implement a model similar to ``modeling_llama_megatron.py`` that satisfies the
16 |    parallelism requirements of Megatron-LM. Then register your model in
17 |    `registry.py `_ (see the sketch at the end of this page).
18 | 2. Implement checkpoint utils that can load a full checkpoint (e.g. a huggingface
19 |    checkpoint) into the partitioned models at runtime. Then register
20 |    your loader in ``weight_loader_registry`` in `weight_loader_registry.py `_.
21 | 3. Implement a weight loader that synchronizes the weights from the Megatron actor
22 |    model to the rollout (vLLM) model. Note that both the actor model and the rollout
23 |    model are partitioned at runtime, so it's advisable to keep the parameter names in the
24 |    actor model implementation aligned with the rollout model. Otherwise, you may need an
25 |    additional name mapping and possibly a weight transformation. The weight loader
26 |    implementation is in `megatron_weight_loaders.py `_.
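27 | 
28 | The following is an illustrative sketch only (the names are hypothetical, not the verbatim
29 | verl API; the real entry points live in ``registry.py`` and ``weight_loader_registry.py``).
30 | It shows the kind of dict-based registration pattern that step 1 refers to:
31 | 
32 | .. code-block:: python
33 | 
34 |    # Hypothetical registry pattern; check registry.py for the actual entries.
35 |    MODEL_REGISTRY: dict = {}
36 | 
37 |    def register_model(arch: str):
38 |        """Map a HuggingFace architecture name to a Megatron-compatible implementation."""
39 |        def wrapper(cls):
40 |            MODEL_REGISTRY[arch] = cls
41 |            return cls
42 |        return wrapper
43 | 
44 |    @register_model("LlamaForCausalLM")
45 |    class MyParallelLlamaForCausalLM:
46 |        ...  # tensor/pipeline-parallel implementation in the style of modeling_llama_megatron.py
47 | 
48 |    def resolve_model_cls(arch: str):
49 |        return MODEL_REGISTRY[arch]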
--------------------------------------------------------------------------------
/tests/ray/test_rvdz.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import ray
16 |
17 |
18 | @ray.remote
19 | class TestWorker:
20 |
21 | def __init__(self, rank, world_size, group_name):
22 | self.rank = rank
23 | self.world_size = world_size
24 | self.group_name = group_name
25 | self.communicator = None
26 |
27 | def init(self):
28 | from verl.utils.rendezvous.ray_backend import create_nccl_communicator_in_ray
29 | self.communicator = create_nccl_communicator_in_ray(self.rank, self.world_size, self.group_name)
30 |
31 | def test(self):
32 | if self.communicator is None:
33 | return None
34 | return self.communicator.rank_id()
35 |
36 |
37 | def test_rvdz():
38 | ray.init()
39 |
40 | group_name = "test_group"
41 | world_size = 2
42 |
43 | workers = [TestWorker.options(num_gpus=1).remote(rank, world_size, group_name) for rank in range(world_size)]
44 |
45 | ray.get([worker.init.remote() for worker in workers])
46 |
47 | ranks = ray.get([worker.test.remote() for worker in workers])
48 |
49 | assert ranks == [0, 1], f"expecting [0, 1], got {ranks}"
50 |
51 | ray.shutdown()
52 |
--------------------------------------------------------------------------------
/verl/third_party/vllm/vllm_v_0_6_3/tokenizer.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | # Copyright 2023 The vLLM team.
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/transformers_utils/tokenizer_group/tokenizer_group.py
15 |
16 | from typing import Optional
17 |
18 | from transformers import PreTrainedTokenizer
19 | from vllm.transformers_utils.tokenizer_group import TokenizerGroup
20 | from vllm.utils import LRUCache
21 |
22 |
23 | class TokenizerGroup(TokenizerGroup):
24 | """A group of tokenizers that can be used for LoRA adapters."""
25 |
26 | def __init__(self, tokenizer: PreTrainedTokenizer, enable_lora: bool, max_num_seqs: int,
27 | max_input_length: Optional[int]):
28 | self.enable_lora = enable_lora
29 | self.max_input_length = max_input_length
30 | self.tokenizer = tokenizer
31 | self.lora_tokenizers = LRUCache[PreTrainedTokenizer](capacity=max_num_seqs) if enable_lora else None
32 |
33 | # FIXME(sgm): for simplicity, we assign the special token here
34 | @property
35 | def pad_token_id(self):
36 | return self.tokenizer.pad_token_id
37 |
38 | @property
39 | def eos_token_id(self):
40 | return self.tokenizer.eos_token_id
41 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/runner/claude_runner.py:
--------------------------------------------------------------------------------
1 | import os
2 | from time import sleep
3 |
4 | try:
5 | from anthropic import Anthropic
6 | except ImportError as e:
7 | pass
8 |
9 | from verl.utils.reward_score.livecodebench.lcb_runner.runner.base_runner import BaseRunner
10 |
11 |
12 | class ClaudeRunner(BaseRunner):
13 | client = Anthropic(api_key=os.getenv("ANTHROPIC_KEY"))
14 |
15 | def __init__(self, args, model):
16 | super().__init__(args, model)
17 |         self.client_kwargs: dict[str, str] = {
18 | "model": args.model,
19 | "temperature": args.temperature,
20 | "max_tokens_to_sample": args.max_tokens,
21 | "top_p": args.top_p,
22 | }
23 |
24 | def _run_single(self, prompt: str) -> list[str]:
25 |
26 | def __run_single(counter):
27 | try:
28 | response = self.client.completions.create(
29 | prompt=prompt,
30 | **self.client_kwargs,
31 | )
32 | content = response.completion
33 | return content
34 | except Exception as e:
35 | print("Exception: ", repr(e), "Sleeping for 20 seconds...")
36 | sleep(20 * (11 - counter))
37 | counter = counter - 1
38 | if counter == 0:
39 | print(f"Failed to run model for {prompt}!")
40 | print("Exception: ", repr(e))
41 | raise e
42 | return __run_single(counter)
43 |
44 | outputs = []
45 | try:
46 | for _ in range(self.args.n):
47 | outputs.append(__run_single(10))
48 | except Exception as e:
49 | raise e
50 |
51 | return outputs
52 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/runner/mistral_runner.py:
--------------------------------------------------------------------------------
1 | import os
2 | from time import sleep
3 |
4 | try:
5 | from mistralai.client import MistralClient
6 | except ImportError as e:
7 | pass
8 |
9 | from verl.utils.reward_score.livecodebench.lcb_runner.runner.base_runner import BaseRunner
10 |
11 |
12 | class MistralRunner(BaseRunner):
13 | client = MistralClient(
14 | api_key=os.environ["MISTRAL_API_KEY"],
15 | )
16 |
17 | def __init__(self, args, model):
18 | super().__init__(args, model)
19 |         self.client_kwargs: dict[str, str] = {
20 | "model": args.model,
21 | "temperature": args.temperature,
22 | "max_tokens": args.max_tokens,
23 | "top_p": args.top_p,
24 | }
25 |
26 | def _run_single(self, prompt: list[dict[str, str]]) -> list[str]:
27 |
28 | def __run_single(counter):
29 | try:
30 | response = self.client.chat(
31 | messages=prompt,
32 | **self.client_kwargs,
33 | )
34 | content = response.choices[0].message.content
35 | return content
36 | except Exception as e:
37 | print("Exception: ", repr(e), "Sleeping for 20 seconds...")
38 | sleep(20 * (11 - counter))
39 | counter = counter - 1
40 | if counter == 0:
41 | print(f"Failed to run model for {prompt}!")
42 | print("Exception: ", repr(e))
43 | raise e
44 | return __run_single(counter)
45 |
46 | outputs = []
47 | try:
48 | for _ in range(self.args.n):
49 | outputs.append(__run_single(10))
50 | except Exception as e:
51 | raise e
52 |
53 | return outputs
54 |
--------------------------------------------------------------------------------
/tests/e2e/check_results.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import argparse
16 |
17 | import numpy as np
18 |
19 |
20 | def extract_reward_from_line(line):
21 | # TODO: this function needs error handling
22 | try:
23 | key_vals = line.split(' - ')
24 | for key_val in key_vals:
25 | key, val = key_val.split(':')
26 | if key == 'critic/rewards/mean':
27 | reward = float(val)
28 | return reward
29 | return -np.inf
30 | except Exception:
31 | return -np.inf
32 |
33 |
34 | if __name__ == '__main__':
35 | parser = argparse.ArgumentParser()
36 | parser.add_argument('--output_file', required=True, type=str)
37 |
38 | args = parser.parse_args()
39 |
40 | with open(args.output_file, 'r') as f:
41 | output = f.read().split('\n')
42 |
43 | best_reward = -np.inf
44 | for line in output:
45 | if line.startswith('step'):
46 | reward = extract_reward_from_line(line)
47 | if reward > best_reward:
48 | best_reward = reward
49 |
50 | print(f'Best reward is {best_reward}')
51 | assert best_reward > 0.2, f'Best reward must be greater than 0.2. best_reward: {best_reward}'
52 | print('Check passes')
53 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/runner/cohere_runner.py:
--------------------------------------------------------------------------------
1 | import os
2 | from time import sleep
3 |
4 | try:
5 | import cohere
6 | except ImportError as e:
7 | pass
8 |
9 | from verl.utils.reward_score.livecodebench.lcb_runner.runner.base_runner import BaseRunner
10 |
11 |
12 | class CohereRunner(BaseRunner):
13 | client = cohere.Client(os.getenv("COHERE_API_KEY"))
14 |
15 | def __init__(self, args, model):
16 | super().__init__(args, model)
17 |         self.client_kwargs: dict[str, str] = {
18 | "model": args.model,
19 | "temperature": args.temperature,
20 | "max_tokens": args.max_tokens,
21 | "p": args.top_p,
22 | }
23 |
24 | def _run_single(self, prompt: tuple[dict[str,str], str]) -> list[str]:
25 | chat_history, message = prompt
26 |
27 | def __run_single(counter):
28 | try:
29 | response = self.client.chat(
30 | message=message,
31 | chat_history=chat_history,
32 | **self.client_kwargs,
33 | )
34 | content = response.text
35 | return content
36 | except Exception as e:
37 | print("Exception: ", repr(e), "Sleeping for 20 seconds...")
38 | sleep(20 * (11 - counter))
39 | counter = counter - 1
40 | if counter == 0:
41 | print(f"Failed to run model for {prompt}!")
42 | print("Exception: ", repr(e))
43 | raise e
44 | return __run_single(counter)
45 |
46 | outputs = []
47 | try:
48 | for _ in range(self.args.n):
49 | outputs.append(__run_single(10))
50 | except Exception as e:
51 | raise e
52 |
53 | return outputs
54 |
--------------------------------------------------------------------------------
/verl/models/README.md:
--------------------------------------------------------------------------------
1 | # Models
2 | Common model zoos such as huggingface/transformers struggle with PyTorch-native model parallelism. Following the design principle of vLLM, we keep a simple, parallelizable, highly-optimized model implementation with packed inputs in verl.
3 | ## Adding a New Huggingface Model
4 | ### Step 1: Copy the model file from HF to verl
5 | - Add a new file under verl/models/hf
6 | - Copy ONLY the model file from huggingface/transformers/models to verl/models/hf
7 |
8 | ### Step 2: Modify the model file to use packed inputs
9 | - Remove all the code related to inference (kv cache)
10 | - Modify the inputs to include only
11 | - input_ids (total_nnz,)
12 |   - cu_seqlens (batch_size + 1,)
13 | - max_seqlen_in_batch: int
14 | - Note that this requires using flash attention with a causal mask (see the packed-inputs sketch at the end of this file).
15 |
16 | ### Step 2.5: Add tests
17 | - Add a test to compare this version and the huggingface version
18 | - Following the infrastructure and add tests to tests/models/hf
19 |
20 | ### Step 3: Add a function to apply tensor parallelism
21 | - Please follow
22 | - https://pytorch.org/docs/stable/distributed.tensor.parallel.html
23 | - https://pytorch.org/tutorials/intermediate/TP_tutorial.html
24 | - General comments
25 |   - Tensor parallelism in native PyTorch is NOT auto-parallelism. It works by specifying, via configs, how model parameters and module inputs/outputs are resharded; these configs are then registered as hooks that reshard inputs/outputs before/after the module forward.
26 |
27 | ### Step 4: Add a function to apply data parallelism
28 | - Please use FSDP2 APIs
29 | - See demo here https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py#L413
30 |
31 | ### Step 5: Add a function to apply pipeline parallelism
32 | - Comes in Pytorch 2.4
33 | - Currently only in alpha in nightly version
34 | - Check torchtitan for more details
35 |
36 |
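37 | ### Packed-inputs sketch (Step 2)
38 | A minimal, illustrative sketch in plain PyTorch (no verl APIs assumed) of how the packed inputs listed in Step 2 can be derived from a padded batch:
39 | ```python
40 | import torch
41 | import torch.nn.functional as F
42 | 
43 | def pack_inputs(input_ids: torch.Tensor, attention_mask: torch.Tensor):
44 |     """Convert a padded (batch_size, seqlen) batch into packed inputs for remove-padding models.
45 | 
46 |     Returns:
47 |         packed_ids: (total_nnz,) all non-pad tokens, concatenated sequence by sequence
48 |         cu_seqlens: (batch_size + 1,) cumulative sequence lengths starting at 0
49 |         max_seqlen_in_batch: int, length of the longest sequence in the batch
50 |     """
51 |     seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)                      # (batch_size,)
52 |     cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))  # (batch_size + 1,)
53 |     packed_ids = input_ids[attention_mask.bool()]                                # (total_nnz,)
54 |     return packed_ids, cu_seqlens, int(seqlens.max())
55 | 
56 | # Example: two sequences of lengths 3 and 2
57 | ids = torch.tensor([[5, 6, 7, 0], [8, 9, 0, 0]])
58 | mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
59 | packed, cu, max_len = pack_inputs(ids, mask)
60 | # packed -> tensor([5, 6, 7, 8, 9]); cu -> tensor([0, 3, 5], dtype=torch.int32); max_len -> 3
61 | ```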
--------------------------------------------------------------------------------
/examples/split_placement/run_deepseek7b_llm.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | python3 main_ppo_split.py \
4 | data.train_files=$HOME/data/gsm8k/train.parquet \
5 | data.val_files=$HOME/data/gsm8k/test.parquet \
6 | data.train_batch_size=1024 \
7 | data.val_batch_size=1312 \
8 | data.max_prompt_length=512 \
9 | data.max_response_length=512 \
10 | actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
11 | actor_rollout_ref.actor.optim.lr=1e-6 \
12 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
13 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
14 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
15 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
16 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
17 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
18 | actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
19 | actor_rollout_ref.rollout.name=vllm \
20 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
21 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=8 \
22 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
23 | critic.optim.lr=1e-5 \
24 | critic.model.path=deepseek-ai/deepseek-llm-7b-chat \
25 | critic.model.enable_gradient_checkpointing=False \
26 | critic.ppo_micro_batch_size_per_gpu=8 \
27 | critic.model.fsdp_config.param_offload=False \
28 | critic.model.fsdp_config.grad_offload=False \
29 | critic.model.fsdp_config.optimizer_offload=False \
30 | algorithm.kl_ctrl.kl_coef=0.001 \
31 | trainer.critic_warmup=0 \
32 | trainer.logger=['console','wandb'] \
33 | trainer.project_name='verl_example_gsm8k' \
34 | trainer.experiment_name='deepseek_llm_7b_function_rm' \
35 | trainer.n_gpus_per_node=8 \
36 | trainer.nnodes=1 \
37 | trainer.save_freq=-1 \
38 | trainer.total_epochs=15 $@
39 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | # setup.py is the fallback installation script when pyproject.toml does not work
16 | from setuptools import setup, find_packages
17 | import os
18 |
19 | version_folder = os.path.dirname(os.path.abspath(__file__))
20 |
21 | with open(os.path.join(version_folder, 'verl/version/version')) as f:
22 | __version__ = f.read().strip()
23 |
24 |
25 | with open('requirements.txt') as f:
26 | required = f.read().splitlines()
27 | install_requires = [item.strip() for item in required if item.strip() and not item.strip().startswith('#')]
28 |
29 | extras_require = {
30 | 'test': ['pytest', 'yapf']
31 | }
32 |
33 | setup(
34 | name='verl',
35 | version=__version__,
36 | package_dir={'': '.'},
37 | packages=find_packages(where='.'),
38 | url='https://github.com/volcengine/verl',
39 | license='Apache 2.0',
40 | author='Bytedance - Seed - MLSys',
41 | author_email='zhangchi.usc1992@bytedance.com, gmsheng@connect.hku.hk',
42 | description='verl: Volcano Engine Reinforcement Learning for LLM',
43 | install_requires=install_requires,
44 | extras_require=extras_require,
45 | package_data={'': ['version/*'],
46 | 'verl': ['trainer/config/*.yaml'],},
47 | include_package_data=True,
48 | )
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/runner/claude3_runner.py:
--------------------------------------------------------------------------------
1 | import os
2 | from time import sleep
3 |
4 | try:
5 | from anthropic import Anthropic
6 | except ImportError as e:
7 | pass
8 |
9 | from verl.utils.reward_score.livecodebench.lcb_runner.runner.base_runner import BaseRunner
10 |
11 |
12 | class Claude3Runner(BaseRunner):
13 | client = Anthropic(api_key=os.getenv("ANTHROPIC_KEY"))
14 |
15 | def __init__(self, args, model):
16 | super().__init__(args, model)
17 |         self.client_kwargs: dict[str, str] = {
18 | "model": args.model,
19 | "temperature": args.temperature,
20 | "max_tokens": args.max_tokens,
21 | "top_p": args.top_p,
22 | }
23 |
24 | def _run_single(self, prompt: tuple[str, str]) -> list[str]:
25 |
26 | def __run_single(counter):
27 | try:
28 | response = self.client.messages.create(
29 | system=prompt[0],
30 | messages=prompt[1],
31 | **self.client_kwargs,
32 | )
33 | content = "\n".join([x.text for x in response.content])
34 | return content
35 | except Exception as e:
36 | print("Exception: ", repr(e), "Sleeping for 20 seconds...")
37 | sleep(20 * (11 - counter))
38 | counter = counter - 1
39 | if counter == 0:
40 | print(f"Failed to run model for {prompt}!")
41 | print("Exception: ", repr(e))
42 | raise e
43 | return __run_single(counter)
44 |
45 | outputs = []
46 | try:
47 | for _ in range(self.args.n):
48 | outputs.append(__run_single(10))
49 | except Exception as e:
50 | raise e
51 |
52 | return outputs
53 |
--------------------------------------------------------------------------------
/examples/ppo_trainer/run_deepseek_math_gsm8k_megatron.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | gsm8k_train_path=$HOME/data/gsm8k/train.parquet
4 | gsm8k_test_path=$HOME/data/gsm8k/test.parquet
5 | math_train_path=$HOME/data/math/train.parquet
6 | math_test_path=$HOME/data/math/test.parquet
7 |
8 | train_files="['$gsm8k_train_path', '$math_train_path']"
9 | test_files="['$gsm8k_test_path', '$math_test_path']"
10 |
11 | python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\
12 | data.train_files="$train_files" \
13 | data.val_files="$test_files" \
14 | data.train_batch_size=1024 \
15 | data.val_batch_size=6312 \
16 | data.max_prompt_length=1024 \
17 | data.max_response_length=512 \
18 | actor_rollout_ref.model.path=deepseek-ai/deepseek-coder-6.7b-instruct \
19 | actor_rollout_ref.actor.optim.lr=1e-6 \
20 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
21 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
22 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
23 | actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
24 | actor_rollout_ref.rollout.name=vllm \
25 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
26 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
27 | critic.optim.lr=1e-5 \
28 | critic.model.path=deepseek-ai/deepseek-coder-6.7b-instruct \
29 | critic.model.enable_gradient_checkpointing=False \
30 | critic.ppo_micro_batch_size_per_gpu=4 \
31 | algorithm.kl_ctrl.kl_coef=0.001 \
32 | trainer.critic_warmup=0 \
33 | trainer.logger=['console','wandb'] \
34 | trainer.project_name='verl_megatron_math_gsm8k_examples' \
35 | trainer.experiment_name='deepseek_llm_7b_function_rm' \
36 | trainer.n_gpus_per_node=8 \
37 | trainer.nnodes=1 \
38 | trainer.save_freq=-1 \
39 | trainer.test_freq=5 \
40 | trainer.total_epochs=100 $@
41 |
--------------------------------------------------------------------------------
/verl/utils/py_functional.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | Contains small Python utility functions
16 | """
17 |
18 | from typing import Dict
19 | from types import SimpleNamespace
20 |
21 |
22 | def union_two_dict(dict1: Dict, dict2: Dict):
23 |     """Union two dicts. Will throw an error if the two dicts disagree on the value of a shared key.
24 | 
25 |     Args:
26 |         dict1: the dict to be updated in place
27 |         dict2: the dict whose items are merged into dict1
28 | 
29 |     Returns:
30 |         dict1, updated with the items of dict2
31 |     """
32 | for key, val in dict2.items():
33 | if key in dict1:
34 | assert dict2[key] == dict1[key], \
35 |                 f'{key} in dict1 and dict2 do not match'
36 | dict1[key] = val
37 |
38 | return dict1
39 |
40 |
41 | def append_to_dict(data: Dict, new_data: Dict):
42 | for key, val in new_data.items():
43 | if key not in data:
44 | data[key] = []
45 | data[key].append(val)
46 |
47 |
48 | class NestedNamespace(SimpleNamespace):
49 |
50 | def __init__(self, dictionary, **kwargs):
51 | super().__init__(**kwargs)
52 | for key, value in dictionary.items():
53 | if isinstance(value, dict):
54 | self.__setattr__(key, NestedNamespace(value))
55 | else:
56 | self.__setattr__(key, value)
57 |
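58 | # Usage sketch (illustrative only): NestedNamespace turns a nested dict, e.g. a parsed
59 | # config, into attribute-style access.
60 | #
61 | #   cfg = NestedNamespace({'trainer': {'nnodes': 1, 'n_gpus_per_node': 8}})
62 | #   assert cfg.trainer.n_gpus_per_node == 8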
--------------------------------------------------------------------------------
/verl/workers/reward_model/base.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | The base class for reward model
16 | """
17 |
18 | from abc import ABC, abstractmethod
19 |
20 | from verl import DataProto
21 |
22 |
23 | class BasePPORewardModel(ABC):
24 |
25 | def __init__(self, config):
26 | self.config = config
27 |
28 | @abstractmethod
29 | def compute_reward(self, data: DataProto) -> DataProto:
30 |         """Compute the reward given input_ids. The transformer should output a tensor with shape
31 |         [batch_size, sequence_length], and the value at the [EOS] position should be gathered.
32 | 
33 |         Args:
34 |             data: must contain keys "input_ids", "attention_mask" and "position_ids".
35 |                 - input_ids: [batch_size, sequence_length]
36 |                 - attention_mask: [batch_size, sequence_length]
37 |                 - position_ids: [batch_size, sequence_length]
38 | 
39 |         Returns: a DataProto containing "reward". Only the [EOS] position carries the reward;
40 |             other positions should have zero reward. Note that this may change in the future
41 |             if we use dense rewards, so we leave the interface open for the general case.
42 |                 - reward: [batch_size, sequence_length].
43 |
44 | """
45 | pass
46 |
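47 | # Illustrative sketch only (not part of verl): a trivial subclass that satisfies the
48 | # compute_reward contract by placing a constant reward at the last non-padded ([EOS])
49 | # position. Dict-style access via `data.batch[...]` and the `DataProto.from_dict`
50 | # constructor are assumptions here; check DataProto for the actual accessors before reuse.
51 | import torch
52 | 
53 | 
54 | class ConstantEOSRewardModel(BasePPORewardModel):
55 | 
56 |     def compute_reward(self, data: DataProto) -> DataProto:
57 |         attention_mask = data.batch["attention_mask"]  # (batch_size, sequence_length), assumed accessor
58 |         batch_size = attention_mask.shape[0]
59 |         reward = torch.zeros_like(attention_mask, dtype=torch.float32)
60 |         eos_positions = attention_mask.sum(dim=-1) - 1  # index of the last non-padded token
61 |         reward[torch.arange(batch_size, device=attention_mask.device), eos_positions] = 1.0
62 |         return DataProto.from_dict(tensors={"reward": reward})  # assumed constructor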
--------------------------------------------------------------------------------
/tests/gpu_utility/test_ops.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | def test_flash_attn_cross_entropy():
17 | from verl.utils.torch_functional import logprobs_from_logits_naive
18 |
19 | from verl.utils.debug import log_gpu_memory_usage
20 |
21 | from flash_attn.ops.triton.cross_entropy import cross_entropy_loss
22 |
23 | import torch
24 | from torch import nn
25 |
26 | log_gpu_memory_usage('At start')
27 |
28 | hidden_states = torch.randn(size=(2048, 5120), device='cuda', requires_grad=True, dtype=torch.bfloat16)
29 |
30 | linear = nn.Linear(in_features=5120, out_features=155136, bias=False, device='cuda', dtype=torch.bfloat16)
31 |
32 | logits = linear(hidden_states)
33 |
34 | # logits = logits.float()
35 | labels = torch.randint(low=0, high=155136, size=(2048,), device='cuda')
36 |
37 | log_gpu_memory_usage('before computation')
38 | # output = checkpoint.checkpoint(logprobs_from_logits, logits, labels, use_reentrant=True)
39 | output = -cross_entropy_loss(logits, labels)[0]
40 | # output = logprobs_from_logits(logits, labels)
41 | log_gpu_memory_usage('After forward')
42 | output.sum().backward()
43 | log_gpu_memory_usage('After backward')
44 |
45 | groundtruth = logprobs_from_logits_naive(logits.float(), labels)
46 |
47 | torch.testing.assert_close(output, groundtruth)
48 |
--------------------------------------------------------------------------------
/verl/third_party/vllm/vllm_v_0_6_3/hf_weight_loader.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | # Copyright 2023 The vLLM team.
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/model_loader
15 |
16 | from typing import Dict
17 |
18 | import torch.nn as nn
19 | from vllm.model_executor.model_loader.utils import set_default_torch_dtype
20 |
21 |
22 | def update_hf_weight_loader():
23 |     print("no hf weight loader needs to be updated")
24 | return
25 |
26 |
27 | def load_hf_weights(actor_weights: Dict, vllm_model: nn.Module):
28 | assert isinstance(actor_weights, Dict)
29 | with set_default_torch_dtype(next(vllm_model.parameters()).dtype): # TODO
30 | if vllm_model.config.tie_word_embeddings and "lm_head.weight" in actor_weights.keys():
31 | del actor_weights["lm_head.weight"]
32 | vllm_model.load_weights(actor_weights.items())
33 | for _, module in vllm_model.named_modules():
34 | quant_method = getattr(module, "quant_method", None)
35 | if quant_method is not None:
36 | quant_method.process_weights_after_loading(module)
37 | # FIXME: Remove this after Mixtral is updated
38 | # to use quant_method.
39 | if hasattr(module, "process_weights_after_loading"):
40 | module.process_weights_after_loading()
41 | vllm_model = vllm_model.cuda()
42 |
--------------------------------------------------------------------------------
/examples/ppo_trainer/run_deepseek_full_hh_rlhf.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | train_files=$HOME/data/full_hh_rlhf/rl/train.parquet
4 | test_files=$HOME/data/full_hh_rlhf/rl/train.parquet # not used
5 |
6 | python3 -m verl.trainer.main_ppo --config-path=./config --config-name='ppo_megatron_trainer'\
7 | data.train_files="$train_files" \
8 | data.val_files="$test_files" \
9 | data.train_batch_size=512 \
10 | data.val_batch_size=128 \
11 | data.max_prompt_length=128 \
12 | data.max_response_length=128 \
13 | actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
14 | actor_rollout_ref.actor.optim.lr=1e-6 \
15 | actor_rollout_ref.actor.ppo_mini_batch_size=128 \
16 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
17 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
18 | actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
19 | actor_rollout_ref.rollout.name=vllm \
20 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
21 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
22 | actor_rollout_ref.ref.param_offload=False \
23 | critic.optim.lr=1e-5 \
24 | critic.model.path=deepseek-ai/deepseek-llm-7b-chat \
25 | critic.model.enable_gradient_checkpointing=False \
26 | critic.ppo_micro_batch_size_per_gpu=4 \
27 | reward_model.enable=True \
28 | reward_model.megatron.tensor_model_parallel_size=4 \
29 | reward_model.model.path=deepseek-ai/deepseek-llm-7b-chat \
30 | reward_model.micro_batch_size_per_gpu=4 \
31 | reward_model.param_offload=False \
32 | algorithm.kl_ctrl.kl_coef=0.001 \
33 | trainer.critic_warmup=0 \
34 | trainer.logger=['console','wandb'] \
35 | trainer.project_name='verl_megatron_full_hh_rlhf_examples' \
36 | trainer.experiment_name='deepseek_llm_7b_model_rm' \
37 | trainer.n_gpus_per_node=8 \
38 | trainer.nnodes=1 \
39 | trainer.save_freq=-1 \
40 | trainer.test_freq=5 \
41 | trainer.total_epochs=100 $@
42 |
--------------------------------------------------------------------------------
/tests/e2e/run_deepseek_megatron.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | # the config file used: verl/trainer/main_ppo/config/ppo_megatron_trainer.yaml
4 |
5 | huggingface-cli download deepseek-ai/deepseek-coder-1.3b-instruct
6 |
7 | python3 -m verl.trainer.main_ppo --config-path=config \
8 | --config-name='ppo_megatron_trainer.yaml'\
9 | data.train_files=$HOME/data/gsm8k/train.parquet \
10 | data.val_files=$HOME/data/gsm8k/test.parquet \
11 | data.train_batch_size=1024 \
12 | data.val_batch_size=1312 \
13 | data.max_prompt_length=512 \
14 | data.max_response_length=512 \
15 | actor_rollout_ref.model.path=deepseek-ai/deepseek-coder-1.3b-instruct \
16 | actor_rollout_ref.actor.optim.lr=2e-6 \
17 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
18 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
19 | actor_rollout_ref.actor.megatron.tensor_model_parallel_size=2 \
20 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
21 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
22 | actor_rollout_ref.rollout.name=vllm \
23 | actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
24 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
25 | actor_rollout_ref.ref.megatron.tensor_model_parallel_size=2 \
26 | critic.optim.lr=2e-5 \
27 | critic.model.path=deepseek-ai/deepseek-coder-1.3b-instruct \
28 | critic.model.enable_gradient_checkpointing=False \
29 | critic.ppo_micro_batch_size_per_gpu=4 \
30 | critic.megatron.tensor_model_parallel_size=2 \
31 | algorithm.kl_ctrl.kl_coef=0.001 \
32 | trainer.critic_warmup=0 \
33 | trainer.logger=['console'] \
34 | trainer.project_name='verl_megatron_gsm8k_examples' \
35 | trainer.experiment_name='deepseek_llm_1b3_function_rm' \
36 | trainer.n_gpus_per_node=8 \
37 | trainer.nnodes=1 \
38 | trainer.save_freq=-1 \
39 | trainer.test_freq=1 \
40 | trainer.total_epochs=15 \
41 | trainer.total_training_steps=3 $@
42 |
--------------------------------------------------------------------------------
/verl/third_party/vllm/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from importlib.metadata import version, PackageNotFoundError
16 |
17 |
18 | def get_version(pkg):
19 | try:
20 | return version(pkg)
21 | except PackageNotFoundError:
22 | return None
23 |
24 |
25 | package_name = 'vllm'
26 | package_version = get_version(package_name)
27 |
28 | if package_version == '0.3.1':
29 | vllm_version = '0.3.1'
30 | from .vllm_v_0_3_1.llm import LLM
31 | from .vllm_v_0_3_1.llm import LLMEngine
32 | from .vllm_v_0_3_1 import parallel_state
33 | elif package_version == '0.4.2':
34 | vllm_version = '0.4.2'
35 | from .vllm_v_0_4_2.llm import LLM
36 | from .vllm_v_0_4_2.llm import LLMEngine
37 | from .vllm_v_0_4_2 import parallel_state
38 | elif package_version == '0.5.4':
39 | vllm_version = '0.5.4'
40 | from .vllm_v_0_5_4.llm import LLM
41 | from .vllm_v_0_5_4.llm import LLMEngine
42 | from .vllm_v_0_5_4 import parallel_state
43 | elif package_version == '0.6.3':
44 | vllm_version = '0.6.3'
45 | from .vllm_v_0_6_3.llm import LLM
46 | from .vllm_v_0_6_3.llm import LLMEngine
47 | from .vllm_v_0_6_3 import parallel_state
48 | else:
49 | raise ValueError(
50 | f'vllm version {package_version} not supported. Currently supported versions are 0.3.1, 0.4.2, 0.5.4 and 0.6.3.'
51 | )
52 |
--------------------------------------------------------------------------------
/examples/ppo_trainer/run_gemma.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | python3 -m verl.trainer.main_ppo \
4 | data.train_files=$HOME/data/gsm8k/train.parquet \
5 | data.val_files=$HOME/data/gsm8k/test.parquet \
6 | data.train_batch_size=512 \
7 | data.val_batch_size=1312 \
8 | data.max_prompt_length=1024 \
9 | data.max_response_length=512 \
10 | actor_rollout_ref.model.path=google/gemma-2-2b-it \
11 | actor_rollout_ref.actor.optim.lr=1e-6 \
12 | actor_rollout_ref.model.use_remove_padding=True \
13 | actor_rollout_ref.actor.ppo_mini_batch_size=128 \
14 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
15 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
16 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
17 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
18 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=4 \
19 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
20 | actor_rollout_ref.rollout.name=vllm \
21 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
22 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=4 \
23 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
24 | critic.optim.lr=1e-5 \
25 | critic.model.use_remove_padding=True \
26 | critic.model.path=google/gemma-2-2b-it \
27 | critic.model.enable_gradient_checkpointing=False \
28 | critic.ppo_micro_batch_size_per_gpu=4 \
29 | critic.model.fsdp_config.param_offload=False \
30 | critic.model.fsdp_config.grad_offload=False \
31 | critic.model.fsdp_config.optimizer_offload=False \
32 | algorithm.kl_ctrl.kl_coef=0.001 \
33 | trainer.critic_warmup=0 \
34 | trainer.logger=['console','wandb'] \
35 | trainer.project_name='verl_example' \
36 | trainer.experiment_name='gemma2b_function_rm' \
37 | trainer.n_gpus_per_node=2 \
38 | trainer.nnodes=1 \
39 | trainer.save_freq=-1 \
40 | trainer.test_freq=10 \
41 | trainer.total_epochs=15 $@
42 |
--------------------------------------------------------------------------------
/docker/Dockerfile.vemlp.vllm.te:
--------------------------------------------------------------------------------
1 | # docker buildx build --platform linux/x86_64 -t "verlai/verl:$TAG" -f docker/$FILE .
2 |
3 | # the one in docker.io is an alias for the one in veturbo
4 | # FROM vemlp-cn-beijing.cr.volces.com/veturbo/pytorch:2.4-cu124
5 | FROM docker.io/haibinlin/verl:v0.0.5-th2.4.0-cu124-base
6 |
7 | # only config pip index with https://pypi.tuna.tsinghua.edu.cn/simple if needed
8 | # unset for now
9 | RUN pip3 config unset global.index-url
10 |
11 | # transformers 4.47.0 contains the following bug:
12 | # AttributeError: 'Gemma2Attention' object has no attribute '_flash_attn_uses_top_left_mask'
13 | RUN pip3 install --no-cache-dir \
14 | torch==2.4.0 \
15 | accelerate \
16 | codetiming \
17 | dill \
18 | hydra-core \
19 | numpy \
20 | pybind11 \
21 | tensordict \
22 | "transformers <= 4.46.0"
23 |
24 | RUN pip3 install --no-cache-dir flash-attn==2.7.0.post2 --no-build-isolation
25 |
26 | # vllm depends on ray, and veRL does not support ray > 2.37
27 | RUN pip3 install --no-cache-dir vllm==0.6.3 ray==2.10
28 |
29 | # install apex
30 | RUN MAX_JOBS=4 pip3 install -v --disable-pip-version-check --no-cache-dir --no-build-isolation \
31 | --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" \
32 | git+https://github.com/NVIDIA/apex
33 |
34 | # install Transformer Engine
35 | # - flash-attn pinned to 2.5.3 by TransformerEngine, switch to eric-haibin-lin/TransformerEngine.git@v1.7.0 to relax version req
36 | # - install with: MAX_JOBS=1 NINJA_FLAGS="-j1" TE_BUILD_WITH_NINJA=0 to avoid OOM
37 | # - cudnn is required by TransformerEngine
38 | # RUN CUDNN_PATH=/opt/conda/lib/python3.11/site-packages/nvidia/cudnn \
39 | # pip3 install git+https://github.com/eric-haibin-lin/TransformerEngine.git@v1.7.0
40 | RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install flash-attn==2.5.3 --no-cache-dir --no-build-isolation
41 | RUN MAX_JOBS=1 NINJA_FLAGS="-j1" pip3 install git+https://github.com/NVIDIA/TransformerEngine.git@v1.7
42 |
--------------------------------------------------------------------------------
/tests/e2e/arithmetic_sequence/data/create_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from tests.e2e.envs.digit_completion import DigitCompletion, generate_ground_truth_response
16 | from torch.utils import data
17 | import os
18 |
19 | if __name__ == '__main__':
20 | simple_task = DigitCompletion(max_number=9, max_diff=9, max_num_in_response=9)
21 | all_prompts = simple_task.get_all_prompts()
22 |
23 | # 21 * 6 * 4
24 | train_data, test_data = data.random_split(all_prompts, lengths=[0.8, 0.2])
25 | train_data = list(train_data)
26 | test_data = list(test_data)
27 |
28 | train_data = [[{'role': 'user', 'content': str(item)}] \
29 | for item in train_data]
30 | test_data = [[{'role': 'user', 'content': str(item)}] \
31 | for item in test_data]
32 |
33 | print(f'Size of train: {len(train_data)}, size of test: {len(test_data)}')
34 |
35 | train_data = {'prompt': train_data}
36 | test_data = {'prompt': test_data}
37 |
38 | model_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)))
39 |
40 | import pandas as pd
41 |
42 | train_data_frame = pd.DataFrame(train_data)
43 | test_data_frame = pd.DataFrame(test_data)
44 |
45 | train_data_frame.to_parquet(os.path.join(model_folder, 'train.parquet'))
46 | test_data_frame.to_parquet(os.path.join(model_folder, 'test.parquet'))
47 |
--------------------------------------------------------------------------------
/examples/remax_trainer/run_qwen2.5-3b_seq_balance.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | export HF_DATASETS_OFFLINE=1
4 | export TRANSFORMERS_OFFLINE=1
5 |
6 | export VLLM_ATTENTION_BACKEND=XFORMERS
7 |
8 | python3 -m verl.trainer.main_ppo \
9 | algorithm.adv_estimator=remax \
10 | data.train_files=$HOME/data/gsm8k/train.parquet \
11 | data.val_files=$HOME/data/gsm8k/train.parquet \
12 | data.train_batch_size=512 \
13 | data.val_batch_size=1312 \
14 | data.max_prompt_length=512 \
15 | data.max_response_length=1024 \
16 | actor_rollout_ref.model.path=Qwen/Qwen2.5-3B-Instruct \
17 | actor_rollout_ref.actor.optim.lr=1e-6 \
18 | actor_rollout_ref.model.use_remove_padding=True \
19 | actor_rollout_ref.actor.ppo_mini_batch_size=128 \
20 | actor_rollout_ref.actor.use_dynamic_bsz=True \
21 | actor_rollout_ref.actor.ppo_max_token_len_per_gpu=30000 \
22 | actor_rollout_ref.actor.use_kl_loss=True \
23 | actor_rollout_ref.actor.kl_loss_coef=0.001 \
24 | actor_rollout_ref.actor.kl_loss_type=low_var_kl \
25 | actor_rollout_ref.model.enable_gradient_checkpointing=True \
26 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
27 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
28 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
29 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
30 | actor_rollout_ref.rollout.name=vllm \
31 | actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
32 | actor_rollout_ref.rollout.n=4 \
33 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
34 | algorithm.kl_ctrl.kl_coef=0.001 \
35 | trainer.critic_warmup=0 \
36 | trainer.logger=['console','wandb'] \
37 | trainer.project_name='verl_remax_example_gsm8k' \
38 | trainer.experiment_name='qwen2.5_3b_function_rm_kl1e-3' \
39 | +trainer.val_before_train=False \
40 | trainer.n_gpus_per_node=8 \
41 | trainer.nnodes=1 \
42 | trainer.save_freq=-1 \
43 | trainer.test_freq=5 \
44 | trainer.total_epochs=5 $@
--------------------------------------------------------------------------------
/examples/remax_trainer/run_qwen2.5-7b_seq_balance.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | export HF_DATASETS_OFFLINE=1
4 | export TRANSFORMERS_OFFLINE=1
5 |
6 | export VLLM_ATTENTION_BACKEND=XFORMERS
7 |
8 | python3 -m verl.trainer.main_ppo \
9 | algorithm.adv_estimator=remax \
10 | data.train_files=$HOME/data/gsm8k/train.parquet \
11 | data.val_files=$HOME/data/gsm8k/train.parquet \
12 | data.train_batch_size=1024 \
13 | data.val_batch_size=1312 \
14 | data.max_prompt_length=512 \
15 | data.max_response_length=1024 \
16 | actor_rollout_ref.model.path=Qwen/Qwen2.5-7B-Instruct \
17 | actor_rollout_ref.actor.optim.lr=1e-6 \
18 | actor_rollout_ref.model.use_remove_padding=True \
19 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
20 | actor_rollout_ref.actor.use_dynamic_bsz=True \
21 | actor_rollout_ref.actor.ppo_max_token_len_per_gpu=24000 \
22 | actor_rollout_ref.actor.use_kl_loss=True \
23 | actor_rollout_ref.actor.kl_loss_coef=0.001 \
24 | actor_rollout_ref.actor.kl_loss_type=low_var_kl \
25 | actor_rollout_ref.model.enable_gradient_checkpointing=True \
26 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
27 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
28 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
29 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
30 | actor_rollout_ref.rollout.name=vllm \
31 | actor_rollout_ref.rollout.gpu_memory_utilization=0.8 \
32 | actor_rollout_ref.rollout.n=4 \
33 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
34 | algorithm.kl_ctrl.kl_coef=0.001 \
35 | trainer.critic_warmup=0 \
36 | trainer.logger=['console','wandb'] \
37 | trainer.project_name='verl_remax_example_gsm8k' \
38 | trainer.experiment_name='qwen2.5_7b_function_rm_kl1e-3' \
39 | +trainer.val_before_train=False \
40 | trainer.n_gpus_per_node=8 \
41 | trainer.nnodes=1 \
42 | trainer.save_freq=-1 \
43 | trainer.test_freq=5 \
44 | trainer.total_epochs=10 $@
--------------------------------------------------------------------------------
/tests/e2e/run_qwen_gsm8k_function_rm.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | export VLLM_ATTENTION_BACKEND=XFORMERS
4 |
5 | python3 -m verl.trainer.main_ppo \
6 | data.train_files=$HOME/data/gsm8k/train.parquet \
7 | data.val_files=$HOME/data/gsm8k/test.parquet \
8 | data.train_batch_size=1024 \
9 | data.val_batch_size=1312 \
10 | data.max_prompt_length=512 \
11 | data.max_response_length=512 \
12 | actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
13 | actor_rollout_ref.actor.optim.lr=1e-6 \
14 | actor_rollout_ref.model.use_remove_padding=True \
15 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
16 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
17 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
18 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
19 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
20 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
21 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
22 | actor_rollout_ref.rollout.name=vllm \
23 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
24 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
25 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
26 | critic.optim.lr=1e-5 \
27 | critic.model.use_remove_padding=True \
28 | critic.model.path=Qwen/Qwen2.5-0.5B \
29 | critic.model.enable_gradient_checkpointing=False \
30 | critic.ppo_micro_batch_size_per_gpu=4 \
31 | critic.model.fsdp_config.param_offload=False \
32 | critic.model.fsdp_config.grad_offload=False \
33 | critic.model.fsdp_config.optimizer_offload=False \
34 | algorithm.kl_ctrl.kl_coef=0.001 \
35 | trainer.critic_warmup=0 \
36 | trainer.logger=['console'] \
37 | trainer.project_name='verl_example_gsm8k' \
38 | trainer.experiment_name='qwen_e2e_ci_function_rm' \
39 | trainer.n_gpus_per_node=8 \
40 | trainer.nnodes=1 \
41 | trainer.save_freq=1 \
42 | trainer.total_training_steps=1 $@
43 |
--------------------------------------------------------------------------------
/verl/utils/megatron/sequence_parallel.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | import torch
17 | import torch.nn.functional as F
18 | from megatron.core import parallel_state as mpu
19 |
20 |
21 | def mark_parameter_as_sequence_parallel(parameter):
22 | setattr(parameter, 'sequence_parallel', True)
23 |
24 |
25 | def is_sequence_parallel_param(param):
26 | return hasattr(param, 'sequence_parallel') and param.sequence_parallel
27 |
28 |
29 | def pad_to_sequence_parallel(unpad_tokens: torch.Tensor):
30 | """pad the tokens such that the total length is a multiple of sp world size
31 |
32 | Args:
33 | unpad_tokens: (total_nnz, ...). Tokens after removing padding
34 |
35 | Returns:
36 |
37 | """
38 | total_nnz = unpad_tokens.shape[0]
39 | sp_world_size = mpu.get_tensor_model_parallel_world_size()
40 |
41 | if total_nnz % sp_world_size == 0:
42 | pad_size = 0
43 | else:
44 | pad_size = sp_world_size - total_nnz % sp_world_size
45 |
46 | if pad_size > 0:
47 | if unpad_tokens.ndim == 1:
48 | unpad_tokens = F.pad(unpad_tokens, (0, pad_size))
49 | elif unpad_tokens.ndim == 2:
50 | unpad_tokens = F.pad(unpad_tokens, (0, 0, 0, pad_size))
51 | else:
52 |             raise NotImplementedError(f'Padding dim {unpad_tokens.ndim} is not supported')
53 |
54 | return unpad_tokens
55 |
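
A minimal sketch of the padding rule implemented above, written in plain PyTorch with the SP world size passed in explicitly (the helper name pad_to_multiple and the example sizes are illustrative, and no Megatron process group is required):

    import torch
    import torch.nn.functional as F

    def pad_to_multiple(unpad_tokens: torch.Tensor, sp_world_size: int) -> torch.Tensor:
        # same rule as pad_to_sequence_parallel: right-pad dim 0 up to a multiple of sp_world_size
        pad_size = (-unpad_tokens.shape[0]) % sp_world_size
        if pad_size > 0:
            pad = (0, pad_size) if unpad_tokens.ndim == 1 else (0, 0, 0, pad_size)
            unpad_tokens = F.pad(unpad_tokens, pad)
        return unpad_tokens

    tokens = torch.arange(10)                               # total_nnz = 10
    print(pad_to_multiple(tokens, sp_world_size=4).shape)   # torch.Size([12]), the next multiple of 4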
--------------------------------------------------------------------------------
/tests/e2e/run_qwen_gsm8k_function_rm_no_rmpad.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | export VLLM_ATTENTION_BACKEND=XFORMERS
4 |
5 | python3 -m verl.trainer.main_ppo \
6 | data.train_files=$HOME/data/gsm8k/train.parquet \
7 | data.val_files=$HOME/data/gsm8k/test.parquet \
8 | data.train_batch_size=1024 \
9 | data.val_batch_size=1312 \
10 | data.max_prompt_length=512 \
11 | data.max_response_length=512 \
12 | actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
13 | actor_rollout_ref.actor.optim.lr=1e-6 \
14 | actor_rollout_ref.model.use_remove_padding=False \
15 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
16 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
17 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
18 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
19 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
20 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
21 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
22 | actor_rollout_ref.rollout.name=vllm \
23 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
24 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
25 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
26 | critic.optim.lr=1e-5 \
27 | critic.model.use_remove_padding=False \
28 | critic.model.path=Qwen/Qwen2.5-0.5B \
29 | critic.model.enable_gradient_checkpointing=False \
30 | critic.ppo_micro_batch_size_per_gpu=4 \
31 | critic.model.fsdp_config.param_offload=False \
32 | critic.model.fsdp_config.grad_offload=False \
33 | critic.model.fsdp_config.optimizer_offload=False \
34 | algorithm.kl_ctrl.kl_coef=0.001 \
35 | trainer.critic_warmup=0 \
36 | trainer.logger=['console'] \
37 | +trainer.val_before_train=False \
38 | trainer.project_name='verl_example_gsm8k' \
39 | trainer.experiment_name='qwen_e2e_ci_function_rm' \
40 | trainer.n_gpus_per_node=8 \
41 | trainer.nnodes=1 \
42 | trainer.save_freq=-1 \
43 | trainer.total_training_steps=1 $@
44 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/evaluation/compute_code_execution_metrics.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from concurrent.futures import ProcessPoolExecutor
3 | import tqdm
4 |
5 | from verl.utils.reward_score.livecodebench.lcb_runner.evaluation.utils_execute import BASE_IMPORTS, check_correctness
6 |
7 | def evaluate_score(args) -> list[bool]:
8 | gs, (c, i, o) = args
9 |
10 | execution_results = []
11 | for g in gs:
12 | if i in g:
13 |             pass  # skip generations that contain the raw input string
14 | else:
15 | code_to_execute = f"{BASE_IMPORTS}\n{c}\nassert {o} == {g}"
16 | execution_results.append(check_correctness(code_to_execute, 3))
17 | if len(execution_results) == 0:
18 | execution_results = [False] * len(gs)
19 | return execution_results
20 |
21 | def pass_at_k(n, c, k):
22 | if n - c < k: return 1.0
23 | return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
24 |
25 | def code_execution_metrics(
26 | samples,
27 | generations,
28 | ):
29 | # execute the code
30 | references = [(doc["code"], doc["input"], doc["output"]) for doc in samples]
31 | with ProcessPoolExecutor() as executor:
32 | args_list = zip(generations, references)
33 | results = executor.map(evaluate_score, args_list)
34 | all_results = list(results)
35 |
36 | # serial version
37 | # all_results = []
38 | # for i in range(len(generations)):
39 | # generation = generations[i]
40 | # result = evaluate_score([generation, references[i]])
41 | # all_results.append(result)
42 |
43 | # compute pass@1
44 | pass_at_1s = []
45 | for execution_result in all_results:
46 | c, n = execution_result.count(True), len(execution_result)
47 | pass_at_1s.append(pass_at_k(n, c, 1))
48 | metrics = {"pass@1": sum(pass_at_1s) / len(pass_at_1s) * 100}
49 |
50 | results = {}
51 | for i, r in enumerate(all_results):
52 | r_new = []
53 | for _r in r:
54 | r_new.append([_r])
55 | results[i] = r_new
56 | return [metrics, results]
57 |
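
For reference, pass_at_k above is the standard unbiased pass@k estimator, 1 - C(n-c, k) / C(n, k). A self-contained sanity check of that formula (the helper is mirrored inline so nothing beyond numpy is needed):

    import numpy as np

    def pass_at_k(n, c, k):
        # unbiased estimator: 1 - C(n - c, k) / C(n, k), computed as a running product
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    print(pass_at_k(n=4, c=0, k=1))  # 0.0  -> none of the 4 samples passed
    print(pass_at_k(n=4, c=1, k=1))  # 0.25 -> one of the 4 samples passed
    print(pass_at_k(n=4, c=4, k=1))  # 1.0  -> every sample passed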
--------------------------------------------------------------------------------
/tests/ray/test_ray_local_envs.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | e2e test verl.single_controller.ray
16 | """
17 | import os
18 | import ray
19 |
20 | from verl.single_controller.ray.base import RayResourcePool, RayClassWithInitArgs, RayWorkerGroup
21 | from verl.single_controller.base.worker import Worker
22 | from verl.single_controller.base.decorator import register, Dispatch, collect_all_to_all, Execute
23 |
24 |
25 | @ray.remote
26 | class TestActor(Worker):
27 |
28 | def __init__(self) -> None:
29 | super().__init__()
30 |
31 | def getenv(self, key):
32 | val = os.getenv(key, f"{key} not set")
33 | return val
34 |
35 |
36 | def test_basics():
37 | ray.init()
38 |
39 | # create 4 workers, each hold a GPU
40 | resource_pool = RayResourcePool([4], use_gpu=True)
41 | class_with_args = RayClassWithInitArgs(cls=TestActor)
42 |
43 | worker_group = RayWorkerGroup(resource_pool=resource_pool,
44 | ray_cls_with_init=class_with_args,
45 | name_prefix="worker_group_basic")
46 |
47 | output = worker_group.execute_all_sync("getenv", key="RAY_LOCAL_WORLD_SIZE")
48 | assert output == ["4", "4", "4", "4"]
49 |
50 | output = worker_group.execute_all_sync("getenv", key="RAY_LOCAL_RANK")
51 | assert set(output) == set(["0", "1", "2", "3"])
52 |
53 | ray.shutdown()
54 |
55 |
56 | if __name__ == '__main__':
57 | test_basics()
58 |
--------------------------------------------------------------------------------
/verl/models/llama/megatron/layers/parallel_rmsnorm.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import numbers
16 | import torch
17 | from megatron.core import ModelParallelConfig
18 | from torch import nn
19 | from transformers import LlamaConfig
20 |
21 | from apex.normalization.fused_layer_norm import fused_rms_norm_affine
22 | from verl.utils.megatron import sequence_parallel as sp_utils
23 |
24 |
25 | class ParallelLlamaRMSNorm(nn.Module):
26 |
27 | def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
28 | """
29 | LlamaRMSNorm is equivalent to T5LayerNorm
30 | """
31 | super().__init__()
32 | if isinstance(config.hidden_size, numbers.Integral):
33 | normalized_shape = (config.hidden_size,)
34 | self.normalized_shape = torch.Size(normalized_shape)
35 | self.weight = nn.Parameter(torch.ones(self.normalized_shape))
36 | self.variance_epsilon = config.rms_norm_eps
37 |
38 | if megatron_config.sequence_parallel:
39 | sp_utils.mark_parameter_as_sequence_parallel(self.weight)
40 |
41 | def forward(self, hidden_states):
42 | return fused_rms_norm_affine(input=hidden_states,
43 | weight=self.weight,
44 | normalized_shape=self.normalized_shape,
45 | eps=self.variance_epsilon,
46 | memory_efficient=True)
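
For intuition, the fused kernel used above should agree (up to numerical precision) with the eager RMSNorm below; this reference sketch uses only vanilla PyTorch and is purely illustrative:

    import torch

    def rms_norm_reference(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float) -> torch.Tensor:
        # RMSNorm (a.k.a. T5LayerNorm): rescale by the reciprocal RMS over the last dim, then apply the learned weight
        variance = hidden_states.float().pow(2).mean(dim=-1, keepdim=True)
        normed = hidden_states.float() * torch.rsqrt(variance + eps)
        return weight * normed.to(hidden_states.dtype)

    x = torch.randn(2, 5, 8)
    w = torch.ones(8)
    print(rms_norm_reference(x, w, eps=1e-6).shape)  # torch.Size([2, 5, 8])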
--------------------------------------------------------------------------------
/examples/ppo_trainer/run_deepseek7b_llm.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | python3 -m verl.trainer.main_ppo \
4 | data.train_files=$HOME/data/gsm8k/train.parquet \
5 | data.val_files=$HOME/data/gsm8k/test.parquet \
6 | data.train_batch_size=1024 \
7 | data.val_batch_size=1312 \
8 | data.max_prompt_length=512 \
9 | data.max_response_length=512 \
10 | actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
11 | actor_rollout_ref.actor.optim.lr=1e-6 \
12 | actor_rollout_ref.model.use_remove_padding=True \
13 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
14 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
15 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
16 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
17 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
18 | actor_rollout_ref.model.enable_gradient_checkpointing=True \
19 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \
20 | actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
21 | actor_rollout_ref.rollout.name=vllm \
22 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
23 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \
24 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
25 | critic.optim.lr=1e-5 \
26 | critic.model.use_remove_padding=True \
27 | critic.model.path=deepseek-ai/deepseek-llm-7b-chat \
28 | critic.model.enable_gradient_checkpointing=True \
29 | critic.ppo_micro_batch_size_per_gpu=32 \
30 | critic.model.fsdp_config.param_offload=False \
31 | critic.model.fsdp_config.grad_offload=False \
32 | critic.model.fsdp_config.optimizer_offload=False \
33 | algorithm.kl_ctrl.kl_coef=0.001 \
34 | trainer.critic_warmup=0 \
35 | trainer.logger=['console','wandb'] \
36 | trainer.project_name='verl_example_gsm8k' \
37 | trainer.experiment_name='deepseek_llm_7b_function_rm' \
38 | trainer.n_gpus_per_node=8 \
39 | trainer.nnodes=1 \
40 | trainer.save_freq=-1 \
41 | trainer.test_freq=1 \
42 | trainer.total_epochs=15 $@
43 |
--------------------------------------------------------------------------------
/.github/workflows/e2e_sft.yml:
--------------------------------------------------------------------------------
1 | name: e2e_sft
2 |
3 | on:
4 | # Trigger the workflow on push or pull request,
5 | # but only for the main branch
6 | push:
7 | branches:
8 | - main
9 | paths:
10 | - "**/*.py"
11 | - .github/workflows/e2e_sft.yml
12 | pull_request:
13 | branches:
14 | - main
15 | paths:
16 | - "**/*.py"
17 | - .github/workflows/e2e_sft.yml
18 | - "tests/e2e/*.sh"
19 |
20 |
21 |
22 | jobs:
23 | e2e_sft:
24 | runs-on: [self-hosted, l20-1]
25 | env:
26 | HTTP_PROXY: ${{ secrets.PROXY_HTTP }}
27 | HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }}
28 | NO_PROXY: "localhost,127.0.0.1"
29 | HF_HUB_ENABLE_HF_TRANSFER: 1
30 | container:
31 | image: verlai/verl:vemlp-th2.4.0-cu124-vllm0.6.3-ray2.10-te1.7-v0.0.3
32 | options: --gpus all --shm-size=10g
33 | steps:
34 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
35 | with:
36 | fetch-depth: 0
37 | - name: Install the current repository
38 | run: |
39 | pip3 install hf_transfer
40 | pip3 install -e .[test]
41 | - name: Prepare gsm8k dataset
42 | run: |
43 | ray stop --force
44 | python3 examples/data_preprocess/gsm8k.py
45 |       - name: Running gsm8k e2e SFT training tests on 8 L20 GPUs with rmpad
46 | run: |
47 | ray stop --force
48 | bash tests/sft/run_sft.sh
49 |       - name: Running gsm8k e2e training tests on 8 L20 GPUs with sequence parallelism
50 | run: |
51 | ray stop --force
52 | bash examples/sft/gsm8k/run_qwen_05_sp2.sh 8 $HOME/ckpts/
53 | - name: Check loss difference between sequence parallel vs. default implementation
54 | run: |
55 | ray stop --force
56 | bash tests/sft/run_sft_sp_loss_match.sh
57 |       - name: Running gsm8k e2e training tests on 8 L20 GPUs with sequence parallelism and liger
58 | run: |
59 | ray stop --force
60 | bash tests/sft/run_sft_qwen05_sp2_liger.sh 8 $HOME/ckpts/
61 |
--------------------------------------------------------------------------------
/verl/third_party/vllm/vllm_v_0_5_4/hf_weight_loader.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | # Copyright 2023 The vLLM team.
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models
15 |
16 | from typing import Dict, Union, Optional, Iterable, Tuple
17 |
18 | import torch
19 | import torch.nn as nn
20 |
21 | from vllm.model_executor.model_loader.utils import set_default_torch_dtype
22 | from vllm.model_executor.model_loader.weight_utils import default_weight_loader
23 |
24 |
25 | def update_hf_weight_loader():
26 | print('no hf weight loader need to be updated')
27 | return
28 |
29 |
30 | def load_hf_weights(actor_weights: Dict, vllm_model: nn.Module):
31 | assert isinstance(actor_weights, Dict)
32 | with set_default_torch_dtype(next(vllm_model.parameters()).dtype): # TODO
33 | if vllm_model.config.tie_word_embeddings and "lm_head.weight" in actor_weights.keys():
34 | del actor_weights["lm_head.weight"]
35 | vllm_model.load_weights(actor_weights.items())
36 | for _, module in vllm_model.named_modules():
37 | quant_method = getattr(module, "quant_method", None)
38 | if quant_method is not None:
39 | quant_method.process_weights_after_loading(module)
40 | # FIXME: Remove this after Mixtral is updated
41 | # to use quant_method.
42 | if hasattr(module, "process_weights_after_loading"):
43 | module.process_weights_after_loading()
44 | vllm_model = vllm_model.cuda()
45 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # from . import gsm8k, math, prime_math, prime_code
15 |
16 |
17 | def _default_compute_score(data_source, solution_str, ground_truth):
18 | # if data_source == 'openai/gsm8k':
19 | # from . import gsm8k
20 | # res = gsm8k.compute_score(solution_str, ground_truth)
21 | # elif data_source in ['lighteval/MATH', 'DigitalLearningGmbH/MATH-lighteval']:
22 | # from . import math
23 | # res = math.compute_score(solution_str, ground_truth)
24 | # elif data_source in [
25 | # 'numina_aops_forum', 'numina_synthetic_math', 'numina_amc_aime', 'numina_synthetic_amc', 'numina_cn_k12',
26 | # 'numina_olympiads'
27 | # ]:
28 | # from . import prime_math
29 | # res = prime_math.compute_score(solution_str, ground_truth)
30 | # elif data_source in ['codecontests', 'apps', 'codeforces', 'taco']:
31 | # from . import prime_code
32 | # res = prime_code.compute_score(solution_str, ground_truth, continuous=True)
33 | # else:
34 | # raise NotImplementedError
35 | # from . import math
36 | # res = math.compute_score(solution_str, ground_truth)
37 |
38 | from verl.utils.reward_score.deepscaler_math.math_reward import deepscaler_reward_fn
39 | res = deepscaler_reward_fn(solution_str, ground_truth)
40 |
41 | if isinstance(res, (int, float, bool)):
42 | return float(res)
43 | else:
44 | return float(res[0])
45 |
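
A usage sketch of the dispatcher above; the call assumes the verl package (and the deepscaler_math reward it imports) is installed, and the solution string and ground truth are made-up examples:

    from verl.utils.reward_score import _default_compute_score

    # data_source is currently ignored: every sample is routed to deepscaler_reward_fn
    score = _default_compute_score(
        data_source="openai/gsm8k",
        solution_str="... reasoning ... The answer is \\boxed{72}.",
        ground_truth="72",
    )
    print(score)  # a float, typically 1.0 for a correct answer and 0.0 otherwise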
--------------------------------------------------------------------------------
/tests/ray/check_worker_alive/main.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import time
16 | import sys
17 | import os
18 |
19 | import ray
20 |
21 | from verl.single_controller.ray.base import RayResourcePool, RayClassWithInitArgs, RayWorkerGroup
22 | from verl.single_controller.base.worker import Worker
23 | from verl.single_controller.base.decorator import register, Dispatch
24 |
25 |
26 | @ray.remote
27 | class TestActor(Worker):
28 |
29 | def __init__(self) -> None:
30 | super().__init__()
31 |
32 | @register(dispatch_mode=Dispatch.ONE_TO_ALL, blocking=False)
33 | def foo(self, wait_time):
34 | time.sleep(wait_time)
35 | sys.exit(1)
36 |
37 |
38 | if __name__ == "__main__":
39 | wait_time = int(os.getenv("WAIT_TIME", "10"))
40 |
41 | ray.init()
42 |
43 | # test single-node-no-partition
44 |     print("test single-node-no-partition")
45 | resource_pool = RayResourcePool([2], use_gpu=True)
46 | class_with_args = RayClassWithInitArgs(cls=TestActor)
47 |
48 | print("create worker group")
49 | wg = RayWorkerGroup(resource_pool, class_with_args, name_prefix="test")
50 |
51 | wg.start_worker_aliveness_check(1)
52 | time.sleep(1)
53 |
54 | print(time.time(), "start foo")
55 |
56 | _ = wg.foo(wait_time)
57 | print("foo started")
58 |
59 |     print(time.time(),
60 |           f"wait 6x wait time ({wait_time * 6}s) so the exit signal can propagate back to the driver without exceeding the process wait time")
61 | time.sleep(wait_time * 6)
62 |
63 | ray.shutdown()
64 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/*.pt
2 | **/checkpoints
3 | **/wget-log
4 | **/_build/
5 | **/*.ckpt
6 | **/outputs
7 | **/*.tar.gz
8 | **/playground
9 | **/wandb
10 |
11 | # Byte-compiled / optimized / DLL files
12 | __pycache__/
13 | *.py[cod]
14 | *$py.class
15 | dataset/*
16 | tensorflow/my_graph/*
17 | .idea/
18 | # C extensions
19 | *.so
20 |
21 | # Distribution / packaging
22 | .Python
23 | env/
24 | build/
25 | develop-eggs/
26 | dist/
27 | downloads/
28 | eggs/
29 | .eggs/
30 | lib/
31 | lib64/
32 | parts/
33 | sdist/
34 | var/
35 | *.egg-info/
36 | .installed.cfg
37 | *.egg
38 |
39 | # PyInstaller
40 | # Usually these files are written by a python script from a template
41 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
42 | *.manifest
43 | *.spec
44 |
45 | # Installer logs
46 | pip-log.txt
47 | pip-delete-this-directory.txt
48 |
49 | # Unit test / coverage reports
50 | htmlcov/
51 | .tox/
52 | .coverage
53 | .coverage.*
54 | .cache
55 | nosetests.xml
56 | coverage.xml
57 | *,cover
58 | .hypothesis/
59 |
60 | # Translations
61 | *.mo
62 | *.pot
63 |
64 | # Django stuff:
65 | *.log
66 | local_settings.py
67 |
68 | # Flask stuff:
69 | instance/
70 | .webassets-cache
71 |
72 | # Scrapy stuff:
73 | .scrapy
74 |
75 | # Sphinx documentation
76 | docs/_build/
77 |
78 | # PyBuilder
79 | target/
80 |
81 | # IPython Notebook
82 | .ipynb_checkpoints
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # celery beat schedule file
88 | celerybeat-schedule
89 |
90 | # dotenv
91 | .env
92 |
93 | # virtualenv
94 | venv/
95 | ENV/
96 |
97 | # Spyder project settings
98 | .spyderproject
99 |
100 | # Rope project settings
101 | .ropeproject
102 |
103 | # vscode
104 | .vscode
105 |
106 | # Mac
107 | .DS_Store
108 |
109 | # output logs
110 | tests/e2e/toy_examples/deepspeed/synchronous/output.txt
111 |
112 | # vim
113 | *.swp
114 |
115 | # ckpt
116 | *.lock
117 |
118 | verl_ckpt/
119 | yr_test/
120 | tmp/
121 | *.pkl
122 | *.jsonl
123 | *.json
124 | a.py
125 | b.py
126 | c.py
127 | yr_test.py
128 | or1_data/train/
129 | temp
130 | or1_data/eval/livecodebench
131 | Skywork/
--------------------------------------------------------------------------------
/examples/ppo_trainer/run_deepseek_megatron.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | # prepare pre-trained model ckpt
4 | huggingface-cli download deepseek-ai/deepseek-llm-7b-chat --local-dir $HOME/models/deepseek-llm-7b-chat
5 |
6 | # ``actor_rollout_ref.rollout.tensor_model_parallel_size`` in theory could be different from
7 | # ``**.megatron.tensor_model_parallel_size``
8 |
9 | # the config file used: verl/trainer/main_ppo/config/ppo_megatron_trainer.yaml
10 |
11 | python3 -m verl.trainer.main_ppo --config-path=config \
12 | --config-name='ppo_megatron_trainer.yaml'\
13 | data.train_files=$HOME/data/gsm8k/train.parquet \
14 | data.val_files=$HOME/data/gsm8k/test.parquet \
15 | data.train_batch_size=1024 \
16 | data.val_batch_size=1312 \
17 | data.max_prompt_length=512 \
18 | data.max_response_length=512 \
19 | actor_rollout_ref.model.path=$HOME/models/deepseek-llm-7b-chat \
20 | actor_rollout_ref.actor.optim.lr=2e-6 \
21 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
22 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
23 | actor_rollout_ref.actor.megatron.tensor_model_parallel_size=4 \
24 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=8 \
25 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
26 | actor_rollout_ref.rollout.name=vllm \
27 | actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
28 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
29 | actor_rollout_ref.ref.megatron.tensor_model_parallel_size=4 \
30 | critic.optim.lr=2e-5 \
31 | critic.model.path=$HOME/models/deepseek-llm-7b-chat \
32 | critic.model.enable_gradient_checkpointing=False \
33 | critic.ppo_micro_batch_size_per_gpu=4 \
34 | critic.megatron.tensor_model_parallel_size=4 \
35 | algorithm.kl_ctrl.kl_coef=0.001 \
36 | trainer.critic_warmup=0 \
37 | trainer.logger=['console','wandb'] \
38 | trainer.project_name='verl_megatron_gsm8k_examples' \
39 | trainer.experiment_name='deepseek_llm_7b_function_rm' \
40 | trainer.n_gpus_per_node=8 \
41 | trainer.nnodes=1 \
42 | trainer.save_freq=-1 \
43 | trainer.total_epochs=15 \
44 | +trainer.val_before_train=False $@
45 |
--------------------------------------------------------------------------------
/examples/ppo_trainer/run_deepseek7b_llm_sp2.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | python3 -m verl.trainer.main_ppo \
4 | data.train_files=$HOME/data/gsm8k/train.parquet \
5 | data.val_files=$HOME/data/gsm8k/test.parquet \
6 | data.train_batch_size=1024 \
7 | data.val_batch_size=1312 \
8 | data.max_prompt_length=512 \
9 | data.max_response_length=512 \
10 | actor_rollout_ref.model.path=deepseek-ai/deepseek-llm-7b-chat \
11 | actor_rollout_ref.actor.optim.lr=1e-6 \
12 | actor_rollout_ref.model.use_remove_padding=True \
13 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
14 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=32 \
15 | actor_rollout_ref.actor.ulysses_sequence_parallel_size=2 \
16 | actor_rollout_ref.model.enable_gradient_checkpointing=True \
17 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
18 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
19 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
20 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=64 \
21 | actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
22 | actor_rollout_ref.rollout.name=vllm \
23 | actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
24 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=64 \
25 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
26 | critic.optim.lr=1e-5 \
27 | critic.ulysses_sequence_parallel_size=2 \
28 | critic.model.use_remove_padding=True \
29 | critic.model.path=deepseek-ai/deepseek-llm-7b-chat \
30 | critic.model.enable_gradient_checkpointing=True \
31 | critic.ppo_micro_batch_size_per_gpu=64 \
32 | critic.model.fsdp_config.param_offload=False \
33 | critic.model.fsdp_config.grad_offload=False \
34 | critic.model.fsdp_config.optimizer_offload=False \
35 | algorithm.kl_ctrl.kl_coef=0.001 \
36 | trainer.critic_warmup=0 \
37 | trainer.logger=['console','wandb'] \
38 | trainer.project_name='verl_example_gsm8k' \
39 | trainer.experiment_name='deepseek_llm_7b_function_rm_sp2' \
40 | trainer.n_gpus_per_node=8 \
41 | trainer.nnodes=1 \
42 | trainer.save_freq=-1 \
43 | trainer.test_freq=5 \
44 | trainer.total_epochs=15 $@
45 |
--------------------------------------------------------------------------------
/tests/verl/utils/dataset/test_rl_dataset.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | import os
15 | import torch
16 | from torch.utils.data import DataLoader
17 | from transformers import AutoTokenizer
18 |
19 |
20 | def get_gsm8k_data():
21 | # prepare test dataset
22 | url = "https://github.com/eric-haibin-lin/verl-data/raw/refs/heads/main/gsm8k/train.parquet"
23 | local_folder = os.path.expanduser('~/verl-data/gsm8k/')
24 | local_path = os.path.join(local_folder, 'train.parquet')
25 | os.makedirs(local_folder, exist_ok=True)
26 | return local_path
27 |
28 |
29 | def test_rl_dataset():
30 | from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn
31 | from verl.utils import hf_tokenizer
32 | tokenizer = hf_tokenizer('deepseek-ai/deepseek-coder-1.3b-instruct')
33 | local_path = get_gsm8k_data()
34 | dataset = RLHFDataset(parquet_files=local_path, tokenizer=tokenizer, prompt_key='prompt', max_prompt_length=256)
35 |
36 | dataloader = DataLoader(dataset=dataset, batch_size=16, shuffle=True, drop_last=True, collate_fn=collate_fn)
37 |
38 | a = next(iter(dataloader))
39 |
40 | from verl import DataProto
41 |
42 | tensors = {}
43 | non_tensors = {}
44 |
45 | for key, val in a.items():
46 | if isinstance(val, torch.Tensor):
47 | tensors[key] = val
48 | else:
49 | non_tensors[key] = val
50 |
51 | data_proto = DataProto.from_dict(tensors=tensors, non_tensors=non_tensors)
52 |
53 | data = dataset[0]['input_ids']
54 | output = tokenizer.batch_decode([data])[0]
55 |     print(f'type: {type(output)}')
56 | print(f'\n\noutput: {output}')
57 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/benchmarks/code_execution.py:
--------------------------------------------------------------------------------
1 | import json
2 | from enum import Enum
3 | from datetime import datetime
4 | from dataclasses import dataclass
5 |
6 | from datasets import load_dataset
7 |
8 |
9 | @dataclass
10 | class CodeExecutionProblem:
11 | question_id: str
12 | contest_id: str
13 | contest_date: datetime
14 | difficulty: str
15 | function_name: str
16 | code: str
17 | input: str
18 | output: str
19 | id: str
20 | problem_id: str
21 | numsteps: int
22 |
23 | def __post_init__(self):
24 | pass
25 |
26 | def insert_output(self, output_list: list[str], pred_list: list[str]) -> dict:
27 | return {
28 | "question_id": self.question_id,
29 | "contest_id": self.contest_id,
30 | "contest_date": self.contest_date.isoformat(),
31 | "difficulty": self.difficulty,
32 | "function_name": self.function_name,
33 | "code": self.code,
34 | "input": self.input,
35 | "output": self.output,
36 | "id": self.id,
37 | "problem_id": self.problem_id,
38 | "numsteps": self.numsteps,
39 | "output_list": output_list,
40 | "pred_list": pred_list,
41 | }
42 |
43 | def insert_output_evaluation(
44 | self, output_list: list[str], code_list: list[str], graded_list: list[bool]
45 | ) -> dict:
46 | output = self.insert_output(output_list, code_list)
47 | output["graded_list"] = graded_list
48 | output["pass@1"] = graded_list.count(True) / len(graded_list)
49 | return output
50 |
51 | def get_evaluation_sample(self) -> dict:
52 | return {
53 | "code": self.code,
54 | "input": self.input,
55 | "output": self.output,
56 | }
57 |
58 |
59 | def load_code_execution_dataset(release_version="release_v1") -> list[CodeExecutionProblem]:
60 | dataset = load_dataset("livecodebench/execution-v2", split="test")
61 | dataset = [CodeExecutionProblem(**p) for p in dataset] # type: ignore
62 | print(f"Loaded {len(dataset)} problems")
63 | return dataset
64 |
65 |
66 | if __name__ == "__main__":
67 | dataset = load_code_execution_dataset()
68 |
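
A small sketch of how one problem record flows through the helpers above; the field values are invented, and constructing the dataclass directly avoids the HuggingFace download performed by load_code_execution_dataset (assumes the verl package and its datasets dependency are importable):

    from datetime import datetime
    from verl.utils.reward_score.livecodebench.lcb_runner.benchmarks.code_execution import CodeExecutionProblem

    problem = CodeExecutionProblem(
        question_id="q0", contest_id="c0", contest_date=datetime(2024, 8, 1),
        difficulty="easy", function_name="f", code="def f(x):\n    return x + 1",
        input="f(1)", output="2", id="0", problem_id="p0", numsteps=1,
    )
    print(problem.get_evaluation_sample())                                   # {'code': ..., 'input': 'f(1)', 'output': '2'}
    print(problem.insert_output_evaluation(["2"], ["2"], [True])["pass@1"])  # 1.0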
--------------------------------------------------------------------------------
/verl/single_controller/base/megatron/worker_group.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from typing import Dict
16 |
17 | from .worker import DistRankInfo, DistGlobalInfo
18 | from verl.single_controller.base import ResourcePool, WorkerGroup
19 |
20 |
21 | class MegatronWorkerGroup(WorkerGroup):
22 |
23 | def __init__(self, resource_pool: ResourcePool, **kwargs):
24 | super().__init__(resource_pool=resource_pool, **kwargs)
25 | self._megatron_rank_info = None
26 | self._megatron_global_info: DistGlobalInfo = None
27 |
28 | def init_megatron(self, default_megatron_kwargs: Dict = None):
29 |         raise NotImplementedError("MegatronWorkerGroup.init_megatron should be overridden by subclasses")
30 |
31 | def get_megatron_rank_info(self, rank: int) -> DistRankInfo:
32 | assert 0 <= rank < self.world_size, f'rank must be from [0, world_size), Got {rank}'
33 | return self._megatron_rank_info[rank]
34 |
35 | @property
36 | def tp_size(self):
37 | assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized"
38 | return self._megatron_global_info.tp_size
39 |
40 | @property
41 | def dp_size(self):
42 | assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized"
43 | return self._megatron_global_info.dp_size
44 |
45 | @property
46 | def pp_size(self):
47 | assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized"
48 | return self._megatron_global_info.pp_size
49 |
50 | def get_megatron_global_info(self):
51 | return self._megatron_global_info
52 |
--------------------------------------------------------------------------------
/tests/ray/detached_worker/client.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | In client, we can get the server handler and send RPC request
16 | """
17 |
18 | import ray
19 | import torch
20 |
21 | from verl import DataProto
22 | from verl.single_controller.ray import RayClassWithInitArgs
23 | from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup
24 |
25 | from tensordict import TensorDict
26 |
27 | from server import Trainer
28 |
29 |
30 | def compute_position_id_with_mask(mask):
31 | return torch.clip(torch.cumsum(mask, dim=-1) - 1, min=0, max=None)
32 |
33 |
34 | if __name__ == '__main__':
35 |
36 | ray.init(address='auto', namespace='verl')
37 | # get the worker group using names
38 | worker_names = ['trainerTrainer_0:0', 'trainerTrainer_0:1']
39 | cls_with_init_args = RayClassWithInitArgs(cls=Trainer)
40 | worker_group = NVMegatronRayWorkerGroup.from_detached(worker_names=worker_names,
41 | ray_cls_with_init=cls_with_init_args)
42 |
43 | batch_size = 16
44 | sequence_length = 1024
45 |
46 | # give Trainer some data to train
47 | input_ids = torch.randint(low=0, high=256, size=(batch_size, sequence_length), dtype=torch.int64, device='cuda')
48 | attention_mask = torch.ones_like(input_ids)
49 | position_ids = compute_position_id_with_mask(attention_mask)
50 |
51 | data = DataProto(batch=TensorDict(
52 | {
53 | 'input_ids': input_ids,
54 | 'attention_mask': attention_mask,
55 | 'position_ids': position_ids
56 | }, batch_size=batch_size),
57 | meta_info={})
58 |
59 | output = worker_group.train_model(data)
60 |
61 | print(output)
62 |
--------------------------------------------------------------------------------
/verl/utils/megatron/pipeline_parallel.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | import torch
17 | from megatron.core import parallel_state as mpu
18 |
19 | from .sequence_parallel import pad_to_sequence_parallel
20 |
21 |
22 | def compute_transformers_input_shapes(batches, meta_info):
23 | from flash_attn.bert_padding import unpad_input # flash 2 is a must for Megatron
24 | # pre-compute input shapes for each micro-batch at each pp stage
25 | input_shapes = []
26 | for model_inputs in batches:
27 | input_ids = model_inputs['input_ids']
28 | attention_mask = model_inputs['attention_mask']
29 | input_ids_rmpad = unpad_input(input_ids.unsqueeze(dim=-1), attention_mask)[0] # (total_nnz, 1)
30 | if meta_info['sequence_parallel']:
31 | input_ids_rmpad = pad_to_sequence_parallel(input_ids_rmpad)
32 | # compute shapes for model_inputs
33 | input_shapes.append(
34 | torch.Size([
35 | input_ids_rmpad.shape[0] // mpu.get_tensor_model_parallel_world_size(), 1, meta_info['hidden_size']
36 | ]))
37 | else:
38 | # compute shapes for model_inputs
39 | input_shapes.append(torch.Size([input_ids_rmpad.shape[0], 1, meta_info['hidden_size']]))
40 | return input_shapes
41 |
42 |
43 | def make_batch_generator(batches, vpp_size):
44 | if vpp_size > 1:
45 | # has vpp
46 | batch_generator = [batches] * vpp_size # number of vpp chunks
47 | batch_generator = [iter(b) for b in batch_generator]
48 | else:
49 | # no vpp
50 | batch_generator = iter(batches)
51 | return batch_generator
52 |
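
A quick illustration of the two branches of make_batch_generator; the logic is mirrored inline so the example runs without Megatron, and the batch contents are placeholders:

    def make_batch_generator(batches, vpp_size):
        if vpp_size > 1:
            # one iterator per virtual-pipeline chunk, each walking the same micro-batches
            return [iter(batches) for _ in range(vpp_size)]
        return iter(batches)

    batches = [{'input_ids': f'micro-batch {i}'} for i in range(3)]
    chunk_iters = make_batch_generator(batches, vpp_size=2)
    print([next(it)['input_ids'] for it in chunk_iters])                 # ['micro-batch 0', 'micro-batch 0']
    print(next(make_batch_generator(batches, vpp_size=1))['input_ids'])  # 'micro-batch 0'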
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/evaluation/old_results_check.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import json
3 | from verl.utils.reward_score.livecodebench.lcb_runner.benchmarks import load_generation_dataset, CodeGenerationProblem
4 | from verl.utils.reward_score.livecodebench.lcb_runner.evaluation import codegen_metrics
5 |
6 |
7 | dataset = load_generation_dataset()
8 |
9 | dataset = sorted(dataset, key=lambda x: x.question_id)
10 |
11 |
12 | def check_model(model_key):
13 | path = f"/home/naman/Repos/LiveCodeBench/run_models_outputs/{model_key}/chat_0.2_checked.json"
14 | with open(path) as f:
15 | old_results = json.load(f)
16 | old_results = sorted(old_results, key=lambda x: x["question_id"])
17 | assert old_results[0]["question_id"] == dataset[0].question_id
18 |
19 | def debug(idx):
20 | codegen_metrics(
21 | [dataset[idx].get_evaluation_sample()],
22 | [old_results[idx]["code_list"][:1]],
23 | debug=True,
24 | )
25 |
26 | def run(idx):
27 | return codegen_metrics(
28 | [dataset[idx].get_evaluation_sample()],
29 | [old_results[idx]["code_list"]],
30 | )
31 |
32 | debug(380)
33 | exit()
34 | # debug(196)
35 | # debug(352)
36 |
37 | metrics = codegen_metrics(
38 | [d.get_evaluation_sample() for d in dataset],
39 | [r["code_list"] for r in old_results],
40 | num_process_evaluate=12,
41 | )
42 | old_pass1 = np.mean([np.mean(r["pass1_list"]) for r in old_results])
43 |
44 | print(old_pass1)
45 | print(metrics[0]["pass@1"])
46 |
47 | for idx in range(400):
48 | old_pass1 = np.mean(old_results[idx]["pass1_list"])
49 | new_pass1 = metrics[0]["detail"]["pass@1"][idx]
50 | if not abs(old_pass1 - new_pass1) < 1e-4:
51 | print(idx, old_pass1, new_pass1)
52 |
53 |
54 | # model_key = "GPT-4-Turbo-1106"
55 | # check_model(model_key)
56 |
57 | model_key = "Claude-3-Opus"
58 | check_model(model_key)
59 |
60 | model_key = "GPT-4-0613"
61 | check_model(model_key)
62 |
63 | model_key = "Mistral-Large"
64 | check_model(model_key)
65 |
66 | model_key = "Claude-3-Sonnet"
67 | check_model(model_key)
68 |
69 | model_key = "GPT-3.5-Turbo-0301"
70 | check_model(model_key)
71 |
72 | model_key = "Gemini-Pro"
73 | check_model(model_key)
74 |
--------------------------------------------------------------------------------
/verl/workers/actor/base.py:
--------------------------------------------------------------------------------
1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """
15 | The base class for Actor
16 | """
17 | from abc import ABC, abstractmethod
18 | from typing import Iterable, Dict
19 |
20 | from verl import DataProto
21 | import torch
22 |
23 | __all__ = ['BasePPOActor']
24 |
25 |
26 | class BasePPOActor(ABC):
27 |
28 | def __init__(self, config):
29 | """The base class for PPO actor
30 |
31 | Args:
32 | config (DictConfig): a config passed to the PPOActor. We expect the type to be
33 | DictConfig (https://omegaconf.readthedocs.io/), but it can be any namedtuple in general.
34 | """
35 | super().__init__()
36 | self.config = config
37 |
38 | @abstractmethod
39 | def compute_log_prob(self, data: DataProto) -> torch.Tensor:
40 |         """Compute the log probabilities of a batch of data.
41 |
42 | Args:
43 |             data (DataProto): a batch of data represented by DataProto. It must contain the keys ```input_ids```,
44 | ```attention_mask``` and ```position_ids```.
45 |
46 | Returns:
47 | DataProto: a DataProto containing the key ```log_probs```
48 |
49 |
50 | """
51 | pass
52 |
53 | @abstractmethod
54 | def update_policy(self, data: DataProto) -> Dict:
55 | """Update the policy with an iterator of DataProto
56 |
57 | Args:
58 |             data (DataProto): an iterator over DataProto minibatches returned by
59 | ```make_minibatch_iterator```
60 |
61 | Returns:
62 |             Dict: a dictionary that may contain anything; typically the statistics gathered while updating the model,
63 |                 such as ```loss```, ```grad_norm```, etc.
64 |
65 | """
66 | pass
67 |
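
A minimal sketch of the contract this base class defines; the RandomActor below is hypothetical and only shows which methods a concrete actor must implement (it assumes verl and torch are importable):

    import torch
    from verl import DataProto
    from verl.workers.actor.base import BasePPOActor

    class RandomActor(BasePPOActor):
        """Toy actor that returns zero log-probs and performs no real update."""

        def compute_log_prob(self, data: DataProto) -> torch.Tensor:
            input_ids = data.batch['input_ids']
            return torch.zeros_like(input_ids, dtype=torch.float32)  # log(1) placeholder per token

        def update_policy(self, data: DataProto) -> dict:
            return {'loss': 0.0, 'grad_norm': 0.0}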
--------------------------------------------------------------------------------
/examples/ppo_trainer/run_qwen2-7b.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | gsm8k_train_path=$HOME/data/gsm8k/train.parquet
4 | gsm8k_test_path=$HOME/data/gsm8k/test.parquet
5 | math_train_path=$HOME/data/math/train.parquet
6 | math_test_path=$HOME/data/math/test.parquet
7 |
8 | train_files="['$gsm8k_train_path', '$math_train_path']"
9 | test_files="['$gsm8k_test_path', '$math_test_path']"
10 |
11 | python3 -m verl.trainer.main_ppo \
12 | data.train_files="$train_files" \
13 | data.val_files="$test_files" \
14 | data.train_batch_size=1024 \
15 | data.val_batch_size=6312 \
16 | data.max_prompt_length=1024 \
17 | data.max_response_length=512 \
18 | actor_rollout_ref.model.path=Qwen/Qwen2-7B-Instruct \
19 | actor_rollout_ref.actor.optim.lr=1e-6 \
20 | actor_rollout_ref.model.use_remove_padding=True \
21 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
22 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=16 \
23 | actor_rollout_ref.model.enable_gradient_checkpointing=True \
24 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
25 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
26 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
27 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=32 \
28 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
29 | actor_rollout_ref.rollout.name=vllm \
30 | actor_rollout_ref.rollout.gpu_memory_utilization=0.6 \
31 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=32 \
32 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
33 | critic.optim.lr=1e-5 \
34 | critic.model.use_remove_padding=True \
35 | critic.model.path=Qwen/Qwen2-7B-Instruct \
36 | critic.model.enable_gradient_checkpointing=True \
37 | critic.ppo_micro_batch_size_per_gpu=32 \
38 | critic.model.fsdp_config.param_offload=False \
39 | critic.model.fsdp_config.grad_offload=False \
40 | critic.model.fsdp_config.optimizer_offload=False \
41 | algorithm.kl_ctrl.kl_coef=0.001 \
42 | trainer.critic_warmup=0 \
43 | trainer.logger=['console','wandb'] \
44 | trainer.project_name='verl_example' \
45 | trainer.experiment_name='Qwen2-7B-Instruct_function_rm' \
46 | trainer.n_gpus_per_node=8 \
47 | trainer.nnodes=1 \
48 | trainer.save_freq=-1 \
49 | trainer.test_freq=10 \
50 | trainer.total_epochs=15 $@
51 |
--------------------------------------------------------------------------------
/verl/utils/reward_score/livecodebench/lcb_runner/evaluation/pass_k_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def estimate_pass_at_k(num_samples, num_correct, k):
5 | """Estimates pass@k of each problem and returns them in an array."""
6 |
7 | def estimator(n: int, c: int, k: int) -> float:
8 | """Calculates 1 - comb(n - c, k) / comb(n, k)."""
9 | if n - c < k:
10 | return 1.0
11 | return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
12 |
13 | import itertools
14 |
15 | if isinstance(num_samples, int):
16 | num_samples_it = itertools.repeat(num_samples, len(num_correct))
17 | else:
18 | assert len(num_samples) == len(num_correct)
19 | num_samples_it = iter(num_samples)
20 |
21 | return np.array(
22 | [estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)]
23 | )
24 |
25 |
26 | def compute_metrics_from_results(results, k_list=[1, 5]):
27 | total = []
28 | correct = []
29 | task_ids = []
30 | for task_id, res in results.items():
31 | all_correct = []
32 | for generation in res:
33 | gen = np.array(generation)
34 | all_correct.append(np.all(gen > 0))
35 | task_ids.append(task_id)
36 | total.append(len(all_correct))
37 | correct.append(sum(all_correct))
38 | total = np.array(total)
39 | correct = np.array(correct)
40 | ks = k_list
41 | detail_pass_at_k = {
42 | f"pass@{k}": estimate_pass_at_k(total, correct, k).tolist()
43 | for k in ks
44 | if (total >= k).all()
45 | }
46 | pass_at_k = {
47 | f"pass@{k}": estimate_pass_at_k(total, correct, k).mean()
48 | for k in ks
49 | if (total >= k).all()
50 | }
51 | detail_metrics = {k: dict(zip(task_ids, v)) for k, v in detail_pass_at_k.items()}
52 | pass_at_k["detail"] = detail_metrics
53 | return pass_at_k
54 |
55 |
56 | def extract_instance_results(results):
57 | instance_wise_grades = {}
58 | for task_id, res in results.items():
59 | instance_wise_grades[task_id] = []
60 | for generation in res:
61 | instance_wise_grades[task_id].append(all([g > 0 for g in generation]))
62 |
63 | instance_wise_grades = [
64 | v for _, v in sorted(instance_wise_grades.items(), key=lambda item: item[0])
65 | ]
66 | return instance_wise_grades
67 |
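
A toy run of the helpers above: two tasks, four generations each, one test per generation (assumes the verl package is importable; the task names and outcomes are made up):

    from verl.utils.reward_score.livecodebench.lcb_runner.evaluation.pass_k_utils import (
        compute_metrics_from_results, extract_instance_results)

    results = {
        "task_a": [[1], [1], [0], [1]],   # per-generation test outcomes; > 0 means the test passed
        "task_b": [[0], [0], [1], [0]],
    }
    metrics = compute_metrics_from_results(results, k_list=[1])
    print(metrics["pass@1"])                  # (0.75 + 0.25) / 2 = 0.5
    print(extract_instance_results(results))  # [[True, True, False, True], [False, False, True, False]]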
--------------------------------------------------------------------------------
/or1_scripts/eval/eval_7b.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | export VLLM_ATTENTION_BACKEND=XFORMERS
4 | export WORLD_SIZE=${WORLD_SIZE:-1}
5 | export RANK=${RANK:-0}
6 | export MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
7 | export MASTER_PORT=${MASTER_PORT:-29500}
8 | export HYDRA_FULL_ERROR=1
9 | export RAY_BACKEND_LOG_LEVEL=debug
10 | export GPUS_PER_NODE=$(python -c 'import torch; print(torch.cuda.device_count())')
11 | export LIVECODEBENCH_DATA_PATH=${LIVECODEBENCH_DATA_PATH:-./or1_data/eval/livecodebench/livecodebench_2408_2502}
12 |
13 | MODEL_NAME=${MODEL_NAME:-Skywork/Skywork-OR1-Math-7B}
14 |
15 | # Evaluate Aime24
16 | python3 -m verl.trainer.main_generation \
17 | trainer.nnodes=$WORLD_SIZE \
18 | trainer.n_gpus_per_node=$GPUS_PER_NODE \
19 | model.path=$MODEL_NAME \
20 | data.path=or1_data/eval/aime24.parquet \
21 | data.output_path=./outputs/evalation/Aime24_Avg32-Skywork_OR1_Math_7B.pkl \
22 | data.n_samples=32 \
23 | data.batch_size=102400 \
24 | rollout.temperature=0.6 \
25 | rollout.response_length=32768 \
26 | rollout.top_k=-1 \
27 | rollout.top_p=1.0 \
28 | rollout.gpu_memory_utilization=0.8 \
29 | rollout.tensor_model_parallel_size=1
30 |
31 | # Evaluate Aime25
32 | python3 -m verl.trainer.main_generation \
33 | trainer.nnodes=$WORLD_SIZE \
34 | trainer.n_gpus_per_node=$GPUS_PER_NODE \
35 | model.path=$MODEL_NAME \
36 | data.path=or1_data/eval/aime25.parquet \
37 | data.output_path=./outputs/evalation/Aime25_Avg32-Skywork_OR1_Math_7B.pkl \
38 | data.n_samples=32 \
39 | data.batch_size=102400 \
40 | rollout.temperature=0.6 \
41 | rollout.response_length=32768 \
42 | rollout.top_k=-1 \
43 | rollout.top_p=1.0 \
44 | rollout.gpu_memory_utilization=0.8 \
45 | rollout.tensor_model_parallel_size=1
46 |
47 | # Evaluate LiveCodeBench
48 | python3 -m verl.trainer.main_generation \
49 | trainer.nnodes=$WORLD_SIZE \
50 | trainer.n_gpus_per_node=$GPUS_PER_NODE \
51 | model.path=$MODEL_NAME \
52 | data.path=or1_data/eval/livecodebench/livecodebench_2408_2502.parquet \
53 | data.output_path=./outputs/evalation/LCB_Avg4-Skywork_OR1_Math_7B.pkl \
54 | data.n_samples=4 \
55 | data.batch_size=102400 \
56 | rollout.temperature=0.6 \
57 | rollout.response_length=32768 \
58 | rollout.top_k=-1 \
59 | rollout.top_p=1.0 \
60 | rollout.gpu_memory_utilization=0.8 \
61 | rollout.tensor_model_parallel_size=1
--------------------------------------------------------------------------------
/or1_scripts/eval/eval_32b.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | export VLLM_ATTENTION_BACKEND=XFORMERS
4 | export WORLD_SIZE=${WORLD_SIZE:-1}
5 | export RANK=${RANK:-0}
6 | export MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
7 | export MASTER_PORT=${MASTER_PORT:-29500}
8 | export HYDRA_FULL_ERROR=1
9 | export RAY_BACKEND_LOG_LEVEL=debug
10 | export GPUS_PER_NODE=$(python -c 'import torch; print(torch.cuda.device_count())')
11 | export LIVECODEBENCH_DATA_PATH=${LIVECODEBENCH_DATA_PATH:-./or1_data/eval/livecodebench/livecodebench_2408_2502}
12 |
13 | MODEL_NAME=${MODEL_NAME:-Skywork/Skywork-OR1-32B-Preview}
14 |
15 | # Evaluation AIME24
16 | python3 -m verl.trainer.main_generation \
17 | trainer.nnodes=$WORLD_SIZE \
18 | trainer.n_gpus_per_node=$GPUS_PER_NODE \
19 | model.path=$MODEL_NAME \
20 | data.path=or1_data/eval/aime24.parquet \
21 | data.output_path=./outputs/evalation/Aime24_Avg32-Skywork_OR1_32B_Preview.pkl \
22 | data.n_samples=32 \
23 | data.batch_size=102400 \
24 | rollout.temperature=1.0 \
25 | rollout.response_length=32768 \
26 | rollout.top_k=-1 \
27 | rollout.top_p=1.0 \
28 | rollout.gpu_memory_utilization=0.8 \
29 | rollout.tensor_model_parallel_size=2
30 |
31 | # Evaluation AIME25
32 | python3 -m verl.trainer.main_generation \
33 | trainer.nnodes=$WORLD_SIZE \
34 | trainer.n_gpus_per_node=$GPUS_PER_NODE \
35 | model.path=$MODEL_NAME \
36 | data.path=or1_data/eval/aime25.parquet \
37 | data.output_path=./outputs/evalation/Aime25_Avg32-Skywork_OR1_32B_Preview.pkl \
38 | data.n_samples=32 \
39 | data.batch_size=102400 \
40 | rollout.temperature=1.0 \
41 | rollout.response_length=32768 \
42 | rollout.top_k=-1 \
43 | rollout.top_p=1.0 \
44 | rollout.gpu_memory_utilization=0.8 \
45 | rollout.tensor_model_parallel_size=2
46 |
47 | # Evaluation LiveCodeBench
48 | python3 -m verl.trainer.main_generation \
49 | trainer.nnodes=$WORLD_SIZE \
50 | trainer.n_gpus_per_node=$GPUS_PER_NODE \
51 | model.path=$MODEL_NAME \
52 | data.path=or1_data/eval/livecodebench/livecodebench_2408_2502.parquet \
53 | data.output_path=./outputs/evalation/LCB_Avg4-Skywork_OR1_32B_Preview.pkl \
54 | data.n_samples=4 \
55 | data.batch_size=102400 \
56 | rollout.temperature=1.0 \
57 | rollout.response_length=32768 \
58 | rollout.top_k=-1 \
59 | rollout.top_p=1.0 \
60 | rollout.gpu_memory_utilization=0.8 \
61 | rollout.tensor_model_parallel_size=2
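
Relative to the 7B script, this one raises the sampling temperature to 1.0 and sets rollout.tensor_model_parallel_size=2, so each rollout engine is sharded over two GPUs. A practical constraint with such a setting is that the total visible GPU count (WORLD_SIZE x GPUS_PER_NODE) splits evenly into tensor-parallel groups. The snippet below is only an illustrative pre-flight check, not the validation verl itself performs.

import os
import torch

def check_rollout_tp(tp_size: int) -> None:
    # Mirrors the environment defaults exported by the eval scripts above.
    world_size = int(os.environ.get("WORLD_SIZE", "1"))
    gpus_per_node = torch.cuda.device_count()
    total_gpus = world_size * gpus_per_node
    if total_gpus == 0 or total_gpus % tp_size != 0:
        raise ValueError(f"{total_gpus} GPU(s) cannot be split evenly into "
                         f"tensor-parallel groups of size {tp_size}")
    print(f"{total_gpus} GPU(s) -> {total_gpus // tp_size} tensor-parallel group(s) of {tp_size}")

check_rollout_tp(tp_size=2)
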
--------------------------------------------------------------------------------
/tests/e2e/run_qwen_gsm8k_model_rm.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | export VLLM_ATTENTION_BACKEND=XFORMERS
4 |
5 | python3 -m verl.trainer.main_ppo \
6 | data.train_files=$HOME/data/gsm8k/train.parquet \
7 | data.val_files=$HOME/data/gsm8k/test.parquet \
8 | data.train_batch_size=1024 \
9 | data.val_batch_size=1312 \
10 | data.max_prompt_length=512 \
11 | data.max_response_length=512 \
12 | data.return_raw_chat=True \
13 | actor_rollout_ref.model.path=Qwen/Qwen2.5-0.5B \
14 | actor_rollout_ref.actor.optim.lr=1e-6 \
15 | actor_rollout_ref.model.use_remove_padding=True \
16 | actor_rollout_ref.actor.optim.lr_warmup_steps_ratio=0.1 \
17 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
18 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=4 \
19 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
20 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
21 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
22 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
23 | actor_rollout_ref.rollout.tensor_model_parallel_size=2 \
24 | actor_rollout_ref.rollout.name=vllm \
25 | actor_rollout_ref.rollout.gpu_memory_utilization=0.4 \
26 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
27 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
28 | critic.optim.lr=1e-5 \
29 | critic.model.use_remove_padding=True \
30 | critic.optim.lr_warmup_steps_ratio=0.05 \
31 | critic.model.path=Qwen/Qwen2.5-0.5B \
32 | critic.model.enable_gradient_checkpointing=False \
33 | critic.ppo_micro_batch_size_per_gpu=4 \
34 | critic.model.fsdp_config.param_offload=False \
35 | critic.model.fsdp_config.grad_offload=False \
36 | critic.model.fsdp_config.optimizer_offload=False \
37 | reward_model.enable=True \
38 |     reward_model.model.path=Qwen/Qwen2.5-0.5B \
39 | reward_model.model.use_remove_padding=True \
40 | reward_model.model.fsdp_config.param_offload=True \
41 | reward_model.micro_batch_size_per_gpu=16 \
42 | algorithm.kl_ctrl.kl_coef=0.001 \
43 | trainer.critic_warmup=0 \
44 | trainer.logger=['console'] \
45 | +trainer.val_before_train=False \
46 | trainer.project_name='verl_example' \
47 | trainer.experiment_name='Qwen2.5-0.5B-ci_hybrid_rm' \
48 | trainer.n_gpus_per_node=8 \
49 | trainer.nnodes=1 \
50 | trainer.save_freq=-1 \
51 | trainer.total_training_steps=1 $@
52 |
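
A note on the batch-size knobs used here and in the 32B example that follows: ppo_mini_batch_size reads as a global sample count while ppo_micro_batch_size_per_gpu is per GPU, so the number of gradient-accumulation steps per PPO update falls out of simple division. The arithmetic below is a sketch under the assumption that the mini-batch is split evenly across all data-parallel ranks; it is not a copy of verl's own normalization code.

def grad_accum_steps(mini_batch_size, n_gpus_per_node, nnodes, micro_batch_size_per_gpu):
    # Global mini-batch -> per-GPU share -> micro-batch steps accumulated
    # before each optimizer update.
    world_size = n_gpus_per_node * nnodes
    assert mini_batch_size % world_size == 0, "mini batch must split evenly across GPUs"
    per_gpu_mini = mini_batch_size // world_size
    assert per_gpu_mini % micro_batch_size_per_gpu == 0, "micro batch must divide the per-GPU share"
    return per_gpu_mini // micro_batch_size_per_gpu

# This test script: 256 / (8 GPUs * 1 node) = 32 samples per GPU, / 4 -> 8 accumulation steps.
print(grad_accum_steps(256, n_gpus_per_node=8, nnodes=1, micro_batch_size_per_gpu=4))  # 8
# run_qwen2.5-32b.sh below: 256 / (8 * 4) = 8 samples per GPU, / 8 -> a single step.
print(grad_accum_steps(256, n_gpus_per_node=8, nnodes=4, micro_batch_size_per_gpu=8))  # 1
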
--------------------------------------------------------------------------------
/examples/ppo_trainer/run_qwen2.5-32b.sh:
--------------------------------------------------------------------------------
1 | set -x
2 |
3 | gsm8k_train_path=$HOME/data/gsm8k/train.parquet
4 | gsm8k_test_path=$HOME/data/gsm8k/test.parquet
5 | math_train_path=$HOME/data/math/train.parquet
6 | math_test_path=$HOME/data/math/test.parquet
7 |
8 | train_files="['$gsm8k_train_path', '$math_train_path']"
9 | test_files="['$gsm8k_test_path', '$math_test_path']"
10 |
11 | python3 -m verl.trainer.main_ppo \
12 | data.train_files="$train_files" \
13 | data.val_files="$test_files" \
14 | data.train_batch_size=1024 \
15 | data.val_batch_size=6304 \
16 | data.max_prompt_length=1024 \
17 | data.max_response_length=1024 \
18 | actor_rollout_ref.model.path=Qwen/Qwen2.5-32B-Instruct \
20 | actor_rollout_ref.actor.optim.lr=1e-6 \
21 | actor_rollout_ref.model.use_remove_padding=True \
22 | actor_rollout_ref.actor.ppo_mini_batch_size=256 \
23 | actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu=8 \
24 | actor_rollout_ref.model.enable_gradient_checkpointing=True \
25 | actor_rollout_ref.actor.fsdp_config.param_offload=False \
26 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \
27 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
28 | actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu=16 \
29 | actor_rollout_ref.rollout.tensor_model_parallel_size=4 \
30 | actor_rollout_ref.rollout.name=vllm \
31 | actor_rollout_ref.rollout.gpu_memory_utilization=0.5 \
32 | actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu=16 \
33 | actor_rollout_ref.ref.fsdp_config.param_offload=True \
34 | critic.optim.lr=1e-5 \
35 | critic.model.use_remove_padding=True \
36 | critic.model.path=Qwen/Qwen2.5-32B-Instruct \
37 | critic.model.enable_gradient_checkpointing=False \
38 | critic.ppo_micro_batch_size_per_gpu=8 \
39 | critic.model.fsdp_config.param_offload=False \
40 | critic.model.fsdp_config.grad_offload=False \
41 | critic.model.fsdp_config.optimizer_offload=False \
42 | algorithm.kl_ctrl.kl_coef=0.0001 \
43 | trainer.critic_warmup=0 \
44 | trainer.logger=['console','wandb'] \
45 | trainer.project_name='verl_example' \
46 | trainer.experiment_name='Qwen2.5-32B-Instruct_function_rm' \
47 | trainer.n_gpus_per_node=8 \
48 | trainer.nnodes=4 \
49 | trainer.save_freq=-1 \
50 | trainer.test_freq=10 \
51 | trainer.total_epochs=15 $@
52 |
--------------------------------------------------------------------------------