├── nanoverl
│   ├── __init__.py
│   ├── data
│   │   └── __init__.py
│   ├── rewards
│   │   ├── __init__.py
│   │   ├── reward_types.py
│   │   └── deepscaler_rule_reward.py
│   └── config
│       └── ppo_trainer.yaml
├── pyproject.toml
├── README.md
├── examples
│   └── deepscaler
│       ├── train_grpo_r1_distill_1b_8k.bash
│       ├── train_grpo_r1_distill_1b_8k.slurm
│       ├── prepare_dataset.py
│       └── reasoning_eval.py
├── .gitignore
├── main_ppo.py
└── LICENSE
/nanoverl/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/nanoverl/data/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/nanoverl/rewards/__init__.py:
--------------------------------------------------------------------------------
1 | """Import reward-related classes and types from the reward module."""
2 | 
3 | from .reward_types import (RewardConfig, RewardFn, RewardInput, RewardOutput,
4 |                            RewardType)
5 | 
6 | __all__ = ['RewardConfig', 'RewardFn', 'RewardInput', 'RewardOutput', 'RewardType']
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "nanoverl"
3 | version = "0.1.0"
4 | description = ""
5 | authors = [
6 |     {name = "koalazf99",email = "koala99.zf@gmail.com"}
7 | ]
8 | readme = "README.md"
9 | requires-python = ">=3.10"
10 | dependencies = [
11 | ]
12 | 
13 | 
14 | [build-system]
15 | requires = ["poetry-core>=2.0.0,<3.0.0"]
16 | build-backend = "poetry.core.masonry.api"
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # nanoverl
2 | 
3 | Run RL $\times$ LM experiments using minimal monkey patches for [verl](https://github.com/volcengine/verl), so that you do not need to modify verl's original code and can keep up with its latest version. We also avoid git submodules to sidestep the complexity of version control.
4 | 
5 | ## Usage
6 | 
7 | First follow the instructions in verl to install the main repo, then install this repo locally:
8 | ```bash
9 | git clone https://github.com/koalazf99/nanoverl.git nanoverl
10 | cd nanoverl
11 | pip install -e .
12 | ```
13 | 
14 | ## Examples
15 | 
16 | All scripts for RL experiments are in `examples/`. For example, we can run the following script to train on the [deepscaler](https://huggingface.co/datasets/agentica-org/DeepScaleR-Preview-Dataset) dataset using [R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B) with the GRPO algorithm:
17 | 
18 | ```bash
19 | cd examples/deepscaler
20 | python prepare_dataset.py
21 | bash train_grpo_r1_distill_1b_8k.bash
22 | ```
23 | 
24 | The evaluation script is also a "nano" version thanks to [sglang](https://github.com/sgl-project/sglang). We use sglang-router to serve multiple backends.
25 | ```bash
26 | python -m sglang_router.launch_server \
27 |     --model-path deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \
28 |     --port 30000 --dp-size 8
29 | python reasoning_eval.py \
30 |     --data-path nanoverl/aime \
31 |     --parallel 256 \
32 |     --num-tries 16
33 | ```
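34 | 
35 | Under the hood, `reasoning_eval.py` samples `--num-tries` responses per question, then reports Pass@1 (correctness micro-averaged over every sampled generation) and Cons@K (majority vote among the K tries per question). A minimal sketch of the two metrics it prints (illustrative helper names, not the script's actual API):
36 | 
37 | ```python
38 | from collections import Counter
39 | 
40 | # results: {question: [(is_correct, parsed_answer), ...]}, K entries per question
41 | def pass_at_1(results):
42 |     flat = [crt for tries in results.values() for crt, _ in tries]
43 |     return sum(flat) / len(flat)  # micro-average over all generations
44 | 
45 | def cons_at_k(results):
46 |     # a question counts as solved when its most frequent (correctness, answer)
47 |     # pair is a correct one, i.e. the majority vote lands on a correct answer
48 |     solved = [Counter(tries).most_common(1)[0][0][0] == 1
49 |               for tries in results.values()]
50 |     return sum(solved) / len(solved)
51 | ```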
52 | 
53 | 
54 | ## Local Installable Package Configuration
55 | ```bash
56 | pip install poetry
57 | poetry init
58 | poetry build
59 | ```
--------------------------------------------------------------------------------
/examples/deepscaler/train_grpo_r1_distill_1b_8k.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 | 
4 | # Warning: Export VLLM_ATTENTION_BACKEND on every machine before starting the Ray cluster.
5 | # vLLM without XFORMERS will result in CUDA errors.
6 | export VLLM_ATTENTION_BACKEND=XFORMERS
7 | 
8 | # Parse command line arguments
9 | while [[ $# -gt 0 ]]; do
10 |     case $1 in
11 |         --model)
12 |             MODEL_PATH="$2"
13 |             shift 2
14 |             ;;
15 |         *)
16 |             break
17 |             ;;
18 |     esac
19 | done
20 | 
21 | # Set default model path if not provided
22 | if [ -z "$MODEL_PATH" ]; then
23 |     MODEL_PATH="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
24 | fi
25 | 
26 | export PYTHONPATH=$PYTHONPATH:$HOME/nanoverl/
27 | EXP_NAME="deepscaler-1.5b-8k"
28 | 
29 | # Train on a single node with 8 A100-80GB GPUs.
30 | python3 -m main_ppo \
31 |     algorithm.adv_estimator=grpo \
32 |     data.train_files=$HOME/nanoverl/data/parquet_data/deepscaler/train.parquet \
33 |     data.val_files=$HOME/nanoverl/data/parquet_data/deepscaler/aime.parquet \
34 |     data.train_batch_size=128 \
35 |     data.val_batch_size=512 \
36 |     data.max_prompt_length=1024 \
37 |     data.max_response_length=8192 \
38 |     actor_rollout_ref.model.path=$MODEL_PATH \
39 |     actor_rollout_ref.actor.optim.lr=1e-6 \
40 |     actor_rollout_ref.model.use_remove_padding=True \
41 |     actor_rollout_ref.actor.ppo_mini_batch_size=64 \
42 |     actor_rollout_ref.actor.use_dynamic_bsz=True \
43 |     actor_rollout_ref.actor.ppo_max_token_len_per_gpu=32768 \
44 |     actor_rollout_ref.actor.use_kl_loss=True \
45 |     actor_rollout_ref.actor.kl_loss_coef=0.001 \
46 |     actor_rollout_ref.actor.kl_loss_type=low_var_kl \
47 |     actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \
48 |     actor_rollout_ref.model.enable_gradient_checkpointing=True \
49 |     actor_rollout_ref.actor.fsdp_config.param_offload=False \
50 |     actor_rollout_ref.actor.fsdp_config.grad_offload=False \
51 |     actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \
52 |     actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
53 |     actor_rollout_ref.rollout.name=vllm \
54 |     actor_rollout_ref.rollout.temperature=0.6 \
55 |     actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \
56 |     actor_rollout_ref.rollout.n=8 \
57 |     actor_rollout_ref.ref.fsdp_config.param_offload=True \
58 |     algorithm.kl_ctrl.kl_coef=0.001 \
59 |     trainer.critic_warmup=0 \
60 |     trainer.logger=['console','wandb'] \
61 |     +trainer.val_before_train=False \
62 |     trainer.project_name='nanoverl' \
63 |     trainer.experiment_name=$EXP_NAME \
64 |     trainer.n_gpus_per_node=8 \
65 |     trainer.nnodes=1 \
66 |     trainer.save_freq=20 \
67 |     trainer.test_freq=20 \
68 |     trainer.default_hdfs_dir=null \
69 |     trainer.default_local_dir=$HOME/nanoverl/checkpoints/$EXP_NAME \
70 |     trainer.total_epochs=30 "${@:1}"
--------------------------------------------------------------------------------
/nanoverl/rewards/reward_types.py:
--------------------------------------------------------------------------------
1 | """
2 | 
https://github.com/agentica-project/deepscaler/blob/main/deepscaler/rewards/reward_types.py 3 | This module defines data structures and base classes for reward calculations 4 | to evaluate model responses for various problem types, including math and coding. 5 | """ 6 | 7 | from dataclasses import dataclass, field 8 | from enum import Enum 9 | 10 | 11 | @dataclass 12 | class RewardConfig: 13 | # Use LLM as ORM to evaluate correctness. 14 | use_math_orm: bool = False 15 | 16 | # General reward constants. 17 | correct_reward: float = 1.0 18 | incorrect_reward: float = -1.0 19 | format_error_reward: float = -1.0 20 | unk_error_reward: float = -1.0 21 | 22 | 23 | class RewardType(Enum): 24 | """ 25 | Enum class representing the different types of rewards that can be assigned. 26 | 27 | Attributes: 28 | MATH (str): Represents a math-related problem type. 29 | CODE (str): Represents a coding-related problem type. 30 | UNK (str): Represents an unknown or unclassified problem type. 31 | """ 32 | MATH = 'MATH' 33 | CODE = 'CODE' 34 | UNK = 'UNK' 35 | 36 | 37 | @dataclass 38 | class RewardInput: 39 | """Data structure for input required to calculate rewards. 40 | 41 | Attributes: 42 | problem (str): The original problem text or prompt provided to the model. 43 | model_response (str): The response generated by the model that needs evaluation. 44 | problem_type (RewardType): The category of the problem (e.g., math, code) to be evaluated. 45 | ground_truth (dict): Additional contextual information necessary for evaluation: 46 | - For math problems: This may include the ground truth answer. 47 | - For coding problems: This may include unit tests to validate the solution. 48 | """ 49 | problem: str 50 | model_response: str 51 | problem_type: RewardType = RewardType.UNK 52 | ground_truth: dict = field(default_factory=dict) 53 | 54 | 55 | @dataclass 56 | class RewardOutput: 57 | """Data structure for the output of reward calculations. 58 | 59 | Attributes: 60 | reward (float): The computed reward value based on the evaluation of the model's response. 61 | is_correct (bool): A boolean flag indicating whether the model's response is deemed correct. 62 | """ 63 | reward: float 64 | is_correct: bool 65 | 66 | 67 | class RewardFn: 68 | """Abstract base class for defining reward calculation strategies. 69 | 70 | This class should be subclassed to implement specific reward calculation logic. 71 | The __call__ method must be overridden to provide the functionality for evaluating 72 | the input and returning the corresponding reward output. 
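73 | 
74 |     A minimal illustrative sketch (``AlwaysWrong`` is a hypothetical subclass,
75 |     not part of this repo):
76 | 
77 |         class AlwaysWrong(RewardFn):
78 |             def __call__(self, input: RewardInput) -> RewardOutput:
79 |                 return RewardOutput(reward=self.config.incorrect_reward, is_correct=False)
80 | 
81 |         out = AlwaysWrong(RewardConfig())(RewardInput(problem="1+1=?", model_response="3"))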
73 | """ 74 | def __init__(self, config: RewardConfig): 75 | self.config = config 76 | 77 | def __call__(self, input: RewardInput) -> RewardOutput: 78 | raise NotImplementedError("Subclasses must implement this method.") -------------------------------------------------------------------------------- /examples/deepscaler/train_grpo_r1_distill_1b_8k.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --job-name=nanoverl_deepscaler 3 | #SBATCH --partition=fan 4 | #SBATCH --nodes=8 5 | #SBATCH --ntasks=8 6 | #SBATCH --ntasks-per-node=1 7 | #SBATCH --gres=gpu:8 8 | #SBATCH --cpus-per-task=100 9 | #SBATCH --mem=512G 10 | #SBATCH --output=./logs/slurm-%j.log 11 | #SBATCH --error=./logs/slurm-%j.log 12 | #SBATCH --exclusive 13 | #SBATCH --time=12:00:00 14 | 15 | # set -x 16 | 17 | sleep 10 18 | export worker_num=$SLURM_NNODES 19 | JOBLOG="./logs/slurm-$SLURM_JOB_ID.log" 20 | 21 | nodes=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) ) 22 | export head_node=${nodes[0]} 23 | export head_node_ip=$(srun --nodes=1 --ntasks=1 -w "$head_node" hostname --ip-address) 24 | export port=30310 25 | export address_head=$head_node_ip:$port 26 | 27 | 28 | export VLLM_ATTENTION_BACKEND=XFORMERS 29 | export EXPERIMENT_NAME=deepscaler-1.5b-8k 30 | export OUTPUT_DIR=$HOME/nanoverl/checkpoints/${EXPERIMENT_NAME} 31 | export GLOO_SOCKET_IFNAME=ens10f0np0 32 | 33 | 34 | srun --nodes=$worker_num --ntasks=$worker_num --ntasks-per-node=1 rm -rf /tmp/ray/ray_current_cluster 35 | srun --nodes=1 --ntasks=1 -w "$head_node" --export=ALL,VLLM_ATTENTION_BACKEND=XFORMERS \ 36 | ray start --head --node-ip-address="$head_node_ip" --port=$port \ 37 | --num-gpus 8 --block & >> ${JOBLOG} 38 | 39 | sleep 10 40 | 41 | for ((i = 1; i < worker_num; i++)); do 42 | node_i=${nodes[$i]} 43 | echo "Starting WORKER $i at $node_i" 44 | srun --nodes=1 --ntasks=1 -w "$node_i" --export=ALL,VLLM_ATTENTION_BACKEND=XFORMERS \ 45 | ray start --address "$address_head" \ 46 | --num-gpus 8 --block & >> ${JOBLOG} 47 | sleep 10 48 | done 49 | 50 | export PYTHONPATH=$PYTHONPATH:$HOME/nanoverl/ 51 | MODEL_PATH="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B" 52 | EXP_NAME="deepscaler-1.5b-8k" 53 | 54 | 55 | python -m main_ppo \ 56 | algorithm.adv_estimator=grpo \ 57 | data.train_files=$HOME/nanoverl/data/parquet_data/deepscaler/train.parquet \ 58 | data.val_files=$HOME/nanoverl/data/parquet_data/deepscaler/aime.parquet \ 59 | data.train_batch_size=128 \ 60 | data.val_batch_size=512 \ 61 | data.max_prompt_length=1024 \ 62 | data.max_response_length=8192 \ 63 | actor_rollout_ref.model.path=$MODEL_PATH \ 64 | actor_rollout_ref.actor.optim.lr=1e-6 \ 65 | actor_rollout_ref.model.use_remove_padding=True \ 66 | actor_rollout_ref.actor.ppo_mini_batch_size=128 \ 67 | actor_rollout_ref.actor.use_dynamic_bsz=True \ 68 | actor_rollout_ref.actor.ppo_max_token_len_per_gpu=32768 \ 69 | actor_rollout_ref.actor.use_kl_loss=True \ 70 | actor_rollout_ref.actor.kl_loss_coef=0.001 \ 71 | actor_rollout_ref.actor.kl_loss_type=low_var_kl \ 72 | actor_rollout_ref.actor.ulysses_sequence_parallel_size=1 \ 73 | actor_rollout_ref.model.enable_gradient_checkpointing=True \ 74 | actor_rollout_ref.actor.fsdp_config.param_offload=False \ 75 | actor_rollout_ref.actor.fsdp_config.grad_offload=False \ 76 | actor_rollout_ref.actor.fsdp_config.optimizer_offload=False \ 77 | actor_rollout_ref.rollout.tensor_model_parallel_size=1 \ 78 | actor_rollout_ref.rollout.name=vllm \ 79 | actor_rollout_ref.rollout.temperature=0.6 \ 80 | 
--------------------------------------------------------------------------------
/examples/deepscaler/prepare_dataset.py:
--------------------------------------------------------------------------------
1 | """Script to prepare DeepScaler training and test datasets.
2 | 
3 | This script processes math problem datasets into a standardized format for training
4 | and testing DeepScaler models. It loads problems from specified datasets, adds
5 | instruction prompts, and saves the processed data as parquet files.
6 | """
7 | 
8 | import argparse
9 | import os
10 | from typing import Any, Dict, Optional
11 | 
12 | import pandas as pd
13 | from datasets import load_dataset
14 | from verl.utils.hdfs_io import copy, makedirs
15 | from verl.utils.reward_score.math import last_boxed_only_string, remove_boxed
16 | 
17 | 
18 | def extract_solution(solution_str: str) -> str:
19 |     """Extract the final boxed solution from a solution string.
20 | 
21 |     Args:
22 |         solution_str: Raw solution string that may contain multiple boxed answers
23 | 
24 |     Returns:
25 |         The final boxed answer with box notation removed
26 |     """
27 |     return remove_boxed(last_boxed_only_string(solution_str))
28 | 
29 | 
30 | def make_map_fn(split: str):
31 |     """Create a mapping function to process dataset examples.
32 | 
33 |     Args:
34 |         split: Dataset split name ('train' or 'test')
35 | 
36 |     Returns:
37 |         Function that processes individual dataset examples
38 |     """
39 |     def process_fn(example: Dict[str, Any], idx: int) -> Optional[Dict[str, Any]]:
40 |         question = example.pop('problem')
41 |         instruction = "Let's think step by step and output the final answer within \\boxed{}."
42 | question = f"{question} {instruction}" 43 | answer = example.pop('answer') 44 | 45 | data = { 46 | "data_source": "", 47 | "prompt": [{ 48 | "role": "user", 49 | "content": question 50 | }], 51 | "ability": "math", 52 | "reward_model": { 53 | "style": "rule", 54 | "ground_truth": answer 55 | }, 56 | "extra_info": { 57 | 'split': split, 58 | 'index': idx 59 | } 60 | } 61 | return data 62 | return process_fn 63 | 64 | 65 | if __name__ == '__main__': 66 | parser = argparse.ArgumentParser(description='Process datasets for DeepScaler training') 67 | parser.add_argument('--local_dir', default=os.path.expanduser('../../data/parquet_data/deepscaler'), 68 | help='Local directory to save processed datasets') 69 | parser.add_argument('--hdfs_dir', default=None, 70 | help='Optional HDFS directory to copy datasets to') 71 | args = parser.parse_args() 72 | 73 | local_dir = args.local_dir 74 | hdfs_dir = args.hdfs_dir 75 | 76 | # Make local directory if it doesn't exist 77 | makedirs(local_dir, exist_ok=True) 78 | 79 | # Initialize datasets 80 | TRAIN_DATASET = "nanoverl/deepscaler" 81 | train_dataset_data = load_dataset(TRAIN_DATASET, split="train") 82 | TEST_DATASETS = ["nanoverl/minerva", "nanoverl/aime", "nanoverl/amc", "nanoverl/olympiad_bench", "nanoverl/math"] 83 | test_dataset_data = [load_dataset(d, split="test") for d in TEST_DATASETS] 84 | 85 | # Process training data 86 | process_fn = make_map_fn('train') 87 | train_data = train_dataset_data.map(process_fn, with_indices=True) 88 | train_df = pd.DataFrame(train_data) 89 | train_df.to_parquet(os.path.join(local_dir, 'train.parquet')) 90 | print("train data size:", len(train_df)) 91 | 92 | # Process and save each test dataset separately 93 | for test_dataset, test_data in zip(TEST_DATASETS, test_dataset_data): 94 | process_fn = make_map_fn('test') 95 | test_data = test_data.map(process_fn, with_indices=True) 96 | dataset_name = os.path.basename(test_dataset.lower()) 97 | test_df = pd.DataFrame(test_data) 98 | test_df.to_parquet(os.path.join(local_dir, f'{dataset_name}.parquet')) 99 | print(f"{dataset_name} test data size:", len(test_df)) 100 | 101 | # Optionally copy to HDFS 102 | if hdfs_dir is not None: 103 | makedirs(hdfs_dir) 104 | copy(src=local_dir, dst=hdfs_dir) 105 | 106 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # UV 98 | # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | #uv.lock 102 | 103 | # poetry 104 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 105 | # This is especially recommended for binary packages to ensure reproducibility, and is more 106 | # commonly ignored for libraries. 107 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 108 | #poetry.lock 109 | 110 | # pdm 111 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 112 | #pdm.lock 113 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 114 | # in version control. 115 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 116 | .pdm.toml 117 | .pdm-python 118 | .pdm-build/ 119 | 120 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 121 | __pypackages__/ 122 | 123 | # Celery stuff 124 | celerybeat-schedule 125 | celerybeat.pid 126 | 127 | # SageMath parsed files 128 | *.sage.py 129 | 130 | # Environments 131 | .env 132 | .venv 133 | env/ 134 | venv/ 135 | ENV/ 136 | env.bak/ 137 | venv.bak/ 138 | 139 | # Spyder project settings 140 | .spyderproject 141 | .spyproject 142 | 143 | # Rope project settings 144 | .ropeproject 145 | 146 | # mkdocs documentation 147 | /site 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. 
For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | #.idea/ 169 | 170 | # PyPI configuration file 171 | .pypirc 172 | 173 | dev/ 174 | data/ 175 | **/outputs/ 176 | **/wandb/ 177 | checkpoints/ -------------------------------------------------------------------------------- /main_ppo.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Bytedance Ltd. and/or its affiliates 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """ 15 | Note that we don't combine the main with ray_trainer as ray_trainer is used by other main. 16 | """ 17 | import hydra 18 | import ray 19 | from verl.trainer.ppo.ray_trainer import RayPPOTrainer 20 | 21 | from nanoverl.rewards.deepscaler_rule_reward import deepscaler_reward_fn 22 | 23 | 24 | @hydra.main(config_path='nanoverl/config', config_name='ppo_trainer', version_base=None) 25 | def main(config): 26 | #FIXME skip yaml since it is too complicated, force to use nanoverl rewards for now 27 | compute_score = deepscaler_reward_fn if True else None 28 | run_ppo(config, compute_score) 29 | 30 | 31 | def run_ppo(config, compute_score=None): 32 | if not ray.is_initialized(): 33 | # this is for local ray cluster 34 | ray.init(runtime_env={'env_vars': {'TOKENIZERS_PARALLELISM': 'true', 'NCCL_DEBUG': 'WARN'}}) 35 | 36 | ray.get(main_task.remote(config, compute_score)) 37 | 38 | 39 | @ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head 40 | def main_task(config, compute_score=None): 41 | # print initial config 42 | from pprint import pprint 43 | 44 | from omegaconf import OmegaConf 45 | from verl.utils.fs import copy_local_path_from_hdfs 46 | pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values 47 | OmegaConf.resolve(config) 48 | 49 | # download the checkpoint from hdfs 50 | local_path = copy_local_path_from_hdfs(config.actor_rollout_ref.model.path) 51 | 52 | # instantiate tokenizer 53 | from verl.utils import hf_tokenizer 54 | tokenizer = hf_tokenizer(local_path) 55 | 56 | # define worker classes 57 | if config.actor_rollout_ref.actor.strategy == 'fsdp': 58 | assert config.actor_rollout_ref.actor.strategy == config.critic.strategy 59 | from verl.single_controller.ray import RayWorkerGroup 60 | from verl.workers.fsdp_workers import (ActorRolloutRefWorker, 61 | CriticWorker) 62 | ray_worker_group_cls = RayWorkerGroup 63 | 64 | elif config.actor_rollout_ref.actor.strategy == 'megatron': 65 | assert config.actor_rollout_ref.actor.strategy == config.critic.strategy 66 | from verl.single_controller.ray.megatron import \ 67 | NVMegatronRayWorkerGroup 68 | from verl.workers.megatron_workers import (ActorRolloutRefWorker, 69 | CriticWorker) 70 | ray_worker_group_cls = NVMegatronRayWorkerGroup 71 | 72 | else: 73 | raise NotImplementedError 74 | 75 | from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role 76 | 77 | role_worker_mapping = { 78 | 
Role.ActorRollout: ray.remote(ActorRolloutRefWorker), 79 | Role.Critic: ray.remote(CriticWorker), 80 | Role.RefPolicy: ray.remote(ActorRolloutRefWorker) 81 | } 82 | 83 | global_pool_id = 'global_pool' 84 | resource_pool_spec = { 85 | global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes, 86 | } 87 | mapping = { 88 | Role.ActorRollout: global_pool_id, 89 | Role.Critic: global_pool_id, 90 | Role.RefPolicy: global_pool_id, 91 | } 92 | 93 | # we should adopt a multi-source reward function here 94 | # - for rule-based rm, we directly call a reward score 95 | # - for model-based rm, we call a model 96 | # - for code related prompt, we send to a sandbox if there are test cases 97 | # - finally, we combine all the rewards together 98 | # - The reward type depends on the tag of the data 99 | if config.reward_model.enable: 100 | if config.reward_model.strategy == 'fsdp': 101 | from verl.workers.fsdp_workers import RewardModelWorker 102 | elif config.reward_model.strategy == 'megatron': 103 | from verl.workers.megatron_workers import RewardModelWorker 104 | else: 105 | raise NotImplementedError 106 | role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker) 107 | mapping[Role.RewardModel] = global_pool_id 108 | 109 | reward_manager_name = config.reward_model.get("reward_manager", "naive") 110 | if reward_manager_name == 'naive': 111 | from verl.workers.reward_manager import NaiveRewardManager 112 | reward_manager_cls = NaiveRewardManager 113 | elif reward_manager_name == 'prime': 114 | from verl.workers.reward_manager import PrimeRewardManager 115 | reward_manager_cls = PrimeRewardManager 116 | else: 117 | raise NotImplementedError 118 | reward_fn = reward_manager_cls(tokenizer=tokenizer, num_examine=0, compute_score=compute_score) 119 | 120 | # Note that we always use function-based RM for validation 121 | val_reward_fn = reward_manager_cls(tokenizer=tokenizer, num_examine=1, compute_score=compute_score) 122 | 123 | resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping) 124 | 125 | trainer = RayPPOTrainer(config=config, 126 | tokenizer=tokenizer, 127 | role_worker_mapping=role_worker_mapping, 128 | resource_pool_manager=resource_pool_manager, 129 | ray_worker_group_cls=ray_worker_group_cls, 130 | reward_fn=reward_fn, 131 | val_reward_fn=val_reward_fn) 132 | trainer.init_workers() 133 | trainer.fit() 134 | 135 | 136 | if __name__ == '__main__': 137 | main() -------------------------------------------------------------------------------- /examples/deepscaler/reasoning_eval.py: -------------------------------------------------------------------------------- 1 | """ 2 | Usage: 3 | python -m sglang_router.launch_server \ 4 | --model-path deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ 5 | --port 30000 --dp-size 8 6 | python reasoning_eval.py \ 7 | --data-path nanoverl/aime \ 8 | --parallel 256 \ 9 | --num-tries 16 \ 10 | --question-key problem 11 | """ 12 | import argparse 13 | import json 14 | import time 15 | 16 | from datasets import load_dataset 17 | from math_verify import parse, verify, LatexExtractionConfig 18 | from latex2sympy2_extended import NormalizationConfig 19 | 20 | import sglang as sgl 21 | from sglang.test.test_utils import ( 22 | add_common_sglang_args_and_parse, 23 | select_sglang_backend, 24 | ) 25 | from sglang.utils import dump_state_text 26 | 27 | @sgl.function 28 | def reasoning_gen(s, question: str): 29 | s += sgl.user( 30 | question 31 | + " Please reason step by step, and put your final answer within 
\\boxed{}." 32 | ) 33 | s += sgl.assistant( 34 | sgl.gen( 35 | "answer", 36 | ) 37 | ) 38 | 39 | 40 | def convert_dataset(path: str, question_key: str, answer_key: str, num_tries: int): 41 | raw_dataset = load_dataset(path) 42 | questions = [] 43 | answers = [] 44 | for data in raw_dataset["test"]: 45 | question = data[question_key] 46 | answer = data[answer_key] 47 | for _ in range(num_tries): 48 | questions.append({"question": question}) 49 | answers.append({"answer": answer}) 50 | return questions, answers 51 | 52 | 53 | def main(args): 54 | # Select backend 55 | sgl.set_default_backend(select_sglang_backend(args)) 56 | 57 | # Get dataset 58 | questions, answers = convert_dataset( 59 | args.data_path, args.question_key, args.answer_key, args.num_tries 60 | ) 61 | 62 | # Run requests 63 | tic = time.time() 64 | states = reasoning_gen.run_batch( 65 | questions, 66 | num_threads=args.parallel, 67 | progress_bar=True, 68 | temperature=0.6, 69 | max_new_tokens=32768, 70 | top_p=0.95, 71 | ) 72 | latency = time.time() - tic 73 | 74 | # Extract answers 75 | # Calculate Pass@K, since we have multiple tries 76 | problem_group = dict() 77 | for i, state in enumerate(states): 78 | try: 79 | pred_answer = parse( 80 | state["answer"], 81 | extraction_config=[ 82 | LatexExtractionConfig( 83 | normalization_config=NormalizationConfig( 84 | nits=False, 85 | malformed_operators=False, 86 | basic_latex=True, 87 | equations=True, 88 | boxed="all", 89 | units=True, 90 | ), 91 | # Ensures that boxed is tried first 92 | boxed_match_priority=True, 93 | try_extract_without_anchor=False, 94 | ) 95 | ], 96 | extraction_mode="first_match", 97 | ) 98 | # turn number to string 99 | if isinstance(answers[i]["answer"], (int, float)): 100 | gt_answer = str(answers[i]["answer"]) 101 | else: 102 | gt_answer = parse(answers[i]["answer"]) 103 | 104 | correct = 1 if verify(pred_answer, gt_answer) else 0 105 | 106 | question = questions[i]["question"] 107 | if question not in problem_group: 108 | problem_group[question] = [] 109 | if len(pred_answer) > 0: 110 | problem_group[question].append((correct, pred_answer[0])) 111 | else: 112 | problem_group[question].append((correct, None)) 113 | except Exception as e: 114 | print(pred_answer, gt_answer) 115 | print(f"Error extracting answer: {e}") 116 | pass 117 | 118 | # Calculate Pass@1 119 | pass_1 = 0 120 | for question, results in problem_group.items(): 121 | pass_1 += sum([1 for crt, _ in results if crt == 1]) 122 | pass_1 /= len(states) 123 | print(f"Pass@1: {pass_1}") 124 | 125 | # Calculate Cons@K (Majority Vote) 126 | from collections import Counter 127 | cons_k = 0 128 | if args.num_tries > 1: 129 | for question, results in problem_group.items(): 130 | # if most common answer is correct, then it is correct 131 | # print(Counter(results).most_common(1)[0][0][0]) 132 | if Counter(results).most_common(1)[0][0][0] == 1: 133 | cons_k += 1 134 | cons_k /= len(problem_group) 135 | print(f"Cons@{args.num_tries}: {cons_k}") 136 | 137 | # Calculate output throughput 138 | num_output_tokens = sum( 139 | s.get_meta_info("answer")["completion_tokens"] for s in states 140 | ) 141 | output_throughput = num_output_tokens / latency 142 | print(f"Output throughput: {output_throughput} token/s") 143 | 144 | # Dump results 145 | dump_state_text(f"tmp_output_{args.backend}.txt", states) 146 | 147 | # Write results 148 | with open(args.result_file, "a") as fout: 149 | value = { 150 | "task": args.data_path, 151 | "backend": args.backend, 152 | "latency": round(latency, 3), 153 | "pass_1": 
154 |             "cons_k": round(cons_k, 3),
155 |             "num_requests": len(questions),
156 |             "other": {
157 |                 "num_questions": len(questions),
158 |                 "parallel": args.parallel,
159 |             },
160 |         }
161 |         fout.write(json.dumps(value) + "\n")
162 | 
163 | 
164 | if __name__ == "__main__":
165 |     parser = argparse.ArgumentParser()
166 |     parser.add_argument("--data-path", type=str, default="nanoverl/aime")
167 |     parser.add_argument("--question-key", type=str, default="problem")
168 |     parser.add_argument("--answer-key", type=str, default="answer")
169 |     parser.add_argument("--num-tries", type=int, default=16)
170 |     add_common_sglang_args_and_parse(parser)
171 |     args = parser.parse_args()
172 |     main(args)
--------------------------------------------------------------------------------
/nanoverl/rewards/deepscaler_rule_reward.py:
--------------------------------------------------------------------------------
1 | """
2 | https://github.com/agentica-project/deepscaler/blob/main/deepscaler/rewards/math_reward.py
3 | This module contains the RewardMathFn class, which evaluates mathematical answers
4 | and assigns rewards based on their correctness. It utilizes a language model to
5 | validate answers when necessary.
6 | """
7 | from typing import List, Union
8 | 
9 | from latex2sympy2_extended import NormalizationConfig
10 | from math_verify import LatexExtractionConfig, parse, verify
11 | 
12 | from nanoverl.rewards import (RewardConfig, RewardFn, RewardInput,
13 |                               RewardOutput, RewardType)
14 | 
15 | THOUGHT_DELIMITER_START = "<think>"  # responses must wrap their reasoning in these tags
16 | THOUGHT_DELIMITER_END = "</think>"
17 | 
18 | class RewardMathFn(RewardFn):
19 |     """
20 |     Reward function for evaluating mathematical answers.
21 | 
22 |     This class implements the __call__ method to process the input and determine
23 |     the reward based on the correctness of the provided answer compared to the ground truth.
24 |     """
25 | 
26 |     def __call__(self, input: RewardInput) -> RewardOutput:
27 |         assert input.problem_type == RewardType.MATH, \
28 |             "Invalid problem type: expected 'MATH', but got '{}'".format(input.problem_type)
29 | 
30 |         # @fan: problem only used for ORM
31 |         # problem = input.problem
32 |         model_response = input.model_response
33 | 
34 |         # Extract solution.
35 |         if THOUGHT_DELIMITER_START in model_response and THOUGHT_DELIMITER_END in model_response:
36 |             model_solution = model_response.split(THOUGHT_DELIMITER_END)[1]
37 |         else:
38 |             return RewardOutput(reward=self.config.format_error_reward, is_correct=False)
39 | 
40 |         # FIXME @fan use math_verify to parse the model solution for now
41 |         # we use open-r1 acc_reward for this:
42 |         # https://github.com/huggingface/open-r1/blob/main/src/open_r1/rewards.py
43 |         model_answers = parse(
44 |             model_solution,
45 |             extraction_config=[
46 |                 LatexExtractionConfig(
47 |                     normalization_config=NormalizationConfig(
48 |                         nits=False,
49 |                         malformed_operators=False,
50 |                         basic_latex=True,
51 |                         equations=True,
52 |                         boxed="all",
53 |                         units=True,
54 |                     ),
55 |                     # Ensures that boxed is tried first
56 |                     boxed_match_priority=True,
57 |                     try_extract_without_anchor=False,
58 |                 )
59 |             ],
60 |             extraction_mode="first_match",
61 |         )
62 |         if model_answers is None or len(model_answers) == 0:
63 |             return RewardOutput(reward=self.config.format_error_reward, is_correct=False)
64 | 
65 |         # Process the ground truth(s)
66 |         ground_truths = input.ground_truth.get("answer", None)
67 |         if ground_truths is None:
68 |             return RewardOutput(reward=self.config.unk_error_reward, is_correct=False)
69 | 
70 |         # Convert single answer to list for uniform processing
71 |         if isinstance(ground_truths, (str, float, int)):
72 |             ground_truths = [ground_truths]
73 | 
74 |         # Process each ground truth
75 |         processed_ground_truths = []
76 |         for truth in ground_truths:
77 |             truth = str(truth)
78 |             if "\\boxed" in truth:
79 |                 processed_truth = parse(
80 |                     truth,
81 |                     extraction_config=[
82 |                         LatexExtractionConfig(
83 |                             normalization_config=NormalizationConfig(
84 |                                 nits=False,
85 |                                 malformed_operators=False,
86 |                                 basic_latex=True,
87 |                                 equations=True,
88 |                                 boxed="all",
89 |                                 units=True,
90 |                             ),
91 |                             # Ensures that boxed is tried first
92 |                             boxed_match_priority=True,
93 |                             try_extract_without_anchor=False,
94 |                         )
95 |                     ],
96 |                     extraction_mode="first_match",
97 |                 )
98 |                 if processed_truth is not None:
99 |                     processed_ground_truths.extend(processed_truth)
100 |             else:
101 |                 truth = parse(truth)
102 |                 processed_ground_truths.extend(truth)
103 | 
104 |         if not processed_ground_truths:
105 |             return RewardOutput(reward=self.config.unk_error_reward, is_correct=False)
106 | 
107 |         # Check against all possible correct answers
108 |         is_correct = verify(model_answers, processed_ground_truths)
109 |         if is_correct:
110 |             return RewardOutput(reward=self.config.correct_reward, is_correct=True)
111 | 
112 |         # If latex heuristics fail and ORM is enabled, use LLM as ORM to evaluate correctness
113 |         if self.config.use_math_orm:
114 |             raise NotImplementedError("ORM is not used in nanoverl yet.")
115 | 
116 |         return RewardOutput(reward=self.config.incorrect_reward, is_correct=False)
117 | 
118 | def deepscaler_reward_fn(data_source: str, solution_str: str, ground_truth: Union[str, List[str]], enable_llm = False):
119 |     reward_config = RewardConfig()
120 |     reward_config.use_math_orm = enable_llm
121 |     reward_fn = RewardMathFn(reward_config)
122 |     reward_response = reward_fn(RewardInput(problem=solution_str, problem_type=RewardType.MATH, model_response=solution_str, ground_truth={"answer": ground_truth}))
123 |     return reward_response.is_correct
124 | 
125 | if __name__ == "__main__":
126 |     reward = RewardMathFn(RewardConfig())
127 |     input = RewardInput(
128 |         problem="Let $P(x)=x^{4}+2 x^{3}-13 x^{2}-14 x+24$ be a polynomial with roots $r_{1}, r_{2}, r_{3}, r_{4}$. Let $Q$ be the quartic polynomial with roots $r_{1}^{2}, r_{2}^{2}, r_{3}^{2}, r_{4}^{2}$, such that the coefficient of the $x^{4}$ term of $Q$ is 1. Simplify the quotient $Q\\left(x^{2}\\right) / P(x)$, leaving your answer in terms of $x$. (You may assume that $x$ is not equal to any of $\\left.r_{1}, r_{2}, r_{3}, r_{4}\\right)$.",
129 |         problem_type=RewardType.MATH,
130 |         model_response="<think> I am omniscient. \\boxed{24 + 14*x + (-13)*x^2 - 2*x^3 + x^4} </think> The answer is \\boxed{24 + 14*x + (-13)*x^2 - 2*x^3 + x^4}.",
131 |         ground_truth={"answer": ["10", "$x^{4}-2 x^{3}-13 x^{2}+14 x+24$"]})
132 |     output = reward(input)
133 |     print(output)
--------------------------------------------------------------------------------
/nanoverl/config/ppo_trainer.yaml:
--------------------------------------------------------------------------------
1 | data:
2 |   tokenizer: null
3 |   train_files: ~/data/rlhf/gsm8k/train.parquet
4 |   val_files: ~/data/rlhf/gsm8k/test.parquet
5 |   prompt_key: prompt
6 |   max_prompt_length: 512
7 |   max_response_length: 512
8 |   train_batch_size: 1024
9 |   val_batch_size: 1312
10 |   return_raw_input_ids: False  # This should be set to true when the tokenizer between policy and rm differs
11 |   return_raw_chat: False
12 |   shuffle: True
13 | 
14 | actor_rollout_ref:
15 |   hybrid_engine: True
16 |   model:
17 |     path: ~/models/deepseek-llm-7b-chat
18 |     external_lib: null
19 |     override_config: { }
20 |     enable_gradient_checkpointing: True
21 |     use_remove_padding: False
22 |   actor:
23 |     strategy: fsdp  # This is for backward-compatibility
24 |     ppo_mini_batch_size: 256
25 |     ppo_micro_batch_size: null  # will be deprecated, use ppo_micro_batch_size_per_gpu
26 |     ppo_micro_batch_size_per_gpu: null
27 |     use_dynamic_bsz: False
28 |     ppo_max_token_len_per_gpu: 16384  # n * ${data.max_prompt_length} + ${data.max_response_length}
29 |     grad_clip: 1.0
30 |     clip_ratio: 0.2
31 |     entropy_coeff: 0.001
32 |     use_kl_loss: False  # True for GRPO
33 |     kl_loss_coef: 0.001  # for grpo
34 |     kl_loss_type: low_var_kl  # for grpo
35 |     ppo_epochs: 1
36 |     shuffle: False
37 |     ulysses_sequence_parallel_size: 1  # sp size
38 |     optim:
39 |       lr: 1e-6
40 |       lr_warmup_steps_ratio: 0.  # the total steps will be injected during runtime
41 |       min_lr_ratio: null  # only useful for warmup with cosine
42 |       warmup_style: constant  # select from constant/cosine
43 |       total_training_steps: -1  # must be overridden by the program
44 |     fsdp_config:
45 |       wrap_policy:
46 |         # transformer_layer_cls_to_wrap: None
47 |         min_num_params: 0
48 |       param_offload: False
49 |       grad_offload: False
50 |       optimizer_offload: False
51 |       fsdp_size: -1
52 |   ref:
53 |     fsdp_config:
54 |       param_offload: False
55 |       wrap_policy:
56 |         # transformer_layer_cls_to_wrap: None
57 |         min_num_params: 0
58 |     log_prob_micro_batch_size: null  # will be deprecated, use log_prob_micro_batch_size_per_gpu
59 |     log_prob_micro_batch_size_per_gpu: null
60 |     log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
61 |     log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}
62 |     ulysses_sequence_parallel_size: ${actor_rollout_ref.actor.ulysses_sequence_parallel_size}  # sp size
63 |   rollout:
64 |     name: vllm
65 |     temperature: 1.0
66 |     top_k: -1  # 0 for hf rollout, -1 for vllm rollout
67 |     top_p: 1
68 |     prompt_length: ${data.max_prompt_length}  # not used for opensource
69 |     response_length: ${data.max_response_length}
70 |     # for vllm rollout
71 |     dtype: bfloat16  # should align with FSDP
72 |     gpu_memory_utilization: 0.5
73 |     ignore_eos: False
74 |     enforce_eager: True
75 |     free_cache_engine: True
76 |     load_format: dummy_dtensor
77 |     tensor_model_parallel_size: 2
78 |     max_num_batched_tokens: 8192
79 |     max_num_seqs: 1024
80 |     log_prob_micro_batch_size: null  # will be deprecated, use log_prob_micro_batch_size_per_gpu
81 |     log_prob_micro_batch_size_per_gpu: null
82 |     log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
83 |     log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}
84 |     disable_log_stats: True
85 |     enable_chunked_prefill: True  # could get higher throughput
86 |     # for hf rollout
87 |     do_sample: True
88 |     # number of responses (i.e. num sample times)
89 |     n: 1  # > 1 for grpo
90 | 
91 | critic:
92 |   strategy: fsdp
93 |   optim:
94 |     lr: 1e-5
95 |     lr_warmup_steps_ratio: 0.  # the total steps will be injected during runtime
96 |     min_lr_ratio: null  # only useful for warmup with cosine
97 |     warmup_style: constant  # select from constant/cosine
98 |     total_training_steps: -1  # must be overridden by the program
99 |   model:
100 |     path: ~/models/deepseek-llm-7b-chat
101 |     tokenizer_path: ${actor_rollout_ref.model.path}
102 |     override_config: { }
103 |     external_lib: ${actor_rollout_ref.model.external_lib}
104 |     enable_gradient_checkpointing: True
105 |     use_remove_padding: False
106 |     fsdp_config:
107 |       param_offload: False
108 |       grad_offload: False
109 |       optimizer_offload: False
110 |       wrap_policy:
111 |         # transformer_layer_cls_to_wrap: None
112 |         min_num_params: 0
113 |       fsdp_size: -1
114 |   ppo_mini_batch_size: ${actor_rollout_ref.actor.ppo_mini_batch_size}
115 |   ppo_micro_batch_size: null  # will be deprecated, use ppo_micro_batch_size_per_gpu
116 |   ppo_micro_batch_size_per_gpu: null
117 |   forward_micro_batch_size: ${critic.ppo_micro_batch_size}
118 |   forward_micro_batch_size_per_gpu: ${critic.ppo_micro_batch_size_per_gpu}
119 |   use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
120 |   ppo_max_token_len_per_gpu: 32768  # (${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}) * 2
121 |   forward_max_token_len_per_gpu: ${critic.ppo_max_token_len_per_gpu}
122 |   ulysses_sequence_parallel_size: 1  # sp size
123 |   ppo_epochs: ${actor_rollout_ref.actor.ppo_epochs}
124 |   shuffle: ${actor_rollout_ref.actor.shuffle}
125 |   grad_clip: 1.0
126 |   cliprange_value: 0.5
127 | 
128 | reward_model:
129 |   enable: False
130 |   strategy: fsdp
131 |   model:
132 |     input_tokenizer: ${actor_rollout_ref.model.path}  # set this to null if the chat template is identical
133 |     path: ~/models/FsfairX-LLaMA3-RM-v0.1
134 |     external_lib: ${actor_rollout_ref.model.external_lib}
135 |     use_remove_padding: False
136 |     fsdp_config:
137 |       min_num_params: 0
138 |       param_offload: False
139 |       fsdp_size: -1
140 |   micro_batch_size: null  # will be deprecated, use micro_batch_size_per_gpu
141 |   micro_batch_size_per_gpu: null  # set a number
142 |   max_length: null
143 |   ulysses_sequence_parallel_size: 1  # sp size
144 |   use_dynamic_bsz: ${critic.use_dynamic_bsz}
145 |   forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu}
146 |   reward_manager: naive
147 | 
148 | algorithm:
149 |   gamma: 1.0
150 |   lam: 1.0
151 |   adv_estimator: gae
152 |   kl_penalty: kl  # how to estimate kl divergence
153 |   kl_ctrl:
154 |     type: fixed
155 |     kl_coef: 0.001
156 | 
157 | trainer:
158 |   total_epochs: 30
159 |   total_training_steps: null
160 |   project_name: verl_examples
161 |   experiment_name: gsm8k
162 |   logger: [ 'console', 'wandb' ]
163 |   val_generations_to_log_to_wandb: 0
164 |   nnodes: 1
165 |   n_gpus_per_node: 8
166 |   save_freq: -1
167 |   # auto: find the last ckpt to resume. If can't find, start from scratch
168 |   resume_mode: auto  # or disable or resume_path if resume_from_path is set
169 |   resume_from_path: False
170 |   test_freq: -1
171 |   critic_warmup: 0
172 |   default_hdfs_dir: null
173 |   remove_previous_ckpt_in_save: False
174 |   del_local_ckpt_after_load: False
175 |   default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                                  Apache License
2 |                            Version 2.0, January 2004
3 |                         http://www.apache.org/licenses/
4 | 
5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 |    1. Definitions.
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------