├── .clang-format
├── .github
└── workflows
│ └── check_format.yml
├── .gitignore
├── .gitmodules
├── .pre-commit-config.yaml
├── CMakeLists.txt
├── CONTRIBUTING.md
├── CONTRIBUTING_zh.md
├── LICENSE
├── NOTICE_Third_Party.md
├── README.md
├── README_zh.md
├── RELEASE.md
├── cmake
├── CMakeDetermineRustCompiler.cmake
├── CMakeRustCompiler.cmake.in
├── CMakeRustInformation.cmake
├── CMakeTestRustCompiler.cmake
├── FindRust.cmake
├── FindSentencePiece.cmake
├── cargo_library.cmake
├── cargo_shared_library.cmake
├── cc_binary.cmake
├── cc_library.cmake
├── cc_test.cmake
├── grpc_proto_library.cmake
├── proto_library.cmake
└── static_analyzers.cmake
├── docs
├── assets
│ ├── service_arch.png
│ ├── wechat_qrcode1.png
│ ├── wechat_qrcode2.png
│ └── xllm_service_title.png
├── en
│ ├── getting_started.md
│ └── overview.md
└── zh
│ ├── getting_started.md
│ └── overview.md
├── prepare.sh
├── third_party
├── CMakeLists.txt
└── custom_cache
│ └── cpprestsdk.patch
├── vcpkg.json
└── xllm_service
├── CMakeLists.txt
├── chat_template
├── CMakeLists.txt
├── jinja_chat_template.cpp
├── jinja_chat_template.h
└── jinja_chat_template_test.cpp
├── common
├── CMakeLists.txt
├── call_data.h
├── closure_guard.h
├── concurrent_queue.h
├── global_gflags.cpp
├── global_gflags.h
├── hash_util.cpp
├── hash_util.h
├── json_reader.cpp
├── json_reader.h
├── macros.h
├── options.h
├── slice.h
├── threadpool.cpp
├── threadpool.h
├── ttft_predictor.cpp
├── ttft_predictor.h
├── types.h
├── utils.cpp
├── utils.h
└── xllm
│ ├── output.h
│ ├── status.h
│ ├── uuid.cpp
│ └── uuid.h
├── examples
├── CMakeLists.txt
├── curl_http_client.sh
├── http_client_test.cpp
├── rpc_client_test.cpp
└── rpc_hello_client.cpp
├── http_service
├── CMakeLists.txt
├── main.cpp
├── request_tracer.cpp
├── request_tracer.h
├── service.cpp
└── service.h
├── master.cpp
├── master.h
├── proto
├── CMakeLists.txt
├── xllm
│ ├── chat.proto
│ ├── common.proto
│ └── completion.proto
├── xllm_http_service.proto
└── xllm_rpc_service.proto
├── request
├── CMakeLists.txt
└── request.h
├── rpc_service
├── CMakeLists.txt
├── client.cpp
├── client.h
├── main.cpp
├── rpc_service_test.cpp
├── service.cpp
└── service.h
├── scheduler
├── CMakeLists.txt
├── etcd_client
│ ├── CMakeLists.txt
│ ├── etcd_client.cpp
│ └── etcd_client.h
├── loadbalance_policy
│ ├── CMakeLists.txt
│ ├── cache_aware_routing.cpp
│ ├── cache_aware_routing.h
│ ├── loadbalance_policy.h
│ ├── round_robin.cpp
│ └── round_robin.h
├── managers
│ ├── CMakeLists.txt
│ ├── global_kvcache_mgr.cpp
│ ├── global_kvcache_mgr.h
│ ├── instance_mgr.cpp
│ └── instance_mgr.h
├── response_handler.cpp
├── response_handler.h
├── scheduler.cpp
└── scheduler.h
└── tokenizer
├── CMakeLists.txt
├── fast_tokenizer.cpp
├── fast_tokenizer.h
├── sentencepiece_tokenizer.cpp
├── sentencepiece_tokenizer.h
├── tiktoken_tokenizer.cpp
├── tiktoken_tokenizer.h
├── tokenizer.h
├── tokenizer_args.cpp
├── tokenizer_args.h
├── tokenizer_factory.cpp
├── tokenizer_factory.h
└── tokenizers
├── CMakeLists.txt
├── Cargo.toml
├── src
└── lib.rs
└── tokenizers.h
/.clang-format:
--------------------------------------------------------------------------------
1 | Language: Cpp
2 | BasedOnStyle: Google
3 | UseTab: Never
4 | IndentWidth: 2
5 | ColumnLimit: 80
6 |
7 | BinPackParameters: false
8 | BinPackArguments: false
9 | ExperimentalAutoDetectBinPacking: false
10 | AllowAllParametersOfDeclarationOnNextLine: false
11 | DerivePointerAlignment: false
12 | PointerAlignment: Left
13 | ...
14 |
--------------------------------------------------------------------------------
/.github/workflows/check_format.yml:
--------------------------------------------------------------------------------
1 | name: CheckFormat
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches: [main]
6 | paths: ['xllm_service/**']
7 | pull_request:
8 | types: [opened, synchronize, reopened]
9 | branches: [main]
10 | paths: ['xllm_service/**']
11 |
12 | jobs:
13 | format-check:
14 | runs-on: ubuntu-latest
15 | steps:
16 | - name: Install clang-format
17 | run: |
18 | pip install clang-format==20.1.6
19 | clang-format --version
20 |
21 | - name: Checkout code
22 | uses: actions/checkout@v4
23 | with:
24 | fetch-depth: 0
25 |
26 | - name: Determine base commit for comparison
27 | id: get_base_commit
28 | run: |
29 | # pull_request action
30 | if [ "${{ github.event_name }}" = "pull_request" ]; then
31 | echo "base_commit=${{ github.event.pull_request.base.sha }}" >> $GITHUB_OUTPUT
32 | else
33 | # push action
34 | echo "base_commit=${{ github.sha }}~1" >> $GITHUB_OUTPUT
35 | fi
36 |
37 | - name: Verify clang-format configuration
38 | run: |
39 | if [ ! -f ".clang-format" ]; then
40 | echo "❌ .clang-format file not found in repository root"
41 | exit 1
42 | fi
43 | clang-format --style=file --dump-config > /dev/null || {
44 | echo "❌ .clang-format file has invalid format"
45 | exit 1
46 | }
47 |
48 | - name: Check code format
49 | shell: /usr/bin/bash {0}
50 | run: |
51 | BASE_COMMIT="${{ steps.get_base_commit.outputs.base_commit }}"
52 | CLANG_FORMAT_FILE="$(pwd)/.clang-format"
53 |
54 | # do clang-format
55 | diff=$(git-clang-format \
56 | --style=file:"$CLANG_FORMAT_FILE" \
57 | --extensions="c,h,cc,cp,cpp,c++,cxx,hh,hpp,hxx,inc,cu,cuh" \
58 | --commit "$BASE_COMMIT" \
59 | --diff)
60 |
61 | # check diff
62 | if [ "$diff" = "no modified files to format" ] || [ "$diff" = "clang-format did not modify any files" ]; then
63 | echo "✅ Code format is correct"
64 | exit 0
65 | fi
66 |
67 | printf "\n❌ You have introduced coding style breakages.\n"
68 |
69 | printf "\n\033[1mSuggested changes:\n\n"
70 | echo "$diff"
71 | exit 1
72 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Visual Studio Code
2 | /.vscode*
3 |
4 | # Idea
5 | /.idea
6 | /cmake-build-debug/
7 | /cmake-build-release/
8 |
9 | # CMake
10 | /build*
11 |
12 | # cache
13 | /.*cache
14 |
15 | # deps
16 | /.deps
17 |
18 | # gtest
19 | /Testing
20 |
21 | # rust
22 | Cargo.lock
23 |
24 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "third_party/brpc"]
2 | path = third_party/brpc
3 | url = https://gitcode.com/xLLM-AI/brpc.git
4 | branch = 1.12.1_cmake
5 | [submodule "third_party/etcd_cpp_apiv3"]
6 | path = third_party/etcd_cpp_apiv3
7 | url = https://gitcode.com/xLLM-AI/etcd-cpp-apiv3.git
8 | branch = v0.15.4
9 | [submodule "third_party/cpprestsdk"]
10 | path = third_party/cpprestsdk
11 | url = https://gitcode.com/xLLM-AI/cpprestsdk.git
12 | branch = v2.10.19
13 | [submodule "third_party/sentencepiece"]
14 | path = third_party/sentencepiece
15 | url = https://gitcode.com/xLLM-AI/sentencepiece.git
16 | [submodule "third_party/minja"]
17 | path = third_party/minja
18 | url = https://gitcode.com/xLLM-AI/minja.git
19 | [submodule "third_party/smhasher"]
20 | path = third_party/smhasher
21 | url = https://gitcode.com/xLLM-AI/smhasher.git
22 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # pre-commit install
2 | # pre-commit run --all-files
3 |
4 | repos:
5 | - repo: https://github.com/pre-commit/mirrors-clang-format
6 | rev: v20.1.6
7 | hooks:
8 | - id: clang-format
9 | types_or: [c++, c, cuda]
10 | exclude: ^(cibuild/|tools/|third_party/|cmake/|build)
11 |
12 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 |
14 |
15 | [English](./CONTRIBUTING.md) | [中文](./CONTRIBUTING_zh.md)
16 |
17 | # Contribute to xLLM-Service
18 |
19 | + Write / translate / fix our documentation
20 | + Raise questions / Answer questions
21 | + Provide demos, examples or test cases
22 | + Give suggestions or other comments
23 | + Participate in [issues](https://github.com/xxx/xLLM/issues) or [discussions](https://github.com/xxx/xLLM/discussions)
24 | + Pull requests
25 | + Sharing related research / application
26 | + Any other ways to improve xLLM
27 |
28 | For developers who want to contribute to our code, here is the guidance:
29 |
30 | ## 1. Choose an issue to contribute
31 | + Issues with label `PR welcome`, which means:
32 | + A reproducible bug
33 | + A function in plan
34 |
35 | ## 2. Install environment for development
36 | + We strongly suggest you read our **[Document](http://xxx/docs/)** before developing
37 | + For setting environment, please check our **[Readme file](/README.md)**
38 |
39 | ## 3. Build our project
40 | + You can run our demo to check whether the requirements are successfully installed:
41 |
42 | ## 4. Test
43 |
44 | After the PR is submitted, we will format and test the code.
45 | Our tests are still far from perfect, so you are welcome to add tests to our project!
--------------------------------------------------------------------------------
/CONTRIBUTING_zh.md:
--------------------------------------------------------------------------------
1 |
14 |
15 | [English](./CONTRIBUTING.md) | [中文](./CONTRIBUTING_zh.md)
16 |
17 | # xLLM-Service 贡献指南
18 |
19 | xLLM-Service致力于为每一位用户和开发者提供开放的XX,因此无论您是XX开发者还是专注于XX用户,我们都欢迎您参与我们的项目。
20 | 您可以通过以下方法为项目作出贡献:
21 |
22 | + 撰写/翻译/修改文档
23 | + 提出或回答问题
24 | + 提供使用或测试样例
25 | + 提供建议或其他评论
26 | + 参与[issues](https://github.com/xxx/xLLM/issues) 或[discussions](https://github.com/xxx/xLLM/discussions)
27 | + 提交Pull request
28 | + 分享相关研究或应用场景
29 | + 其他任何对xLLM-Service的帮助
30 |
31 | 如果您希望参与xLLM的开发,请参考以下提示:
32 |
33 | ## 1. 选择参与贡献的issue
34 | + 您可以选择带有`PR welcome`标签的issue,包括:
35 | + 可复现的bug
36 | + 计划实现的功能
37 |
38 | ## 2. 配置开发环境
39 | + 在开发之前,可以参考我们的 **[文档](http://xxx/docs/)**
40 | + 关于环境配置,参见 **[Readme file](/README.md)**
41 |
42 | ## 3. 项目构建和运行
43 | + 您可以运行如下样例:
44 |
45 | ## 4. 测试
46 |
47 | 在pr提交之后,我们会对代码进行格式化及进一步测试。
48 | 我们的测试目前还很不完善,因此欢迎开发者为测试作出贡献!
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
14 |
15 | [English](./README.md) | [中文](./README_zh.md)
16 |
17 |
18 |
19 |
20 |
21 |
22 | ## 1. Project Overview
23 | **xLLM-service** is a service-layer framework developed based on the **xLLM** inference engine, providing efficient, fault-tolerant, and flexible LLM inference services for clustered deployment.
24 |
25 | xLLM-service aims to address key challenges in enterprise-level service scenarios:
26 |
27 | - How to ensure the SLA of online services and improve resource utilization of offline tasks in a hybrid online-offline deployment environment.
28 |
29 | - How to react to changing request loads in actual businesses, such as fluctuations in input/output lengths.
30 |
31 | - Resolving performance bottlenecks of multimodal model requests.
32 |
33 | - Ensuring high reliability of computing instances.
34 |
35 | ---
36 |
37 | ## 2. Key Features
38 | With management of computing resource pools, intelligent scheduling and preemption of hybrid requests, and real-time monitoring of computing instances, xLLM-service achieves the following key features:
39 |
40 | - Unified scheduling of online and offline requests, with preemptive execution for online requests and best-effort execution for offline requests.
41 |
42 | - Adaptive dynamic allocation of PD ratios, supporting efficient switching of instance PD roles.
43 |
44 | - EPD three-stage disaggregation for multimodal requests, with intelligent resource allocation for different stages.
45 |
46 | - Fault-tolerant architecture, fast detection of instance errors and automatic rescheduling for interrupted requests.
47 |
48 | ---
49 |
50 | ## 3. Core Architecture
51 |
52 | ```
53 | ├── xllm-service/
54 | | : main source folder
55 | │ ├── chat_template/ #
56 | │ ├── common/ #
57 | │ ├── examples/ #
58 | │ ├── http_service/ #
59 | │ ├── rpc_service/ #
60 | | ├── tokenizers/ #
61 | | └── master.cpp #
62 | ```
63 |
64 | ---
65 |
66 |
67 | ## 4. Quick Start
68 | #### Installation
69 | ```bash
70 | git clone git@coding.jd.com:xllm-ai/xllm_service.git
71 | cd xllm_service
72 | git submodule init
73 | git submodule update
74 | ```
75 | #### Compilation
76 | compile xllm-service:
77 | ```bash
78 | sh prepare.sh # apply patch
79 | mkdir -p build && cd build
80 | cmake .. && make -j 8
81 | ```
82 |
83 | ---
84 |
85 | ## 5. Contributing
86 |
87 | There are several ways you can contribute to xLLM:
88 |
89 | 1. Reporting Issues (Bugs & Errors)
90 | 2. Suggesting Enhancements
91 | 3. Improving Documentation
92 | + Fork the repository
93 | + Add your view in document
94 | + Send your pull request
95 | 4. Writing Code
96 | + Fork the repository
97 | + Create a new branch
98 | + Add your feature or improvement
99 | + Send your pull request
100 |
101 | We appreciate all kinds of contributions! 🎉🎉🎉
102 | If you have problems during development, please check our documentation: **[Document](./docs/docs/readme.md)**
103 |
104 | ---
105 |
106 | ## 6. Community & Support
107 |
108 | If you encounter any issues along the way, you are welcome to submit reproducible steps and log snippets in the project's Issues area, or contact the xLLM Core team directly via your internal Slack.
109 |
110 | Welcome to contact us:
111 |
112 |
113 |

114 |

115 |
116 |
117 | ---
118 | ## 7. About the Contributors
119 |
120 | Thanks to all the following [developers](https://github.com/jd-opensource/xllm-service/graphs/contributors) who have contributed to xLLM.
121 |
122 |
123 |
124 |
125 | ---
126 |
127 | ## 8. License
128 |
129 | [Apache License](LICENSE)
130 |
131 | #### xLLM is provided by JD.com
132 | #### Thanks for your contributions!
133 |
--------------------------------------------------------------------------------
/README_zh.md:
--------------------------------------------------------------------------------
1 |
14 |
15 | [English](./README.md) | [中文](./README_zh.md)
16 |
17 |
18 |
19 |
20 |
21 |
22 | ## 1. 简介
23 | **xLLM-service** 是一个基于 xLLM 推理引擎开发的服务层框架,为集群化部署提供高效率、高容错、高灵活性的大模型推理服务。
24 |
25 | xLLM-service 旨在解决企业级服务场景中的关键挑战:
26 | - 如何于在离线混合部署环境中,保障在线服务的SLA,提升离线任务的资源利用率。
27 | - 如何适应实际业务中动态变化的请求负载,如输入/输出长度出现剧烈波动。
28 | - 解决多模态模型请求的性能瓶颈。
29 | - 保障集群计算实例的高可靠性。
30 |
31 | ---
32 |
33 | ## 2. 核心特性
34 |
35 | xLLM-service 通过对计算资源池的动态管理、请求的智能调度与抢占,以及计算实例的实时监控,实现了以下核心能力:
36 | - 在线与离线任务的统一调度,在线请求的抢占式执行,离线请求best-effort执行;
37 | - PD比例的自适应动态调配,支持实例PD角色的高效切换;
38 | - 多模态请求的EPD三阶段分离,不同阶段的资源智能分配;
39 | - 多节点容错架构,快速感知实例错误信息,自动决策最优的被中断请求再调度方案。
40 |
41 | ---
42 |
43 | ## 3. 代码结构
44 |
45 | ```
46 | ├── xllm-service/
47 | | : 主代码目录
48 | │ ├── chat_template/ #
49 | │ ├── common/ #
50 | │ ├── examples/ #
51 | │ ├── http_service/ #
52 | │ ├── rpc_service/ #
53 | | ├── tokenizers/ #
54 | | └── master.cpp #
55 | ```
56 | ---
57 |
58 |
59 | ## 4. 快速开始
60 | #### 安装
61 | ```bash
62 | git clone git@coding.jd.com:xllm-ai/xllm_service.git
63 | cd xllm_service
64 | git submodule init
65 | git submodule update
66 | ```
67 | #### 编译
68 | 编译执行
69 | ```bash
70 | sh prepare.sh # 应用patch
71 | mkdir -p build && cd build
72 | cmake .. && make -j 8
73 | ```
74 |
75 | ---
76 | ## 5. 成为贡献者
77 | 您可以通过以下方法为 xLLM-Service 作出贡献:
78 |
79 | 1. 在Issue中报告问题
80 | 2. 提供改进建议
81 | 3. 补充文档
82 | + Fork仓库
83 | + 修改文档
84 | + 提出pull request
85 | 4. 修改代码
86 | + Fork仓库
87 | + 创建新分支
88 | + 加入您的修改
89 | + 提出pull request
90 |
91 | 感谢您的贡献! 🎉🎉🎉
92 | 如果您在开发中遇到问题,请参阅**[xLLM-Service中文指南](./docs/docs_zh/readme.md)**
93 |
94 | ---
95 |
96 | ## 6. 社区支持
97 |
98 | 如果你在xLLM的开发或使用过程中遇到任何问题,欢迎在项目的Issue区域提交可复现的步骤或日志片段。
99 | 如果您有企业内部Slack,请直接联系xLLM Core团队。
100 |
101 | 欢迎沟通和联系我们:
102 |
103 |
104 |

105 |

106 |
107 |
108 | ## 7. 致谢
109 |
110 | 感谢以下为xLLM-Service作出贡献的[开发者](https://github.com/jd-opensource/xllm-service/graphs/contributors)
111 |
112 |
113 |
114 |
115 | ---
116 |
117 | ## 8. 许可证
118 | [Apache License](LICENSE)
119 |
120 | #### xLLM-Service 由 JD.com 提供
121 | #### 感谢您对xLLM的关心与贡献!
122 |
--------------------------------------------------------------------------------
/RELEASE.md:
--------------------------------------------------------------------------------
1 | # Release xllm-service 0.1.0
2 |
3 | ## **Major Features and Improvements**
4 |
5 | - Support disaggregated prefill and decoding.
6 | - Support KV Cache aware routing.
7 | - Support KV Cache Pool.
8 |
--------------------------------------------------------------------------------
/cmake/CMakeDetermineRustCompiler.cmake:
--------------------------------------------------------------------------------
1 | # ported from https://github.com/Devolutions/CMakeRust
2 | if(NOT CMAKE_Rust_COMPILER)
3 | find_package(Rust)
4 | if(RUST_FOUND)
5 | set(CMAKE_Rust_COMPILER "${RUSTC_EXECUTABLE}")
6 | set(CMAKE_Rust_COMPILER_ID "Rust")
7 | set(CMAKE_Rust_COMPILER_VERSION "${RUST_VERSION}")
8 | set(CMAKE_Rust_PLATFORM_ID "Rust")
9 | endif()
10 | endif()
11 |
12 | message(STATUS "Cargo Home: ${CARGO_HOME}")
13 | message(STATUS "Rust Compiler Version: ${RUSTC_VERSION}")
14 |
15 | mark_as_advanced(CMAKE_Rust_COMPILER)
16 |
17 | if(CMAKE_Rust_COMPILER)
18 | set(CMAKE_Rust_COMPILER_LOADED 1)
19 | endif(CMAKE_Rust_COMPILER)
20 |
21 | configure_file(${CMAKE_CURRENT_LIST_DIR}/CMakeRustCompiler.cmake.in
22 | ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${CMAKE_VERSION}/CMakeRustCompiler.cmake IMMEDIATE @ONLY)
23 |
24 | set(CMAKE_Rust_COMPILER_ENV_VAR "RUSTC")
25 |
26 |
--------------------------------------------------------------------------------
/cmake/CMakeRustCompiler.cmake.in:
--------------------------------------------------------------------------------
1 |
2 | # ported from https://github.com/Devolutions/CMakeRust
3 | set(CMAKE_Rust_COMPILER "@CMAKE_Rust_COMPILER@")
4 | set(CMAKE_Rust_COMPILER_ID "@CMAKE_Rust_COMPILER_ID@")
5 | set(CMAKE_Rust_COMPILER_VERSION "@CMAKE_Rust_COMPILER_VERSION@")
6 | set(CMAKE_Rust_COMPILER_LOADED @CMAKE_Rust_COMPILER_LOADED@)
7 | set(CMAKE_Rust_PLATFORM_ID "@CMAKE_Rust_PLATFORM_ID@")
8 |
9 | SET(CMAKE_Rust_SOURCE_FILE_EXTENSIONS rs)
10 | SET(CMAKE_Rust_LINKER_PREFERENCE 40)
11 | set(CMAKE_Rust_COMPILER_ENV_VAR "RUSTC")
12 |
13 |
--------------------------------------------------------------------------------
/cmake/CMakeRustInformation.cmake:
--------------------------------------------------------------------------------
1 | # ported from https://github.com/Devolutions/CMakeRust
2 | #
3 | # Usage: rustc [OPTIONS] INPUT
4 | #
5 | # Options:
6 | # -h --help Display this message
7 | # --cfg SPEC Configure the compilation environment
8 | # -L [KIND=]PATH Add a directory to the library search path. The
9 | # optional KIND can be one of dependency, crate, native,
10 | # framework or all (the default).
11 | # -l [KIND=]NAME Link the generated crate(s) to the specified native
12 | # library NAME. The optional KIND can be one of static,
13 | # dylib, or framework. If omitted, dylib is assumed.
14 | # --crate-type [bin|lib|rlib|dylib|cdylib|staticlib|metadata]
15 | # Comma separated list of types of crates for the
16 | # compiler to emit
17 | # --crate-name NAME Specify the name of the crate being built
18 | # --emit [asm|llvm-bc|llvm-ir|obj|link|dep-info]
19 | # Comma separated list of types of output for the
20 | # compiler to emit
21 | # --print [crate-name|file-names|sysroot|cfg|target-list|target-cpus|target-features|relocation-models|code-models]
22 | # Comma separated list of compiler information to print
23 | # on stdout
24 | # -g Equivalent to -C debuginfo=2
25 | # -O Equivalent to -C opt-level=2
26 | # -o FILENAME Write output to
27 | # --out-dir DIR Write output to compiler-chosen filename in
28 | # --explain OPT Provide a detailed explanation of an error message
29 | # --test Build a test harness
30 | # --target TARGET Target triple for which the code is compiled
31 | # -W --warn OPT Set lint warnings
32 | # -A --allow OPT Set lint allowed
33 | # -D --deny OPT Set lint denied
34 | # -F --forbid OPT Set lint forbidden
35 | # --cap-lints LEVEL Set the most restrictive lint level. More restrictive
36 | # lints are capped at this level
37 | # -C --codegen OPT[=VALUE]
38 | # Set a codegen option
39 | # -V --version Print version info and exit
40 | # -v --verbose Use verbose output
41 | #
42 | # Additional help:
43 | # -C help Print codegen options
44 | # -W help Print 'lint' options and default settings
45 | # -Z help Print internal options for debugging rustc
46 | # --help -v Print the full set of options rustc accepts
47 | #
48 |
49 | #