├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md ├── images │ ├── http_req_per_sec_vs_body_size.png │ ├── http_req_per_sec_vs_worker_threads.png │ ├── https_req_per_sec_vs_body_size.png │ └── https_req_per_sec_vs_worker_threads.png └── workflows │ └── build.yaml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── LICENSE-THIRD-PARTY ├── README.md ├── README_zh.md ├── ROADMAP.md ├── examples ├── README.md ├── config.toml ├── gen_cert.sh └── thrift.toml ├── monolake-core ├── Cargo.toml └── src │ ├── config │ └── mod.rs │ ├── context.rs │ ├── error.rs │ ├── http │ └── mod.rs │ ├── lib.rs │ ├── listener.rs │ ├── orchestrator │ ├── mod.rs │ ├── runtime.rs │ ├── service_executor.rs │ └── worker_manager.rs │ ├── thrift │ └── mod.rs │ └── util │ ├── hash.rs │ ├── mod.rs │ └── uri_serde.rs ├── monolake-services ├── Cargo.toml └── src │ ├── common │ ├── cancel │ │ ├── linked_list.rs │ │ └── mod.rs │ ├── context.rs │ ├── delay.rs │ ├── detect.rs │ ├── erase.rs │ ├── map.rs │ ├── mod.rs │ ├── panic.rs │ ├── selector.rs │ └── timeout.rs │ ├── http │ ├── core.rs │ ├── detect.rs │ ├── handlers │ │ ├── connection_persistence.rs │ │ ├── content_handler.rs │ │ ├── mod.rs │ │ ├── openid.rs │ │ ├── route.rs │ │ └── upstream.rs │ ├── mod.rs │ └── util.rs │ ├── hyper │ └── mod.rs │ ├── lib.rs │ ├── proxy_protocol │ └── mod.rs │ ├── tcp │ ├── echo.rs │ ├── mod.rs │ └── proxy.rs │ ├── thrift │ ├── handlers │ │ ├── mod.rs │ │ └── proxy.rs │ ├── mod.rs │ └── ttheader.rs │ └── tls │ ├── mod.rs │ ├── nativetls.rs │ └── rustls.rs ├── monolake ├── Cargo.toml └── src │ ├── config │ ├── extractor.rs │ ├── manager.rs │ └── mod.rs │ ├── context.rs │ ├── factory.rs │ ├── main.rs │ └── util.rs ├── rust-toolchain.toml └── rustfmt.toml /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | 12 | A clear and concise description of what the bug is. 13 | 14 | **To Reproduce** 15 | 16 | Steps to reproduce the behavior: 17 | 1. Go to '...' 18 | 2. Click on '....' 19 | 3. Scroll down to '....' 20 | 4. See error 21 | 22 | **Expected behavior** 23 | 24 | A clear and concise description of what you expected to happen. 25 | 26 | **Screenshots** 27 | 28 | If applicable, add screenshots to help explain your problem. 29 | 30 | **MonoLake version:** 31 | 32 | Please provide the version of MonoLake you are using. 33 | 34 | **Environment:** 35 | 36 | The output of `uname -a` and `env`. 37 | 38 | **Additional context** 39 | 40 | Add any other context about the problem here. 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | 12 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 13 | 14 | **Describe the solution you'd like** 15 | 16 | A clear and concise description of what you want to happen. 
17 | 18 | **Describe alternatives you've considered** 19 | 20 | A clear and concise description of any alternative solutions or features you've considered. 21 | 22 | **Additional context** 23 | 24 | Add any other context or screenshots about the feature request here. 25 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | #### What type of PR is this? 2 | 17 | 18 | #### Check the PR title. 19 | 24 | - [ ] This PR title match the format: \(optional scope): \ 25 | - [ ] The description of this PR title is user-oriented and clear enough for others to understand. 26 | - [ ] Attach the PR updating the user documentation if the current PR requires user awareness at the usage level. [User docs repo](https://github.com/cloudwego/cloudwego.github.io) 27 | 28 | 29 | #### (Optional) Translate the PR title into Chinese. 30 | 31 | 32 | #### (Optional) More detailed description for this PR(en: English/zh: Chinese). 33 | 36 | en: 37 | zh(optional): 38 | 39 | 40 | #### (Optional) Which issue(s) this PR fixes: 41 | 45 | 46 | #### (optional) The PR that updates user documentation: 47 | 50 | -------------------------------------------------------------------------------- /.github/images/http_req_per_sec_vs_body_size.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudwego/monolake/855163e849689d1493a946bc81b6f0988b54b111/.github/images/http_req_per_sec_vs_body_size.png -------------------------------------------------------------------------------- /.github/images/http_req_per_sec_vs_worker_threads.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudwego/monolake/855163e849689d1493a946bc81b6f0988b54b111/.github/images/http_req_per_sec_vs_worker_threads.png -------------------------------------------------------------------------------- /.github/images/https_req_per_sec_vs_body_size.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudwego/monolake/855163e849689d1493a946bc81b6f0988b54b111/.github/images/https_req_per_sec_vs_body_size.png -------------------------------------------------------------------------------- /.github/images/https_req_per_sec_vs_worker_threads.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudwego/monolake/855163e849689d1493a946bc81b6f0988b54b111/.github/images/https_req_per_sec_vs_worker_threads.png -------------------------------------------------------------------------------- /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: [ push, pull_request ] 4 | 5 | jobs: 6 | build: 7 | name: monolake-build 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - name: Install latest nightly 12 | uses: actions-rs/toolchain@v1 13 | with: 14 | toolchain: nightly 15 | override: true 16 | components: rustfmt, clippy 17 | - run: sudo apt-get install libssl-dev pkg-config 18 | - name: Run cargo build 19 | uses: actions-rs/cargo@v1 20 | with: 21 | command: check 22 | - name: Run cargo build with openid feature 23 | uses: actions-rs/cargo@v1 24 | with: 25 | command: check 26 | args: --features openid 27 | test: 28 | name: monolake-test 29 | runs-on: 
ubuntu-latest 30 | steps: 31 | - uses: actions/checkout@v2 32 | - name: Install latest nightly 33 | uses: actions-rs/toolchain@v1 34 | with: 35 | toolchain: nightly 36 | override: true 37 | components: rustfmt, clippy 38 | - run: sudo apt-get install libssl-dev pkg-config 39 | - name: Run cargo test 40 | uses: actions-rs/cargo@v1 41 | with: 42 | command: test 43 | - name: Run cargo test with openid feature 44 | uses: actions-rs/cargo@v1 45 | with: 46 | command: test 47 | args: --features openid 48 | format: 49 | name: monolake-format 50 | runs-on: ubuntu-latest 51 | steps: 52 | - uses: actions/checkout@v2 53 | - name: Install latest nightly 54 | uses: actions-rs/toolchain@v1 55 | with: 56 | toolchain: nightly 57 | override: true 58 | components: rustfmt, clippy 59 | - run: sudo apt-get install libssl-dev pkg-config 60 | - name: Run cargo build 61 | uses: actions-rs/cargo@v1 62 | with: 63 | command: fmt 64 | args: --all -- --check 65 | clippy: 66 | name: monolake-clippy 67 | runs-on: ubuntu-latest 68 | steps: 69 | - uses: actions/checkout@v2 70 | - name: Install latest nightly 71 | uses: actions-rs/toolchain@v1 72 | with: 73 | toolchain: nightly 74 | override: true 75 | components: rustfmt, clippy 76 | - run: sudo apt-get install libssl-dev pkg-config 77 | - name: Run cargo build 78 | uses: actions-rs/cargo@v1 79 | with: 80 | command: clippy 81 | args: -- -D warnings 82 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .vscode 3 | .idea 4 | .swp 5 | *.pem 6 | 7 | /examples/certs 8 | /docs/cloudwego.github.io 9 | /benchmark/visualization/*.dat 10 | /benchmark/visualization/*.png 11 | /benchmark/visualization/*.txt 12 | /benchmark/visualization/*.csv 13 | /benchmark/visualization/*.log 14 | /benchmark/*.log 15 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | conduct@cloudwego.io. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. 
Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Monolake is still relatively incomplete, and we welcome anyone to contribute. 4 | 5 | ## Code of Conduct 6 | 7 | This project complies with the [Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct). Violations can be reported to the administrator via email: rain-jiang@outlook.com / ihciah@gmail.com 8 | 9 | ## Pull Requests 10 | 11 | We welcome any code contributions, please try to make them follow the following rules: 12 | 13 | 1. Pass the test and format. Although we have CI to automate testing, it is recommended that you pass `cargo test` and `cargo fmt` locally before submitting. 14 | 2. In the PR, describe your **problem to be solved**, **thought to solve the problem**, and **architecture design** in as much detail as possible. And as far as possible, the working logic of the code is described through rich documents. 15 | 3. Describe your commit concisely and clearly in the git message. 16 | 17 | It is recommended to use GPG to sign your commit. 
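The quickest way to satisfy rule 1 is to run locally the same checks that the CI workflow (`.github/workflows/build.yaml`, shown above) runs on every push and pull request. The commands below are inferred from that workflow and assume a nightly toolchain with the `rustfmt` and `clippy` components installed:

```sh
# Formatting, lints, and tests as exercised by CI
cargo fmt --all -- --check
cargo clippy -- -D warnings
cargo test
cargo test --features openid   # the optional openid feature is checked separately in CI
```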
18 | 19 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["monolake", "monolake-core", "monolake-services"] 3 | resolver = "2" 4 | 5 | [workspace.package] 6 | authors = [ 7 | "ihciah ", 8 | "Rain Jiang ", 9 | "Harsha ", 10 | "Xiaosong Yang ", 11 | "Kingtous ", 12 | ] 13 | categories = ["asynchronous", "network-programming", "development-tools"] 14 | edition = "2021" 15 | keywords = ["proxy", "gateway", "async", "monoio", "http"] 16 | license = "MIT/Apache-2.0" 17 | repository = "https://github.com/cloudwego/monolake" 18 | 19 | [workspace.dependencies] 20 | monoio = "0.2.4" 21 | monoio-codec = "0.3" 22 | monoio-http = "0.3.5" 23 | monoio-thrift = "0.1.1" 24 | monoio-transports = "0.5.3" 25 | monoio-native-tls = "0.3.0" 26 | monoio-rustls = "0.3.0" 27 | native-tls = "0.2" 28 | service-async = "0.2.3" 29 | certain-map = "0.3.1" 30 | local-sync = "0.1" 31 | http = "1.0" 32 | anyhow = "1" 33 | thiserror = "1" 34 | serde = "1" 35 | tracing = "0.1" 36 | bytes = "1" 37 | 38 | [profile.release-lto] 39 | inherits = "release" 40 | lto = true 41 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023 Monolake Contributors 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CloudWeGo-Monolake 2 | 3 | English | [简体中文](README_zh.md) 4 | 5 | [![WebSite](https://img.shields.io/website?up_message=cloudwego&url=https%3A%2F%2Fwww.cloudwego.io%2F)](https://www.cloudwego.io/) 6 | [![License](https://img.shields.io/github/license/cloudwego/monolake)](https://github.com/cloudwego/monolake/blob/main/LICENSE) 7 | [![OpenIssue](https://img.shields.io/github/issues/cloudwego/monolake)](https://github.com/cloudwego/monolake/issues) 8 | [![ClosedIssue](https://img.shields.io/github/issues-closed/cloudwego/monolake)](https://github.com/cloudwego/monolake/issues?q=is%3Aissue+is%3Aclosed) 9 | ![Stars](https://img.shields.io/github/stars/cloudwego/monolake) 10 | ![Forks](https://img.shields.io/github/forks/cloudwego/monolake) 11 | 12 | ## Monolake Framework 13 | 14 | Monolake is a framework for developing high-performance network services like proxies and gateways in **Rust**. It is built from the ground up as a blank slate design, starting with a async runtime called [Monoio](https://docs.rs/crate/monoio/latest) that has first-class support for **io_uring** . 15 | 16 | While the most widely used Rust async runtime is [Tokio](https://docs.rs/tokio/latest/tokio/), which is an excellent and high-performance epoll/kqueue-based runtime, Monolake takes a different approach. The monoio runtime developed by Bytedance is designed with a thread-per-core model in mind, allowing Monolake to extract maximum performance from io_uring's highly efficient asynchronous I/O operations. 17 | 18 | By building Monolake on this novel runtime foundation, the team was able to incorporate new first-class support for io_uring throughout the ecosystem. This includes io_uring specific IO traits and a unique service architecture that differs from the popular Tower implementation. Monolake also includes io_uring optimized implementations for Thrift and HTTP. 19 | 20 | The Monolake framework has been used to build various high-performance proxies and gateways, and it is **actively deployed in production at [ByteDance](https://www.bytedance.com/)**. Its use cases are wide-ranging and include: 21 | 22 | - Application Gateways: For protocol conversion, such as HTTP to Thrift 23 | - Security Gateways: Providing pseudonymization for gRPC and Thrift RPCs 24 | 25 | ## Monolake Proxy 26 | 27 | [Monolake Proxy](https://github.com/cloudwego/monolake/tree/main/monolake) is a reference implementation that leverages the various components within the Monolake framework to build a high-performance HTTP and Thrift proxy. This project serves as a showcase for the unique features and capabilities of the Monolake ecosystem. By utilizing the efficient networking capabilities of the [monoio-transports](https://docs.rs/monoio-transports/latest/monoio_transports/) crate, the modular service composition of [service-async](https://docs.rs/service-async/0.2.4/service_async/index.html), and the type-safe context management provided by [certain-map](https://docs.rs/certain-map/latest/certain_map/), Monolake Proxy demonstrates the practical application of the Monolake framework. Additionally, this reference implementation allows for the collection of benchmarks, enabling comparisons against other popular proxy solutions like Nginx and Envoy. 
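Before the feature list below, a small standalone sketch may help make the runtime model concrete. This is not Monolake Proxy code; it is a minimal Monoio TCP echo server, loosely adapted from Monoio's own echo example, showing the ownership-passing ("rent") read/write style that io_uring-based I/O requires. It assumes Monoio's default feature set (including the macro for `#[monoio::main]`); the address and buffer size are arbitrary illustration choices.

```rust
use monoio::{
    io::{AsyncReadRent, AsyncWriteRentExt},
    net::{TcpListener, TcpStream},
};

#[monoio::main]
async fn main() {
    // Bind a listener; in a thread-per-core setup each worker thread
    // typically owns its own accept loop.
    let listener = TcpListener::bind("127.0.0.1:50002").unwrap();
    loop {
        match listener.accept().await {
            Ok((stream, addr)) => {
                println!("accepted a connection from {addr}");
                // Tasks spawned here stay on the current thread (no work stealing).
                monoio::spawn(echo(stream));
            }
            Err(e) => {
                println!("accept failed: {e}");
                return;
            }
        }
    }
}

async fn echo(mut stream: TcpStream) -> std::io::Result<()> {
    let mut buf: Vec<u8> = Vec::with_capacity(8 * 1024);
    let mut res;
    loop {
        // io_uring needs to own the buffer while the operation is in flight,
        // so the buffer is moved into the call and handed back with the result.
        (res, buf) = stream.read(buf).await;
        if res? == 0 {
            return Ok(()); // peer closed the connection
        }
        // Write everything that was just read back to the peer.
        (res, buf) = stream.write_all(buf).await;
        res?;
    }
}
```

Monolake builds on these runtime primitives but composes its request handling through the `Service` abstractions from `service-async`, as described in the features below.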
28 | 29 | ## Basic Features 30 | 31 | - **io_uring-based Async Runtime (Monoio)**: Monolake is built on top of the Monoio runtime, which leverages the advanced capabilities of the io_uring Linux kernel feature to provide a highly efficient and performant asynchronous I/O foundation. 32 | 33 | - **Thread-per-Core Model**: Monoio, the async runtime used by Monolake, follows a thread-per-core architecture, which simplifies concurrent programming and avoids the complexities associated with shared data across multiple threads. 34 | 35 | - **Improved Service Trait and Lifecycle Management**: Monolake introduces an enhanced `Service` trait with improved borrowing semantics and a sophisticated service lifecycle management system, enabling seamless service versioning, rolling updates, and state preservation. 36 | 37 | - **Modular and Composable Connector Architecture**: The `monoio-transports` crate provides a flexible and composable connector system, allowing developers to easily build complex network communication solutions by stacking various connectors (e.g., TCP, TLS, HTTP) on top of each other. 38 | 39 | - **Context Management with `certain_map`**: Monolake utilizes the `certain_map` crate to provide a typed and compile-time guaranteed context management system, simplifying the handling of indirect data dependencies between services. 40 | 41 | - **Optimized Protocol Implementations**: The Monolake framework includes io_uring-optimized implementations for protocols like HTTP and Thrift, taking full advantage of the underlying runtime's capabilities. 42 | 43 | - **Modular and Extensible Design**: The Monolake framework is designed to be modular and extensible, allowing developers to easily integrate custom components or adapt existing ones to their specific needs. 44 | 45 | ## Performance 46 | 47 | ### Test environment 48 | 49 | - AWS instance: c6a.8xlarge 50 | - CPU: AMD EPYC 7R13 Processo, 16 cores, 32 threads 51 | - Memory: 64GB 52 | - OS: 6.1.94-99.176.amzn2023.x86_64, Amazon Linux 2023.5.20240805 53 | - Nginx: 1.24.0 54 | 55 |

56 | [Figure: Requests per Second vs Body Size (HTTPS and HTTP)]
61 | [Figure: Requests per Second vs Worker Threads (HTTPS and HTTP)]

64 | 65 | ## Documentation 66 | 67 | - [**Getting Started**](https://www.cloudwego.io/docs/monolake/getting-started/) 68 | 69 | - [**Architecture**](https://www.cloudwego.io/docs/monolake/architecture/) 70 | 71 | - [**Developer guide**](https://www.cloudwego.io/docs/monolake/tutorial/) 72 | 73 | - [**Config guide**](https://www.cloudwego.io/docs/monolake/config-guide/) 74 | 75 | ## Related Crates 76 | 77 | | Crate | Description | 78 | |-------|-------------| 79 | | [monoio-transports](https://crates.io/crates/monoio-transports) | A foundational crate that provides high-performance, modular networking capabilities, including connectors and utilities for efficient network communications | 80 | | [service-async](https://crates.io/crates/service-async) | A foundational crate that introduces a refined Service trait with efficient borrowing and zero-cost abstractions, as well as utilities for service composition and state management | 81 | | [certain-map](https://crates.io/crates/certain-map) | A foundational crate that provides a typed map data structure, ensuring the existence of specific items at compile-time, useful for managing data dependencies between services | 82 | | [monoio-thrift](https://crates.io/crates/monoio-thrift) | Monoio native, io_uring compatible thrift implementation | 83 | | [monoio-http](https://crates.io/crates/monoio-http) | Monoio native, io_uring compatible HTTP/1.1 and HTTP/2 implementation | 84 | | [monoio-nativetls](https://crates.io/crates/monoio-native-tls) | The native-tls implementation compatible with monoio | 85 | | [monoio-rustls](https://crates.io/crates/monoio-rustls) | The rustls implementation compatible with monoio | 86 | 87 | ## Contributing 88 | 89 | Contributor guide: [Contributing](https://github.com/cloudwego/monolake/blob/main/CONTRIBUTING.md). 90 | 91 | ## License 92 | 93 | Monolake is licensed under the MIT license or Apache license. 94 | 95 | ## Community 96 | - Email: [conduct@cloudwego.io](conduct@cloudwego.io) 97 | - How to become a member: [COMMUNITY MEMBERSHIP](https://github.com/cloudwego/community/blob/main/COMMUNITY_MEMBERSHIP.md) 98 | - Issues: [Issues](https://github.com/cloudwego/monolake/issues) 99 | - Discord: Join community with [Discord Channel](https://discord.gg/NuYubnJ8fV). 100 | 101 | ## Landscapes 102 | 103 |

104 | [Image: CNCF Cloud Native Landscape logos]
106 | CloudWeGo enriches the CNCF CLOUD NATIVE Landscape.

108 | -------------------------------------------------------------------------------- /README_zh.md: -------------------------------------------------------------------------------- 1 | # CloudWeGo-Monolake 2 | 3 | 简体中文 | [English](README.md) 4 | 5 | [![网站状态](https://img.shields.io/website?up_message=cloudwego&url=https%3A%2F%2Fwww.cloudwego.io%2F)](https://www.cloudwego.io/) 6 | [![许可证](https://img.shields.io/github/license/cloudwego/monolake)](https://github.com/cloudwego/monolake/blob/main/LICENSE) 7 | [![开放议题](https://img.shields.io/github/issues/cloudwego/monolake)](https://github.com/cloudwego/monolake/issues) 8 | [![已关闭议题](https://img.shields.io/github/issues-closed/cloudwego/monolake)](https://github.com/cloudwego/monolake/issues?q=is%3Aissue+is%3Aclosed) 9 | ![星标数](https://img.shields.io/github/stars/cloudwego/monolake) 10 | ![复刻数](https://img.shields.io/github/forks/cloudwego/monolake) 11 | 12 | ## Monolake 框架 13 | 14 | Monolake 是一个用 **Rust** 开发高性能网络服务(如代理和网关)的框架。其基于原生支持 **io_uring** 的异步运行时[Monoio](https://docs.rs/crate/monoio/latest) 从零设计的。 15 | 16 | 虽然目前最流行的 Rust 异步运行时是基于 epoll/kqueue 的 [Tokio](https://docs.rs/tokio/latest/tokio/),但 Monolake 选择了不同的技术路线。由字节跳动开发的 Monoio 运行时采用 **线程-核心(thread-per-core)** 模型,通过 io_uring 的高效异步 I/O 操作充分释放硬件性能。 17 | 18 | 基于这一创新运行时,Monolake 在生态中全面支持 io_uring 特性,包括专为 io_uring 设计的 IO 抽象和与 Tower 不同的服务架构。同时,Monolake 提供了针对 Thrift 和 HTTP 的 io_uring 优化实现。 19 | 20 | Monolake 框架已用于构建多种高性能代理和网关,**并积极部署于 [ByteDance](https://www.bytedance.com/) 下的生产环境**,其典型场景包括: 21 | 22 | - **应用网关**:如 HTTP 到 Thrift 的协议转换 23 | - **安全网关**:为 gRPC 和 Thrift RPC 提供数据脱敏能力 24 | 25 | ## Monolake 代理 26 | 27 | [Monolake 代理](https://github.com/cloudwego/monolake/tree/main/monolake) 是基于 Monolake 框架组件构建的高性能 HTTP/Thrift 代理参考实现,展示了框架的核心能力。通过集成 [monoio-transports](https://docs.rs/monoio-transports/latest/monoio_transports/) 的高效网络能力、[service-async](https://docs.rs/service-async/0.2.4/service_async/index.html) 的模块化服务组合机制,以及 [certain-map](https://docs.rs/certain-map/latest/certain_map/) 的类型安全上下文管理,该实现充分体现了 Monolake 的工程实践价值。此外,通过该代理可进行性能基准测试,与 Nginx、Envoy 等主流方案进行对比。 28 | 29 | ## 核心特性 30 | 31 | - **基于 io_uring 的异步运行时(Monoio)**:底层依赖 Monoio 运行时,通过 Linux 内核的 io_uring 特性提供高效的异步 I/O 支持。 32 | 33 | - **线程-核心模型**:Monoio 运行时采用线程-核心架构,简化并发编程模型,避免跨线程数据共享的复杂性。 34 | 35 | - **增强型服务抽象与生命周期管理**:提供改进的 `Service` 特征,优化借用语义,支持服务版本管理、滚动升级和状态保留等高级生命周期功能。 36 | 37 | - **模块化连接器架构**:通过 [monoio-transports](https://crates.io/crates/monoio-transports) 提供可组合的连接器系统,支持通过堆叠 TCP、TLS、HTTP 等组件构建复杂网络方案。 38 | 39 | - **类型化上下文管理(certain_map)**:使用 [certain-map](https://crates.io/crates/certain-map) 实现编译期类型安全的上下文管理,简化服务间间接数据依赖。 40 | 41 | - **协议优化实现**:提供针对 io_uring 优化的 HTTP/Thrift 协议实现,充分发挥底层运行时性能。 42 | 43 | - **模块化扩展设计**:框架设计高度模块化,支持开发者按需定制组件或适配现有实现。 44 | 45 | ## 性能表现 46 | 47 | ### 测试环境 48 | 49 | - AWS 实例:c6a.8xlarge 50 | - CPU:AMD EPYC 7R13 处理器,16 核 32 线程 51 | - 内存:64GB 52 | - 操作系统:6.1.94-99.176.amzn2023.x86_64(Amazon Linux 2023.5.20240805) 53 | - Nginx 版本:1.24.0 54 | 55 |

56 | [图:HTTPS 与 HTTP 请求速率 vs 请求体大小]
61 | [图:HTTPS 与 HTTP 请求速率 vs 工作线程数]

64 | 65 | ## 文档 66 | 67 | - [**快速开始**](https://www.cloudwego.io/zh/docs/monolake/getting-started/) 68 | - [**架构设计**](https://www.cloudwego.io/zh/docs/monolake/architecture/) 69 | - [**开发指南**](https://www.cloudwego.io/zh/docs/monolake/tutorial/) 70 | - [**配置指南**](https://www.cloudwego.io/zh/docs/monolake/config-guid/) 71 | 72 | ## 相关组件 73 | 74 | | 组件 | 描述 | 75 | |-------|-------------| 76 | | [monoio-transports](https://crates.io/crates/monoio-transports) | 基础网络组件,提供高性能模块化网络能力(连接器、工具类等) | 77 | | [service-async](https://crates.io/crates/service-async) | 基础服务抽象组件,提供增强型 Service 特征及零成本抽象组合能力 | 78 | | [certain-map](https://crates.io/crates/certain-map) | 类型安全上下文管理,通过编译期检查确保上下文项存在 | 79 | | [monoio-thrift](https://crates.io/crates/monoio-thrift) | 原生支持 io_uring 的 Thrift 实现 | 80 | | [monoio-http](https://crates.io/crates/monoio-http) | 原生支持 io_uring 的 HTTP/1.1 与 HTTP/2 实现 | 81 | | [monoio-nativetls](https://crates.io/crates/monoio-native-tls) | 兼容 monoio 的 native-tls 实现 | 82 | | [monoio-rustls](https://crates.io/crates/monoio-rustls) | 兼容 monoio 的 rustls 实现 | 83 | 84 | ## 贡献指南 85 | 86 | 贡献说明详见:[贡献指南](https://github.com/cloudwego/monolake/blob/main/CONTRIBUTING.md)。 87 | 88 | ## 许可证 89 | 90 | Monolake 采用 MIT 或 Apache 许可证。 91 | 92 | ## 社区 93 | 94 | - 邮箱:[conduct@cloudwego.io](mailto:conduct@cloudwego.io) 95 | - 加入社区:[社区成员指南](https://github.com/cloudwego/community/blob/main/COMMUNITY_MEMBERSHIP.md) 96 | - 问题反馈:[议题列表](https://github.com/cloudwego/monolake/issues) 97 | - Discord:加入 [Discord 频道](https://discord.gg/b2WgCBRu) 参与讨论 98 | 99 | ## 云原生生态 100 | 101 |

102 | [图:CNCF 云原生全景图徽标]
104 | CloudWeGo 项目已加入 CNCF 云原生全景图。

106 | -------------------------------------------------------------------------------- /ROADMAP.md: -------------------------------------------------------------------------------- 1 | # Project Roadmap 2 | 3 | This document outlines the development plan for Monolake. Our roadmap is divided into short-term and long-term goals, with specific milestones and features planned for each phase. 4 | 5 | ## Short-term Goals (Next 3-9 months) 6 | 7 | ### Version 1.1 8 | 9 | - Config update notification support (replace existing polling approach) 10 | - Admin interface to allow users push configuration, observe internal state and control log level 11 | - Connection dispatcher: dispatching new connections to threads with better strategy 12 | - Enhance observability features (logging, tracing, metrics) 13 | - Load balancing with various algorithms 14 | - TLS/SSL Intel QAT support 15 | 16 | ### Version 1.2 17 | 18 | - IP Allow and Deny list 19 | - Rate limiting 20 | - Enhanced authentication/authorization 21 | - Ingress Controller for Kubernetes 22 | - TLS/SSL NIC/DPU acceleration 23 | 24 | ### Version 1.3 25 | 26 | - Proxy protocol support 27 | - WebSocket support 28 | - HTTP/3 support 29 | - Decompression and serialization DPU acceleration 30 | 31 | ## Long-term Goals (9-12 months) 32 | 33 | ### Version 2.0 34 | 35 | - DPDK support 36 | 37 | ### Future Considerations 38 | 39 | - Monolake-powered applications 40 | - Community-driven feature requests (see Issues labeled 'C-feature-request') 41 | 42 | ## How to Contribute 43 | 44 | We welcome contributions from the community! If you're interested in working on any of these items: 45 | 46 | 1. Check the issue tracker for related issues 47 | 2. Open a new issue to discuss your approach if none exists 48 | 3. Submit a pull request referencing the relevant issue 49 | 50 | For more details, please see our CONTRIBUTING.md file. 51 | 52 | ## Disclaimer 53 | 54 | This roadmap is subject to change based on community feedback, project priorities, and available resources. We'll update this document as plans evolve. 55 | 56 | Last updated: [Nov.5, 2024] 57 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | For detailed information on how to get started with the Monolake framework, please refer to the [Getting Started](https://www.cloudwego.io/docs/monolake/getting-started/) guide. 4 | 5 | ## HTTP Example 6 | 7 | 1. Run `gen_cert.sh` to generate needed certificates. 8 | 2. Start monolake with `cargo run -- --config examples/config.toml`. 9 | 3. `curl --resolve gateway.monoio.rs:8081:127.0.0.1 --cacert examples/certs/rootCA.crt -vvv https://gateway.monoio.rs:8081` 10 | 11 | > Note: Except for the `--cacert path_to_ca`, you can also use `--insecure` to skip the certificate verification. 12 | 13 | ## Thrift Example 14 | 15 | 1. Start monolake with `cargo run -- --config examples/thrift.toml`. 16 | 2. 
Use your client request to `:8081`(will be forwarded to `127.0.0.1:9969`) or `/tmp/thrift_proxy_monolake.sock`(will be forwarded to `/tmp/thrift_server_monolake.sock`) 17 | -------------------------------------------------------------------------------- /examples/config.toml: -------------------------------------------------------------------------------- 1 | # Runtime configuration 2 | [runtime] 3 | runtime_type = "io_uring" # Type of runtime to use (e.g., legacy, io_uring) 4 | worker_threads = 2 # Number of worker threads 5 | entries = 1024 # Number of entries for io_uring 6 | 7 | # Basic HTTP proxy configuration 8 | [servers.demo_http] 9 | name = "monolake.rs" # Proxy name 10 | proxy_type = "http" 11 | listener = { type = "socket", value = "0.0.0.0:8080" } # Listener configuration 12 | upstream_http_version = "http11" # HTTP version for upstream connections 13 | http_opt_handlers = { content_handler = true } # Enable HTTP optional handlers 14 | http_timeout = { server_keepalive_timeout_sec = 60, upstream_connect_timeout_sec = 2, upstream_read_timeout_sec = 2 } 15 | 16 | # Routes for the basic HTTP proxy 17 | [[servers.demo_http.routes]] 18 | path = '/' # Route path 19 | upstreams = [ 20 | { endpoint = { type = "uri", value = "http://ifconfig.co" } }, 21 | ] # Upstream endpoint 22 | 23 | [[servers.demo_http.routes]] 24 | path = '/tls' # Route path for HTTPS endpoint 25 | upstreams = [ 26 | { endpoint = { type = "uri", value = "https://ifconfig.co/cdn-cgi/trace" } }, 27 | ] # Upstream endpoint 28 | 29 | # HTTPS proxy configuration 30 | [servers.demo_https] 31 | tls = { chain = "examples/certs/server.crt", key = "examples/certs/server.key" } 32 | name = "tls.monolake.rs" # Proxy name 33 | proxy_type = "http" 34 | listener = { type = "socket", value = "0.0.0.0:8081" } # Listener configuration 35 | upstream_http_version = "http2" # Upstream connector uses HTTP/2 36 | http_opt_handlers = { content_handler = false } # HTTP optional handlers 37 | 38 | # Routes for the HTTPS server 39 | [[servers.demo_https.routes]] 40 | path = '/' # Route path 41 | load_balancer = "round_robin" # Load balancer type(by default is random) 42 | upstreams = [ 43 | { endpoint = { type = "uri", value = "https://httpbin.org/html" } }, 44 | { endpoint = { type = "uri", value = "https://httpbin.org/json" } }, 45 | ] 46 | 47 | [[servers.demo_https.routes]] 48 | path = '/{*p}' # Wild card route path 49 | upstreams = [{ endpoint = { type = "uri", value = "https://httpbin.org/xml" } }] 50 | 51 | # Unix Domain Socket (UDS) server configuration 52 | [servers.demo_uds] 53 | name = "uds.monolake.rs" # Server name 54 | proxy_type = "http" 55 | listener = { type = "unix", value = "/tmp/monolake.sock" } # Listener configuration 56 | 57 | # Routes for the UDS server 58 | [[servers.demo_uds.routes]] 59 | path = '/' # Route path 60 | upstreams = [ 61 | { endpoint = { type = "uri", value = "https://ifconfig.me" } }, 62 | ] # Upstream endpoint 63 | -------------------------------------------------------------------------------- /examples/gen_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This script is used to generate the needed certificates when run demo. 
4 | # Generate accroading to https://github.com/monoio-rs/monoio-tls/blob/master/example/certs/README.md 5 | 6 | mkdir -p certs 7 | cd certs || exit 8 | 9 | openssl genrsa -out rootCA.key 4096 10 | openssl req -x509 -new -nodes -sha512 -days 3650 \ 11 | -subj "/C=CN/ST=Shanghai/L=Shanghai/O=Monoio/OU=TLSDemo/CN=monolake-ca" \ 12 | -key rootCA.key \ 13 | -out rootCA.crt 14 | 15 | openssl genrsa -out server.key 4096 16 | openssl req -sha512 -new \ 17 | -subj "/C=CN/ST=Shanghai/L=Shanghai/O=Monoio/OU=TLSDemoServer/CN=monolake.rs" \ 18 | -key server.key \ 19 | -out server.csr 20 | 21 | cat > v3.ext <<-EOF 22 | authorityKeyIdentifier=keyid,issuer 23 | basicConstraints=CA:FALSE 24 | keyUsage=digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment 25 | extendedKeyUsage=serverAuth 26 | subjectAltName=@alt_names 27 | 28 | [alt_names] 29 | DNS.1=gateway.monolake.rs 30 | EOF 31 | 32 | openssl x509 -req -sha512 -days 3650 \ 33 | -extfile v3.ext \ 34 | -CA rootCA.crt -CAkey rootCA.key -CAcreateserial \ 35 | -in server.csr \ 36 | -out server.crt 37 | 38 | # Convert files 39 | rm -f rootCA.srl server.csr v3.ext 40 | openssl pkcs8 -topk8 -inform PEM -outform PEM -nocrypt -in server.key -out server.pkcs8 41 | -------------------------------------------------------------------------------- /examples/thrift.toml: -------------------------------------------------------------------------------- 1 | [runtime] 2 | runtime_type = "legacy" 3 | worker_threads = 1 4 | entries = 1024 5 | 6 | [servers.thrift_proxy] 7 | name = "thrift_proxy" 8 | proxy_type = "thrift" 9 | listener = { type = "socket", value = "0.0.0.0:8081" } 10 | route.upstreams = [{ endpoint = { type = "socket", value = "127.0.0.1:9969" } }] 11 | 12 | [servers.thrift_proxy_uds] 13 | name = "thrift_proxy" 14 | proxy_type = "thrift" 15 | listener = { type = "unix", value = "/tmp/thrift_proxy_monolake.sock" } 16 | route.upstreams = [ 17 | { endpoint = { type = "unix", value = "/tmp/thrift_server_monolake.sock" } }, 18 | ] 19 | -------------------------------------------------------------------------------- /monolake-core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "monolake-core" 3 | version = "0.3.0" 4 | description = "MonoLake Core Abstraction and Utils" 5 | 6 | authors.workspace = true 7 | categories.workspace = true 8 | edition.workspace = true 9 | keywords.workspace = true 10 | license.workspace = true 11 | repository.workspace = true 12 | 13 | [features] 14 | openid = [] 15 | proxy-protocol = [] 16 | hyper = ["monoio/poll-io"] 17 | 18 | [dependencies] 19 | monoio = { workspace = true, features = ["splice", "sync"] } 20 | monoio-http = { workspace = true } 21 | monoio-thrift = { workspace = true } 22 | service-async = { workspace = true } 23 | http = { workspace = true } 24 | anyhow = { workspace = true } 25 | thiserror = { workspace = true } 26 | serde = { workspace = true, features = ["derive"] } 27 | tracing = { workspace = true } 28 | bytes = { workspace = true } 29 | 30 | # futures 31 | futures-util = { version = "0.3", features = ["sink"] } 32 | futures-channel = { version = "0.3", features = ["sink"] } 33 | 34 | sha2 = "0" 35 | hex = "0" 36 | derive_more = "0.99.0" 37 | 38 | [target.'cfg(target_os = "linux")'.dependencies] 39 | io-uring = "0.6" 40 | -------------------------------------------------------------------------------- /monolake-core/src/config/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
Runtime configuration and service setup for asynchronous networking applications. 2 | //! 3 | //! This module provides structures and enums for configuring the runtime environment 4 | //! and services in networking applications. It includes options for worker threads, 5 | //! I/O event handling, and runtime type selection. 6 | //! 7 | //! # Key Components 8 | //! 9 | //! - [`ServiceConfig`]: A generic configuration structure for services. 10 | //! - [`RuntimeConfig`]: Configuration options for the runtime environment. 11 | //! - [`RuntimeType`]: Enum representing different runtime implementation options. 12 | use std::num::NonZeroUsize; 13 | 14 | use serde::{Deserialize, Serialize}; 15 | 16 | // Default iouring/epoll entries: 32k 17 | const DEFAULT_ENTRIES: u32 = 32768; 18 | 19 | pub const FALLBACK_PARALLELISM: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(1) }; 20 | 21 | /// Configuration structure for a service, combining listener and server configs. 22 | /// 23 | /// # Type Parameters 24 | /// 25 | /// - `LC`: The type of the listener configuration. 26 | /// - `SC`: The type of the server configuration. 27 | #[derive(Debug, Clone, Serialize, Deserialize)] 28 | pub struct ServiceConfig { 29 | /// Configuration for the service listener. 30 | pub listener: LC, 31 | /// Configuration for the server component of the service. 32 | #[serde(flatten)] 33 | pub server: SC, 34 | } 35 | 36 | /// Configuration options for the runtime environment. 37 | /// 38 | /// This structure allows for fine-tuning of the runtime, including worker threads, 39 | /// I/O multiplexing, and CPU affinity settings. 40 | #[derive(Debug, Clone, Serialize, Deserialize)] 41 | pub struct RuntimeConfig { 42 | /// Number of worker threads for the runtime. 43 | #[serde(default = "default_workers")] 44 | pub worker_threads: usize, 45 | 46 | /// Number of I/O entries for event handling for io_uring. 47 | #[serde(default = "default_entries")] 48 | pub entries: u32, 49 | 50 | /// Idle timeout for squall polling (io_uring specific). 51 | pub sqpoll_idle: Option, 52 | 53 | /// The type of runtime to use. 54 | #[serde(default)] 55 | pub runtime_type: RuntimeType, 56 | 57 | /// Whether to enable CPU affinity for worker threads. 58 | #[serde(default = "default_cpu_affinity")] 59 | pub cpu_affinity: bool, 60 | 61 | /// Optional thread pool size for specific runtime implementations. 62 | pub thread_pool: Option, 63 | } 64 | 65 | impl Default for RuntimeConfig { 66 | fn default() -> Self { 67 | RuntimeConfig { 68 | worker_threads: default_workers(), 69 | entries: default_entries(), 70 | sqpoll_idle: None, 71 | runtime_type: Default::default(), 72 | cpu_affinity: default_cpu_affinity(), 73 | thread_pool: None, 74 | } 75 | } 76 | } 77 | 78 | /// Enum representing different runtime implementation options. 79 | /// 80 | /// This allows for selection between different runtime backends, 81 | /// such as io_uring on Linux or a legacy implementation on other platforms. 82 | #[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)] 83 | #[serde(rename_all = "snake_case")] 84 | pub enum RuntimeType { 85 | /// io_uring-based runtime (Linux only). 86 | #[cfg(target_os = "linux")] 87 | IoUring, 88 | 89 | /// Legacy runtime implementation. 90 | Legacy, 91 | } 92 | impl Default for RuntimeType { 93 | #[cfg(target_os = "linux")] 94 | fn default() -> Self { 95 | Self::IoUring 96 | } 97 | #[cfg(not(target_os = "linux"))] 98 | fn default() -> Self { 99 | Self::Legacy 100 | } 101 | } 102 | 103 | macro_rules! 
define_const { 104 | ($name: ident, $val: expr, $type: ty) => { 105 | const fn $name() -> $type { 106 | $val 107 | } 108 | }; 109 | } 110 | 111 | fn default_workers() -> usize { 112 | std::thread::available_parallelism() 113 | .unwrap_or(FALLBACK_PARALLELISM) 114 | .into() 115 | } 116 | 117 | define_const!(default_entries, DEFAULT_ENTRIES, u32); 118 | define_const!(default_cpu_affinity, false, bool); 119 | 120 | // #[cfg(test)] 121 | // mod tests { 122 | // use super::Config; 123 | 124 | // #[test] 125 | // fn test_json_deserialize() { 126 | // const TEST_CONFIG: &str = 127 | // " 128 | // { 129 | // \"servers\": { 130 | // \"test-server\": { 131 | // \ 132 | // \"name\": \"test\", 133 | // \"listener\": {\"socket_addr\" : \ 134 | // \"0.0.0.0:8080\"}, 135 | // \"routes\": [{ 136 | // \ 137 | // \"path\": \"/\", 138 | // \"upstreams\": [{ 139 | // \ 140 | // \"endpoint\": {\"uds_path\":\"/tmp/test\"},\"weight\": 1 }, { 141 | // \ 142 | // \"endpoint\": {\"uri\":\"https://gateway.example.com/\"},\"weight\": 2 }] }] 143 | // } 144 | // } 145 | // } 146 | // "; 147 | 148 | // let config = Config::from_slice(TEST_CONFIG.as_bytes()).unwrap(); 149 | // assert_eq!("test-server", config.servers.keys().next().unwrap()); 150 | // } 151 | 152 | // #[test] 153 | // fn test_toml_deserialize() { 154 | // const TEST_CONFIG: &str = " 155 | // [servers.test-server] 156 | // name = 'gateway.example.com' 157 | // listener = { socket_addr = '[::]:8080' } 158 | 159 | // [[servers.test-server.routes]] 160 | // path = '/' 161 | // id = 'test' 162 | 163 | // [[servers.test-server.routes.upstreams]] 164 | // endpoint = {uri = 'test'} 165 | // weight = 1 166 | 167 | // [[servers.test-server.routes.upstreams]] 168 | // endpoint = {uds_path = '/tmp/def.sock'} 169 | // weight = 2 170 | // "; 171 | 172 | // let config: Config = Config::from_slice(TEST_CONFIG.as_bytes()).unwrap(); 173 | // assert_eq!("test-server", config.servers.keys().next().unwrap()); 174 | // } 175 | // } 176 | -------------------------------------------------------------------------------- /monolake-core/src/context.rs: -------------------------------------------------------------------------------- 1 | //! Common data used in context of Service processing. 2 | use derive_more::{From, Into}; 3 | 4 | use crate::listener::AcceptedAddr; 5 | 6 | #[derive(From, Into, Debug, Clone)] 7 | pub struct PeerAddr(pub AcceptedAddr); 8 | 9 | #[derive(From, Into, Debug, Clone)] 10 | pub struct RemoteAddr(pub AcceptedAddr); 11 | -------------------------------------------------------------------------------- /monolake-core/src/error.rs: -------------------------------------------------------------------------------- 1 | /// A type alias for `anyhow::Error`, representing any error type. 2 | /// 3 | /// This type is used throughout the crate to represent errors that can be of any type, 4 | /// leveraging the flexibility of the `anyhow` crate for error handling. 5 | pub type AnyError = anyhow::Error; 6 | 7 | /// A type alias for `Result` where `E` defaults to [`AnyError`](AnyError). 8 | /// 9 | /// This type provides a convenient way to return results that can contain any error type, 10 | /// defaulting to [`AnyError`] if no specific error type is specified. 11 | /// 12 | /// # Type Parameters 13 | /// 14 | /// * `T` - The type of the successful result. 15 | /// * `E` - The error type, defaulting to [`AnyError`]. 16 | pub type AnyResult = std::result::Result; 17 | #[macro_export] 18 | macro_rules! bail_into { 19 | ($msg:literal $(,)?) 
=> { 20 | return Err(::anyhow::anyhow!($msg).into()) 21 | }; 22 | ($err:expr $(,)?) => { 23 | return Err(::anyhow::anyhow!($err).into()) 24 | }; 25 | ($fmt:expr, $($arg:tt)*) => { 26 | return Err(::anyhow::anyhow!($fmt, $($arg)*).into()) 27 | }; 28 | } 29 | -------------------------------------------------------------------------------- /monolake-core/src/http/mod.rs: -------------------------------------------------------------------------------- 1 | //! HTTP handling traits and types for asynchronous services. 2 | //! 3 | //! This module provides traits and types for implementing HTTP handlers 4 | //! that can be used with asynchronous services. It defines a common interface 5 | //! for processing HTTP requests and generating responses, with support for 6 | //! connection management and context-aware handling. 7 | //! 8 | //! # Key Components 9 | //! 10 | //! - [`HttpHandler`]: A trait for implementing HTTP request handlers. 11 | //! - [`ResponseWithContinue`]: A type alias for responses that indicate whether to continue 12 | //! processing the connection. 13 | //! - [`HttpAccept`]: A type alias for connection acceptance information. 14 | //! 15 | //! # Usage 16 | //! 17 | //! The `HttpHandler` trait is automatically implemented for types 18 | //! that implement the [`Service`] trait with 19 | //! request type `(Request, CX)` and return type 20 | //! [`ResponseWithContinue`]. 21 | use std::future::Future; 22 | 23 | use http::{Request, Response}; 24 | use service_async::Service; 25 | 26 | use crate::sealed::SealedT; 27 | 28 | /// A tuple representing an HTTP response along with a connection continuation flag. 29 | /// 30 | /// # Type Parameters 31 | /// 32 | /// - `B`: The body type of the response. 33 | /// 34 | /// # Fields 35 | /// 36 | /// - `Response`: The HTTP response. 37 | /// - `bool`: A flag indicating whether to continue processing the connection. 38 | /// - `true`: Continue processing the connection. 39 | /// - `false`: Close the connection after sending the response. 40 | /// 41 | /// Note: The service does not need to add the `Connection: close` header itself; 42 | /// this is handled by the HTTP core service based on this flag. 43 | // TODO: replace it with HttpError. 44 | pub type ResponseWithContinue = (Response, bool); 45 | 46 | pub trait HttpError { 47 | /// If an error can be turned to an HTTP response, it means the error is 48 | /// a recoverable error and the connection can be kept alive. 49 | fn to_response(&self) -> Option>; 50 | } 51 | 52 | #[derive(Debug, Clone, Default, Copy, PartialEq, Eq)] 53 | pub struct HttpFatalError(pub E); 54 | impl HttpError for HttpFatalError { 55 | #[inline] 56 | fn to_response(&self) -> Option> { 57 | None 58 | } 59 | } 60 | 61 | /// A tuple representing an accepted HTTP connection with its context. 62 | /// 63 | /// # Type Parameters 64 | /// 65 | /// - `Stream`: The type of the I/O stream for the connection. 66 | /// - `CX`: The type of the connection context, typically a `certain_map`. 67 | /// 68 | /// # Fields 69 | /// 70 | /// - `bool`: Indicates whether the connection is using HTTP/2. 71 | /// - `true`: The connection is using HTTP/2. 72 | /// - `false`: The connection is using HTTP/1.x. 73 | /// - `Stream`: The I/O stream for the connection. 74 | /// - `CX`: The context of the connection, providing additional information or state. 75 | pub type HttpAccept = (bool, Stream, CX); 76 | 77 | struct HttpSeal; 78 | 79 | /// A trait for HTTP request handlers. 
80 | /// 81 | /// This trait defines the interface for processing HTTP requests and generating responses. 82 | /// It is designed to work with asynchronous services and supports context-aware handling. 83 | /// 84 | /// Implementors of this trait can process HTTP requests and return responses along with 85 | /// a boolean flag indicating whether to continue processing the connection. 86 | /// 87 | /// # Type Parameters 88 | /// 89 | /// - `CX`: The context type for additional request processing information. 90 | /// - `B`: The body type of the incoming request. 91 | /// 92 | /// # Associated Types 93 | /// 94 | /// - `Body`: The body type of the outgoing response. 95 | /// - `Error`: The error type that may occur during request handling. 96 | /// 97 | /// # Examples 98 | /// 99 | /// ```ignore 100 | /// use your_crate::{HttpHandler, ResponseWithContinue}; 101 | /// use http::{Request, Response}; 102 | /// 103 | /// struct MyHandler; 104 | /// 105 | /// impl HttpHandler<(), Vec> for MyHandler { 106 | /// type Body = Vec; 107 | /// type Error = std::io::Error; 108 | /// 109 | /// async fn handle(&self, request: Request>, ctx: ()) 110 | /// -> Result, Self::Error> { 111 | /// // Process the request and generate a response 112 | /// let response = Response::new(Vec::new()); 113 | /// Ok((response, true)) 114 | /// } 115 | /// } 116 | /// ``` 117 | /// 118 | /// The [`HttpHandler`] trait is automatically implemented for types 119 | /// that implement the [`Service`] trait with 120 | /// request type `(Request, CX)` and return type 121 | /// [`ResponseWithContinue`]. 122 | #[allow(private_bounds)] 123 | pub trait HttpHandler: SealedT { 124 | type Body; 125 | type Error; 126 | 127 | fn handle( 128 | &self, 129 | request: Request, 130 | ctx: CX, 131 | ) -> impl Future, Self::Error>>; 132 | } 133 | 134 | impl SealedT for T where 135 | T: Service<(Request, CX), Response = ResponseWithContinue> 136 | { 137 | } 138 | 139 | impl HttpHandler for T 140 | where 141 | T: Service<(Request, CX), Response = ResponseWithContinue>, 142 | { 143 | type Body = OB; 144 | type Error = T::Error; 145 | 146 | async fn handle( 147 | &self, 148 | req: Request, 149 | ctx: CX, 150 | ) -> Result, Self::Error> { 151 | self.call((req, ctx)).await 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /monolake-core/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # monolake-core 2 | //! 3 | //! `monolake-core` is a foundational crate for building high-performance, thread-per-core network 4 | //! services. It provides a robust framework for worker orchestration, service deployment, and 5 | //! lifecycle management, supporting protocols such as HTTP and Thrift. This crate builds upon 6 | //! concepts from the `service_async` crate to implement a thread-per-core worker system with 7 | //! advanced service management capabilities. 8 | //! 9 | //! ## Key Features 10 | //! 11 | //! - **Network Service Foundation**: Core building blocks for creating efficient network services. 12 | //! - **Thread-per-Core Architecture**: Maximizes performance on multi-core processors. 13 | //! - **Service Lifecycle Management**: Seamless updates and deployments of service chains. 14 | //! - **Flexible Deployment Models**: Support for both single-stage and two-stage service deployment 15 | //! processes. 16 | //! - **State Transfer**: Facilitate updates with state preservation between service versions. 17 | //! 
- **Protocol Support**: Built-in support for HTTP and Thrift protocols. 18 | //! - **Asynchronous Design**: Leverages Rust's async capabilities for efficient, non-blocking 19 | //! operations. 20 | //! 21 | //! ## Service and Service Factory Concepts 22 | //! 23 | //! This crate builds upon the `service_async` crate, providing: 24 | //! 25 | //! - A refined [`Service`](service_async::Service) trait that leverages `impl Trait` for improved 26 | //! performance and flexibility. 27 | //! - The [`AsyncMakeService`](service_async::AsyncMakeService) trait for efficient creation and 28 | //! updating of services, particularly useful for managing stateful resources across service 29 | //! updates. 30 | //! 31 | //! `monolake-core` extends these concepts to provide a comprehensive system for managing network 32 | //! services in a thread-per-core architecture. 33 | //! 34 | //! ## Pre-built Services 35 | //! 36 | //! While `monolake-core` provides the foundation, you can find pre-built services for common 37 | //! protocols in the `monolake-services` crate. This includes ready-to-use implementations for: 38 | //! 39 | //! - HTTP services 40 | //! - Thrift services 41 | //! 42 | //! These pre-built services can be easily integrated into your `monolake-core` based applications, 43 | //! speeding up development for standard network service scenarios. 44 | //! 45 | //! ## Worker-Service Lifecycle Management 46 | //! 47 | //! The core of this crate is the worker-service lifecycle management system, implemented in the 48 | //! [`orchestrator`] module. Key components include: 49 | //! 50 | //! - [`WorkerManager`](orchestrator::WorkerManager): Manages multiple worker threads, each running 51 | //! on a dedicated CPU core. 52 | //! - [`ServiceExecutor`](orchestrator::ServiceExecutor): Handles the lifecycle of services within a 53 | //! single worker thread. 54 | //! - [`ServiceDeploymentContainer`](orchestrator::ServiceDeploymentContainer): Manages individual 55 | //! service instances, including precommitting and deployment. 56 | //! - [`ServiceCommand`](orchestrator::ServiceCommand): Represents actions to be performed on 57 | //! services, such as precommitting, updating, or removing. 58 | //! 59 | //! This system supports dynamic updating of deployed services: 60 | //! 61 | //! - You can update a currently deployed service with a new service chain. 62 | //! - Existing connections continue to use the old service chain. 63 | //! - New connections automatically use the latest service chain. 64 | //! 65 | //! This approach ensures smooth transitions during updates with minimal disruption to ongoing 66 | //! operations. 67 | //! 68 | //! ## Deployment Models 69 | //! 70 | //! The system supports two deployment models: 71 | //! 72 | //! 1. **Two-Stage Deployment**: Ideal for updating services while preserving state. 73 | //! - Precommit a service using [`Precommit`](orchestrator::ServiceCommand::Precommit). 74 | //! - Update using [`Update`](orchestrator::ServiceCommand::Update) or commit using 75 | //! [`Commit`](orchestrator::ServiceCommand::Commit). 76 | //! 77 | //! 2. **Single-Stage Deployment**: Suitable for initial deployments or when state preservation 78 | //! isn't necessary. 79 | //! - Create and deploy in one step using 80 | //! [`PrepareAndCommit`](orchestrator::ServiceCommand::PrepareAndCommit). 81 | //! 82 | //! ## Protocol Handlers 83 | //! 84 | //! ### HTTP Handler 85 | //! 86 | //! The [`http`] module provides the [`HttpHandler`](http::HttpHandler) trait for 87 | //! 
implementing HTTP request handlers. It supports context-aware handling and connection 88 | //! management. 89 | //! 90 | //! ### Thrift Handler 91 | //! 92 | //! The [`thrift`] module offers the [`ThriftHandler`](thrift::ThriftHandler) trait for 93 | //! implementing Thrift request handlers. 94 | //! 95 | //! Both handler traits are automatically implemented for types that implement the 96 | //! [`Service`](service_async::Service) trait with appropriate request and response types. 97 | //! 98 | //! ## Usage Example 99 | //! 100 | //! ```ignore 101 | //! let mut manager = WorkerManager::new(config.runtime); 102 | //! let join_handlers = manager.spawn_workers_async(); 103 | //! for (name, ServiceConfig { listener, server }) in config.servers.into_iter() { 104 | //! let lis_fac = ListenerBuilder::try_from(listener).expect("build listener failed"); 105 | //! let svc_fac = l7_factory(server); 106 | //! manager 107 | //! .dispatch_service_command(ServiceCommand::PrepareAndCommit( 108 | //! Arc::new(name), 109 | //! AsyncMakeServiceWrapper(svc_fac), 110 | //! AsyncMakeServiceWrapper(Arc::new(lis_fac)), 111 | //! )) 112 | //! .await 113 | //! .err() 114 | //! .expect("apply init config failed"); 115 | //! } 116 | //! ``` 117 | //! 118 | //! ## Modules 119 | //! 120 | //! - [`orchestrator`]: Core functionality for worker management and service deployment. 121 | //! - [`http`]: HTTP-specific implementations and utilities. 122 | //! - [`thrift`]: Thrift protocol support and related functionalities. 123 | //! - [`config`]: Configuration structures and utilities for the system. 124 | //! - [`context`]: Context management for request processing. 125 | //! - [`listener`]: Network listener implementations and abstractions. 126 | //! - [`util`]: Various utility functions and helpers. 127 | //! 128 | //! ## Error Handling 129 | //! 130 | //! This crate uses [`AnyError`] as a type alias for `anyhow::Error`, providing flexible error 131 | //! handling. The [`AnyResult`] type alias offers a convenient way to return results that can 132 | //! contain any error type. 133 | #[macro_use] 134 | mod error; 135 | pub use error::{AnyError, AnyResult}; 136 | 137 | pub mod config; 138 | pub mod context; 139 | pub mod http; 140 | pub mod listener; 141 | pub mod orchestrator; 142 | pub mod thrift; 143 | pub mod util; 144 | 145 | pub(crate) mod sealed { 146 | #[allow(dead_code)] 147 | pub trait Sealed {} 148 | #[allow(dead_code)] 149 | pub trait SealedT {} 150 | } 151 | -------------------------------------------------------------------------------- /monolake-core/src/orchestrator/mod.rs: -------------------------------------------------------------------------------- 1 | //! Worker and service lifecycle management for thread-per-core network services. 2 | //! 3 | //! This module provides the core functionality for managing workers and services 4 | //! in a thread-per-core architecture. It implements a flexible and efficient system 5 | //! for deploying, updating, and managing services across multiple worker threads. 6 | //! 7 | //! # Key Components 8 | //! 9 | //! - [`WorkerManager`]: Manages the entire fleet of worker threads. 10 | //! - [`ServiceExecutor`]: Handles service lifecycle within a single worker thread. 11 | //! - [`ServiceDeploymentContainer`]: Manages the deployment and updates of individual services. 12 | //! - [`ServiceCommand`]: Represents actions to be performed on services. 13 | //! - [`ResultGroup`]: Aggregates results from operations across multiple workers. 14 | //! 15 | //! 
# Deployment Models 16 | //! 17 | //! This module supports two deployment models: 18 | //! 19 | //! 1. Two-Stage Deployment: For updating services with state preservation. 20 | //! - Precommit a service using [`Precommit`](ServiceCommand::Precommit). 21 | //! - Update or commit using [`Update`](ServiceCommand::Update) or 22 | //! [`Commit`](ServiceCommand::Commit). 23 | //! 24 | //! 2. Single-Stage Deployment: For initial deployments or stateless updates. 25 | //! - Deploy in one step using [`PrepareAndCommit`](ServiceCommand::PrepareAndCommit). 26 | //! 27 | //! # Service Lifecycle 28 | //! 29 | //! Services can be dynamically updated while the system is running: 30 | //! - Existing connections continue using the current service version. 31 | //! - New connections use the latest deployed version. 32 | //! 33 | //! This module is designed to work seamlessly with the `service_async` crate, 34 | //! leveraging its [`Service`] and [`AsyncMakeService`](service_async::AsyncMakeService) 35 | //! traits for efficient service creation and management. 36 | use std::fmt::Debug; 37 | 38 | use futures_channel::oneshot::Sender as OSender; 39 | use monoio::io::stream::Stream; 40 | use service_async::Service; 41 | use tracing::{debug, error, info, warn}; 42 | 43 | use self::runtime::RuntimeWrapper; 44 | 45 | mod runtime; 46 | mod service_executor; 47 | mod worker_manager; 48 | 49 | pub use service_executor::{ 50 | Execute, ServiceCommand, ServiceCommandTask, ServiceDeploymentContainer, ServiceExecutor, 51 | ServiceSlot, 52 | }; 53 | pub use worker_manager::{JoinHandlesWithOutput, WorkerManager}; 54 | 55 | /// A collection of results from multiple worker operations. 56 | /// 57 | /// [`ResultGroup`] is typically used to aggregate the results of dispatching 58 | /// a [`ServiceCommand`] to multiple workers in a [`WorkerManager`]. 59 | /// It provides a convenient way to handle and process multiple results as a single unit. 60 | pub struct ResultGroup<T, E>(Vec<Result<T, E>>); 61 | 62 | impl<T, E> From<Vec<Result<T, E>>> for ResultGroup<T, E> { 63 | fn from(value: Vec<Result<T, E>>) -> Self { 64 | Self(value) 65 | } 66 | } 67 | 68 | impl<T, E> From<ResultGroup<T, E>> for Vec<Result<T, E>> { 69 | fn from(value: ResultGroup<T, E>) -> Self { 70 | value.0 71 | } 72 | } 73 | 74 | impl<E> ResultGroup<(), E> { 75 | pub fn err(self) -> Result<(), E> { 76 | for r in self.0.into_iter() { 77 | r?; 78 | } 79 | Ok(()) 80 | } 81 | } 82 | 83 | /// Serves incoming connections using the provided listener and service. 84 | /// 85 | /// This function runs a loop that continuously accepts new connections and handles them 86 | /// using the provided service. It can be gracefully stopped using the provided `stop` channel. 87 | /// 88 | /// # Behavior 89 | /// 90 | /// The function will run until one of the following occurs: 91 | /// - The `stop` channel is triggered, indicating a graceful shutdown. 92 | /// - The listener closes, indicating no more incoming connections. 93 | /// 94 | /// For each accepted connection, a new task is spawned to handle it using the provided service. 95 | pub async fn serve<S, Svc, E, A>(mut listener: S, handler: ServiceSlot<Svc>, mut stop: OSender<()>) 96 | where 97 | S: Stream<Item = Result<A, E>> + 'static, 98 | E: Debug, 99 | Svc: Service<A> + 'static, 100 | Svc::Error: Debug, 101 | A: 'static, 102 | { 103 | let mut cancellation = stop.cancellation(); 104 | loop { 105 | monoio::select!
{ 106 | _ = &mut cancellation => { 107 | info!("server is notified to stop"); 108 | break; 109 | } 110 | accept_opt = listener.next() => { 111 | let accept = match accept_opt { 112 | Some(accept) => accept, 113 | None => { 114 | info!("listener is closed, serve stopped"); 115 | return; 116 | } 117 | }; 118 | match accept { 119 | Ok(accept) => { 120 | let svc = handler.get_svc(); 121 | monoio::spawn(async move { 122 | match svc.call(accept).await { 123 | Ok(_) => { 124 | debug!("Connection complete"); 125 | } 126 | Err(e) => { 127 | error!("Connection error: {e:?}"); 128 | } 129 | } 130 | }); 131 | } 132 | Err(e) => warn!("Accept connection failed: {e:?}"), 133 | } 134 | } 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /monolake-core/src/orchestrator/runtime.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | 3 | #[cfg(target_os = "linux")] 4 | use monoio::IoUringDriver; 5 | 6 | #[cfg(target_os = "linux")] 7 | const MIN_SQPOLL_IDLE_TIME: u32 = 1000; 8 | 9 | use monoio::{time::TimeDriver, LegacyDriver, Runtime, RuntimeBuilder}; 10 | 11 | use crate::config::{RuntimeConfig, RuntimeType}; 12 | 13 | pub enum RuntimeWrapper { 14 | #[cfg(target_os = "linux")] 15 | IoUring(Runtime>), 16 | Legacy(Runtime>), 17 | } 18 | 19 | impl RuntimeWrapper { 20 | pub fn new( 21 | _config: &RuntimeConfig, 22 | thread_pool: Option>, 23 | ) -> Self { 24 | #[cfg(target_os = "linux")] 25 | let runtime_type = 26 | if _config.runtime_type == RuntimeType::IoUring && monoio::utils::detect_uring() { 27 | RuntimeType::IoUring 28 | } else { 29 | RuntimeType::Legacy 30 | }; 31 | #[cfg(not(target_os = "linux"))] 32 | let runtime_type = RuntimeType::Legacy; 33 | 34 | match runtime_type { 35 | #[cfg(target_os = "linux")] 36 | RuntimeType::IoUring => { 37 | let builder = match _config.sqpoll_idle { 38 | Some(idle) => { 39 | let builder = RuntimeBuilder::::new(); 40 | let idle = MIN_SQPOLL_IDLE_TIME.max(idle); 41 | let mut uring_builder = io_uring::IoUring::builder(); 42 | uring_builder.setup_sqpoll(idle); 43 | builder.uring_builder(uring_builder) 44 | } 45 | None => RuntimeBuilder::::new(), 46 | }; 47 | let mut builder = builder.enable_timer().with_entries(_config.entries); 48 | if let Some(tp) = thread_pool { 49 | builder = builder.attach_thread_pool(tp); 50 | } 51 | let runtime = builder.build().unwrap(); 52 | RuntimeWrapper::IoUring(runtime) 53 | } 54 | RuntimeType::Legacy => { 55 | let mut builder = RuntimeBuilder::::new().enable_timer(); 56 | if let Some(tp) = thread_pool { 57 | builder = builder.attach_thread_pool(tp); 58 | } 59 | let runtime = builder.build().unwrap(); 60 | RuntimeWrapper::Legacy(runtime) 61 | } 62 | } 63 | } 64 | } 65 | 66 | impl RuntimeWrapper { 67 | pub fn block_on(&mut self, future: F) -> F::Output 68 | where 69 | F: Future, 70 | { 71 | match self { 72 | #[cfg(target_os = "linux")] 73 | RuntimeWrapper::IoUring(driver) => driver.block_on(future), 74 | RuntimeWrapper::Legacy(driver) => driver.block_on(future), 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /monolake-core/src/thrift/mod.rs: -------------------------------------------------------------------------------- 1 | //! Thrift protocol handling for asynchronous services. 2 | //! 3 | //! This module provides traits and types for implementing Thrift handlers 4 | //! that can be used with asynchronous services. It defines a common interface 5 | //! 
for processing Thrift requests and generating responses, with support for 6 | //! context-aware handling. 7 | //! 8 | //! # Key Components 9 | //! 10 | //! - [`ThriftHandler`]: A trait for implementing Thrift request handlers. 11 | //! - [`ThriftRequest`]: A type alias for Thrift requests using TTHeader protocol. 12 | //! - [`ThriftResponse`]: A type alias for Thrift responses using TTHeader protocol. 13 | //! - [`ThriftBody`]: A type alias for the payload of Thrift requests and responses. 14 | 15 | use std::future::Future; 16 | 17 | use monoio_thrift::codec::ttheader::TTHeaderPayload; 18 | use service_async::Service; 19 | 20 | use crate::sealed::SealedT; 21 | 22 | /// Type alias for the Thrift request/response body. 23 | /// 24 | /// Currently uses `bytes::Bytes` for efficient memory management. 25 | /// TODO: Support discontinuous memory in the future. 26 | pub type ThriftBody = bytes::Bytes; 27 | 28 | /// Type alias for a Thrift request using TTHeader protocol. 29 | pub type ThriftRequest<T> = TTHeaderPayload<T>; 30 | 31 | /// Type alias for a Thrift response using TTHeader protocol. 32 | pub type ThriftResponse<T> = TTHeaderPayload<T>; 33 | 34 | struct ThriftSeal; 35 | 36 | /// A trait for Thrift request handlers. 37 | /// 38 | /// This trait defines the interface for processing Thrift requests and generating responses. 39 | /// It is designed to work with asynchronous services and supports context-aware handling. 40 | /// 41 | /// # Type Parameters 42 | /// 43 | /// - `CX`: The context type for additional request processing information. 44 | /// 45 | /// # Associated Types 46 | /// 47 | /// - `Error`: The error type that may occur during request handling. 48 | /// 49 | /// # Examples 50 | /// 51 | /// ```ignore 52 | /// use your_crate::{ThriftHandler, ThriftRequest, ThriftResponse, ThriftBody}; 53 | /// 54 | /// struct MyThriftHandler; 55 | /// 56 | /// impl ThriftHandler<()> for MyThriftHandler { 57 | /// type Error = std::io::Error; 58 | /// 59 | /// async fn handle(&self, request: ThriftRequest<ThriftBody>, ctx: ()) 60 | /// -> Result<ThriftResponse<ThriftBody>, Self::Error> { 61 | /// // Process the Thrift request and generate a response 62 | /// let response = ThriftResponse::new(/* ... */); 63 | /// Ok(response) 64 | /// } 65 | /// } 66 | /// ``` 67 | /// 68 | /// The `ThriftHandler` trait is automatically implemented for types that implement the `Service` 69 | /// trait with request type `(ThriftRequest<ThriftBody>, CX)` and response type 70 | /// `ThriftResponse<ThriftBody>`.
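///
/// # Using the blanket implementation
///
/// A minimal sketch of driving any conforming handler through this trait; the unit
/// context `()` and the helper name `relay` are illustrative placeholders, not items
/// defined by this crate:
///
/// ```ignore
/// async fn relay<H>(
///     handler: &H,
///     req: ThriftRequest<ThriftBody>,
/// ) -> Result<ThriftResponse<ThriftBody>, H::Error>
/// where
///     H: ThriftHandler<()>,
/// {
///     handler.handle(req, ()).await
/// }
/// ```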
71 | #[allow(private_bounds)] 72 | pub trait ThriftHandler<CX>: SealedT<ThriftSeal, CX> { 73 | type Error; 74 | 75 | fn handle( 76 | &self, 77 | request: ThriftRequest<ThriftBody>, 78 | ctx: CX, 79 | ) -> impl Future<Output = Result<ThriftResponse<ThriftBody>, Self::Error>>; 80 | } 81 | 82 | impl<T, CX> SealedT<ThriftSeal, CX> for T where 83 | T: Service<(ThriftRequest<ThriftBody>, CX), Response = ThriftResponse<ThriftBody>> 84 | { 85 | } 86 | 87 | impl<T, CX> ThriftHandler<CX> for T 88 | where 89 | T: Service<(ThriftRequest<ThriftBody>, CX), Response = ThriftResponse<ThriftBody>>, 90 | { 91 | type Error = T::Error; 92 | 93 | async fn handle( 94 | &self, 95 | req: ThriftRequest<ThriftBody>, 96 | ctx: CX, 97 | ) -> Result<ThriftResponse<ThriftBody>, Self::Error> { 98 | self.call((req, ctx)).await 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /monolake-core/src/util/hash.rs: -------------------------------------------------------------------------------- 1 | use sha2::{Digest, Sha256}; 2 | 3 | pub fn sha256(token: &str) -> String { 4 | let mut hasher = Sha256::new(); 5 | hasher.update(token.as_bytes()); 6 | let result = hasher.finalize(); 7 | hex::encode(result) 8 | } 9 | 10 | #[cfg(test)] 11 | mod tests { 12 | use super::sha256; 13 | 14 | #[test] 15 | fn test_hash_with_sha256() { 16 | assert_eq!( 17 | "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9", 18 | sha256("hello world") 19 | ); 20 | assert_eq!( 21 | "8a5edab282632443219e051e4ade2d1d5bbc671c781051bf1437897cbdfea0f1", 22 | sha256("/") 23 | ); 24 | assert_eq!( 25 | "439b41782a6650352640cb3ab790a1151d23dd093f4f49577799c6b67f8d195c", 26 | sha256("/ping") 27 | ); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /monolake-core/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | //! Common helper functions. 2 | use std::path::Path; 3 | 4 | use monoio::buf::IoBufMut; 5 | 6 | pub mod hash; 7 | pub mod uri_serde; 8 | 9 | pub async fn file_read(path: impl AsRef<Path>) -> std::io::Result<Vec<u8>> { 10 | // monoio does not support statx yet, so use std to get the file length 11 | let file_length = { 12 | let file = std::fs::File::open(&path)?; 13 | file.metadata().map(|meta| meta.len() as usize)?
14 | }; 15 | 16 | let file = monoio::fs::File::open(path).await?; 17 | let buffer = unsafe { Vec::with_capacity(file_length).slice_mut_unchecked(0..file_length) }; 18 | let (res, buf) = file.read_exact_at(buffer, 0).await; 19 | res?; 20 | Ok(buf.into_inner()) 21 | } 22 | 23 | pub fn file_read_sync(path: impl AsRef) -> std::io::Result> { 24 | std::fs::read(path) 25 | } 26 | -------------------------------------------------------------------------------- /monolake-core/src/util/uri_serde.rs: -------------------------------------------------------------------------------- 1 | use http::Uri; 2 | use serde::{de, Deserialize, Deserializer, Serializer}; 3 | 4 | pub fn deserialize<'de, D>(deserializer: D) -> Result 5 | where 6 | D: Deserializer<'de>, 7 | { 8 | let s = String::deserialize(deserializer)?; 9 | s.parse().map_err(de::Error::custom) 10 | } 11 | 12 | pub fn serialize(uri: &Uri, serializer: S) -> Result 13 | where 14 | S: Serializer, 15 | { 16 | serializer.serialize_str(&uri.to_string()) 17 | } 18 | -------------------------------------------------------------------------------- /monolake-services/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "monolake-services" 3 | version = "0.3.2" 4 | description = "MonoLake Services Implementation" 5 | 6 | authors.workspace = true 7 | categories.workspace = true 8 | edition.workspace = true 9 | keywords.workspace = true 10 | license.workspace = true 11 | repository.workspace = true 12 | 13 | [features] 14 | default = [] 15 | openid = [ 16 | "tls", 17 | "dep:cookie", 18 | "dep:openidconnect", 19 | "dep:url", 20 | "dep:lazy_static", 21 | ] 22 | proxy-protocol = ["dep:proxy-protocol"] 23 | tls = [ 24 | "dep:monoio-rustls", 25 | "dep:rustls", 26 | "dep:rustls-pemfile", 27 | "dep:webpki-roots", 28 | "dep:monoio-native-tls", 29 | "dep:native-tls", 30 | ] 31 | vendored = ["native-tls?/vendored"] 32 | hyper = [ 33 | "dep:hyper", 34 | "dep:hyper-util", 35 | "dep:monoio-compat", 36 | "monoio/poll-io", 37 | "monoio-compat/hyper", 38 | "monolake-core/hyper", 39 | ] 40 | 41 | [dependencies] 42 | monoio = { workspace = true, features = ['splice'] } 43 | monoio-codec = { workspace = true } 44 | monoio-http = { workspace = true, features = ["encoding"] } 45 | monoio-thrift = { workspace = true } 46 | monoio-transports = { workspace = true, features = [ 47 | "logging", 48 | "rustls-unsafe-io", 49 | ] } 50 | local-sync = { workspace = true } 51 | service-async = { workspace = true } 52 | certain-map = { workspace = true } 53 | 54 | monolake-core = { version = "0.3.0", path = "../monolake-core" } 55 | 56 | # common 57 | bytes = { workspace = true } 58 | http = { workspace = true } 59 | anyhow = { workspace = true } 60 | thiserror = { workspace = true } 61 | serde = { workspace = true } 62 | tracing = { workspace = true } 63 | rand = "0.8" 64 | matchit = "0.8" 65 | pin-project-lite = "0.2" 66 | futures = "0.3" 67 | 68 | # for tls 69 | monoio-rustls = { workspace = true, optional = true } 70 | monoio-native-tls = { workspace = true, optional = true } 71 | native-tls = { workspace = true, optional = true } 72 | 73 | rustls = { version = "0.21", optional = true, default-features = false } 74 | rustls-pemfile = { version = "1", optional = true } 75 | webpki-roots = { version = "0.25.2", optional = true } 76 | 77 | # for hyper 78 | hyper = { version = "1.1", features = [ 79 | "http1", 80 | "client", 81 | "server", 82 | ], optional = true } 83 | hyper-util = { version = "0.1.3", features = [ 84 | "http1", 
85 | "client", 86 | "server-auto", 87 | ], optional = true } 88 | monoio-compat = { version = "0.2.2", features = ["hyper"], optional = true } 89 | 90 | # for openid 91 | cookie = { version = "0.18", optional = true } 92 | openidconnect = { version = "3", optional = true } 93 | url = { version = "2.3.1", optional = true } 94 | lazy_static = { version = "1.4.0", optional = true } 95 | 96 | # for proxy protocol 97 | proxy-protocol = { version = "0.5.0", optional = true } 98 | -------------------------------------------------------------------------------- /monolake-services/src/common/cancel/linked_list.rs: -------------------------------------------------------------------------------- 1 | /// Vec based linked list. 2 | pub struct LinkedList { 3 | head: usize, 4 | tail: usize, 5 | vacancy_head: usize, 6 | data: Vec>, 7 | } 8 | 9 | pub struct Node { 10 | prev: usize, 11 | next: usize, 12 | data: Option, 13 | } 14 | 15 | pub const NULL: usize = usize::MAX; 16 | 17 | impl Default for LinkedList { 18 | fn default() -> Self { 19 | Self::new() 20 | } 21 | } 22 | 23 | impl LinkedList { 24 | pub const fn new() -> Self { 25 | Self { 26 | head: NULL, 27 | tail: NULL, 28 | vacancy_head: NULL, 29 | data: Vec::new(), 30 | } 31 | } 32 | 33 | pub fn get(&self, idx: usize) -> Option<&T> { 34 | self.data.get(idx).and_then(|node| node.data.as_ref()) 35 | } 36 | 37 | pub fn get_mut(&mut self, idx: usize) -> Option<&mut T> { 38 | self.data.get_mut(idx).and_then(|node| node.data.as_mut()) 39 | } 40 | 41 | pub fn push_back(&mut self, val: T) -> usize { 42 | let idx = if self.vacancy_head != NULL { 43 | let idx = self.vacancy_head; 44 | let node = &mut self.data[idx]; 45 | self.vacancy_head = node.next; 46 | node.next = NULL; 47 | node.data = Some(val); 48 | idx 49 | } else { 50 | let idx = self.data.len(); 51 | self.data.push(Node { 52 | prev: NULL, 53 | next: NULL, 54 | data: Some(val), 55 | }); 56 | idx 57 | }; 58 | 59 | if self.tail == NULL { 60 | self.head = idx; 61 | self.tail = idx; 62 | } else { 63 | let tail = &mut self.data[self.tail]; 64 | tail.next = idx; 65 | self.data[idx].prev = self.tail; 66 | self.tail = idx; 67 | } 68 | 69 | idx 70 | } 71 | 72 | pub fn remove(&mut self, idx: usize) -> Option { 73 | if idx >= self.data.len() { 74 | return None; 75 | } 76 | 77 | let node = &mut self.data[idx]; 78 | let val = node.data.take()?; 79 | let prev = node.prev; 80 | let next = node.next; 81 | 82 | if prev == NULL { 83 | self.head = next; 84 | } else { 85 | self.data[prev].next = next; 86 | } 87 | 88 | if next == NULL { 89 | self.tail = prev; 90 | } else { 91 | self.data[next].prev = prev; 92 | } 93 | 94 | self.data[idx].next = self.vacancy_head; 95 | self.vacancy_head = idx; 96 | Some(val) 97 | } 98 | } 99 | 100 | impl Drop for LinkedList { 101 | // Manually drop the data to make it more efficient. 
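    // Only nodes still reachable from `head` can hold a live value, so we take and
    // drop just those payloads; `set_len(0)` then lets the Vec's own drop skip
    // iterating (and dropping) every slot, including the vacant ones.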
102 | fn drop(&mut self) { 103 | let mut head = self.head; 104 | while head != NULL { 105 | let node = &mut self.data[head]; 106 | node.data.take(); 107 | head = node.next; 108 | } 109 | unsafe { self.data.set_len(0) }; 110 | } 111 | } 112 | 113 | impl IntoIterator for LinkedList { 114 | type Item = T; 115 | type IntoIter = LinkedListIter; 116 | 117 | fn into_iter(mut self) -> Self::IntoIter { 118 | let head = std::mem::replace(&mut self.head, NULL); 119 | let data = std::mem::take(&mut self.data); 120 | LinkedListIter { head, data } 121 | } 122 | } 123 | 124 | pub struct LinkedListIter { 125 | head: usize, 126 | data: Vec>, 127 | } 128 | 129 | impl Iterator for LinkedListIter { 130 | type Item = T; 131 | fn next(&mut self) -> Option { 132 | if self.head == NULL { 133 | return None; 134 | } 135 | 136 | let node = &mut self.data[self.head]; 137 | let val = node.data.take(); 138 | self.head = node.next; 139 | val 140 | } 141 | } 142 | 143 | impl Drop for LinkedListIter { 144 | // Manually drop the data to make it more efficient. 145 | fn drop(&mut self) { 146 | let mut head = self.head; 147 | while head != NULL { 148 | let node = &mut self.data[head]; 149 | node.data.take(); 150 | head = node.next; 151 | } 152 | unsafe { self.data.set_len(0) }; 153 | } 154 | } 155 | 156 | #[cfg(test)] 157 | mod tests { 158 | use super::*; 159 | #[test] 160 | fn demo() { 161 | let mut linked = LinkedList::new(); 162 | assert_eq!(0, linked.push_back(1)); 163 | assert_eq!(1, linked.push_back(2)); 164 | assert_eq!(2, linked.push_back(3)); 165 | assert_eq!(linked.remove(1).unwrap(), 2); 166 | assert!(linked.remove(1).is_none()); 167 | assert_eq!(linked.push_back(2333), 1); 168 | 169 | let iter = linked.into_iter(); 170 | assert_eq!(iter.collect::>(), vec![1, 3, 2333]); 171 | } 172 | } 173 | -------------------------------------------------------------------------------- /monolake-services/src/common/cancel/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::UnsafeCell, 3 | future::Future, 4 | rc::{Rc, Weak}, 5 | task::Waker, 6 | }; 7 | 8 | use linked_list::LinkedList; 9 | 10 | pub mod linked_list; 11 | 12 | struct CancelHandler { 13 | cancelled: bool, 14 | waiters: LinkedList, 15 | } 16 | 17 | #[derive(Clone)] 18 | pub struct Canceller { 19 | handler: Rc>, 20 | } 21 | 22 | impl Default for Canceller { 23 | #[inline] 24 | fn default() -> Self { 25 | Self::new() 26 | } 27 | } 28 | 29 | impl Canceller { 30 | pub fn new() -> Self { 31 | Self { 32 | handler: Rc::new(UnsafeCell::new(CancelHandler { 33 | cancelled: false, 34 | waiters: LinkedList::new(), 35 | })), 36 | } 37 | } 38 | 39 | pub fn waiter(&self) -> Waiter { 40 | Waiter { 41 | index: UnsafeCell::new(None), 42 | handler: Rc::downgrade(&self.handler), 43 | } 44 | } 45 | 46 | pub fn cancel(&self) { 47 | let handler = unsafe { &mut *self.handler.get() }; 48 | if !handler.cancelled { 49 | handler.cancelled = true; 50 | let waiters: LinkedList = 51 | std::mem::replace(&mut handler.waiters, LinkedList::new()); 52 | for waker in waiters.into_iter() { 53 | waker.wake(); 54 | } 55 | } 56 | } 57 | 58 | pub const fn dropper(self) -> CancellerDropper { 59 | CancellerDropper(self) 60 | } 61 | } 62 | 63 | pub struct CancellerDropper(Canceller); 64 | 65 | impl Drop for CancellerDropper { 66 | fn drop(&mut self) { 67 | self.0.cancel(); 68 | } 69 | } 70 | 71 | pub struct Waiter { 72 | index: UnsafeCell>, 73 | handler: Weak>, 74 | } 75 | 76 | impl Clone for Waiter { 77 | fn clone(&self) -> Self { 78 | Self { 79 | 
index: UnsafeCell::new(None), 80 | handler: self.handler.clone(), 81 | } 82 | } 83 | } 84 | 85 | impl Waiter { 86 | pub fn cancelled(&self) -> bool { 87 | self.handler 88 | .upgrade() 89 | .is_none_or(|handler| unsafe { &*handler.get() }.cancelled) 90 | } 91 | } 92 | 93 | impl Future for Waiter { 94 | type Output = (); 95 | 96 | fn poll( 97 | self: std::pin::Pin<&mut Self>, 98 | cx: &mut std::task::Context<'_>, 99 | ) -> std::task::Poll { 100 | let handler = match self.handler.upgrade() { 101 | Some(handler) => handler, 102 | None => return std::task::Poll::Ready(()), 103 | }; 104 | let handler = unsafe { &mut *handler.get() }; 105 | if handler.cancelled { 106 | return std::task::Poll::Ready(()); 107 | } 108 | match unsafe { *self.index.get() } { 109 | Some(idx) => { 110 | let val = handler.waiters.get_mut(idx).unwrap(); 111 | val.clone_from(cx.waker()); 112 | } 113 | None => { 114 | let index = handler.waiters.push_back(cx.waker().clone()); 115 | unsafe { *self.index.get() = Some(index) }; 116 | } 117 | } 118 | std::task::Poll::Pending 119 | } 120 | } 121 | 122 | impl Drop for Waiter { 123 | fn drop(&mut self) { 124 | if let Some(index) = unsafe { *self.index.get() } { 125 | if let Some(handler) = self.handler.upgrade() { 126 | let handler = unsafe { &mut *handler.get() }; 127 | if !handler.cancelled { 128 | handler.waiters.remove(index); 129 | } 130 | } 131 | } 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /monolake-services/src/common/context.rs: -------------------------------------------------------------------------------- 1 | //! Context insertion service for request handling, with support for `certain_map`. 2 | //! 3 | //! This module provides a `ContextService` that inserts context information, 4 | //! into the request processing pipeline. It's designed 5 | //! to work seamlessly with the `service_async` framework and the `certain_map` crate 6 | //! for efficient context management. 7 | //! 8 | //! # Key Components 9 | //! 10 | //! - [`ContextService`]: The main service component that adds context information to requests. 11 | //! 12 | //! # Features 13 | //! 14 | //! - Works with `certain_map` for flexible and type-safe context management 15 | //! 16 | //! # Usage with certain_map 17 | //! 18 | //! `ContextService` is designed to work with contexts defined using the `certain_map` macro. 19 | //! This allows for efficient and type-safe context management. Here's an example of how to 20 | //! define a context and use it with `ContextService`: 21 | //! 22 | //! # Usage in a Service Stack 23 | //! 24 | //! `ContextService` is typically used as part of a larger service stack. Here's an example 25 | //! from a Layer 7 proxy factory: 26 | //! 27 | //! ```ignore 28 | //! use monolake_services::common::ContextService; 29 | //! use service_async::stack::FactoryStack; 30 | //! 31 | //! let stacks = FactoryStack::new(config) 32 | //! // ... other layers ... 33 | //! .push(ContextService::::layer()) 34 | //! // ... more processing ... 35 | //! ; 36 | //! // ... rest of the factory setup ... 37 | //! ``` 38 | //! 39 | //! In this example, `ContextService` is used to transform an `EmptyContext` into a `FullContext` 40 | //! by setting the `peer_addr` field. 
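//!
//! # Example: defining a context with `certain_map`
//!
//! A minimal sketch of the context definition referred to above. The exact
//! `certain_map!` attribute syntax shown here is an assumption for illustration
//! (consult the `certain_map` crate docs); the `EmptyContext`/`FullContext` and
//! `peer_addr` names mirror the description above:
//!
//! ```ignore
//! use monolake_core::context::PeerAddr;
//!
//! certain_map::certain_map! {
//!     #[empty(EmptyContext)]
//!     #[full(FullContext)]
//!     pub struct Context {
//!         peer_addr: PeerAddr,
//!     }
//! }
//!
//! // `ContextService::<Context, _>::layer()` then turns an `EmptyContext` into a
//! // `FullContext` by filling `peer_addr` from the accepted address before the
//! // inner service runs.
//! ```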
41 | use std::marker::PhantomData; 42 | 43 | use certain_map::Handler; 44 | use monolake_core::{context::PeerAddr, listener::AcceptedAddr}; 45 | use service_async::{ 46 | layer::{layer_fn, FactoryLayer}, 47 | AsyncMakeService, MakeService, ParamSet, Service, 48 | }; 49 | 50 | /// A service to insert Context into the request processing pipeline, compatible with `certain_map`. 51 | #[derive(Debug)] 52 | pub struct ContextService { 53 | pub inner: T, 54 | pub ctx: PhantomData, 55 | } 56 | 57 | unsafe impl Send for ContextService {} 58 | unsafe impl Sync for ContextService {} 59 | 60 | // Manually impl Clone because CXStore does not have to impl Clone. 61 | impl Clone for ContextService 62 | where 63 | T: Clone, 64 | { 65 | fn clone(&self) -> Self { 66 | Self { 67 | inner: self.inner.clone(), 68 | ctx: PhantomData, 69 | } 70 | } 71 | } 72 | 73 | // Manually impl Copy because CXStore does not have to impl Copy. 74 | impl Copy for ContextService where T: Copy {} 75 | 76 | impl Service<(R, AcceptedAddr)> for ContextService 77 | where 78 | CXStore: Default + Handler, 79 | // HRTB is your friend! 80 | // Please pay attention to when to use bound associated types and when to use associated types 81 | // directly(here `Transformed` is not bound but `Response` and `Error` are). 82 | for<'a> CXStore::Hdr<'a>: ParamSet, 83 | for<'a> T: Service< 84 | (R, as ParamSet>::Transformed), 85 | Response = Resp, 86 | Error = Err, 87 | >, 88 | { 89 | type Response = Resp; 90 | type Error = Err; 91 | 92 | async fn call(&self, (req, addr): (R, AcceptedAddr)) -> Result { 93 | let mut store = CXStore::default(); 94 | let hdr = store.handler(); 95 | let hdr = hdr.param_set(PeerAddr(addr)); 96 | self.inner.call((req, hdr)).await 97 | } 98 | } 99 | 100 | impl ContextService { 101 | pub fn layer() -> impl FactoryLayer { 102 | layer_fn(|_: &C, inner| ContextService { 103 | inner, 104 | ctx: PhantomData, 105 | }) 106 | } 107 | } 108 | 109 | impl MakeService for ContextService { 110 | type Service = ContextService; 111 | type Error = F::Error; 112 | 113 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 114 | Ok(ContextService { 115 | ctx: PhantomData, 116 | inner: self 117 | .inner 118 | .make_via_ref(old.map(|o| &o.inner)) 119 | .map_err(Into::into)?, 120 | }) 121 | } 122 | } 123 | 124 | impl AsyncMakeService for ContextService { 125 | type Service = ContextService; 126 | type Error = F::Error; 127 | 128 | async fn make_via_ref( 129 | &self, 130 | old: Option<&Self::Service>, 131 | ) -> Result { 132 | Ok(ContextService { 133 | ctx: PhantomData, 134 | inner: self 135 | .inner 136 | .make_via_ref(old.map(|o| &o.inner)) 137 | .await 138 | .map_err(Into::into)?, 139 | }) 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /monolake-services/src/common/delay.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use service_async::{ 4 | layer::{layer_fn, FactoryLayer}, 5 | AsyncMakeService, MakeService, Param, Service, 6 | }; 7 | 8 | #[derive(Clone)] 9 | pub struct DelayService { 10 | pub delay: Duration, 11 | pub inner: T, 12 | } 13 | 14 | impl Service for DelayService 15 | where 16 | T: Service, 17 | { 18 | type Response = T::Response; 19 | type Error = T::Error; 20 | 21 | async fn call(&self, req: R) -> Result { 22 | monoio::time::sleep(self.delay).await; 23 | self.inner.call(req).await 24 | } 25 | } 26 | 27 | #[derive(Debug, Clone, Copy)] 28 | pub struct Delay(pub Duration); 29 | 30 | impl 
DelayService { 31 | pub fn layer() -> impl FactoryLayer 32 | where 33 | C: Param, 34 | { 35 | layer_fn(|c: &C, inner| DelayService { 36 | delay: c.param().0, 37 | inner, 38 | }) 39 | } 40 | } 41 | 42 | impl MakeService for DelayService { 43 | type Service = DelayService; 44 | type Error = F::Error; 45 | 46 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 47 | Ok(DelayService { 48 | delay: self.delay, 49 | inner: self 50 | .inner 51 | .make_via_ref(old.map(|o| &o.inner)) 52 | .map_err(Into::into)?, 53 | }) 54 | } 55 | } 56 | 57 | impl AsyncMakeService for DelayService { 58 | type Service = DelayService; 59 | type Error = F::Error; 60 | 61 | async fn make_via_ref( 62 | &self, 63 | old: Option<&Self::Service>, 64 | ) -> Result { 65 | Ok(DelayService { 66 | delay: self.delay, 67 | inner: self 68 | .inner 69 | .make_via_ref(old.map(|o| &o.inner)) 70 | .await 71 | .map_err(Into::into)?, 72 | }) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /monolake-services/src/common/detect.rs: -------------------------------------------------------------------------------- 1 | use std::{future::Future, io, io::Cursor}; 2 | 3 | use monoio::{ 4 | buf::IoBufMut, 5 | io::{AsyncReadRent, AsyncReadRentExt, PrefixedReadIo}, 6 | }; 7 | use service_async::Service; 8 | 9 | /// Detect is a trait for detecting a certain pattern in the input stream. 10 | /// 11 | /// It accepts an input stream and returns a tuple of the detected pattern and the wrapped input 12 | /// stream which is usually a `PrefixedReadIo`. The implementation can choose to whether add the 13 | /// prefix data. 14 | /// If it fails to detect the pattern, it should represent the error inside the `DetOut`. 15 | pub trait Detect { 16 | type DetOut; 17 | type IOOut; 18 | 19 | fn detect(&self, io: IO) -> impl Future>; 20 | } 21 | 22 | /// DetectService is a service that detects a certain pattern in the input stream and forwards the 23 | /// detected pattern and the wrapped input stream to the inner service. 24 | pub struct DetectService { 25 | pub detector: D, 26 | pub inner: S, 27 | } 28 | 29 | #[derive(thiserror::Error, Debug)] 30 | pub enum DetectError { 31 | #[error("service error: {0:?}")] 32 | Svc(E), 33 | #[error("io error: {0:?}")] 34 | Io(std::io::Error), 35 | } 36 | 37 | impl Service<(R, CX)> for DetectService 38 | where 39 | D: Detect, 40 | S: Service<(D::DetOut, D::IOOut, CX)>, 41 | { 42 | type Response = S::Response; 43 | type Error = DetectError; 44 | 45 | async fn call(&self, (io, cx): (R, CX)) -> Result { 46 | let (det, io) = self.detector.detect(io).await.map_err(DetectError::Io)?; 47 | self.inner 48 | .call((det, io, cx)) 49 | .await 50 | .map_err(DetectError::Svc) 51 | } 52 | } 53 | 54 | /// FixedLengthDetector detects a fixed length of bytes from the input stream. 55 | pub struct FixedLengthDetector(pub F); 56 | 57 | impl Detect for FixedLengthDetector 58 | where 59 | F: Fn(&mut [u8]) -> DetOut, 60 | IO: AsyncReadRent, 61 | { 62 | type DetOut = DetOut; 63 | type IOOut = PrefixedReadIo>>; 64 | 65 | async fn detect(&self, mut io: IO) -> io::Result<(Self::DetOut, Self::IOOut)> { 66 | let buf = Vec::with_capacity(N).slice_mut(..N); 67 | let (r, buf) = io.read_exact(buf).await; 68 | r?; 69 | 70 | let mut buf = buf.into_inner(); 71 | let r = (self.0)(&mut buf); 72 | Ok((r, PrefixedReadIo::new(io, Cursor::new(buf)))) 73 | } 74 | } 75 | 76 | /// PrefixDetector detects a certain prefix from the input stream. 
77 | /// 78 | /// If the prefix matches, it returns true and the wrapped input stream with the prefix data. 79 | /// Otherwise, it returns false and the input stream with the prefix data(the prefix maybe less than 80 | /// the static str's length). 81 | pub struct PrefixDetector(pub &'static [u8]); 82 | 83 | impl Detect for PrefixDetector 84 | where 85 | IO: AsyncReadRent, 86 | { 87 | type DetOut = bool; 88 | type IOOut = PrefixedReadIo>>; 89 | 90 | async fn detect(&self, mut io: IO) -> io::Result<(Self::DetOut, Self::IOOut)> { 91 | let l = self.0.len(); 92 | let mut written = 0; 93 | let mut buf: Vec = Vec::with_capacity(l); 94 | let mut eq = true; 95 | loop { 96 | // # Safety 97 | // The buf must have enough capacity to write the data. 98 | let buf_slice = unsafe { buf.slice_mut_unchecked(written..l) }; 99 | let (result, buf_slice) = io.read(buf_slice).await; 100 | buf = buf_slice.into_inner(); 101 | match result? { 102 | 0 => { 103 | break; 104 | } 105 | n => { 106 | let curr = written; 107 | written += n; 108 | if self.0[curr..written] != buf[curr..written] { 109 | eq = false; 110 | break; 111 | } 112 | } 113 | } 114 | } 115 | let io = PrefixedReadIo::new(io, Cursor::new(buf)); 116 | Ok((eq && written == l, io)) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /monolake-services/src/common/erase.rs: -------------------------------------------------------------------------------- 1 | use service_async::{ 2 | layer::{layer_fn, FactoryLayer}, 3 | AsyncMakeService, MakeService, Service, 4 | }; 5 | 6 | #[derive(Debug)] 7 | pub struct EraseResp { 8 | pub svc: T, 9 | } 10 | 11 | impl MakeService for EraseResp { 12 | type Service = EraseResp; 13 | type Error = T::Error; 14 | 15 | #[inline] 16 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 17 | Ok(EraseResp { 18 | svc: self 19 | .svc 20 | .make_via_ref(old.map(|o| &o.svc)) 21 | .map_err(Into::into)?, 22 | }) 23 | } 24 | } 25 | 26 | impl AsyncMakeService for EraseResp { 27 | type Service = EraseResp; 28 | type Error = T::Error; 29 | 30 | #[inline] 31 | async fn make_via_ref( 32 | &self, 33 | old: Option<&Self::Service>, 34 | ) -> Result { 35 | Ok(EraseResp { 36 | svc: self 37 | .svc 38 | .make_via_ref(old.map(|o| &o.svc)) 39 | .await 40 | .map_err(Into::into)?, 41 | }) 42 | } 43 | } 44 | 45 | impl, Req> Service for EraseResp { 46 | type Response = (); 47 | type Error = T::Error; 48 | 49 | #[inline] 50 | async fn call(&self, req: Req) -> Result { 51 | self.svc.call(req).await.map(|_| ()) 52 | } 53 | } 54 | 55 | impl EraseResp { 56 | pub fn layer() -> impl FactoryLayer { 57 | layer_fn(|_c: &C, svc| EraseResp { svc }) 58 | } 59 | } 60 | 61 | impl EraseResp { 62 | #[inline] 63 | pub const fn new(svc: T) -> Self { 64 | Self { svc } 65 | } 66 | 67 | #[inline] 68 | pub fn into_inner(self) -> T { 69 | self.svc 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /monolake-services/src/common/map.rs: -------------------------------------------------------------------------------- 1 | use service_async::{ 2 | layer::{layer_fn, FactoryLayer}, 3 | AsyncMakeService, MakeService, Service, 4 | }; 5 | 6 | pub struct Map { 7 | pub inner: S, 8 | pub rewrite_f: FN, 9 | } 10 | 11 | pub struct MapErr { 12 | pub inner: S, 13 | pub rewrite_f: FN, 14 | } 15 | 16 | pub struct FnSvc { 17 | pub inner: S, 18 | pub rewrite_f: FN, 19 | } 20 | 21 | impl Map { 22 | pub fn layer(f: FN) -> impl FactoryLayer { 23 | layer_fn(move |_c: &C, inner| Map { 24 | inner, 
25 | rewrite_f: f.clone(), 26 | }) 27 | } 28 | } 29 | 30 | impl MapErr { 31 | pub fn layer(f: FN) -> impl FactoryLayer { 32 | layer_fn(move |_c: &C, inner| MapErr { 33 | inner, 34 | rewrite_f: f.clone(), 35 | }) 36 | } 37 | } 38 | 39 | impl FnSvc { 40 | pub fn layer(f: FN) -> impl FactoryLayer { 41 | layer_fn(move |_c: &C, inner| FnSvc { 42 | inner, 43 | rewrite_f: f.clone(), 44 | }) 45 | } 46 | } 47 | 48 | impl, R, FN, NR> Service for Map 49 | where 50 | FN: Fn(S::Response) -> NR, 51 | { 52 | type Response = NR; 53 | type Error = S::Error; 54 | 55 | async fn call(&self, req: R) -> Result { 56 | self.inner.call(req).await.map(&self.rewrite_f) 57 | } 58 | } 59 | 60 | impl, R, FN, NE> Service for MapErr 61 | where 62 | FN: Fn(S::Error) -> NE, 63 | { 64 | type Response = S::Response; 65 | type Error = NE; 66 | 67 | async fn call(&self, req: R) -> Result { 68 | self.inner.call(req).await.map_err(&self.rewrite_f) 69 | } 70 | } 71 | 72 | impl, R, FN, FR, FE> Service for FnSvc 73 | where 74 | FN: Fn(Result) -> Result, 75 | { 76 | type Response = FR; 77 | type Error = FE; 78 | 79 | async fn call(&self, req: R) -> Result { 80 | (self.rewrite_f)(self.inner.call(req).await) 81 | } 82 | } 83 | 84 | impl AsyncMakeService for Map { 85 | type Service = Map; 86 | type Error = F::Error; 87 | 88 | async fn make_via_ref( 89 | &self, 90 | old: Option<&Self::Service>, 91 | ) -> Result { 92 | Ok(Map { 93 | inner: self 94 | .inner 95 | .make_via_ref(old.map(|o| &o.inner)) 96 | .await 97 | .map_err(Into::into)?, 98 | rewrite_f: self.rewrite_f.clone(), 99 | }) 100 | } 101 | } 102 | 103 | impl AsyncMakeService for MapErr { 104 | type Service = MapErr; 105 | type Error = F::Error; 106 | 107 | async fn make_via_ref( 108 | &self, 109 | old: Option<&Self::Service>, 110 | ) -> Result { 111 | Ok(MapErr { 112 | inner: self 113 | .inner 114 | .make_via_ref(old.map(|o| &o.inner)) 115 | .await 116 | .map_err(Into::into)?, 117 | rewrite_f: self.rewrite_f.clone(), 118 | }) 119 | } 120 | } 121 | 122 | impl AsyncMakeService for FnSvc { 123 | type Service = FnSvc; 124 | type Error = F::Error; 125 | 126 | async fn make_via_ref( 127 | &self, 128 | old: Option<&Self::Service>, 129 | ) -> Result { 130 | Ok(FnSvc { 131 | inner: self 132 | .inner 133 | .make_via_ref(old.map(|o| &o.inner)) 134 | .await 135 | .map_err(Into::into)?, 136 | rewrite_f: self.rewrite_f.clone(), 137 | }) 138 | } 139 | } 140 | 141 | impl MakeService for Map { 142 | type Service = Map; 143 | type Error = F::Error; 144 | 145 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 146 | Ok(Map { 147 | inner: self 148 | .inner 149 | .make_via_ref(old.map(|o| &o.inner)) 150 | .map_err(Into::into)?, 151 | rewrite_f: self.rewrite_f.clone(), 152 | }) 153 | } 154 | } 155 | 156 | impl MakeService for MapErr { 157 | type Service = MapErr; 158 | type Error = F::Error; 159 | 160 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 161 | Ok(MapErr { 162 | inner: self 163 | .inner 164 | .make_via_ref(old.map(|o| &o.inner)) 165 | .map_err(Into::into)?, 166 | rewrite_f: self.rewrite_f.clone(), 167 | }) 168 | } 169 | } 170 | 171 | impl MakeService for FnSvc { 172 | type Service = FnSvc; 173 | type Error = F::Error; 174 | 175 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 176 | Ok(FnSvc { 177 | inner: self 178 | .inner 179 | .make_via_ref(old.map(|o| &o.inner)) 180 | .map_err(Into::into)?, 181 | rewrite_f: self.rewrite_f.clone(), 182 | }) 183 | } 184 | } 185 | -------------------------------------------------------------------------------- 
/monolake-services/src/common/mod.rs: -------------------------------------------------------------------------------- 1 | //! Generic services for panic catching, context management, and timeouts. 2 | pub mod cancel; 3 | pub mod context; 4 | pub mod delay; 5 | pub mod detect; 6 | pub mod erase; 7 | pub mod map; 8 | pub mod panic; 9 | pub mod selector; 10 | pub mod timeout; 11 | 12 | // TODO: remove following re-exports 13 | pub use cancel::{linked_list, Canceller, CancellerDropper, Waiter}; 14 | pub use context::ContextService; 15 | pub use delay::{Delay, DelayService}; 16 | pub use detect::{Detect, DetectService, FixedLengthDetector, PrefixDetector}; 17 | pub use erase::EraseResp; 18 | pub use map::{FnSvc, Map, MapErr}; 19 | pub use panic::{CatchPanicError, CatchPanicService}; 20 | pub use timeout::{Timeout, TimeoutError, TimeoutService}; 21 | -------------------------------------------------------------------------------- /monolake-services/src/common/panic.rs: -------------------------------------------------------------------------------- 1 | //! Panic-catching service for enhancing stability in handlerss. 2 | //! 3 | //! This module provides a `CatchPanicService` that wraps an inner service and catches 4 | //! any panics that might occur during its execution, converting them into errors. 5 | //! It's designed to work seamlessly with the `service_async` framework and can be 6 | //! easily integrated into a service stack to improve overall system stability. 7 | //! 8 | //! # Key Components 9 | //! 10 | //! - [`CatchPanicService`]: The main service component that adds panic-catching functionality to an 11 | //! inner service. 12 | //! - [`CatchPanicError`]: Error type that encapsulates both inner service errors and caught panics. 13 | //! 14 | //! # Features 15 | //! 16 | //! - Catches panics in the inner service and converts them to errors 17 | //! - Preserves inner service errors alongside panic-derived errors 18 | //! 19 | //! # Usage 20 | //! 21 | //! `CatchPanicService` is typically used as part of a larger service stack. Here's a basic example: 22 | //! 23 | //! ```ignore 24 | //! use service_async::{layer::FactoryLayer, stack::FactoryStack}; 25 | //! 26 | //! use crate::catch_panic::CatchPanicService; 27 | //! 28 | //! let config = Config { 29 | //! // ... config ... 30 | //! }; 31 | //! let stack = FactoryStack::new(config) 32 | //! .push(MyService::layer()) 33 | //! .push(CatchPanicService::layer()) 34 | //! // ... other layers ... 35 | //! ; 36 | //! 37 | //! let service = stack.make_async().await.unwrap(); 38 | //! // Use the service to handle requests with panic protection 39 | //! ``` 40 | //! 41 | //! # Safety Considerations 42 | //! 43 | //! It's crucial to ensure that the inner service wrapped by `CatchPanicService` is 44 | //! `UnwindSafe`. If the inner service is not `UnwindSafe`, the behavior of 45 | //! `CatchPanicService` is undefined and may lead to unexpected results. 46 | //! 47 | //! # Error Handling 48 | //! 49 | //! The `CatchPanicService` wraps errors from the inner service and adds a new `Panic` 50 | //! error variant for caught panics. Users should handle both inner service errors 51 | //! and panic-derived errors when using this service. 52 | //! 53 | //! # Performance Considerations 54 | //! 55 | //! - Adds minimal overhead to the inner service execution 56 | //! 
- Uses Rust's `catch_unwind` mechanism, which has a small performance cost 57 | 58 | use std::{fmt::Debug, panic::AssertUnwindSafe}; 59 | 60 | use futures::FutureExt; 61 | use service_async::{ 62 | layer::{layer_fn, FactoryLayer}, 63 | AsyncMakeService, MakeService, Service, 64 | }; 65 | 66 | pub struct CatchPanicService { 67 | pub inner: S, 68 | } 69 | 70 | #[derive(thiserror::Error, Debug)] 71 | pub enum CatchPanicError { 72 | #[error("inner error: {0:?}")] 73 | Inner(E), 74 | // to make it Sync, construct a String instead of Box 75 | #[error("inner panic: {0}")] 76 | Panic(String), 77 | } 78 | 79 | // Service that catches panics from an inner service and converts them to errors. 80 | /// # Safety 81 | /// 82 | /// The inner service must be `UnwindSafe` for this wrapper to function correctly. 83 | /// Using `CatchPanicService` with a non-`UnwindSafe` inner service may lead to 84 | /// undefined behavior. 85 | impl Service for CatchPanicService 86 | where 87 | S: Service, 88 | { 89 | type Response = S::Response; 90 | type Error = CatchPanicError; 91 | 92 | async fn call(&self, req: R) -> Result { 93 | match AssertUnwindSafe(self.inner.call(req)).catch_unwind().await { 94 | Ok(Ok(r)) => Ok(r), 95 | Ok(Err(e)) => Err(CatchPanicError::Inner(e)), 96 | Err(e) => Err(CatchPanicError::Panic(format!("{e:?}"))), 97 | } 98 | } 99 | } 100 | 101 | impl CatchPanicService { 102 | pub fn layer() -> impl FactoryLayer { 103 | layer_fn(|_c: &C, inner| CatchPanicService { inner }) 104 | } 105 | } 106 | 107 | impl MakeService for CatchPanicService { 108 | type Service = CatchPanicService; 109 | type Error = F::Error; 110 | 111 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 112 | Ok(CatchPanicService { 113 | inner: self 114 | .inner 115 | .make_via_ref(old.map(|o| &o.inner)) 116 | .map_err(Into::into)?, 117 | }) 118 | } 119 | } 120 | 121 | impl AsyncMakeService for CatchPanicService { 122 | type Service = CatchPanicService; 123 | type Error = F::Error; 124 | 125 | async fn make_via_ref( 126 | &self, 127 | old: Option<&Self::Service>, 128 | ) -> Result { 129 | Ok(CatchPanicService { 130 | inner: self 131 | .inner 132 | .make_via_ref(old.map(|o| &o.inner)) 133 | .await 134 | .map_err(Into::into)?, 135 | }) 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /monolake-services/src/common/timeout.rs: -------------------------------------------------------------------------------- 1 | //! Timeout service for adding timeout functionality to HTTP handlers. 2 | //! 3 | //! This module provides a `TimeoutService` that wraps an inner service and applies 4 | //! a timeout to its execution. It's designed to work seamlessly with the `service_async` 5 | //! framework and can be easily integrated into a service stack. 6 | //! 7 | //! # Key Components 8 | //! 9 | //! - [`TimeoutService`]: The main service component that adds timeout functionality to an inner 10 | //! service. 11 | //! - [`TimeoutError`]: Error type for timeout-related errors. 12 | //! - [`Timeout`]: A simple wrapper around `Duration` for configuration purposes. 13 | //! 14 | //! # Features 15 | //! 16 | //! - Adds configurable timeout to any inner service 17 | //! - Propagates inner service errors alongside timeout errors 18 | //! 19 | //! # Performance Considerations 20 | //! 21 | //! - Adds minimal overhead to the inner service execution 22 | //! 
- Uses efficient timeout mechanism provided by the `monoio` runtime 23 | 24 | use std::time::Duration; 25 | 26 | use monoio::time::timeout; 27 | use service_async::{ 28 | layer::{layer_fn, FactoryLayer}, 29 | AsyncMakeService, MakeService, Param, Service, 30 | }; 31 | 32 | /// Service that adds timeout functionality to an inner service. 33 | #[derive(Clone)] 34 | pub struct TimeoutService { 35 | pub timeout: Duration, 36 | pub inner: T, 37 | } 38 | 39 | #[derive(thiserror::Error, Debug)] 40 | pub enum TimeoutError { 41 | #[error("inner error: {0:?}")] 42 | Inner(E), 43 | #[error("timeout")] 44 | Timeout, 45 | } 46 | 47 | impl> Service for TimeoutService { 48 | type Response = T::Response; 49 | type Error = TimeoutError; 50 | 51 | async fn call(&self, req: R) -> Result { 52 | match timeout(self.timeout, self.inner.call(req)).await { 53 | Ok(Ok(resp)) => Ok(resp), 54 | Ok(Err(err)) => Err(TimeoutError::Inner(err)), 55 | Err(_) => Err(TimeoutError::Timeout), 56 | } 57 | } 58 | } 59 | 60 | #[derive(Debug, Clone, Copy)] 61 | pub struct Timeout(pub Duration); 62 | 63 | impl TimeoutService { 64 | pub fn layer() -> impl FactoryLayer 65 | where 66 | C: Param, 67 | { 68 | layer_fn(|c: &C, inner| TimeoutService { 69 | timeout: c.param().0, 70 | inner, 71 | }) 72 | } 73 | } 74 | 75 | impl MakeService for TimeoutService { 76 | type Service = TimeoutService; 77 | type Error = F::Error; 78 | 79 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 80 | Ok(TimeoutService { 81 | timeout: self.timeout, 82 | inner: self 83 | .inner 84 | .make_via_ref(old.map(|o| &o.inner)) 85 | .map_err(Into::into)?, 86 | }) 87 | } 88 | } 89 | 90 | impl AsyncMakeService for TimeoutService { 91 | type Service = TimeoutService; 92 | type Error = F::Error; 93 | 94 | async fn make_via_ref( 95 | &self, 96 | old: Option<&Self::Service>, 97 | ) -> Result { 98 | Ok(TimeoutService { 99 | timeout: self.timeout, 100 | inner: self 101 | .inner 102 | .make_via_ref(old.map(|o| &o.inner)) 103 | .await 104 | .map_err(Into::into)?, 105 | }) 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /monolake-services/src/http/detect.rs: -------------------------------------------------------------------------------- 1 | //! HTTP version detection and handling module. 2 | //! 3 | //! This module provides functionality to detect the HTTP version (HTTP/1.x or HTTP/2) 4 | //! of incoming connections and route them accordingly. It is designed to work seamlessly 5 | //! with monoio's asynchronous runtime and the service_async framework. 6 | //! 7 | //! # Key Components 8 | //! 9 | //! - [`H2Detect`]: The main service component responsible for HTTP version detection. 10 | //! - [`H2DetectError`]: Error type for version detection operations. 11 | //! 12 | //! # Features 13 | //! 14 | //! - Automatic detection of HTTP/2 connections based on the client preface 15 | //! - Seamless handling of both HTTP/1.x and HTTP/2 connections 16 | //! - Integration with `service_async` for easy composition in service stacks 17 | //! - Efficient I/O handling using monoio's asynchronous primitives 18 | //! 19 | //! # Usage 20 | //! 21 | //! This service is typically used as part of a larger service stack, placed before 22 | //! the main HTTP handling logic. Here's a basic example: 23 | //! 24 | //! ```ignore 25 | //! use service_async::{layer::FactoryLayer, stack::FactoryStack}; 26 | //! 27 | //! let config = Config { /* ... */ }; 28 | //! let stack = FactoryStack::new(config) 29 | //! 
.push(HttpCoreService::layer()) 30 | //! .push(H2Detect::layer()) 31 | //! // ... other layers ... 32 | //! ; 33 | //! 34 | //! let service = stack.make_async().await.unwrap(); 35 | //! // Use the service to handle incoming connections 36 | //! ``` 37 | //! 38 | //! # Performance Considerations 39 | //! 40 | //! - Uses efficient buffering to minimize I/O operations during version detection 41 | //! - Implements zero-copy techniques where possible to reduce memory overhead 42 | 43 | use service_async::{ 44 | layer::{layer_fn, FactoryLayer}, 45 | AsyncMakeService, MakeService, 46 | }; 47 | 48 | use crate::common::{DetectService, PrefixDetector}; 49 | 50 | const PREFACE: &[u8; 24] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; 51 | 52 | /// Service for detecting HTTP version and routing connections accordingly. 53 | /// 54 | /// `H2Detect` examines the initial bytes of an incoming connection to 55 | /// determine whether it's an HTTP/2 connection (by checking for the HTTP/2 preface) 56 | /// or an HTTP/1.x connection. It then forwards the connection to the inner service 57 | /// with appropriate version information. 58 | /// For implementation details and example usage, see the 59 | /// [module level documentation](crate::http::detect). 60 | #[derive(Clone)] 61 | pub struct H2Detect { 62 | inner: T, 63 | } 64 | 65 | #[derive(thiserror::Error, Debug)] 66 | pub enum H2DetectError { 67 | #[error("inner error: {0:?}")] 68 | Inner(E), 69 | #[error("io error: {0:?}")] 70 | Io(std::io::Error), 71 | } 72 | 73 | impl MakeService for H2Detect { 74 | type Service = DetectService; 75 | type Error = F::Error; 76 | 77 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 78 | Ok(DetectService { 79 | inner: self.inner.make_via_ref(old.map(|o| &o.inner))?, 80 | detector: PrefixDetector(PREFACE), 81 | }) 82 | } 83 | } 84 | 85 | impl AsyncMakeService for H2Detect { 86 | type Service = DetectService; 87 | type Error = F::Error; 88 | 89 | async fn make_via_ref( 90 | &self, 91 | old: Option<&Self::Service>, 92 | ) -> Result { 93 | Ok(DetectService { 94 | inner: self.inner.make_via_ref(old.map(|o| &o.inner)).await?, 95 | detector: PrefixDetector(PREFACE), 96 | }) 97 | } 98 | } 99 | 100 | impl H2Detect { 101 | pub fn layer() -> impl FactoryLayer { 102 | layer_fn(|_: &C, inner| H2Detect { inner }) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /monolake-services/src/http/handlers/connection_persistence.rs: -------------------------------------------------------------------------------- 1 | //! HTTP connection persistence and keep-alive management module. 2 | //! 3 | //! This module provides functionality to manage HTTP connection persistence (keep-alive) 4 | //! across different HTTP versions. It handles the intricacies of connection reuse for 5 | //! HTTP/1.0, HTTP/1.1, and HTTP/2, ensuring proper header management and version compatibility. 6 | //! # Key Components 7 | //! 8 | //! - [`ConnectionReuseHandler`]: The main service component responsible for managing connection 9 | //! persistence and keep-alive behavior. 10 | //! 11 | //! # Features 12 | //! 13 | //! - Automatic detection and handling of keep-alive support for incoming requests 14 | //! - Version-specific handling for HTTP/1.0, HTTP/1.1, and HTTP/2 15 | //! - Modification of request and response headers to ensure proper keep-alive behavior 16 | //! - Seamless integration with `service_async` for easy composition in service stacks 17 | //! 
- Support for upgrading HTTP/1.0 connections to HTTP/1.1-like behavior 18 | //! 19 | //! # Usage 20 | //! 21 | //! This handler is typically used as part of a larger HTTP service stack. Here's a basic example: 22 | //! 23 | //! ```rust 24 | //! use monolake_services::{ 25 | //! common::ContextService, 26 | //! http::{ 27 | //! core::HttpCoreService, 28 | //! detect::H2Detect, 29 | //! handlers::{ 30 | //! route::RouteConfig, ConnectionReuseHandler, ContentHandler, RewriteAndRouteHandler, 31 | //! UpstreamHandler, 32 | //! }, 33 | //! HttpServerTimeout, 34 | //! }, 35 | //! }; 36 | //! use service_async::{layer::FactoryLayer, stack::FactoryStack, Param}; 37 | //! 38 | //! // Dummy struct to satisfy Param trait requirements 39 | //! struct DummyConfig; 40 | //! 41 | //! // Implement Param for DummyConfig to return Vec 42 | //! impl Param> for DummyConfig { 43 | //! fn param(&self) -> Vec { 44 | //! vec![] 45 | //! } 46 | //! } 47 | //! impl Param for DummyConfig { 48 | //! fn param(&self) -> HttpServerTimeout { 49 | //! HttpServerTimeout::default() 50 | //! } 51 | //! } 52 | //! 53 | //! let config = DummyConfig; 54 | //! let stacks = FactoryStack::new(config) 55 | //! .replace(UpstreamHandler::factory( 56 | //! Default::default(), 57 | //! Default::default(), 58 | //! )) 59 | //! .push(ContentHandler::layer()) 60 | //! .push(RewriteAndRouteHandler::layer()) 61 | //! .push(ConnectionReuseHandler::layer()) 62 | //! .push(HttpCoreService::layer()) 63 | //! .push(H2Detect::layer()); 64 | //! 65 | //! // Use the service to handle HTTP requests 66 | //! ``` 67 | //! 68 | //! # Performance Considerations 69 | //! 70 | //! - Efficient header manipulation to minimize overhead 71 | //! - Optimized handling for HTTP/2, which has built-in connection persistence 72 | use http::{Request, Version}; 73 | use monolake_core::http::{HttpHandler, ResponseWithContinue}; 74 | use service_async::{ 75 | layer::{layer_fn, FactoryLayer}, 76 | AsyncMakeService, MakeService, Service, 77 | }; 78 | use tracing::debug; 79 | 80 | use crate::http::{CLOSE, CLOSE_VALUE, KEEPALIVE, KEEPALIVE_VALUE}; 81 | 82 | /// Handler for managing HTTP connection persistence and keep-alive behavior. 83 | /// 84 | /// `ConnectionReuseHandler` is responsible for: 85 | /// 1. Detecting whether an incoming request supports keep-alive. 86 | /// 2. Modifying request and response headers to ensure proper keep-alive behavior. 87 | /// 3. Handling version-specific connection persistence logic for HTTP/1.0, HTTP/1.1, and HTTP/2. 88 | /// 89 | /// For implementation details and example usage, see the 90 | /// [module level documentation](crate::http::handlers::connection_persistence). 
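/// # Behavior sketch
///
/// A condensed view of the version-specific handling implemented in the
/// `Service::call` below, assuming the inner handler also signals "continue":
///
/// ```ignore
/// // HTTP/1.0 + `Connection: keep-alive`  -> response gets `Connection: keep-alive`, reuse = true
/// // HTTP/1.0 without keep-alive          -> reuse = false
/// // HTTP/1.1 without `Connection: close` -> no `Connection` header added, reuse = true
/// // HTTP/1.1 + `Connection: close`       -> response gets `Connection: close`, reuse = false
/// // HTTP/2                               -> persistence handled by the protocol, reuse = true
/// ```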
91 | #[derive(Clone)] 92 | pub struct ConnectionReuseHandler { 93 | inner: H, 94 | } 95 | 96 | impl Service<(Request, CX)> for ConnectionReuseHandler 97 | where 98 | H: HttpHandler, 99 | { 100 | type Response = ResponseWithContinue; 101 | type Error = H::Error; 102 | 103 | async fn call( 104 | &self, 105 | (mut request, ctx): (Request, CX), 106 | ) -> Result { 107 | let version = request.version(); 108 | let keepalive = is_conn_keepalive(request.headers(), version); 109 | debug!("frontend keepalive {:?}", keepalive); 110 | 111 | match version { 112 | // for http 1.0, hack it to 1.1 like setting nginx `proxy_http_version` to 1.1 113 | Version::HTTP_10 => { 114 | // modify to 1.1 and remove connection header 115 | *request.version_mut() = Version::HTTP_11; 116 | let _ = request.headers_mut().remove(http::header::CONNECTION); 117 | 118 | // send 119 | let (mut response, mut cont) = self.inner.handle(request, ctx).await?; 120 | cont &= keepalive; 121 | 122 | // modify back and make sure reply keepalive if client want it and server 123 | // support it. 124 | let _ = response.headers_mut().remove(http::header::CONNECTION); 125 | if cont { 126 | // insert keepalive header 127 | response 128 | .headers_mut() 129 | .insert(http::header::CONNECTION, KEEPALIVE_VALUE); 130 | } 131 | *response.version_mut() = version; 132 | 133 | Ok((response, cont)) 134 | } 135 | Version::HTTP_11 => { 136 | // remove connection header 137 | let _ = request.headers_mut().remove(http::header::CONNECTION); 138 | 139 | // send 140 | let (mut response, mut cont) = self.inner.handle(request, ctx).await?; 141 | cont &= keepalive; 142 | 143 | // modify back and make sure reply keepalive if client want it and server 144 | // support it. 145 | let _ = response.headers_mut().remove(http::header::CONNECTION); 146 | if !cont { 147 | // insert close header 148 | response 149 | .headers_mut() 150 | .insert(http::header::CONNECTION, CLOSE_VALUE); 151 | } 152 | Ok((response, cont)) 153 | } 154 | Version::HTTP_2 => { 155 | let (response, _) = self.inner.handle(request, ctx).await?; 156 | Ok((response, true)) 157 | } 158 | // for http 0.9 and other versions, just relay it 159 | _ => { 160 | let (response, _) = self.inner.handle(request, ctx).await?; 161 | Ok((response, false)) 162 | } 163 | } 164 | } 165 | } 166 | 167 | // ConnReuseHandler is a Service and a MakeService. 
168 | impl MakeService for ConnectionReuseHandler { 169 | type Service = ConnectionReuseHandler; 170 | type Error = F::Error; 171 | 172 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 173 | Ok(ConnectionReuseHandler { 174 | inner: self.inner.make_via_ref(old.map(|o| &o.inner))?, 175 | }) 176 | } 177 | } 178 | 179 | impl AsyncMakeService for ConnectionReuseHandler { 180 | type Service = ConnectionReuseHandler; 181 | type Error = F::Error; 182 | 183 | async fn make_via_ref( 184 | &self, 185 | old: Option<&Self::Service>, 186 | ) -> Result { 187 | Ok(ConnectionReuseHandler { 188 | inner: self.inner.make_via_ref(old.map(|o| &o.inner)).await?, 189 | }) 190 | } 191 | } 192 | 193 | impl ConnectionReuseHandler { 194 | pub fn layer() -> impl FactoryLayer { 195 | layer_fn(|_: &C, inner| Self { inner }) 196 | } 197 | } 198 | 199 | fn is_conn_keepalive(headers: &http::HeaderMap, version: Version) -> bool { 200 | match (version, headers.get(http::header::CONNECTION)) { 201 | (Version::HTTP_10, Some(header)) 202 | if header.as_bytes().eq_ignore_ascii_case(KEEPALIVE.as_bytes()) => 203 | { 204 | true 205 | } 206 | (Version::HTTP_11, None) => true, 207 | (Version::HTTP_11, Some(header)) 208 | if !header.as_bytes().eq_ignore_ascii_case(CLOSE.as_bytes()) => 209 | { 210 | true 211 | } 212 | _ => false, 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /monolake-services/src/http/handlers/content_handler.rs: -------------------------------------------------------------------------------- 1 | //! Content encoding and decoding handler for HTTP requests and responses. 2 | //! 3 | //! This module provides a `ContentHandler` that manages content encoding and decoding 4 | //! for both incoming requests and outgoing responses in an HTTP service stack. It supports 5 | //! various content encodings and can be easily integrated into a service pipeline. 6 | //! 7 | //! # Key Components 8 | //! 9 | //! - [`ContentHandler`]: The main service component responsible for content encoding/decoding. 10 | //! 11 | //! # Features 12 | //! 13 | //! - Transparent content decoding for incoming requests 14 | //! - Content encoding for outgoing responses based on client preferences 15 | //! - Support for various content encodings (e.g., gzip, deflate) 16 | //! - Integration with service-async framework for easy composition 17 | //! - Error handling for decoding and encoding failures 18 | //! 19 | //! # Usage 20 | //! 21 | //! This handler is typically used as part of a larger service stack. Here's a basic example: 22 | //! 23 | //! ```rust 24 | //! use monolake_services::{ 25 | //! common::ContextService, 26 | //! http::{ 27 | //! core::HttpCoreService, 28 | //! detect::H2Detect, 29 | //! handlers::{ 30 | //! route::RouteConfig, ConnectionReuseHandler, ContentHandler, RewriteAndRouteHandler, 31 | //! UpstreamHandler, 32 | //! }, 33 | //! HttpServerTimeout, 34 | //! }, 35 | //! }; 36 | //! use service_async::{layer::FactoryLayer, stack::FactoryStack, Param}; 37 | //! 38 | //! // Dummy struct to satisfy Param trait requirements 39 | //! struct DummyConfig; 40 | //! 41 | //! // Implement Param for DummyConfig to return Vec 42 | //! impl Param> for DummyConfig { 43 | //! fn param(&self) -> Vec { 44 | //! vec![] 45 | //! } 46 | //! } 47 | //! impl Param for DummyConfig { 48 | //! fn param(&self) -> HttpServerTimeout { 49 | //! HttpServerTimeout::default() 50 | //! } 51 | //! } 52 | //! 53 | //! let config = DummyConfig; 54 | //! 
let stacks = FactoryStack::new(config) 55 | //! .replace(UpstreamHandler::factory( 56 | //! Default::default(), 57 | //! Default::default(), 58 | //! )) 59 | //! .push(ContentHandler::layer()) 60 | //! .push(RewriteAndRouteHandler::layer()) 61 | //! .push(ConnectionReuseHandler::layer()) 62 | //! .push(HttpCoreService::layer()) 63 | //! .push(H2Detect::layer()); 64 | //! 65 | //! // Use the service to handle HTTP requests 66 | //! ``` 67 | //! # Error Handling 68 | //! 69 | //! - Decoding errors for incoming requests result in 400 Bad Request responses 70 | //! - Encoding errors for outgoing responses result in 500 Internal Server Error responses 71 | //! 72 | //! # Performance Considerations 73 | //! 74 | //! - Content encoding/decoding is only performed when necessary (i.e., non-identity encoding) 75 | //! - The handler avoids unnecessary allocations and copies where possible 76 | use std::fmt::Debug; 77 | 78 | use http::{Request, StatusCode}; 79 | use monoio_http::common::{ 80 | body::{BodyEncodeExt, FixedBody}, 81 | response::Response, 82 | }; 83 | use monolake_core::http::{HttpHandler, ResponseWithContinue}; 84 | use service_async::{ 85 | layer::{layer_fn, FactoryLayer}, 86 | AsyncMakeService, MakeService, Service, 87 | }; 88 | 89 | use crate::http::generate_response; 90 | 91 | /// Handles content encoding and decoding for HTTP requests and responses. 92 | /// 93 | /// `ContentHandler` is responsible for: 94 | /// 1. Decoding the content of incoming requests based on their Content-Encoding header. 95 | /// 2. Encoding the content of outgoing responses based on the client's Accept-Encoding preferences. 96 | /// 97 | /// It wraps an inner handler and preprocesses requests before passing them to the inner handler, 98 | /// as well as postprocessing responses from the inner handler. For implementation details and 99 | /// example usage, see the [module level documentation](crate::http::handlers::content_handler). 
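Before touching a body, `call` (further down) inspects `Content-Encoding`, `Accept-Encoding`, and `Content-Length` and skips the whole transcoding path for empty or identity-encoded payloads. The sketch below isolates just that header inspection; `EncodingPlan` and `plan_encoding` are illustrative names, and in the real handler the response is only re-encoded after the request body has been successfully decoded.

```rust
use http::{header, HeaderMap};

// Illustrative summary of the headers ContentHandler consults; not crate API.
struct EncodingPlan {
    decode_with: Option<String>, // Content-Encoding to undo on the request
    encode_with: Option<String>, // Accept-Encoding to apply to the response
}

fn plan_encoding(headers: &HeaderMap) -> EncodingPlan {
    let header_str = |name: header::HeaderName| {
        headers
            .get(name)
            .and_then(|v| v.to_str().ok())
            .unwrap_or("identity")
            .to_string()
    };
    let content_encoding = header_str(header::CONTENT_ENCODING);
    let accept_encoding = header_str(header::ACCEPT_ENCODING);
    let content_length: usize = headers
        .get(header::CONTENT_LENGTH)
        .and_then(|v| v.to_str().ok())
        .and_then(|v| v.parse().ok())
        .unwrap_or(0);

    EncodingPlan {
        // Empty or identity-encoded request bodies are passed through untouched.
        decode_with: (content_length > 0 && content_encoding != "identity")
            .then_some(content_encoding),
        encode_with: (accept_encoding != "identity").then_some(accept_encoding),
    }
}
```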
100 | #[derive(Clone)] 101 | pub struct ContentHandler { 102 | inner: H, 103 | } 104 | 105 | impl Service<(Request, CX)> for ContentHandler 106 | where 107 | H: HttpHandler, 108 | B: BodyEncodeExt + FixedBody, 109 | H::Body: BodyEncodeExt + FixedBody, 110 | B::EncodeDecodeError: Debug, 111 | ::EncodeDecodeError: Debug, 112 | { 113 | type Response = ResponseWithContinue; 114 | type Error = H::Error; 115 | 116 | async fn call(&self, (request, ctx): (Request, CX)) -> Result { 117 | let content_encoding = request 118 | .headers() 119 | .get(http::header::CONTENT_ENCODING) 120 | .and_then(|value: &http::HeaderValue| value.to_str().ok()) 121 | .unwrap_or("identity") 122 | .to_string(); 123 | 124 | let accept_encoding = request 125 | .headers() 126 | .get(http::header::ACCEPT_ENCODING) 127 | .and_then(|value| value.to_str().ok()) 128 | .unwrap_or("identity") 129 | .to_string(); 130 | 131 | let content_length = request 132 | .headers() 133 | .get(http::header::CONTENT_LENGTH) 134 | .and_then(|value| value.to_str().ok()) 135 | .map(|value| value.parse::().unwrap_or_default()) 136 | .unwrap_or_default(); 137 | 138 | if content_length == 0 || content_encoding == "identity" { 139 | let (response, _) = self.inner.handle(request, ctx).await?; 140 | return Ok((response, true)); 141 | } 142 | 143 | let (parts, body) = request.into_parts(); 144 | match body.decode_content(content_encoding).await { 145 | Ok(decodec_data) => { 146 | let req = Request::from_parts(parts, B::fixed_body(Some(decodec_data))); 147 | let (mut response, _) = self.inner.handle(req, ctx).await?; 148 | if accept_encoding != "identity" { 149 | let (parts, body) = response.into_parts(); 150 | match body.encode_content(accept_encoding).await { 151 | Ok(encoded_data) => { 152 | response = 153 | Response::from_parts(parts, H::Body::fixed_body(Some(encoded_data))) 154 | } 155 | Err(e) => { 156 | tracing::error!("Response content encoding failed {e:?}"); 157 | return Ok(( 158 | generate_response(StatusCode::INTERNAL_SERVER_ERROR, false), 159 | true, 160 | )); 161 | } 162 | } 163 | } 164 | Ok((response, true)) 165 | } 166 | Err(e) => { 167 | tracing::error!("Request content decode failed {e:?}"); 168 | Ok((generate_response(StatusCode::BAD_REQUEST, false), true)) 169 | } 170 | } 171 | } 172 | } 173 | 174 | // ContentHandler is a Service and a MakeService. 175 | impl MakeService for ContentHandler 176 | where 177 | F: MakeService, 178 | { 179 | type Service = ContentHandler; 180 | type Error = F::Error; 181 | 182 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 183 | Ok(ContentHandler { 184 | inner: self.inner.make_via_ref(old.map(|o| &o.inner))?, 185 | }) 186 | } 187 | } 188 | 189 | impl AsyncMakeService for ContentHandler { 190 | type Service = ContentHandler; 191 | type Error = F::Error; 192 | 193 | async fn make_via_ref( 194 | &self, 195 | old: Option<&Self::Service>, 196 | ) -> Result { 197 | Ok(ContentHandler { 198 | inner: self.inner.make_via_ref(old.map(|o| &o.inner)).await?, 199 | }) 200 | } 201 | } 202 | 203 | impl ContentHandler { 204 | pub fn layer() -> impl FactoryLayer { 205 | layer_fn(|_: &C, inner| Self { inner }) 206 | } 207 | 208 | /// Returns a factory layer for the `ContentHandler`. 209 | /// 210 | /// This allows the 'ContentHandler to be selectively enabled or 211 | /// disabled based on a configuration at runtime. 
212 | pub fn opt_layer(enabled: bool) -> Option> { 213 | if enabled { 214 | Some(layer_fn(|_: &C, inner| Self { inner })) 215 | } else { 216 | None 217 | } 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /monolake-services/src/http/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | //! HTTP request handling and processing module. 2 | //! 3 | //! This module provides a collection of handlers and services that work together 4 | //! to process HTTP requests in a modular and efficient manner. Each handler implements 5 | //! the `HttpHandler` trait, allowing for flexible composition and nesting within the 6 | //! [`HttpCoreService`](crate::http::HttpCoreService). 7 | //! 8 | //! # Key Components 9 | //! 10 | //! - [`HttpCoreService`](crate::http::HttpCoreService): Core service responsible for handling 11 | //! HTTP/1.1 and HTTP/2 connections, decoding requests, and encoding responses. 12 | //! - [`ConnectionReuseHandler`]: Manages HTTP connection persistence and keep-alive behavior across 13 | //! different HTTP versions. 14 | //! - [`ContentHandler`]: Handles content encoding and decoding for both requests and responses. 15 | //! - [`UpstreamHandler`]: Manages proxying of requests to upstream servers, including load 16 | //! balancing and error handling. 17 | //! - [`RewriteAndRouteHandler`]: Handles request routing based on predefined rules, directing 18 | //! requests to appropriate handlers or upstream servers. 19 | //! 20 | //! # Optional Components 21 | //! 22 | //! - [`OpenIdHandler`]: Provides OpenID Connect authentication functionality (available with the 23 | //! "openid" feature). 24 | //! 25 | //! # HttpHandler Trait 26 | //! 27 | //! All handlers in this module implement the `HttpHandler` trait, which defines a common 28 | //! interface for processing HTTP requests: 29 | //! 30 | //! ```ignore 31 | //! pub trait HttpHandler: SealedT { 32 | //! type Body; 33 | //! type Error; 34 | //! fn handle( 35 | //! &self, 36 | //! request: Request, 37 | //! ctx: CX, 38 | //! ) -> impl Future, Self::Error>>; 39 | //! } 40 | //! ``` 41 | //! 42 | //! This trait allows handlers to be easily composed and nested within the `HttpCoreService`. 43 | //! 44 | //! # Features 45 | //! 46 | //! - Modular design allowing easy composition of handlers in a service stack 47 | //! - Support for HTTP/1.1 and HTTP/2 protocols through `HttpCoreService` 48 | //! - Efficient connection management and keep-alive handling 49 | //! - Content encoding and decoding support 50 | //! - Flexible routing capabilities 51 | //! - Upstream proxying with load balancing 52 | //! - Optional OpenID Connect authentication 53 | //! 54 | //! # Usage 55 | //! 56 | //! Handlers in this module can be composed and nested within the `HttpCoreService`. 57 | //! Here's a basic example: 58 | //! 59 | //! ```rust 60 | //! use monolake_services::{ 61 | //! common::ContextService, 62 | //! http::{ 63 | //! core::HttpCoreService, 64 | //! detect::H2Detect, 65 | //! handlers::{ 66 | //! route::RouteConfig, ConnectionReuseHandler, ContentHandler, RewriteAndRouteHandler, 67 | //! UpstreamHandler, 68 | //! }, 69 | //! HttpServerTimeout, 70 | //! }, 71 | //! }; 72 | //! use service_async::{layer::FactoryLayer, stack::FactoryStack, Param}; 73 | //! 74 | //! // Dummy struct to satisfy Param trait requirements 75 | //! struct DummyConfig; 76 | //! 77 | //! // Implement Param for DummyConfig to return Vec 78 | //! 
impl Param> for DummyConfig { 79 | //! fn param(&self) -> Vec { 80 | //! vec![] 81 | //! } 82 | //! } 83 | //! impl Param for DummyConfig { 84 | //! fn param(&self) -> HttpServerTimeout { 85 | //! HttpServerTimeout::default() 86 | //! } 87 | //! } 88 | //! 89 | //! let config = DummyConfig; 90 | //! let stacks = FactoryStack::new(config) 91 | //! .replace(UpstreamHandler::factory( 92 | //! Default::default(), 93 | //! Default::default(), 94 | //! )) 95 | //! .push(ContentHandler::layer()) 96 | //! .push(RewriteAndRouteHandler::layer()) 97 | //! .push(ConnectionReuseHandler::layer()) 98 | //! .push(HttpCoreService::layer()) 99 | //! .push(H2Detect::layer()); 100 | //! 101 | //! // Use the service to handle HTTP requests 102 | //! ``` 103 | //! # Performance Considerations 104 | //! 105 | //! - Each handler is designed to be efficient and add minimal overhead 106 | //! - The modular design allows for fine-grained control over request processing, enabling 107 | //! optimizations based on specific use cases 108 | //! - `HttpCoreService` efficiently handles both HTTP/1.1 and HTTP/2 protocols 109 | //! 110 | //! # Error Handling 111 | //! 112 | //! - Each handler implements its own error handling strategy 113 | //! - The `RoutingFactoryError` type is exposed for handling routing-specific errors 114 | //! - `HttpCoreService` provides high-level error handling for the entire request lifecycle 115 | //! 116 | //! # Feature Flags 117 | //! 118 | //! - `openid`: Enables the OpenID Connect authentication functionality 119 | pub mod connection_persistence; 120 | pub mod content_handler; 121 | #[cfg(feature = "openid")] 122 | pub mod openid; 123 | pub mod route; 124 | pub mod upstream; 125 | 126 | pub use connection_persistence::ConnectionReuseHandler; 127 | pub use content_handler::ContentHandler; 128 | #[cfg(feature = "openid")] 129 | pub use openid::OpenIdHandler; 130 | pub use route::{RewriteAndRouteHandler, RoutingFactoryError}; 131 | pub use upstream::UpstreamHandler; 132 | -------------------------------------------------------------------------------- /monolake-services/src/http/mod.rs: -------------------------------------------------------------------------------- 1 | //! HTTP protocol handling and services module. 2 | //! 3 | //! This module provides a comprehensive set of components for handling HTTP connections, 4 | //! processing requests, and managing responses. It includes core services, various handlers, 5 | //! protocol detection, and utility functions for working with HTTP. 6 | //! 7 | //! # Key Components 8 | //! 9 | //! ## Submodules 10 | //! 11 | //! - [`core`]: Contains the core HTTP service implementation, including `HttpCoreService`. 12 | //! - [`handlers`]: Provides various HTTP request handlers for different aspects of request 13 | //! processing. 14 | //! - [`detect`]: Implements HTTP version detection functionality. 15 | //! 16 | //! ## Structs and Types 17 | //! 18 | //! - [`HttpCoreService`]: The main service for handling HTTP/1.1 and HTTP/2 connections. 19 | //! - [`HttpServerTimeout`]: Configuration for various HTTP server timeout settings. 20 | //! 21 | //! # Features 22 | //! 23 | //! - Support for both HTTP/1.1 and HTTP/2 protocols 24 | //! - Modular design with separate handlers for different aspects of HTTP processing 25 | //! - HTTP version detection capabilities 26 | //! - Configurable timeout settings for various stages of request handling 27 | //! - Utility functions and constants for common HTTP operations 28 | //! 29 | //! # Performance Considerations 30 | //! 
31 | //! - The core service and handlers are designed for efficient processing of HTTP requests 32 | //! - Connection keep-alive and HTTP/2 multiplexing are supported for improved performance 33 | //! - Version detection allows for optimized handling based on the HTTP version 34 | //! 35 | //! # Error Handling 36 | //! 37 | //! - Each component implements its own error handling strategy 38 | //! - The core service provides high-level error handling for the entire request lifecycle 39 | //! 40 | //! # Customization 41 | //! 42 | //! - The modular design allows for easy extension and customization of HTTP handling behavior 43 | //! - Custom handlers can be implemented and integrated into the `HttpCoreService` 44 | use http::HeaderValue; 45 | use serde::{Deserialize, Serialize}; 46 | 47 | pub use self::core::{HttpCoreService, HttpServerTimeout}; 48 | pub mod handlers; 49 | 50 | pub mod core; 51 | pub mod detect; 52 | pub mod util; 53 | 54 | pub(crate) const CLOSE: &str = "close"; 55 | pub(crate) const KEEPALIVE: &str = "Keep-Alive"; 56 | #[allow(clippy::declare_interior_mutable_const)] 57 | pub(crate) const CLOSE_VALUE: HeaderValue = HeaderValue::from_static(CLOSE); 58 | #[allow(clippy::declare_interior_mutable_const)] 59 | pub(crate) const KEEPALIVE_VALUE: HeaderValue = HeaderValue::from_static(KEEPALIVE); 60 | pub(crate) use util::generate_response; 61 | 62 | #[derive(Debug, Copy, Clone, Default, Deserialize, Serialize)] 63 | #[serde(rename_all = "lowercase")] 64 | pub enum HttpVersion { 65 | Http2, 66 | Http11, 67 | #[default] 68 | Auto, 69 | } 70 | -------------------------------------------------------------------------------- /monolake-services/src/http/util.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | future::Future, 3 | mem::{ManuallyDrop, MaybeUninit}, 4 | ops::DerefMut, 5 | pin::Pin, 6 | task::Poll, 7 | }; 8 | 9 | use http::{HeaderValue, Request, Response, StatusCode}; 10 | use monoio_http::common::body::FixedBody; 11 | use monolake_core::http::{HttpError, HttpHandler, ResponseWithContinue}; 12 | use service_async::Service; 13 | 14 | union Union { 15 | a: ManuallyDrop, 16 | b: ManuallyDrop, 17 | } 18 | 19 | /// AccompanyPair for http decoder and processor. 20 | /// We have to fill payload when process request 21 | /// since inner logic may read chunked body; also 22 | /// fill payload when process response since we 23 | /// may use the request body stream in response 24 | /// body stream. 
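The comment above describes driving an auxiliary future (payload filling) whenever the main decode/process future is polled. A simplified, safe rendition of that idea is sketched below using only the standard library; `with_accompany` is a made-up name, and the staged, union-based design of `AccompanyPairBase` that follows exists precisely to avoid the extra state and intermediate futures this naive version carries.

```rust
use std::{future::Future, pin::pin, task::Poll};

// Poll `accompany` opportunistically while awaiting `main`, then finish it.
// Simplified illustration only; not the implementation used below.
async fn with_accompany<M, A>(main: M, accompany: A) -> (M::Output, A::Output)
where
    M: Future,
    A: Future,
{
    let mut main = pin!(main);
    let mut accompany = pin!(accompany);
    let mut acc_slot = None;

    let main_out = std::future::poll_fn(|cx| {
        // Drive the accompanying future whenever the main future is polled.
        if acc_slot.is_none() {
            if let Poll::Ready(v) = accompany.as_mut().poll(cx) {
                acc_slot = Some(v);
            }
        }
        main.as_mut().poll(cx)
    })
    .await;

    // If the accompanying future has not completed yet, run it to completion.
    let acc_out = match acc_slot {
        Some(v) => v,
        None => accompany.await,
    };
    (main_out, acc_out)
}
```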
25 | pub(crate) struct AccompanyPairBase { 26 | main: MaybeUninit>, 27 | accompany: FACC, 28 | accompany_slot: Option, 29 | } 30 | 31 | impl AccompanyPairBase { 32 | pub(crate) const fn new(accompany: FACC) -> Self { 33 | Self { 34 | main: MaybeUninit::uninit(), 35 | accompany, 36 | accompany_slot: None, 37 | } 38 | } 39 | 40 | pub(crate) fn stage1( 41 | mut self: Pin<&mut Self>, 42 | future1: F1, 43 | ) -> AccompanyPairS1 { 44 | unsafe { 45 | self.as_mut().get_unchecked_mut().main.assume_init_mut().a = ManuallyDrop::new(future1); 46 | } 47 | AccompanyPairS1(self) 48 | } 49 | 50 | pub(crate) fn stage2( 51 | mut self: Pin<&mut Self>, 52 | future2: F2, 53 | ) -> AccompanyPairS2 { 54 | unsafe { 55 | self.as_mut().get_unchecked_mut().main.assume_init_mut().b = ManuallyDrop::new(future2); 56 | } 57 | AccompanyPairS2(self) 58 | } 59 | 60 | pub(crate) const fn stage3(self: Pin<&mut Self>) -> AccompanyPairS3 { 61 | AccompanyPairS3(self) 62 | } 63 | } 64 | 65 | #[repr(transparent)] 66 | pub(crate) struct AccompanyPairS1<'a, F1, F2, FACC, T>( 67 | Pin<&'a mut AccompanyPairBase>, 68 | ); 69 | 70 | impl Drop for AccompanyPairS1<'_, F1, F2, FACC, T> { 71 | fn drop(&mut self) { 72 | unsafe { 73 | ManuallyDrop::drop(&mut self.0.as_mut().get_unchecked_mut().main.assume_init_mut().a); 74 | } 75 | } 76 | } 77 | 78 | impl Future for AccompanyPairS1<'_, F1, F2, FACC, T> 79 | where 80 | F1: Future, 81 | FACC: Future, 82 | { 83 | type Output = F1::Output; 84 | 85 | fn poll( 86 | mut self: std::pin::Pin<&mut Self>, 87 | cx: &mut std::task::Context<'_>, 88 | ) -> Poll { 89 | let this = unsafe { self.0.as_mut().get_unchecked_mut() }; 90 | if this.accompany_slot.is_none() 91 | && let Poll::Ready(t) = unsafe { Pin::new_unchecked(&mut this.accompany) }.poll(cx) 92 | { 93 | this.accompany_slot = Some(t); 94 | } 95 | unsafe { Pin::new_unchecked(this.main.assume_init_mut().a.deref_mut()).poll(cx) } 96 | } 97 | } 98 | 99 | #[repr(transparent)] 100 | pub(crate) struct AccompanyPairS2<'a, F1, F2, FACC, T>( 101 | Pin<&'a mut AccompanyPairBase>, 102 | ); 103 | 104 | impl Drop for AccompanyPairS2<'_, F1, F2, FACC, T> { 105 | fn drop(&mut self) { 106 | unsafe { 107 | ManuallyDrop::drop(&mut self.0.as_mut().get_unchecked_mut().main.assume_init_mut().b); 108 | } 109 | } 110 | } 111 | 112 | impl Future for AccompanyPairS2<'_, F1, F2, FACC, T> 113 | where 114 | F2: Future, 115 | FACC: Future, 116 | { 117 | type Output = F2::Output; 118 | 119 | fn poll( 120 | mut self: std::pin::Pin<&mut Self>, 121 | cx: &mut std::task::Context<'_>, 122 | ) -> Poll { 123 | let this = unsafe { self.0.as_mut().get_unchecked_mut() }; 124 | if this.accompany_slot.is_none() 125 | && let Poll::Ready(t) = unsafe { Pin::new_unchecked(&mut this.accompany) }.poll(cx) 126 | { 127 | this.accompany_slot = Some(t); 128 | } 129 | unsafe { Pin::new_unchecked(this.main.assume_init_mut().b.deref_mut()).poll(cx) } 130 | } 131 | } 132 | 133 | #[repr(transparent)] 134 | pub(crate) struct AccompanyPairS3<'a, F1, F2, FACC, T>( 135 | Pin<&'a mut AccompanyPairBase>, 136 | ); 137 | 138 | impl, T> Future for AccompanyPairS3<'_, F1, F2, FACC, T> { 139 | type Output = FACC::Output; 140 | 141 | fn poll( 142 | mut self: std::pin::Pin<&mut Self>, 143 | cx: &mut std::task::Context<'_>, 144 | ) -> Poll { 145 | let this = unsafe { self.0.as_mut().get_unchecked_mut() }; 146 | if let Some(t) = this.accompany_slot.take() { 147 | return Poll::Ready(t); 148 | } 149 | unsafe { Pin::new_unchecked(&mut this.accompany) }.poll(cx) 150 | } 151 | } 152 | 153 | pub(crate) fn 
generate_response(status_code: StatusCode, close: bool) -> Response { 154 | let mut resp = Response::builder(); 155 | resp = resp.status(status_code); 156 | let headers = resp.headers_mut().unwrap(); 157 | if close { 158 | headers.insert(http::header::CONNECTION, super::CLOSE_VALUE); 159 | } 160 | headers.insert(http::header::CONTENT_LENGTH, HeaderValue::from_static("0")); 161 | resp.body(B::fixed_body(None)).unwrap() 162 | } 163 | 164 | pub struct HttpErrorResponder(pub T); 165 | impl Service<(Request, CX)> for HttpErrorResponder 166 | where 167 | T: HttpHandler, 168 | T::Error: HttpError, 169 | { 170 | type Response = ResponseWithContinue; 171 | type Error = T::Error; 172 | 173 | async fn call(&self, (req, cx): (Request, CX)) -> Result { 174 | match self.0.handle(req, cx).await { 175 | Ok(resp) => Ok(resp), 176 | Err(e) => { 177 | if let Some(r) = e.to_response() { 178 | Ok((r, true)) 179 | } else { 180 | Err(e) 181 | } 182 | } 183 | } 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /monolake-services/src/hyper/mod.rs: -------------------------------------------------------------------------------- 1 | //! Hyper-based HTTP core service for handling client connections. 2 | //! 3 | //! This module provides a high-performance, asynchronous HTTP service built on top of 4 | //! the Hyper library. It's designed to work with monoio's asynchronous runtime and 5 | //! supports flexible handler composition through the `HttpHandler` trait. 6 | //! 7 | //! # Key Components 8 | //! 9 | //! - [`HyperCoreService`](HyperCoreService): The main service component responsible for handling 10 | //! HTTP connections using Hyper. It can be composed of handlers implementing the `HttpHandler` 11 | //! trait. 12 | //! - [`HyperCoreFactory`](HyperCoreFactory): Factory for creating `HyperCoreService` instances. 13 | //! - [`HyperCoreError`](HyperCoreError): Error type for `HyperCoreService` operations. 14 | //! 15 | //! # Features 16 | //! 17 | //! - Built on Hyper for robust HTTP protocol support 18 | //! - Integration with monoio's asynchronous runtime 19 | //! - Composable design allowing a stack of `HttpHandler` implementations 20 | //! - Configurable through Hyper's `Builder` 21 | //! 22 | //! # Usage 23 | //! 24 | //! `HyperCoreService` is typically used as part of a larger service stack. Here's a basic example: 25 | //! 26 | //! ```ignore 27 | //! use service_async::{layer::FactoryLayer, stack::FactoryStack}; 28 | //! 29 | //! use crate::http::HyperCoreService; 30 | //! 31 | //! let config = Config { /* ... */ }; 32 | //! let stack = FactoryStack::new(config) 33 | //! .push(HyperCoreService::layer()) 34 | //! // ... other handlers implementing HttpHandler ... 35 | //! ; 36 | //! 37 | //! let service = stack.make_async().await.unwrap(); 38 | //! // Use the service to handle incoming HTTP connections 39 | //! ``` 40 | //! 41 | //! # Handler Composition 42 | //! 43 | //! `HyperCoreService` can be composed of multiple handlers implementing the `HttpHandler` trait. 44 | //! This allows for a flexible and modular approach to request processing. Handlers can be 45 | //! chained together to form a processing pipeline, each handling a specific aspect of the 46 | //! HTTP request/response cycle. 47 | //! 48 | //! # Performance Considerations 49 | //! 50 | //! - Leverages Hyper's efficient HTTP implementation 51 | //! - Uses monoio's async I/O operations for improved performance 52 | //! 
- Supports connection keep-alive and pipelining through Hyper 53 | use std::{error::Error, future::Future, rc::Rc}; 54 | 55 | use http::{Request, Response}; 56 | use hyper::body::{Body, Incoming}; 57 | use hyper_util::server::conn::auto::Builder; 58 | use monoio::io::{ 59 | poll_io::{AsyncRead, AsyncWrite}, 60 | IntoPollIo, 61 | }; 62 | pub use monoio_compat::hyper::{MonoioExecutor, MonoioIo}; 63 | use monolake_core::http::HttpHandler; 64 | use service_async::{ 65 | layer::{layer_fn, FactoryLayer}, 66 | AsyncMakeService, MakeService, Service, 67 | }; 68 | 69 | use crate::tcp::Accept; 70 | 71 | pub struct HyperCoreService { 72 | handler_chain: Rc, 73 | builder: Builder, 74 | } 75 | 76 | /// Hyper-based HTTP core service supporting handler composition. 77 | /// 78 | /// `HyperCoreService` is responsible for handling HTTP connections using Hyper, 79 | /// and can be composed of a chain of handlers implementing the `HttpHandler` trait. 80 | impl HyperCoreService { 81 | #[inline] 82 | pub fn new(handler_chain: H, builder: Builder) -> Self { 83 | Self { 84 | handler_chain: Rc::new(handler_chain), 85 | builder, 86 | } 87 | } 88 | } 89 | 90 | #[derive(thiserror::Error, Debug)] 91 | pub enum HyperCoreError { 92 | #[error("io error: {0:?}")] 93 | Io(#[from] std::io::Error), 94 | #[error("hyper error: {0:?}")] 95 | Hyper(#[from] Box), 96 | } 97 | 98 | impl Service> for HyperCoreService 99 | where 100 | Stream: IntoPollIo, 101 | Stream::PollIo: AsyncRead + AsyncWrite + Unpin + 'static, 102 | H: HttpHandler + 'static, 103 | H::Error: Into>, 104 | H::Body: Body, 105 | ::Error: Into>, 106 | CX: Clone + 'static, 107 | { 108 | type Response = (); 109 | type Error = HyperCoreError; 110 | 111 | async fn call(&self, (io, cx): Accept) -> Result { 112 | tracing::trace!("hyper core handling io"); 113 | let poll_io = io.into_poll_io()?; 114 | let io = MonoioIo::new(poll_io); 115 | let service = HyperServiceWrapper { 116 | cx, 117 | handler_chain: self.handler_chain.clone(), 118 | }; 119 | self.builder 120 | .serve_connection(io, service) 121 | .await 122 | .map_err(Into::into) 123 | } 124 | } 125 | 126 | struct HyperServiceWrapper { 127 | cx: CX, 128 | handler_chain: Rc, 129 | } 130 | 131 | impl hyper::service::Service> for HyperServiceWrapper 132 | where 133 | H: HttpHandler + 'static, 134 | CX: Clone + 'static, 135 | { 136 | type Response = Response; 137 | type Error = H::Error; 138 | type Future = impl Future> + 'static; 139 | 140 | #[inline] 141 | fn call(&self, req: Request) -> Self::Future { 142 | let chain = self.handler_chain.clone(); 143 | let cx = self.cx.clone(); 144 | async move { chain.handle(req, cx).await.map(|r| r.0) } 145 | } 146 | } 147 | 148 | /// Factory for creating `HyperCoreService` instances. 
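Both `layer()` and `layer_with_builder()` below take or construct a `hyper_util` auto `Builder`, so protocol options can be tuned before the factory is built. The following sketch, in the same spirit as the `ignore` examples used elsewhere in this crate's docs, shows supplying a pre-configured builder; `config` and the pushed handler factories are application-specific placeholders.

```rust
// Sketch only: assumes a `config` value and handler layers from your own stack.
use hyper_util::server::conn::auto::Builder;
use monolake_services::hyper::{HyperCoreService, MonoioExecutor};
use service_async::stack::FactoryStack;

let builder = Builder::new(MonoioExecutor); // same executor the default layer() uses
let stack = FactoryStack::new(config)
    // ... handler layers implementing HttpHandler ...
    .push(HyperCoreService::layer_with_builder(builder));
```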
149 | pub struct HyperCoreFactory { 150 | factory_chain: F, 151 | builder: Builder, 152 | } 153 | 154 | impl MakeService for HyperCoreFactory { 155 | type Service = HyperCoreService; 156 | type Error = F::Error; 157 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 158 | let handler_chain = self 159 | .factory_chain 160 | .make_via_ref(old.map(|o| o.handler_chain.as_ref()))?; 161 | Ok(HyperCoreService::new(handler_chain, self.builder.clone())) 162 | } 163 | } 164 | 165 | impl AsyncMakeService for HyperCoreFactory { 166 | type Service = HyperCoreService; 167 | type Error = F::Error; 168 | 169 | async fn make_via_ref( 170 | &self, 171 | old: Option<&Self::Service>, 172 | ) -> Result { 173 | let handler_chain = self 174 | .factory_chain 175 | .make_via_ref(old.map(|o| o.handler_chain.as_ref())) 176 | .await?; 177 | Ok(HyperCoreService::new(handler_chain, self.builder.clone())) 178 | } 179 | } 180 | 181 | impl HyperCoreService { 182 | pub fn layer() -> impl FactoryLayer> { 183 | layer_fn(|_c: &C, inner| HyperCoreFactory { 184 | factory_chain: inner, 185 | builder: Builder::new(MonoioExecutor), 186 | }) 187 | } 188 | 189 | pub fn layer_with_builder( 190 | builder: Builder, 191 | ) -> impl FactoryLayer> { 192 | layer_fn(move |_c: &C, inner| HyperCoreFactory { 193 | factory_chain: inner, 194 | builder: builder.clone(), 195 | }) 196 | } 197 | 198 | #[inline] 199 | pub fn builder(&mut self) -> &mut Builder { 200 | &mut self.builder 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /monolake-services/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![feature(let_chains)] 2 | #![feature(impl_trait_in_assoc_type)] 3 | //! # Monolake Services 4 | //! 5 | //! `monolake-services` is a crate that provides a collection of services 6 | //! for building high-performance, modular HTTP servers and Thrift services. It offers a range of 7 | //! components that can be easily combined with custom user-created services to create robust and 8 | //! flexible server applications. 9 | //! 10 | //! ## Key Components 11 | //! 12 | //! ### HTTP Services 13 | //! 14 | //! #### Connection Handlers 15 | //! 16 | //! - [`HttpCoreService`](http::core): The main service for handling HTTP/1.1 and HTTP/2 17 | //! connections. 18 | //! - [`H2Detect`](http::detect): Automatic detection of HTTP protocol versions. #[cfg_attr(feature 19 | //! = "hyper", doc = "- [`HyperCoreService`](hyper::HyperCoreService): A high-performance HTTP 20 | //! service built on top of the Hyper library.")] 21 | //! 22 | //! #### Request Handlers 23 | //! 24 | //! - [`ConnectionReuseHandler`](http::handlers::connection_persistence): Manages HTTP connection 25 | //! persistence and keep-alive behavior. It ensures proper handling of connection lifecycles 26 | //! across different HTTP versions. 27 | //! 28 | //! - [`ContentHandler`](http::handlers::content_handler): Handles content encoding and decoding for 29 | //! requests and responses. It supports various compression methods and ensures efficient data 30 | //! transfer. 31 | //! 32 | //! - [`RewriteAndRouteHandler`](http::handlers::route): Directs requests to appropriate handlers 33 | //! based on predefined rules. It allows for flexible URL-based routing and request dispatching. 34 | //! 35 | //! - [`UpstreamHandler`](http::handlers::upstream): Manages proxying of requests to upstream 36 | //! servers. 
It supports load balancing, connection pooling, and error handling for backend 37 | //! services. 38 | //! 39 | //! - [`OpenIdHandler`](crate::http::handlers::OpenIdHandler): Provides OpenID Connect 40 | //! authentication (optional feature). It enables secure user authentication using OpenID Connect 41 | //! protocols. 42 | //! 43 | //! ### Thrift Services 44 | //! 45 | //! - [`TtheaderCoreService`](thrift::ttheader): Core service for handling Thrift THeader protocol 46 | //! connections. 47 | //! - [`ProxyHandler`](thrift::handlers::proxy): Proxy service for routing Thrift requests to 48 | //! upstream servers. 49 | //! 50 | //! The Thrift module provides components for handling Thrift protocol communications, including 51 | //! core services for processing Thrift requests and proxy handlers for routing requests 52 | //! to upstream Thrift servers. It supports the THeader protocol, connection pooling, and 53 | //! integrates seamlessly with the `service_async` framework. 54 | //! 55 | //! ### Common Services 56 | //! 57 | //! - [`CatchPanicService`](common::CatchPanicService): Catches panics in inner services and 58 | //! converts them to errors. It enhances system stability by preventing panics from crashing the 59 | //! entire server. 60 | //! 61 | //! - [`ContextService`](common::ContextService): Inserts context information into the request 62 | //! processing pipeline. It works with `certain_map` for flexible and type-safe context 63 | //! management. 64 | //! 65 | //! - [`TimeoutService`](common::TimeoutService) Adds configurable timeout functionality to any 66 | //! inner service. It ensures that long-running operations don't block the server indefinitely. 67 | //! 68 | //! ### TLS Service 69 | //! 70 | //! - [`UnifiedTlsService`](crate::tls): Provides a unified interface for different TLS 71 | //! implementations (Rustls and Native TLS). It allows for flexible TLS configuration and seamless 72 | //! switching between TLS backends. 73 | //! 74 | //! ### Proxy Protocol Service 75 | //! 76 | //! - [`ProxyProtocolService`](crate::proxy_protocol::ProxyProtocolService): Handles PROXY protocol 77 | //! headers in incoming connections. It preserves client IP information when operating behind load 78 | //! balancers or proxies. 79 | //! 80 | //! ## Service Trait 81 | //! 82 | //! All services in this crate implement the `Service` trait, which is defined as follows: 83 | //! 84 | //! ```ignore 85 | //! pub trait Service { 86 | //! type Response; 87 | //! type Error; 88 | //! 89 | //! fn call(&self, req: Request) -> impl Future>; 90 | //! } 91 | //! ``` 92 | //! 93 | //! This trait allows for efficient and flexible composition of services, enabling 94 | //! the creation of complex processing pipelines. 95 | //! 96 | //! ## Features 97 | //! 98 | //! - Modular design allowing easy composition of services 99 | //! - Support for HTTP/1.x and HTTP/2 protocols 100 | //! - Support for Thrift THeader protocol 101 | //! - Flexible routing and request processing capabilities 102 | //! - TLS support with multiple backend options 103 | //! - PROXY protocol support for preserving client IP information 104 | //! 105 | //! ## Usage Example 106 | //! 107 | //! Here's a basic example of how to compose these services: 108 | //! 109 | //! ```ignore 110 | //! use monolake_services::{ 111 | //! HttpCoreService, H2Detect, ConnectionReuseHandler, 112 | //! ContentHandler, RewriteAndRouteHandler, UpstreamHandler, UnifiedTlsService, 113 | //! ProxyProtocolService, HyperCoreService 114 | //! }; 115 | //! 
use service_async::{layer::FactoryLayer, stack::FactoryStack}; 116 | //! 117 | //! let config = ServerConfig { 118 | //! // ... configuration options ... 119 | //! }; 120 | //! 121 | //! let stack = FactoryStack::new(config) 122 | //! .push(UpstreamHandler::layer()) 123 | //! .push(RewriteAndRouteHandler::layer()) 124 | //! .push(ContentHandler::layer()) 125 | //! .push(ConnectionReuseHandler::layer()) 126 | //! .push(HyperCoreService::layer()); 127 | //! .push(H2Detect::layer()) 128 | //! .push(UnifiedTlsService::layer()) 129 | //! .push(ContextService::layer()); 130 | //! 131 | //! 132 | //! let service = stack.make_async().await.unwrap(); 133 | //! // Use the service to handle incoming connections 134 | //! ``` 135 | //! 136 | //! ## Performance Considerations 137 | //! 138 | //! - Efficient async I/O operations using the `monoio` runtime 139 | //! - Connection pooling and keep-alive support for improved resource utilization 140 | //! - Optimized routing and request handling 141 | //! - Support for HTTP/2 multiplexing 142 | //! - Efficient Thrift request processing and proxying 143 | //! 144 | //! ## Customization 145 | //! 146 | //! The modular nature of the services allows for easy extension and customization. 147 | //! Users can implement their own services that conform to the `Service` trait 148 | //! and integrate them seamlessly into the processing pipeline. 149 | //! 150 | //! ## Additional Resources 151 | //! 152 | //! For more detailed information on each component, please refer to the documentation 153 | //! of individual modules and the examples directory in the crate's repository. 154 | pub mod common; 155 | pub mod http; 156 | pub mod tcp; 157 | pub mod thrift; 158 | 159 | #[cfg(feature = "proxy-protocol")] 160 | pub mod proxy_protocol; 161 | 162 | #[cfg(feature = "tls")] 163 | pub mod tls; 164 | 165 | #[cfg(feature = "hyper")] 166 | pub mod hyper; 167 | -------------------------------------------------------------------------------- /monolake-services/src/proxy_protocol/mod.rs: -------------------------------------------------------------------------------- 1 | //! Proxy Protocol service for handling PROXY protocol headers in incoming connections. 2 | //! 3 | //! This module provides functionality to parse and handle PROXY protocol headers 4 | //! (version 1 and 2) in incoming TCP connections. It's designed to work seamlessly 5 | //! with the `service_async` framework and can be easily integrated into a service stack. 6 | //! 7 | //! The PROXY protocol allows for the preservation of client IP address information 8 | //! when passing connections through proxies or load balancers. 9 | //! 10 | //! # Key Components 11 | //! 12 | //! - [`ProxyProtocolService`]: The main service component responsible for parsing PROXY protocol 13 | //! headers and forwarding the connection to an inner service. 14 | //! - [`ProxyProtocolServiceFactory`]: Factory for creating `ProxyProtocolService` instances. 15 | //! 16 | //! # Features 17 | //! 18 | //! - Support for both PROXY protocol version 1 and 2 19 | //! - Efficient parsing of PROXY protocol headers 20 | //! - Preservation of original client IP information 21 | //! - Support for IPv4 and IPv6 addresses 22 | //! 23 | //! # Performance Considerations 24 | //! 25 | //! - Efficient parsing with minimal allocations 26 | //! - Uses a fixed-size buffer to limit memory usage 27 | //! - Handles both PROXY and non-PROXY protocol connections gracefully 28 | //! 29 | //! # References 30 | //! 31 | //! 
- [PROXY Protocol Specification](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) 32 | 33 | use std::{fmt::Display, net::SocketAddr}; 34 | 35 | use monoio::{ 36 | buf::IoBufMut, 37 | io::{AsyncReadRent, AsyncWriteRent, PrefixedReadIo}, 38 | }; 39 | use monolake_core::{context::RemoteAddr, listener::AcceptedAddr, AnyError}; 40 | use proxy_protocol::{parse, version1, version2, ParseError, ProxyHeader}; 41 | use service_async::{ 42 | layer::{layer_fn, FactoryLayer}, 43 | AsyncMakeService, MakeService, ParamSet, Service, 44 | }; 45 | 46 | use crate::tcp::Accept; 47 | 48 | // Ref: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt 49 | // V1 max length is 107-byte. 50 | const V1HEADER: &[u8; 6] = b"PROXY "; 51 | // V2 max length is 14+216 = 230 bytes. 52 | const V2HEADER: &[u8; 12] = &[ 53 | 0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A, 54 | ]; 55 | 56 | /// Service that handles PROXY protocol headers in incoming connections. 57 | /// 58 | /// `ProxyProtocolService` is responsible for: 59 | /// 1. Detecting and parsing PROXY protocol headers (v1 and v2) in incoming connections. 60 | /// 2. Extracting client IP information from the PROXY protocol header. 61 | /// 3. Forwarding the connection to an inner service with the extracted information. 62 | /// 63 | /// If a connection does not use the PROXY protocol, it's passed through unchanged. 64 | pub struct ProxyProtocolService { 65 | inner: T, 66 | } 67 | 68 | impl Service<(S, CX)> for ProxyProtocolService 69 | where 70 | S: AsyncReadRent + AsyncWriteRent, 71 | T: Service>>, CX::Transformed>>, 72 | T::Error: Into + Display, 73 | CX: ParamSet>, 74 | { 75 | type Response = T::Response; 76 | type Error = AnyError; 77 | 78 | async fn call(&self, (mut stream, ctx): Accept) -> Result { 79 | const MAX_HEADER_SIZE: usize = 230; 80 | let mut buffer = Vec::with_capacity(MAX_HEADER_SIZE); 81 | let mut pos = 0; 82 | 83 | // read at-least 1 byte 84 | let (res, buf) = stream 85 | .read(unsafe { buffer.slice_mut_unchecked(0..MAX_HEADER_SIZE) }) 86 | .await; 87 | buffer = buf.into_inner(); 88 | pos += res.map_err(AnyError::from)?; 89 | // match version magic header 90 | let parsed = if let Some(target_header) = match buffer[0] { 91 | b'P' => { 92 | let end = pos.min(V1HEADER.len()); 93 | if buffer[1..end] == V1HEADER[1..end] { 94 | Some(&V1HEADER[..]) 95 | } else { 96 | tracing::warn!("proxy-protocol: v1 magic only partly matched"); 97 | None 98 | } 99 | } 100 | 0x0D => { 101 | let end = pos.min(V2HEADER.len()); 102 | if buffer[1..end] == V2HEADER[1..end] { 103 | Some(&V2HEADER[..]) 104 | } else { 105 | tracing::warn!("proxy-protocol: v2 magic only partly matched"); 106 | None 107 | } 108 | } 109 | _ => None, 110 | } { 111 | // loop {parse; read; check_full;} 112 | let header = loop { 113 | let mut cursor = std::io::Cursor::new(&buffer); 114 | let e = match parse(&mut cursor) { 115 | Ok(header) => break Ok((header, cursor.position())), 116 | // data is not enough to parse version, we should read again 117 | Err( 118 | e @ ParseError::NotProxyHeader 119 | | e @ ParseError::Version1 { 120 | source: version1::ParseError::UnexpectedEof, 121 | } 122 | | e @ ParseError::Version2 { 123 | source: version2::ParseError::UnexpectedEof, 124 | }, 125 | ) => e, 126 | Err(e) => break Err(e), 127 | }; 128 | 129 | let buf = unsafe { buffer.slice_mut_unchecked(pos..MAX_HEADER_SIZE) }; 130 | let (res, buf) = stream.read(buf).await; 131 | buffer = buf.into_inner(); 132 | let read = res.map_err(AnyError::from)?; 133 | // if we are 
reading magic header, we have to check if the magic header matches 134 | // because ParseError::NotProxyHeader does not always mean data is not enough 135 | if pos < target_header.len() { 136 | let end = target_header.len().min(pos + read); 137 | if buffer[pos..end] != target_header[pos..end] { 138 | break Err(e); 139 | } 140 | } 141 | pos += read; 142 | if pos == MAX_HEADER_SIZE { 143 | return Err(ParseError::NotProxyHeader.into()); 144 | } 145 | }; 146 | Some(header) 147 | } else { 148 | tracing::debug!("proxy-protocol: not proxy protocol at first glance"); 149 | None 150 | }; 151 | 152 | let mut cursor = std::io::Cursor::new(buffer); 153 | let remote_addr = match parsed { 154 | Some(Ok((header, idx))) => { 155 | // advance proxy-protocol length on success parsing 156 | cursor.set_position(idx); 157 | match header { 158 | ProxyHeader::Version1 { 159 | addresses: version1::ProxyAddresses::Ipv4 { source, .. }, 160 | } 161 | | ProxyHeader::Version2 { 162 | addresses: version2::ProxyAddresses::Ipv4 { source, .. }, 163 | .. 164 | } => Some(RemoteAddr(AcceptedAddr::from(SocketAddr::from(source)))), 165 | ProxyHeader::Version1 { 166 | addresses: version1::ProxyAddresses::Ipv6 { source, .. }, 167 | } 168 | | ProxyHeader::Version2 { 169 | addresses: version2::ProxyAddresses::Ipv6 { source, .. }, 170 | .. 171 | } => Some(RemoteAddr(AcceptedAddr::from(SocketAddr::from(source)))), 172 | _ => { 173 | tracing::warn!("proxy protocol get source failed"); 174 | None 175 | } 176 | } 177 | } 178 | _ => None, 179 | }; 180 | 181 | let ctx = ctx.param_set(remote_addr); 182 | let prefix_io = PrefixedReadIo::new(stream, cursor); 183 | 184 | self.inner 185 | .call((prefix_io, ctx)) 186 | .await 187 | .map_err(|e| e.into()) 188 | } 189 | } 190 | 191 | pub struct ProxyProtocolServiceFactory { 192 | inner: F, 193 | } 194 | 195 | impl ProxyProtocolServiceFactory { 196 | pub fn layer() -> impl FactoryLayer { 197 | layer_fn(|_: &C, inner| ProxyProtocolServiceFactory { inner }) 198 | } 199 | } 200 | 201 | impl MakeService for ProxyProtocolServiceFactory { 202 | type Service = ProxyProtocolService; 203 | type Error = F::Error; 204 | 205 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 206 | Ok(ProxyProtocolService { 207 | inner: self.inner.make_via_ref(old.map(|o| &o.inner))?, 208 | }) 209 | } 210 | } 211 | 212 | impl AsyncMakeService for ProxyProtocolServiceFactory { 213 | type Service = ProxyProtocolService; 214 | type Error = F::Error; 215 | 216 | async fn make_via_ref( 217 | &self, 218 | old: Option<&Self::Service>, 219 | ) -> Result { 220 | Ok(ProxyProtocolService { 221 | inner: self.inner.make_via_ref(old.map(|o| &o.inner)).await?, 222 | }) 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /monolake-services/src/tcp/echo.rs: -------------------------------------------------------------------------------- 1 | use std::{convert::Infallible, io}; 2 | 3 | use monoio::io::{AsyncReadRent, AsyncWriteRent, AsyncWriteRentExt}; 4 | use service_async::{ 5 | layer::{layer_fn, FactoryLayer}, 6 | AsyncMakeService, MakeService, Param, Service, 7 | }; 8 | 9 | pub struct EchoService { 10 | buffer_size: usize, 11 | } 12 | 13 | impl Service for EchoService 14 | where 15 | S: AsyncReadRent + AsyncWriteRent, 16 | { 17 | type Response = (); 18 | type Error = io::Error; 19 | 20 | async fn call(&self, mut io: S) -> Result { 21 | let mut buffer = Vec::with_capacity(self.buffer_size); 22 | loop { 23 | let (mut r, buf) = io.read(buffer).await; 24 | if r? 
== 0 { 25 | break; 26 | } 27 | (r, buffer) = io.write_all(buf).await; 28 | r?; 29 | } 30 | tracing::info!("tcp relay finished successfully"); 31 | Ok(()) 32 | } 33 | } 34 | 35 | impl MakeService for EchoService { 36 | type Service = Self; 37 | type Error = Infallible; 38 | 39 | fn make_via_ref(&self, _old: Option<&Self::Service>) -> Result { 40 | Ok(EchoService { 41 | buffer_size: self.buffer_size, 42 | }) 43 | } 44 | } 45 | 46 | impl AsyncMakeService for EchoService { 47 | type Service = Self; 48 | type Error = Infallible; 49 | 50 | async fn make_via_ref( 51 | &self, 52 | _old: Option<&Self::Service>, 53 | ) -> Result { 54 | Ok(EchoService { 55 | buffer_size: self.buffer_size, 56 | }) 57 | } 58 | } 59 | 60 | #[derive(Debug, Clone)] 61 | pub struct EchoConfig { 62 | pub buffer_size: usize, 63 | } 64 | 65 | impl Default for EchoConfig { 66 | fn default() -> Self { 67 | Self { buffer_size: 4096 } 68 | } 69 | } 70 | 71 | impl EchoService { 72 | pub fn layer() -> impl FactoryLayer 73 | where 74 | C: Param, 75 | { 76 | layer_fn(|c: &C, ()| Self { 77 | buffer_size: c.param().buffer_size, 78 | }) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /monolake-services/src/tcp/mod.rs: -------------------------------------------------------------------------------- 1 | //! Tcp specific Services(Under progress) 2 | pub mod echo; 3 | pub mod proxy; 4 | 5 | pub type Accept = (Stream, Ctx); 6 | -------------------------------------------------------------------------------- /monolake-services/src/tcp/proxy.rs: -------------------------------------------------------------------------------- 1 | //! TODO 2 | -------------------------------------------------------------------------------- /monolake-services/src/thrift/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | //! Thrift specific handlers 2 | pub mod proxy; 3 | pub use proxy::ProxyHandler; 4 | -------------------------------------------------------------------------------- /monolake-services/src/thrift/handlers/proxy.rs: -------------------------------------------------------------------------------- 1 | //! Thrift proxy handler for routing and forwarding Thrift requests to upstream servers. 2 | //! 3 | //! This module provides a high-performance, asynchronous Thrift proxy service that handles 4 | //! routing and forwarding of Thrift requests to configured upstream servers. It is designed 5 | //! to work with monoio's asynchronous runtime and supports connection pooling for efficient 6 | //! resource utilization. 7 | //! 8 | //! # Key Components 9 | //! 10 | //! - [`ProxyHandler`]: The main service component responsible for proxying Thrift requests to 11 | //! upstream servers based on configured routes. 12 | //! - [`ProxyHandlerFactory`]: Factory for creating `ProxyHandler` instances. 13 | //! - [`PoolThriftConnector`]: A pooled connector for managing Thrift connections to upstream 14 | //! servers. 15 | //! 16 | //! # Features 17 | //! 18 | //! - Support for Thrift THeader protocol 19 | //! - Configurable routing of requests to upstream servers 20 | //! - Connection pooling for efficient resource management 21 | //! - Integration with `service_async` for easy composition in service stacks 22 | //! - Support for both TCP and Unix socket connections to upstream servers 23 | //! 24 | //! # Usage 25 | //! 26 | //! `ProxyHandler` is typically used as part of a larger service stack. Here's a basic example: 27 | //! 28 | //! ```ignore 29 | //! 
use service_async::{layer::FactoryLayer, stack::FactoryStack}; 30 | //! 31 | //! use crate::thrift::ProxyHandler; 32 | //! 33 | //! let config = vec![RouteConfig { /* ... */ }]; 34 | //! let stack = FactoryStack::new(config.clone()) 35 | //! .push(ProxyHandler::factory(config)) 36 | //! // ... other layers ... 37 | //! ; 38 | //! 39 | //! let service = stack.make_async().await.unwrap(); 40 | //! // Use the service to handle incoming Thrift requests and proxy them to upstream servers 41 | //! ``` 42 | //! 43 | //! # Performance Considerations 44 | //! 45 | //! - Uses monoio's efficient async I/O operations for improved performance 46 | //! - Implements connection pooling to reduce connection establishment overhead 47 | //! - Efficient request and response handling using the THeader protocol 48 | 49 | use std::io; 50 | 51 | use monoio::io::{sink::SinkExt, stream::Stream}; 52 | use monoio_codec::Framed; 53 | use monoio_thrift::codec::ttheader::{RawPayloadCodec, TTHeaderPayloadCodec}; 54 | use monoio_transports::{ 55 | connectors::{Connector, UnifiedL4Addr, UnifiedL4Connector, UnifiedL4Stream}, 56 | pool::{ConnectorMap, ConnectorMapper, PooledConnector, Reuse, ReuseConnector}, 57 | }; 58 | use monolake_core::{ 59 | context::{PeerAddr, RemoteAddr}, 60 | thrift::{ThriftBody, ThriftRequest, ThriftResponse}, 61 | }; 62 | use serde::{Deserialize, Serialize}; 63 | use service_async::{AsyncMakeService, MakeService, ParamMaybeRef, ParamRef, Service}; 64 | 65 | use crate::common::selector::{ 66 | IntoWeightedEndpoint, LoadBalanceError, LoadBalanceStrategy, LoadBalancer, Select, 67 | }; 68 | 69 | pub type PoolThriftConnector = PooledConnector< 70 | ReuseConnector>, 71 | UnifiedL4Addr, 72 | Reuse>>, 73 | >; 74 | 75 | #[inline] 76 | fn new_connector() -> PoolThriftConnector { 77 | PooledConnector::new_with_default_pool(ReuseConnector(ConnectorMap::new( 78 | UnifiedL4Connector::default(), 79 | ThriftConnectorMapper, 80 | ))) 81 | } 82 | 83 | /// Mapper for creating Thrift-specific connections from generic network connections. 84 | /// 85 | /// `ThriftConnectorMapper` is responsible for wrapping raw network connections with 86 | /// the appropriate Thrift protocol codec (TTHeaderPayloadCodec in this case). 87 | pub struct ThriftConnectorMapper; 88 | impl ConnectorMapper for ThriftConnectorMapper { 89 | type Connection = Framed>; 90 | type Error = E; 91 | 92 | #[inline] 93 | fn map(&self, inner: Result) -> Result { 94 | inner.map(|io| Framed::new(io, TTHeaderPayloadCodec::new(RawPayloadCodec))) 95 | } 96 | } 97 | 98 | /// Thrift proxy handler for routing and forwarding requests to upstream servers. 99 | /// 100 | /// `ProxyHandler` is responsible for receiving Thrift requests, selecting an appropriate 101 | /// upstream server based on configured routes, and forwarding the request to that server. 102 | /// It manages connections to upstream servers using a connection pool for efficiency. 103 | /// For implementation details and example usage, see the 104 | /// [module level documentation](crate::thrift::handlers::proxy). 
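Routes for this proxy are plain serde-deserializable data. The snippet below is a hypothetical configuration, inferred from the serde attributes on `RouteConfig`, `Upstream`, and `Endpoint` defined later in this file; `serde_json` is used purely for illustration, and the accepted spellings for the optional `load_balancer` field depend on `LoadBalanceStrategy`'s own derive, so it is left at its default here.

```rust
use monolake_services::thrift::{handlers::ProxyHandler, RouteConfig};

// Hypothetical route definition; endpoint tagging follows the
// #[serde(tag = "type", content = "value", rename_all = "snake_case")] attribute
// on Endpoint, and `weight` falls back to the default of 1 when omitted.
fn build_thrift_proxy_factory() -> Result<(), serde_json::Error> {
    let raw = r#"{
        "upstreams": [
            { "endpoint": { "type": "socket", "value": "127.0.0.1:9090" } },
            { "endpoint": { "type": "unix", "value": "/tmp/upstream.sock" }, "weight": 2 }
        ]
    }"#;
    let route: RouteConfig = serde_json::from_str(raw)?;
    // The factory can then be pushed into a FactoryStack, as in the module-level
    // example above.
    let _factory = ProxyHandler::factory(route);
    Ok(())
}
```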
105 | pub struct ProxyHandler { 106 | connector: PoolThriftConnector, 107 | endpoints: LoadBalancer, 108 | } 109 | 110 | impl RouteConfig { 111 | fn proxy_handler(&self) -> Result { 112 | Ok(ProxyHandler::new( 113 | new_connector(), 114 | LoadBalancer::try_from_upstreams(self.load_balancer, self.upstreams.clone())?, 115 | )) 116 | } 117 | } 118 | 119 | impl ProxyHandler { 120 | pub fn new(connector: PoolThriftConnector, endpoints: LoadBalancer) -> Self { 121 | ProxyHandler { 122 | connector, 123 | endpoints, 124 | } 125 | } 126 | 127 | pub const fn factory(config: RouteConfig) -> ProxyHandlerFactory { 128 | ProxyHandlerFactory { config } 129 | } 130 | } 131 | 132 | impl Service<(ThriftRequest, CX)> for ProxyHandler 133 | where 134 | CX: ParamRef + ParamMaybeRef>, 135 | { 136 | type Response = ThriftResponse; 137 | type Error = io::Error; // TODO: user error 138 | 139 | async fn call( 140 | &self, 141 | (req, _ctx): (ThriftRequest, CX), 142 | ) -> Result { 143 | self.send_request(req).await 144 | } 145 | } 146 | 147 | impl ProxyHandler { 148 | async fn send_request( 149 | &self, 150 | req: ThriftRequest, 151 | ) -> Result, io::Error> { 152 | let endpoint = self.endpoints.select(&req).unwrap(); 153 | let key = match endpoint { 154 | Endpoint::Socket(addr) => UnifiedL4Addr::Tcp(*addr), 155 | Endpoint::Unix(path) => UnifiedL4Addr::Unix(path.clone()), 156 | }; 157 | let mut io = match self.connector.connect(key).await { 158 | Ok(conn) => conn, 159 | Err(e) => { 160 | tracing::info!("connect upstream error: {:?}", e); 161 | return Err(e); 162 | } 163 | }; 164 | 165 | if let Err(e) = io.send_and_flush(req).await { 166 | io.set_reuse(false); 167 | return Err(e); 168 | } 169 | 170 | match io.next().await { 171 | Some(Ok(resp)) => Ok(resp), 172 | Some(Err(e)) => { 173 | io.set_reuse(false); 174 | Err(e) 175 | } 176 | None => { 177 | io.set_reuse(false); 178 | Err(io::ErrorKind::UnexpectedEof.into()) 179 | } 180 | } 181 | } 182 | } 183 | 184 | /// Factory for creating `ProxyHandler` instances. 185 | /// 186 | /// `ProxyHandlerFactory` is responsible for creating new `ProxyHandler` instances, 187 | /// initializing them with the necessary configuration and connection pool. 188 | pub struct ProxyHandlerFactory { 189 | config: RouteConfig, 190 | } 191 | 192 | impl MakeService for ProxyHandlerFactory { 193 | type Service = ProxyHandler; 194 | type Error = LoadBalanceError; 195 | 196 | fn make_via_ref(&self, _old: Option<&Self::Service>) -> Result { 197 | self.config.proxy_handler() 198 | } 199 | } 200 | 201 | impl AsyncMakeService for ProxyHandlerFactory { 202 | type Service = ProxyHandler; 203 | type Error = LoadBalanceError; 204 | 205 | async fn make_via_ref( 206 | &self, 207 | _old: Option<&Self::Service>, 208 | ) -> Result { 209 | self.config.proxy_handler() 210 | } 211 | } 212 | 213 | /// Configuration for a single route in the routing system. 214 | /// 215 | /// This structure defines how a particular path should be routed to one or more upstream servers. 216 | #[derive(Debug, Clone, Serialize, Deserialize)] 217 | pub struct RouteConfig { 218 | #[serde(default)] 219 | pub load_balancer: LoadBalanceStrategy, 220 | 221 | /// A list of upstream servers that can handle requests matching this route. 222 | /// 223 | /// Multiple upstreams allow for load balancing and failover configurations. 224 | pub upstreams: Vec, 225 | } 226 | 227 | const fn default_weight() -> u16 { 228 | 1 229 | } 230 | 231 | /// Configuration for an upstream server. 
232 | /// 233 | /// This structure defines the properties of a single upstream server, 234 | /// including its endpoint, weight for load balancing, and HTTP version. 235 | #[derive(Debug, Clone, Serialize, Deserialize)] 236 | pub struct Upstream { 237 | /// The endpoint of the upstream server. 238 | pub endpoint: Endpoint, 239 | 240 | /// The weight of this upstream for load balancing purposes. 241 | /// 242 | /// A higher weight means the upstream is more likely to be chosen when distributing requests. 243 | /// If not specified, it defaults to a value provided by the `default_weight` function. 244 | #[serde(default = "default_weight")] 245 | pub weight: u16, 246 | } 247 | 248 | impl IntoWeightedEndpoint for Upstream { 249 | type Endpoint = Endpoint; 250 | 251 | #[inline] 252 | fn into_weighted_endpoint(self) -> (Self::Endpoint, u16) { 253 | (self.endpoint, self.weight) 254 | } 255 | } 256 | 257 | /// Represents different types of endpoints for upstream servers. 258 | /// 259 | /// This enum allows for flexibility in specifying how to connect to an upstream server, 260 | /// supporting various protocols and addressing methods. 261 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 262 | #[serde(tag = "type", content = "value", rename_all = "snake_case")] 263 | pub enum Endpoint { 264 | /// A socket address endpoint. 265 | /// 266 | /// This can be used for direct IP:port addressing. 267 | Socket(std::net::SocketAddr), 268 | 269 | /// A Unix domain socket endpoint. 270 | /// 271 | /// This is typically used for local inter-process communication on Unix-like systems. 272 | Unix(std::path::PathBuf), 273 | } 274 | -------------------------------------------------------------------------------- /monolake-services/src/thrift/mod.rs: -------------------------------------------------------------------------------- 1 | //! Thrift protocol support and services for building high-performance Thrift servers and proxies. 2 | //! 3 | //! This module provides components for handling Thrift protocol communications, including 4 | //! core services for processing Thrift requests and proxy handlers for routing requests 5 | //! to upstream Thrift servers. It is designed to work with monoio's asynchronous runtime 6 | //! and the `service_async` framework for efficient and composable service architectures. 7 | //! 8 | //! # Submodules 9 | //! 10 | //! - [`handlers`]: Contains handlers for processing Thrift requests, including proxy functionality. 11 | //! - [`ttheader`]: Implements core services for the Thrift THeader protocol. 12 | //! 13 | //! # Key Components 14 | //! 15 | //! - [`TtheaderCoreService`](ttheader::TtheaderCoreService): Core service for handling Thrift 16 | //! THeader protocol connections. 17 | //! - [`ProxyHandler`](handlers::ProxyHandler): Proxy service for routing Thrift requests to 18 | //! upstream servers. 19 | //! 20 | //! # Features 21 | //! 22 | //! - Support for Thrift THeader protocol 23 | //! - High-performance request processing and proxying 24 | //! - Configurable timeout settings for various stages of request handling 25 | //! - Connection pooling for efficient management of upstream connections 26 | //! - Integration with `service_async` for easy composition in service stacks 27 | //! 28 | //! # Usage 29 | //! 30 | //! Components from this module can be used to build Thrift servers or proxies. Here's a basic 31 | //! example: 32 | //! 33 | //! ```ignore 34 | //! use service_async::{layer::FactoryLayer, stack::FactoryStack}; 35 | //! 36 | //! 
use crate::thrift::{handlers::ProxyHandler, TtheaderCoreService}; 37 | //! 38 | //! let config = Config { /* ... */ }; 39 | //! let routes = vec![RouteConfig { /* ... */ }]; 40 | //! 41 | //! let stack = FactoryStack::new(config) 42 | //! .push(ProxyHandler::factory(routes)) 43 | //! .push(TtheaderCoreService::layer()); 44 | //! 45 | //! let service = stack.make_async().await.unwrap(); 46 | //! // Use the service to handle incoming Thrift connections 47 | //! ``` 48 | //! 49 | //! # Performance Considerations 50 | //! 51 | //! - Utilizes monoio's efficient async I/O operations 52 | //! - Implements connection pooling to reduce connection establishment overhead 53 | //! - Optimized for the Thrift THeader protocol 54 | //! 55 | //! For more detailed information on specific components, please refer to the documentation 56 | //! of individual submodules and structs. 57 | pub mod handlers; 58 | pub mod ttheader; 59 | 60 | pub use handlers::proxy::{Endpoint, RouteConfig, Upstream}; 61 | -------------------------------------------------------------------------------- /monolake-services/src/tls/nativetls.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Display; 2 | 3 | use monoio::io::{AsyncReadRent, AsyncWriteRent}; 4 | use monoio_native_tls::{TlsAcceptor, TlsStream}; 5 | use monolake_core::AnyError; 6 | use native_tls::Identity; 7 | use service_async::{ 8 | layer::{layer_fn, FactoryLayer}, 9 | AsyncMakeService, MakeService, Param, Service, 10 | }; 11 | 12 | use crate::tcp::Accept; 13 | 14 | type NativeTlsAccept = (TlsStream, SocketAddr); 15 | 16 | #[derive(Clone)] 17 | pub struct NativeTlsService { 18 | acceptor: TlsAcceptor, 19 | inner: T, 20 | } 21 | 22 | impl Service> for NativeTlsService 23 | where 24 | T: Service>, 25 | T::Error: Into + Display, 26 | S: AsyncReadRent + AsyncWriteRent, 27 | { 28 | type Response = T::Response; 29 | type Error = AnyError; 30 | 31 | async fn call(&self, (stream, addr): Accept) -> Result { 32 | let stream = self.acceptor.accept(stream).await?; 33 | self.inner.call((stream, addr)).await.map_err(Into::into) 34 | } 35 | } 36 | 37 | pub struct NativeTlsServiceFactory { 38 | identity: Identity, 39 | inner: F, 40 | } 41 | 42 | impl NativeTlsServiceFactory { 43 | pub fn layer() -> impl FactoryLayer 44 | where 45 | C: Param, 46 | { 47 | layer_fn(|c: &C, inner| NativeTlsServiceFactory { 48 | identity: c.param(), 49 | inner, 50 | }) 51 | } 52 | } 53 | 54 | impl MakeService for NativeTlsServiceFactory 55 | where 56 | F: MakeService, 57 | F::Error: Into, 58 | { 59 | type Service = NativeTlsService; 60 | type Error = AnyError; 61 | 62 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 63 | let builder = native_tls::TlsAcceptor::builder(self.identity.clone()); 64 | let acceptor = TlsAcceptor::from(builder.build().map_err(AnyError::from)?); 65 | Ok(NativeTlsService { 66 | acceptor, 67 | inner: self 68 | .inner 69 | .make_via_ref(old.map(|o| &o.inner)) 70 | .map_err(Into::into)?, 71 | }) 72 | } 73 | } 74 | 75 | impl AsyncMakeService for NativeTlsServiceFactory 76 | where 77 | F: AsyncMakeService, 78 | F::Error: Into, 79 | { 80 | type Service = NativeTlsService; 81 | type Error = AnyError; 82 | 83 | async fn make_via_ref( 84 | &self, 85 | old: Option<&Self::Service>, 86 | ) -> Result { 87 | let builder = native_tls::TlsAcceptor::builder(self.identity.clone()); 88 | let acceptor = TlsAcceptor::from(builder.build().map_err(AnyError::from)?); 89 | Ok(NativeTlsService { 90 | acceptor, 91 | inner: self 92 | .inner 
93 | .make_via_ref(old.map(|o| &o.inner)) 94 | .await 95 | .map_err(Into::into)?, 96 | }) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /monolake-services/src/tls/rustls.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Display, sync::Arc}; 2 | 3 | use monoio::io::{AsyncReadRent, AsyncWriteRent}; 4 | use monoio_rustls::{ServerTlsStream, TlsAcceptor}; 5 | use monolake_core::AnyError; 6 | use rustls::ServerConfig; 7 | use service_async::{ 8 | layer::{layer_fn, FactoryLayer}, 9 | AsyncMakeService, MakeService, Param, Service, 10 | }; 11 | 12 | use crate::tcp::Accept; 13 | 14 | type RustlsAccept = (ServerTlsStream, SocketAddr); 15 | 16 | pub struct RustlsService { 17 | acceptor: TlsAcceptor, 18 | inner: T, 19 | } 20 | 21 | impl Service> for RustlsService 22 | where 23 | T: Service>, 24 | T::Error: Into + Display, 25 | S: AsyncReadRent + AsyncWriteRent, 26 | { 27 | type Response = T::Response; 28 | type Error = AnyError; 29 | 30 | async fn call(&self, (stream, cx): Accept) -> Result { 31 | let stream = self.acceptor.accept(stream).await?; 32 | self.inner.call((stream, cx)).await.map_err(Into::into) 33 | } 34 | } 35 | 36 | pub struct RustlsServiceFactory { 37 | config: Arc, 38 | inner: F, 39 | } 40 | 41 | impl RustlsServiceFactory { 42 | pub fn layer() -> impl FactoryLayer 43 | where 44 | C: Param, 45 | { 46 | layer_fn(|c: &C, inner| RustlsServiceFactory { 47 | config: Arc::new(c.param()), 48 | inner, 49 | }) 50 | } 51 | } 52 | 53 | impl MakeService for RustlsServiceFactory { 54 | type Service = RustlsService; 55 | type Error = F::Error; 56 | 57 | fn make_via_ref(&self, old: Option<&Self::Service>) -> Result { 58 | let acceptor = TlsAcceptor::from(self.config.clone()); 59 | Ok(RustlsService { 60 | acceptor, 61 | inner: self.inner.make_via_ref(old.map(|o| &o.inner))?, 62 | }) 63 | } 64 | } 65 | 66 | impl AsyncMakeService for RustlsServiceFactory { 67 | type Service = RustlsService; 68 | type Error = F::Error; 69 | 70 | async fn make_via_ref( 71 | &self, 72 | old: Option<&Self::Service>, 73 | ) -> Result { 74 | let acceptor = TlsAcceptor::from(self.config.clone()); 75 | Ok(RustlsService { 76 | acceptor, 77 | inner: self.inner.make_via_ref(old.map(|o| &o.inner)).await?, 78 | }) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /monolake/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "monolake" 3 | version = "0.3.0" 4 | description = "High Performance Proxy base on Monoio" 5 | 6 | authors.workspace = true 7 | categories.workspace = true 8 | edition.workspace = true 9 | keywords.workspace = true 10 | license.workspace = true 11 | repository.workspace = true 12 | 13 | [features] 14 | default = ["tls"] 15 | openid = ["monolake-core/openid", "monolake-services/openid"] 16 | proxy-protocol = [ 17 | "monolake-core/proxy-protocol", 18 | "monolake-services/proxy-protocol", 19 | ] 20 | tls = ["dep:monoio-native-tls", "monolake-services/tls"] 21 | vendored = ["monolake-services/vendored"] 22 | 23 | [dependencies] 24 | monoio = { workspace = true, features = ["sync", "async-cancel"] } 25 | service-async = { workspace = true } 26 | certain-map = { workspace = true } 27 | anyhow = { workspace = true } 28 | serde = { workspace = true } 29 | tracing = { workspace = true } 30 | 31 | monolake-core = { version = "0.3.0", path = "../monolake-core" } 32 | monolake-services = { version = 
"0.3.2", path = "../monolake-services", features = ["hyper"] } 33 | 34 | # tls: needed for native-tls init 35 | native-tls = { workspace = true, optional = true } 36 | monoio-native-tls = { workspace = true, optional = true } 37 | 38 | # log 39 | tracing-subscriber = { version = "0.3", features = ["env-filter"] } 40 | 41 | # parse 42 | clap = { version = "4", features = ['derive'] } 43 | serde_json = "1" 44 | toml = "0.8" 45 | -------------------------------------------------------------------------------- /monolake/src/config/extractor.rs: -------------------------------------------------------------------------------- 1 | use certain_map::Param; 2 | #[cfg(feature = "openid")] 3 | use monolake_services::http::handlers::openid::OpenIdConfig; 4 | use monolake_services::{ 5 | http::{ 6 | handlers::{route::RouteConfig as HttpRouteConfig, upstream::HttpUpstreamTimeout}, 7 | HttpServerTimeout, HttpVersion, 8 | }, 9 | thrift::{ttheader::ThriftServerTimeout, RouteConfig as ThriftRouteConfig}, 10 | }; 11 | 12 | use super::ServerConfig; 13 | 14 | impl Param for ServerConfig { 15 | #[inline] 16 | fn param(&self) -> HttpServerTimeout { 17 | match &self.protocol { 18 | super::ServerProtocolConfig::Http { server_timeout, .. } => *server_timeout, 19 | super::ServerProtocolConfig::Thrift { .. } => { 20 | panic!("extract http server timeout from thrift config") 21 | } 22 | } 23 | } 24 | } 25 | 26 | impl Param for ServerConfig { 27 | #[inline] 28 | fn param(&self) -> HttpUpstreamTimeout { 29 | match &self.protocol { 30 | super::ServerProtocolConfig::Http { 31 | upstream_timeout, .. 32 | } => *upstream_timeout, 33 | super::ServerProtocolConfig::Thrift { .. } => { 34 | panic!("extract http upstream timeout from thrift config") 35 | } 36 | } 37 | } 38 | } 39 | 40 | impl Param for ServerConfig { 41 | #[inline] 42 | fn param(&self) -> ThriftServerTimeout { 43 | match &self.protocol { 44 | super::ServerProtocolConfig::Thrift { server_timeout, .. } => *server_timeout, 45 | super::ServerProtocolConfig::Http { .. } => { 46 | panic!("extract thrift server timeout from http config") 47 | } 48 | } 49 | } 50 | } 51 | 52 | #[cfg(feature = "openid")] 53 | impl Param> for ServerConfig { 54 | fn param(&self) -> Option { 55 | self.auth_config.clone().map(|cfg| cfg.0) 56 | } 57 | } 58 | 59 | impl Param> for ServerConfig { 60 | #[inline] 61 | fn param(&self) -> Vec { 62 | match &self.protocol { 63 | super::ServerProtocolConfig::Http { routes, .. } => routes.clone(), 64 | super::ServerProtocolConfig::Thrift { .. } => { 65 | panic!("extract http routes from thrift config") 66 | } 67 | } 68 | } 69 | } 70 | 71 | impl Param for ServerConfig { 72 | #[inline] 73 | fn param(&self) -> ThriftRouteConfig { 74 | match &self.protocol { 75 | super::ServerProtocolConfig::Thrift { route, .. } => route.clone(), 76 | super::ServerProtocolConfig::Http { .. } => { 77 | panic!("extract thrift routes from http config") 78 | } 79 | } 80 | } 81 | } 82 | 83 | #[cfg(feature = "tls")] 84 | impl Param for ServerConfig { 85 | fn param(&self) -> monolake_services::tls::TlsConfig { 86 | self.tls.clone() 87 | } 88 | } 89 | 90 | impl Param for ServerConfig { 91 | #[inline] 92 | fn param(&self) -> HttpVersion { 93 | match &self.protocol { 94 | super::ServerProtocolConfig::Http { 95 | upstream_http_version, 96 | .. 97 | } => *upstream_http_version, 98 | super::ServerProtocolConfig::Thrift { .. 
} => { 99 | panic!("extract http version from thrift config") 100 | } 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /monolake/src/config/manager.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | cell::RefCell, 3 | collections::{HashMap, HashSet}, 4 | path::{Path, PathBuf}, 5 | sync::Arc, 6 | time::Duration, 7 | }; 8 | 9 | use monoio::spawn; 10 | use monolake_core::{ 11 | config::ServiceConfig, 12 | orchestrator::{ServiceCommand, WorkerManager}, 13 | }; 14 | use service_async::AsyncMakeService; 15 | 16 | use crate::config::{Config, ListenerConfig, ServerConfig}; 17 | 18 | type ServiceConfigMap = HashMap>; 19 | 20 | pub struct StaticFileConfigManager 21 | where 22 | FP: Fn(ServerConfig) -> F, 23 | LFP: Fn(ListenerConfig) -> LF, 24 | { 25 | online_config_content: RefCell>, 26 | online_services: RefCell, 27 | worker_manager: WorkerManager, 28 | listener_factory_provider: LFP, 29 | server_factory_provider: FP, 30 | } 31 | 32 | impl StaticFileConfigManager 33 | where 34 | F: Send + Clone + 'static, 35 | LF: Send + Clone + 'static, 36 | FP: 'static, 37 | LFP: 'static, 38 | F: AsyncMakeService, 39 | FP: Fn(ServerConfig) -> F, 40 | LFP: Fn(ListenerConfig) -> LF, 41 | { 42 | pub fn new( 43 | worker_manager: WorkerManager, 44 | listener_factory_provider: LFP, 45 | server_factory_provider: FP, 46 | ) -> Self { 47 | Self { 48 | online_config_content: Default::default(), 49 | online_services: Default::default(), 50 | worker_manager, 51 | listener_factory_provider, 52 | server_factory_provider, 53 | } 54 | } 55 | 56 | pub async fn load_and_watch(mut self, path: impl AsRef) -> anyhow::Result<()> { 57 | self.reload_file(&path).await?; 58 | self.watch(path.as_ref().to_path_buf()).await; 59 | Ok(()) 60 | } 61 | 62 | async fn reload_file(&mut self, path: impl AsRef) -> anyhow::Result<()> { 63 | let latest_content = monolake_core::util::file_read(path).await?; 64 | if self.online_config_content.borrow().eq(&latest_content) { 65 | return Ok(()); 66 | } 67 | 68 | tracing::info!("config change detected, reloading"); 69 | let new_services = Config::parse_service_config(&latest_content)?; 70 | self.reload_services(&new_services).await?; 71 | 72 | tracing::info!("config reload success"); 73 | self.online_config_content.replace(latest_content); 74 | self.online_services.replace(new_services); 75 | Ok(()) 76 | } 77 | 78 | async fn reload_services(&mut self, new_services: &ServiceConfigMap) -> anyhow::Result<()> { 79 | let patches = Self::diff(&self.online_services.borrow(), new_services); 80 | match self.prepare(&patches).await { 81 | Ok(_) => { 82 | self.commit(&patches) 83 | .await 84 | .expect("config reload failed at commit stage"); 85 | Ok(()) 86 | } 87 | Err(e) => { 88 | tracing::error!("config reload failed at prepare stage: {}, aborting", e); 89 | self.abort(&patches) 90 | .await 91 | .expect("abort config reload failed"); 92 | Err(e) 93 | } 94 | } 95 | } 96 | 97 | fn diff(old_services: &ServiceConfigMap, new_services: &ServiceConfigMap) -> Vec { 98 | let mut patches = Vec::new(); 99 | 100 | let old_keys = old_services.keys().collect::>(); 101 | let new_keys = new_services.keys().collect::>(); 102 | let all_keys = old_keys.union(&new_keys).collect::>(); 103 | for key in all_keys { 104 | let patch = match (old_keys.contains(key), new_keys.contains(key)) { 105 | (true, true) => { 106 | // TODO: Skip keys whose configuration didn't change 107 | let new_config = new_services.get(*key).unwrap(); 
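// Re-create only the server factory from the new config; the listener is kept as-is
// (dynamic listener updates are not supported yet, see `Patch::Update`). The patches
// collected here are applied in two phases: `prepare` pre-commits the new factories
// through the worker manager, `commit` activates them, and `abort` discards the
// pre-commits if any prepare step fails.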
108 | Patch::Update { 109 | key: key.to_string(), 110 | server_config: new_config.server.clone(), 111 | } 112 | } 113 | (true, false) => Patch::Delete { 114 | key: key.to_string(), 115 | }, 116 | (false, true) => { 117 | let new_config = new_services.get(*key).unwrap(); 118 | Patch::Insert { 119 | key: key.to_string(), 120 | listener_config: new_config.listener.clone(), 121 | server_config: new_config.server.clone(), 122 | } 123 | } 124 | (false, false) => { 125 | panic!("unexpected error: illegal key {}", key); 126 | } 127 | }; 128 | patches.push(patch); 129 | } 130 | patches 131 | } 132 | 133 | async fn prepare(&mut self, patches: &[Patch]) -> anyhow::Result<()> { 134 | for patch in patches { 135 | match patch { 136 | Patch::Insert { 137 | key, server_config, .. 138 | } 139 | | Patch::Update { 140 | key, server_config, .. 141 | } => { 142 | self.worker_manager 143 | .dispatch_service_command(ServiceCommand::Precommit( 144 | Arc::new(key.to_string()), 145 | (self.server_factory_provider)(server_config.clone()), 146 | )) 147 | .await 148 | .err()?; 149 | } 150 | Patch::Delete { .. } => { 151 | // nothing to do at prepare stage 152 | } 153 | } 154 | } 155 | Ok(()) 156 | } 157 | 158 | async fn commit(&mut self, patches: &[Patch]) -> anyhow::Result<()> { 159 | for patch in patches { 160 | match patch { 161 | Patch::Insert { 162 | key, 163 | listener_config, 164 | .. 165 | } => { 166 | self.worker_manager 167 | .dispatch_service_command(ServiceCommand::Commit( 168 | Arc::new(key.to_string()), 169 | (self.listener_factory_provider)(listener_config.clone()), 170 | )) 171 | .await 172 | .err()?; 173 | } 174 | Patch::Update { key, .. } => { 175 | self.worker_manager 176 | .dispatch_service_command(ServiceCommand::Update(Arc::new(key.to_string()))) 177 | .await 178 | .err()?; 179 | } 180 | Patch::Delete { key } => { 181 | self.worker_manager 182 | .dispatch_service_command(ServiceCommand::Remove(Arc::new(key.to_string()))) 183 | .await 184 | .err()?; 185 | } 186 | } 187 | } 188 | Ok(()) 189 | } 190 | 191 | async fn abort(&mut self, patches: &[Patch]) -> anyhow::Result<()> { 192 | for patch in patches { 193 | match patch { 194 | Patch::Insert { key, .. } | Patch::Update { key, .. } => { 195 | self.worker_manager 196 | .dispatch_service_command(ServiceCommand::Abort(Arc::new(key.to_string()))) 197 | .await; // discard errors due to partial pre-commits 198 | } 199 | Patch::Delete { .. } => { 200 | // nothing to do at abort stage 201 | } 202 | } 203 | } 204 | Ok(()) 205 | } 206 | 207 | async fn watch(mut self, path: PathBuf) { 208 | spawn(async move { 209 | loop { 210 | if let Err(e) = self.reload_file(&path).await { 211 | tracing::error!("reload config failed: {}", e); 212 | } 213 | monoio::time::sleep(Duration::from_secs(1)).await; 214 | } 215 | }) 216 | .await; 217 | } 218 | } 219 | 220 | enum Patch { 221 | Insert { 222 | key: String, 223 | listener_config: ListenerConfig, 224 | server_config: ServerConfig, 225 | }, 226 | Update { 227 | key: String, 228 | server_config: ServerConfig, // ListenerConfig dynamic update not supported yet 229 | }, 230 | Delete { 231 | key: String, 232 | }, 233 | } 234 | -------------------------------------------------------------------------------- /monolake/src/context.rs: -------------------------------------------------------------------------------- 1 | use monolake_core::context::{PeerAddr, RemoteAddr}; 2 | 3 | // This struct should be a app-defined struct. 4 | // Framework should not bind it. 5 | certain_map::certain_map! 
{ 6 | #[derive(Clone)] 7 | #[full(FullContext)] 8 | pub struct Context { 9 | // Set by ContextService 10 | peer_addr: PeerAddr, 11 | // Set by ProxyProtocolService 12 | remote_addr: Option, 13 | } 14 | } 15 | 16 | #[cfg(test)] 17 | mod test { 18 | use std::net::SocketAddr; 19 | 20 | use certain_map::ParamSet; 21 | use monolake_core::listener::AcceptedAddr; 22 | use service_async::ParamRef; 23 | 24 | use super::*; 25 | 26 | #[test] 27 | pub fn test_add_entries_to_context() { 28 | let mut ctx = Context::new(); 29 | let handler = ctx.handler(); 30 | let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap(); 31 | let peer_addr = PeerAddr::from(AcceptedAddr::from(addr)); 32 | let handler = handler.param_set(peer_addr); 33 | match ParamRef::::param_ref(&handler).0 { 34 | AcceptedAddr::Tcp(socket_addr) => assert_eq!(addr, socket_addr), 35 | _ => unreachable!(), 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /monolake/src/factory.rs: -------------------------------------------------------------------------------- 1 | //! Preconstructed factories. 2 | use std::fmt::Debug; 3 | 4 | use certain_map::Param; 5 | use monoio::net::TcpStream; 6 | use monolake_core::listener::{AcceptedAddr, AcceptedStream}; 7 | #[cfg(feature = "openid")] 8 | use monolake_services::http::handlers::OpenIdHandler; 9 | #[cfg(feature = "proxy-protocol")] 10 | use monolake_services::proxy_protocol::ProxyProtocolServiceFactory; 11 | use monolake_services::{ 12 | common::ContextService, 13 | http::{ 14 | core::HttpCoreService, 15 | detect::H2Detect, 16 | handlers::{ 17 | upstream::HttpUpstreamTimeout, ConnectionReuseHandler, ContentHandler, 18 | RewriteAndRouteHandler, UpstreamHandler, 19 | }, 20 | HttpVersion, 21 | }, 22 | tcp::Accept, 23 | thrift::{handlers::ProxyHandler as TProxyHandler, ttheader::TtheaderCoreService}, 24 | }; 25 | use service_async::{stack::FactoryStack, ArcMakeService, Service}; 26 | 27 | use crate::{ 28 | config::ServerConfig, 29 | context::{Context, FullContext}, 30 | }; 31 | 32 | /// Create a new factory for l7 proxy. 33 | // Here we use a fixed generic type `Accept` 34 | // for simplification and make return impl work. 35 | #[allow(dead_code)] 36 | pub fn l7_factory( 37 | config: ServerConfig, 38 | ) -> ArcMakeService< 39 | impl Service, Error = impl Debug>, 40 | impl Debug, 41 | > { 42 | match &config.protocol { 43 | crate::config::ServerProtocolConfig::Http { opt_handlers, .. 
} => { 44 | let version: HttpVersion = config.param(); 45 | let http_upstream_timeout: HttpUpstreamTimeout = config.param(); 46 | let enable_content_handler = opt_handlers.content_handler; 47 | let stacks = FactoryStack::new(config.clone()) 48 | .replace(UpstreamHandler::factory(http_upstream_timeout, version)) 49 | .push(ContentHandler::opt_layer(enable_content_handler)) 50 | .push(RewriteAndRouteHandler::layer()); 51 | 52 | #[cfg(feature = "openid")] 53 | let stacks = stacks.push(OpenIdHandler::layer()); 54 | 55 | let stacks = stacks 56 | .push(ConnectionReuseHandler::layer()) 57 | .push(HttpCoreService::layer()) 58 | .push(H2Detect::layer()); 59 | 60 | #[cfg(feature = "tls")] 61 | let stacks = stacks.push(monolake_services::tls::UnifiedTlsFactory::layer()); 62 | 63 | #[cfg(feature = "proxy-protocol")] 64 | let stacks = stacks.push(ProxyProtocolServiceFactory::layer()); 65 | 66 | stacks 67 | .check_make_svc::<(TcpStream, FullContext)>() 68 | .push(ContextService::::layer()) 69 | .check_make_svc::<(TcpStream, AcceptedAddr)>() 70 | .into_boxed_service() 71 | .into_arc_factory() 72 | .into_inner() 73 | } 74 | crate::config::ServerProtocolConfig::Thrift { .. } => { 75 | let proxy_config = config.param(); 76 | let stacks = FactoryStack::new(config) 77 | .replace(TProxyHandler::factory(proxy_config)) 78 | .push(TtheaderCoreService::layer()); 79 | 80 | #[cfg(feature = "tls")] 81 | let stacks = stacks.push(monolake_services::tls::UnifiedTlsFactory::layer()); 82 | 83 | stacks 84 | .check_make_svc::<(TcpStream, FullContext)>() 85 | .push(ContextService::::layer()) 86 | .check_make_svc::<(TcpStream, AcceptedAddr)>() 87 | .into_boxed_service() 88 | .into_arc_factory() 89 | .into_inner() 90 | } 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /monolake/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::{path::Path, sync::Arc}; 2 | 3 | use anyhow::Result; 4 | use clap::Parser; 5 | use monolake_core::{ 6 | config::{RuntimeConfig, RuntimeType}, 7 | listener::ListenerBuilder, 8 | orchestrator::WorkerManager, 9 | }; 10 | use service_async::AsyncMakeServiceWrapper; 11 | use tracing_subscriber::{filter::LevelFilter, fmt, prelude::*, EnvFilter}; 12 | 13 | use crate::{ 14 | config::{manager::StaticFileConfigManager, Config}, 15 | factory::l7_factory, 16 | util::print_logo, 17 | }; 18 | 19 | mod config; 20 | mod context; 21 | mod factory; 22 | mod util; 23 | 24 | #[derive(Parser, Debug)] 25 | #[clap(author, version, about, long_about = None)] 26 | struct Args { 27 | /// Path of the config file 28 | #[clap(short, long, value_parser)] 29 | config: String, 30 | } 31 | 32 | fn main() -> Result<()> { 33 | tracing_subscriber::registry() 34 | .with(fmt::layer()) 35 | .with( 36 | EnvFilter::builder() 37 | .with_default_directive(LevelFilter::INFO.into()) 38 | .from_env_lossy(), 39 | ) 40 | .init(); 41 | #[cfg(feature = "tls")] 42 | monoio_native_tls::init(); 43 | print_logo(); 44 | 45 | let args = Args::parse(); 46 | let mut runtime_config = Config::load_runtime_config(&args.config)?; 47 | #[cfg(target_os = "linux")] 48 | if matches!(runtime_config.runtime_type, RuntimeType::IoUring) && !monoio::utils::detect_uring() 49 | { 50 | runtime_config.runtime_type = RuntimeType::Legacy; 51 | } 52 | match runtime_config.runtime_type { 53 | #[cfg(target_os = "linux")] 54 | monolake_core::config::RuntimeType::IoUring => { 55 | monoio::RuntimeBuilder::::new() 56 | .enable_timer() 57 | .build() 58 | .expect("Failed building 
the Runtime with IoUringDriver") 59 | .block_on(run(runtime_config, &args.config)); 60 | } 61 | monolake_core::config::RuntimeType::Legacy => { 62 | monoio::RuntimeBuilder::::new() 63 | .enable_timer() 64 | // Since we read file, we need a thread pool to avoid blocking the runtime 65 | .attach_thread_pool(Box::new(monoio::blocking::DefaultThreadPool::new(4))) 66 | .build() 67 | .expect("Failed building the Runtime with LegacyDriver") 68 | .block_on(run(runtime_config, &args.config)); 69 | } 70 | } 71 | Ok(()) 72 | } 73 | 74 | async fn run(runtime_config: RuntimeConfig, service_config_path: impl AsRef) { 75 | // Start workers 76 | let mut manager = WorkerManager::new(runtime_config); 77 | let join_handlers = manager.spawn_workers_async(); 78 | tracing::info!( 79 | "Start monolake with {:?} runtime, {} worker(s), {} entries and sqpoll {:?}.", 80 | manager.config().runtime_type, 81 | join_handlers.len(), 82 | manager.config().entries, 83 | manager.config().sqpoll_idle 84 | ); 85 | 86 | // Create config manager 87 | let config_manager = StaticFileConfigManager::new( 88 | manager, 89 | |config| { 90 | AsyncMakeServiceWrapper(Arc::new( 91 | ListenerBuilder::try_from(config).expect("build listener failed"), 92 | )) 93 | }, 94 | |config| AsyncMakeServiceWrapper(l7_factory(config)), 95 | ); 96 | config_manager 97 | .load_and_watch(&service_config_path) 98 | .await 99 | .expect("apply init config failed"); 100 | tracing::info!("init config broadcast successfully"); 101 | 102 | // Wait for workers 103 | for (_, mut close) in join_handlers.into_iter() { 104 | close.cancellation().await; 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /monolake/src/util.rs: -------------------------------------------------------------------------------- 1 | const MONOLAKE_FIG: &str = r" 2 | __ __ _ _ 3 | | \/ | ___ _ __ ___ | | __ _ | | __ ___ 4 | | |\/| | / _ \ | '_ \ / _ \ | | / _` | | |/ / / _ \ 5 | | | | | | (_) | | | | | | (_) | | | | (_| | | < | __/ 6 | |_| |_| \___/ |_| |_| \___/ |_| \__,_| |_|\_\ \___| 7 | "; 8 | 9 | #[inline] 10 | pub fn print_logo() { 11 | println!("{MONOLAKE_FIG}"); 12 | } 13 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "nightly" 3 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | comment_width = 100 2 | edition = "2021" 3 | format_code_in_doc_comments = true 4 | format_strings = true 5 | group_imports = "StdExternalCrate" 6 | imports_granularity = "Crate" 7 | normalize_comments = true 8 | normalize_doc_attributes = true 9 | wrap_comments = true 10 | --------------------------------------------------------------------------------