├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── pull_request_template.md └── workflows │ └── ci.yml ├── .gitignore ├── .mergify.yml ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── examples ├── example-axum │ ├── Cargo.toml │ └── src │ │ └── main.rs └── example-tonic │ ├── Cargo.toml │ ├── build.rs │ ├── proto │ └── helloworld.proto │ └── src │ └── main.rs ├── src ├── config.rs ├── error_handling.rs ├── health.rs ├── lib.rs ├── middleware.rs ├── middleware │ ├── metrics.rs │ ├── trace.rs │ └── trace │ │ ├── classify.rs │ │ └── opentelemetry.rs ├── pprof.rs ├── request_id.rs └── server.rs └── tests └── integration_tests ├── Cargo.toml ├── build.rs ├── proto └── helloworld.proto └── src ├── lib.rs └── timeout.rs /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Documentation for this file can be found on the GitHub website here: 2 | # https://docs.github.com/en/free-pro-team@latest/github/creating-cloning-and-archiving-repositories/about-code-owners 3 | 4 | * @davidpdrsn 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Device:** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ### Checklist 2 | 3 | * [ ] I have read the [Contributor Guide](../../CONTRIBUTING.md) 4 | * [ ] I have read and agree to the [Code of Conduct](../../CODE_OF_CONDUCT.md) 5 | * [ ] I have added a description of my changes and why I'd like them included in the section below 6 | 7 | ### Description of Changes 8 | 9 | Describe your changes here 10 | 11 | ### Related Issues 12 | 13 | List related issues here 14 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - main 5 | tags: 6 | - "*" 7 | pull_request: {} 8 | 9 | name: CI 10 | jobs: 11 | lint: 12 | name: Lint 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | - uses: actions-rs/toolchain@v1 17 | with: 18 | toolchain: stable 19 | override: true 20 | 21 | # make sure all code has been formatted with rustfmt 22 | - name: Install Protoc 23 | uses: arduino/setup-protoc@v1 24 | with: 25 | repo-token: ${{ secrets.GITHUB_TOKEN }} 26 | - name: check rustfmt 27 | run: | 28 | rustup component add rustfmt 29 | cargo fmt -- --check --color always 30 | 31 | # run clippy to verify we have no warnings 32 | - run: cargo fetch 33 | - name: cargo clippy 34 | run: | 35 | rustup component add clippy 36 | cargo clippy --all-targets --all-features --workspace -- -D warnings 37 | 38 | test: 39 | name: Test 40 | strategy: 41 | matrix: 42 | os: [ubuntu-latest] 43 | runs-on: ${{ matrix.os }} 44 | steps: 45 | - uses: actions/checkout@v2 46 | - uses: actions-rs/toolchain@v1 47 | with: 48 | toolchain: stable 49 | override: true 50 | - run: cargo fetch 51 | - name: Install Protoc 52 | uses: arduino/setup-protoc@v1 53 | with: 54 | repo-token: ${{ secrets.GITHUB_TOKEN }} 55 | - name: cargo test build 56 | run: cargo build --tests --workspace 57 | - name: cargo test 58 | run: cargo test --workspace 59 | 60 | check-docs: 61 | runs-on: ubuntu-latest 62 | steps: 63 | - uses: actions/checkout@v2 64 | - uses: actions-rs/toolchain@v1 65 | with: 66 | toolchain: stable 67 | override: true 68 | 69 | - name: Install Protoc 70 | uses: arduino/setup-protoc@v1 71 | with: 72 | repo-token: ${{ secrets.GITHUB_TOKEN }} 73 | - name: cargo doc 74 | working-directory: ${{ matrix.subcrate }} 75 | env: 76 | RUSTDOCFLAGS: "-D rustdoc::broken_intra_doc_links" 77 | run: cargo doc --all-features --no-deps --workspace 78 | 79 | cargo-hack: 80 | runs-on: ubuntu-latest 81 | steps: 82 | - uses: actions/checkout@v2 83 | - uses: actions-rs/toolchain@v1 84 | with: 85 | toolchain: stable 86 | override: true 87 | - name: Install cargo-hack 88 | run: | 89 | curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin 90 | - name: Install Protoc 91 | uses: arduino/setup-protoc@v1 92 | with: 93 | repo-token: ${{ secrets.GITHUB_TOKEN }} 94 | - name: cargo hack check 95 | working-directory: ${{ matrix.subcrate }} 96 | env: 97 | RUSTFLAGS: "-D unused_imports -D dead_code -D unused_variables" 98 | run: cargo hack check --each-feature --no-dev-deps --workspace 99 | 100 | # This doesn't work with `[patch.crates-io]`. 
Can enable this when we've published 101 | # new versions of axum and tower-http 102 | # publish-check: 103 | # name: Publish Check 104 | # runs-on: ubuntu-latest 105 | # steps: 106 | # - uses: actions/checkout@v2 107 | # - uses: actions-rs/toolchain@v1 108 | # with: 109 | # toolchain: stable 110 | # override: true 111 | # - run: cargo fetch 112 | # - name: cargo publish check 113 | # run: cargo publish --dry-run 114 | 115 | # # TODO: You must add a crates.io API token to your GH secrets and name it CRATES_IO_TOKEN 116 | # publish: 117 | # name: Publish 118 | # needs: [test, publish-check] 119 | # runs-on: ubuntu-latest 120 | # if: startsWith(github.ref, 'refs/tags/') 121 | # steps: 122 | # - uses: actions/checkout@v1 123 | # - uses: actions-rs/toolchain@v1 124 | # with: 125 | # toolchain: stable 126 | # override: true 127 | # - name: cargo fetch 128 | # uses: actions-rs/cargo@v1 129 | # with: 130 | # command: fetch 131 | # - name: cargo publish 132 | # uses: actions-rs/cargo@v1 133 | # env: 134 | # CARGO_REGISTRY_TOKEN: ${{ secrets.CRATES_IO_TOKEN }} 135 | # with: 136 | # command: publish 137 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | .idea 5 | -------------------------------------------------------------------------------- /.mergify.yml: -------------------------------------------------------------------------------- 1 | pull_request_rules: 2 | - name: automatic merge when CI passes and 1 reviews 3 | conditions: 4 | - "#approved-reviews-by>=1" 5 | - "#review-requested=0" 6 | - "#changes-requested-reviews-by=0" 7 | - base=main 8 | actions: 9 | merge: 10 | method: squash 11 | - name: delete head branch after merge 12 | conditions: 13 | - merged 14 | actions: 15 | delete_head_branch: {} 16 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
6 | 7 | ## [Unreleased] 8 | 9 | ## [0.1.1] - 2019-09-03 10 | ### Added 11 | - New features go here in a bullet list 12 | 13 | ### Changed 14 | - Changes to existing functionality go here in a bullet list 15 | 16 | ### Deprecated 17 | - Mark features soon-to-be removed in a bullet list 18 | 19 | ### Removed 20 | - Features that have been removed in a bullet list 21 | 22 | ### Fixed 23 | - Bug fixes in a bullet list 24 | 25 | ### Security 26 | - Changes/fixes related to security vulnerabilities in a bullet list 27 | 28 | ## [0.1.0] - 2019-09-02 29 | ### Added 30 | - Initial add of the thing 31 | 32 | [Unreleased]: https://github.com/EmbarkStudios/server-framework/compare/0.1.1...HEAD 33 | [0.1.1]: https://github.com/EmbarkStudios/server-framework/compare/0.1.0...0.1.1 34 | [0.1.0]: https://github.com/EmbarkStudios/server-framework/releases/tag/0.1.0 35 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at opensource@embark-studios.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Embark Contributor Guidelines 2 | 3 | Welcome! This project is created by the team at [Embark Studios](https://embark.games). We're glad you're interested in contributing! We welcome contributions from people of all backgrounds who are interested in making great software with us. 4 | 5 | At Embark, we aspire to empower everyone to create interactive experiences. To do this, we're exploring and pushing the boundaries of new technologies, and sharing our learnings with the open source community. 6 | 7 | If you have ideas for collaboration, email us at opensource@embark-studios.com. 8 | 9 | We're also hiring full-time engineers to work with us in Stockholm! Check out our current job postings [here](https://www.embark-studios.com/jobs). 10 | 11 | ## Issues 12 | 13 | ### Feature Requests 14 | 15 | If you have ideas or how to improve our projects, you can suggest features by opening a GitHub issue. Make sure to include details about the feature or change, and describe any uses cases it would enable. 16 | 17 | Feature requests will be tagged as `enhancement` and their status will be updated in the comments of the issue. 18 | 19 | ### Bugs 20 | 21 | When reporting a bug or unexpected behaviour in a project, make sure your issue describes steps to reproduce the behaviour, including the platform you were using, what steps you took, and any error messages. 22 | 23 | Reproducible bugs will be tagged as `bug` and their status will be updated in the comments of the issue. 24 | 25 | ### Wontfix 26 | 27 | Issues will be closed and tagged as `wontfix` if we decide that we do not wish to implement it, usually due to being misaligned with the project vision or out of scope. We will comment on the issue with more detailed reasoning. 28 | 29 | ## Contribution Workflow 30 | 31 | ### Open Issues 32 | 33 | If you're ready to contribute, start by looking at our open issues tagged as [`help wanted`](../../issues?q=is%3Aopen+is%3Aissue+label%3A"help+wanted") or [`good first issue`](../../issues?q=is%3Aopen+is%3Aissue+label%3A"good+first+issue"). 
34 | 35 | You can comment on the issue to let others know you're interested in working on it or to ask questions. 36 | 37 | ### Making Changes 38 | 39 | 1. Fork the repository. 40 | 41 | 2. Create a new feature branch. 42 | 43 | 3. Make your changes. Ensure that there are no build errors by running the project with your changes locally. 44 | 45 | 4. Open a pull request with a name and description of what you did. You can read more about working with pull requests on GitHub [here](https://help.github.com/en/articles/creating-a-pull-request-from-a-fork). 46 | 47 | 5. A maintainer will review your pull request and may ask you to make changes. 48 | 49 | ## Code Guidelines 50 | 51 | ### Rust 52 | 53 | You can read about our standards and recommendations for working with Rust [here](https://github.com/EmbarkStudios/rust-ecosystem/blob/main/guidelines.md). 54 | 55 | ### Python 56 | 57 | We recommend following [PEP8 conventions](https://www.python.org/dev/peps/pep-0008/) when working with Python modules. 58 | 59 | ### JavaScript & TypeScript 60 | 61 | We use [Prettier](https://prettier.io/) with the default settings to auto-format our JavaScript and TypeScript code. 62 | 63 | ## Licensing 64 | 65 | Unless otherwise specified, all Embark open source projects shall comply with the Rust standard licensing model (MIT + Apache 2.0) and are thereby licensed under a dual license, allowing licensees to choose either MIT OR Apache-2.0 at their option. 66 | 67 | ## Contributor Terms 68 | 69 | Thank you for your interest in Embark Studios’ open source project. By providing a contribution (new or modified code, other input, feedback or suggestions etc.) you agree to these Contributor Terms. 70 | 71 | You confirm that each of your contributions has been created by you and that you are the copyright owner. You also confirm that you have the right to provide the contribution to us and that you do it under the Rust dual licence model (MIT + Apache 2.0). 72 | 73 | If you want to contribute something that is not your original creation, you may submit it to Embark Studios separately from any contribution, including details of its source and of any license or other restriction (such as related patents, trademarks, agreements etc.) 74 | 75 | Please also note that our projects are released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md) to ensure that they are welcoming places for everyone to contribute. By participating in any Embark Studios open source project, you agree to keep to the Contributor Code of Conduct. 
76 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "server-framework" 3 | version = "0.1.0" 4 | edition = "2021" 5 | authors = ["Embark "] 6 | license = "MIT OR Apache-2.0" 7 | readme = "README.md" 8 | documentation = "https://docs.rs/server-framework" 9 | homepage = "https://github.com/EmbarkStudios/server-framework" 10 | repository = "https://github.com/EmbarkStudios/server-framework" 11 | description = "Opinionated framework for running network servers" 12 | categories = ["asynchronous", "network-programming", "web-programming"] 13 | keywords = ["http", "web", "framework", "service"] 14 | 15 | [features] 16 | default = ["tonic"] 17 | 18 | [dependencies] 19 | anyhow = "1.0" 20 | axum = "0.6" 21 | axum-extra = "0.7.4" 22 | futures-util = { version = "0.3", default-features = false, features = [ 23 | "alloc", 24 | ] } 25 | http = "0.2" 26 | http-body = "0.4" 27 | hyper = { version = "0.14", features = ["full"] } 28 | metrics = "0.21.0" 29 | metrics-exporter-prometheus = { version = "0.12.1", default-features = false } 30 | opentelemetry = { version = "0.19" } 31 | opentelemetry-http = { version = "0.8" } 32 | parking_lot = "0.12" 33 | pin-project-lite = "0.2" 34 | serde = { version = "1.0", features = ["derive"] } 35 | tokio = { version = "1.14", features = ["rt-multi-thread", "signal", "macros"] } 36 | tower = { version = "0.4.13", features = ["util", "timeout"] } 37 | tracing = "0.1" 38 | tracing-opentelemetry = "0.19" 39 | uuid = { version = "1.0", features = ["v4"] } 40 | 41 | # optional dependencies 42 | tonic = { optional = true, version = "0.9.1", default_features = false, features = [ 43 | "transport", 44 | "codegen", 45 | ] } 46 | 47 | # pprof doesn't work on windows or android, see https://github.com/tikv/pprof-rs/issues/9 48 | [target.'cfg(all(not(target_os = "windows"), not(target_os = "android")))'.dependencies] 49 | pprof = { version = "0.11", features = ["prost-codec"] } 50 | 51 | [dependencies.tower-http] 52 | version = "0.4" 53 | features = [ 54 | "add-extension", 55 | "compression-gzip", 56 | "map-request-body", 57 | "map-response-body", 58 | "request-id", 59 | "sensitive-headers", 60 | "trace", 61 | "util", 62 | ] 63 | 64 | [dev-dependencies] 65 | assert-json-diff = "2.0" 66 | serde_json = "1.0" 67 | tracing-subscriber = { version = "0.3.15", features = ["json", "env-filter"] } 68 | 69 | [workspace] 70 | members = ["examples/*", "tests/*"] 71 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 Embark Studios 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 |
8 | 
9 | # `🗼 server-framework`
10 | 
11 | **Opinionated framework for running network servers**
12 | 
13 | [![Embark](https://img.shields.io/badge/embark-open%20source-blueviolet.svg)](https://embark.dev)
14 | [![Embark](https://img.shields.io/badge/discord-ark-%237289da.svg?logo=discord)](https://discord.gg/dAuKfZS)
15 | [![Crates.io](https://img.shields.io/crates/v/server-framework.svg)](https://crates.io/crates/server-framework)
16 | [![Docs](https://docs.rs/server-framework/badge.svg)](https://docs.rs/server-framework)
17 | [![dependency status](https://deps.rs/repo/github/EmbarkStudios/server-framework/status.svg)](https://deps.rs/repo/github/EmbarkStudios/server-framework)
18 | [![Build status](https://github.com/EmbarkStudios/server-framework/workflows/CI/badge.svg)](https://github.com/EmbarkStudios/server-framework/actions)
19 | 
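## Example

A minimal sketch of the intended usage, adapted from the crate-level documentation in `src/lib.rs`; the `Config` fields come from `src/config.rs`, and the `Server` builder API shown here may still change while the crate is a work in progress:

```rust
use server_framework::{Config, Server};
use axum::{routing::get, Router};

#[tokio::main]
async fn main() {
    // Start from the default config and adjust the public fields as needed.
    let mut config = Config::default();
    config.timeout_sec = 10;

    // Plain axum routes work as-is.
    let routes = Router::new().route("/", get(|| async { "Hello, World!" }));

    // `serve` also starts the separate metrics/health server (port 8081 by default).
    Server::new(config)
        .with(routes)
        .always_live_and_ready()
        .serve()
        .await
        .expect("server failed to start");
}
```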
20 | 
21 | ## 🚨 Work in progress 🚨
22 | 
23 | This project is still very much a work in progress. Do not use this yet! The name is also going to change before the actual release.
24 | 
25 | ## Contribution
26 | 
27 | [![Contributor Covenant](https://img.shields.io/badge/contributor%20covenant-v1.4-ff69b4.svg)](../main/CODE_OF_CONDUCT.md)
28 | 
29 | We welcome community contributions to this project.
30 | 
31 | Please read our [Contributor Guide](CONTRIBUTING.md) for more information on how to get started.
32 | Please also read our [Contributor Terms](CONTRIBUTING.md#contributor-terms) before you make any contributions.
33 | 
34 | Any contribution intentionally submitted for inclusion in an Embark Studios project shall comply with the Rust standard licensing model (MIT OR Apache 2.0) and therefore be dual licensed as described below, without any additional terms or conditions:
35 | 
36 | ### License
37 | 
38 | This contribution is dual licensed under EITHER OF
39 | 
40 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or <http://www.apache.org/licenses/LICENSE-2.0>)
41 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)
42 | 
43 | at your option.
44 | 
45 | For clarity, "your" refers to Embark or any other licensee/user of the contribution.
46 | 
--------------------------------------------------------------------------------
/examples/example-axum/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "example-axum"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 | 
7 | [dependencies]
8 | server-framework = { path = "../../", default_features = false }
9 | tokio = { version = "1.14", features = ["full"] }
10 | tracing = "0.1"
11 | tracing-subscriber = { version = "0.3", features = ["env-filter"] }
12 | 
--------------------------------------------------------------------------------
/examples/example-axum/src/main.rs:
--------------------------------------------------------------------------------
1 | // Tonic doesn't derive Eq on generated code
2 | #![allow(clippy::derive_partial_eq_without_eq)]
3 | use server_framework::{
4 |     axum::{response::IntoResponse, routing::get, Router},
5 |     Config, Server,
6 | };
7 | 
8 | #[tokio::main]
9 | async fn main() {
10 |     init_tracing();
11 | 
12 |     let config = Config::default();
13 | 
14 |     let routes = Router::new().route("/", get(root));
15 | 
16 |     Server::new(config)
17 |         .with(routes)
18 |         .always_live_and_ready()
19 |         .serve()
20 |         .await
21 |         .expect("server failed to start");
22 | }
23 | 
24 | fn init_tracing() {
25 |     if std::env::var_os("RUST_LOG").is_none() {
26 |         std::env::set_var("RUST_LOG", "example_axum=debug,server_framework=debug")
27 |     }
28 |     tracing_subscriber::fmt::init();
29 | }
30 | 
31 | async fn root() -> impl IntoResponse {
32 |     "Hello, World!"
33 | }
34 | 
--------------------------------------------------------------------------------
/examples/example-tonic/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "example-tonic"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 | 
7 | [dependencies]
8 | prost = "0.11"
9 | server-framework = { path = "../../" }
10 | tokio = { version = "1.14", features = ["full"] }
11 | tonic = "0.9.1"
12 | tracing = "0.1"
13 | tracing-subscriber = { version = "0.3", features = ["env-filter"] }
14 | 
15 | [build-dependencies]
16 | tonic-build = "0.9.1"
17 | 
--------------------------------------------------------------------------------
/examples/example-tonic/build.rs:
--------------------------------------------------------------------------------
1 | fn main() -> Result<(), Box<dyn std::error::Error>> {
2 |     tonic_build::compile_protos("proto/helloworld.proto")?;
3 |     Ok(())
4 | }
5 | 
--------------------------------------------------------------------------------
/examples/example-tonic/proto/helloworld.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | 
3 | package helloworld;
4 | 
5 | service Greeter {
6 |     rpc SayHello (HelloRequest) returns (HelloReply);
7 | }
8 | 
9 | message HelloRequest {
10 |     string name = 1;
11 | }
12 | 
13 | message HelloReply {
14 |     string message = 1;
15 | }
16 | 
--------------------------------------------------------------------------------
/examples/example-tonic/src/main.rs:
--------------------------------------------------------------------------------
1 | // Tonic doesn't derive Eq on generated code
2 | #![allow(clippy::derive_partial_eq_without_eq)]
3 | use hello_world::greeter_server::{Greeter, GreeterServer};
4 | use hello_world::{HelloReply, HelloRequest};
5 | use server_framework::{Config, Server};
6 | 
7 | mod hello_world {
8 |     tonic::include_proto!("helloworld");
9 | }
10 | 
11 | #[tokio::main]
12 | async fn main() {
13 |     init_tracing();
14 | 
15 |     let config = Config::default();
16 | 
17 |     let service = GreeterServer::new(MyGreeter);
18 | 
19 |     Server::new(config)
20 |         .with_tonic(service)
21 |         .always_live_and_ready()
22 |         .serve()
23 |         .await
24 |         .expect("server failed to start");
25 | }
26 | 
27 | fn init_tracing() {
28 |     if std::env::var_os("RUST_LOG").is_none() {
29 |         std::env::set_var("RUST_LOG", "example_tonic=debug,server_framework=debug")
30 |     }
31 |     tracing_subscriber::fmt::init();
32 | }
33 | 
34 | #[derive(Clone)]
35 | pub struct MyGreeter;
36 | 
37 | #[tonic::async_trait]
38 | impl Greeter for MyGreeter {
39 |     async fn say_hello(
40 |         &self,
41 |         request: tonic::Request<HelloRequest>,
42 |     ) -> Result<tonic::Response<HelloReply>, tonic::Status> {
43 |         let reply = hello_world::HelloReply {
44 |             message: format!("Hello {}!", request.into_inner().name),
45 |         };
46 | 
47 |         Ok(tonic::Response::new(reply))
48 |     }
49 | }
50 | 
--------------------------------------------------------------------------------
/src/config.rs:
--------------------------------------------------------------------------------
1 | use std::net::SocketAddr;
2 | 
3 | /// Server configuration.
4 | #[derive(Debug, Clone)]
5 | #[non_exhaustive]
6 | pub struct Config {
7 |     /// The socket address the server will bind to.
8 |     ///
9 |     /// Defaults to `0.0.0.0:8080`.
10 |     pub bind_address: SocketAddr,
11 | 
12 |     /// The port the metrics and health server will bind to.
13 |     ///
14 |     /// Defaults to `8081`.
15 |     pub metrics_health_port: u16,
16 | 
17 |     /// Whether to only accept HTTP/2 traffic.
18 |     ///
19 |     /// Defaults to `false`.
20 |     pub http2_only: bool,
21 | 
22 |     /// The request timeout in seconds.
23 |     ///
24 |     /// Defaults to 30.
25 |     pub timeout_sec: u64,
26 | 
27 |     /// The request id header.
28 |     ///
29 |     /// Defaults to `x-request-id`.
30 |     pub request_id_header: String,
31 | 
32 |     /// Whether to run a second server that serves health and metrics.
33 |     ///
34 |     /// Defaults to `true`.
35 |     pub serve_health_and_metrics: bool,
36 | 
37 |     /// Whether to shut down the server gracefully.
38 |     ///
39 |     /// Defaults to `true`.
40 |     pub graceful_shutdown: bool,
41 | }
42 | 
43 | impl Default for Config {
44 |     fn default() -> Self {
45 |         Self {
46 |             bind_address: SocketAddr::from((std::net::Ipv4Addr::UNSPECIFIED, 8080)),
47 |             metrics_health_port: 8081,
48 |             http2_only: false,
49 |             timeout_sec: 30,
50 |             request_id_header: "x-request-id".to_owned(),
51 |             serve_health_and_metrics: true,
52 |             graceful_shutdown: true,
53 |         }
54 |     }
55 | }
56 | 
--------------------------------------------------------------------------------
/src/error_handling.rs:
--------------------------------------------------------------------------------
1 | use axum::{extract::Extension, response::IntoResponse, BoxError};
2 | use http::{header::CONTENT_TYPE, HeaderMap, Method, StatusCode, Uri};
3 | use std::future::{ready, Ready};
4 | use tower_http::request_id::RequestId;
5 | 
6 | #[allow(unreachable_pub)]
7 | pub type DefaultErrorHandler = fn(
8 |     Method,
9 |     Uri,
10 |     HeaderMap,
11 |     Extension<RequestId>,
12 |     Extension<TimeoutSec>,
13 |     BoxError,
14 | ) -> Ready<axum::response::Response>;
15 | 
16 | #[cfg(feature = "tonic")]
17 | fn timeout_response(is_grpc: bool) -> axum::response::Response {
18 |     if is_grpc {
19 |         // Deadline exceeded isn't _completely_ accurate here since the timeout isn't
20 |         // propagated to downstream services but it's the most accurate status code we can
21 |         // provide
22 |         let response = tonic::Status::deadline_exceeded("request timed out");
23 |         // gRPC has its own status codes, which differ from the HTTP status codes
24 |         (StatusCode::OK, response.to_http()).into_response()
25 |     } else {
26 |         (StatusCode::REQUEST_TIMEOUT, "request timed out").into_response()
27 |     }
28 | }
29 | 
30 | #[cfg(not(feature = "tonic"))]
31 | fn timeout_response(_is_grpc: bool) -> axum::response::Response {
32 |     (StatusCode::REQUEST_TIMEOUT, "request timed out").into_response()
33 | }
34 | 
35 | #[cfg(feature = "tonic")]
36 | fn internal_error_response(is_grpc: bool, body: String) -> axum::response::Response {
37 |     if is_grpc {
38 |         let response = tonic::Status::internal(body);
39 |         // gRPC has its own status codes, which differ from the HTTP status codes
40 |         (StatusCode::OK, response.to_http()).into_response()
41 |     } else {
42 |         (StatusCode::INTERNAL_SERVER_ERROR, body).into_response()
43 |     }
44 | }
45 | 
46 | #[cfg(not(feature = "tonic"))]
47 | fn internal_error_response(_is_grpc: bool, body: String) -> axum::response::Response {
48 |     (StatusCode::INTERNAL_SERVER_ERROR, body).into_response()
49 | }
50 | 
51 | pub(crate) fn default_error_handler(
52 |     method: Method,
53 |     uri: Uri,
54 |     headers: HeaderMap,
55 |     Extension(request_id): Extension<RequestId>,
56 |     Extension(TimeoutSec(timeout_sec)): Extension<TimeoutSec>,
57 |     err: BoxError,
58 | ) -> Ready<axum::response::Response> {
59 |     let request_id = request_id
60 |         .header_value()
61 |         .to_str()
62 |         .unwrap_or("");
63 | 
64 |     let is_grpc = headers
65 |         .get(CONTENT_TYPE)
66 |         .map(|content_type| {
67 |             content_type
68 |                 .to_str()
69 |                 .unwrap_or_default()
70 |                 .starts_with("application/grpc")
71 |         })
72 |         .unwrap_or_default();
73 | 
74 |     if err.is::<tower::timeout::error::Elapsed>()
{
75 |         tracing::warn!(
76 |             %method,
77 |             %uri,
78 |             request_id = %request_id,
79 |             timeout_sec = %timeout_sec,
80 |             "{}",
81 |             error_display_chain(&*err)
82 |         );
83 | 
84 |         ready(timeout_response(is_grpc))
85 |     } else {
86 |         tracing::error!(
87 |             err = %error_display_chain(&*err),
88 |             %method,
89 |             %uri,
90 |             request_id = %request_id,
91 |             "{}",
92 |             error_display_chain(&*err)
93 |         );
94 | 
95 |         let body = format!("Unhandled internal error: {}", err);
96 |         ready(internal_error_response(is_grpc, body))
97 |     }
98 | }
99 | 
100 | pub(crate) fn error_display_chain(error: &dyn std::error::Error) -> String {
101 |     let mut s = error.to_string();
102 |     if let Some(source) = error.source() {
103 |         s.push_str(" -> ");
104 |         s.push_str(&error_display_chain(source));
105 |     }
106 |     s
107 | }
108 | 
109 | #[derive(Clone, Copy, Debug)]
110 | #[allow(unreachable_pub)]
111 | pub struct TimeoutSec(pub(crate) u64);
112 | 
--------------------------------------------------------------------------------
/src/health.rs:
--------------------------------------------------------------------------------
1 | //! Kubernetes compatible health check
2 | 
3 | use axum::async_trait;
4 | use parking_lot::Mutex;
5 | use std::sync::Arc;
6 | 
7 | /// Used to set up health checks for Kubernetes.
8 | ///
9 | /// Learn more at
10 | ///
11 | #[async_trait]
12 | pub trait HealthCheck: Send + Sync + 'static {
13 |     /// Corresponds to a `livenessProbe` within Kubernetes.
14 |     ///
15 |     /// Used to determine if the pod is live and working or if it should be killed and restarted.
16 |     async fn is_live(&self) -> anyhow::Result<()>;
17 | 
18 |     /// Corresponds to a `readinessProbe` within Kubernetes.
19 |     ///
20 |     /// Used to determine if a pod is ready to receive traffic. Unlike `is_live`, pods that
21 |     /// aren't ready won't be restarted. This can be used to temporarily remove a pod from the load
22 |     /// balancer while it performs some heavy task without having the pod killed.
23 |     async fn is_ready(&self) -> anyhow::Result<()>;
24 | 
25 |     /// Combine two health checks into one.
26 |     ///
27 |     /// The resulting health check is live and ready if both inner health checks are.
28 |     fn and<T>(self, rhs: T) -> And<Self, T>
29 |     where
30 |         Self: Sized,
31 |         T: HealthCheck,
32 |     {
33 |         And { lhs: self, rhs }
34 |     }
35 | }
36 | 
37 | #[async_trait]
38 | impl<T: HealthCheck + ?Sized> HealthCheck for Arc<T> {
39 |     async fn is_live(&self) -> anyhow::Result<()> {
40 |         HealthCheck::is_live(&**self).await
41 |     }
42 | 
43 |     async fn is_ready(&self) -> anyhow::Result<()> {
44 |         HealthCheck::is_ready(&**self).await
45 |     }
46 | }
47 | 
48 | /// Two health checks combined into one.
49 | ///
50 | /// Created with [`HealthCheck::and`].
51 | ///
52 | /// `And` is live and ready if both inner health checks are.
53 | #[derive(Clone, Copy, Debug)]
54 | pub struct And<A, B> {
55 |     lhs: A,
56 |     rhs: B,
57 | }
58 | 
59 | #[async_trait]
60 | impl<A, B> HealthCheck for And<A, B>
61 | where
62 |     A: HealthCheck,
63 |     B: HealthCheck,
64 | {
65 |     async fn is_live(&self) -> anyhow::Result<()> {
66 |         tokio::try_join!(self.lhs.is_live(), self.rhs.is_live())?;
67 |         Ok(())
68 |     }
69 | 
70 |     async fn is_ready(&self) -> anyhow::Result<()> {
71 |         tokio::try_join!(self.lhs.is_ready(), self.rhs.is_ready())?;
72 |         Ok(())
73 |     }
74 | }
75 | 
76 | /// A [`HealthCheck`] that is always live and ready.
77 | ///
78 | /// Used by [`Server::always_live_and_ready`] for servers for which health checks are not
79 | /// necessary.
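///
/// A sketch of how this is typically wired up, mirroring the crate-level example (the
/// `Server` builder methods shown here are taken from that example and may still change):
///
/// ```no_run
/// use server_framework::{Config, Server};
/// use axum::{routing::get, Router};
///
/// # async {
/// let routes = Router::new().route("/", get(|| async { "Hello, World!" }));
///
/// // `always_live_and_ready` installs this check so the health endpoints always succeed.
/// Server::new(Config::default())
///     .with(routes)
///     .always_live_and_ready()
///     .serve()
///     .await
///     .unwrap();
/// # };
/// ```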
80 | ///
81 | /// [`Server::always_live_and_ready`]: crate::Server::always_live_and_ready
82 | #[derive(Clone, Copy, Debug, Default)]
83 | #[non_exhaustive]
84 | pub struct AlwaysLiveAndReady;
85 | 
86 | #[async_trait]
87 | impl HealthCheck for AlwaysLiveAndReady {
88 |     async fn is_live(&self) -> anyhow::Result<()> {
89 |         Ok(())
90 |     }
91 | 
92 |     async fn is_ready(&self) -> anyhow::Result<()> {
93 |         Ok(())
94 |     }
95 | }
96 | 
97 | /// Sentinel value used by [`Server`] to signal that you have not yet provided a [`HealthCheck`].
98 | ///
99 | /// [`Server`]: crate::Server
100 | #[derive(Clone, Copy, Debug)]
101 | #[non_exhaustive]
102 | pub struct NoHealthCheckProvided;
103 | 
104 | /// A switch that allows you to kill and restart a pod. Must be set up as a health check when
105 | /// booting the service.
106 | #[derive(Debug, Clone, Default)]
107 | pub struct KillSwitch {
108 |     killed: Arc<Mutex<Option<anyhow::Error>>>,
109 | }
110 | 
111 | impl KillSwitch {
112 |     /// Create a new `KillSwitch`.
113 |     pub fn new() -> Self {
114 |         Self::default()
115 |     }
116 | 
117 |     /// Flick the kill switch and restart the service.
118 |     ///
119 |     /// This makes [`is_live`](HealthCheck::is_live) return the given error. It does not impact
120 |     /// [`is_ready`](HealthCheck::is_ready).
121 |     ///
122 |     /// `reason` allows you to add some context that will be logged.
123 |     pub fn kill(&mut self, reason: impl Into<anyhow::Error>) {
124 |         *self.killed.lock() = Some(reason.into());
125 |     }
126 | }
127 | 
128 | #[async_trait]
129 | impl HealthCheck for KillSwitch {
130 |     async fn is_live(&self) -> anyhow::Result<()> {
131 |         if let Some(killed) = self.killed.lock().take() {
132 |             Err(killed)
133 |         } else {
134 |             Ok(())
135 |         }
136 |     }
137 | 
138 |     async fn is_ready(&self) -> anyhow::Result<()> {
139 |         Ok(())
140 |     }
141 | }
142 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! Opinionated framework for running network servers.
2 | //!
3 | //! This crate builds on top of libraries like [`tower`], [`hyper`], [`axum`], and [`tonic`], and
4 | //! provides an opinionated way to run services built with those libraries.
5 | //!
6 | //! This is how we run all our Rust network services at [Embark Studios].
7 | //!
8 | //! [Embark Studios]: https://www.embark-studios.com
9 | //!
10 | //! # Example
11 | //!
12 | //! ```rust
13 | //! use server_framework::{Server, Config};
14 | //! use axum::{Router, routing::get};
15 | //!
16 | //! // use the default config
17 | //! let config = Config::default();
18 | //!
19 | //! // build our application with a few routes
20 | //! let routes = Router::new()
21 | //!     .route("/", get(|| async { "Hello, World!" }))
22 | //!     .route("/foo", get(|| async { "Hi from `GET /foo`" }));
23 | //!
24 | //! # async {
25 | //! // run our server
26 | //! Server::new(config)
27 | //!     .with(routes)
28 | //!     .always_live_and_ready()
29 | //!     .serve()
30 | //!     .await
31 | //!     .unwrap();
32 | //! # };
33 | //! ```
34 | //!
35 | //! # Middleware
36 | //!
37 | //! At its core `server-framework` is a collection of `tower` middleware that extends your app with
38 | //! Embark's conventions and best practices.
39 | //!
40 | //! The middleware stack includes:
41 | //!
42 | //! - Timeouts
43 | //! - Setting and propagating request id headers
44 | //! - Metrics recording
45 | //! - Tracing with OpenTelemetry support
46 | //!
47 | //! # Metrics and health checks
48 | //!
49 | //!
[`Server::serve`] will also start a second HTTP server separate from your primary application 50 | //! that serves metrics and health checks. The default URLs are: 51 | //! 52 | //! - `GET host:8081/metrics` 53 | //! - `GET host:8081/health/live` 54 | //! - `GET host:8081/health/ready` 55 | //! 56 | //! The port can be configred with [`Config::metrics_health_port`]. 57 | //! 58 | //! # Features 59 | //! 60 | //! `server-framework` includes the following optional features: 61 | //! 62 | //! | Name | Description | Default | 63 | //! |---|---|---| 64 | //! | `tonic` | Enables support for running tonic services | Yes | 65 | 66 | // BEGIN - Embark standard lints v5 for Rust 1.55+ 67 | // do not change or add/remove here, but one can add exceptions after this section 68 | // for more info see: 69 | #![deny(unsafe_code)] 70 | #![warn( 71 | clippy::all, 72 | clippy::await_holding_lock, 73 | clippy::char_lit_as_u8, 74 | clippy::checked_conversions, 75 | clippy::dbg_macro, 76 | clippy::debug_assert_with_mut_call, 77 | clippy::disallowed_methods, 78 | clippy::disallowed_types, 79 | clippy::empty_enum, 80 | clippy::enum_glob_use, 81 | clippy::exit, 82 | clippy::expl_impl_clone_on_copy, 83 | clippy::explicit_deref_methods, 84 | clippy::explicit_into_iter_loop, 85 | clippy::fallible_impl_from, 86 | clippy::filter_map_next, 87 | clippy::flat_map_option, 88 | clippy::float_cmp_const, 89 | clippy::fn_params_excessive_bools, 90 | clippy::from_iter_instead_of_collect, 91 | clippy::if_let_mutex, 92 | clippy::implicit_clone, 93 | clippy::imprecise_flops, 94 | clippy::inefficient_to_string, 95 | clippy::invalid_upcast_comparisons, 96 | clippy::large_digit_groups, 97 | clippy::large_stack_arrays, 98 | clippy::large_types_passed_by_value, 99 | clippy::let_unit_value, 100 | clippy::linkedlist, 101 | clippy::lossy_float_literal, 102 | clippy::macro_use_imports, 103 | clippy::manual_ok_or, 104 | clippy::map_err_ignore, 105 | clippy::map_flatten, 106 | clippy::map_unwrap_or, 107 | clippy::match_on_vec_items, 108 | clippy::match_same_arms, 109 | clippy::match_wild_err_arm, 110 | clippy::match_wildcard_for_single_variants, 111 | clippy::mem_forget, 112 | clippy::mismatched_target_os, 113 | clippy::missing_enforced_import_renames, 114 | clippy::mut_mut, 115 | clippy::mutex_integer, 116 | clippy::needless_borrow, 117 | clippy::needless_continue, 118 | clippy::needless_for_each, 119 | clippy::option_option, 120 | clippy::path_buf_push_overwrite, 121 | clippy::ptr_as_ptr, 122 | clippy::rc_mutex, 123 | clippy::ref_option_ref, 124 | clippy::rest_pat_in_fully_bound_structs, 125 | clippy::same_functions_in_if_condition, 126 | clippy::single_match_else, 127 | clippy::string_add_assign, 128 | clippy::string_add, 129 | clippy::string_lit_as_bytes, 130 | clippy::string_to_string, 131 | clippy::todo, 132 | clippy::trait_duplication_in_bounds, 133 | clippy::unimplemented, 134 | clippy::unnested_or_patterns, 135 | clippy::unused_self, 136 | clippy::useless_transmute, 137 | clippy::verbose_file_reads, 138 | clippy::zero_sized_map_values, 139 | future_incompatible, 140 | nonstandard_style, 141 | rust_2018_idioms 142 | )] 143 | // END - Embark standard lints v0.5 for Rust 1.55+ 144 | // crate-specific exceptions: 145 | #![allow(elided_lifetimes_in_paths, clippy::type_complexity)] 146 | #![cfg_attr(docsrs, feature(doc_cfg))] 147 | #![warn(missing_debug_implementations, missing_docs)] 148 | #![deny(unreachable_pub, private_in_public)] 149 | #![forbid(unsafe_code)] 150 | 151 | pub use anyhow; 152 | pub use axum; 153 | pub use 
axum::async_trait; 154 | pub use http; 155 | #[cfg(feature = "tonic")] 156 | pub use tonic; 157 | pub use tower; 158 | 159 | pub mod health; 160 | 161 | mod config; 162 | mod error_handling; 163 | mod middleware; 164 | #[cfg(all(not(target_os = "windows"), not(target_os = "android")))] 165 | mod pprof; 166 | mod request_id; 167 | mod server; 168 | 169 | use axum::body::BoxBody; 170 | 171 | pub use self::{config::Config, server::Server}; 172 | 173 | #[cfg(feature = "tonic")] 174 | pub use self::server::router_from_tonic; 175 | 176 | /// Type alias for [`axum::Router`] with [`BoxBody`] as the request body type, which this crate 177 | /// requires. 178 | pub type Router = axum::Router; 179 | 180 | /// Type alias for [`http::Request`] with [`BoxBody`] as the body type, which this crate requires. 181 | pub type Request = http::Request; 182 | 183 | #[doc(inline)] 184 | pub use axum::response::Response; 185 | 186 | pub mod metrics { 187 | //! Types and utilities for metrics. 188 | 189 | pub use metrics_exporter_prometheus::Matcher; 190 | } 191 | -------------------------------------------------------------------------------- /src/middleware.rs: -------------------------------------------------------------------------------- 1 | use std::task::{Context, Poll}; 2 | 3 | use tower::{Layer, Service}; 4 | 5 | pub(crate) mod metrics; 6 | pub(crate) mod trace; 7 | 8 | /// Combine two layers or services into one. 9 | /// 10 | /// This differs from `tower::util::Either` in that it doesn't convert the error to `BoxError` but 11 | /// requires the services to have the same error types. 12 | #[derive(Clone, Copy, Debug)] 13 | pub(crate) enum Either { 14 | A(A), 15 | B(B), 16 | } 17 | 18 | impl Layer for Either 19 | where 20 | A: Layer, 21 | B: Layer, 22 | { 23 | type Service = Either; 24 | 25 | fn layer(&self, inner: S) -> Self::Service { 26 | match self { 27 | Self::A(layer) => Either::A(layer.layer(inner)), 28 | Self::B(layer) => Either::B(layer.layer(inner)), 29 | } 30 | } 31 | } 32 | 33 | impl Service for Either 34 | where 35 | A: Service, 36 | B: Service, 37 | { 38 | type Response = A::Response; 39 | type Error = A::Error; 40 | type Future = futures_util::future::Either; 41 | 42 | fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { 43 | match self { 44 | Either::A(svc) => svc.poll_ready(cx), 45 | Either::B(svc) => svc.poll_ready(cx), 46 | } 47 | } 48 | 49 | fn call(&mut self, req: R) -> Self::Future { 50 | match self { 51 | Either::A(svc) => futures_util::future::Either::Left(svc.call(req)), 52 | Either::B(svc) => futures_util::future::Either::Right(svc.call(req)), 53 | } 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/middleware/metrics.rs: -------------------------------------------------------------------------------- 1 | use axum::{extract::MatchedPath, middleware::Next, response::IntoResponse}; 2 | use http::{header, Request}; 3 | use std::time::Instant; 4 | 5 | pub(crate) async fn track_metrics(req: Request, next: Next) -> impl IntoResponse { 6 | let start = Instant::now(); 7 | let path = path(&req).to_owned(); 8 | let method = req.method().clone(); 9 | 10 | let res = next.run(req).await; 11 | 12 | let latency = start.elapsed().as_secs_f64(); 13 | let status = res.status().as_u16().to_string(); 14 | let labels = [ 15 | ("method", method.to_string()), 16 | ("path", path), 17 | ("status", status), 18 | ]; 19 | 20 | metrics::increment_counter!("http_requests_total", &labels); 21 | metrics::histogram!("http_requests_duration_seconds", 
latency, &labels); 22 | 23 | res 24 | } 25 | 26 | fn path(req: &Request) -> &str { 27 | if is_grpc(req) { 28 | req.uri().path() 29 | } else if let Some(matched_path) = req.extensions().get::() { 30 | let path = matched_path.as_str(); 31 | 32 | // In axum, if you nest an opaque `Service` at "/" then that will hijack all requests and 33 | // the matched path in the route will simply be `/*axum_nest`. This is what 34 | // `Server::with_service` does. 35 | // 36 | // So if the matched path starts with a wildcard then we don't have the pattern for the 37 | // route (such as `/users/:id`) but have to instead use the literal URI on the request. 38 | if path.starts_with("/*") { 39 | req.uri().path() 40 | } else { 41 | path 42 | } 43 | } else { 44 | req.uri().path() 45 | } 46 | } 47 | 48 | fn is_grpc(req: &Request) -> bool { 49 | if let Some(content_type) = content_type(req) { 50 | content_type.starts_with("application/grpc") 51 | } else { 52 | false 53 | } 54 | } 55 | 56 | fn content_type(req: &Request) -> Option<&str> { 57 | req.headers().get(header::CONTENT_TYPE)?.to_str().ok() 58 | } 59 | -------------------------------------------------------------------------------- /src/middleware/trace.rs: -------------------------------------------------------------------------------- 1 | use self::{ 2 | classify::MakeHttpOrGrpcClassifier, 3 | opentelemetry::{OtelMakeSpan, OtelOnEos, OtelOnFailure, OtelOnResponse}, 4 | }; 5 | use tower_http::trace::TraceLayer; 6 | 7 | pub(crate) mod classify; 8 | pub(crate) mod opentelemetry; 9 | 10 | pub(crate) fn layer() -> TraceLayer< 11 | MakeHttpOrGrpcClassifier, 12 | OtelMakeSpan, 13 | (), // on request 14 | OtelOnResponse, 15 | (), // on body chunk 16 | OtelOnEos, 17 | OtelOnFailure, 18 | > { 19 | TraceLayer::new(MakeHttpOrGrpcClassifier) 20 | .make_span_with(OtelMakeSpan) 21 | .on_request(()) 22 | .on_response(OtelOnResponse) 23 | .on_body_chunk(()) 24 | .on_eos(OtelOnEos) 25 | .on_failure(OtelOnFailure) 26 | } 27 | 28 | #[cfg(test)] 29 | mod tests { 30 | use crate::{config::Config, Server}; 31 | use assert_json_diff::assert_json_include; 32 | use axum::{ 33 | body::Body, 34 | routing::{get, post}, 35 | Router, 36 | }; 37 | use http::{header::HeaderName, HeaderMap, HeaderValue, Method, Request, StatusCode, Version}; 38 | use http_body::Body as _; 39 | use serde_json::{json, Value}; 40 | use std::sync::mpsc::{self, Receiver, SyncSender}; 41 | use tower::{Service, ServiceExt}; 42 | use tracing_subscriber::{ 43 | fmt::{format::FmtSpan, MakeWriter}, 44 | util::SubscriberInitExt, 45 | EnvFilter, 46 | }; 47 | 48 | #[tokio::test] 49 | async fn correct_fields_on_span_for_http() { 50 | let svc = Server::new(Config::default()) 51 | .with( 52 | Router::new() 53 | .route("/", get(|| async { StatusCode::OK })) 54 | .route( 55 | "/users/:id", 56 | get(|| async { StatusCode::INTERNAL_SERVER_ERROR }), 57 | ), 58 | ) 59 | .into_service(); 60 | 61 | let [(root_new, root_close), (users_id_new, users_id_close)] = spans_for_requests( 62 | svc, 63 | [ 64 | Request::builder() 65 | .header("x-request-id", "request-id") 66 | .header("user-agent", "tests") 67 | .uri("/") 68 | .body(Body::empty()) 69 | .unwrap(), 70 | Request::builder() 71 | .uri("/users/123") 72 | .body(Body::empty()) 73 | .unwrap(), 74 | ], 75 | ) 76 | .await; 77 | 78 | assert_json_include!( 79 | actual: root_new, 80 | expected: json!({ 81 | "fields": { 82 | "message": "new", 83 | }, 84 | "level": "INFO", 85 | "span": { 86 | "http.client_ip": "", 87 | "http.flavor": "1.1", 88 | "http.host": "", 89 | "http.method": "GET", 
90 | "http.route": "/", 91 | "http.scheme": "HTTP", 92 | "http.target": "/", 93 | "http.user_agent": "tests", 94 | "name": "HTTP request", 95 | "otel.kind": "server", 96 | "request_id": "request-id", 97 | "trace_id": "" 98 | } 99 | }), 100 | ); 101 | 102 | assert_json_include!( 103 | actual: root_close, 104 | expected: json!({ 105 | "fields": { 106 | "message": "close", 107 | }, 108 | "level": "INFO", 109 | "span": { 110 | "http.client_ip": "", 111 | "http.flavor": "1.1", 112 | "http.host": "", 113 | "http.method": "GET", 114 | "http.route": "/", 115 | "http.scheme": "HTTP", 116 | "http.status_code": "200", 117 | "http.target": "/", 118 | "http.user_agent": "tests", 119 | "name": "HTTP request", 120 | "otel.kind": "server", 121 | "otel.status_code": "OK", 122 | "request_id": "request-id", 123 | "trace_id": "" 124 | } 125 | }), 126 | ); 127 | 128 | assert_json_include!( 129 | actual: users_id_new, 130 | expected: json!({ 131 | "span": { 132 | "http.route": "/users/:id", 133 | "http.target": "/users/123", 134 | } 135 | }), 136 | ); 137 | 138 | assert_json_include!( 139 | actual: users_id_close, 140 | expected: json!({ 141 | "span": { 142 | "http.status_code": "500", 143 | "otel.status_code": "ERROR", 144 | } 145 | }), 146 | ); 147 | } 148 | 149 | #[tokio::test] 150 | async fn correct_fields_on_span_for_grpc() { 151 | let svc = Server::new(Config::default()) 152 | .with( 153 | Router::new() 154 | .route( 155 | "/package.service/Success", 156 | post(|| async { send_code_in_trailers(0) }), 157 | ) 158 | .route( 159 | "/package.service/FailUnary", 160 | post(|| async { ([("grpc-status", "13")], StatusCode::OK) }), 161 | ) 162 | .route( 163 | "/package.service/FailStream", 164 | post(|| async { send_code_in_trailers(13) }), 165 | ), 166 | ) 167 | .into_service(); 168 | 169 | let [(_, success), (_, fail_unary), (_, fail_stream)] = spans_for_requests( 170 | svc, 171 | [ 172 | mock_grpc_request_to("/package.service/Success"), 173 | mock_grpc_request_to("/package.service/FailUnary"), 174 | mock_grpc_request_to("/package.service/FailStream"), 175 | ], 176 | ) 177 | .await; 178 | 179 | assert_json_include!( 180 | actual: success, 181 | expected: json!({ 182 | "span": { 183 | "grpc.code": 0, 184 | "http.flavor": "2.0", 185 | "http.route": "/package.service/Success", 186 | "http.status_code": "200", 187 | "http.target": "/package.service/Success", 188 | "otel.status_code": "OK", 189 | } 190 | }), 191 | ); 192 | 193 | assert_json_include!( 194 | actual: fail_unary, 195 | expected: json!({ 196 | "span": { 197 | "grpc.code": 13, 198 | "http.flavor": "2.0", 199 | "http.status_code": "200", 200 | "otel.status_code": "ERROR", 201 | } 202 | }), 203 | ); 204 | 205 | assert_json_include!( 206 | actual: fail_stream, 207 | expected: json!({ 208 | "span": { 209 | "grpc.code": 13, 210 | "http.flavor": "2.0", 211 | "http.status_code": "200", 212 | "otel.status_code": "ERROR", 213 | } 214 | }), 215 | ); 216 | 217 | fn send_code_in_trailers(code: u16) -> impl axum::response::IntoResponse { 218 | let (mut tx, body) = hyper::Body::channel(); 219 | 220 | tokio::spawn(async move { 221 | let mut headers = HeaderMap::new(); 222 | headers.insert( 223 | HeaderName::from_static("grpc-status"), 224 | HeaderValue::from_str(&code.to_string()).unwrap(), 225 | ); 226 | tx.send_trailers(headers).await.unwrap(); 227 | }); 228 | 229 | (StatusCode::OK, body.boxed()) 230 | } 231 | 232 | fn mock_grpc_request_to(uri: &str) -> Request { 233 | Request::builder() 234 | .version(Version::HTTP_2) 235 | .header("content-type", "application/grpc") 
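// the `application/grpc` content-type above is what `classify::is_grpc` (and the metrics
// middleware) keys off to treat these mock requests as gRPC rather than plain HTTP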
236 | .method(Method::POST) 237 | .uri(uri) 238 | .body(Body::empty()) 239 | .unwrap() 240 | } 241 | } 242 | 243 | async fn spans_for_requests( 244 | mut router: Router<(), Body>, 245 | reqs: [Request; N], 246 | ) -> [(Value, Value); N] { 247 | use http_body::Body as _; 248 | 249 | let (make_writer, rx) = duplex_writer(); 250 | let subscriber = tracing_subscriber::fmt::fmt() 251 | .json() 252 | .with_env_filter( 253 | EnvFilter::try_new("server_framework::middleware::trace::opentelemetry=trace") 254 | .unwrap(), 255 | ) 256 | .with_writer(make_writer) 257 | .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE) 258 | .finish(); 259 | let _guard = subscriber.set_default(); 260 | 261 | let mut spans = Vec::new(); 262 | 263 | for req in reqs { 264 | let mut res = router.ready().await.unwrap().call(req).await.unwrap(); 265 | 266 | while res.data().await.is_some() {} 267 | res.trailers().await.unwrap(); 268 | drop(res); 269 | 270 | let logs = std::iter::from_fn(|| rx.try_recv().ok()) 271 | .map(|bytes| serde_json::from_slice::(&bytes).unwrap()) 272 | .collect::>(); 273 | 274 | let [new, close]: [_; 2] = logs.try_into().unwrap(); 275 | 276 | spans.push((new, close)); 277 | } 278 | 279 | spans.try_into().unwrap() 280 | } 281 | 282 | fn duplex_writer() -> (DuplexWriter, Receiver>) { 283 | let (tx, rx) = mpsc::sync_channel(1024); 284 | (DuplexWriter { tx }, rx) 285 | } 286 | 287 | #[derive(Clone)] 288 | struct DuplexWriter { 289 | tx: SyncSender>, 290 | } 291 | 292 | impl<'a> MakeWriter<'a> for DuplexWriter { 293 | type Writer = Self; 294 | 295 | fn make_writer(&'a self) -> Self::Writer { 296 | self.clone() 297 | } 298 | } 299 | 300 | impl std::io::Write for DuplexWriter { 301 | fn write(&mut self, buf: &[u8]) -> std::io::Result { 302 | self.tx.send(buf.to_vec()).unwrap(); 303 | Ok(buf.len()) 304 | } 305 | 306 | fn flush(&mut self) -> std::io::Result<()> { 307 | Ok(()) 308 | } 309 | } 310 | } 311 | -------------------------------------------------------------------------------- /src/middleware/trace/classify.rs: -------------------------------------------------------------------------------- 1 | use http::HeaderMap; 2 | use std::fmt; 3 | use tower_http::classify::{ClassifiedResponse, ClassifyEos, ClassifyResponse, MakeClassifier}; 4 | 5 | /// A classified HTTP or gRPC response. 6 | #[derive(Debug, Clone)] 7 | pub(crate) enum HttpOrGrpcClassification { 8 | Http(http::StatusCode), 9 | Grpc(u16), 10 | } 11 | 12 | impl fmt::Display for HttpOrGrpcClassification { 13 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 14 | match self { 15 | HttpOrGrpcClassification::Http(inner) => inner.fmt(f), 16 | HttpOrGrpcClassification::Grpc(inner) => inner.fmt(f), 17 | } 18 | } 19 | } 20 | 21 | /// [`MakeClassifier`] that classifies responses as either HTTP or gRPC based on the 22 | /// `content-type`. 23 | #[derive(Debug, Clone, Copy)] 24 | pub(crate) struct MakeHttpOrGrpcClassifier; 25 | 26 | impl MakeClassifier for MakeHttpOrGrpcClassifier { 27 | type Classifier = HttpOrGrpcClassifier; 28 | type FailureClass = HttpOrGrpcClassification; 29 | type ClassifyEos = GrpcClassifyEos; 30 | 31 | fn make_classifier(&self, req: &http::Request) -> Self::Classifier { 32 | if is_grpc(req.headers()) { 33 | HttpOrGrpcClassifier::Grpc 34 | } else { 35 | HttpOrGrpcClassifier::Http 36 | } 37 | } 38 | } 39 | 40 | /// [`ClassifyResponse`] that classifies responses as either HTTP or gRPC. Created by 41 | /// [`MakeHttpOrGrpcClassifier`]. 
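A note on why the classifier below sometimes has to wait for end-of-stream: gRPC servers report their status in two ways. Unary errors often come back as "Trailers-Only" responses with `grpc-status` already on the initial headers (what the `FailUnary` test above simulates), while streaming responses only reveal it in the HTTP/2 trailers (the `FailStream` case). A small self-contained sketch of that distinction; the names here are illustrative and not part of this crate:

```rust
use http::{HeaderMap, Response, StatusCode};

/// Where a gRPC response's status can be found.
enum GrpcStatus {
    /// `grpc-status` was already on the response headers ("Trailers-Only" unary errors).
    InHeaders(u16),
    /// Not known yet; it should arrive in the HTTP/2 trailers at end-of-stream.
    DeferredToTrailers,
}

fn grpc_status_now<B>(res: &Response<B>) -> GrpcStatus {
    match parse_grpc_status(res.headers()) {
        Some(code) => GrpcStatus::InHeaders(code),
        None => GrpcStatus::DeferredToTrailers,
    }
}

fn parse_grpc_status(headers: &HeaderMap) -> Option<u16> {
    headers.get("grpc-status")?.to_str().ok()?.parse().ok()
}

#[test]
fn unary_error_is_classified_from_headers() {
    // A "Trailers-Only" style unary failure: HTTP 200 with the gRPC status up front.
    let res = Response::builder()
        .status(StatusCode::OK)
        .header("grpc-status", "13") // INTERNAL
        .body(())
        .unwrap();

    assert!(matches!(grpc_status_now(&res), GrpcStatus::InHeaders(13)));
}
```

This is exactly the split the real classifier makes: classify immediately when the status is already visible, otherwise defer to an end-of-stream classifier.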
42 | #[derive(Clone, Copy, Debug)] 43 | pub(crate) enum HttpOrGrpcClassifier { 44 | Grpc, 45 | Http, 46 | } 47 | 48 | impl ClassifyResponse for HttpOrGrpcClassifier { 49 | type FailureClass = HttpOrGrpcClassification; 50 | type ClassifyEos = GrpcClassifyEos; 51 | 52 | fn classify_response( 53 | self, 54 | res: &http::Response, 55 | ) -> ClassifiedResponse { 56 | match self { 57 | HttpOrGrpcClassifier::Grpc => { 58 | if let Some(code) = grpc_code_from_headers(res.headers()) { 59 | ClassifiedResponse::Ready( 60 | classify_grpc_code(code).map_err(HttpOrGrpcClassification::Grpc), 61 | ) 62 | } else { 63 | ClassifiedResponse::RequiresEos(GrpcClassifyEos) 64 | } 65 | } 66 | HttpOrGrpcClassifier::Http => { 67 | if res.status().is_server_error() { 68 | ClassifiedResponse::Ready(Err(HttpOrGrpcClassification::Http(res.status()))) 69 | } else { 70 | ClassifiedResponse::Ready(Ok(())) 71 | } 72 | } 73 | } 74 | } 75 | 76 | fn classify_error(self, error: &E) -> Self::FailureClass 77 | where 78 | E: fmt::Display + 'static, 79 | { 80 | unreachable!( 81 | "we handle all errors from middleware so this will never be called. error={}", 82 | error 83 | ) 84 | } 85 | } 86 | 87 | pub(crate) struct GrpcClassifyEos; 88 | 89 | impl ClassifyEos for GrpcClassifyEos { 90 | type FailureClass = HttpOrGrpcClassification; 91 | 92 | fn classify_eos(self, trailers: Option<&HeaderMap>) -> Result<(), Self::FailureClass> { 93 | let trailers = if let Some(trailers) = trailers { 94 | trailers 95 | } else { 96 | return Ok(()); 97 | }; 98 | 99 | let code = if let Some(code) = grpc_code_from_headers(trailers) { 100 | code 101 | } else { 102 | return Ok(()); 103 | }; 104 | 105 | classify_grpc_code(code).map_err(HttpOrGrpcClassification::Grpc) 106 | } 107 | 108 | fn classify_error(self, error: &E) -> Self::FailureClass 109 | where 110 | E: fmt::Display + 'static, 111 | { 112 | unreachable!( 113 | "we handle all errors from middleware so this will never be called. 
error={}", 114 | error 115 | ) 116 | } 117 | } 118 | 119 | const GRPC_CONTENT_TYPE: &str = "application/grpc"; 120 | const GRPC_STATUS_HEADER: &str = "grpc-status"; 121 | 122 | pub(super) fn is_grpc(headers: &HeaderMap) -> bool { 123 | headers 124 | .get(http::header::CONTENT_TYPE) 125 | .and_then(|value| value.to_str().ok()) 126 | .map_or(false, |value| value.starts_with(GRPC_CONTENT_TYPE)) 127 | || headers.contains_key(GRPC_STATUS_HEADER) 128 | } 129 | 130 | pub(super) fn grpc_code_from_headers(headers: &HeaderMap) -> Option { 131 | headers 132 | .get(GRPC_STATUS_HEADER) 133 | .and_then(|value| value.to_str().ok()) 134 | .and_then(|value| value.parse().ok()) 135 | } 136 | 137 | pub(super) fn classify_grpc_code(code: u16) -> Result<(), u16> { 138 | // these are considered client errors 139 | const OK: u16 = 0; 140 | const INVALID_ARGUMENT: u16 = 3; 141 | const NOT_FOUND: u16 = 5; 142 | const ALREADY_EXISTS: u16 = 6; 143 | const PERMISSION_DENIED: u16 = 7; 144 | const FAILED_PRECONDITION: u16 = 9; 145 | const OUT_OF_RANGE: u16 = 11; 146 | const UNIMPLEMENTED: u16 = 12; 147 | const UNAUTHENTICATED: u16 = 16; 148 | 149 | match code { 150 | OK | INVALID_ARGUMENT | NOT_FOUND | ALREADY_EXISTS | PERMISSION_DENIED 151 | | FAILED_PRECONDITION | OUT_OF_RANGE | UNIMPLEMENTED | UNAUTHENTICATED => Ok(()), 152 | _ => Err(code), 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /src/middleware/trace/opentelemetry.rs: -------------------------------------------------------------------------------- 1 | use super::classify::{self, HttpOrGrpcClassification}; 2 | use axum::extract::{ConnectInfo, MatchedPath}; 3 | use http::{header, uri::Scheme, Method, Request, Response, Version}; 4 | use opentelemetry::trace::TraceContextExt; 5 | use std::{borrow::Cow, net::SocketAddr, time::Duration}; 6 | use tower_http::{ 7 | request_id::RequestId, 8 | trace::{MakeSpan, OnEos, OnFailure, OnResponse}, 9 | }; 10 | use tracing::{field::Empty, Span}; 11 | use tracing_opentelemetry::OpenTelemetrySpanExt; 12 | 13 | /// A [`MakeSpan`] that creates tracing spans using [OpenTelemetry's conventional field names][otel]. 
14 | /// 15 | /// [otel]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md 16 | #[derive(Clone, Copy)] 17 | pub(crate) struct OtelMakeSpan; 18 | 19 | impl MakeSpan for OtelMakeSpan { 20 | fn make_span(&mut self, req: &Request) -> Span { 21 | let user_agent = req 22 | .headers() 23 | .get(header::USER_AGENT) 24 | .map_or("", |h| h.to_str().unwrap_or("")); 25 | 26 | let host = req 27 | .headers() 28 | .get(header::HOST) 29 | .map_or("", |h| h.to_str().unwrap_or("")); 30 | 31 | let scheme = req 32 | .uri() 33 | .scheme() 34 | .map_or_else(|| "HTTP".into(), http_scheme); 35 | 36 | let http_route = if classify::is_grpc(req.headers()) { 37 | req.uri().path().to_owned() 38 | } else if let Some(matched_path) = req.extensions().get::() { 39 | matched_path.as_str().to_owned() 40 | } else { 41 | req.uri().path().to_owned() 42 | }; 43 | 44 | let client_ip = req 45 | .extensions() 46 | .get::>() 47 | .map(|ConnectInfo(client_ip)| Cow::from(client_ip.to_string())) 48 | .unwrap_or_default(); 49 | 50 | let request_id = req 51 | .extensions() 52 | .get::() 53 | .and_then(|id| id.header_value().to_str().ok()) 54 | .unwrap_or_default(); 55 | 56 | let remote_context = extract_remote_context(req.headers()); 57 | let remote_span = remote_context.span(); 58 | let span_context = remote_span.span_context(); 59 | let trace_id = span_context 60 | .is_valid() 61 | .then(|| Cow::from(span_context.trace_id().to_string())) 62 | .unwrap_or_default(); 63 | 64 | let span = tracing::info_span!( 65 | "HTTP request", 66 | grpc.code = Empty, 67 | http.client_ip = %client_ip, 68 | http.flavor = %http_flavor(req.version()), 69 | http.host = %host, 70 | http.method = %http_method(req.method()), 71 | http.route = %http_route, 72 | http.scheme = %scheme, 73 | http.status_code = Empty, 74 | http.target = %req.uri().path_and_query().map_or("", |p| p.as_str()), 75 | http.user_agent = %user_agent, 76 | otel.kind = "server", 77 | otel.status_code = Empty, 78 | request_id = request_id, 79 | trace_id = %trace_id, 80 | ); 81 | 82 | span.set_parent(remote_context); 83 | 84 | span 85 | } 86 | } 87 | 88 | fn http_method(method: &Method) -> Cow<'static, str> { 89 | match method { 90 | &Method::CONNECT => "CONNECT".into(), 91 | &Method::DELETE => "DELETE".into(), 92 | &Method::GET => "GET".into(), 93 | &Method::HEAD => "HEAD".into(), 94 | &Method::OPTIONS => "OPTIONS".into(), 95 | &Method::PATCH => "PATCH".into(), 96 | &Method::POST => "POST".into(), 97 | &Method::PUT => "PUT".into(), 98 | &Method::TRACE => "TRACE".into(), 99 | other => other.to_string().into(), 100 | } 101 | } 102 | 103 | fn http_flavor(version: Version) -> Cow<'static, str> { 104 | match version { 105 | Version::HTTP_09 => "0.9".into(), 106 | Version::HTTP_10 => "1.0".into(), 107 | Version::HTTP_11 => "1.1".into(), 108 | Version::HTTP_2 => "2.0".into(), 109 | Version::HTTP_3 => "3.0".into(), 110 | other => format!("{:?}", other).into(), 111 | } 112 | } 113 | 114 | fn http_scheme(scheme: &Scheme) -> Cow<'static, str> { 115 | if scheme == &Scheme::HTTP { 116 | "http".into() 117 | } else if scheme == &Scheme::HTTPS { 118 | "https".into() 119 | } else { 120 | scheme.to_string().into() 121 | } 122 | } 123 | 124 | // If remote request has no span data the propagator defaults to an unsampled context 125 | fn extract_remote_context(headers: &http::HeaderMap) -> opentelemetry::Context { 126 | let extractor = opentelemetry_http::HeaderExtractor(headers); 127 | 
opentelemetry::global::get_text_map_propagator(|propagator| propagator.extract(&extractor)) 128 | } 129 | 130 | /// Callback that [`Trace`] will call when it receives a response. This is called regardless if the 131 | /// response is classified as a success or failure. 132 | /// 133 | /// [`Trace`]: tower_http::trace::TRACE 134 | #[derive(Clone, Debug)] 135 | pub(crate) struct OtelOnResponse; 136 | 137 | impl OnResponse for OtelOnResponse { 138 | fn on_response(self, response: &Response, _latency: Duration, span: &Span) { 139 | let status = response.status().as_u16().to_string(); 140 | span.record("http.status_code", &tracing::field::display(status)); 141 | 142 | if let Some(code) = classify::grpc_code_from_headers(response.headers()) { 143 | span.record("grpc.code", code); 144 | } 145 | 146 | // assume there is no error, if there is `OtelOnFailure` will be called and override this 147 | span.record("otel.status_code", "OK"); 148 | } 149 | } 150 | 151 | /// Callback that [`Trace`] will call when a streaming response completes. This is called 152 | /// regardless if the stream is classified as a success or failure. 153 | /// 154 | /// [`Trace`]: tower_http::trace::TRACE 155 | #[derive(Clone, Debug)] 156 | pub(crate) struct OtelOnEos; 157 | 158 | impl OnEos for OtelOnEos { 159 | fn on_eos(self, trailers: Option<&http::HeaderMap>, _stream_duration: Duration, span: &Span) { 160 | if let Some(code) = trailers.and_then(classify::grpc_code_from_headers) { 161 | span.record("grpc.code", code); 162 | } 163 | } 164 | } 165 | 166 | /// Callback that [`Trace`] will call when a response or end-of-stream is classified as a failure. 167 | /// 168 | /// Since we require all services and middleware to be infallible this will never be called for 169 | /// "errors" in the `tower::Service::Error` sense. A response will always be produced. 170 | /// 171 | /// [`Trace`]: tower_http::trace::TRACE 172 | #[derive(Clone, Debug)] 173 | pub(crate) struct OtelOnFailure; 174 | 175 | impl OnFailure for OtelOnFailure { 176 | fn on_failure(&mut self, failure: HttpOrGrpcClassification, _latency: Duration, span: &Span) { 177 | match failure { 178 | HttpOrGrpcClassification::Http(status) => { 179 | if status.is_server_error() { 180 | span.record("otel.status_code", "ERROR"); 181 | } 182 | } 183 | HttpOrGrpcClassification::Grpc(code) => { 184 | if classify::classify_grpc_code(code).is_err() { 185 | span.record("otel.status_code", "ERROR"); 186 | } 187 | } 188 | } 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /src/pprof.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use pprof::protos::Profile; 4 | use serde::Deserialize; 5 | 6 | #[derive(Deserialize)] 7 | pub(crate) struct Params { 8 | #[serde(default = "default_seconds")] 9 | pub(crate) seconds: u64, 10 | } 11 | 12 | fn default_seconds() -> u64 { 13 | 2 14 | } 15 | 16 | pub(crate) async fn collect_profile(duration: Duration) -> anyhow::Result { 17 | let guard = pprof::ProfilerGuardBuilder::default() 18 | .frequency(1000) 19 | .blocklist(&["libc", "libgcc", "pthread", "vdso"]) 20 | .build()?; 21 | 22 | tokio::time::sleep(duration).await; 23 | 24 | Ok(guard.report().build()?.pprof()?) 
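    // (`report().build()` aggregates the stack samples gathered during the sleep above;
    // `pprof()` encodes them as the protobuf `Profile` that the `/debug/pprof/profile`
    // route in `src/server.rs` serves.)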
25 | } 26 | -------------------------------------------------------------------------------- /src/request_id.rs: -------------------------------------------------------------------------------- 1 | use http::Request; 2 | use tower_http::request_id::RequestId; 3 | use uuid::Uuid; 4 | 5 | #[derive(Clone, Copy)] 6 | pub(crate) struct MakeRequestUuid; 7 | 8 | impl tower_http::request_id::MakeRequestId for MakeRequestUuid { 9 | fn make_request_id(&mut self, _: &Request) -> Option { 10 | let request_id = Uuid::new_v4().to_string().parse().ok()?; 11 | Some(RequestId::new(request_id)) 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/server.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use std::{convert::Infallible, fmt, net::SocketAddr, time::Duration}; 3 | 4 | use axum::extract::Query; 5 | use axum::response::IntoResponse; 6 | use axum::{ 7 | body::{self, BoxBody}, 8 | error_handling::{HandleError, HandleErrorLayer}, 9 | extract::Extension, 10 | response::Response, 11 | routing::{get, Route}, 12 | Router, 13 | }; 14 | use http::{header::HeaderName, StatusCode}; 15 | use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; 16 | use tokio::net::TcpListener; 17 | use tower::{layer::util::Identity, timeout::Timeout, Service, ServiceBuilder}; 18 | use tower_http::sensitive_headers::SetSensitiveHeadersLayer; 19 | use tower_http::ServiceBuilderExt; 20 | use tracing::error; 21 | 22 | use crate::{ 23 | error_handling::{default_error_handler, error_display_chain, DefaultErrorHandler, TimeoutSec}, 24 | health::{AlwaysLiveAndReady, HealthCheck, NoHealthCheckProvided}, 25 | middleware::{metrics::track_metrics, trace, Either}, 26 | request_id::MakeRequestUuid, 27 | Config, Request, 28 | }; 29 | 30 | /// List of default headers that will be marked as sensitive and hidden from logs. 31 | const DEFAULT_SENSITIVE_HEADERS: [HeaderName; 4] = [ 32 | http::header::AUTHORIZATION, 33 | http::header::PROXY_AUTHORIZATION, 34 | http::header::COOKIE, 35 | http::header::SET_COOKIE, 36 | ]; 37 | 38 | /// An HTTP server that runs [`Service`]s with a conventional stack of middleware. 39 | pub struct Server { 40 | config: Config, 41 | router: Router<(), BoxBody>, 42 | error_handler: F, 43 | health_check: H, 44 | metric_setup_callback: Option, 45 | metric_buckets: Option)>>, 46 | } 47 | 48 | impl Default for Server { 49 | fn default() -> Self { 50 | Self { 51 | config: Config::default(), 52 | router: Router::default(), 53 | error_handler: default_error_handler, 54 | health_check: NoHealthCheckProvided, 55 | metric_buckets: None, 56 | metric_setup_callback: None, 57 | } 58 | } 59 | } 60 | 61 | impl fmt::Debug for Server 62 | where 63 | H: fmt::Debug, 64 | { 65 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 66 | let Self { 67 | config, 68 | router, 69 | health_check, 70 | metric_buckets, 71 | metric_setup_callback: _, 72 | error_handler: _, 73 | } = self; 74 | 75 | f.debug_struct("Server") 76 | .field("config", &config) 77 | .field("router", &router) 78 | .field("health_check", &health_check) 79 | .field("metric_buckets", &metric_buckets) 80 | .finish() 81 | } 82 | } 83 | 84 | impl Server { 85 | /// Create a new `Server` with the given config. 
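Before the `Server` builder methods, a brief aside on `MakeRequestUuid` from `src/request_id.rs` above: it is an implementation of tower-http's `MakeRequestId` trait, and `into_service` further down wires it in via `ServiceBuilderExt::set_request_id`/`propagate_request_id`. The sketch below shows the same wiring with a hand-rolled, counter-based id source; `MakeRequestCounter` and the `/` route are illustrative only, and it assumes tower-http is built with its request-id support enabled:

```rust
use std::sync::{
    atomic::{AtomicU64, Ordering},
    Arc,
};

use axum::{routing::get, Router};
use http::{HeaderName, Request};
use tower::ServiceBuilder;
use tower_http::{
    request_id::{MakeRequestId, RequestId},
    ServiceBuilderExt,
};

/// Illustrative `MakeRequestId` that hands out sequential ids instead of UUIDs.
#[derive(Clone, Default)]
struct MakeRequestCounter {
    counter: Arc<AtomicU64>,
}

impl MakeRequestId for MakeRequestCounter {
    fn make_request_id<B>(&mut self, _req: &Request<B>) -> Option<RequestId> {
        let id = self.counter.fetch_add(1, Ordering::Relaxed).to_string();
        Some(RequestId::new(id.parse().ok()?))
    }
}

fn app() -> Router {
    let header = HeaderName::from_static("x-request-id");

    Router::new()
        .route("/", get(|| async { "hello" }))
        .layer(
            ServiceBuilder::new()
                // generate an id for requests that do not already carry one...
                .set_request_id(header.clone(), MakeRequestCounter::default())
                // ...and copy it onto the response so logs and callers can correlate them
                .propagate_request_id(header),
        )
}
```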
86 | pub fn new(config: Config) -> Self { 87 | Self { 88 | config, 89 | router: Default::default(), 90 | error_handler: default_error_handler, 91 | health_check: NoHealthCheckProvided, 92 | metric_buckets: Default::default(), 93 | metric_setup_callback: Default::default(), 94 | } 95 | } 96 | } 97 | 98 | impl Server { 99 | /// Add routes to the server. 100 | /// 101 | /// ```rust 102 | /// use server_framework::Server; 103 | /// use axum::{Router, routing::get}; 104 | /// 105 | /// let routes = Router::new().route("/", get(|| async { "Hello, World!" })); 106 | /// 107 | /// # async { 108 | /// Server::default() 109 | /// .with(routes) 110 | /// .always_live_and_ready() 111 | /// .serve() 112 | /// .await 113 | /// .unwrap(); 114 | /// # }; 115 | /// ``` 116 | /// 117 | /// Or [`Resource`](axum_extra::routing::Resource): 118 | /// 119 | /// ```rust 120 | /// use server_framework::Server; 121 | /// use axum_extra::routing::Resource; 122 | /// use axum::{ 123 | /// Router, 124 | /// async_trait, 125 | /// extract::{Path, FromRequestParts}, 126 | /// routing::get, 127 | /// body::BoxBody, 128 | /// }; 129 | /// 130 | /// struct Users { 131 | /// dependency: SomeDependency, 132 | /// } 133 | /// 134 | /// impl Users { 135 | /// fn resource() -> Resource<(), BoxBody> { 136 | /// Resource::named("users") 137 | /// .index(Self::index) 138 | /// .create(Self::create) 139 | /// .show(Self::show) 140 | /// } 141 | /// 142 | /// async fn index(self) {} 143 | /// 144 | /// async fn create(self) {} 145 | /// 146 | /// async fn show(self, Path(user_id): Path) {} 147 | /// } 148 | /// 149 | /// #[async_trait] 150 | /// impl FromRequestParts for Users 151 | /// where 152 | /// S: Send + Sync + 'static, 153 | /// { 154 | /// // ... 155 | /// # type Rejection = std::convert::Infallible; 156 | /// # async fn from_request_parts(_: &mut http::request::Parts, _: &S) -> Result { 157 | /// # todo!() 158 | /// # } 159 | /// } 160 | /// 161 | /// struct SomeDependency; 162 | /// 163 | /// # async { 164 | /// Server::default() 165 | /// .with(Users::resource()) 166 | /// .always_live_and_ready() 167 | /// .serve() 168 | /// .await 169 | /// .unwrap(); 170 | /// # }; 171 | /// ``` 172 | /// 173 | /// `with` can be called multiple times to add multiples sets of routes: 174 | /// 175 | /// ```rust 176 | /// use server_framework::Server; 177 | /// use axum::{Router, response::Json, routing::get}; 178 | /// use serde_json::json; 179 | /// 180 | /// let routes = Router::new().route("/", get(|| async { "Hello, World!" })); 181 | /// 182 | /// let api_routes = Router::new().route("/api", get(|| async { 183 | /// Json(json!({ "data": [1, 2, 3] })) 184 | /// })); 185 | /// 186 | /// # async { 187 | /// Server::default() 188 | /// .with(routes) 189 | /// .with(api_routes) 190 | /// .always_live_and_ready() 191 | /// // our server now accepts `GET /` and `GET /api` 192 | /// .serve() 193 | /// .await 194 | /// .unwrap(); 195 | /// # }; 196 | /// ``` 197 | pub fn with(mut self, router: T) -> Self 198 | where 199 | T: Into>, 200 | { 201 | self.router = self.router.merge(router); 202 | self 203 | } 204 | 205 | /// Add a tonic service to the server. 
206 | /// 207 | /// ```rust 208 | /// use axum::async_trait; 209 | /// use server_framework::Server; 210 | /// # 211 | /// # #[async_trait] 212 | /// # trait Greeter {} 213 | /// # #[derive(Clone)] 214 | /// # struct GreeterServer(T); 215 | /// # impl GreeterServer { 216 | /// # fn new(t: T) -> Self { Self(t) } 217 | /// # } 218 | /// # impl tonic::transport::NamedService for GreeterServer { 219 | /// # const NAME: &'static str = ""; 220 | /// # } 221 | /// # impl tower::Service> for GreeterServer { 222 | /// # type Response = axum::response::Response; 223 | /// # type Error = std::convert::Infallible; 224 | /// # type Future = std::future::Ready>; 225 | /// # fn poll_ready(&mut self, _: &mut std::task::Context<'_>) -> std::task::Poll> { 226 | /// # todo!() 227 | /// # } 228 | /// # fn call(&mut self, _: http::Request) -> Self::Future { 229 | /// # todo!() 230 | /// # } 231 | /// # } 232 | /// 233 | /// #[derive(Clone)] 234 | /// struct MyGreeter; 235 | /// 236 | /// // implement server trait generated by tonic-build 237 | /// #[async_trait] 238 | /// impl Greeter for MyGreeter { 239 | /// // ... 240 | /// } 241 | /// 242 | /// let service = GreeterServer::new(MyGreeter); 243 | /// 244 | /// # async { 245 | /// Server::default() 246 | /// .with_tonic(service) 247 | /// .always_live_and_ready() 248 | /// .serve() 249 | /// .await 250 | /// .unwrap(); 251 | /// # }; 252 | /// ``` 253 | #[cfg(feature = "tonic")] 254 | pub fn with_tonic(self, service: S) -> Self 255 | where 256 | S: Service, Error = Infallible> 257 | + tonic::transport::NamedService 258 | + Clone 259 | + Send 260 | + 'static, 261 | S::Future: Send, 262 | B: http_body::Body + Send + 'static, 263 | B::Error: Into, 264 | { 265 | self.with(router_from_tonic(service)) 266 | } 267 | 268 | /// Router all requests to the given service. 269 | /// 270 | /// Note that _all_ requests will be sent to the service and therefore the server cannot 271 | /// contain other services. If it does you'll get a panic when calling [`Server::serve`]. 272 | pub fn with_service(self, service: S) -> Self 273 | where 274 | S: Service + Clone + Send + 'static, 275 | S::Response: IntoResponse, 276 | S::Future: Send, 277 | B: http_body::Body + Send + 'static, 278 | B::Error: Into, 279 | { 280 | let svc = ServiceBuilder::new() 281 | .map_response(IntoResponse::into_response) 282 | .service(service); 283 | self.with(Router::new().nest_service("/", svc)) 284 | } 285 | 286 | /// Add a fallback service. 287 | /// 288 | /// This service will be called if no routes matches the incoming request. 289 | /// 290 | /// ```rust 291 | /// use server_framework::Server; 292 | /// use axum::{ 293 | /// Router, 294 | /// response::IntoResponse, 295 | /// http::{StatusCode, Uri}, 296 | /// handler::HandlerWithoutStateExt, 297 | /// }; 298 | /// 299 | /// async fn fallback(uri: Uri) -> impl IntoResponse { 300 | /// (StatusCode::NOT_FOUND, format!("No route for {}", uri)) 301 | /// } 302 | /// 303 | /// # async { 304 | /// Server::default() 305 | /// .fallback(fallback.into_service()) 306 | /// .always_live_and_ready() 307 | /// .serve() 308 | /// .await 309 | /// .unwrap(); 310 | /// # }; 311 | /// ``` 312 | pub fn fallback(mut self, svc: S) -> Self 313 | where 314 | S: Service + Clone + Send + 'static, 315 | S::Response: IntoResponse, 316 | S::Future: Send + 'static, 317 | { 318 | self.router = self.router.fallback_service(svc); 319 | self 320 | } 321 | 322 | /// Change how errors from middleware are converted into responses. 
323 | /// 324 | /// ```rust 325 | /// use server_framework::Server; 326 | /// use axum::{ 327 | /// Router, 328 | /// Json, 329 | /// BoxError, 330 | /// response::IntoResponse, 331 | /// http::StatusCode, 332 | /// }; 333 | /// use serde_json::json; 334 | /// 335 | /// async fn handle_error(err: BoxError) -> impl IntoResponse { 336 | /// ( 337 | /// StatusCode::INTERNAL_SERVER_ERROR, 338 | /// Json(json!({ 339 | /// "error": { 340 | /// "status": 500, 341 | /// "message": "Something went wrong...", 342 | /// "details": err.to_string(), 343 | /// }, 344 | /// })) 345 | /// ) 346 | /// } 347 | /// 348 | /// # async { 349 | /// Server::default() 350 | /// .handle_error(handle_error) 351 | /// .always_live_and_ready() 352 | /// .serve() 353 | /// .await 354 | /// .unwrap(); 355 | /// # }; 356 | /// ``` 357 | /// 358 | /// Error handles can also run axum extractors: 359 | /// 360 | /// ```rust 361 | /// use server_framework::Server; 362 | /// use axum::{ 363 | /// Router, 364 | /// BoxError, 365 | /// response::IntoResponse, 366 | /// http::{StatusCode, Method, Uri}, 367 | /// }; 368 | /// 369 | /// async fn handle_error( 370 | /// // `Method` and `Uri` are extractors since they implement `axum::extract::FromRequest` 371 | /// method: Method, 372 | /// uri: Uri, 373 | /// // the last argument must be the error 374 | /// err: BoxError, 375 | /// ) -> impl IntoResponse { 376 | /// // ... 377 | /// } 378 | /// 379 | /// # async { 380 | /// Server::default() 381 | /// .handle_error(handle_error) 382 | /// .always_live_and_ready() 383 | /// .serve() 384 | /// .await 385 | /// .unwrap(); 386 | /// # }; 387 | /// ``` 388 | /// 389 | /// Note that "errors" means errors produced by a middleware, not the application itself. The 390 | /// service(s) that makes up the actual application is required to be infallible such that 391 | /// we're to always produce a response. An endpoint returning `500 Internal Server Error` is 392 | /// not considered an "error" and this method is not for handling such cases. 393 | pub fn handle_error(self, error_handler: G) -> Server 394 | where 395 | G: Clone + Send + 'static, 396 | T: 'static, 397 | HandleError: 398 | Service, 399 | as Service>::Future: Send, 400 | { 401 | Server { 402 | config: self.config, 403 | router: self.router, 404 | error_handler, 405 | health_check: self.health_check, 406 | metric_buckets: self.metric_buckets, 407 | metric_setup_callback: self.metric_setup_callback, 408 | } 409 | } 410 | 411 | /// Provide the health check the server should use. 412 | pub fn with_health_check
<H2>
(self, health_check: H2) -> Server 413 | where 414 | H2: HealthCheck + Clone, 415 | { 416 | Server { 417 | config: self.config, 418 | router: self.router, 419 | error_handler: self.error_handler, 420 | health_check, 421 | metric_buckets: self.metric_buckets, 422 | metric_setup_callback: self.metric_setup_callback, 423 | } 424 | } 425 | 426 | /// Mark this service as always being live and ready. 427 | pub fn always_live_and_ready(self) -> Server { 428 | self.with_health_check(AlwaysLiveAndReady) 429 | } 430 | 431 | /// Set additional metric buckets to define on the prometheus recorder. 432 | /// 433 | /// Calling this multiple times will append to the list of buckets. 434 | pub fn metric_buckets(mut self, buckets: Vec<(Matcher, Vec)>) -> Self { 435 | self.metric_buckets 436 | .get_or_insert(Default::default()) 437 | .extend(buckets); 438 | self 439 | } 440 | 441 | /// A callback that will be called after the metric recorder is initialized 442 | /// 443 | /// This can be use to register metrics 444 | pub fn metric_setup_callback(mut self, callback: C) -> Self 445 | where 446 | C: FnOnce() + Send + 'static, 447 | { 448 | self.metric_setup_callback = Some(Box::new(callback)); 449 | self 450 | } 451 | 452 | /// Run the server. 453 | pub async fn serve(self) -> anyhow::Result<()> 454 | where 455 | F: Clone + Send + 'static, 456 | T: 'static, 457 | HandleError: 458 | Service, 459 | as Service>::Future: Send, 460 | H: HealthCheck + Clone, 461 | { 462 | let listener = TcpListener::bind(&self.config.bind_address).await?; 463 | self.serve_with_listener(listener).await 464 | } 465 | 466 | /// Run the server with the given [`TcpListener`]. 467 | /// 468 | /// Note this disregards `bind_address` from the config. 469 | pub async fn serve_with_listener(mut self, listener: TcpListener) -> anyhow::Result<()> 470 | where 471 | F: Clone + Send + 'static, 472 | T: 'static, 473 | HandleError: 474 | Service, 475 | as Service>::Future: Send, 476 | H: HealthCheck + Clone, 477 | { 478 | let listener = listener.into_std()?; 479 | 480 | if let Ok(addr) = listener.local_addr() { 481 | tracing::debug!("server listening on {}", addr); 482 | } 483 | 484 | let http2_only = self.config.http2_only; 485 | let graceful_shutdown = self.config.graceful_shutdown; 486 | 487 | if self.config.serve_health_and_metrics { 488 | tokio::spawn(expose_metrics_and_health( 489 | self.config.metrics_health_port, 490 | self.metric_buckets.take(), 491 | self.metric_setup_callback.take(), 492 | self.health_check.clone(), 493 | graceful_shutdown, 494 | )); 495 | } 496 | 497 | let make_svc = self 498 | .into_service() 499 | .into_make_service_with_connect_info::(); 500 | 501 | let server = hyper::Server::from_tcp(listener)? 502 | .http2_only(http2_only) 503 | .serve(make_svc); 504 | 505 | if graceful_shutdown { 506 | server.with_graceful_shutdown(signal_listener()).await?; 507 | } else { 508 | server.await?; 509 | } 510 | 511 | Ok(()) 512 | } 513 | 514 | /// Get the underlying service with middleware applied. 
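Because `into_service` (below) returns a plain `Router`, the full middleware stack can be exercised in-process without binding a socket, much like the tracing tests in `src/middleware/trace.rs` do. A rough sketch; the `/ping` route and the assertion are illustrative:

```rust
use axum::{body::Body, routing::get, Router};
use http::{Request, StatusCode};
use server_framework::{Config, Server};
use tower::ServiceExt; // for `oneshot`

#[tokio::test]
async fn ping_returns_ok() {
    // Build the server with its standard middleware, then drive it as a `tower::Service`.
    let app = Server::new(Config::default())
        .with(Router::new().route("/ping", get(|| async { "pong" })))
        .into_service();

    let res = app
        .oneshot(Request::builder().uri("/ping").body(Body::empty()).unwrap())
        .await
        .unwrap();

    assert_eq!(res.status(), StatusCode::OK);
}
```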
515 | pub fn into_service(self) -> Router<(), axum::body::Body> 516 | where 517 | F: Clone + Send + 'static, 518 | T: 'static, 519 | HandleError: 520 | Service, 521 | as Service>::Future: Send, 522 | { 523 | let request_id_header = HeaderName::from_bytes(self.config.request_id_header.as_bytes()) 524 | .unwrap_or_else(|_| panic!("Invalid request id: {:?}", self.config.request_id_header)); 525 | 526 | let metrics_layer = if self.config.serve_health_and_metrics { 527 | Either::A(axum::middleware::from_fn(track_metrics)) 528 | } else { 529 | Either::B(Identity::new()) 530 | }; 531 | 532 | self.router 533 | // these middleware are called for all routes 534 | .layer( 535 | ServiceBuilder::new() 536 | .add_extension(TimeoutSec(self.config.timeout_sec)) 537 | .propagate_request_id(request_id_header.clone()) 538 | .map_request_body(body::boxed) 539 | .layer(HandleErrorLayer::new(self.error_handler)) 540 | .timeout(Duration::from_secs(self.config.timeout_sec)), 541 | ) 542 | // these middleware are _only_ called for known routes 543 | .route_layer( 544 | ServiceBuilder::new() 545 | .layer(trace::layer()) 546 | .layer(metrics_layer), 547 | ) 548 | .layer(ServiceBuilder::new().set_request_id(request_id_header, MakeRequestUuid)) 549 | .layer(SetSensitiveHeadersLayer::from_shared(Arc::new( 550 | DEFAULT_SENSITIVE_HEADERS, 551 | ))) 552 | } 553 | } 554 | 555 | /// The type of service that produces the errors `Server.error_handler` will receive 556 | type FallibleService = Timeout>; 557 | 558 | type Callback = Box; 559 | 560 | /// Run a second HTTP server that exposes metrics and health checks. 561 | async fn expose_metrics_and_health( 562 | metrics_health_port: u16, 563 | metric_buckets: Option)>>, 564 | metric_setup_callback: Option, 565 | health_check: H, 566 | graceful_shutdown: bool, 567 | ) where 568 | H: HealthCheck + Clone, 569 | { 570 | const EXPONENTIAL_SECONDS: &[f64] = &[ 571 | 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 572 | ]; 573 | 574 | let mut recorder_builder = PrometheusBuilder::new() 575 | .set_buckets_for_metric( 576 | Matcher::Full("http_requests_duration_seconds".to_string()), 577 | EXPONENTIAL_SECONDS, 578 | ) 579 | .expect("Setting exponential seconds bucket failed"); 580 | 581 | for (matcher, values) in metric_buckets.into_iter().flatten() { 582 | if values.is_empty() { 583 | error!("Can not set empty bucket values for metrics recorder."); 584 | continue; 585 | } 586 | 587 | recorder_builder = recorder_builder 588 | .set_buckets_for_metric(matcher, &values) 589 | .unwrap(); // save because no empty buckets. 
590 | } 591 | 592 | let recorder = recorder_builder.build_recorder(); 593 | 594 | let recorder_handle = recorder.handle(); 595 | 596 | ::metrics::set_boxed_recorder(Box::new(recorder)).expect("failed to set metrics recorder"); 597 | 598 | if let Some(cb) = metric_setup_callback { 599 | cb(); 600 | } 601 | 602 | #[cfg(any(target_os = "windows", target_os = "android"))] 603 | let router = Router::new(); 604 | 605 | #[cfg(all(not(target_os = "windows"), not(target_os = "android")))] 606 | let router = Router::new().route( 607 | "/debug/pprof/profile", 608 | get(|Query(params): Query| async move { 609 | use pprof::protos::Message; 610 | 611 | let profile = crate::pprof::collect_profile(Duration::from_secs(params.seconds)) 612 | .await 613 | .map_err(|err| { 614 | tracing::error!("Failed collecting profile: {:?}", err); 615 | ( 616 | StatusCode::INTERNAL_SERVER_ERROR, 617 | "Failed collecting profile", 618 | ) 619 | })?; 620 | 621 | let mut body = Vec::new(); 622 | profile.encode(&mut body).map_err(|err| { 623 | tracing::error!("Failed encoding profile: {:?}", err); 624 | (StatusCode::INTERNAL_SERVER_ERROR, "Failed encoding profile") 625 | })?; 626 | 627 | Ok::<_, (StatusCode, &str)>((StatusCode::OK, body)) 628 | }), 629 | ); 630 | 631 | let router = 632 | router 633 | .route( 634 | "/metrics", 635 | get(|recorder_handle: Extension| async move { 636 | recorder_handle.render() 637 | }), 638 | ) 639 | .route( 640 | "/health/live", 641 | get(|Extension(health_check): Extension| async move { 642 | if let Err(err) = health_check.is_live().await { 643 | let err = error_display_chain(&*err); 644 | tracing::error!("liveness health check failed: {}", err); 645 | Err((StatusCode::SERVICE_UNAVAILABLE, err)) 646 | } else { 647 | Ok(()) 648 | } 649 | }), 650 | ) 651 | .route( 652 | "/health/ready", 653 | get(|Extension(health_check): Extension| async move { 654 | if let Err(err) = health_check.is_ready().await { 655 | let err = error_display_chain(&*err); 656 | tracing::error!("readiness health check failed: {}", err); 657 | Err((StatusCode::SERVICE_UNAVAILABLE, err)) 658 | } else { 659 | Ok(()) 660 | } 661 | }), 662 | ) 663 | .layer( 664 | ServiceBuilder::new() 665 | .add_extension(recorder_handle) 666 | .add_extension(health_check) 667 | .layer(tower_http::compression::CompressionLayer::new()), 668 | ); 669 | 670 | let bind_address = SocketAddr::from((std::net::Ipv4Addr::UNSPECIFIED, metrics_health_port)); 671 | 672 | tracing::debug!("metrics and health server listening on {}", bind_address); 673 | 674 | let server = hyper::Server::bind(&bind_address).serve(router.into_make_service()); 675 | 676 | if graceful_shutdown { 677 | server 678 | .with_graceful_shutdown(signal_listener()) 679 | .await 680 | .unwrap(); 681 | } else { 682 | server.await.unwrap(); 683 | } 684 | } 685 | 686 | #[cfg(target_family = "unix")] 687 | async fn signal_listener() { 688 | use tokio::signal::unix::SignalKind; 689 | let mut sigterm = tokio::signal::unix::signal(SignalKind::terminate()) 690 | .expect("Failed to listen on SIGTERM signal"); 691 | tokio::select!
{ 692 | _ = sigterm.recv() => { 693 | tracing::info!("SIGTERM received, shutting down server"); 694 | } 695 | _ = tokio::signal::ctrl_c() => { 696 | tracing::info!("Ctrl-c received, shutting down server"); 697 | } 698 | } 699 | } 700 | 701 | #[cfg(not(target_family = "unix"))] 702 | async fn signal_listener() { 703 | tokio::signal::ctrl_c() 704 | .await 705 | .expect("Failed to listen for Ctrl-c signal"); 706 | tracing::info!("Ctrl-c received, shutting down server"); 707 | } 708 | 709 | /// Convert a [`tonic`] service into a [`Router`]. 710 | /// 711 | /// This can be useful for composing a number of services and adding middleware to them. 712 | #[cfg(feature = "tonic")] 713 | pub fn router_from_tonic(service: S) -> Router<(), BoxBody> 714 | where 715 | S: Service, Error = Infallible> 716 | + tonic::transport::NamedService 717 | + Clone 718 | + Send 719 | + 'static, 720 | S::Future: Send, 721 | B: http_body::Body + Send + 'static, 722 | B::Error: Into, 723 | { 724 | let svc = ServiceBuilder::new() 725 | .map_response_body(body::boxed) 726 | .service(service); 727 | Router::new().route_service(&format!("/{}/*rest", S::NAME), svc) 728 | } 729 | -------------------------------------------------------------------------------- /tests/integration_tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "integration_test" 3 | version = "0.1.0" 4 | edition = "2021" 5 | publish = false 6 | 7 | [dependencies] 8 | prost = "0.11" 9 | server-framework = { path = "../../" } 10 | tokio = { version = "1.14", features = ["full"] } 11 | tonic = "0.9.1" 12 | reqwest = "0.11.10" 13 | 14 | [build-dependencies] 15 | tonic-build = "0.9.1" 16 | -------------------------------------------------------------------------------- /tests/integration_tests/build.rs: -------------------------------------------------------------------------------- 1 | fn main() -> Result<(), Box> { 2 | tonic_build::compile_protos("proto/helloworld.proto")?; 3 | Ok(()) 4 | } 5 | -------------------------------------------------------------------------------- /tests/integration_tests/proto/helloworld.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package helloworld; 4 | 5 | service Greeter { 6 | rpc SayHello (HelloRequest) returns (HelloReply); 7 | } 8 | 9 | message HelloRequest { 10 | string name = 1; 11 | } 12 | 13 | message HelloReply { 14 | string message = 1; 15 | } 16 | -------------------------------------------------------------------------------- /tests/integration_tests/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Tonic doesn't derive Eq on generated code 2 | #![allow(clippy::derive_partial_eq_without_eq)] 3 | mod hello_world { 4 | tonic::include_proto!("helloworld"); 5 | } 6 | 7 | #[cfg(test)] 8 | mod timeout; 9 | -------------------------------------------------------------------------------- /tests/integration_tests/src/timeout.rs: -------------------------------------------------------------------------------- 1 | use crate::hello_world::{ 2 | greeter_client::GreeterClient, 3 | greeter_server::{Greeter, GreeterServer}, 4 | HelloReply, HelloRequest, 5 | }; 6 | use server_framework::{axum::routing::get, Config, Router, Server}; 7 | use std::time::Duration; 8 | use tokio::net::TcpSocket; 9 | use tonic::transport::Uri; 10 | use tonic::{transport::Channel, Status}; 11 | 12 | #[derive(Clone)] 13 | pub struct MyGreeter; 14 | 15 | 
#[tonic::async_trait] 16 | impl Greeter for MyGreeter { 17 | async fn say_hello( 18 | &self, 19 | request: tonic::Request, 20 | ) -> Result, tonic::Status> { 21 | let reply = crate::hello_world::HelloReply { 22 | message: format!("Hello {}!", request.into_inner().name), 23 | }; 24 | 25 | // Sleep to trigger the server side timeout 26 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 27 | 28 | Ok(tonic::Response::new(reply)) 29 | } 30 | } 31 | 32 | #[tokio::test(flavor = "multi_thread")] 33 | async fn grpc_timeout_response() { 34 | let mut config = Config::default(); 35 | config.timeout_sec = 1; 36 | config.bind_address = "0.0.0.0:8080".parse().unwrap(); 37 | config.serve_health_and_metrics = false; 38 | 39 | let socket = TcpSocket::new_v4().unwrap(); 40 | socket.bind(config.bind_address).unwrap(); 41 | socket.set_reuseport(true).unwrap(); 42 | socket.set_reuseaddr(true).unwrap(); 43 | 44 | let bind_address = config.bind_address; 45 | 46 | let task = tokio::spawn(async move { 47 | Server::new(config) 48 | .with_tonic(GreeterServer::new(MyGreeter)) 49 | .always_live_and_ready() 50 | .serve_with_listener(socket.listen(1024).unwrap()) 51 | .await 52 | .expect("server failed to start"); 53 | }); 54 | 55 | let channel = backoff_try_connect(format!("http://{}", bind_address).parse().unwrap()) 56 | .await 57 | .unwrap(); 58 | let mut client = GreeterClient::new(channel); 59 | 60 | let request = tonic::Request::new(HelloRequest { 61 | name: "test".into(), 62 | }); 63 | 64 | // Will timeout 65 | let status = client.say_hello(request).await.unwrap_err(); 66 | 67 | let expected = Status::deadline_exceeded("request timed out"); 68 | 69 | assert_eq!(status.code(), expected.code()); 70 | assert_eq!(status.message(), expected.message()); 71 | 72 | task.abort(); 73 | } 74 | 75 | async fn backoff_try_connect(addr: Uri) -> Result { 76 | let mut last_err = None; 77 | for i in 0..3 { 78 | match Channel::builder(addr.clone()).connect().await { 79 | Ok(channel) => return Ok(channel), 80 | Err(e) => { 81 | last_err = Some(e); 82 | let wait = 50u64 * 2u64.pow(i); 83 | tokio::time::sleep(Duration::from_millis(wait)).await; 84 | } 85 | } 86 | } 87 | Err(format!( 88 | "Failed to create channel in 3 attempts, last err: {last_err:?}" 89 | )) 90 | } 91 | 92 | #[tokio::test(flavor = "multi_thread")] 93 | async fn http_timeout_response() { 94 | let mut config = Config::default(); 95 | config.timeout_sec = 1; 96 | 97 | config.bind_address = "0.0.0.0:8082".parse().unwrap(); 98 | 99 | let socket = TcpSocket::new_v4().unwrap(); 100 | socket.bind(config.bind_address).unwrap(); 101 | socket.set_reuseport(true).unwrap(); 102 | socket.set_reuseaddr(true).unwrap(); 103 | 104 | let bind_address = config.bind_address; 105 | 106 | let routes = Router::new().route( 107 | "/", 108 | get(|| async { 109 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 110 | "Hello, World!" 
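// ("Hello, World!" is never actually sent in this test: the 2s sleep above exceeds the
// server's 1s `timeout_sec`, so the timeout middleware answers with 408 "request timed out"
// first, as asserted below.)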
111 | }), 112 | ); 113 | 114 | let task = tokio::spawn(async move { 115 | Server::new(config) 116 | .with(routes) 117 | .always_live_and_ready() 118 | .serve_with_listener(socket.listen(1024).unwrap()) 119 | .await 120 | .expect("server failed to start"); 121 | }); 122 | 123 | // Will timeout 124 | let response = reqwest::get(format!("http://{}", bind_address)) 125 | .await 126 | .unwrap(); 127 | 128 | assert_eq!(response.status(), reqwest::StatusCode::REQUEST_TIMEOUT); 129 | 130 | assert_eq!( 131 | response.text().await.unwrap(), 132 | "request timed out".to_string() 133 | ); 134 | 135 | task.abort(); 136 | } 137 | --------------------------------------------------------------------------------
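Finally, a sketch of the composition story behind `router_from_tonic` (documented in `src/server.rs`): because it returns an ordinary `Router`, several tonic services can be merged and handed to the server together. The snippet assumes it lives next to the test above so that `MyGreeter` and `GreeterServer` are in scope; the commented-out second service is purely hypothetical:

```rust
use crate::hello_world::greeter_server::GreeterServer;
use server_framework::{anyhow, router_from_tonic, Config, Server};

async fn serve_grpc(config: Config) -> anyhow::Result<()> {
    let grpc = router_from_tonic(GreeterServer::new(MyGreeter));
    // Additional services compose the same way, e.g. (hypothetical second service):
    // let grpc = grpc.merge(router_from_tonic(StatusServer::new(MyStatus)));

    Server::new(config)
        .with(grpc)
        .always_live_and_ready()
        .serve()
        .await
}
```

Middleware layered onto `grpc` before calling `.with(...)` would apply only to the gRPC routes, on top of the server's own standard stack.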