├── .github ├── dependabot.yml └── workflows │ ├── ci-version.yml │ └── ci.yml ├── .gitignore ├── Cargo.toml ├── Makefile ├── README.md ├── rustfmt.toml └── src ├── cli.rs ├── functions.rs ├── lib.rs ├── main.rs ├── server.rs └── uds_serve.rs /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" -------------------------------------------------------------------------------- /.github/workflows/ci-version.yml: -------------------------------------------------------------------------------- 1 | name: CI-version 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | 11 | jobs: 12 | tests: 13 | strategy: 14 | fail-fast: false 15 | matrix: 16 | os: 17 | - ubuntu-latest 18 | toolchain: 19 | - stable 20 | - nightly 21 | target: 22 | - x86_64-unknown-linux-gnu 23 | - x86_64-unknown-linux-musl 24 | features: 25 | - 26 | - --no-default-features 27 | name: Test ${{ matrix.toolchain }} on ${{ matrix.os }} to ${{ matrix.target }} (${{ matrix.features }}) 28 | runs-on: ${{ matrix.os }} 29 | steps: 30 | - uses: actions/checkout@v4 31 | - uses: actions-rust-lang/setup-rust-toolchain@v1 32 | with: 33 | toolchain: ${{ matrix.toolchain }} 34 | target: ${{ matrix.target }} 35 | - run: cargo test --release --target ${{ matrix.target }} ${{ matrix.features }} 36 | - run: cargo doc --release --target ${{ matrix.target }} ${{ matrix.features }} 37 | 38 | MSRV: 39 | strategy: 40 | fail-fast: false 41 | matrix: 42 | os: 43 | - ubuntu-latest 44 | toolchain: 45 | - "1.70" 46 | target: 47 | - x86_64-unknown-linux-gnu 48 | - x86_64-unknown-linux-musl 49 | features: 50 | - 51 | - --no-default-features 52 | name: Test ${{ matrix.toolchain }} on ${{ matrix.os }} to ${{ matrix.target }} (${{ matrix.features }}) 53 | runs-on: ${{ matrix.os }} 54 | steps: 55 | - uses: actions/checkout@v4 56 | 
- uses: actions-rust-lang/setup-rust-toolchain@v1 57 | with: 58 | toolchain: ${{ matrix.toolchain }} 59 | target: ${{ matrix.target }} 60 | - run: cargo test --release --lib --bins --target ${{ matrix.target }} ${{ matrix.features }} -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [ push, pull_request ] 4 | 5 | env: 6 | CARGO_TERM_COLOR: always 7 | 8 | jobs: 9 | rustfmt: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: actions-rust-lang/setup-rust-toolchain@v1 14 | with: 15 | toolchain: nightly 16 | components: rustfmt 17 | - uses: actions-rust-lang/rustfmt@v1 18 | 19 | clippy: 20 | runs-on: ubuntu-latest 21 | steps: 22 | - uses: actions/checkout@v4 23 | - uses: actions-rust-lang/setup-rust-toolchain@v1 24 | with: 25 | components: clippy 26 | - run: cargo clippy --all-targets --all-features -- -D warnings 27 | 28 | tests: 29 | strategy: 30 | fail-fast: false 31 | matrix: 32 | os: 33 | - ubuntu-latest 34 | toolchain: 35 | - stable 36 | - nightly 37 | target: 38 | - x86_64-unknown-linux-gnu 39 | - x86_64-unknown-linux-musl 40 | features: 41 | - 42 | - --no-default-features 43 | name: Test ${{ matrix.toolchain }} on ${{ matrix.os }} to ${{ matrix.target }} (${{ matrix.features }}) 44 | runs-on: ${{ matrix.os }} 45 | steps: 46 | - uses: actions/checkout@v4 47 | - uses: actions-rust-lang/setup-rust-toolchain@v1 48 | with: 49 | toolchain: ${{ matrix.toolchain }} 50 | target: ${{ matrix.target }} 51 | - run: cargo test --target ${{ matrix.target }} ${{ matrix.features }} 52 | - run: cargo doc --target ${{ matrix.target }} ${{ matrix.features }} 53 | 54 | MSRV: 55 | strategy: 56 | fail-fast: false 57 | matrix: 58 | os: 59 | - ubuntu-latest 60 | toolchain: 61 | - "1.70" 62 | target: 63 | - x86_64-unknown-linux-gnu 64 | - x86_64-unknown-linux-musl 65 | features: 66 | - 67 | - 
--no-default-features 68 | name: Test ${{ matrix.toolchain }} on ${{ matrix.os }} to ${{ matrix.target }} (${{ matrix.features }}) 69 | runs-on: ${{ matrix.os }} 70 | steps: 71 | - uses: actions/checkout@v4 72 | - uses: actions-rust-lang/setup-rust-toolchain@v1 73 | with: 74 | toolchain: ${{ matrix.toolchain }} 75 | target: ${{ matrix.target }} 76 | - run: cargo test --lib --bins --target ${{ matrix.target }} ${{ matrix.features }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.gitignore.io/api/intellij+all 2 | 3 | ### Intellij+all ### 4 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm 5 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 6 | 7 | # User-specific stuff 8 | .idea/**/workspace.xml 9 | .idea/**/tasks.xml 10 | .idea/**/usage.statistics.xml 11 | .idea/**/dictionaries 12 | .idea/**/shelf 13 | 14 | # Sensitive or high-churn files 15 | .idea/**/dataSources/ 16 | .idea/**/dataSources.ids 17 | .idea/**/dataSources.local.xml 18 | .idea/**/sqlDataSources.xml 19 | .idea/**/dynamic.xml 20 | .idea/**/uiDesigner.xml 21 | .idea/**/dbnavigator.xml 22 | 23 | # Gradle 24 | .idea/**/gradle.xml 25 | .idea/**/libraries 26 | 27 | # Gradle and Maven with auto-import 28 | # When using Gradle or Maven with auto-import, you should exclude module files, 29 | # since they will be recreated, and may cause churn. Uncomment if using 30 | # auto-import. 
31 | # .idea/modules.xml 32 | # .idea/*.iml 33 | # .idea/modules 34 | 35 | # CMake 36 | cmake-build-*/ 37 | 38 | # Mongo Explorer plugin 39 | .idea/**/mongoSettings.xml 40 | 41 | # File-based project format 42 | *.iws 43 | 44 | # IntelliJ 45 | out/ 46 | 47 | # mpeltonen/sbt-idea plugin 48 | .idea_modules/ 49 | 50 | # JIRA plugin 51 | atlassian-ide-plugin.xml 52 | 53 | # Cursive Clojure plugin 54 | .idea/replstate.xml 55 | 56 | # Crashlytics plugin (for Android Studio and IntelliJ) 57 | com_crashlytics_export_strings.xml 58 | crashlytics.properties 59 | crashlytics-build.properties 60 | fabric.properties 61 | 62 | # Editor-based Rest Client 63 | .idea/httpRequests 64 | 65 | ### Intellij+all Patch ### 66 | # Ignores the whole .idea folder and all .iml files 67 | # See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 68 | 69 | .idea/ 70 | 71 | # Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 72 | 73 | *.iml 74 | modules.xml 75 | .idea/misc.xml 76 | *.ipr 77 | 78 | 79 | # End of https://www.gitignore.io/api/intellij+all 80 | 81 | 82 | ### Rust ### 83 | # Generated by Cargo 84 | # will have compiled files and executables 85 | /target/ 86 | 87 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 88 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 89 | Cargo.lock 90 | 91 | # These are backup files generated by rustfmt 92 | **/*.rs.bk 93 | 94 | 95 | # End of https://www.gitignore.io/api/rust 96 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nginx-cache-purge" 3 | version = "0.4.4" 4 | authors = ["Magic Len "] 5 | edition = "2021" 6 | rust-version = "1.70" 7 | repository = "https://github.com/magiclen/nginx-cache-purge" 8 | homepage = 
"https://magiclen.org/nginx-cache-purge" 9 | keywords = ["nginx", "cache", "clear", "purge", "delete"] 10 | categories = ["command-line-utilities"] 11 | description = "An alternative way to do `proxy_cache_purge` or `fastcgi_cache_purge` for Nginx." 12 | license = "MIT" 13 | include = ["src/**/*", "Cargo.toml", "README.md", "LICENSE"] 14 | 15 | [profile.release] 16 | lto = true 17 | codegen-units = 1 18 | panic = "abort" 19 | strip = true 20 | 21 | [dependencies] 22 | clap = { version = "4", features = ["derive"] } 23 | concat-with = "0.2" 24 | terminal_size = "0.3" 25 | 26 | anyhow = "1" 27 | 28 | md-5 = "0.10" 29 | scanner-rust = "2" 30 | 31 | tokio = { version = "1", features = ["full"] } 32 | async-recursion = "1" 33 | 34 | serde = { version = "1", features = ["derive"], optional = true } 35 | hyper = { version = "1", optional = true } 36 | hyper-util = { version = "0.1", features = ["server-auto"], optional = true } 37 | tower = { version = "0.4", optional = true } 38 | axum = { version = "0.7", optional = true } 39 | axum-extra = { version = "0.9", features = ["query"], optional = true } 40 | 41 | tracing = { version = "0.1", optional = true } 42 | enable-ansi-support = { version = "0.2", optional = true } 43 | tracing-subscriber = { version = "0.3", features = ["env-filter"], optional = true } 44 | tower-http = { version = "0.5", features = ["trace", "set-header"], optional = true } 45 | 46 | [features] 47 | default = ["service"] 48 | service = [ 49 | "serde", 50 | "hyper", 51 | "hyper-util", 52 | "tower", 53 | "axum", 54 | "axum-extra", 55 | "tracing", 56 | "enable-ansi-support", 57 | "tracing-subscriber", 58 | "tower-http" 59 | ] -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | EXECUTABLE_NAME := nginx-cache-purge 2 | 3 | all: ./target/x86_64-unknown-linux-musl/release/$(EXECUTABLE_NAME) 4 | 5 | 
./target/x86_64-unknown-linux-musl/release/$(EXECUTABLE_NAME): $(shell find . -type f -iname '*.rs' -o -name 'Cargo.toml' | grep -v ./target | sed 's/ /\\ /g') 6 | cargo build --release --target x86_64-unknown-linux-musl 7 | 8 | install: 9 | $(MAKE) 10 | sudo cp ./target/x86_64-unknown-linux-musl/release/$(EXECUTABLE_NAME) /usr/local/bin/$(EXECUTABLE_NAME) 11 | sudo chown root: /usr/local/bin/$(EXECUTABLE_NAME) 12 | sudo chmod 0755 /usr/local/bin/$(EXECUTABLE_NAME) 13 | 14 | uninstall: 15 | sudo rm /usr/local/bin/$(EXECUTABLE_NAME) 16 | 17 | test: 18 | cargo test --verbose 19 | 20 | clean: 21 | cargo clean 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Nginx Cache Purge 2 | ==================== 3 | 4 | [![CI](https://github.com/magiclen/nginx-cache-purge/actions/workflows/ci.yml/badge.svg)](https://github.com/magiclen/nginx-cache-purge/actions/workflows/ci.yml) 5 | 6 | An alternative way to do `proxy_cache_purge` or `fastcgi_cache_purge` for Nginx. 
7 | 8 | ## Usage 9 | 10 | ### Installation / Uninstallation 11 | 12 | From [crates.io](https://crates.io/crates/nginx-cache-purge), 13 | 14 | ```bash 15 | cargo install nginx-cache-purge 16 | 17 | # cargo uninstall nginx-cache-purge 18 | ``` 19 | 20 | From GitHub (Linux x86_64), 21 | 22 | ```bash 23 | curl -fL "$(curl -fsS https://api.github.com/repos/magiclen/nginx-cache-purge/releases/latest | sed -r -n 's/.*"browser_download_url": *"(.*\/nginx-cache-purge_'$(uname -m)')".*/\1/p')" -O && sudo mv nginx-cache-purge_$(uname -m) /usr/local/bin/nginx-cache-purge && sudo chmod +x /usr/local/bin/nginx-cache-purge 24 | 25 | # sudo rm /usr/local/bin/nginx-cache-purge 26 | ``` 27 | 28 | ### CLI Help 29 | 30 | ``` 31 | EXAMPLES: 32 | nginx-cache-purge p /path/to/cache 1:2 http/blog/ # Purge the cache with the key "http/blog/" in the "cache zone" whose "path" is /path/to/cache, "levels" is 1:2 33 | nginx-cache-purge p /path/to/cache 1:1:1 'http/blog*' # Purge the caches with the key which has "http/blog" as its prefix in the "cache zone" whose "path" is /path/to/cache, "levels" is 1:1:1 34 | nginx-cache-purge p /path/to/cache 2:1 '*/help*' # Purge the caches with the key which contains the substring "/help" in the "cache zone" whose "path" is /path/to/cache, "levels" is 2:1 35 | nginx-cache-purge p /path/to/cache 1 '*' # Purge all caches in the "cache zone" whose "path" is /path/to/cache, "levels" is 1 36 | nginx-cache-purge p /path/to/cache 2 '*' -e 'http/static/*' # Purge all caches except for those whose key starts with "http/static/" in the "cache zone" whose "path" is /path/to/cache, "levels" is 2 37 | nginx-cache-purge s # Start a server which listens on "/tmp/nginx-cache-purge.sock" to handle purge requests 38 | nginx-cache-purge s /run/nginx-cache-purge.sock # Start a server which listens on "/run/nginx-cache-purge.sock" to handle purge requests 39 | 40 | Usage: nginx-cache-purge 41 | 42 | Commands: 43 | purge Purge the cache immediately [aliases: p] 44 | start Start 
a server to handle purge requests [aliases: s] 45 | help Print this message or the help of the given subcommand(s) 46 | 47 | Options: 48 | -h, --help Print help 49 | -V, --version Print version 50 | ``` 51 | 52 | If the `purge` command successfully removes any cache, it returns the exit status **0**. If no cache needs to be removed, it returns the exit status **44**. 53 | 54 | ### Nginx + Nginx Cache Purge 55 | 56 | #### Start the Service of Nginx Cache Purge (systemd for example) 57 | 58 | Assume we have already put the executable file `nginx-cache-purge` in `/usr/local/bin/`. 59 | 60 | **/etc/systemd/system/nginx-cache-purge.service** 61 | 62 | ``` 63 | [Unit] 64 | Description=Nginx Cache Purge 65 | After=network.target 66 | 67 | [Service] 68 | # same as the user/group of the nginx process 69 | User=www-data 70 | Group=www-data 71 | 72 | ExecStart=/usr/local/bin/nginx-cache-purge start 73 | Restart=always 74 | RestartSec=3s 75 | 76 | [Install] 77 | WantedBy=multi-user.target 78 | ``` 79 | 80 | Run the following commands, 81 | 82 | ```bash 83 | sudo systemctl daemon-reload 84 | sudo systemctl start nginx-cache-purge 85 | sudo systemctl status nginx-cache-purge 86 | 87 | sudo systemctl enable nginx-cache-purge 88 | ``` 89 | 90 | #### Edit Nginx' Configuration File 91 | 92 | Assume we want to put the cache in `/tmp/cache`. 93 | 94 | ```nginx 95 | http { 96 | ... 97 | 98 | map $request_method $is_purge { 99 | default 0; 100 | PURGE 1; 101 | } 102 | 103 | proxy_cache_path /tmp/cache levels=1:2 keys_zone=my_cache:10m; 104 | proxy_cache_key $scheme$request_uri; 105 | 106 | server { 107 | ... 
108 | 109 | location / { 110 | if ($is_purge) { 111 | set $my_cache_key $scheme$request_uri; 112 | 113 | proxy_pass http://unix:/tmp/nginx-cache-purge.sock; 114 | 115 | rewrite ^ /?cache_path=/tmp/cache&levels=1:2&key=$my_cache_key break; 116 | } 117 | 118 | proxy_cache my_cache; 119 | proxy_pass upstream; 120 | include proxy_params; 121 | } 122 | } 123 | } 124 | ``` 125 | 126 | Remember to add your access authentication mechanisms to prevent strangers from purging your cache. And note that the cache key should not contain `$proxy_host` because it will be empty when the request is in `proxy_pass http://unix:...`. 127 | 128 | After finishing the settings: 129 | 130 | * Request `PURGE /path/to/abc` to purge the cache from `GET /path/to/abc`. 131 | * Request `PURGE /path/to/*` to purge all caches from `GET /path/to/**/*`. 132 | * Request `PURGE /path/to/*/foo/*/bar` to purge caches from `GET /path/to/**/foo/**/bar`. 133 | 134 | If the service successfully removes any cache, it will respond the HTTP status code **200**. If no cache needs to be removed, it will respond the HTTP status code **202**. 135 | 136 | Other fields that can be set to the query of the `/` endpoint URL: 137 | 138 | * `remove_first`: Allow the exclusion of the prefix from the request path of the `key`. The format should be like `?remove_first=/purge`. 139 | * `exclude_keys` (can be more than one): Exclude those keys from the purging process. It also supports the use of wildcards. The format should be like `?exclude_keys=http/static/*&exclude_keys=http/1`. The `remove_first` field does not affect `exclude_keys` fields. 140 | 141 | ### No Service 142 | 143 | If we want to use `nginx-cache-purge` CLI with [lua-nginx-module](https://github.com/openresty/lua-nginx-module), instead of running the service in the background. 144 | 145 | We can choose to disable the default features to obtain a much smaller executable binary. 
146 | 147 | ```bash 148 | cargo install nginx-cache-purge --no-default-features 149 | ``` 150 | 151 | ## License 152 | 153 | [MIT](LICENSE) -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # array_width = 60 2 | # attr_fn_like_width = 70 3 | binop_separator = "Front" 4 | blank_lines_lower_bound = 0 5 | blank_lines_upper_bound = 1 6 | brace_style = "PreferSameLine" 7 | # chain_width = 60 8 | color = "Auto" 9 | # comment_width = 100 10 | condense_wildcard_suffixes = true 11 | control_brace_style = "AlwaysSameLine" 12 | empty_item_single_line = true 13 | enum_discrim_align_threshold = 80 14 | error_on_line_overflow = false 15 | error_on_unformatted = false 16 | # fn_call_width = 60 17 | fn_params_layout = "Tall" 18 | fn_single_line = false 19 | force_explicit_abi = true 20 | force_multiline_blocks = false 21 | format_code_in_doc_comments = true 22 | doc_comment_code_block_width = 80 23 | format_generated_files = true 24 | format_macro_matchers = true 25 | format_macro_bodies = true 26 | skip_macro_invocations = [] 27 | format_strings = true 28 | hard_tabs = false 29 | hex_literal_case = "Upper" 30 | imports_indent = "Block" 31 | imports_layout = "Mixed" 32 | indent_style = "Block" 33 | inline_attribute_width = 0 34 | match_arm_blocks = true 35 | match_arm_leading_pipes = "Never" 36 | match_block_trailing_comma = true 37 | max_width = 100 38 | merge_derives = true 39 | imports_granularity = "Crate" 40 | newline_style = "Unix" 41 | normalize_comments = false 42 | normalize_doc_attributes = true 43 | overflow_delimited_expr = true 44 | remove_nested_parens = true 45 | reorder_impl_items = true 46 | reorder_imports = true 47 | group_imports = "StdExternalCrate" 48 | reorder_modules = true 49 | short_array_element_width_threshold = 10 50 | # single_line_if_else_max_width = 50 51 | space_after_colon = true 52 | space_before_colon = false 53 | 
spaces_around_ranges = false 54 | struct_field_align_threshold = 80 55 | struct_lit_single_line = false 56 | # struct_lit_width = 18 57 | # struct_variant_width = 35 58 | tab_spaces = 4 59 | trailing_comma = "Vertical" 60 | trailing_semicolon = true 61 | type_punctuation_density = "Wide" 62 | use_field_init_shorthand = true 63 | use_small_heuristics = "Max" 64 | use_try_shorthand = true 65 | where_single_line = false 66 | wrap_comments = false -------------------------------------------------------------------------------- /src/cli.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use clap::{CommandFactory, FromArgMatches, Parser, Subcommand}; 4 | use concat_with::concat_line; 5 | use terminal_size::terminal_size; 6 | 7 | const APP_NAME: &str = "Nginx Cache Purge"; 8 | const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); 9 | const CARGO_PKG_AUTHORS: &str = env!("CARGO_PKG_AUTHORS"); 10 | 11 | const AFTER_HELP: &str = "Enjoy it! 
https://magiclen.org"; 12 | 13 | const APP_ABOUT: &str = concat!( 14 | "An alternative way to do proxy_cache_purge or fastcgi_cache_purge for Nginx.\n\nEXAMPLES:\n", 15 | concat_line!(prefix "nginx-cache-purge ", 16 | "p /path/to/cache 1:2 http/blog/ # Purge the cache with the key \"http/blog/\" in the \"cache zone\" whose \"path\" is /path/to/cache, \"levels\" is 1:2", 17 | "p /path/to/cache 1:1:1 'http/blog*' # Purge the caches with the key which has \"http/blog\" as its prefix in the \"cache zone\" whose \"path\" is /path/to/cache, \"levels\" is 1:1:1", 18 | "p /path/to/cache 2:1 '*/help*' # Purge the caches with the key which contains the substring \"/help\" in the \"cache zone\" whose \"path\" is /path/to/cache, \"levels\" is 2:1", 19 | "p /path/to/cache 1 '*' # Purge all caches in the \"cache zone\" whose \"path\" is /path/to/cache, \"levels\" is 1", 20 | "p /path/to/cache 2 '*' -e 'http/static/*' # Purge all caches except for those whose key starts with \"http/static/\" in the \"cache zone\" whose \"path\" is /path/to/cache, \"levels\" is 2", 21 | "s # Start a server which listens on \"/tmp/nginx-cache-purge.sock\" to handle purge requests", 22 | "s /run/nginx-cache-purge.sock # Start a server which listens on \"/run/nginx-cache-purge.sock\" to handle purge requests", 23 | ) 24 | ); 25 | 26 | #[derive(Debug, Parser)] 27 | #[command(name = APP_NAME)] 28 | #[command(term_width = terminal_size().map(|(width, _)| width.0 as usize).unwrap_or(0))] 29 | #[command(version = CARGO_PKG_VERSION)] 30 | #[command(author = CARGO_PKG_AUTHORS)] 31 | #[command(after_help = AFTER_HELP)] 32 | pub struct CLIArgs { 33 | #[command(subcommand)] 34 | pub command: CLICommands, 35 | } 36 | 37 | #[derive(Debug, Subcommand)] 38 | pub enum CLICommands { 39 | #[command(visible_alias = "p")] 40 | #[command(about = "Purge the cache immediately")] 41 | #[command(after_help = AFTER_HELP)] 42 | Purge { 43 | #[arg(value_hint = clap::ValueHint::DirPath)] 44 | #[arg(help = "Assign the path set 
by proxy_cache_path or fastcgi_cache_path")] 45 | cache_path: PathBuf, 46 | 47 | #[arg(help = "Assign the levels set by proxy_cache_path or fastcgi_cache_path")] 48 | levels: String, 49 | 50 | #[arg(help = "Assign the key set by proxy_cache_key or fastcgi_cache_key")] 51 | key: String, 52 | 53 | #[arg(short, long, visible_alias = "exclude-key")] 54 | #[arg(num_args = 1..)] 55 | #[arg(help = "Assign the keys that should be excluded")] 56 | exclude_keys: Vec, 57 | }, 58 | #[cfg(feature = "service")] 59 | #[command(visible_alias = "s")] 60 | #[command(about = "Start a server to handle purge requests")] 61 | #[command(after_help = AFTER_HELP)] 62 | Start { 63 | #[arg(default_value = "/tmp/nginx-cache-purge.sock")] 64 | #[arg(value_hint = clap::ValueHint::FilePath)] 65 | socket_file_path: PathBuf, 66 | }, 67 | } 68 | 69 | pub fn get_args() -> CLIArgs { 70 | let args = CLIArgs::command(); 71 | 72 | let about = format!("{APP_NAME} {CARGO_PKG_VERSION}\n{CARGO_PKG_AUTHORS}\n{APP_ABOUT}"); 73 | 74 | let args = args.about(about); 75 | 76 | let matches = args.get_matches(); 77 | 78 | match CLIArgs::from_arg_matches(&matches) { 79 | Ok(args) => args, 80 | Err(err) => { 81 | err.exit(); 82 | }, 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/functions.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io, 3 | path::{Path, PathBuf}, 4 | sync::Arc, 5 | }; 6 | 7 | use anyhow::{anyhow, Context}; 8 | use async_recursion::async_recursion; 9 | use md5::{Digest, Md5}; 10 | use scanner_rust::{generic_array::typenum::U384, ScannerAscii}; 11 | use tokio::sync::Mutex; 12 | 13 | use crate::AppResult; 14 | 15 | #[inline] 16 | async fn remove_file>(path: P) -> io::Result<()> { 17 | let path = path.as_ref(); 18 | 19 | if cfg!(debug_assertions) { 20 | println!("Remove file: {path:?}"); 21 | 22 | Ok(()) 23 | } else { 24 | tokio::fs::remove_file(path).await 25 | } 26 | } 27 | 28 | 
#[inline] 29 | async fn remove_dir_all>(path: P) -> io::Result<()> { 30 | let path = path.as_ref(); 31 | 32 | if cfg!(debug_assertions) { 33 | println!("Remove dir all: {path:?}"); 34 | 35 | Ok(()) 36 | } else { 37 | tokio::fs::remove_dir_all(path).await 38 | } 39 | } 40 | 41 | #[inline] 42 | async fn remove_dir>(path: P) -> io::Result<()> { 43 | let path = path.as_ref(); 44 | 45 | if cfg!(debug_assertions) { 46 | println!("Remove dir: {path:?}"); 47 | } else { 48 | match tokio::fs::remove_dir(path).await { 49 | Ok(_) => (), 50 | Err(error) => { 51 | // check if the error is caused by directory is not empty 52 | // TODO we should just use `io::ErrorKind::DirectoryNotEmpty` in the future 53 | return if error.kind().to_string() == "directory not empty" { 54 | Err(io::Error::new(io::ErrorKind::Other, error)) 55 | } else { 56 | Err(error) 57 | }; 58 | }, 59 | } 60 | } 61 | 62 | Ok(()) 63 | } 64 | 65 | async fn remove_empty_ancestors>( 66 | path: P, 67 | relative_degree: usize, 68 | ) -> anyhow::Result<()> { 69 | if let Some(mut path) = path.as_ref().parent() { 70 | for _ in 1..=relative_degree { 71 | match remove_dir(path).await { 72 | Ok(_) => (), 73 | Err(error) 74 | if matches!(error.kind(), io::ErrorKind::NotFound | io::ErrorKind::Other) => 75 | { 76 | return Ok(()); 77 | }, 78 | Err(error) => return Err(error).with_context(|| anyhow!("{path:?}")), 79 | } 80 | 81 | match path.parent() { 82 | Some(parent) => { 83 | path = parent; 84 | }, 85 | None => break, 86 | } 87 | } 88 | } 89 | 90 | Ok(()) 91 | } 92 | 93 | /// Do something like `rm -rf /path/to/*`. The `/path/to` directory will not be deleted. This function may be dangerous. 94 | pub async fn remove_all_files_in_directory>(path: P) -> anyhow::Result { 95 | let mut result = false; 96 | 97 | let path = path.as_ref(); 98 | 99 | for dir_entry in path.read_dir().with_context(|| anyhow!("{path:?}"))? 
{ 100 | let dir_entry = dir_entry.with_context(|| anyhow!("{path:?}"))?; 101 | 102 | let file_type = match dir_entry.file_type() { 103 | Ok(file_type) => file_type, 104 | Err(error) if error.kind() == io::ErrorKind::NotFound => continue, 105 | Err(error) => return Err(error).with_context(|| anyhow!("{dir_entry:?}")), 106 | }; 107 | 108 | let path = dir_entry.path(); 109 | 110 | if file_type.is_dir() { 111 | match remove_dir_all(&path).await { 112 | Ok(_) => result = true, 113 | Err(error) if error.kind() == io::ErrorKind::NotFound => { 114 | result = true; 115 | 116 | continue; 117 | }, 118 | Err(error) => return Err(error).with_context(|| anyhow!("{path:?}")), 119 | } 120 | } else { 121 | match remove_file(&path).await { 122 | Ok(_) => result = true, 123 | Err(error) if error.kind() == io::ErrorKind::NotFound => { 124 | result = true; 125 | 126 | continue; 127 | }, 128 | Err(error) => return Err(error).with_context(|| anyhow!("{path:?}")), 129 | } 130 | } 131 | } 132 | 133 | Ok(result) 134 | } 135 | 136 | /// Purge a cache with a specific key. 
137 | pub async fn remove_one_cache, L: AsRef, K: AsRef, EK: AsRef>( 138 | cache_path: P, 139 | levels: L, 140 | key: K, 141 | exclude_keys: Vec, 142 | ) -> anyhow::Result { 143 | let levels = parse_levels(levels)?; 144 | let number_of_levels = levels.len(); 145 | 146 | let key = key.as_ref(); 147 | 148 | for exclude_key in exclude_keys { 149 | let exclude_key = exclude_key.as_ref(); 150 | 151 | if exclude_key.is_empty() && key.is_empty() { 152 | return Ok(AppResult::CacheIgnored); 153 | } 154 | 155 | let keys = parse_key(&exclude_key); 156 | 157 | if hit_key(key, &keys) { 158 | return Ok(AppResult::CacheIgnored); 159 | } 160 | } 161 | 162 | let file_path = create_cache_file_path(cache_path, levels, key); 163 | 164 | match remove_file(&file_path).await { 165 | Ok(_) => { 166 | remove_empty_ancestors(file_path, number_of_levels).await?; 167 | 168 | Ok(AppResult::Ok) 169 | }, 170 | Err(error) if error.kind() == io::ErrorKind::NotFound => { 171 | Ok(AppResult::AlreadyPurged(file_path)) 172 | }, 173 | Err(error) => Err(error).with_context(|| anyhow!("{file_path:?}")), 174 | } 175 | } 176 | 177 | /// Purge multiple caches via wildcard. 178 | pub async fn remove_caches_via_wildcard< 179 | P: AsRef, 180 | L: AsRef, 181 | K: AsRef, 182 | EK: AsRef, 183 | >( 184 | cache_path: P, 185 | levels: L, 186 | key: K, 187 | exclude_keys: Vec, 188 | ) -> anyhow::Result { 189 | #[async_recursion] 190 | async fn iterate( 191 | number_of_levels: usize, 192 | keys: Arc>>, 193 | exclude_key_keys: Arc>>>, 194 | exclude_paths: Arc>>, 195 | path: PathBuf, 196 | level: usize, 197 | ) -> anyhow::Result { 198 | let mut result = false; 199 | 200 | let mut tasks = Vec::new(); 201 | 202 | for dir_entry in path.read_dir().with_context(|| anyhow!("{path:?}"))? 
{ 203 | let dir_entry = dir_entry.with_context(|| anyhow!("{path:?}"))?; 204 | 205 | let file_type = match dir_entry.file_type() { 206 | Ok(file_type) => file_type, 207 | Err(error) if error.kind() == io::ErrorKind::NotFound => continue, 208 | Err(error) => return Err(error).with_context(|| anyhow!("{dir_entry:?}")), 209 | }; 210 | 211 | if number_of_levels == level { 212 | if file_type.is_file() { 213 | let file_path = dir_entry.path(); 214 | 215 | { 216 | let mut exclude_paths = exclude_paths.lock().await; 217 | 218 | let exclude_paths_len = exclude_paths.len(); 219 | 220 | let mut i = 0; 221 | 222 | let file_path = file_path.as_path(); 223 | 224 | while i < exclude_paths_len { 225 | let exclude_path = &exclude_paths[i]; 226 | 227 | if exclude_path == file_path { 228 | break; 229 | } 230 | 231 | i += 1; 232 | } 233 | 234 | if i != exclude_paths_len { 235 | exclude_paths.remove(i); 236 | 237 | continue; 238 | } 239 | } 240 | 241 | tasks.push(tokio::spawn(match_key_and_remove_one_cache( 242 | number_of_levels, 243 | keys.clone(), 244 | exclude_key_keys.clone(), 245 | file_path, 246 | ))); 247 | } 248 | } else if file_type.is_dir() { 249 | tasks.push(tokio::spawn(iterate( 250 | number_of_levels, 251 | keys.clone(), 252 | exclude_key_keys.clone(), 253 | exclude_paths.clone(), 254 | dir_entry.path(), 255 | level + 1, 256 | ))); 257 | } 258 | } 259 | 260 | for task in tasks { 261 | result = task.await.unwrap()? 
|| result; 262 | } 263 | 264 | Ok(result) 265 | } 266 | 267 | let cache_path = cache_path.as_ref(); 268 | 269 | let cache_path = match cache_path.canonicalize() { 270 | Ok(path) => { 271 | if !path.is_dir() { 272 | return Err(anyhow!("{cache_path:?} is not a directory.")); 273 | } 274 | 275 | path 276 | }, 277 | Err(error) if error.kind() == io::ErrorKind::NotFound => { 278 | return Ok(AppResult::AlreadyPurgedWildcard); 279 | }, 280 | Err(error) => return Err(error).with_context(|| anyhow!("{cache_path:?}")), 281 | }; 282 | 283 | let levels = parse_levels(levels)?; 284 | let number_of_levels = levels.len(); 285 | 286 | let mut exclude_key_keys: Vec>> = Vec::new(); 287 | let mut exclude_paths: Vec = Vec::new(); 288 | 289 | for exclude_key in exclude_keys { 290 | let exclude_key = exclude_key.as_ref(); 291 | 292 | if exclude_key.contains('*') { 293 | let keys: Vec> = 294 | parse_key(&exclude_key).into_iter().map(|v| v.to_vec()).collect(); 295 | 296 | if keys.len() == 1 && keys[0].is_empty() { 297 | return Ok(AppResult::AlreadyPurgedWildcard); 298 | } 299 | 300 | exclude_key_keys.push(keys); 301 | } else { 302 | let file_path = create_cache_file_path(cache_path.as_path(), &levels, exclude_key); 303 | 304 | exclude_paths.push(file_path); 305 | } 306 | } 307 | 308 | let keys = parse_key(&key); 309 | 310 | if keys.len() == 1 311 | && keys[0].is_empty() 312 | && exclude_key_keys.is_empty() 313 | && exclude_paths.is_empty() 314 | { 315 | return remove_all_files_in_directory(cache_path).await.map(|modified| { 316 | if modified { 317 | AppResult::Ok 318 | } else { 319 | AppResult::AlreadyPurgedWildcard 320 | } 321 | }); 322 | } 323 | 324 | let keys = keys.into_iter().map(|v| v.to_vec()).collect::>>(); 325 | 326 | iterate( 327 | number_of_levels, 328 | Arc::new(keys), 329 | Arc::new(exclude_key_keys), 330 | Arc::new(Mutex::new(exclude_paths)), 331 | cache_path, 332 | 0, 333 | ) 334 | .await 335 | .map(|modified| if modified { AppResult::Ok } else { 
AppResult::AlreadyPurgedWildcard }) 336 | } 337 | 338 | fn hit_key, K: AsRef<[u8]>>(read_key: RK, keys: &[K]) -> bool { 339 | let read_key = read_key.as_ref(); 340 | 341 | let mut p = 0; 342 | let mut i = 0; 343 | let read_key_len = read_key.len(); 344 | let keys_len = keys.len(); 345 | 346 | loop { 347 | let key = keys[i].as_ref(); 348 | let key_len = key.len(); 349 | 350 | if key_len == 0 { 351 | i += 1; 352 | 353 | if i == keys_len { 354 | break true; 355 | } 356 | 357 | let key = keys[i].as_ref(); 358 | let key_len = key.len(); 359 | debug_assert!(!key.is_empty()); 360 | 361 | match read_key[p..].windows(key_len).position(|window| window == key).map(|i| i + p) { 362 | Some(index) => { 363 | i += 1; 364 | 365 | if i == keys_len { 366 | break true; 367 | } 368 | 369 | p = index + key_len; 370 | }, 371 | None => { 372 | break false; 373 | }, 374 | } 375 | } else if read_key_len - p < key_len { 376 | break false; 377 | } else { 378 | let e = p + key_len; 379 | 380 | if &read_key[p..e] == key { 381 | i += 1; 382 | 383 | if i == keys_len { 384 | break true; 385 | } 386 | 387 | p = e; 388 | } else { 389 | break false; 390 | } 391 | } 392 | } 393 | } 394 | 395 | async fn match_key_and_remove_one_cache>( 396 | number_of_levels: usize, 397 | keys: Arc>>, 398 | exclude_key_keys: Arc>>>, 399 | file_path: P, 400 | ) -> anyhow::Result { 401 | let file_path = file_path.as_ref(); 402 | 403 | let mut sc: ScannerAscii<_, U384> = 404 | ScannerAscii::scan_path2(file_path).with_context(|| anyhow!("{file_path:?}"))?; 405 | 406 | // skip the header 407 | sc.drop_next_line().with_context(|| anyhow!("{file_path:?}"))?; 408 | 409 | // skip the label 410 | sc.drop_next_bytes("KEY: ".len()).with_context(|| anyhow!("{file_path:?}"))?; 411 | 412 | let read_key = sc 413 | .next_line_raw() 414 | .with_context(|| anyhow!("{file_path:?}"))? 
/// Splits a wildcard key into segments for `hit_key`.
///
/// Returns the literal parts of `key` interleaved with empty slices that
/// mark each `*`. Runs of consecutive `*`s collapse into a single empty
/// marker, so the output never contains two empty segments in a row.
/// A key consisting only of `*`s yields a single empty segment.
fn parse_key<K: AsRef<str>>(key: &K) -> Vec<&[u8]> {
    let bytes = key.as_ref().as_bytes();

    // callers never pass an empty key
    debug_assert!(!bytes.is_empty());

    let mut segments: Vec<&[u8]> = Vec::new();
    let mut start = 0;

    while let Some(offset) = bytes[start..].iter().position(|&b| b == b'*') {
        if offset == 0 {
            // a '*' with no literal before it: emit one empty marker at the
            // very front; otherwise rely on the marker already pushed after
            // the previous literal (avoids duplicates for keys like "foo**bar")
            if segments.is_empty() {
                segments.push(&[]);
            }
        } else {
            segments.push(&bytes[start..start + offset]);
            segments.push(&[]);
        }

        start += offset + 1;

        if start >= bytes.len() {
            return segments;
        }
    }

    // the remainder after the last '*' (or the whole key when there is none)
    segments.push(&bytes[start..]);

    segments
}
5 | */ 6 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | mod cli; 2 | mod functions; 3 | #[cfg(feature = "service")] 4 | mod server; 5 | #[cfg(feature = "service")] 6 | mod uds_serve; 7 | 8 | use std::{ 9 | path::{Path, PathBuf}, 10 | process::{ExitCode, Termination}, 11 | }; 12 | 13 | use cli::*; 14 | #[cfg(feature = "service")] 15 | use server::*; 16 | use tokio::runtime; 17 | 18 | #[derive(Debug)] 19 | pub enum AppResult { 20 | Ok, 21 | AlreadyPurged(PathBuf), 22 | CacheIgnored, 23 | AlreadyPurgedWildcard, 24 | } 25 | 26 | impl From<()> for AppResult { 27 | #[inline] 28 | fn from(_: ()) -> Self { 29 | AppResult::Ok 30 | } 31 | } 32 | 33 | impl Termination for AppResult { 34 | #[inline] 35 | fn report(self) -> ExitCode { 36 | let exit_code = match self { 37 | AppResult::Ok => 0u8, 38 | AppResult::AlreadyPurged(file_path) => { 39 | eprintln!("Hint: {file_path:?} does not exist"); 40 | 41 | 44 42 | }, 43 | AppResult::CacheIgnored => { 44 | eprintln!("Warning: cache is excluded from being purged"); 45 | 46 | 44 47 | }, 48 | AppResult::AlreadyPurgedWildcard => 44, 49 | }; 50 | 51 | ExitCode::from(exit_code) 52 | } 53 | } 54 | 55 | #[inline] 56 | async fn purge, L: AsRef, K: AsRef, EK: AsRef>( 57 | cache_path: P, 58 | levels: L, 59 | key: K, 60 | exclude_keys: Vec, 61 | ) -> anyhow::Result { 62 | let cache_path = cache_path.as_ref(); 63 | let levels = levels.as_ref(); 64 | let key = key.as_ref(); 65 | 66 | if key.contains('*') { 67 | functions::remove_caches_via_wildcard(cache_path, levels, key, exclude_keys).await 68 | } else { 69 | functions::remove_one_cache(cache_path, levels, key, exclude_keys).await 70 | } 71 | } 72 | 73 | fn main() -> anyhow::Result { 74 | let args = get_args(); 75 | 76 | let runtime = runtime::Runtime::new()?; 77 | 78 | runtime.block_on(async move { 79 | match &args.command { 80 | CLICommands::Purge { 81 | 
cache_path, 82 | levels, 83 | key, 84 | exclude_keys, 85 | } => { 86 | purge( 87 | cache_path, 88 | levels, 89 | key, 90 | exclude_keys.iter().map(|s| s.as_str()).collect::>(), 91 | ) 92 | .await 93 | }, 94 | #[cfg(feature = "service")] 95 | CLICommands::Start { 96 | socket_file_path, 97 | } => server_main(socket_file_path.as_path()).await, 98 | } 99 | }) 100 | } 101 | -------------------------------------------------------------------------------- /src/server.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::Permissions, 3 | io, 4 | io::IsTerminal, 5 | os::unix::fs::{FileTypeExt, PermissionsExt}, 6 | path::{Path, PathBuf}, 7 | }; 8 | 9 | use anyhow::{anyhow, Context as AnyhowContext}; 10 | use axum::{ 11 | http::{header, HeaderValue, StatusCode}, 12 | response::IntoResponse, 13 | routing::any, 14 | Router, 15 | }; 16 | use axum_extra::extract::Query; 17 | use serde::Deserialize; 18 | use tokio::{fs, net::UnixListener}; 19 | use tower_http::{ 20 | set_header::SetResponseHeaderLayer, 21 | trace::{DefaultMakeSpan, DefaultOnRequest, DefaultOnResponse, TraceLayer}, 22 | }; 23 | use tracing::Level; 24 | use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; 25 | 26 | use crate::{purge, uds_serve::serve, AppResult}; 27 | 28 | #[derive(Debug, Deserialize)] 29 | #[serde(untagged)] 30 | enum OneOrManyString { 31 | One(String), 32 | Many(Vec), 33 | } 34 | 35 | impl From for Vec { 36 | #[inline] 37 | fn from(value: OneOrManyString) -> Self { 38 | match value { 39 | OneOrManyString::One(s) => vec![s], 40 | OneOrManyString::Many(v) => v, 41 | } 42 | } 43 | } 44 | 45 | #[derive(Debug, Deserialize)] 46 | struct Args { 47 | cache_path: PathBuf, 48 | levels: String, 49 | key: String, 50 | remove_first: Option, 51 | exclude_keys: Option, 52 | } 53 | 54 | async fn index_handler( 55 | Query(Args { 56 | cache_path, 57 | levels, 58 | mut key, 59 | remove_first, 60 | exclude_keys, 61 | }): Query, 62 | 
) -> impl IntoResponse { 63 | if let Some(remove_first) = remove_first { 64 | if let Some(index) = key.find(remove_first.as_str()) { 65 | key.replace_range(index..index + remove_first.len(), ""); 66 | } 67 | } 68 | 69 | match purge(cache_path, levels, key, exclude_keys.map(|e| e.into()).unwrap_or_else(Vec::new)) 70 | .await 71 | { 72 | Ok(result) => match result { 73 | AppResult::Ok => (StatusCode::OK, "Ok.".to_string()), 74 | _ => (StatusCode::ACCEPTED, "No cache needs to be purged.".to_string()), 75 | }, 76 | Err(error) => (StatusCode::INTERNAL_SERVER_ERROR, format!("{error:?}")), 77 | } 78 | } 79 | 80 | fn create_app() -> Router { 81 | Router::new() 82 | .route("/", any(index_handler)) 83 | .layer(SetResponseHeaderLayer::overriding( 84 | header::CACHE_CONTROL, 85 | HeaderValue::from_static("no-store"), 86 | )) 87 | .layer( 88 | TraceLayer::new_for_http() 89 | .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) 90 | .on_request(DefaultOnRequest::new().level(Level::INFO)) 91 | .on_response(DefaultOnResponse::new().level(Level::INFO)), 92 | ) 93 | } 94 | 95 | pub async fn server_main(socket_file_path: &Path) -> anyhow::Result { 96 | let mut ansi_color = io::stdout().is_terminal(); 97 | 98 | if ansi_color && enable_ansi_support::enable_ansi_support().is_err() { 99 | ansi_color = false; 100 | } 101 | 102 | tracing_subscriber::registry() 103 | .with(tracing_subscriber::fmt::layer().with_ansi(ansi_color)) 104 | .with(EnvFilter::builder().with_default_directive(Level::INFO.into()).from_env_lossy()) 105 | .init(); 106 | 107 | let app = create_app(); 108 | 109 | let uds = { 110 | match fs::metadata(socket_file_path).await { 111 | Ok(metadata) => { 112 | if metadata.file_type().is_socket() { 113 | fs::remove_file(socket_file_path) 114 | .await 115 | .with_context(|| anyhow!("{socket_file_path:?}"))?; 116 | } else { 117 | return Err(anyhow!("{socket_file_path:?} exists but it is not a socket file")); 118 | } 119 | }, 120 | Err(error) if error.kind() == 
io::ErrorKind::NotFound => { 121 | // do nothing 122 | }, 123 | Err(error) => { 124 | return Err(error).with_context(|| anyhow!("{socket_file_path:?}")); 125 | }, 126 | } 127 | 128 | let uds = UnixListener::bind(socket_file_path) 129 | .with_context(|| anyhow!("{socket_file_path:?}"))?; 130 | 131 | fs::set_permissions(socket_file_path, Permissions::from_mode(0o777)) 132 | .await 133 | .with_context(|| anyhow!("{socket_file_path:?}"))?; 134 | 135 | uds 136 | }; 137 | 138 | tracing::info!("listening on {socket_file_path:?}"); 139 | serve(uds, app).await?; 140 | 141 | // let addr = "127.0.0.1:3000"; 142 | // let listener = tokio::net::TcpListener::bind(addr).await?; 143 | // tracing::info!("listening on http://{addr}"); 144 | // axum::serve(listener, app).await?; 145 | 146 | Ok(AppResult::Ok) 147 | } 148 | -------------------------------------------------------------------------------- /src/uds_serve.rs: -------------------------------------------------------------------------------- 1 | use axum::{extract::Request, Router}; 2 | use hyper::body::Incoming; 3 | use hyper_util::{ 4 | rt::{TokioExecutor, TokioIo}, 5 | server, 6 | }; 7 | use tokio::net::UnixListener; 8 | use tower::Service; 9 | 10 | pub(crate) async fn serve(uds: UnixListener, app: Router) -> anyhow::Result<()> { 11 | loop { 12 | let (socket, _remote_addr) = uds.accept().await?; 13 | 14 | let tower_service = app.clone(); 15 | 16 | tokio::spawn(async move { 17 | let socket = TokioIo::new(socket); 18 | 19 | let hyper_service = hyper::service::service_fn(move |request: Request| { 20 | tower_service.clone().call(request) 21 | }); 22 | 23 | if let Err(error) = server::conn::auto::Builder::new(TokioExecutor::new()) 24 | .serve_connection(socket, hyper_service) 25 | .await 26 | { 27 | eprintln!("failed to serve connection: {error:#}"); 28 | } 29 | }); 30 | } 31 | } 32 | --------------------------------------------------------------------------------