├── .docker ├── rabbitmq │ └── Dockerfile └── rust │ └── Dockerfile ├── .env.dist ├── .github ├── security.yml └── workflows │ ├── cargo_sort.yml │ ├── conventional_commits.yml │ └── rust.yml ├── .gitignore ├── .gitmodules ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── README.md ├── assets └── logo.jpeg ├── cli ├── Cargo.toml └── src │ └── main.rs ├── docker-compose.yml ├── examples ├── Cargo.toml └── src │ ├── destination-server.rs │ └── producer-server.rs ├── justfile ├── scripts └── init-db.sh ├── sdk ├── Cargo.toml └── src │ ├── application.rs │ ├── client.rs │ ├── endpoint.rs │ ├── error.rs │ ├── event.rs │ └── lib.rs └── server ├── Cargo.toml ├── migrations ├── 20240515163040_create_application_table.sql ├── 20240517110248_create_endpoint_table.sql ├── 20240523211340_create_events_table.sql ├── 20240526112326_create_message_table.sql └── 20240531074624_create_attempt_log_table.sql ├── server.http ├── src ├── amqp.rs ├── app.rs ├── bin │ ├── dispatcher.rs │ └── server.rs ├── circuit_breaker.rs ├── cmd.rs ├── config.rs ├── configuration │ ├── domain.rs │ ├── handlers.rs │ ├── mod.rs │ ├── models.rs │ └── storage.rs ├── dispatch_consumer.rs ├── error.rs ├── events │ ├── domain.rs │ ├── handlers.rs │ ├── mod.rs │ ├── models.rs │ └── storage.rs ├── handlers │ ├── health_check.rs │ └── mod.rs ├── lib.rs ├── logs.rs ├── retry.rs ├── routes.rs ├── sender.rs ├── storage.rs ├── tests.rs ├── time.rs └── types │ └── mod.rs └── tests └── api ├── common.rs ├── create_application.rs ├── create_endpoint.rs ├── create_event.rs ├── endpoint_status.rs ├── health_check.rs └── main.rs /.docker/rabbitmq/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG RABBITMQ_VERSION=3.13.0 2 | 3 | FROM rabbitmq:${RABBITMQ_VERSION}-management-alpine 4 | 5 | RUN apk update && apk add curl 6 | RUN curl -L https://github.com/rabbitmq/rabbitmq-delayed-message-exchange/releases/download/v$RABBITMQ_VERSION/rabbitmq_delayed_message_exchange-$RABBITMQ_VERSION.ez > $RABBITMQ_HOME/plugins/rabbitmq_delayed_message_exchange-$RABBITMQ_VERSION.ez 7 | RUN chown rabbitmq:rabbitmq /plugins/rabbitmq_delayed_message_exchange-$RABBITMQ_VERSION.ez 8 | RUN rabbitmq-plugins enable rabbitmq_delayed_message_exchange -------------------------------------------------------------------------------- /.docker/rust/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.78-alpine3.20 as builder 2 | 3 | RUN apk add --no-cache \ 4 | alpine-sdk \ 5 | libressl-dev \ 6 | bash 7 | 8 | RUN cargo install --version=0.7.4 sqlx-cli --no-default-features --features rustls,postgres 9 | RUN cargo install --version=0.1.48 cargo-udeps 10 | RUN cargo install --version=0.30.0 cargo-tarpaulin 11 | RUN cargo install --version=1.0.9 cargo-sort 12 | 13 | FROM rust:1.78-alpine3.20 14 | 15 | RUN apk update \ 16 | && apk upgrade --available \ 17 | && apk add --no-cache \ 18 | alpine-sdk \ 19 | libressl-dev \ 20 | bash \ 21 | gnupg 22 | 23 | RUN rm -rf /var/cache/apk/* 24 | 25 | COPY --from=builder /usr/local/cargo/bin/cargo-tarpaulin /usr/local/cargo/bin/cargo-tarpaulin 26 | COPY --from=builder /usr/local/cargo/bin/cargo-udeps /usr/local/cargo/bin/cargo-udeps 27 | COPY --from=builder /usr/local/cargo/bin/sqlx /usr/local/cargo/bin/sqlx 28 | COPY --from=builder /usr/local/cargo/bin/cargo-sort /usr/local/cargo/bin/cargo-sort 29 | 30 | RUN rustup component add clippy 31 | -------------------------------------------------------------------------------- /.env.dist: 
-------------------------------------------------------------------------------- 1 | ## SERVER ## 2 | SERVER_PORT=8090 3 | SERVER_HOST=localhost 4 | SERVER_URL=http://${SERVER_HOST}:${SERVER_PORT} 5 | 6 | ## POSTGRES ## 7 | POSTGRES_HOST=postgres 8 | POSTGRES_PORT=5432 9 | POSTGRES_USER=webhooks 10 | POSTGRES_PASSWORD=webhooks 11 | POSTGRES_DB=webhooks 12 | DATABASE_URL=postgres://webhooks:webhooks@postgres:5432/webhooks 13 | 14 | ## AMQP ## 15 | AMQP_HOST=rabbitmq 16 | AMQP_PORT=5672 17 | AMQP_USER=guest 18 | AMQP_PASSWORD=guest 19 | AMQP_SENT_MESSAGE_QUEUE=sent-message -------------------------------------------------------------------------------- /.github/security.yml: -------------------------------------------------------------------------------- 1 | name: Security audit 2 | on: 3 | schedule: 4 | - cron: '0 0 * * *' 5 | jobs: 6 | audit: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v3 10 | - uses: rustsec/audit-check@v1.4.1 11 | with: 12 | token: ${{ secrets.GITHUB_TOKEN }} 13 | -------------------------------------------------------------------------------- /.github/workflows/cargo_sort.yml: -------------------------------------------------------------------------------- 1 | name: Cargo sort 2 | on: 3 | pull_request: 4 | paths: 5 | - 'Cargo.toml' 6 | - '**/Cargo.toml' 7 | push: 8 | branches: 9 | - master 10 | paths: 11 | - 'Cargo.toml' 12 | - '**/Cargo.toml' 13 | jobs: 14 | cargo_sort: 15 | runs-on: ubuntu-latest 16 | container: 17 | image: ghcr.io/manhunto/webhooks-rs-dev:latest 18 | credentials: 19 | username: manhunto 20 | password: ${{ secrets.GHCR_TOKEN }} 21 | steps: 22 | - uses: actions/checkout@v4 23 | - name: Cache setup 24 | uses: Swatinem/rust-cache@v2 25 | - name: Run cargo sort 26 | run: cargo sort --workspace --check 27 | -------------------------------------------------------------------------------- /.github/workflows/conventional_commits.yml: -------------------------------------------------------------------------------- 1 | name: Conventional Commits 2 | on: 3 | push: 4 | branches: 5 | - master 6 | pull_request: 7 | jobs: 8 | build: 9 | name: Conventional Commits 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: webiny/action-conventional-commits@v1.3.0 14 | with: 15 | allowed-commit-types: "feat,fix,docs,style,refactor,test,build,perf,ci,chore,revert,merge,wip,ops" 16 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust checks 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | RUSTFLAGS: "-Dwarnings" 12 | NIGHTLY_VERSION: "nightly-2024-05-18" 13 | 14 | jobs: 15 | build: 16 | runs-on: ubuntu-latest 17 | container: 18 | image: ghcr.io/manhunto/webhooks-rs-dev:latest 19 | credentials: 20 | username: manhunto 21 | password: ${{ secrets.GHCR_TOKEN }} 22 | services: 23 | postgres: 24 | image: postgres:16.3-alpine3.20 25 | env: 26 | POSTGRES_USER: webhooks 27 | POSTGRES_PASSWORD: webhooks 28 | steps: 29 | - uses: actions/checkout@v4 30 | - name: Cache setup 31 | uses: Swatinem/rust-cache@v2 32 | - name: Init db 33 | run: ./scripts/init-db.sh 34 | - name: Build 35 | run: cargo build --all-targets --verbose 36 | tests: 37 | runs-on: ubuntu-latest 38 | container: 39 | image: ghcr.io/manhunto/webhooks-rs-dev:latest 40 | credentials: 41 | username: manhunto 42 | password: ${{ secrets.GHCR_TOKEN }} 43 | 
services: 44 | postgres: 45 | image: postgres:16.3-alpine3.20 46 | env: 47 | POSTGRES_USER: webhooks 48 | POSTGRES_PASSWORD: webhooks 49 | POSTGRES_DB: webhooks 50 | rabbitmq: 51 | image: ghcr.io/manhunto/webhooks-rs-rabbitmq:latest 52 | credentials: 53 | username: manhunto 54 | password: ${{ secrets.GHCR_TOKEN }} 55 | steps: 56 | - uses: actions/checkout@v4 57 | - name: Cache setup 58 | uses: Swatinem/rust-cache@v2 59 | - name: Init db 60 | run: ./scripts/init-db.sh 61 | - name: Run tests 62 | run: cargo test --workspace --verbose 63 | coverage: 64 | runs-on: ubuntu-latest 65 | container: 66 | image: ghcr.io/manhunto/webhooks-rs-dev:latest 67 | options: --security-opt seccomp=unconfined 68 | credentials: 69 | username: manhunto 70 | password: ${{ secrets.GHCR_TOKEN }} 71 | services: 72 | postgres: 73 | image: postgres:16.3-alpine3.20 74 | env: 75 | POSTGRES_USER: webhooks 76 | POSTGRES_PASSWORD: webhooks 77 | POSTGRES_DB: webhooks 78 | rabbitmq: 79 | image: ghcr.io/manhunto/webhooks-rs-rabbitmq:latest 80 | credentials: 81 | username: manhunto 82 | password: ${{ secrets.GHCR_TOKEN }} 83 | steps: 84 | - uses: actions/checkout@v4 85 | - name: Cache setup 86 | uses: Swatinem/rust-cache@v2 87 | - name: Init db 88 | run: ./scripts/init-db.sh 89 | - name: Install nightly toolchain 90 | uses: actions-rs/toolchain@v1 91 | with: 92 | toolchain: ${{ env.NIGHTLY_VERSION }} 93 | override: true 94 | - name: Generate code coverage 95 | run: cargo +${{ env.NIGHTLY_VERSION }} tarpaulin --verbose --all-features --workspace --ignore-tests --timeout 120 --out xml 96 | - name: Upload coverage reports to Codecov 97 | uses: codecov/codecov-action@v4.6.0 98 | with: 99 | fail_ci_if_error: true 100 | token: ${{ secrets.CODECOV_TOKEN }} 101 | verbose: true 102 | os: alpine 103 | - name: Archive code coverage results 104 | uses: actions/upload-artifact@v4 105 | with: 106 | name: code-coverage-report 107 | path: cobertura.xml 108 | clippy: 109 | runs-on: ubuntu-latest 110 | container: 111 | image: ghcr.io/manhunto/webhooks-rs-dev:latest 112 | credentials: 113 | username: manhunto 114 | password: ${{ secrets.GHCR_TOKEN }} 115 | services: 116 | postgres: 117 | image: postgres:16.3-alpine3.20 118 | env: 119 | POSTGRES_USER: webhooks 120 | POSTGRES_PASSWORD: webhooks 121 | POSTGRES_DB: webhooks 122 | steps: 123 | - uses: actions/checkout@v4 124 | - name: Cache setup 125 | uses: Swatinem/rust-cache@v2 126 | - name: Init db 127 | run: ./scripts/init-db.sh 128 | - name: Run Clippy 129 | run: cargo clippy --all-targets --all-features 130 | format: 131 | runs-on: ubuntu-latest 132 | steps: 133 | - uses: actions/checkout@v4 134 | - name: Run fmt 135 | run: cargo fmt --all --check -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /.idea 3 | .env 4 | /.sqlx 5 | build_rs_cov.profraw -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "docs"] 2 | path = docs 3 | url = git@github.com:manhunto/webhooks-rs.wiki.git 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | resolver = "2" 3 | members = ["cli", "examples", "sdk", "server"] 4 | -------------------------------------------------------------------------------- 
/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:1.78-alpine3.20 as builder 2 | 3 | RUN apk add --no-cache \ 4 | alpine-sdk \ 5 | libressl-dev \ 6 | bash 7 | 8 | RUN cargo install --version=0.7.4 sqlx-cli --no-default-features --features rustls,postgres 9 | RUN cargo install --version=0.1.48 cargo-udeps 10 | RUN cargo install --version=0.30.0 cargo-tarpaulin 11 | RUN cargo install --version=1.0.9 cargo-sort 12 | 13 | FROM rust:1.78-alpine3.20 14 | 15 | RUN apk update \ 16 | && apk upgrade --available \ 17 | && apk add --no-cache \ 18 | alpine-sdk \ 19 | libressl-dev \ 20 | bash \ 21 | ca-certificates \ 22 | gnupg 23 | 24 | RUN rm -rf /var/cache/apk/* 25 | 26 | COPY --from=builder /usr/local/cargo/bin/cargo-tarpaulin /usr/local/cargo/bin/cargo-tarpaulin 27 | COPY --from=builder /usr/local/cargo/bin/cargo-udeps /usr/local/cargo/bin/cargo-udeps 28 | COPY --from=builder /usr/local/cargo/bin/sqlx /usr/local/cargo/bin/sqlx 29 | COPY --from=builder /usr/local/cargo/bin/cargo-sort /usr/local/cargo/bin/cargo-sort 30 | 31 | RUN rustup component add clippy 32 | 33 | ADD .docker/cacert.pem /usr/local/share/ca-certificates/my.crt 34 | 35 | RUN update-ca-certificates -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Jakub Sładek 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > [!CAUTION] 2 | > The project is not finished, it is not stable and it is constantly being developed. 3 | 4 | # webhooks-rs 5 | 6 |
7 | 8 |
9 | 10 |

11 | 12 | build 13 | contributors 14 | commit-activity 15 | 16 |

17 | 18 | ## ℹ️ About 19 | 20 | **webhooks-rs** is a project for sending webhooks over HTTP. The main goal is to create 21 | an application that is high-performing, configurable and scalable. 22 | 23 | 24 | > [!NOTE] 25 | > 26 | > This project takes part in, and was created thanks to, the [100 Commits](https://100commitow.pl/) challenge, and it is my first 27 | > significant project written in Rust. 28 | 29 | ### MVP features 30 | 31 | - [x] Retry policy for failed messages 32 | - [x] Endpoint can be disabled manually 33 | - [x] Circuit breaker 34 | - [x] Persistence 35 | - [x] SDK Beta 36 | - [x] CLI Beta 37 | - [x] Documentation 38 | - [x] Integration tests 39 | - [x] Error handling and validation (as POC) 40 | 41 | ### Roadmap 42 | 43 | - [ ] Release the SDK as a crate and bins (with a GitHub Action) 44 | - [ ] Sem ver 45 | - [ ] Rate-limit 46 | - [ ] Auth 47 | - [ ] Signed webhooks - the destination server can verify that a message was sent from a valid server 48 | - [ ] Distributed architecture 49 | - [ ] Data retention 50 | - [ ] Logging and monitoring 51 | - [ ] Dockerized 52 | 53 | ## 📚 Domain explanation 54 | 55 | **Application** - A container that groups endpoints. In a multi-tenant architecture, it can be a separate tenant. 56 | Each application can have a separate configuration and secrets (in progress...). 57 | 58 | **Endpoint** - The URL of the server to which messages are sent. Each endpoint can be deactivated individually - 59 | either manually or automatically by the circuit breaker. An endpoint belongs to exactly one application. 60 | 61 | **Event** - An event that originated in your system. An event has a topic and a payload. For now, only 62 | JSON payloads are supported. 63 | 64 | **Message** - In a nutshell, an event addressed to a specific endpoint. A given event can be distributed to 65 | several endpoints. 66 | 67 | **Attempt** - A log entry for an attempt to deliver a particular message. A given message may have multiple delivery 68 | attempts (e.g. the endpoint was temporarily unavailable and the message had to be retried by the retry policy). 69 | 70 | ## ⚙️ How to use? 71 | 72 | ### Server 73 | 74 | First, set up the environment with `just init`. This command starts the Docker containers and runs the migrations. The server is split into two 75 | parts - the server and the dispatcher. Run them with `just rs` and `just rd`. 76 | 77 | The server exposes a REST API. Example requests can be found in `server/server.http`. Please familiarise yourself 78 | with the [Domain Explanation](#domain-explanation). 79 | 80 | ### SDK 81 | 82 | > [!IMPORTANT] 83 | > 84 | > The SDK requires a running server and dispatcher. See the [Server](#server) section. 85 | 86 | You can find an example of how to use the SDK in [examples/src/producer-server.rs](examples/src/producer-server.rs). 87 | 88 | ### Cli 89 | 90 | > [!IMPORTANT] 91 | > 92 | > The CLI requires a running server and dispatcher. See the [Server](#server) section. 93 | 94 | To explore all available commands, run `cargo run --package=cli`. The CLI is organised into resource sections.
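As a quick illustration (assuming the environment described in [Server](#server) is up), you can print the clap-generated help to see every resource section and its subcommands; the two invocations below are equivalent, the second uses the `run-cli` recipe from the justfile:

```shell
# Top-level help listing the resource sections (application, endpoint, event)
$ cargo run --package=cli -- --help

# Same thing via the justfile recipe, which forwards its arguments to the CLI
$ just run-cli --help
```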
95 | 96 | #### Create application 97 | 98 | ```shell 99 | $ cargo run --package=cli application create "example application" 100 | App app_2hV5JuBgjMAQlDNNbepHTFnkicy with name 'example application' has been created 101 | ``` 102 | 103 | #### Create endpoint 104 | 105 | To create an endpoint in a recently created application 106 | 107 | ```shell 108 | $ cargo run --package=cli endpoint create app_2hV5JuBgjMAQlDNNbepHTFnkicy http://localhost:8090/ contact.created,contact.updated 109 | Endpoint ep_2hV67JEIXUvFCN4bv43TUXVmX0s has been created 110 | ``` 111 | 112 | #### Create event 113 | 114 | ```shell 115 | $ cargo run --package=cli event create app_2hV5JuBgjMAQlDNNbepHTFnkicy contact.created '{"foo":"bar"}' 116 | Event evt_2hV6UoIY9p6YnLmiawSvh4nh4Uf has been created 117 | ``` 118 | 119 | ## 👨‍💻 Development 120 | 121 | ### Prerequisites 122 | 123 | - **[just](https://github.com/casey/just)** - optional, if you want to run raw commands 124 | - **[docker with docker-compose](https://www.docker.com/products/docker-desktop/)** - optional, if you want to set up 125 | the environment on your own 126 | 127 | ### Troubleshoots 128 | 129 | #### 1. "Too many open files" during running tests 130 | 131 | ``` 132 | called `Result::unwrap()` on an `Err` value: Os { code: 24, kind: Uncategorized, message: "Too many open files" } 133 | ``` 134 | 135 | Execute (on linux/mac os) `ulimit -n 10000` (default is 1024) 136 | 137 | ## 🤝 Contribution 138 | 139 | If you want to contribute to the growth of this project, please follow 140 | the [conventional commits](https://www.conventionalcommits.org/) in your pull requests. 141 | -------------------------------------------------------------------------------- /assets/logo.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manhunto/webhooks-rs/ad94b8b927ef9a6acab11969c3025c74538b8a5f/assets/logo.jpeg -------------------------------------------------------------------------------- /cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cli" 3 | version = "0.1.0" 4 | edition = "2021" 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [dependencies] 8 | anyhow = "1.0.93" 9 | clap = { version = "4.5.20", features = ["derive"] } 10 | dotenv = "0.15.0" 11 | sdk = { path = "../sdk" } 12 | serde_json = "1.0.132" 13 | tokio = { version = "1.41.1", features = ["full"] } 14 | -------------------------------------------------------------------------------- /cli/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use clap::{Parser, Subcommand}; 4 | use dotenv::dotenv; 5 | use serde_json::Value; 6 | 7 | use sdk::WebhooksSDK; 8 | 9 | /// Cli app to manage webhook-rs server 10 | #[derive(Debug, Parser, PartialEq)] 11 | #[clap(name = "webhooks-cli", version, about)] 12 | pub struct Cli { 13 | #[clap(subcommand)] 14 | command: Command, 15 | } 16 | 17 | #[derive(Clone, Debug, Subcommand, PartialEq)] 18 | enum Command { 19 | /// Resource for application management 20 | Application { 21 | #[clap(subcommand)] 22 | subcommand: ApplicationSubcommand, 23 | }, 24 | /// Resource for endpoints management 25 | Endpoint { 26 | #[clap(subcommand)] 27 | subcommand: EndpointSubcommand, 28 | }, 29 | /// Resource for events management 30 | Event { 31 | #[clap(subcommand)] 32 | subcommand: EventSubcommand, 33 | }, 34 | } 35 | 36 | 
#[derive(Clone, Debug, Subcommand, PartialEq)] 37 | enum ApplicationSubcommand { 38 | /// Creates an application 39 | Create { 40 | /// Application name 41 | name: String, 42 | }, 43 | } 44 | 45 | #[derive(Clone, Debug, Subcommand, PartialEq)] 46 | enum EndpointSubcommand { 47 | /// Creates an endpoint 48 | Create { 49 | app_id: String, 50 | url: String, 51 | #[arg(value_parser, num_args = 1.., value_delimiter = ',', required = true)] 52 | topics: Vec, 53 | }, 54 | } 55 | 56 | #[derive(Clone, Debug, Subcommand, PartialEq)] 57 | enum EventSubcommand { 58 | Create { 59 | app_id: String, 60 | topic: String, 61 | #[arg(help = "JSON payload", value_parser(parse_json_value))] 62 | payload: Value, 63 | }, 64 | } 65 | 66 | fn parse_json_value(val: &str) -> Result { 67 | let payload = serde_json::from_str(val).map_err(|e| e.to_string())?; 68 | 69 | Ok(payload) 70 | } 71 | 72 | #[tokio::main] 73 | async fn main() -> anyhow::Result<()> { 74 | dotenv().ok(); 75 | 76 | let cli = Cli::parse(); 77 | let url = env::var("SERVER_URL").expect("env SERVER_URL is not set"); 78 | let sdk = WebhooksSDK::new(&url); 79 | 80 | match cli.command { 81 | Command::Application { subcommand } => match subcommand { 82 | ApplicationSubcommand::Create { name } => { 83 | let app = sdk.application().create(name.as_str()).await?; 84 | 85 | println!("App {} with name '{}' has been created", app.id, app.name); 86 | } 87 | }, 88 | Command::Endpoint { subcommand } => match subcommand { 89 | EndpointSubcommand::Create { 90 | app_id, 91 | url, 92 | topics, 93 | } => { 94 | let topics_str = topics.iter().map(|s| s.as_str()).collect(); 95 | let endpoint = sdk.endpoints().create(&app_id, &url, topics_str).await?; 96 | 97 | println!("Endpoint {} has been created", endpoint.id); 98 | } 99 | }, 100 | Command::Event { subcommand } => match subcommand { 101 | EventSubcommand::Create { 102 | app_id, 103 | topic, 104 | payload, 105 | } => { 106 | let event = sdk.events().create(&app_id, &topic, &payload).await?; 107 | 108 | println!("Event {} has been created", event.id); 109 | } 110 | }, 111 | }; 112 | 113 | Ok(()) 114 | } 115 | 116 | #[cfg(test)] 117 | mod test { 118 | use clap::error::ErrorKind::MissingRequiredArgument; 119 | use clap::{CommandFactory, Parser}; 120 | use serde_json::json; 121 | 122 | use crate::Command::{Endpoint, Event}; 123 | use crate::{Cli, EndpointSubcommand, EventSubcommand}; 124 | 125 | #[test] 126 | fn verify_cli() { 127 | Cli::command().debug_assert() 128 | } 129 | 130 | #[test] 131 | fn endpoint_create_topics_cannot_be_empty() { 132 | let result = Cli::try_parse_from([ 133 | "webhooks-cli", 134 | "endpoint", 135 | "create", 136 | "app_2hRzcGs8D5aLaHBWHyqIcibuFA1", 137 | "http://localhost:8080", 138 | ]); 139 | 140 | assert!(result.is_err()); 141 | assert_eq!(MissingRequiredArgument, result.err().unwrap().kind()); 142 | } 143 | 144 | #[test] 145 | fn endpoint_create_single_topic() { 146 | let result = Cli::try_parse_from([ 147 | "webhooks-cli", 148 | "endpoint", 149 | "create", 150 | "app_2hRzcGs8D5aLaHBWHyqIcibuFA1", 151 | "http://localhost:8080", 152 | "contact.created", 153 | ]); 154 | 155 | let expected = Cli { 156 | command: Endpoint { 157 | subcommand: EndpointSubcommand::Create { 158 | app_id: "app_2hRzcGs8D5aLaHBWHyqIcibuFA1".to_string(), 159 | url: "http://localhost:8080".to_string(), 160 | topics: vec!["contact.created".to_string()], 161 | }, 162 | }, 163 | }; 164 | 165 | assert!(result.is_ok()); 166 | assert_eq!(expected, result.unwrap()); 167 | } 168 | 169 | #[test] 170 | fn 
endpoint_create_multiple_topics() { 171 | let result = Cli::try_parse_from([ 172 | "webhooks-cli", 173 | "endpoint", 174 | "create", 175 | "app_2hRzcGs8D5aLaHBWHyqIcibuFA1", 176 | "http://localhost:8080", 177 | "contact.created,contact.updated,contact.deleted", 178 | ]); 179 | 180 | let expected = Cli { 181 | command: Endpoint { 182 | subcommand: EndpointSubcommand::Create { 183 | app_id: "app_2hRzcGs8D5aLaHBWHyqIcibuFA1".to_string(), 184 | url: "http://localhost:8080".to_string(), 185 | topics: vec![ 186 | "contact.created".to_string(), 187 | "contact.updated".to_string(), 188 | "contact.deleted".to_string(), 189 | ], 190 | }, 191 | }, 192 | }; 193 | 194 | assert!(result.is_ok()); 195 | assert_eq!(expected, result.unwrap()); 196 | } 197 | 198 | #[test] 199 | fn event_create_handle_json() { 200 | let result = Cli::try_parse_from([ 201 | "webhooks-cli", 202 | "event", 203 | "create", 204 | "app_2hRzcGs8D5aLaHBWHyqIcibuFA1", 205 | "contact.created", 206 | "{\"foo\":{\"bar\":\"baz\"}}", 207 | ]); 208 | 209 | let expected = Cli { 210 | command: Event { 211 | subcommand: EventSubcommand::Create { 212 | app_id: "app_2hRzcGs8D5aLaHBWHyqIcibuFA1".to_string(), 213 | topic: "contact.created".to_string(), 214 | payload: json!({ 215 | "foo": { 216 | "bar" : "baz" 217 | } 218 | }), 219 | }, 220 | }, 221 | }; 222 | 223 | assert!(result.is_ok()); 224 | assert_eq!(expected, result.unwrap()); 225 | } 226 | } 227 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | services: 3 | rabbitmq: 4 | container_name: rabbitmq 5 | build: 6 | context: . 7 | dockerfile: .docker/rabbitmq/Dockerfile 8 | ports: 9 | - "5672:5672" 10 | - "15672:15672" 11 | privileged: true 12 | networks: 13 | - rabbitmq 14 | postgres: 15 | container_name: postgres 16 | image: postgres:16.3-alpine3.20 17 | ports: 18 | - "5432:5432" 19 | env_file: 20 | - .env 21 | networks: 22 | rabbitmq: 23 | driver: bridge 24 | -------------------------------------------------------------------------------- /examples/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "webhooks_examples" 3 | version = "0.1.0" 4 | edition = "2021" 5 | publish = false 6 | 7 | [[example]] 8 | name = "destination-server" 9 | path = "src/destination-server.rs" 10 | 11 | [[example]] 12 | name = "producer-server" 13 | path = "src/producer-server.rs" 14 | 15 | [dependencies] 16 | actix-web = "4.9.0" 17 | dotenv = "0.15.0" 18 | futures = "0.3.31" 19 | rand = "0.8.5" 20 | sdk = { path = "../sdk" } 21 | serde_json = "1.0.132" 22 | tokio = { version = "1.41.1", features = ["full"] } 23 | -------------------------------------------------------------------------------- /examples/src/destination-server.rs: -------------------------------------------------------------------------------- 1 | use actix_web::rt::time::sleep; 2 | use actix_web::web::Payload; 3 | use actix_web::{web, App, HttpResponse, HttpServer, Responder}; 4 | use futures::StreamExt; 5 | use rand::Rng; 6 | use std::time::Duration; 7 | use web::BytesMut; 8 | 9 | async fn index(payload: Payload) -> impl Responder { 10 | let mut rng = rand::thread_rng(); 11 | let delay = rng.gen_range(40..=300); 12 | 13 | let body = get_body(payload).await; 14 | 15 | println!("Request. 
Delay: {} ms :: Body: {}", delay, body,); 16 | 17 | sleep(Duration::from_millis(delay)).await; 18 | 19 | HttpResponse::NoContent() 20 | } 21 | 22 | async fn get_body(mut payload: Payload) -> String { 23 | let mut bytes = BytesMut::new(); 24 | while let Some(item) = payload.next().await { 25 | let item = item.unwrap(); 26 | bytes.extend_from_slice(&item); 27 | } 28 | 29 | String::from_utf8_lossy(&bytes).to_string() 30 | } 31 | 32 | #[actix_web::main] 33 | async fn main() -> std::io::Result<()> { 34 | let ip = "127.0.0.1"; 35 | let port = 8080; 36 | 37 | println!("Server is listening for requests on {}:{}", ip, port); 38 | 39 | HttpServer::new(|| App::new().route("/", web::post().to(index))) 40 | .bind((ip, port))? 41 | .run() 42 | .await 43 | } 44 | -------------------------------------------------------------------------------- /examples/src/producer-server.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use dotenv::dotenv; 4 | use serde_json::json; 5 | 6 | use sdk::error::Error; 7 | use sdk::WebhooksSDK; 8 | 9 | #[tokio::main] 10 | async fn main() -> Result<(), Error> { 11 | dotenv().ok(); 12 | 13 | let url: String = env::var("SERVER_URL").unwrap(); 14 | 15 | println!("{}", url); 16 | 17 | let sdk = WebhooksSDK::new(url.as_str()); 18 | let app = sdk.application().create("dummy").await?; 19 | 20 | println!("App created - {:?}", app); 21 | 22 | let topic = "contact.created"; 23 | let endpoint = sdk 24 | .endpoints() 25 | .create(&app.id, "http://localhost:8080", vec![topic]) 26 | .await?; 27 | 28 | println!("Endpoint created - {:?}", endpoint); 29 | 30 | let payload = json!({ 31 | "foo": { 32 | "bar": "baz" 33 | } 34 | }); 35 | 36 | let event = sdk.events().create(&app.id, topic, &payload).await?; 37 | 38 | println!("Event created - {:?}", event); 39 | 40 | Ok(()) 41 | } 42 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | rust-dev-name := "ghcr.io/manhunto/webhooks-rs-dev" 2 | rust-dev-version := "latest" 3 | rust-dev-image := rust-dev-name + ":" + rust-dev-version 4 | 5 | rabbitmq-dev-name := "ghcr.io/manhunto/webhooks-rs-rabbitmq" 6 | rabbitmq-dev-version := "latest" 7 | rabbitmq-dev-image := rabbitmq-dev-name + ":" + rabbitmq-dev-version 8 | 9 | alias b := build 10 | alias f := format 11 | alias fmt := format 12 | alias c := clippy 13 | alias t := test 14 | alias rs := run-server 15 | alias rd := run-dispatcher 16 | alias rps := run-producer-server 17 | alias rds := run-destination-server 18 | alias du := docker-up 19 | alias dd := docker-down 20 | alias rdb := rust-dev-build 21 | alias rdp := rust-dev-push 22 | 23 | default: 24 | @just --list 25 | 26 | build *OPTIONS: 27 | cargo build --all-targets --workspace {{ OPTIONS }} 28 | 29 | format: 30 | cargo fmt --all 31 | 32 | # Run main server 33 | run-server *OPTIONS: 34 | cargo run --package=server {{ OPTIONS }} 35 | 36 | # Run consumer that sends messages to destination servers 37 | run-dispatcher *OPTIONS: 38 | cargo run --package=server --bin=dispatcher {{ OPTIONS }} 39 | 40 | # Run example server that produces messages 41 | run-producer-server *OPTIONS: 42 | cargo run --example producer-server {{ OPTIONS }} 43 | 44 | # Run example server that listens for messages and act like real server (with random response delay) 45 | run-destination-server *OPTIONS: 46 | cargo run --example destination-server {{ OPTIONS }} 47 | 48 | # Run cli with args 49 | 
run-cli *ARGS: 50 | cargo run --package=cli -- {{ ARGS }} 51 | 52 | test: 53 | cargo test --workspace 54 | 55 | clippy: 56 | cargo clippy --all-targets --all-features -- -D warnings 57 | 58 | clippy-pedantic: 59 | cargo clippy --all-targets --all-features -- -D warnings -W clippy::pedantic 60 | 61 | udeps: 62 | cargo +nightly udeps --all-targets 63 | 64 | coverage: 65 | cargo +nightly-2024-05-18 tarpaulin --all-features --workspace --ignore-tests --timeout 120 66 | 67 | docker-up *OPTIONS: 68 | docker compose --env-file=.env up {{ OPTIONS }} 69 | 70 | docker-down: 71 | docker compose down --remove-orphans 72 | 73 | check: 74 | just build 75 | cargo fmt --check --all 76 | just clippy 77 | just test 78 | cargo sort --workspace 79 | 80 | init: 81 | just docker-up --detach 82 | ./scripts/init-db.sh 83 | 84 | rust-dev-build: 85 | docker build --platform linux/amd64 . -t {{ rust-dev-image }} -f .docker/rust/Dockerfile 86 | 87 | rust-dev-push: 88 | docker push {{ rust-dev-image }} 89 | 90 | rabbitmq-dev-build: 91 | docker build --platform linux/amd64 . -t {{ rabbitmq-dev-image }} -f .docker/rabbitmq/Dockerfile 92 | 93 | rabbitmq-dev-push: 94 | docker push {{ rabbitmq-dev-image }} 95 | 96 | create-migration NAME: 97 | sqlx migrate add --source=server/migrations "{{ NAME }}" -------------------------------------------------------------------------------- /scripts/init-db.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cp -n .env.dist .env 4 | 5 | source .env 6 | 7 | echo "Run migrations"; 8 | 9 | RETRIES=30 10 | until sqlx migrate run --source=server/migrations || [ $RETRIES -eq 0 ]; 11 | do 12 | echo "Waiting for postgres server, $((RETRIES--)) remaining attempts..." 13 | sleep 1; 14 | done -------------------------------------------------------------------------------- /sdk/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sdk" 3 | version = "0.1.0" 4 | edition = "2021" 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [dependencies] 8 | reqwest = { version = "0.12.9", features = ["json"] } 9 | serde = { version = "1.0.214", features = ["derive"] } 10 | serde_json = "1.0.132" 11 | thiserror = "2.0.0" 12 | tokio = { version = "1.41.1", features = ["full"] } 13 | url = "2.5.2" 14 | 15 | [dev-dependencies] 16 | mockito = "1.5.0" 17 | -------------------------------------------------------------------------------- /sdk/src/application.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use serde::Deserialize; 4 | use serde_json::json; 5 | 6 | use crate::client::{Client, EndpointUrl}; 7 | use crate::error::Error; 8 | 9 | #[derive(Deserialize, Debug, PartialEq)] 10 | pub struct Application { 11 | pub id: String, 12 | pub name: String, 13 | } 14 | 15 | pub struct ApplicationApi { 16 | client: Client, 17 | } 18 | 19 | impl ApplicationApi { 20 | #[must_use] 21 | pub fn new(client: Client) -> Self { 22 | Self { client } 23 | } 24 | 25 | pub async fn create(&self, name: &str) -> Result { 26 | let body = json!({ 27 | "name": name, 28 | }); 29 | 30 | self.client 31 | .post(EndpointUrl::from_str("application").unwrap(), body) 32 | .await 33 | } 34 | } 35 | 36 | #[cfg(test)] 37 | mod tests { 38 | use mockito::Matcher::Json; 39 | use serde_json::json; 40 | 41 | use crate::application::Application; 42 | use crate::error::Error; 43 | use 
crate::WebhooksSDK; 44 | 45 | #[tokio::test] 46 | async fn create_application() { 47 | let mut server = mockito::Server::new_async().await; 48 | let url = server.url(); 49 | 50 | let mock = server 51 | .mock("POST", "/application") 52 | .match_body(Json(json!({"name": "dummy application"}))) 53 | .with_body(r#"{"id":"app_2dSZgxc6qw0vR7hwZVXDJFleRXj","name":"dummy application"}"#) 54 | .with_header("content-type", "application/json") 55 | .with_status(201) 56 | .create_async() 57 | .await; 58 | 59 | let app = WebhooksSDK::new(url.as_str()) 60 | .application() 61 | .create("dummy application") 62 | .await 63 | .unwrap(); 64 | 65 | mock.assert_async().await; 66 | 67 | assert_eq!( 68 | Application { 69 | id: "app_2dSZgxc6qw0vR7hwZVXDJFleRXj".to_string(), 70 | name: "dummy application".to_string(), 71 | }, 72 | app 73 | ); 74 | } 75 | 76 | #[tokio::test] 77 | async fn can_handle_bad_request() { 78 | let mut server = mockito::Server::new_async().await; 79 | let url = server.url(); 80 | 81 | server 82 | .mock("POST", "/application") 83 | .match_body(Json(json!({"name": ""}))) 84 | .with_body(r#"{"error":"Validation error","messages":["Name cannot be empty"]}"#) 85 | .with_header("content-type", "application/json") 86 | .with_status(400) 87 | .create_async() 88 | .await; 89 | 90 | let error = WebhooksSDK::new(url.as_str()) 91 | .application() 92 | .create("") 93 | .await 94 | .err() 95 | .unwrap(); 96 | 97 | match error { 98 | Error::Reqwest(req) => panic!("is reqwest error {}", req), 99 | Error::Unknown => panic!("is unknown error"), 100 | Error::BadRequest(br) => { 101 | assert_eq!("Validation error", br.error()); 102 | assert_eq!(vec!["Name cannot be empty"], br.messages()); 103 | } 104 | } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /sdk/src/client.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | use std::str::FromStr; 3 | 4 | use reqwest::header; 5 | use reqwest::header::USER_AGENT; 6 | use serde::de::DeserializeOwned; 7 | use serde::Serialize; 8 | use url::Url; 9 | 10 | use crate::error::Error; 11 | use crate::error::Error::BadRequest; 12 | 13 | #[derive(Clone)] 14 | pub struct Client { 15 | base_url: Url, 16 | client: reqwest::Client, 17 | } 18 | 19 | impl Client { 20 | pub fn new(api_url: Url) -> Self { 21 | Self { 22 | base_url: api_url, 23 | client: Self::client(), 24 | } 25 | } 26 | 27 | pub async fn post(&self, endpoint: EndpointUrl, body: I) -> Result 28 | where 29 | I: Serialize, 30 | O: DeserializeOwned, 31 | { 32 | let url = self.url(endpoint); 33 | let response = self.client.post(url).json(&body).send().await?; 34 | 35 | if 400 == response.status().as_u16() { 36 | let result = response.json::().await?; 37 | 38 | return Err(BadRequest(result)); 39 | } 40 | 41 | Ok(response.json::().await?) 
42 | } 43 | 44 | fn url(&self, endpoint: EndpointUrl) -> Url { 45 | self.base_url.join(endpoint.as_str()).unwrap_or_else(|_| { 46 | panic!( 47 | "Could not join strings to create endpoint url: '{}', '{}'", 48 | self.base_url, 49 | endpoint.as_str() 50 | ) 51 | }) 52 | } 53 | 54 | fn client() -> reqwest::Client { 55 | let mut headers = header::HeaderMap::new(); 56 | let sdk_version = env!("CARGO_PKG_VERSION"); 57 | 58 | headers.insert( 59 | USER_AGENT, 60 | header::HeaderValue::from_str( 61 | format!("webhooks-rs rust sdk v{}", sdk_version).as_str(), 62 | ) 63 | .unwrap(), 64 | ); 65 | 66 | reqwest::Client::builder() 67 | .default_headers(headers) 68 | .build() 69 | .unwrap() 70 | } 71 | } 72 | 73 | #[derive(Debug)] 74 | pub struct EndpointUrl { 75 | path: PathBuf, // fixme: it won't work on windows 76 | } 77 | 78 | impl EndpointUrl { 79 | #[must_use] 80 | pub fn new(path: String) -> Self { 81 | let path_buf = PathBuf::from(path); 82 | 83 | Self { path: path_buf } 84 | } 85 | 86 | fn as_str(&self) -> &str { 87 | self.path.to_str().unwrap() 88 | } 89 | } 90 | 91 | impl FromStr for EndpointUrl { 92 | type Err = Self; 93 | 94 | fn from_str(s: &str) -> Result { 95 | Ok(Self::new(s.to_string())) 96 | } 97 | } 98 | 99 | impl TryFrom for EndpointUrl { 100 | type Error = Self; 101 | 102 | fn try_from(value: String) -> Result { 103 | Ok(Self::new(value)) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /sdk/src/endpoint.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | use serde_json::json; 3 | 4 | use crate::client::{Client, EndpointUrl}; 5 | use crate::error::Error; 6 | 7 | #[derive(Deserialize, Debug, PartialEq)] 8 | pub struct Endpoint { 9 | pub id: String, 10 | pub app_id: String, 11 | pub url: String, 12 | pub topics: Vec, 13 | } 14 | 15 | pub struct EndpointApi { 16 | client: Client, 17 | } 18 | 19 | impl EndpointApi { 20 | #[must_use] 21 | pub fn new(client: Client) -> Self { 22 | Self { client } 23 | } 24 | 25 | pub async fn create( 26 | &self, 27 | app_id: &str, 28 | url: &str, 29 | topics: Vec<&str>, 30 | ) -> Result { 31 | let body = json!({ 32 | "url": url, 33 | "topics": topics 34 | }); 35 | 36 | self.client 37 | .post( 38 | EndpointUrl::try_from(format!("application/{}/endpoint", app_id)).unwrap(), 39 | body, 40 | ) 41 | .await 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /sdk/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter}; 2 | 3 | use serde::Deserialize; 4 | use thiserror::Error; 5 | 6 | use crate::error::Error::Reqwest; 7 | 8 | #[derive(Debug, Error)] 9 | pub enum Error { 10 | #[error("Error occurred during request: {0}")] 11 | Reqwest(reqwest::Error), 12 | #[error("Unknown error")] 13 | Unknown, 14 | #[error("Bad request: {0}")] 15 | BadRequest(BadRequest), 16 | } 17 | 18 | impl From for Error { 19 | fn from(value: reqwest::Error) -> Self { 20 | Reqwest(value) 21 | } 22 | } 23 | 24 | #[derive(Deserialize, Debug)] 25 | pub struct BadRequest { 26 | error: String, 27 | messages: Vec, 28 | } 29 | 30 | impl BadRequest { 31 | pub fn error(&self) -> String { 32 | self.error.clone() 33 | } 34 | 35 | pub fn messages(&self) -> Vec { 36 | self.messages.clone() 37 | } 38 | } 39 | 40 | impl Display for BadRequest { 41 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 42 | write!(f, "error: {}, messages: {:?}", self.error(), 
self.messages) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /sdk/src/event.rs: -------------------------------------------------------------------------------- 1 | use serde::Deserialize; 2 | use serde_json::{json, Value}; 3 | 4 | use crate::client::{Client, EndpointUrl}; 5 | use crate::error::Error; 6 | 7 | #[derive(Deserialize, Debug, PartialEq)] 8 | pub struct CreateEventResponse { 9 | pub id: String, 10 | } 11 | 12 | pub struct EventsApi { 13 | client: Client, 14 | } 15 | 16 | impl EventsApi { 17 | #[must_use] 18 | pub fn new(client: Client) -> Self { 19 | Self { client } 20 | } 21 | 22 | pub async fn create( 23 | &self, 24 | app_id: &str, 25 | topic: &str, 26 | payload: &Value, 27 | ) -> Result { 28 | let body = json!({ 29 | "topic": topic, 30 | "payload": payload 31 | }); 32 | 33 | self.client 34 | .post( 35 | EndpointUrl::try_from(format!("application/{}/event", app_id)).unwrap(), 36 | body, 37 | ) 38 | .await 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /sdk/src/lib.rs: -------------------------------------------------------------------------------- 1 | use url::Url; 2 | 3 | use client::Client; 4 | 5 | use crate::application::ApplicationApi; 6 | use crate::endpoint::EndpointApi; 7 | use crate::event::EventsApi; 8 | 9 | mod application; 10 | mod client; 11 | mod endpoint; 12 | pub mod error; 13 | mod event; 14 | 15 | pub struct WebhooksSDK { 16 | client: Client, 17 | } 18 | 19 | impl WebhooksSDK { 20 | pub fn new(api_url: &str) -> Self { 21 | let url = Url::parse(api_url).unwrap(); 22 | 23 | Self { 24 | client: Client::new(url), 25 | } 26 | } 27 | 28 | pub fn application(&self) -> ApplicationApi { 29 | ApplicationApi::new(self.client.clone()) 30 | } 31 | 32 | pub fn endpoints(&self) -> EndpointApi { 33 | EndpointApi::new(self.client.clone()) 34 | } 35 | 36 | pub fn events(&self) -> EventsApi { 37 | EventsApi::new(self.client.clone()) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "server" 3 | version = "0.1.0" 4 | edition = "2021" 5 | default-run = "server" 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | actix-web = "4.9.0" 10 | chrono = { version = "0.4.38", features = ["serde"] } 11 | dotenv = "0.15.0" 12 | envconfig = "0.11.0" 13 | futures = "0.3.31" 14 | futures-lite = "2.4.0" 15 | itertools = "0.13.0" 16 | lapin = "2.5.0" 17 | lazy_static = "1.5.0" 18 | log = "0.4.22" 19 | log4rs = "1.3.0" 20 | rand = "0.8.5" 21 | regex = "1.11.1" 22 | reqwest = { version = "0.12.9", features = ["json"] } 23 | serde = { version = "1.0.214", features = ["derive"] } 24 | serde_json = { version = "1.0.132", features = ["raw_value"] } 25 | sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "chrono"] } 26 | svix-ksuid = { version = "^0.8.0", features = ["serde"] } 27 | tokio = { version = "1.41.1", features = ["full"] } 28 | url = "2.5.2" 29 | validator = { version = "0.19.0", features = ["derive"] } 30 | 31 | [dev-dependencies] 32 | fake = "3.0.0" 33 | mockito = "1.5.0" 34 | test-case = "3.3.1" 35 | 36 | [[bin]] 37 | name = "server" 38 | path = "src/bin/server.rs" 39 | 40 | [[bin]] 41 | name = "dispatcher" 42 | path = "src/bin/dispatcher.rs" 43 | 
-------------------------------------------------------------------------------- /server/migrations/20240515163040_create_application_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE applications 2 | ( 3 | id char(27) not null, 4 | primary key (id), 5 | name TEXT NOT NULL 6 | ); -------------------------------------------------------------------------------- /server/migrations/20240517110248_create_endpoint_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE endpoints 2 | ( 3 | id char(27) NOT NULL, 4 | primary key (id), 5 | app_id char(27) NOT NULL, 6 | url TEXT NOT NULL, 7 | topics JSON NOT NULL, 8 | status char(127) NOT NULL 9 | ); -------------------------------------------------------------------------------- /server/migrations/20240523211340_create_events_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE events 2 | ( 3 | id char(27) NOT NULL, 4 | primary key (id), 5 | app_id char(27) NOT NULL, 6 | payload JSON NOT NULL, 7 | topic text NOT NULL, 8 | created_at TIMESTAMP NOT NULL 9 | ); 10 | -------------------------------------------------------------------------------- /server/migrations/20240526112326_create_message_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE messages 2 | ( 3 | id char(27) NOT NULL, 4 | primary key (id), 5 | event_id char(27) NOT NULL, 6 | endpoint_id char(27) NOT NULL 7 | ); 8 | 9 | CREATE TABLE attempts 10 | ( 11 | message_id char(27) NOT NULL, 12 | attempt SMALLINT NOT NULL, 13 | primary key(message_id, attempt), 14 | status_numeric SMALLINT NULL, 15 | status_unknown TEXT NULL 16 | ); 17 | -------------------------------------------------------------------------------- /server/migrations/20240531074624_create_attempt_log_table.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE attempt_logs 2 | ( 3 | message_id char(27) NOT NULL, 4 | attempt SMALLINT NOT NULL, 5 | primary key(message_id, attempt), 6 | processing_time INT NOT NULL, 7 | response_time INT NOT NULL, 8 | response_body TEXT NULL 9 | ); 10 | -------------------------------------------------------------------------------- /server/server.http: -------------------------------------------------------------------------------- 1 | @url = http://localhost:8090 2 | 3 | ### Health check 4 | GET {{url}}/health_check 5 | Content-Type: application/json 6 | 7 | ### Create application 8 | POST {{url}}/application 9 | Content-Type: application/json 10 | 11 | { 12 | "name": "Dummy application" 13 | } 14 | 15 | > {% 16 | client.global.set("app_id", response.body.id); 17 | %} 18 | 19 | ### Create endpoint 20 | POST {{url}}/application/{{app_id}}/endpoint 21 | Content-Type: application/json 22 | 23 | { 24 | "url": "http://localhost:8080", 25 | "topics": [ 26 | "contact.updated", 27 | "contact.created" 28 | ] 29 | } 30 | 31 | > {% 32 | client.global.set("endpoint_id", response.body.id); 33 | %} 34 | 35 | ### Create event 36 | POST {{url}}/application/{{app_id}}/event 37 | Content-Type: application/json 38 | 39 | { 40 | "topic": "contact.created", 41 | "payload": { 42 | "foo": "bar", 43 | "nested": { 44 | "test": [ 45 | "123", 46 | "ABC" 47 | ] 48 | } 49 | } 50 | } 51 | 52 | ### Disable endpoint 53 | POST {{url}}/application/{{app_id}}/endpoint/{{endpoint_id}}/disable 54 | Content-Type: application/json 55 | 56 | ### Enable endpoint 57 | 
POST {{url}}/application/{{app_id}}/endpoint/{{endpoint_id}}/enable 58 | Content-Type: application/json -------------------------------------------------------------------------------- /server/src/amqp.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::time::Duration; 3 | 4 | use lapin::options::{ 5 | BasicPublishOptions, ExchangeDeclareOptions, QueueBindOptions, QueueDeclareOptions, 6 | }; 7 | use lapin::publisher_confirm::Confirmation; 8 | use lapin::types::{AMQPType, AMQPValue, FieldTable, ShortString}; 9 | use lapin::{BasicProperties, Channel, Connection, ConnectionProperties, ExchangeKind}; 10 | use log::info; 11 | use serde::de::DeserializeOwned; 12 | use serde::Serialize; 13 | use serde_json::Value; 14 | 15 | use crate::cmd::AsyncMessage; 16 | use crate::config::AMQPConfig; 17 | 18 | pub async fn establish_connection_with_rabbit(amqp_config: AMQPConfig) -> Channel { 19 | let addr = amqp_config.connection_string(); 20 | let conn = Connection::connect(&addr, ConnectionProperties::default()) 21 | .await 22 | .unwrap(); 23 | 24 | info!("connected established with rabbitmq"); 25 | 26 | let channel = conn.create_channel().await.unwrap(); 27 | 28 | let args = FieldTable::from(BTreeMap::from( 29 | [( 30 | ShortString::from("x-delayed-type"), 31 | AMQPValue::try_from(&Value::String(String::from("direct")), AMQPType::LongString) 32 | .unwrap(), 33 | ); 1], 34 | )); 35 | 36 | channel 37 | .exchange_declare( 38 | &amqp_config.sent_message_exchange_name(), 39 | ExchangeKind::Custom(String::from("x-delayed-message")), 40 | ExchangeDeclareOptions::default(), 41 | args, 42 | ) 43 | .await 44 | .unwrap(); 45 | 46 | let queue = channel 47 | .queue_declare( 48 | &amqp_config.sent_message_queue_name(), 49 | QueueDeclareOptions::default(), 50 | FieldTable::default(), 51 | ) 52 | .await 53 | .unwrap(); 54 | 55 | channel 56 | .queue_bind( 57 | queue.name().as_str(), 58 | &amqp_config.sent_message_exchange_name(), 59 | "", 60 | QueueBindOptions::default(), 61 | FieldTable::default(), 62 | ) 63 | .await 64 | .unwrap(); 65 | 66 | info!("queue declared {:?}", queue); 67 | 68 | channel 69 | } 70 | 71 | pub struct Publisher { 72 | channel: Channel, 73 | amqp_config: AMQPConfig, 74 | } 75 | 76 | impl Publisher { 77 | pub fn new(channel: Channel, amqp_config: AMQPConfig) -> Self { 78 | Self { 79 | channel, 80 | amqp_config, 81 | } 82 | } 83 | 84 | pub async fn publish(&self, message: AsyncMessage) { 85 | self.do_publish(message, BasicProperties::default()).await 86 | } 87 | 88 | pub async fn publish_delayed(&self, message: AsyncMessage, delay: Duration) { 89 | let btree: BTreeMap<_, _> = [( 90 | ShortString::from("x-delay"), 91 | AMQPValue::LongLongInt(delay.as_millis() as i64), 92 | )] 93 | .into(); 94 | let headers = FieldTable::from(btree); 95 | let properties = BasicProperties::default().with_headers(headers); 96 | 97 | self.do_publish(message, properties).await 98 | } 99 | 100 | fn resolve_exchange(&self, message: &AsyncMessage) -> String { 101 | match message { 102 | AsyncMessage::SentMessage(_) => self.amqp_config.sent_message_exchange_name().clone(), 103 | } 104 | } 105 | 106 | async fn do_publish(&self, message: AsyncMessage, properties: BasicProperties) { 107 | let confirm = self 108 | .channel 109 | .basic_publish( 110 | self.resolve_exchange(&message).as_str(), 111 | "", 112 | BasicPublishOptions::default(), 113 | &Serializer::serialize(message), 114 | properties, 115 | ) 116 | .await 117 | .unwrap() 118 | .await 119 | 
.unwrap(); 120 | 121 | assert_eq!(confirm, Confirmation::NotRequested); 122 | } 123 | } 124 | 125 | pub struct Serializer {} 126 | 127 | impl Serializer { 128 | pub fn deserialize(binary: &[u8]) -> T 129 | where 130 | T: DeserializeOwned, 131 | { 132 | let msg = String::from_utf8_lossy(binary); 133 | 134 | serde_json::from_str(&msg).unwrap() 135 | } 136 | 137 | pub fn serialize(value: T) -> Vec 138 | // is possible to return &[u8] ? 139 | where 140 | T: Serialize, 141 | { 142 | let string = serde_json::to_string(&value); 143 | 144 | string.unwrap().as_bytes().to_vec() 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /server/src/app.rs: -------------------------------------------------------------------------------- 1 | use std::net::TcpListener; 2 | 3 | use actix_web::dev::Server; 4 | use actix_web::middleware::Logger; 5 | use actix_web::web::Data; 6 | use actix_web::{App, HttpServer}; 7 | use log::info; 8 | use sqlx::PgPool; 9 | 10 | use crate::amqp::{establish_connection_with_rabbit, Publisher}; 11 | use crate::config::AMQPConfig; 12 | use crate::dispatch_consumer::consume; 13 | use crate::routes::routes; 14 | use crate::storage::Storage; 15 | 16 | pub async fn run_server( 17 | listener: TcpListener, 18 | pool: PgPool, 19 | amqp_config: AMQPConfig, 20 | ) -> Result { 21 | let channel = establish_connection_with_rabbit(amqp_config.clone()).await; 22 | let storage = Data::new(Storage::new(pool)); 23 | let publisher = Data::new(Publisher::new(channel.clone(), amqp_config)); 24 | let app = move || { 25 | App::new() 26 | .wrap(Logger::default()) 27 | .app_data(storage.clone()) 28 | .app_data(publisher.clone()) 29 | .configure(routes) 30 | }; 31 | 32 | let addr = listener.local_addr().unwrap(); 33 | let server = HttpServer::new(app).listen(listener)?.run(); 34 | 35 | info!("Webhooks server is listening for requests on {}", addr); 36 | 37 | Ok(server) 38 | } 39 | 40 | pub async fn run_dispatcher(pool: PgPool, amqp_config: AMQPConfig) { 41 | let channel = establish_connection_with_rabbit(amqp_config.clone()).await; 42 | 43 | consume(channel, "dispatcher", Storage::new(pool), amqp_config).await; 44 | } 45 | -------------------------------------------------------------------------------- /server/src/bin/dispatcher.rs: -------------------------------------------------------------------------------- 1 | use dotenv::dotenv; 2 | use envconfig::Envconfig; 3 | use sqlx::PgPool; 4 | 5 | use server::app::run_dispatcher; 6 | use server::config::{AMQPConfig, PostgresConfig}; 7 | use server::logs::init_log; 8 | 9 | #[tokio::main] 10 | async fn main() { 11 | dotenv().ok(); 12 | init_log(); 13 | 14 | let con_string = PostgresConfig::init_from_env().unwrap().connection_string(); 15 | let pool = PgPool::connect(&con_string).await.unwrap(); 16 | 17 | let amqp_config = AMQPConfig::init_from_env().unwrap(); 18 | 19 | run_dispatcher(pool, amqp_config).await; 20 | } 21 | -------------------------------------------------------------------------------- /server/src/bin/server.rs: -------------------------------------------------------------------------------- 1 | use std::net::TcpListener; 2 | 3 | use dotenv::dotenv; 4 | use envconfig::Envconfig; 5 | use sqlx::PgPool; 6 | 7 | use server::app::run_server; 8 | use server::config::{AMQPConfig, PostgresConfig, ServerConfig}; 9 | use server::logs::init_log; 10 | 11 | #[actix_web::main] 12 | async fn main() -> Result<(), std::io::Error> { 13 | dotenv().ok(); 14 | init_log(); 15 | 16 | let config = 
ServerConfig::init_from_env().unwrap(); 17 | let listener = TcpListener::bind((config.host, config.port)) 18 | .unwrap_or_else(|_| panic!("Failed to bind port {}", config.port)); 19 | 20 | let con_string = PostgresConfig::init_from_env().unwrap().connection_string(); 21 | let pool = PgPool::connect(&con_string).await.unwrap(); 22 | 23 | let amqp_config = AMQPConfig::init_from_env().unwrap(); 24 | 25 | run_server(listener, pool, amqp_config).await?.await 26 | } 27 | -------------------------------------------------------------------------------- /server/src/circuit_breaker.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::future::Future; 3 | 4 | use log::debug; 5 | 6 | #[derive(Copy, Clone, PartialEq)] 7 | pub enum State { 8 | Closed, 9 | Open, 10 | } 11 | 12 | #[derive(PartialEq, Debug)] 13 | pub enum Error { 14 | Rejected, 15 | Open(T), 16 | Closed(T), 17 | } 18 | 19 | // todo extract policy 20 | pub struct CircuitBreaker { 21 | max_fails: u32, 22 | storage: HashMap, 23 | // todo extract trait, allow to persist in redis, 24 | states: HashMap, 25 | } 26 | 27 | impl CircuitBreaker { 28 | pub fn new(max_fails: u32) -> Self { 29 | Self { 30 | max_fails, 31 | storage: HashMap::new(), 32 | states: HashMap::new(), 33 | } 34 | } 35 | 36 | // todo: key can be AsRef 37 | pub async fn call(&mut self, key: &String, function: F) -> Result> 38 | where 39 | F: FnOnce() -> Fut, 40 | Fut: Future>, 41 | { 42 | if self.is_call_permitted(key.clone()) { 43 | debug!("Service {} closed", key); 44 | 45 | return Err(Error::Rejected); 46 | } 47 | 48 | match function().await { 49 | Ok(ok) => { 50 | self.reset_counter(key); 51 | 52 | Ok(ok) 53 | } 54 | Err(err) => { 55 | *self.storage.entry(key.clone()).or_insert(0) += 1; 56 | 57 | if let Some(fail_count) = self.storage.get(key) { 58 | debug!("Service {} current fail count: {}", key, fail_count); 59 | 60 | if fail_count.ge(&self.max_fails) { 61 | debug!("Service {} reached a limit and is closed", key); 62 | 63 | self.update(key.clone(), State::Closed); 64 | 65 | return Err(Error::Closed(err)); 66 | } 67 | } 68 | 69 | Err(Error::Open(err)) 70 | } 71 | } 72 | } 73 | 74 | pub fn revive(&mut self, key: &str) -> Option<()> { 75 | if self.state(key.to_string()) == &State::Closed { 76 | self.reset_counter(key); 77 | self.update(key.to_owned(), State::Open); 78 | 79 | return Some(()); 80 | } 81 | 82 | None 83 | } 84 | 85 | fn reset_counter(&mut self, key: &str) { 86 | *self.storage.entry(key.to_owned()).or_insert(0) = 0; 87 | } 88 | 89 | fn is_call_permitted(&self, key: String) -> bool { 90 | self.state(key) == &State::Closed 91 | } 92 | 93 | fn state(&self, key: String) -> &State { 94 | self.states.get(&key).unwrap_or(&State::Open) 95 | } 96 | 97 | fn update(&mut self, key: String, state: State) { 98 | *self.states.entry(key).or_insert(State::Open) = state 99 | } 100 | } 101 | 102 | impl Default for CircuitBreaker { 103 | fn default() -> Self { 104 | Self::new(3) 105 | } 106 | } 107 | 108 | #[cfg(test)] 109 | mod tests { 110 | use crate::circuit_breaker::CircuitBreaker; 111 | use crate::circuit_breaker::Error::{Closed, Open, Rejected}; 112 | 113 | #[tokio::test] 114 | async fn successful_calls_doesnt_close_the_endpoint() { 115 | let mut sut = CircuitBreaker::new(3); 116 | let key = "key".to_string(); 117 | 118 | assert_eq!(Ok(0), sut.call(&key, ok).await); 119 | assert_eq!(Ok(0), sut.call(&key, ok).await); 120 | assert_eq!(Ok(0), sut.call(&key, ok).await); 121 | assert_eq!(Ok(0), 
sut.call(&key, ok).await); 122 | assert_eq!(Ok(0), sut.call(&key, ok).await); 123 | assert_eq!(Ok(0), sut.call(&key, ok).await); 124 | assert_eq!(Ok(0), sut.call(&key, ok).await); 125 | assert_eq!(Ok(0), sut.call(&key, ok).await); 126 | assert_eq!(Ok(0), sut.call(&key, ok).await); 127 | } 128 | 129 | #[tokio::test] 130 | async fn erroneous_calls_close_the_endpoint() { 131 | let mut sut = CircuitBreaker::new(3); 132 | let key = "key".to_string(); 133 | 134 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 135 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 136 | assert_eq!(Err(Closed(255)), sut.call(&key, err).await); 137 | } 138 | 139 | #[tokio::test] 140 | async fn calls_are_rejected_to_closed_endpoint() { 141 | let mut sut = CircuitBreaker::new(3); 142 | let key = "key".to_string(); 143 | 144 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 145 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 146 | assert_eq!(Err(Closed(255)), sut.call(&key, err).await); 147 | 148 | assert_eq!(Err(Rejected), sut.call(&key, ok).await); 149 | assert_eq!(Err(Rejected), sut.call(&key, err).await); 150 | } 151 | 152 | #[tokio::test] 153 | async fn successful_call_resets_counter() { 154 | let mut sut = CircuitBreaker::new(3); 155 | let key = "key".to_string(); 156 | 157 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 158 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 159 | assert_eq!(Ok(0), sut.call(&key, ok).await); 160 | 161 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 162 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 163 | assert_eq!(Ok(0), sut.call(&key, ok).await); 164 | } 165 | 166 | #[tokio::test] 167 | async fn every_key_has_own_counter() { 168 | let mut sut = CircuitBreaker::new(3); 169 | let key = "key".to_string(); 170 | let key2 = "key2".to_string(); 171 | 172 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 173 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 174 | assert_eq!(Err(Closed(255)), sut.call(&key, err).await); 175 | 176 | assert_eq!(Err(Open(255)), sut.call(&key2, err).await); 177 | assert_eq!(Err(Open(255)), sut.call(&key2, err).await); 178 | assert_eq!(Err(Closed(255)), sut.call(&key2, err).await); 179 | } 180 | 181 | #[tokio::test] 182 | async fn revive_closed() { 183 | let mut sut = CircuitBreaker::new(3); 184 | let key = "key".to_string(); 185 | 186 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 187 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 188 | assert_eq!(Err(Closed(255)), sut.call(&key, err).await); 189 | 190 | sut.revive(&key); 191 | 192 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 193 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 194 | assert_eq!(Err(Closed(255)), sut.call(&key, err).await); 195 | 196 | sut.revive(&key); 197 | 198 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 199 | } 200 | 201 | #[tokio::test] 202 | async fn revive_opened_doesnt_reset_counter() { 203 | let mut sut = CircuitBreaker::new(3); 204 | let key = "key".to_string(); 205 | 206 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 207 | assert_eq!(Err(Open(255)), sut.call(&key, err).await); 208 | 209 | sut.revive(&key); 210 | 211 | assert_eq!(Err(Closed(255)), sut.call(&key, err).await); 212 | } 213 | 214 | #[tokio::test] 215 | async fn revive_already_opened_returns_none() { 216 | let mut sut = CircuitBreaker::new(3); 217 | let key = "key".to_string(); 218 | 219 | let _ = sut.call(&key, err).await; 220 | assert!(sut.revive(&key).is_none()); 221 | 222 | 
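        // Note: with max_fails = 3 the breaker only trips to Closed after the
        // third consecutive error; until then revive() has nothing to reopen,
        // so it returns None.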
let _ = sut.call(&key, err).await; 223 | assert!(sut.revive(&key).is_none()); 224 | 225 | let _ = sut.call(&key, err).await; 226 | assert!(sut.revive(&key).is_some()); 227 | } 228 | 229 | async fn ok() -> Result { 230 | Ok(0) 231 | } 232 | 233 | async fn err() -> Result { 234 | Err(255) 235 | } 236 | } 237 | -------------------------------------------------------------------------------- /server/src/cmd.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use crate::types::MessageId; 4 | 5 | #[derive(Serialize, Deserialize)] 6 | #[serde(tag = "t", content = "c")] 7 | pub enum AsyncMessage { 8 | SentMessage(SentMessage), 9 | } 10 | 11 | #[derive(Serialize, Deserialize, Debug, Clone)] 12 | pub struct SentMessage { 13 | msg_id: String, 14 | pub attempt: usize, 15 | } 16 | 17 | impl SentMessage { 18 | pub fn new(message_id: MessageId) -> Self { 19 | Self { 20 | msg_id: message_id.to_string(), 21 | attempt: 1, 22 | } 23 | } 24 | 25 | pub fn with_increased_attempt(&self) -> SentMessage { 26 | Self { 27 | msg_id: self.msg_id.clone(), 28 | attempt: self.attempt + 1, 29 | } 30 | } 31 | 32 | pub fn msg_id(&self) -> MessageId { 33 | MessageId::try_from(self.msg_id.clone()).unwrap() 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /server/src/config.rs: -------------------------------------------------------------------------------- 1 | use envconfig::Envconfig; 2 | 3 | #[derive(Envconfig, Clone)] 4 | pub struct ServerConfig { 5 | #[envconfig(from = "SERVER_PORT")] 6 | pub port: u16, 7 | #[envconfig(from = "SERVER_HOST")] 8 | pub host: String, 9 | } 10 | 11 | #[derive(Envconfig)] 12 | pub struct PostgresConfig { 13 | #[envconfig(from = "POSTGRES_HOST")] 14 | host: String, 15 | #[envconfig(from = "POSTGRES_PORT")] 16 | port: u16, 17 | #[envconfig(from = "POSTGRES_USER")] 18 | user: String, 19 | #[envconfig(from = "POSTGRES_PASSWORD")] 20 | password: String, 21 | #[envconfig(from = "POSTGRES_DB")] 22 | db: String, 23 | } 24 | 25 | impl PostgresConfig { 26 | pub fn connection_string(&self) -> String { 27 | format!( 28 | "postgres://{}:{}@{}:{}/{}", 29 | self.user, self.password, self.host, self.port, self.db 30 | ) 31 | } 32 | 33 | pub fn connection_string_without_db(&self) -> String { 34 | format!( 35 | "postgres://{}:{}@{}:{}", 36 | self.user, self.password, self.host, self.port 37 | ) 38 | } 39 | 40 | pub fn with_db(&self, db: &str) -> Self { 41 | Self { 42 | host: self.host.clone(), 43 | port: self.port, 44 | user: self.user.clone(), 45 | password: self.password.clone(), 46 | db: db.to_string(), 47 | } 48 | } 49 | 50 | pub fn db(&self) -> String { 51 | self.db.clone() 52 | } 53 | } 54 | 55 | #[derive(Envconfig, Clone)] 56 | pub struct AMQPConfig { 57 | #[envconfig(from = "AMQP_HOST")] 58 | host: String, 59 | #[envconfig(from = "AMQP_PORT")] 60 | port: u16, 61 | #[envconfig(from = "AMQP_USER")] 62 | user: String, 63 | #[envconfig(from = "AMQP_PASSWORD")] 64 | password: String, 65 | #[envconfig(from = "AMQP_SENT_MESSAGE_QUEUE")] 66 | sent_message_queue: String, 67 | } 68 | 69 | impl AMQPConfig { 70 | pub fn connection_string(&self) -> String { 71 | format!( 72 | "amqp://{}:{}@{}:{}", 73 | self.user, self.password, self.host, self.port 74 | ) 75 | } 76 | 77 | pub fn with_sent_message_queue(&self, queue_name: &str) -> Self { 78 | Self { 79 | host: self.host.clone(), 80 | port: self.port, 81 | user: self.user.clone(), 82 | password: self.password.clone(), 83 | 
sent_message_queue: queue_name.to_string(), 84 | } 85 | } 86 | 87 | pub fn sent_message_queue_name(&self) -> String { 88 | self.sent_message_queue.clone() 89 | } 90 | 91 | pub fn sent_message_exchange_name(&self) -> String { 92 | format!("{}-exchange", self.sent_message_queue) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /server/src/configuration/domain.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter}; 2 | use std::vec::IntoIter; 3 | 4 | use itertools::Itertools; 5 | use lazy_static::lazy_static; 6 | use regex::Regex; 7 | use sqlx::postgres::PgRow; 8 | use sqlx::types::JsonValue; 9 | use sqlx::{FromRow, Row}; 10 | use url::Url; 11 | 12 | use crate::error::Error; 13 | use crate::error::Error::InvalidArgument; 14 | use crate::types::{ApplicationId, EndpointId}; 15 | 16 | #[derive(Debug, Clone)] 17 | pub struct Application { 18 | pub id: ApplicationId, 19 | pub name: String, 20 | } 21 | 22 | impl Application { 23 | pub fn new(name: String) -> Self { 24 | Self { 25 | id: ApplicationId::new(), 26 | name, 27 | } 28 | } 29 | } 30 | 31 | impl FromRow<'_, PgRow> for Application { 32 | fn from_row(row: &'_ PgRow) -> Result { 33 | Ok(Application { 34 | id: row.try_get("id")?, 35 | name: row.try_get("name")?, 36 | }) 37 | } 38 | } 39 | 40 | #[derive(Clone, Debug, PartialEq)] 41 | pub enum EndpointStatus { 42 | Initial, 43 | DisabledManually, 44 | DisabledFailing, 45 | EnabledManually, 46 | } 47 | 48 | impl EndpointStatus { 49 | fn is_active(&self) -> bool { 50 | match self { 51 | Self::Initial | Self::EnabledManually => true, 52 | Self::DisabledManually | Self::DisabledFailing => false, 53 | } 54 | } 55 | } 56 | 57 | impl Display for EndpointStatus { 58 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 59 | let str = match self { 60 | EndpointStatus::Initial => "initial", 61 | EndpointStatus::DisabledManually => "disabled_manually", 62 | EndpointStatus::DisabledFailing => "disabled_failing", 63 | EndpointStatus::EnabledManually => "enabled_manually", 64 | }; 65 | 66 | write!(f, "{str}") 67 | } 68 | } 69 | 70 | impl TryFrom for EndpointStatus { 71 | type Error = String; 72 | 73 | fn try_from(value: String) -> Result { 74 | match value.as_str() { 75 | // fixme: why I need to trim it? 
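            // A likely reason for the trim() in Endpoint::from_row below: if the
            // status column is a fixed-width CHAR in Postgres, values come back
            // padded with trailing spaces and would not match these arms otherwise.
            // (Assumption: the endpoint table migration is the place to confirm it.)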
76 | "initial" => Ok(EndpointStatus::Initial), 77 | "disabled_manually" => Ok(EndpointStatus::DisabledManually), 78 | "disabled_failing" => Ok(EndpointStatus::DisabledFailing), 79 | "enabled_manually" => Ok(EndpointStatus::EnabledManually), 80 | _ => Err(format!("Unexpected endpoint status: {value}")), 81 | } 82 | } 83 | } 84 | 85 | #[derive(Debug, Clone)] 86 | pub struct Endpoint { 87 | pub id: EndpointId, 88 | pub app_id: ApplicationId, 89 | pub url: Url, 90 | pub topics: TopicsList, 91 | pub status: EndpointStatus, 92 | } 93 | 94 | impl Endpoint { 95 | pub fn new(url: &str, app_id: ApplicationId, topics: TopicsList) -> Self { 96 | Self { 97 | id: EndpointId::new(), 98 | url: Url::parse(url).unwrap(), 99 | topics, 100 | app_id, 101 | status: EndpointStatus::Initial, 102 | } 103 | } 104 | 105 | pub fn is_active(&self) -> bool { 106 | self.status.is_active() 107 | } 108 | 109 | pub fn disable_manually(&mut self) { 110 | self.status = EndpointStatus::DisabledManually; 111 | } 112 | 113 | pub fn disable_failing(&mut self) { 114 | self.status = EndpointStatus::DisabledManually; 115 | } 116 | 117 | pub fn enable_manually(&mut self) { 118 | self.status = EndpointStatus::EnabledManually; 119 | } 120 | } 121 | 122 | impl FromRow<'_, PgRow> for Endpoint { 123 | fn from_row(row: &'_ PgRow) -> Result { 124 | let url: String = row.try_get("url")?; 125 | let status: String = row.try_get("status")?; 126 | let topics: JsonValue = row.try_get("topics")?; 127 | 128 | let topics: Vec = topics 129 | .as_array() 130 | .unwrap() 131 | .iter() 132 | .map(|t| t.as_str().unwrap().to_string()) 133 | .collect(); 134 | 135 | Ok(Endpoint { 136 | id: row.try_get("id")?, 137 | app_id: row.try_get("app_id")?, 138 | url: Url::parse(&url).unwrap(), 139 | topics: TopicsList::try_from(topics).unwrap(), 140 | status: EndpointStatus::try_from(status.trim().to_string()).unwrap(), 141 | }) 142 | } 143 | } 144 | 145 | #[derive(Debug, Clone, Eq, PartialEq)] 146 | pub struct Topic { 147 | name: String, 148 | } 149 | 150 | impl Topic { 151 | pub fn new(name: T) -> Result 152 | where 153 | T: AsRef, 154 | { 155 | lazy_static! 
{ 156 | static ref RE: Regex = Regex::new(r"^[a-zA-Z_\.\-]+$").unwrap(); 157 | } 158 | 159 | if !RE.is_match(name.as_ref()) { 160 | return Err(InvalidArgument("Invalid topic name".to_string())); 161 | } 162 | 163 | Ok(Self { 164 | name: name.as_ref().to_string(), 165 | }) 166 | } 167 | } 168 | 169 | impl TryFrom for Topic { 170 | type Error = Error; 171 | 172 | fn try_from(value: String) -> Result { 173 | Self::new(value) 174 | } 175 | } 176 | 177 | impl TryFrom<&str> for Topic { 178 | type Error = Error; 179 | 180 | fn try_from(value: &str) -> Result { 181 | Self::new(value) 182 | } 183 | } 184 | 185 | impl Display for Topic { 186 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 187 | write!(f, "{}", self.name) 188 | } 189 | } 190 | 191 | #[derive(Debug, Clone, PartialEq)] 192 | pub struct TopicsList { 193 | topics: Vec, 194 | } 195 | 196 | impl TopicsList { 197 | pub fn new(topics: Vec) -> Result { 198 | if topics.is_empty() { 199 | return Err(InvalidArgument( 200 | "Topic collection could not be empty".to_string(), 201 | )); 202 | } 203 | 204 | Ok(Self { topics }) 205 | } 206 | 207 | pub fn contains(&self, topic: &Topic) -> bool { 208 | self.topics.contains(topic) 209 | } 210 | 211 | pub fn as_strings(&self) -> Vec { 212 | self.topics.clone().into_iter().map(|t| t.name).collect() 213 | } 214 | } 215 | 216 | impl TryFrom> for TopicsList { 217 | type Error = Error; 218 | 219 | fn try_from(value: Vec) -> Result { 220 | let topics: Vec = value.iter().map(Topic::new).try_collect()?; 221 | 222 | Self::new(topics) 223 | } 224 | } 225 | 226 | impl From> for TopicsList { 227 | fn from(value: Vec<&'static str>) -> Self { 228 | let vec: Vec = value.into_iter().map(ToString::to_string).collect(); 229 | 230 | Self::try_from(vec).unwrap() 231 | } 232 | } 233 | 234 | impl FromIterator for TopicsList { 235 | fn from_iter>(iter: T) -> Self { 236 | let mut vec = Vec::new(); 237 | for v in iter { 238 | vec.push(v); 239 | } 240 | 241 | TopicsList::try_from(vec).unwrap() 242 | } 243 | } 244 | 245 | impl From for Vec { 246 | fn from(value: TopicsList) -> Self { 247 | value.into_iter().map(|t| t.name).collect() 248 | } 249 | } 250 | 251 | impl IntoIterator for TopicsList { 252 | type Item = Topic; 253 | type IntoIter = IntoIter; 254 | 255 | fn into_iter(self) -> Self::IntoIter { 256 | self.topics.into_iter() 257 | } 258 | } 259 | 260 | #[cfg(test)] 261 | mod endpoint_tests { 262 | use crate::configuration::domain::{ApplicationId, Endpoint, TopicsList}; 263 | 264 | #[test] 265 | fn endpoint_disable_manually_is_not_active() { 266 | let mut endpoint = EndpointObjectMother::init_new(); 267 | assert!(endpoint.is_active()); 268 | 269 | endpoint.disable_manually(); 270 | assert!(!endpoint.is_active()); 271 | } 272 | 273 | #[test] 274 | fn endpoint_disable_failing_is_not_active() { 275 | let mut endpoint = EndpointObjectMother::init_new(); 276 | assert!(endpoint.is_active()); 277 | 278 | endpoint.disable_failing(); 279 | assert!(!endpoint.is_active()); 280 | } 281 | 282 | #[test] 283 | fn endpoint_enable_manually_is_active() { 284 | let mut endpoint = EndpointObjectMother::init_disabled(); 285 | assert!(!endpoint.is_active()); 286 | 287 | endpoint.enable_manually(); 288 | assert!(endpoint.is_active()); 289 | } 290 | 291 | struct EndpointObjectMother; 292 | 293 | impl EndpointObjectMother { 294 | fn init_new() -> Endpoint { 295 | Endpoint::new( 296 | "https://localhost", 297 | ApplicationId::new(), 298 | TopicsList::try_from(vec![String::from("test")]).unwrap(), 299 | ) 300 | } 301 | 302 | fn 
init_disabled() -> Endpoint { 303 | let mut endpoint = Self::init_new(); 304 | endpoint.disable_manually(); 305 | 306 | endpoint 307 | } 308 | } 309 | } 310 | 311 | #[cfg(test)] 312 | mod topic_tests { 313 | use crate::configuration::domain::Topic; 314 | use crate::tests::assert_strings; 315 | 316 | #[test] 317 | fn topic_name_construct() { 318 | assert!(Topic::new("customer_updated").is_ok()); 319 | assert!(Topic::new("customer-updated").is_ok()); 320 | assert!(Topic::new("customer.updated").is_ok()); 321 | assert!(Topic::new("customer.updated2").is_err()); 322 | assert!(Topic::new("customer updated").is_err()); 323 | assert!(Topic::new("").is_err()); 324 | assert!(Topic::new(" ").is_err()); 325 | } 326 | 327 | #[test] 328 | fn topic_can_be_build_from_any_type_of_str() { 329 | assert_strings!("order.purchased", |str| Topic::new(str).is_ok()); 330 | } 331 | } 332 | 333 | #[cfg(test)] 334 | mod topics_list_tests { 335 | use crate::configuration::domain::{Topic, TopicsList}; 336 | use crate::error::Error::InvalidArgument; 337 | 338 | #[test] 339 | fn cannot_be_empty_from_vec_string() { 340 | let vec: Vec = Vec::new(); 341 | let sut = TopicsList::try_from(vec); 342 | 343 | assert_eq!( 344 | Err(InvalidArgument( 345 | "Topic collection could not be empty".to_string() 346 | )), 347 | sut 348 | ); 349 | } 350 | 351 | #[test] 352 | fn cannot_be_empty_new_() { 353 | let sut = TopicsList::new(vec![]); 354 | 355 | assert_eq!( 356 | Err(InvalidArgument( 357 | "Topic collection could not be empty".to_string() 358 | )), 359 | sut 360 | ); 361 | } 362 | 363 | #[test] 364 | fn cannot_be_build_with_invalid_topics_name() { 365 | let topics = vec![ 366 | String::from("contact.updated"), 367 | String::from("contact.updated2"), 368 | ]; 369 | let sut = TopicsList::try_from(topics); 370 | 371 | assert_eq!(Err(InvalidArgument("Invalid topic name".to_string())), sut); 372 | } 373 | 374 | #[test] 375 | fn can_be_build_with_valid_topic_names_from_vec_string() { 376 | let topics = vec![ 377 | String::from("contact.updated"), 378 | String::from("contact.created"), 379 | ]; 380 | let sut = TopicsList::try_from(topics); 381 | 382 | assert!(sut.is_ok()); 383 | } 384 | 385 | #[test] 386 | fn can_be_build_with_valid_topic_names_new() { 387 | let topics = vec![ 388 | Topic::new("contact.updated").unwrap(), 389 | Topic::new("contact.created").unwrap(), 390 | ]; 391 | let sut = TopicsList::new(topics); 392 | 393 | assert!(sut.is_ok()); 394 | } 395 | 396 | #[test] 397 | fn can_iterate() { 398 | let a = Topic::new("contact.updated").unwrap(); 399 | let b = Topic::new("contact.created").unwrap(); 400 | let all = [a.clone(), b.clone()]; 401 | 402 | let sut = TopicsList::new(vec![a, b]).unwrap(); 403 | 404 | let mut count: u8 = 0; 405 | for topic in sut { 406 | assert!(all.contains(&topic)); 407 | count += 1; 408 | } 409 | 410 | assert_eq!(2, count); 411 | } 412 | } 413 | -------------------------------------------------------------------------------- /server/src/configuration/handlers.rs: -------------------------------------------------------------------------------- 1 | use actix_web::web::{Data, Json, Path}; 2 | use actix_web::{HttpResponse, Responder}; 3 | use log::debug; 4 | use validator::Validate; 5 | 6 | use crate::configuration::domain::{Application, Endpoint, TopicsList}; 7 | use crate::configuration::models::{ 8 | CreateAppRequest, CreateAppResponse, CreateEndpointRequest, CreateEndpointResponse, 9 | }; 10 | use crate::error::ResponseError; 11 | use crate::storage::Storage; 12 | use crate::types::{ApplicationId, 
EndpointId}; 13 | 14 | pub async fn create_application_handler( 15 | storage: Data, 16 | request: Json, 17 | ) -> Result { 18 | if let Err(err) = request.validate() { 19 | return Err(ResponseError::ValidationError(err)); 20 | } 21 | 22 | let app = Application::new(request.name.to_string()); 23 | 24 | storage.applications.save(app.clone()).await; 25 | 26 | debug!("Application created: {:?}", app,); 27 | 28 | Ok(HttpResponse::Created().json(CreateAppResponse::from(app))) 29 | } 30 | 31 | pub async fn create_endpoint_handler( 32 | storage: Data, 33 | request: Json, 34 | path: Path, 35 | ) -> Result { 36 | if let Err(err) = request.validate() { 37 | return Err(ResponseError::ValidationError(err)); 38 | } 39 | 40 | let app_id = ApplicationId::try_from(path.into_inner())?; 41 | let app = storage.applications.get(&app_id).await?; 42 | 43 | let url = request.url.clone(); 44 | let topics: TopicsList = request.topics.clone().into_iter().collect(); 45 | 46 | let endpoint = Endpoint::new(&url, app.id, topics); 47 | 48 | storage.endpoints.save(endpoint.clone()).await; 49 | 50 | debug!("Endpoint created: {:?}", endpoint,); 51 | 52 | Ok(HttpResponse::Created().json(CreateEndpointResponse::from(endpoint))) 53 | } 54 | 55 | pub async fn disable_endpoint_handler( 56 | storage: Data, 57 | path: Path<(String, String)>, 58 | ) -> Result { 59 | handle_status(storage, path, StatusAction::Disable).await 60 | } 61 | 62 | pub async fn enable_endpoint_handler( 63 | storage: Data, 64 | path: Path<(String, String)>, 65 | ) -> Result { 66 | handle_status(storage, path, StatusAction::Enable).await 67 | } 68 | 69 | enum StatusAction { 70 | Enable, 71 | Disable, 72 | } 73 | 74 | async fn handle_status( 75 | storage: Data, 76 | path: Path<(String, String)>, 77 | action: StatusAction, 78 | ) -> Result { 79 | let (app_id, endpoint_id) = path.into_inner(); 80 | 81 | let app_id = ApplicationId::try_from(app_id)?; 82 | let app = storage.applications.get(&app_id).await?; 83 | 84 | let endpoint_id = EndpointId::try_from(endpoint_id)?; 85 | let mut endpoint = storage.endpoints.get(&endpoint_id).await?; 86 | 87 | if !endpoint.app_id.eq(&app.id) { 88 | // todo get endpoint with one query - app_id + endpoint_id 89 | return Err(ResponseError::NotFound("Endpoint not found".to_string())); 90 | } 91 | 92 | match action { 93 | StatusAction::Enable => endpoint.enable_manually(), 94 | StatusAction::Disable => endpoint.disable_manually(), 95 | } 96 | 97 | storage.endpoints.save(endpoint).await; 98 | 99 | match action { 100 | StatusAction::Enable => debug!("Endpoint {} enabled", endpoint_id), 101 | StatusAction::Disable => debug!("Endpoint {} disabled", endpoint_id), 102 | } 103 | 104 | Ok(HttpResponse::NoContent()) 105 | } 106 | -------------------------------------------------------------------------------- /server/src/configuration/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod domain; 2 | pub mod handlers; 3 | mod models; 4 | pub mod storage; 5 | -------------------------------------------------------------------------------- /server/src/configuration/models.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use validator::{Validate, ValidationError}; 3 | 4 | use crate::configuration::domain::{Application, Endpoint, Topic}; 5 | 6 | fn is_not_empty(value: &str) -> Result<(), ValidationError> { 7 | let value = value.trim(); 8 | 9 | if value.is_empty() { 10 | return Err(ValidationError::new("is_empty")); 11 | } 
12 | 13 | Ok(()) 14 | } 15 | 16 | fn topic_are_valid(value: &Vec) -> Result<(), ValidationError> { 17 | for v in value { 18 | if Topic::try_from(v.as_str()).is_err() { 19 | let err = ValidationError::new("invalid_topic_name") 20 | .with_message(format!("'{}' is invalid topic name", v).into()); 21 | 22 | return Err(err); 23 | } 24 | } 25 | 26 | Ok(()) 27 | } 28 | 29 | #[derive(Deserialize, Validate)] 30 | pub struct CreateAppRequest { 31 | #[validate(custom(function = is_not_empty, message = "Name cannot be empty"))] 32 | pub name: String, 33 | } 34 | 35 | #[derive(Serialize)] 36 | pub struct CreateAppResponse { 37 | id: String, 38 | name: String, 39 | } 40 | 41 | impl From for CreateAppResponse { 42 | fn from(value: Application) -> Self { 43 | Self { 44 | id: value.id.to_string(), 45 | name: value.name, 46 | } 47 | } 48 | } 49 | 50 | #[derive(Deserialize, Validate)] 51 | pub struct CreateEndpointRequest { 52 | #[validate(url(message = "Url should be valid"))] 53 | pub url: String, 54 | #[validate(length(min = 1, message = "Should be at leas one topic"))] 55 | #[validate(custom(function = topic_are_valid))] 56 | pub topics: Vec, 57 | } 58 | 59 | #[derive(Serialize)] 60 | pub struct CreateEndpointResponse { 61 | id: String, 62 | app_id: String, 63 | url: String, 64 | topics: Vec, 65 | } 66 | 67 | impl From for CreateEndpointResponse { 68 | fn from(value: Endpoint) -> Self { 69 | Self { 70 | id: value.id.to_string(), 71 | app_id: value.app_id.to_string(), 72 | url: value.url.to_string(), 73 | topics: value.topics.into(), 74 | } 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /server/src/configuration/storage.rs: -------------------------------------------------------------------------------- 1 | use serde_json::json; 2 | use sqlx::{query, query_as, PgPool}; 3 | 4 | use crate::configuration::domain::{Application, Endpoint, Topic}; 5 | use crate::error::Error; 6 | use crate::types::{ApplicationId, EndpointId}; 7 | 8 | pub struct ApplicationStorage { 9 | pool: PgPool, 10 | } 11 | 12 | impl ApplicationStorage { 13 | pub fn new(pool: PgPool) -> Self { 14 | Self { pool } 15 | } 16 | 17 | pub async fn save(&self, app: Application) { 18 | query( 19 | r" 20 | INSERT INTO applications (id, name) 21 | VALUES ($1, $2) 22 | ", 23 | ) 24 | .bind(app.id) 25 | .bind(app.name) 26 | .execute(&self.pool) 27 | .await 28 | .unwrap(); 29 | } 30 | 31 | pub async fn get(&self, app_id: &ApplicationId) -> Result { 32 | Ok(query_as::<_, Application>( 33 | r" 34 | SELECT * FROM applications WHERE id = $1 35 | ", 36 | ) 37 | .bind(app_id) 38 | .fetch_one(&self.pool) 39 | .await?) 
40 | } 41 | } 42 | 43 | pub struct EndpointStorage { 44 | pool: PgPool, 45 | } 46 | 47 | impl EndpointStorage { 48 | pub fn new(pool: PgPool) -> Self { 49 | Self { pool } 50 | } 51 | 52 | pub async fn save(&self, endpoint: Endpoint) { 53 | query( 54 | r" 55 | INSERT INTO endpoints (id, app_id, url, topics, status) 56 | VALUES ($1, $2, $3, $4, $5) 57 | ON CONFLICT (id) DO UPDATE 58 | SET url = EXCLUDED.url, 59 | topics = EXCLUDED.topics, 60 | status = EXCLUDED.status 61 | ", 62 | ) 63 | .bind(endpoint.id) 64 | .bind(endpoint.app_id) 65 | .bind(endpoint.url.to_string()) 66 | .bind(json!(endpoint.topics.as_strings())) 67 | .bind(endpoint.status.to_string()) 68 | .execute(&self.pool) 69 | .await 70 | .unwrap(); 71 | } 72 | 73 | pub async fn for_topic(&self, application_id: &ApplicationId, topic: &Topic) -> Vec { 74 | let endpoints = query_as::<_, Endpoint>( 75 | r" 76 | SELECT * FROM endpoints WHERE app_id = $1 77 | ", 78 | ) 79 | .bind(application_id) 80 | .fetch_all(&self.pool) 81 | .await 82 | .expect("Error in query"); 83 | 84 | endpoints 85 | .into_iter() 86 | .filter(|e| e.topics.contains(topic)) 87 | .collect() // todo: add it to the query 88 | } 89 | 90 | pub async fn get(&self, endpoint_id: &EndpointId) -> Result { 91 | Ok(query_as::<_, Endpoint>( 92 | r" 93 | SELECT * FROM endpoints WHERE id = $1 94 | ", 95 | ) 96 | .bind(endpoint_id) 97 | .fetch_one(&self.pool) 98 | .await?) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /server/src/dispatch_consumer.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use futures_lite::StreamExt; 4 | use lapin::options::{BasicAckOptions, BasicConsumeOptions}; 5 | use lapin::types::FieldTable; 6 | use lapin::Channel; 7 | use log::{debug, error, info}; 8 | 9 | use crate::amqp::{Publisher, Serializer}; 10 | use crate::circuit_breaker::{CircuitBreaker, Error}; 11 | use crate::cmd::AsyncMessage; 12 | use crate::config::AMQPConfig; 13 | use crate::retry::RetryPolicyBuilder; 14 | use crate::sender::Sender; 15 | use crate::storage::Storage; 16 | use crate::time::Clock; 17 | 18 | pub async fn consume( 19 | channel: Channel, 20 | consumer_tag: &str, 21 | storage: Storage, 22 | amqp_config: AMQPConfig, 23 | ) { 24 | let retry_policy = RetryPolicyBuilder::new() 25 | .max_retries(5) 26 | .exponential(2, Duration::from_secs(2)) 27 | .randomize(0.5) 28 | .build() 29 | .unwrap(); 30 | 31 | let mut circuit_breaker = CircuitBreaker::default(); 32 | 33 | let mut consumer = channel 34 | .basic_consume( 35 | &amqp_config.sent_message_queue_name(), 36 | consumer_tag, 37 | BasicConsumeOptions::default(), 38 | FieldTable::default(), 39 | ) 40 | .await 41 | .unwrap(); 42 | 43 | let publisher = Publisher::new(channel, amqp_config); 44 | let clock = Clock::chrono(); 45 | 46 | info!("consumer is ready"); 47 | 48 | while let Some(delivery) = consumer.next().await { 49 | let delivery = delivery.expect("error in consumer"); 50 | let async_msg: AsyncMessage = Serializer::deserialize(&delivery.data); 51 | 52 | let AsyncMessage::SentMessage(cmd) = async_msg; 53 | 54 | info!("message consumed: {:?}", cmd); 55 | 56 | let msg = storage.messages.get(cmd.msg_id()).await; 57 | if msg.is_err() { 58 | error!( 59 | "Message {} doesn't exist and cannot be dispatched", 60 | cmd.msg_id() 61 | ); 62 | 63 | delivery.ack(BasicAckOptions::default()).await.expect("ack"); 64 | 65 | continue; 66 | } 67 | 68 | let mut msg = msg.unwrap(); 69 | 70 | let event = 
storage.events.get(msg.event_id).await;
71 |         if event.is_err() {
72 |             error!(
73 |                 "Event {} does not exist and message cannot be dispatched",
74 |                 msg.event_id
75 |             );
76 | 
77 |             delivery.ack(BasicAckOptions::default()).await.expect("ack");
78 | 
79 |             continue;
80 |         }
81 | 
82 |         let endpoint_id = msg.endpoint_id;
83 |         let endpoint = storage.endpoints.get(&endpoint_id).await;
84 |         if endpoint.is_err() {
85 |             error!(
86 |                 "Endpoint {} does not exist and message {} cannot be dispatched",
87 |                 endpoint_id, msg.event_id
88 |             );
89 | 
90 |             delivery.ack(BasicAckOptions::default()).await.expect("ack");
91 | 
92 |             continue;
93 |         }
94 | 
95 |         let event = event.unwrap();
96 |         let endpoint = endpoint.unwrap();
97 | 
98 |         let sender = Sender::new(event.payload.clone(), endpoint.url.clone());
99 |         let key = endpoint_id.to_string();
100 | 
101 |         if endpoint.is_active() && circuit_breaker.revive(&key).is_some() {
102 |             debug!("Endpoint {} has been reopened", key);
103 |         }
104 | 
105 |         let processing_time = event.calculate_processing_time(&clock);
106 | 
107 |         debug!(
108 |             "Event {} for endpoint {} is being prepared to send. Processing time: {:?}",
109 |             event.id.to_string(),
110 |             endpoint.id.to_string(),
111 |             processing_time,
112 |         );
113 | 
114 |         match circuit_breaker.call(&key, || sender.send()).await {
115 |             Ok(res) => {
116 |                 let log = msg.record_attempt(res, processing_time);
117 |                 storage.messages.save(msg).await;
118 |                 storage.attempt_log.save(log).await;
119 |             }
120 |             Err(err) => match err {
121 |                 Error::Closed(res) => {
122 |                     let log = msg.record_attempt(res, processing_time);
123 |                     storage.messages.save(msg).await;
124 |                     storage.attempt_log.save(log).await;
125 | 
126 |                     let mut endpoint = endpoint;
127 |                     let endpoint_id = endpoint.id;
128 | 
129 |                     endpoint.disable_failing();
130 |                     storage.endpoints.save(endpoint).await;
131 | 
132 |                     debug!("Endpoint {} has been disabled", endpoint_id);
133 |                 }
134 |                 Error::Open(res) => {
135 |                     let log = msg.record_attempt(res, processing_time);
136 |                     storage.messages.save(msg).await;
137 |                     storage.attempt_log.save(log).await;
138 | 
139 |                     if retry_policy.is_retryable(cmd.attempt) {
140 |                         let cmd_to_retry = cmd.with_increased_attempt();
141 |                         let duration = retry_policy.get_waiting_time(cmd.attempt);
142 | 
143 |                         publisher
144 |                             .publish_delayed(
145 |                                 AsyncMessage::SentMessage(cmd_to_retry.clone()),
146 |                                 duration,
147 |                             )
148 |                             .await;
149 | 
150 |                         debug!(
151 |                             "Message queued again. Attempt: {}. Delay: {:?}",
152 |                             cmd_to_retry.attempt, duration
153 |                         );
154 |                     }
155 | 
156 |                     // todo add message that wasn't delivered to some storage
157 |                 }
158 |                 Error::Rejected => {
159 |                     debug!(
160 |                         "Endpoint {} is closed. Message for event {} rejected.",
161 |                         key, msg.event_id
162 |                     );
163 | 
164 |                     // todo do something with message? add to some "not delivered" bucket? 
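                    // One common option, not implemented here, is to publish such
                    // messages to a dead-letter queue or persist them in a dedicated
                    // "undelivered" table so they can be inspected or replayed once
                    // the endpoint is revived.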
165 | } 166 | }, 167 | } 168 | 169 | delivery.ack(BasicAckOptions::default()).await.expect("ack"); 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /server/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter}; 2 | 3 | use actix_web::http::header::ContentType; 4 | use actix_web::http::StatusCode; 5 | use actix_web::HttpResponse; 6 | use serde_json::json; 7 | use sqlx::Error as SqlxError; 8 | use validator::{ValidationErrors, ValidationErrorsKind}; 9 | 10 | #[derive(Debug, PartialEq)] 11 | pub enum Error { 12 | InvalidArgument(String), 13 | EntityNotFound(String), 14 | Sqlx(String), 15 | } 16 | 17 | #[derive(Debug)] 18 | pub enum ResponseError { 19 | NotFound(String), 20 | BadRequest(String), 21 | InternalError, 22 | ValidationError(ValidationErrors), 23 | } 24 | 25 | impl Display for ResponseError { 26 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 27 | let msg = match self { 28 | ResponseError::NotFound(val) | ResponseError::BadRequest(val) => val, 29 | ResponseError::InternalError => "", 30 | ResponseError::ValidationError(_) => "Validation errors", 31 | }; 32 | 33 | write!(f, "{msg}") 34 | } 35 | } 36 | 37 | impl actix_web::error::ResponseError for ResponseError { 38 | fn status_code(&self) -> StatusCode { 39 | match *self { 40 | ResponseError::NotFound(_) => StatusCode::NOT_FOUND, 41 | ResponseError::BadRequest(_) => StatusCode::BAD_REQUEST, 42 | ResponseError::InternalError => StatusCode::INTERNAL_SERVER_ERROR, 43 | ResponseError::ValidationError(_) => StatusCode::BAD_REQUEST, 44 | } 45 | } 46 | 47 | fn error_response(&self) -> HttpResponse { 48 | let error = self.to_string(); 49 | 50 | let messages: Vec = match self { 51 | ResponseError::NotFound(_) 52 | | ResponseError::BadRequest(_) 53 | | ResponseError::InternalError => Vec::::new(), 54 | ResponseError::ValidationError(errors) => { 55 | let inner: Vec> = errors 56 | .errors() 57 | .iter() 58 | .map(|e| match e.1 { 59 | ValidationErrorsKind::Field(err) => { 60 | err.iter().map(|e| e.to_string()).collect() 61 | } 62 | _ => unreachable!("this is error type is not handled yet"), 63 | }) 64 | .collect(); 65 | 66 | inner.into_iter().flatten().collect() 67 | } 68 | }; 69 | 70 | HttpResponse::build(self.status_code()) 71 | .insert_header(ContentType::json()) 72 | .body( 73 | json!({ 74 | "error": error, 75 | "messages": messages 76 | }) 77 | .to_string(), 78 | ) 79 | } 80 | } 81 | 82 | impl From for ResponseError { 83 | fn from(value: Error) -> Self { 84 | match value { 85 | Error::EntityNotFound(msg) => ResponseError::NotFound(msg), 86 | Error::InvalidArgument(msg) => ResponseError::BadRequest(msg), 87 | Error::Sqlx(_) => ResponseError::InternalError, 88 | } 89 | } 90 | } 91 | 92 | impl From for Error { 93 | fn from(value: SqlxError) -> Self { 94 | match value { 95 | SqlxError::RowNotFound => Self::EntityNotFound("Entity not found".to_string()), 96 | _ => Self::Sqlx(value.to_string()), 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /server/src/events/domain.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{Display, Formatter}; 2 | use std::time::Duration; 3 | 4 | use chrono::{DateTime, NaiveDateTime, Utc}; 5 | use serde::{Serialize, Serializer}; 6 | use serde_json::Value; 7 | use sqlx::postgres::PgRow; 8 | use sqlx::{Error, FromRow, Row}; 9 | 10 | use 
crate::configuration::domain::{Endpoint, Topic}; 11 | use crate::sender::{SentResult, Status}; 12 | use crate::time::Clock; 13 | use crate::types::{ApplicationId, AttemptId, EndpointId, EventId, MessageId}; 14 | 15 | #[derive(Debug, Clone)] 16 | pub struct Payload { 17 | body: String, 18 | } 19 | 20 | impl From for Payload { 21 | fn from(value: Value) -> Self { 22 | Self { 23 | body: value.to_string(), 24 | } 25 | } 26 | } 27 | 28 | impl Serialize for Payload { 29 | fn serialize(&self, serializer: S) -> Result 30 | where 31 | S: Serializer, 32 | { 33 | let body: Value = serde_json::from_str(self.body.to_string().as_str()).unwrap(); 34 | 35 | serializer.serialize_some(&body) 36 | } 37 | } 38 | 39 | impl Display for Payload { 40 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { 41 | write!(f, "{}", self.body) 42 | } 43 | } 44 | 45 | #[derive(Debug, Clone)] 46 | pub struct Event { 47 | pub id: EventId, 48 | pub app_id: ApplicationId, 49 | pub payload: Payload, 50 | pub topic: Topic, 51 | pub created_at: DateTime, 52 | } 53 | 54 | impl Event { 55 | #[must_use] 56 | pub fn new(app_id: ApplicationId, payload: Payload, topic: Topic, clock: &Clock) -> Self { 57 | Self { 58 | id: EventId::new(), 59 | app_id, 60 | payload, 61 | topic, 62 | created_at: clock.now(), 63 | } 64 | } 65 | 66 | #[must_use] 67 | pub fn calculate_processing_time(&self, clock: &Clock) -> Duration { 68 | let now = clock.now(); 69 | if now < self.created_at { 70 | unreachable!( 71 | "Unable to calculate processing time because created_at_date is after now date" 72 | ); 73 | } 74 | 75 | let processing_time = now - self.created_at; 76 | 77 | processing_time 78 | .to_std() 79 | .unwrap_or_else(|_| Duration::from_secs(i64::MAX as u64)) // fixme: is max correct? 80 | } 81 | } 82 | 83 | impl FromRow<'_, PgRow> for Event { 84 | fn from_row(row: &'_ PgRow) -> Result { 85 | let created_at: NaiveDateTime = row.try_get("created_at")?; 86 | let topic: String = row.try_get("topic")?; 87 | let payload: Value = row.try_get("payload")?; 88 | 89 | Ok(Event { 90 | id: row.try_get("id")?, 91 | app_id: row.try_get("app_id")?, 92 | created_at: created_at.and_utc(), 93 | topic: Topic::try_from(topic).unwrap(), 94 | payload: Payload::from(payload), 95 | }) 96 | } 97 | } 98 | 99 | #[derive(Debug, Clone)] 100 | pub struct Message { 101 | pub id: MessageId, 102 | pub event_id: EventId, 103 | pub endpoint_id: EndpointId, 104 | pub attempts: AttemptCollection, 105 | } 106 | 107 | impl From<(Event, Endpoint)> for Message { 108 | fn from(value: (Event, Endpoint)) -> Self { 109 | let (event, endpoint) = value; 110 | 111 | Self::new(event.id, endpoint.id) 112 | } 113 | } 114 | 115 | impl Message { 116 | fn new(event_id: EventId, endpoint_id: EndpointId) -> Self { 117 | let id = MessageId::new(); 118 | 119 | Self { 120 | id, 121 | event_id, 122 | endpoint_id, 123 | attempts: AttemptCollection::new(id), 124 | } 125 | } 126 | 127 | pub fn record_attempt(&mut self, result: SentResult, processing_time: Duration) -> AttemptLog { 128 | let id = self.attempts.push(result.status); 129 | 130 | AttemptLog::new(id, processing_time, result.response_time, result.body) 131 | } 132 | 133 | #[must_use] 134 | pub fn attempts(&self) -> Vec { 135 | self.attempts.all() 136 | } 137 | } 138 | 139 | #[derive(Debug, Clone, PartialEq)] 140 | pub struct Attempt { 141 | id: AttemptId, 142 | status: Status, 143 | } 144 | 145 | impl Attempt { 146 | #[must_use] 147 | pub fn attempt_id(&self) -> u16 { 148 | self.id.attempt_no() 149 | } 150 | 151 | #[must_use] 152 | pub fn 
message_id(&self) -> MessageId { 153 | self.id.message_id() 154 | } 155 | 156 | #[must_use] 157 | pub fn status(&self) -> Status { 158 | self.status.clone() 159 | } 160 | 161 | fn new(id: AttemptId, status: Status) -> Self { 162 | Self { id, status } 163 | } 164 | 165 | fn is_delivered(&self) -> bool { 166 | match self.status { 167 | Status::Numeric(status) => (200..=299).contains(&status), 168 | Status::Unknown(_) => false, 169 | } 170 | } 171 | } 172 | 173 | impl FromRow<'_, PgRow> for Attempt { 174 | fn from_row(row: &'_ PgRow) -> Result { 175 | let message_id: MessageId = row.try_get("message_id")?; 176 | let attempt_no: i16 = row.try_get("attempt")?; 177 | let id = AttemptId::new(message_id, attempt_no as u16).unwrap(); 178 | 179 | let status: Status = Status::from_row(row)?; 180 | 181 | Ok(Self { id, status }) 182 | } 183 | } 184 | 185 | #[derive(Debug, Clone)] 186 | pub struct AttemptCollection { 187 | message_id: MessageId, 188 | attempts: Vec, 189 | } 190 | 191 | impl AttemptCollection { 192 | fn new(message_id: MessageId) -> Self { 193 | Self { 194 | message_id, 195 | attempts: Vec::new(), 196 | } 197 | } 198 | 199 | // todo add clock here or to logs? 200 | // fixme: improve returning id 201 | fn push(&mut self, status: Status) -> AttemptId { 202 | let attempt = Attempt::new(self.next_id(), status); 203 | 204 | if self.attempts.iter().any(Attempt::is_delivered) { 205 | panic!("Could not push to the attempt collection when was delivered"); 206 | } 207 | 208 | let id = attempt.id; 209 | self.attempts.push(attempt); 210 | 211 | id 212 | } 213 | 214 | fn next_id(&self) -> AttemptId { 215 | AttemptId::new(self.message_id, self.attempts.len() as u16 + 1).unwrap() 216 | } 217 | 218 | fn all(&self) -> Vec { 219 | let mut vec = self.attempts.clone(); 220 | vec.sort_unstable_by(|a, b| a.id.attempt_no().cmp(&b.id.attempt_no())); 221 | 222 | vec 223 | } 224 | } 225 | 226 | impl From<(MessageId, Vec)> for AttemptCollection { 227 | fn from(value: (MessageId, Vec)) -> Self { 228 | Self { 229 | message_id: value.0, 230 | attempts: value.1, 231 | } 232 | } 233 | } 234 | 235 | pub struct AttemptLog { 236 | id: AttemptId, 237 | processing_time: Duration, 238 | response_time: Duration, 239 | response_body: Option, 240 | } 241 | 242 | impl AttemptLog { 243 | #[must_use] 244 | pub fn new( 245 | id: AttemptId, 246 | processing_time: Duration, 247 | response_time: Duration, 248 | response_body: Option, 249 | ) -> Self { 250 | Self { 251 | id, 252 | processing_time, 253 | response_time, 254 | response_body, 255 | } 256 | } 257 | 258 | #[must_use] 259 | pub fn attempt_id(&self) -> u16 { 260 | self.id.attempt_no() 261 | } 262 | 263 | #[must_use] 264 | pub fn message_id(&self) -> MessageId { 265 | self.id.message_id() 266 | } 267 | 268 | pub fn processing_time(&self) -> Duration { 269 | self.processing_time 270 | } 271 | 272 | pub fn response_time(&self) -> Duration { 273 | self.response_time 274 | } 275 | 276 | pub fn response_body(&self) -> Option { 277 | self.response_body.clone() 278 | } 279 | } 280 | 281 | #[cfg(test)] 282 | mod message_test { 283 | use chrono::{DateTime, Utc}; 284 | use serde_json::json; 285 | use test_case::test_case; 286 | 287 | use crate::configuration::domain::Topic; 288 | use crate::events::domain::{Event, Payload}; 289 | use crate::tests::dt; 290 | use crate::time::Clock::Fixed; 291 | use crate::types::ApplicationId; 292 | 293 | #[test] 294 | #[should_panic( 295 | expected = "Unable to calculate processing time because created_at_date is after now date" 296 | )] 297 | fn 
processing_time_cannot_be_in_future() { 298 | let created_at = dt!("2014-11-28T12:00:09Z"); 299 | let now = dt!("2014-11-28T12:00:08Z"); 300 | 301 | let sut = MessageObjectMother::with_created_at_str(created_at); 302 | 303 | let _ = sut.calculate_processing_time(&Fixed(now)); 304 | } 305 | 306 | #[test_case("2014-11-28T12:00:09Z", "2014-11-28T12:00:10Z", 1000; "1 sec")] 307 | #[test_case("2014-11-28T12:00:09Z", "2014-11-28T12:00:09.425Z", 425; "425 ms")] 308 | #[test_case("2014-11-28T12:00:09Z", "2014-11-28T12:01:12.997Z", 63_997; "1 min")] 309 | fn processing_time(created_at: &str, now: &str, expected_id_ms: u128) { 310 | let created_at = dt!(created_at); 311 | let now = dt!(now); 312 | 313 | let sut = MessageObjectMother::with_created_at_str(created_at); 314 | 315 | let processing_time = sut.calculate_processing_time(&Fixed(now)); 316 | 317 | assert_eq!(expected_id_ms, processing_time.as_millis()); 318 | } 319 | 320 | struct MessageObjectMother; 321 | 322 | impl MessageObjectMother { 323 | fn with_created_at_str(created_at: DateTime) -> Event { 324 | let clock = Fixed(created_at); 325 | 326 | Event::new( 327 | ApplicationId::new(), 328 | Payload::from(json!({"foo": "bar"})), 329 | Topic::new("contact.created").unwrap(), 330 | &clock, 331 | ) 332 | } 333 | } 334 | } 335 | 336 | #[cfg(test)] 337 | mod attempt_test { 338 | use test_case::test_case; 339 | 340 | use crate::events::domain::Attempt; 341 | use crate::sender::Status; 342 | use crate::types::{AttemptId, MessageId}; 343 | 344 | #[test] 345 | #[should_panic] 346 | fn attempt_id_should_be_greater_than_0() { 347 | let attempt_id = AttemptId::new(MessageId::new(), 0).unwrap(); 348 | 349 | Attempt::new(attempt_id, Status::Numeric(200)); 350 | } 351 | 352 | #[test_case(Status::Numeric(200), true)] 353 | #[test_case(Status::Numeric(201), true)] 354 | #[test_case(Status::Numeric(299), true)] 355 | #[test_case(Status::Numeric(300), false)] 356 | #[test_case(Status::Numeric(400), false)] 357 | #[test_case(Status::Numeric(502), false)] 358 | #[test_case(Status::Unknown("test".to_string()), false)] 359 | fn attempt_is_delivered(status: Status, expected: bool) { 360 | let attempt_id = AttemptId::new(MessageId::new(), 1).unwrap(); 361 | let sut = Attempt::new(attempt_id, status); 362 | 363 | assert_eq!(expected, sut.is_delivered()); 364 | } 365 | } 366 | 367 | #[cfg(test)] 368 | mod attempt_collection_test { 369 | use crate::events::domain::AttemptCollection; 370 | use crate::sender::Status::Numeric; 371 | use crate::types::MessageId; 372 | 373 | #[test] 374 | fn get_attempts_from_collection() { 375 | let mut sut = AttemptCollection::new(MessageId::new()); 376 | 377 | sut.push(Numeric(504)); 378 | sut.push(Numeric(502)); 379 | sut.push(Numeric(500)); 380 | sut.push(Numeric(400)); 381 | sut.push(Numeric(200)); 382 | 383 | let mut vec = sut.all().into_iter(); 384 | 385 | assert_eq!(Numeric(504), vec.next().unwrap().status); 386 | assert_eq!(Numeric(502), vec.next().unwrap().status); 387 | assert_eq!(Numeric(500), vec.next().unwrap().status); 388 | assert_eq!(Numeric(400), vec.next().unwrap().status); 389 | assert_eq!(Numeric(200), vec.next().unwrap().status); 390 | assert_eq!(None, vec.next()); 391 | } 392 | 393 | #[test] 394 | #[should_panic(expected = "Could not push to the attempt collection when was delivered")] 395 | fn cannot_push_attempt_when_collection_is_delivered() { 396 | let mut sut = AttemptCollection::new(MessageId::new()); 397 | 398 | sut.push(Numeric(200)); 399 | sut.push(Numeric(200)); 400 | } 401 | 402 | #[test] 403 | fn 
should_have_ordered_unique_attempts() { 404 | let mut sut = AttemptCollection::new(MessageId::new()); 405 | 406 | sut.push(Numeric(500)); 407 | sut.push(Numeric(501)); 408 | sut.push(Numeric(502)); 409 | sut.push(Numeric(200)); 410 | 411 | let vec = sut.all(); 412 | let mut iter = vec.iter(); 413 | 414 | assert_eq!(1, iter.next().unwrap().id.attempt_no()); 415 | assert_eq!(2, iter.next().unwrap().id.attempt_no()); 416 | assert_eq!(3, iter.next().unwrap().id.attempt_no()); 417 | assert_eq!(4, iter.next().unwrap().id.attempt_no()); 418 | } 419 | } 420 | -------------------------------------------------------------------------------- /server/src/events/handlers.rs: -------------------------------------------------------------------------------- 1 | use actix_web::web::{Data, Json, Path}; 2 | use actix_web::{HttpResponse, Responder, Result}; 3 | use log::debug; 4 | 5 | use crate::amqp::Publisher; 6 | use crate::cmd::{AsyncMessage, SentMessage}; 7 | use crate::configuration::domain::{Endpoint, Topic}; 8 | use crate::error::ResponseError; 9 | use crate::events::domain::{Event, Message, Payload}; 10 | use crate::events::models::{CreateEventRequest, CreateEventResponse}; 11 | use crate::storage::Storage; 12 | use crate::time::Clock; 13 | use crate::types::ApplicationId; 14 | 15 | pub async fn create_event_handler( 16 | storage: Data, 17 | dispatcher: Data, 18 | request: Json, 19 | path: Path, 20 | ) -> Result { 21 | let app_id = ApplicationId::try_from(path.into_inner())?; 22 | let app = storage.applications.get(&app_id).await?; 23 | let topic = Topic::new(request.topic.clone())?; 24 | let clock = Clock::chrono(); 25 | let event = Event::new( 26 | app.id, 27 | Payload::from(request.payload.clone()), 28 | topic.clone(), 29 | &clock, 30 | ); 31 | 32 | storage.events.save(event.clone()).await; 33 | 34 | debug!("Message created: {:?}", event,); 35 | 36 | let endpoints: Vec = storage.endpoints.for_topic(&app_id, &event.topic).await; 37 | let endpoints_count = endpoints.len(); 38 | 39 | let active_endpoints: Vec = 40 | endpoints.into_iter().filter(Endpoint::is_active).collect(); 41 | 42 | debug!( 43 | "in app {} - {} ({}) endpoints found for event {}", 44 | event.app_id, 45 | active_endpoints.len(), 46 | endpoints_count, 47 | event.id 48 | ); 49 | 50 | for endpoint in active_endpoints { 51 | debug!("{} sending to {}", event.id, endpoint.url); 52 | 53 | let msg = Message::from((event.clone(), endpoint.clone())); 54 | 55 | storage.messages.save(msg.clone()).await; 56 | 57 | let cmd = SentMessage::new(msg.id); 58 | let message = AsyncMessage::SentMessage(cmd); 59 | 60 | dispatcher.publish(message).await; 61 | 62 | debug!("Message {} published on the queue", msg.id); 63 | } 64 | 65 | Ok(HttpResponse::Ok().json(CreateEventResponse::from(event))) 66 | } 67 | -------------------------------------------------------------------------------- /server/src/events/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod domain; 2 | pub mod handlers; 3 | mod models; 4 | pub mod storage; 5 | -------------------------------------------------------------------------------- /server/src/events/models.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use serde_json::Value; 3 | 4 | use crate::events::domain::Event; 5 | 6 | #[derive(Deserialize)] 7 | pub struct CreateEventRequest { 8 | pub payload: Value, 9 | pub topic: String, 10 | } 11 | 12 | #[derive(Serialize)] 13 | pub struct CreateEventResponse { 
14 | id: String, 15 | } 16 | 17 | impl From for CreateEventResponse { 18 | fn from(value: Event) -> Self { 19 | Self { 20 | id: value.id.to_string(), 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /server/src/events/storage.rs: -------------------------------------------------------------------------------- 1 | use serde_json::json; 2 | use sqlx::{query, query_as, FromRow, PgPool, Row}; 3 | 4 | use crate::error::Error; 5 | use crate::events::domain::{Attempt, AttemptCollection, AttemptLog, Event, Message}; 6 | use crate::sender::Status; 7 | use crate::types::{EndpointId, EventId, MessageId}; 8 | 9 | pub struct EventStorage { 10 | pool: PgPool, 11 | } 12 | 13 | impl EventStorage { 14 | #[must_use] 15 | pub fn new(pool: PgPool) -> Self { 16 | Self { pool } 17 | } 18 | 19 | pub async fn save(&self, event: Event) { 20 | query( 21 | r" 22 | INSERT INTO events (id, app_id, payload, topic, created_at) 23 | VALUES ($1, $2, $3, $4, $5) 24 | ", 25 | ) 26 | .bind(event.id) 27 | .bind(event.app_id) 28 | .bind(json!(event.payload)) 29 | .bind(event.topic.to_string()) 30 | .bind(event.created_at.naive_utc()) 31 | .execute(&self.pool) 32 | .await 33 | .unwrap(); 34 | } 35 | 36 | pub async fn get(&self, event_id: EventId) -> Result { 37 | Ok(query_as::<_, Event>( 38 | r" 39 | SELECT * FROM events WHERE id = $1 40 | ", 41 | ) 42 | .bind(event_id) 43 | .fetch_one(&self.pool) 44 | .await?) 45 | } 46 | } 47 | 48 | pub struct MessageStorage { 49 | pool: PgPool, 50 | } 51 | 52 | impl MessageStorage { 53 | pub fn new(pool: PgPool) -> Self { 54 | Self { pool } 55 | } 56 | 57 | pub async fn save(&self, message: Message) { 58 | let mut tx = self.pool.begin().await.unwrap(); 59 | 60 | query( 61 | r" 62 | INSERT INTO messages (id, event_id, endpoint_id) 63 | VALUES ($1, $2, $3) 64 | ON CONFLICT DO NOTHING 65 | ", 66 | ) 67 | .bind(message.id) 68 | .bind(message.event_id) 69 | .bind(message.endpoint_id) 70 | .execute(&mut *tx) 71 | .await 72 | .unwrap(); 73 | 74 | // todo optimize 75 | for attempt in message.attempts() { 76 | query( 77 | r" 78 | INSERT INTO attempts (message_id, attempt, status_numeric, status_unknown) 79 | VALUES ($1, $2, $3, $4) 80 | ON CONFLICT DO NOTHING 81 | ", 82 | ) 83 | .bind(attempt.message_id()) 84 | .bind(attempt.attempt_id() as i16) 85 | .bind(match attempt.status() { 86 | Status::Numeric(val) => Some(val as i16), 87 | Status::Unknown(_) => None, 88 | }) 89 | .bind(match attempt.status() { 90 | Status::Numeric(_) => None, 91 | Status::Unknown(val) => Some(val), 92 | }) 93 | .execute(&mut *tx) 94 | .await 95 | .unwrap(); 96 | } 97 | 98 | tx.commit().await.unwrap(); 99 | } 100 | 101 | pub async fn get(&self, message_id: MessageId) -> Result { 102 | let row = query( 103 | r" 104 | SELECT * FROM messages WHERE id = $1 105 | ", 106 | ) 107 | .bind(message_id) 108 | .fetch_one(&self.pool) 109 | .await?; 110 | 111 | let event_id: EventId = row.try_get("event_id")?; 112 | let endpoint_id: EndpointId = row.try_get("endpoint_id")?; 113 | 114 | let attempt_rows = query( 115 | r" 116 | SELECT * FROM attempts WHERE message_id = $1 117 | ", 118 | ) 119 | .bind(message_id) 120 | .fetch_all(&self.pool) 121 | .await 122 | .unwrap_or_default(); 123 | 124 | let attempts: Vec = attempt_rows 125 | .iter() 126 | .map(|p| Attempt::from_row(p).unwrap()) 127 | .collect(); 128 | let collection = AttemptCollection::from((message_id, attempts)); 129 | 130 | Ok(Message { 131 | id: message_id, 132 | endpoint_id, 133 | event_id, 134 | attempts: collection, 135 
| }) 136 | } 137 | } 138 | 139 | pub struct AttemptLogStorage { 140 | pool: PgPool, 141 | } 142 | 143 | impl AttemptLogStorage { 144 | pub fn new(pool: PgPool) -> Self { 145 | Self { pool } 146 | } 147 | 148 | pub async fn save(&self, attempt_log: AttemptLog) { 149 | let processing_time = attempt_log.processing_time(); 150 | let response_time = attempt_log.response_time(); 151 | 152 | query( 153 | r" 154 | INSERT INTO attempt_logs (message_id, attempt, processing_time, response_time, response_body) 155 | VALUES ($1, $2, $3, $4, $5) 156 | ", 157 | ) 158 | .bind(attempt_log.message_id()) 159 | .bind(attempt_log.attempt_id() as i16) 160 | .bind(processing_time.as_millis() as i64) 161 | .bind(response_time.as_millis() as i64) 162 | .bind(attempt_log.response_body()) 163 | .execute(&self.pool) 164 | .await 165 | .unwrap(); 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /server/src/handlers/health_check.rs: -------------------------------------------------------------------------------- 1 | use actix_web::HttpResponse; 2 | 3 | pub async fn health_check() -> HttpResponse { 4 | HttpResponse::NoContent().finish() 5 | } 6 | -------------------------------------------------------------------------------- /server/src/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod health_check; 2 | -------------------------------------------------------------------------------- /server/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod amqp; 2 | pub mod app; 3 | pub mod circuit_breaker; 4 | pub mod cmd; 5 | pub mod config; 6 | pub mod configuration; 7 | pub mod dispatch_consumer; 8 | mod error; 9 | pub mod events; 10 | pub mod handlers; 11 | pub mod logs; 12 | pub mod retry; 13 | pub mod routes; 14 | mod sender; 15 | pub mod storage; 16 | #[cfg(test)] 17 | mod tests; 18 | pub mod time; 19 | pub mod types; 20 | -------------------------------------------------------------------------------- /server/src/logs.rs: -------------------------------------------------------------------------------- 1 | use log::LevelFilter; 2 | use log4rs::append::console::ConsoleAppender; 3 | use log4rs::config::{Appender, Root}; 4 | use log4rs::encode::pattern::PatternEncoder; 5 | use log4rs::Config; 6 | 7 | pub fn init_log() { 8 | let stdout = ConsoleAppender::builder() 9 | .encoder(Box::new(PatternEncoder::new( 10 | "{d(%+)(utc)} [{f}:{L}] {h({l})} {M}:{m}{n}", 11 | ))) 12 | .build(); 13 | let config = Config::builder() 14 | .appender(Appender::builder().build("stdout", Box::new(stdout))) 15 | .build(Root::builder().appender("stdout").build(LevelFilter::Debug)) 16 | .unwrap(); 17 | 18 | log4rs::init_config(config).unwrap(); 19 | } 20 | -------------------------------------------------------------------------------- /server/src/retry.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use rand::{thread_rng, Rng}; 4 | 5 | use crate::retry::RetryPolicyConfig::{Constant, Exponential}; 6 | 7 | type ShouldRetryPolicyType = dyn ShouldRetryPolicy + Sync + Send; 8 | type DelayRetryPolicyType = dyn RetryPolicy + Sync + Send; 9 | type RandomGeneratorType = dyn RandomGenerator + Sync + Send; 10 | 11 | pub struct Retry { 12 | should_retry_policy: Box<ShouldRetryPolicyType>, 13 | delay_retry_policy: Box<DelayRetryPolicyType>, 14 | } 15 | 16 | impl Retry { 17 | #[must_use] 18 | fn new( 19 | should_retry_policy: Box<ShouldRetryPolicyType>, 20 | delay_retry_policy: Box<DelayRetryPolicyType>, 21 | ) -> Self { 22 | Self { 23 | should_retry_policy, 24 | delay_retry_policy, 25 | } 26 | } 27 | 28 | #[must_use] 29 | pub fn is_retryable(&self, attempt: usize) -> bool { 30 | self.should_retry_policy.is_retryable(attempt) 31 | } 32 | 33 | #[must_use] 34 | pub fn get_waiting_time(&self, attempt: usize) -> Duration { 35 | self.delay_retry_policy.get_waiting_time(attempt) 36 | } 37 | } 38 | 39 | pub trait ShouldRetryPolicy { 40 | fn is_retryable(&self, attempt: usize) -> bool; 41 | } 42 | 43 | struct MaxAttemptsShouldRetryPolicy { 44 | max_retries: usize, 45 | } 46 | 47 | impl MaxAttemptsShouldRetryPolicy { 48 | fn new(max_retries: usize) -> Self { 49 | Self { max_retries } 50 | } 51 | } 52 | 53 | impl ShouldRetryPolicy for MaxAttemptsShouldRetryPolicy { 54 | fn is_retryable(&self, attempt: usize) -> bool { 55 | attempt < self.max_retries 56 | } 57 | } 58 | 59 | // todo extract Attempt, validation at least 1 60 | trait RetryPolicy { 61 | fn get_waiting_time(&self, attempt: usize) -> Duration; 62 | } 63 | 64 | struct ExponentialRetryPolicy { 65 | delay: Duration, 66 | multiplier: usize, 67 | } 68 | 69 | impl ExponentialRetryPolicy { 70 | fn new(config: &ExponentialConfig) -> Self { 71 | Self { 72 | delay: config.delay, 73 | multiplier: config.multiplier, 74 | } 75 | } 76 | } 77 | 78 | impl RetryPolicy for ExponentialRetryPolicy { 79 | fn get_waiting_time(&self, attempt: usize) -> Duration { 80 | self.delay * self.multiplier.pow(attempt as u32) as u32 81 | } 82 | } 83 | 84 | trait RandomGenerator { 85 | fn random(&self, min: f64, max: f64) -> f64; 86 | } 87 | 88 | struct RandCrateRandomGenerator {} 89 | 90 | impl RandCrateRandomGenerator { 91 | fn new() -> Self { 92 | Self {} 93 | } 94 | } 95 | 96 | impl RandomGenerator for RandCrateRandomGenerator { 97 | fn random(&self, min: f64, max: f64) -> f64 { 98 | thread_rng().gen_range(min..=max) 99 | } 100 | } 101 | 102 | struct RandomizeDecoratedRetryPolicy { 103 | random_generator: Box<RandomGeneratorType>, 104 | decorated: Box<DelayRetryPolicyType>, 105 | factor: f64, 106 | } 107 | 108 | impl RandomizeDecoratedRetryPolicy { 109 | pub fn new( 110 | random_generator: Box<RandomGeneratorType>, 111 | decorated: Box<DelayRetryPolicyType>, 112 | factor: f64, 113 | ) -> Self { 114 | Self { 115 | random_generator, 116 | decorated, 117 | factor, 118 | } 119 | } 120 | } 121 | 122 | impl RetryPolicy for RandomizeDecoratedRetryPolicy { 123 | fn get_waiting_time(&self, attempt: usize) -> Duration { 124 | let base = self.decorated.get_waiting_time(attempt).as_millis() as f64; 125 | let diff = base * self.factor; 126 | 127 | let min = base - diff; 128 | let max = base + diff; 129 | 130 | let randomized_duration = self.random_generator.random(min, max); 131 | 132 | Duration::from_millis(randomized_duration as u64) 133 | } 134 | } 135 | 136 | struct ConstantRetryPolicy { 137 | delay: Duration, 138 | } 139 | 140 | impl ConstantRetryPolicy { 141 | fn new(config: &ConstantConfig) -> Self { 142 | Self { 143 | delay: config.delay, 144 | } 145 | } 146 | } 147 | 148 | impl RetryPolicy for ConstantRetryPolicy { 149 | fn get_waiting_time(&self, _attempt: usize) -> Duration { 150 | self.delay 151 | } 152 | } 153 | 154 | #[derive(Clone)] 155 | enum RetryPolicyConfig { 156 | Exponential(ExponentialConfig), 157 | Constant(ConstantConfig), 158 | } 159 | 160 | #[derive(Clone)] 161 | struct ExponentialConfig { 162 | multiplier: usize, 163 | delay: Duration, 164 | } 165 | 166 | impl ExponentialConfig { 167 | fn new(multiplier: usize, delay: Duration) -> Self { 168 | Self { multiplier, delay } 169 | } 170 | } 171 | 172 | #[derive(Clone)] 173 | struct ConstantConfig { 174 | delay: Duration, 175 | } 176 | 177 | impl ConstantConfig { 178 | fn new(delay: Duration) -> Self { 179 | Self { delay } 180 | } 181 | } 182 | 183 | pub struct RetryPolicyBuilder { 184 | max_retries: Option<usize>, 185 | config: Option<RetryPolicyConfig>, 186 | random_factor: Option<f64>, 187 | } 188 | 189 | impl RetryPolicyBuilder { 190 | #[must_use] 191 | pub fn new() -> Self { 192 | Self { 193 | max_retries: None, 194 | config: None, 195 | random_factor: None, 196 | } 197 | } 198 | 199 | pub fn max_retries(&mut self, max_retries: usize) -> &mut Self { 200 | self.max_retries = Some(max_retries); 201 | self 202 | } 203 | 204 | pub fn exponential(&mut self, multiplier: usize, delay: Duration) -> &mut Self { 205 | self.config = Some(Exponential(ExponentialConfig::new(multiplier, delay))); 206 | self 207 | } 208 | 209 | pub fn constant(&mut self, delay: Duration) -> &mut Self { 210 | self.config = Some(Constant(ConstantConfig::new(delay))); 211 | self 212 | } 213 | 214 | pub fn randomize(&mut self, factor: f64) -> &mut Self { 215 | self.random_factor = Some(factor); 216 | self 217 | } 218 | 219 | pub fn build(&self) -> Result<Retry, String> { 220 | if self.max_retries.is_none() { 221 | return Err(String::from("Max retries should be set")); 222 | } 223 | 224 | if self.config.is_none() { 225 | return Err(String::from("No base policy was chosen")); 226 | } 227 | 228 | let mut delay_policy: Box<DelayRetryPolicyType> = match self.config.clone().unwrap() { 229 | Exponential(config) => Box::new(ExponentialRetryPolicy::new(&config)), 230 | Constant(config) => Box::new(ConstantRetryPolicy::new(&config)), 231 | }; 232 | 233 | if let Some(factor) = self.random_factor { 234 | delay_policy = Box::new(RandomizeDecoratedRetryPolicy::new( 235 | Box::new(RandCrateRandomGenerator::new()), 236 | delay_policy, 237 | factor, 238 | )); 239 | } 240 | 241 | let should_retry = Box::new(MaxAttemptsShouldRetryPolicy::new(self.max_retries.unwrap())); 242 | 243 | Ok(Retry::new(should_retry, delay_policy)) 244 | } 245 | } 246 | 247 | impl Default for RetryPolicyBuilder { 248 | fn default() -> Self { 249 | Self::new() 250 | } 251 | } 252 | 253 | #[cfg(test)] 254 | mod tests { 255 | use std::time::Duration; 256 | 257 | use test_case::test_case; 258 | 259 | use crate::retry::{ 260 | ConstantConfig, ConstantRetryPolicy, ExponentialConfig, ExponentialRetryPolicy, 261 | RandomGenerator, RandomGeneratorType, RandomizeDecoratedRetryPolicy, RetryPolicy, 262 | }; 263 | 264 | // todo write tests for builder with the same cases 265 | #[test_case(1, 2, 1, 2)] 266 | #[test_case(1, 2, 2, 4)] 267 | #[test_case(1, 2, 3, 8)] 268 | #[test_case(1, 2, 4, 16)] 269 | #[test_case(2, 3, 1, 6)] 270 | #[test_case(2, 3, 3, 54)] 271 | fn exponential_retry_policy( 272 | delay_in_secs: u64, 273 | multiplier: usize, 274 | attempt: usize, 275 | result: u64, 276 | ) { 277 | let config = ExponentialConfig::new(multiplier, Duration::from_secs(delay_in_secs)); 278 | let sut = ExponentialRetryPolicy::new(&config); 279 | 280 | assert_eq!(Duration::from_secs(result), sut.get_waiting_time(attempt)); 281 | } 282 | 283 | // todo write tests for builder with the same cases 284 | #[test_case(5000, 0.5, 2500, 7500; "5 sec, 0.5 factor")] 285 | #[test_case(5000, 0.25, 3750, 6250; "5 sec, 0.25 factor")] 286 | #[test_case(3000, 0.1, 2700, 3300; "3 sec, 0.1 factor")] 287 | fn randomize_decorated_retry_policy(delay: u64, factor: f64, min: u64, max: u64) { 288 | let sut = 289 | build_randomize_decorated_retry_policy(delay, Box::new(MinRandomGenerator {}), factor); 290 | 291 | let min_delay = sut.get_waiting_time(1); 292 | assert_eq!(Duration::from_millis(min), min_delay); 293 | 294 | let sut = 295 | build_randomize_decorated_retry_policy(delay, Box::new(MaxRandomGenerator {}), factor); 296 | 297 | let max_delay = sut.get_waiting_time(1); 298 | assert_eq!(Duration::from_millis(max), max_delay); 299 | } 300 | 301 | fn build_randomize_decorated_retry_policy( 302 | delay: u64, 303 | random_generator: Box<RandomGeneratorType>, 304 | factor: f64, 305 | ) -> RandomizeDecoratedRetryPolicy { 306 | let config = ConstantConfig::new(Duration::from_millis(delay)); 307 | let constant = Box::new(ConstantRetryPolicy::new(&config)); 308 | 309 | RandomizeDecoratedRetryPolicy::new(random_generator, constant, factor) 310 | } 311 | 312 | struct MinRandomGenerator {} 313 | 314 | impl RandomGenerator for MinRandomGenerator { 315 | fn random(&self, min: f64, _max: f64) -> f64 { 316 | min 317 | } 318 | } 319 | 320 | struct MaxRandomGenerator {} 321 | 322 | impl RandomGenerator for MaxRandomGenerator { 323 | fn random(&self, _min: f64, max: f64) -> f64 { 324 | max 325 | } 326 | } 327 | } 328 | -------------------------------------------------------------------------------- /server/src/routes.rs: -------------------------------------------------------------------------------- 1 | use actix_web::web; 2 | 3 | use crate::configuration::handlers::{ 4 | create_application_handler, create_endpoint_handler, disable_endpoint_handler, 5 | enable_endpoint_handler, 6 | }; 7 | use crate::events::handlers::create_event_handler; 8 | use crate::handlers::health_check::health_check; 9 | 10 | pub fn routes(cfg: &mut web::ServiceConfig) { 11 | cfg.route("/health_check", web::get().to(health_check)); 12 | cfg.route("/application", web::post().to(create_application_handler)); 13 | cfg.route( 14 | "/application/{app_id}/endpoint", 15 | web::post().to(create_endpoint_handler), 16 | ); 17 | cfg.route( 18 | "/application/{app_id}/endpoint/{endpoint_id}/disable", 19 | web::post().to(disable_endpoint_handler), 20 | ); 21 | cfg.route( 22 | "/application/{app_id}/endpoint/{endpoint_id}/enable", 23 | web::post().to(enable_endpoint_handler), 24 | ); 25 | cfg.route( 26 | "application/{app_id}/event", 27 | web::post().to(create_event_handler), 28 | ); 29 | } 30 | -------------------------------------------------------------------------------- /server/src/sender.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | use log::debug; 4 | use reqwest::StatusCode; 5 | use sqlx::postgres::PgRow; 6 | use sqlx::{Error, FromRow, Row}; 7 | use url::Url; 8 | 9 | use crate::events::domain::Payload; 10 | use crate::sender::Status::{Numeric, Unknown}; 11 | 12 | #[derive(Debug, Clone, PartialEq)] 13 | pub enum Status { 14 | Numeric(u16), 15 | Unknown(String), 16 | } 17 | 18 | impl FromRow<'_, PgRow> for Status { 19 | fn from_row(row: &'_ PgRow) -> Result<Self, Error> { 20 | let numeric: Option<i16> = row.try_get("status_numeric")?; 21 | if let Some(val) = numeric { 22 | return Ok(Numeric(val as u16)); 23 | } 24 | 25 | let unknown: Option<String> = row.try_get("status_unknown")?; 26 | if let Some(val) = unknown { 27 | return Ok(Unknown(val)); 28 | } 29 | 30 | unreachable!("Could not encode status from postgres") 31 | } 32 | } 33 | 34 | pub struct SentResult { 35 | pub status: Status, 36 | pub response_time: Duration, 37 | pub body: Option<String>, 38 | } 39 | 40 | impl SentResult { 41 | fn with_body(status: Status, response_time: Duration, body: String) -> Self { 42 | Self { 43 | status, 44 | response_time, 45 | body: Some(body), 46 | } 47 | } 48 | 49 | fn without_body(status: Status, response_time: Duration) -> Self { 50 | Self { 51 | status, 52 | response_time, 53 | body: None, 54 | } 55 | } 56 | } 57 | 58 | pub struct Sender { 59 | payload: Payload, 60 | url: Url, 61 | } 62 | 63 | impl Sender { 64 | #[must_use] 65 | pub fn new(payload: Payload, url: Url) -> Self { 66 | Self { payload, url } 67 | } 68 | 69 | pub async fn send(&self) -> Result<SentResult, SentResult> { 70 | let start = Instant::now(); 71 | 72 | let response = reqwest::Client::new() 73 | .post(self.url.clone()) 74 | .json(&self.payload) 75 | .send() 76 | .await; 77 | 78 | let end = start.elapsed(); 79 | 80 | match response { 81 | Ok(res) => { 82 | if res.status().is_success() { 83 | debug!("Success response! {}", res.status()); 84 | 85 | return Ok(SentResult::with_body( 86 | Numeric(res.status().as_u16()), 87 | end, 88 | res.text().await.unwrap(), 89 | )); 90 | } 91 | 92 | let status_code = res.status(); 93 | let status = status_code.as_u16(); 94 | let body = res.text().await.unwrap(); 95 | 96 | Self::log_error_response(Some(status_code), &body.clone()); 97 | 98 | Err(SentResult::with_body(Numeric(status), end, body)) 99 | } 100 | Err(err) => { 101 | Self::log_error_response(err.status(), &err.to_string()); 102 | 103 | Err(SentResult::without_body(Unknown(err.to_string()), end)) 104 | } 105 | } 106 | } 107 | 108 | fn log_error_response(status_code: Option<StatusCode>, response: &str) { 109 | let status: String = status_code.map_or(String::from("-"), |s| s.to_string()); 110 | 111 | debug!("Error response! Status: {}, Error: {}", status, response); 112 | } 113 | } 114 | 115 | #[cfg(test)] 116 | mod tests { 117 | use std::str::FromStr; 118 | 119 | use mockito::Matcher::Json; 120 | use serde_json::json; 121 | use url::Url; 122 | 123 | use crate::events::domain::Payload; 124 | use crate::sender::Sender; 125 | 126 | #[test_case::test_case(200, Ok(()))] 127 | #[test_case::test_case(201, Ok(()))] 128 | #[test_case::test_case(299, Ok(()))] 129 | #[test_case::test_case(300, Err(()))] 130 | #[test_case::test_case(304, Err(()))] 131 | #[test_case::test_case(400, Err(()))] 132 | #[test_case::test_case(403, Err(()))] 133 | #[test_case::test_case(500, Err(()))] 134 | #[test_case::test_case(505, Err(()))] 135 | #[tokio::test] 136 | async fn only_status_2xx_is_valid_as_response(status_code: usize, expected: Result<(), ()>) { 137 | let mut server = mockito::Server::new_async().await; 138 | let url = Url::from_str(server.url().as_str()).unwrap(); 139 | let payload = Payload::from(json!({"foo": "bar"})); 140 | 141 | let mock = server 142 | .mock("POST", "/") 143 | .match_body(Json(json!({"foo": "bar"}))) 144 | .with_body("response") 145 | .with_status(status_code) 146 | .create_async() 147 | .await; 148 | 149 | let result = Sender::new(payload, url).send().await; 150 | 151 | mock.assert_async().await; 152 | 153 | match expected { 154 | Ok(_) => assert!(result.is_ok()), 155 | Err(_) => assert!(result.is_err()), 156 | } 157 | } 158 | 159 | #[tokio::test] 160 | async fn request_to_unavailable_server_is_error() { 161 | let url = Url::from_str("http://localhost:0").unwrap(); 162 | let payload = Payload::from(json!({"foo": "bar"})); 163 | 164 | let result = Sender::new(payload, url).send().await; 165 | 166 | assert!(result.is_err()) 167 | } 168 | 169 | //todo: test response object 170 | } 171 | -------------------------------------------------------------------------------- /server/src/storage.rs: -------------------------------------------------------------------------------- 1 | use sqlx::PgPool; 2 | 3 | use
crate::configuration::storage::{ApplicationStorage, EndpointStorage}; 4 | use crate::events::storage::{AttemptLogStorage, EventStorage, MessageStorage}; 5 | 6 | pub struct Storage { 7 | pub applications: ApplicationStorage, 8 | pub endpoints: EndpointStorage, 9 | pub events: EventStorage, 10 | pub messages: MessageStorage, 11 | pub attempt_log: AttemptLogStorage, 12 | } 13 | 14 | impl Storage { 15 | #[must_use] 16 | pub fn new(pool: PgPool) -> Self { 17 | Self { 18 | applications: ApplicationStorage::new(pool.clone()), 19 | endpoints: EndpointStorage::new(pool.clone()), 20 | events: EventStorage::new(pool.clone()), 21 | messages: MessageStorage::new(pool.clone()), 22 | attempt_log: AttemptLogStorage::new(pool), 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /server/src/tests.rs: -------------------------------------------------------------------------------- 1 | macro_rules! assert_strings { 2 | ($str: literal, $func: expr) => { 3 | let a: &str = $str; 4 | let b: String = String::from($str); 5 | let c: &String = &b; 6 | 7 | let fmt = |t: &str| { 8 | format!( 9 | "callable {} with param of type {} failed", 10 | stringify!($func), 11 | t 12 | ) 13 | }; 14 | 15 | #[allow(clippy::redundant_closure_call)] 16 | let a_result = $func(a); 17 | #[allow(clippy::redundant_closure_call)] 18 | let c_result = $func(c); 19 | #[allow(clippy::redundant_closure_call)] 20 | let b_result = $func(b); 21 | 22 | assert!(a_result, "{}", fmt("$str")); 23 | assert!(c_result, "{}", fmt("&String")); 24 | assert!(b_result, "{}", fmt("String")); 25 | }; 26 | } 27 | 28 | macro_rules! dt { 29 | ($str: expr) => { 30 | $str.parse::<DateTime<Utc>>().unwrap() 31 | }; 32 | } 33 | 34 | pub(crate) use assert_strings; 35 | pub(crate) use dt; 36 | -------------------------------------------------------------------------------- /server/src/time.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | 3 | pub enum Clock { 4 | Chrono, 5 | #[cfg(test)] 6 | Fixed(DateTime<Utc>), 7 | } 8 | 9 | impl Clock { 10 | #[must_use] 11 | pub fn now(&self) -> DateTime<Utc> { 12 | match self { 13 | Clock::Chrono => Utc::now(), 14 | #[cfg(test)] 15 | Clock::Fixed(date) => *date, 16 | } 17 | } 18 | 19 | #[must_use] 20 | pub fn chrono() -> Self { 21 | Self::Chrono 22 | } 23 | 24 | #[cfg(test)] 25 | pub fn fixed(time: DateTime<Utc>) -> Self { 26 | Self::Fixed(time) 27 | } 28 | } 29 | 30 | #[cfg(test)] 31 | mod tests { 32 | use chrono::{TimeZone, Utc}; 33 | 34 | use crate::time::Clock; 35 | 36 | #[test] 37 | fn test_fixed_time() { 38 | let date = Utc.with_ymd_and_hms(2024, 5, 30, 14, 0, 0).unwrap(); 39 | 40 | let sut = Clock::fixed(date); 41 | 42 | assert_eq!(date, sut.now()); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /server/src/types/mod.rs: -------------------------------------------------------------------------------- 1 | macro_rules! make_ksuid { 2 | ($name: ident, $prefix: literal) => { 3 | #[derive(Clone, Copy, Eq, PartialEq)] 4 | pub struct $name ([u8; 27]); 5 | 6 | impl $name { 7 | const PREFIX: &'static str = $prefix; 8 | const TERMINATOR: char = '_'; 9 | 10 | #[must_use] 11 | pub fn new() -> Self { 12 | use svix_ksuid::{Ksuid, KsuidLike}; 13 | 14 | Self (Ksuid::new(None, None).to_base62().as_bytes().try_into().unwrap()) 15 | } 16 | 17 | #[must_use] 18 | pub fn to_base62(self) -> String { 19 | String::from_utf8_lossy(&self.0).to_string() 20 | } 21 | } 22 | 23 | impl std::fmt::Display for $name { 24 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 25 | write!(f, "{}{}{}", Self::PREFIX, Self::TERMINATOR, self.to_base62()) 26 | } 27 | } 28 | 29 | impl std::fmt::Debug for $name { 30 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 31 | write!(f, "{}", self.to_string()) 32 | } 33 | } 34 | 35 | impl TryFrom<String> for $name { 36 | type Error = crate::error::Error; 37 | 38 | fn try_from(value: String) -> Result<Self, Self::Error> { 39 | use std::str::FromStr; 40 | 41 | Self::from_str(value.as_str()) 42 | } 43 | } 44 | 45 | impl std::str::FromStr for $name { 46 | type Err = crate::error::Error; 47 | 48 | fn from_str(s: &str) -> Result<Self, Self::Err> { 49 | use itertools::Itertools; 50 | use svix_ksuid::KsuidLike; 51 | 52 | let tuple = s 53 | .split_terminator(Self::TERMINATOR) 54 | .collect_tuple(); 55 | 56 | if tuple.is_none() { 57 | return Err(crate::error::Error::InvalidArgument(format!( 58 | "'{}' type should has '{}' prefix and valid id. Example '{}_1srOrx2ZWZBpBUvZwXKQmoEYga2'", 59 | stringify!($name), 60 | Self::PREFIX, 61 | Self::PREFIX, 62 | ))); 63 | } 64 | 65 | let (prefix, id) = tuple.unwrap(); 66 | 67 | if prefix != Self::PREFIX { 68 | return Err(crate::error::Error::InvalidArgument(format!( 69 | "'{}' type should have prefix '{}' but have '{}'", 70 | stringify!($name), 71 | Self::PREFIX, 72 | prefix, 73 | ))); 74 | } 75 | 76 | let ksuid = svix_ksuid::Ksuid::from_str(id); 77 | if ksuid.is_err() { 78 | return Err(crate::error::Error::InvalidArgument(format!( 79 | "'{}' type received invalid id '{}'", 80 | stringify!($name), 81 | id, 82 | ))); 83 | } 84 | 85 | Ok(Self(ksuid.unwrap().to_base62().as_bytes().try_into().unwrap())) 86 | } 87 | } 88 | 89 | impl Default for $name { 90 | fn default() -> Self { 91 | Self::new() 92 | } 93 | } 94 | 95 | impl sqlx::Decode<'_, sqlx::Postgres> for $name { 96 | fn decode(value: <sqlx::Postgres as sqlx::Database>::ValueRef<'_>) -> Result<Self, sqlx::error::BoxDynError> { 97 | let value = <&str as sqlx::Decode<sqlx::Postgres>>::decode(value)?; 98 | 99 | Ok(Self(value.as_bytes().try_into().unwrap())) 100 | } 101 | } 102 | 103 | impl sqlx::Type<sqlx::Postgres> for $name { 104 | fn type_info() -> sqlx::postgres::PgTypeInfo { 105 | <&str as sqlx::Type<sqlx::Postgres>>::type_info() 106 | } 107 | 108 | fn compatible(_ty: &sqlx::postgres::PgTypeInfo) -> bool { 109 | true 110 | } 111 | } 112 | 113 | impl sqlx::Encode<'_, sqlx::Postgres> for $name { 114 | fn encode_by_ref(&self, buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'_>) -> Result<sqlx::encode::IsNull, Box<dyn std::error::Error + Send + Sync + 'static>> { 115 | buf.extend(self.0); 116 | 117 | Ok(sqlx::encode::IsNull::No) 118 | } 119 | } 120 | }; 121 | } 122 | 123 | make_ksuid!(EventId, "evt"); 124 | make_ksuid!(MessageId, "msg"); 125 | make_ksuid!(ApplicationId, "app"); 126 | make_ksuid!(EndpointId, "ep"); 127 | 128 | #[derive(Debug, Clone, PartialEq, Copy, Eq)] 129 | pub struct AttemptId(MessageId, u16); 130 | 131 | impl AttemptId { 132 | pub fn new(message_id: MessageId, id: u16) -> Result<Self, String> { 133 | if id < 1 { 134 | return Err(format!("Id should be greater than 0. 
Was {id}")); 135 | } 136 | 137 | Ok(Self(message_id, id)) 138 | } 139 | 140 | #[must_use] 141 | pub fn attempt_no(&self) -> u16 { 142 | self.1 143 | } 144 | 145 | #[must_use] 146 | pub fn message_id(&self) -> MessageId { 147 | self.0 148 | } 149 | } 150 | 151 | #[cfg(test)] 152 | mod ksuid_tests { 153 | use std::str::FromStr; 154 | 155 | use itertools::Itertools; 156 | use test_case::test_case; 157 | 158 | use crate::error::Error::InvalidArgument; 159 | 160 | make_ksuid!(TestId, "test"); 161 | 162 | #[test] 163 | fn can_be_build_from_string() { 164 | assert!(TestId::from_str("test_1srOrx2ZWZBpBUvZwXKQmoEYga2").is_ok()) 165 | } 166 | 167 | #[test_case( 168 | "invalid_1srOrx2ZWZBpBUvZwXKQmoEYga2", "'TestId' type should have prefix 'test' but have 'invalid'"; "invalid prefix" 169 | )] 170 | #[test_case( 171 | "1srOrx2ZWZBpBUvZwXKQmoEYga2", "'TestId' type should has 'test' prefix and valid id. Example 'test_1srOrx2ZWZBpBUvZwXKQmoEYga2'"; "without prefix" 172 | )] 173 | #[test_case( 174 | "invalid_", "'TestId' type should has 'test' prefix and valid id. Example 'test_1srOrx2ZWZBpBUvZwXKQmoEYga2'"; "only prefix" 175 | )] 176 | #[test_case("test_foo", "'TestId' type received invalid id 'foo'"; "invalid id")] 177 | fn invalid(id: &str, error: &str) { 178 | assert_eq!( 179 | Err(InvalidArgument(error.to_string())), 180 | TestId::try_from(id.to_string()) 181 | ); 182 | } 183 | 184 | #[test] 185 | fn eq_test() { 186 | let a = TestId::from_str("test_1srOrx2ZWZBpBUvZwXKQmoEYga2").unwrap(); 187 | let b = TestId::from_str("test_1srOrx2ZWZBpBUvZwXKQmoEYga2").unwrap(); 188 | let c = TestId::from_str("test_0ujtsYcgvSTl8PAuAdqWYSMnLOv").unwrap(); 189 | 190 | assert!(a.eq(&b)); 191 | assert!(a.eq(&a)); 192 | assert!(!a.eq(&c)); 193 | assert!(!b.eq(&c)); 194 | } 195 | 196 | #[test] 197 | fn display_with_prefix() { 198 | let sut = TestId::new(); 199 | 200 | let binding = sut.to_string(); 201 | let (prefix, _) = binding.split_terminator('_').collect_tuple().unwrap(); 202 | 203 | assert_eq!("test", prefix); 204 | } 205 | 206 | #[test] 207 | fn debug_format() { 208 | let sut = TestId::from_str("test_1srOrx2ZWZBpBUvZwXKQmoEYga2").unwrap(); 209 | 210 | assert_eq!("test_1srOrx2ZWZBpBUvZwXKQmoEYga2", &format!("{:?}", sut)); 211 | } 212 | 213 | #[test] 214 | fn test_to_base62() { 215 | let sut = TestId::from_str("test_1srOrx2ZWZBpBUvZwXKQmoEYga2").unwrap(); 216 | 217 | assert_eq!("1srOrx2ZWZBpBUvZwXKQmoEYga2", sut.to_base62()); 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /server/tests/api/common.rs: -------------------------------------------------------------------------------- 1 | use std::net::TcpListener; 2 | 3 | use dotenv::dotenv; 4 | use envconfig::Envconfig; 5 | use fake::{Fake, Faker}; 6 | use reqwest::Client; 7 | use serde_json::{json, Value}; 8 | use sqlx::{migrate, Connection, Executor, PgConnection, PgPool}; 9 | use svix_ksuid::{Ksuid, KsuidLike}; 10 | 11 | use server::app::{run_dispatcher, run_server}; 12 | use server::config::{AMQPConfig, PostgresConfig}; 13 | use server::logs::init_log; 14 | use server::storage::Storage; 15 | use server::types::{ApplicationId, EndpointId}; 16 | 17 | struct TestEnvironmentBuilder; 18 | 19 | impl TestEnvironmentBuilder { 20 | pub async fn build() -> TestEnvironment { 21 | dotenv().ok(); 22 | 23 | let test_id = Ksuid::new(None, None).to_base62(); 24 | 25 | TestEnvironment { 26 | pool: Self::prepare_db(test_id.as_str()).await, 27 | amqp_config: Self::prepare_amqp(test_id.as_str()), 28 | } 29 | } 30 | 31 | pub async 
fn build_with_logs() -> TestEnvironment { 32 | init_log(); 33 | 34 | Self::build().await 35 | } 36 | 37 | async fn prepare_db(test_id: &str) -> PgPool { 38 | // Create db 39 | let pg_config = PostgresConfig::init_from_env().unwrap(); 40 | let mut connection = PgConnection::connect(&pg_config.connection_string_without_db()) 41 | .await 42 | .expect("Failed to connect to postgres"); 43 | 44 | let pg_config = pg_config.with_db(test_id); 45 | 46 | connection 47 | .execute(format!(r#"CREATE DATABASE "{}";"#, pg_config.db()).as_str()) 48 | .await 49 | .expect("Failed to create database."); 50 | 51 | // Migrate db 52 | let pool = PgPool::connect(&pg_config.connection_string()) 53 | .await 54 | .expect("Failed to connect to postgres"); 55 | 56 | migrate!("./migrations") 57 | .run(&pool) 58 | .await 59 | .expect("Failed to migrate"); 60 | 61 | pool 62 | } 63 | 64 | fn prepare_amqp(test_id: &str) -> AMQPConfig { 65 | let config = AMQPConfig::init_from_env().unwrap(); 66 | 67 | config.with_sent_message_queue(test_id) 68 | } 69 | } 70 | 71 | pub struct TestEnvironment { 72 | pool: PgPool, 73 | amqp_config: AMQPConfig, 74 | } 75 | 76 | impl TestEnvironment { 77 | pub async fn new() -> Self { 78 | TestEnvironmentBuilder::build().await 79 | } 80 | 81 | #[allow(dead_code)] 82 | pub async fn new_with_logs() -> Self { 83 | TestEnvironmentBuilder::build_with_logs().await 84 | } 85 | 86 | pub async fn server(&self) -> TestServer { 87 | TestServerBuilder::new(self.pool.clone(), self.amqp_config.clone()) 88 | .run() 89 | .await 90 | } 91 | 92 | pub async fn dispatcher(&self) { 93 | TestDispatcherBuilder::new(self.pool.clone(), self.amqp_config.clone()) 94 | .run() 95 | .await 96 | } 97 | } 98 | 99 | struct TestServerBuilder { 100 | pool: PgPool, 101 | amqp_config: AMQPConfig, 102 | } 103 | 104 | impl TestServerBuilder { 105 | fn new(pool: PgPool, amqp_config: AMQPConfig) -> Self { 106 | Self { pool, amqp_config } 107 | } 108 | 109 | async fn run(&self) -> TestServer { 110 | let listener = TcpListener::bind("127.0.0.1:0").unwrap(); 111 | let addr = format!("http://{}", listener.local_addr().unwrap()); 112 | 113 | let server = run_server(listener, self.pool.clone(), self.amqp_config.clone()) 114 | .await 115 | .unwrap(); 116 | 117 | #[allow(clippy::let_underscore_future)] 118 | let _ = tokio::spawn(server); 119 | 120 | TestServer { 121 | server_url: addr, 122 | storage: Storage::new(self.pool.clone()), 123 | } 124 | } 125 | } 126 | 127 | pub struct TestServer { 128 | server_url: String, 129 | storage: Storage, 130 | } 131 | 132 | impl TestServer { 133 | pub fn url(&self, endpoint: &str) -> String { 134 | format!("{}/{}", self.base_url(), endpoint) 135 | } 136 | 137 | fn base_url(&self) -> String { 138 | self.server_url.to_string() 139 | } 140 | 141 | pub fn storage(&self) -> &Storage { 142 | &self.storage 143 | } 144 | } 145 | 146 | struct TestDispatcherBuilder { 147 | pool: PgPool, 148 | amqp_config: AMQPConfig, 149 | } 150 | 151 | impl TestDispatcherBuilder { 152 | fn new(pool: PgPool, amqp_config: AMQPConfig) -> Self { 153 | Self { pool, amqp_config } 154 | } 155 | 156 | async fn run(&self) { 157 | let pool = self.pool.clone(); 158 | let amqp_config = self.amqp_config.clone(); 159 | 160 | #[allow(clippy::let_underscore_future)] 161 | tokio::spawn(async move { run_dispatcher(pool, amqp_config).await }); 162 | } 163 | } 164 | 165 | macro_rules! run_test_server { 166 | () => { 167 | TestEnvironment::new().await.server().await 168 | }; 169 | } 170 | 171 | macro_rules! 
run_test_server_and_dispatcher { 172 | () => {{ 173 | let environment = TestEnvironment::new().await; 174 | let server = environment.server().await; 175 | 176 | environment.dispatcher().await; 177 | 178 | server 179 | }}; 180 | } 181 | 182 | macro_rules! assert_mock_with_retry { 183 | ($mock: ident) => {{ 184 | let mut attempt: u8 = 1; 185 | const MAX_ATTEMPTS: u8 = 10; 186 | let mock: mockito::Mock = $mock; 187 | 188 | while !mock.matched_async().await && attempt <= MAX_ATTEMPTS { 189 | let delay = std::time::Duration::from_millis((10 * attempt).into()); 190 | 191 | println!("Delay for asserting mockito mock server: {:?}", delay); 192 | 193 | tokio::time::sleep(delay).await; 194 | attempt += 1; 195 | } 196 | 197 | mock.assert_async().await; 198 | }}; 199 | } 200 | 201 | pub(crate) use assert_mock_with_retry; 202 | pub(crate) use run_test_server; 203 | pub(crate) use run_test_server_and_dispatcher; 204 | 205 | pub struct Given { 206 | url: String, 207 | } 208 | 209 | impl Given { 210 | fn new(url: String) -> Given { 211 | Self { url } 212 | } 213 | 214 | pub async fn app(&self) -> ApplicationId { 215 | let name: String = Faker.fake::(); 216 | 217 | let response = Client::new() 218 | .post(&format!("{}/application", self.url)) 219 | .json(&json!({ 220 | "name": name 221 | })) 222 | .send() 223 | .await 224 | .expect("Failed to executed request"); 225 | 226 | let body = response.json::().await.unwrap(); 227 | 228 | let id = ApplicationId::try_from(body["id"].as_str().unwrap().to_string()) 229 | .expect("Invalid application id"); 230 | 231 | id 232 | } 233 | 234 | pub async fn endpoint_with_app( 235 | &self, 236 | url: &str, 237 | topics: Vec<&str>, 238 | ) -> (ApplicationId, EndpointId) { 239 | let app_id = self.app().await; 240 | 241 | let response = Client::new() 242 | .post(&format!("{}/application/{}/endpoint", self.url, app_id)) 243 | .json(&json!({ 244 | "url": url, 245 | "topics": topics 246 | })) 247 | .send() 248 | .await 249 | .expect("Failed to executed request"); 250 | 251 | let body = response.json::().await.unwrap(); 252 | 253 | let endpoint_id = EndpointId::try_from(body["id"].as_str().unwrap().to_string()) 254 | .expect("Invalid endpoint id"); 255 | 256 | (app_id, endpoint_id) 257 | } 258 | 259 | pub async fn disable_endpoint(&self, app_id: &ApplicationId, endpoint_id: &EndpointId) { 260 | Client::new() 261 | .post(&format!( 262 | "{}/application/{}/endpoint/{}/disable", 263 | self.url, app_id, endpoint_id 264 | )) 265 | .send() 266 | .await 267 | .expect("Failed to executed request"); 268 | } 269 | } 270 | 271 | impl From<&TestServer> for Given { 272 | fn from(value: &TestServer) -> Self { 273 | Self::new(value.base_url()) 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /server/tests/api/create_application.rs: -------------------------------------------------------------------------------- 1 | use reqwest::Client; 2 | use serde_json::{json, Value}; 3 | 4 | use server::types::ApplicationId; 5 | 6 | use crate::common::{run_test_server, TestEnvironment}; 7 | 8 | #[tokio::test] 9 | async fn application_is_created() { 10 | // Arrange 11 | let server = run_test_server!(); 12 | 13 | // Act 14 | let response = Client::new() 15 | .post(server.url("application")) 16 | .json(&json!({ 17 | "name": "Dummy application" 18 | })) 19 | .send() 20 | .await 21 | .expect("Failed to executed request"); 22 | 23 | // Assert 24 | assert_eq!(201, response.status()); 25 | 26 | let body = response.json::().await.unwrap(); 27 | assert_eq!("Dummy 
application", body["name"].as_str().unwrap()); 28 | 29 | let id = ApplicationId::try_from(body["id"].as_str().unwrap().to_string()) 30 | .expect("Invalid application id"); 31 | 32 | let app = server 33 | .storage() 34 | .applications 35 | .get(&id) 36 | .await 37 | .expect("Application was not created"); 38 | 39 | assert_eq!("Dummy application", app.name); 40 | } 41 | 42 | #[tokio::test] 43 | async fn application_names_can_be_without_space() { 44 | // Arrange 45 | let server = run_test_server!(); 46 | 47 | // Act 48 | let response = Client::new() 49 | .post(server.url("application")) 50 | .json(&json!({ 51 | "name": "test" 52 | })) 53 | .send() 54 | .await 55 | .expect("Failed to executed request"); 56 | 57 | // Assert 58 | assert_eq!(201, response.status()); 59 | } 60 | 61 | #[tokio::test] 62 | async fn validation() { 63 | // Arrange 64 | let server = run_test_server!(); 65 | 66 | let test_cases = vec![ 67 | ( 68 | json!({"name": ""}), 69 | json!({"error": "Validation errors", "messages": ["Name cannot be empty"]}), 70 | ), 71 | ( 72 | json!({"name": " "}), 73 | json!({"error": "Validation errors", "messages": ["Name cannot be empty"]}), 74 | ), 75 | ]; 76 | 77 | for test_case in test_cases { 78 | // Act 79 | let response = Client::new() 80 | .post(server.url("application")) 81 | .json(&test_case.0) 82 | .send() 83 | .await 84 | .expect("Failed to executed request"); 85 | 86 | // Assert 87 | assert_eq!(400, response.status()); 88 | assert_eq!(test_case.1, response.json::().await.unwrap()); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /server/tests/api/create_endpoint.rs: -------------------------------------------------------------------------------- 1 | use reqwest::Client; 2 | use serde_json::{json, Value}; 3 | use url::Url; 4 | 5 | use server::configuration::domain::{EndpointStatus, TopicsList}; 6 | use server::types::{ApplicationId, EndpointId}; 7 | 8 | use crate::common::{run_test_server, Given, TestEnvironment}; 9 | 10 | #[tokio::test] 11 | async fn endpoint_is_created() { 12 | // Arrange 13 | let server = run_test_server!(); 14 | let app_id = Given::from(&server).app().await; 15 | 16 | // Act 17 | let response = Client::new() 18 | .post(server.url(&format!("application/{}/endpoint", app_id))) 19 | .json(&json!({ 20 | "url": "http://localhost:8080", 21 | "topics": [ 22 | "contact.updated", 23 | "contact.created" 24 | ] 25 | })) 26 | .send() 27 | .await 28 | .expect("Failed to executed request"); 29 | 30 | // Assert 31 | assert_eq!(201, response.status()); 32 | 33 | let body = response.json::().await.unwrap(); 34 | 35 | let id = EndpointId::try_from(body["id"].as_str().unwrap().to_string()) 36 | .expect("Invalid endpoint id"); 37 | 38 | let endpoint = server 39 | .storage() 40 | .endpoints 41 | .get(&id) 42 | .await 43 | .expect("Endpoint not found"); 44 | 45 | assert_eq!( 46 | TopicsList::from(vec!["contact.updated", "contact.created"]), 47 | endpoint.topics 48 | ); 49 | assert_eq!(EndpointStatus::Initial, endpoint.status); 50 | assert_eq!(app_id, endpoint.app_id); 51 | assert_eq!(Url::parse("http://localhost:8080").unwrap(), endpoint.url); 52 | } 53 | 54 | #[tokio::test] 55 | async fn validation() { 56 | // Arrange 57 | let server = run_test_server!(); 58 | let app_id = Given::from(&server).app().await; 59 | 60 | let test_cases = vec![ 61 | ( 62 | ApplicationId::new(), 63 | json!({"url": "http://localhost", "topics": ["contact.created"]}), 64 | 404, 65 | json!({"error": "Entity not found", "messages": []}), // fixme: change for 
something like "Application not found" 66 | ), 67 | ( 68 | app_id, 69 | json!({"url": "", "topics": ["contact.created"]}), 70 | 400, 71 | json!({"error": "Validation errors", "messages": ["Url should be valid"]}), 72 | ), 73 | ( 74 | app_id, 75 | json!({"url": "invalid-url", "topics": ["contact.created"]}), 76 | 400, 77 | json!({"error": "Validation errors", "messages": ["Url should be valid"]}), 78 | ), 79 | ( 80 | app_id, 81 | json!({"url": "http://localhost", "topics": []}), 82 | 400, 83 | json!({"error": "Validation errors", "messages": ["Should be at leas one topic"]}), 84 | ), 85 | ( 86 | app_id, 87 | json!({"url": "http://localhost", "topics": ["foo bar"]}), 88 | 400, 89 | json!({"error": "Validation errors", "messages": ["'foo bar' is invalid topic name"]}), 90 | ), 91 | ( 92 | app_id, 93 | json!({"url": "http://localhost", "topics": ["foo.bar", "bar baz"]}), 94 | 400, 95 | json!({"error": "Validation errors", "messages": ["'bar baz' is invalid topic name"]}), 96 | ), 97 | // ( 98 | // app_id, 99 | // json!({"url": "http://localhost", "topics": ["foo bar", "bar baz"]}), 100 | // 400, 101 | // json!({"error": "Validation errors", "messages": ["'foo bar' is invalid topic name", "'bar baz' is invalid topic name"]}), 102 | // ), 103 | ]; 104 | 105 | for test_case in test_cases { 106 | // Act 107 | let response = Client::new() 108 | .post(server.url(&format!("application/{}/endpoint", test_case.0))) 109 | .json(&test_case.1) 110 | .send() 111 | .await 112 | .expect("Failed to executed request"); 113 | 114 | // Assert 115 | assert_eq!(test_case.2, response.status()); 116 | assert_eq!(test_case.3, response.json::().await.unwrap()); 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /server/tests/api/create_event.rs: -------------------------------------------------------------------------------- 1 | use mockito::Matcher::Json; 2 | use mockito::Server; 3 | use reqwest::Client; 4 | use serde_json::{json, Value}; 5 | 6 | use server::configuration::domain::Topic; 7 | use server::types::EventId; 8 | 9 | use crate::common::{ 10 | assert_mock_with_retry, run_test_server_and_dispatcher, Given, TestEnvironment, 11 | }; 12 | 13 | #[tokio::test] 14 | async fn event_is_created_and_dispatched() { 15 | // Arrange 16 | let server = run_test_server_and_dispatcher!(); 17 | 18 | let mut destination_server = Server::new_async().await; 19 | let mock = destination_server 20 | .mock("POST", "/some_endpoint") 21 | .match_body(Json(json!({ 22 | "nested": { 23 | "foo": "bar" 24 | } 25 | }))) 26 | .with_status(201) 27 | .create_async() 28 | .await; 29 | 30 | let topic = "contact.created"; 31 | let (app_id, _) = Given::from(&server) 32 | .endpoint_with_app( 33 | &format!("{}/some_endpoint", destination_server.url()), 34 | vec![topic], 35 | ) 36 | .await; 37 | let payload = json!({ 38 | "nested": { 39 | "foo": "bar" 40 | } 41 | }); 42 | 43 | // Act 44 | let response = Client::new() 45 | .post(server.url(&format!("application/{}/event", app_id))) 46 | .json(&json!({ 47 | "topic": topic, 48 | "payload": payload 49 | })) 50 | .send() 51 | .await 52 | .expect("Failed to executed request"); 53 | 54 | // Assert 55 | assert_eq!(200, response.status()); 56 | let body = response.json::().await.unwrap(); 57 | let id = EventId::try_from(body["id"].as_str().unwrap().to_string()).expect("Invalid event id"); 58 | 59 | let event = server 60 | .storage() 61 | .events 62 | .get(id) 63 | .await 64 | .expect("Event wasn't persisted"); 65 | 66 | assert_eq!(id, event.id); 67 | 
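// The stored event must carry the exact JSON payload that was posted; comparing serde_json::Value instances keeps the check independent of key ordering.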
assert_eq!( 68 | serde_json::to_value(payload).unwrap(), 69 | serde_json::to_value(event.payload.clone()).unwrap() 70 | ); 71 | assert_eq!(Topic::try_from("contact.created").unwrap(), event.topic); 72 | assert_mock_with_retry!(mock); 73 | } 74 | -------------------------------------------------------------------------------- /server/tests/api/endpoint_status.rs: -------------------------------------------------------------------------------- 1 | use reqwest::Client; 2 | 3 | use server::configuration::domain::EndpointStatus; 4 | 5 | use crate::common::{run_test_server, Given, TestEnvironment}; 6 | 7 | const FAKE_URL: &str = "http://localhost:0"; 8 | const FAKE_TOPIC: &str = "contact.created"; 9 | 10 | #[tokio::test] 11 | async fn endpoint_can_be_disabled() { 12 | // Arrange 13 | let server = run_test_server!(); 14 | let (app_id, endpoint_id) = Given::from(&server) 15 | .endpoint_with_app(FAKE_URL, vec![FAKE_TOPIC]) 16 | .await; 17 | 18 | // Act 19 | let response = Client::new() 20 | .post(server.url(&format!( 21 | "application/{}/endpoint/{}/disable", 22 | app_id, endpoint_id 23 | ))) 24 | .send() 25 | .await 26 | .expect("Failed to executed request"); 27 | 28 | // Assert 29 | assert_eq!(204, response.status()); 30 | 31 | let endpoint = server 32 | .storage() 33 | .endpoints 34 | .get(&endpoint_id) 35 | .await 36 | .expect("Endpoint doesn't exist"); 37 | 38 | assert_eq!(EndpointStatus::DisabledManually, endpoint.status); 39 | } 40 | 41 | #[tokio::test] 42 | async fn endpoint_can_be_enabled() { 43 | // Arrange 44 | let server = run_test_server!(); 45 | let given = Given::from(&server); 46 | let (app_id, endpoint_id) = given.endpoint_with_app(FAKE_URL, vec![FAKE_TOPIC]).await; 47 | 48 | given.disable_endpoint(&app_id, &endpoint_id).await; 49 | 50 | // Act 51 | let response = Client::new() 52 | .post(server.url(&format!( 53 | "application/{}/endpoint/{}/enable", 54 | app_id, endpoint_id 55 | ))) 56 | .send() 57 | .await 58 | .expect("Failed to executed request"); 59 | 60 | // Assert 61 | assert_eq!(204, response.status()); 62 | 63 | let endpoint = server 64 | .storage() 65 | .endpoints 66 | .get(&endpoint_id) 67 | .await 68 | .expect("Endpoint doesn't exist"); 69 | 70 | assert_eq!(EndpointStatus::EnabledManually, endpoint.status); 71 | } 72 | -------------------------------------------------------------------------------- /server/tests/api/health_check.rs: -------------------------------------------------------------------------------- 1 | use reqwest::Client; 2 | 3 | use crate::common::{run_test_server, TestEnvironment}; 4 | 5 | #[tokio::test] 6 | async fn health_check_works() { 7 | // Arrange 8 | let server = run_test_server!(); 9 | 10 | // Act 11 | let response = Client::new() 12 | .get(server.url("health_check")) 13 | .send() 14 | .await 15 | .expect("Failed to executed request"); 16 | 17 | // Assert 18 | assert_eq!(204, response.status()); 19 | assert_eq!(0, response.content_length().unwrap()); 20 | } 21 | -------------------------------------------------------------------------------- /server/tests/api/main.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | mod create_application; 3 | mod create_endpoint; 4 | mod create_event; 5 | mod endpoint_status; 6 | mod health_check; 7 | --------------------------------------------------------------------------------
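A minimal usage sketch for the retry policy builder defined in server/src/retry.rs. It assumes the server crate is available as a library dependency; the attempt count, base delay, multiplier and jitter factor below are illustrative values, not configuration taken from this repository.

use std::time::Duration;

use server::retry::RetryPolicyBuilder;

fn main() -> Result<(), String> {
    // Up to five attempts, exponential backoff with a 1 s base delay and multiplier 2,
    // wrapped with +/-30% jitter by the randomize decorator.
    let retry = RetryPolicyBuilder::new()
        .max_retries(5)
        .exponential(2, Duration::from_secs(1))
        .randomize(0.3)
        .build()?;

    let mut attempt = 1;
    while retry.is_retryable(attempt) {
        // A dispatcher would schedule the next delivery attempt after this delay;
        // here the computed waiting time is only printed.
        println!("attempt {attempt}: wait {:?}", retry.get_waiting_time(attempt));
        attempt += 1;
    }

    Ok(())
}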