├── .deepsource.toml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── images │ ├── logo.png │ ├── logo2.png │ └── logo3.png └── workflows │ └── rust.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── benches └── benchmark.rs ├── examples ├── ack-transaction.rs ├── amqp-send-recv.rs ├── auth-example.rs ├── auth-send-recv.rs ├── batch-transaction.rs ├── broker-integration-test.rs ├── idempotency-test.rs ├── improved-transaction.rs └── persistent-ack.rs ├── src ├── amqp_handler.rs ├── auth │ ├── authentication.rs │ ├── authorization.rs │ ├── mod.rs │ ├── tests.rs │ └── token.rs ├── bin │ ├── commands │ │ ├── consume.rs │ │ ├── mod.rs │ │ ├── schema.rs │ │ ├── send.rs │ │ ├── start.rs │ │ ├── status.rs │ │ └── stop.rs │ ├── error │ │ └── mod.rs │ └── pilgrimage.rs ├── broker │ ├── cluster.rs │ ├── config.rs │ ├── consumer │ │ ├── group.rs │ │ └── mod.rs │ ├── error.rs │ ├── leader │ │ ├── election.rs │ │ ├── heartbeat.rs │ │ ├── mod.rs │ │ └── state.rs │ ├── log_compression.rs │ ├── message_queue.rs │ ├── metrics.rs │ ├── mod.rs │ ├── node.rs │ ├── node_management.rs │ ├── replication.rs │ ├── scaling.rs │ ├── storage.rs │ └── topic.rs ├── crypto │ └── mod.rs ├── lib.rs ├── main.rs ├── message │ ├── ack.rs │ ├── message.rs │ ├── metadata.rs │ └── mod.rs ├── schema │ ├── compatibility │ │ └── mod.rs │ ├── error.rs │ ├── message_schema.rs │ ├── mod.rs │ ├── registry │ │ └── mod.rs │ └── version │ │ └── mod.rs ├── subscriber │ ├── mod.rs │ └── types.rs ├── tests │ ├── broker_tests.rs │ └── schema_tests.rs └── web_console.rs └── tests └── simple_message_test.rs /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | test_patterns = [ 4 | ".github/**", 5 | ".gitignore", 6 | "CODE_OF_CONDUCT.md", 7 | "CONTRIBUTING.md", 8 | "LICENSE", 9 | "README.md" 10 | ] 11 | 12 | [[analyzers]] 13 | name = "rust" 14 | 15 | 
[analyzers.meta] 16 | msrv = "stable" -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 
15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mila411/pilgrimage/f86012c5bf6c1de69f6de7fad4b31206d0e50119/.github/images/logo.png -------------------------------------------------------------------------------- /.github/images/logo2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mila411/pilgrimage/f86012c5bf6c1de69f6de7fad4b31206d0e50119/.github/images/logo2.png -------------------------------------------------------------------------------- /.github/images/logo3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mila411/pilgrimage/f86012c5bf6c1de69f6de7fad4b31206d0e50119/.github/images/logo3.png -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: ["main"] 6 | paths: 7 | - "bench/**" 8 | - "examples/**" 9 | - "src/**" 10 | - "Cargo.toml" 11 | pull_request: 12 | branches: ["main"] 13 | paths: 14 | - "bench/**" 15 | - "examples/**" 16 | - "src/**" 17 | - "Cargo.toml" 18 | 19 | env: 20 | CARGO_TERM_COLOR: always 21 | 22 | jobs: 23 | build: 24 | runs-on: ubuntu-latest 25 | 26 | steps: 27 | - uses: actions/checkout@v4 28 | 29 | - name: Install nightly Rust (2024 Edition) 30 | uses: actions-rs/toolchain@v1 31 | with: 32 | toolchain: nightly 33 | target: x86_64-unknown-linux-gnu 34 | override: true 35 | 36 | - name: 
Verify Rust version 37 | run: rustc --version 38 | 39 | - name: Build 40 | run: cargo build 41 | 42 | test: 43 | runs-on: ubuntu-latest 44 | needs: build 45 | 46 | services: 47 | rabbitmq: 48 | image: rabbitmq:3.12-management 49 | ports: 50 | - 5672:5672 51 | options: >- 52 | --health-cmd "rabbitmq-diagnostics -q ping" 53 | --health-interval 10s 54 | --health-timeout 5s 55 | --health-retries 5 56 | 57 | steps: 58 | - uses: actions/checkout@v4 59 | 60 | - name: Install nightly Rust (2024 Edition) 61 | uses: actions-rs/toolchain@v1 62 | with: 63 | toolchain: nightly 64 | target: x86_64-unknown-linux-gnu 65 | override: true 66 | 67 | - name: Install Tarpaulin 68 | run: cargo install cargo-tarpaulin 69 | 70 | - name: Run tests with coverage 71 | run: cargo tarpaulin --out Xml 72 | 73 | - name: Upload to Codecov 74 | uses: codecov/codecov-action@v5 75 | with: 76 | token: ${{ secrets.CODECOV_TOKEN }} 77 | files: ./cobertura.xml 78 | 79 | - name: Upload test results to Codecov 80 | if: ${{ !cancelled() }} 81 | uses: codecov/test-results-action@v1 82 | with: 83 | token: ${{ secrets.CODECOV_TOKEN }} 84 | files: ./cobertura.xml 85 | 86 | publish: 87 | runs-on: ubuntu-latest 88 | needs: test 89 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' 90 | 91 | steps: 92 | - uses: actions/checkout@v4 93 | 94 | - name: Check for NONE in commit message 95 | id: check_none 96 | run: | 97 | if git log -1 --pretty=%B | grep -q "NONE"; then 98 | echo "skip_version_bump=true" >> $GITHUB_ENV 99 | else 100 | echo "skip_version_bump=false" >> $GITHUB_ENV 101 | fi 102 | 103 | - name: Install nightly Rust (2024 Edition) 104 | if: env.skip_version_bump == 'false' 105 | uses: actions-rs/toolchain@v1 106 | with: 107 | toolchain: nightly 108 | target: x86_64-unknown-linux-gnu 109 | override: true 110 | 111 | - name: Bump version and create Git tag 112 | if: env.skip_version_bump == 'false' 113 | id: bump_version 114 | run: | 115 | # Get the latest commit message 116 | 
COMMITS=$(git log --format=%B -n 1) 117 | 118 | # Initial value for version bump 119 | BUMP="patch" 120 | 121 | # Determine the version based on the commit message 122 | if echo "$COMMITS" | grep -q "BREAKING CHANGE"; then 123 | BUMP="major" 124 | elif echo "$COMMITS" | grep -q "^feat"; then 125 | BUMP="minor" 126 | elif echo "$COMMITS" | grep -q "^fix"; then 127 | BUMP="patch" 128 | fi 129 | 130 | echo "Determined version bump: $BUMP" 131 | 132 | # Get the current version 133 | CURRENT_VERSION=$(grep '^version' Cargo.toml | awk -F\" '{print $2}') 134 | echo "Current version: $CURRENT_VERSION" 135 | 136 | # Calculate the new version 137 | IFS='.' read -r MAJOR MINOR PATCH <<< "$CURRENT_VERSION" 138 | if [ "$BUMP" = "major" ]; then 139 | MAJOR=$((MAJOR + 1)) 140 | MINOR=0 141 | PATCH=0 142 | elif [ "$BUMP" = "minor" ]; then 143 | MINOR=$((MINOR + 1)) 144 | PATCH=0 145 | elif [ "$BUMP" = "patch" ]; then 146 | PATCH=$((PATCH + 1)) 147 | fi 148 | NEW_VERSION="$MAJOR.$MINOR.$PATCH" 149 | echo "New version: $NEW_VERSION" 150 | 151 | # Update the version of Cargo.toml 152 | sed -i "s/^version = \".*\"/version = \"$NEW_VERSION\"/" Cargo.toml 153 | 154 | # Update Cargo.lock 155 | cargo check 156 | 157 | # Git settings 158 | git config user.name "github-actions[bot]" 159 | git config user.email "github-actions[bot]@users.noreply.github.com" 160 | 161 | # Commit changes 162 | git add Cargo.toml Cargo.lock 163 | git commit -m "Bump version to $NEW_VERSION" || echo "No changes to commit" 164 | 165 | # Create a tag 166 | git tag "v$NEW_VERSION" 167 | 168 | # Output a new version to GitHub Actions 169 | echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT 170 | 171 | - name: Push changes and tags 172 | if: env.skip_version_bump == 'false' 173 | run: git push origin main --tags 174 | env: 175 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 176 | 177 | - name: Create GitHub Release 178 | if: env.skip_version_bump == 'false' 179 | env: 180 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 181 | run: 
| 182 | gh release create "v${{ steps.bump_version.outputs.new_version }}" \ 183 | --title "v${{ steps.bump_version.outputs.new_version }}" \ 184 | --notes "Release version v${{ steps.bump_version.outputs.new_version }}" \ 185 | --generate-notes 186 | 187 | - name: Publish to crates.io 188 | if: env.skip_version_bump == 'false' 189 | env: 190 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} 191 | run: cargo publish 192 | # run: cargo publish --allow-dirty 193 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /logs 3 | *log* 4 | !log_compression.rs 5 | *old 6 | tmp 7 | note.txt 8 | storage_path 9 | test_* 10 | test_db_path 11 | processed_messages.json 12 | 13 | ### RustRover ### 14 | # Generated automatically 15 | .idea 16 | 17 | # Created by https://www.toptal.com/developers/gitignore/api/macos,rust,rust-analyzer,visualstudiocode 18 | # Edit at https://www.toptal.com/developers/gitignore?templates=macos,rust,rust-analyzer,visualstudiocode 19 | 20 | ### macOS ### 21 | # General 22 | .DS_Store 23 | .AppleDouble 24 | .LSOverride 25 | 26 | # Icon must end with two \r 27 | Icon 28 | 29 | # Thumbnails 30 | ._* 31 | 32 | # Files that might appear in the root of a volume 33 | .DocumentRevisions-V100 34 | .fseventsd 35 | .Spotlight-V100 36 | .TemporaryItems 37 | .Trashes 38 | .VolumeIcon.icns 39 | .com.apple.timemachine.donotpresent 40 | 41 | # Directories potentially created on remote AFP share 42 | .AppleDB 43 | .AppleDesktop 44 | Network Trash Folder 45 | Temporary Items 46 | .apdisk 47 | 48 | ### macOS Patch ### 49 | # iCloud generated files 50 | *.icloud 51 | 52 | ### Rust ### 53 | # Generated by Cargo 54 | # will have compiled files and executables 55 | debug/ 56 | target/ 57 | 58 | # These are backup files generated by rustfmt 59 | **/*.rs.bk 60 | 61 | # MSVC Windows builds of rustc generate these, which store 
debugging information 62 | *.pdb 63 | 64 | ### rust-analyzer ### 65 | # Can be generated by other build systems other than cargo (ex: bazelbuild/rust_rules) 66 | rust-project.json 67 | 68 | ### VisualStudioCode ### 69 | .vscode/* 70 | !.vscode/settings.json 71 | !.vscode/tasks.json 72 | !.vscode/launch.json 73 | !.vscode/extensions.json 74 | !.vscode/*.code-snippets 75 | 76 | # Local History for Visual Studio Code 77 | .history/ 78 | 79 | # Built Visual Studio Code Extensions 80 | *.vsix 81 | 82 | ### VisualStudioCode Patch ### 83 | # Ignore all local history of files 84 | .history 85 | .ionide 86 | 87 | # End of https://www.toptal.com/developers/gitignore/api/macos,rust,rust-analyzer,visualstudiocode 88 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 
55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | . 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 
99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to pilgrimage 2 | 3 | Thank you for your interest in contributing to **[Project Name]**! We appreciate your time and effort in improving this project. This guide will help you get started. 4 | 5 | --- 6 | 7 | ## Table of Contents 8 | 1. [Getting Started](#getting-started) 9 | 2. [How to Report Issues](#how-to-report-issues) 10 | 3. 
[Submitting Changes](#submitting-changes) 11 | 4. [Code of Conduct](#code-of-conduct) 12 | 5. [Style Guide](#style-guide) 13 | 6. [License](#license) 14 | 15 | --- 16 | 17 | ## Getting Started 18 | 19 | 1. **Fork the repository**: 20 | - Click the "Fork" button at the top-right corner of this repository. 21 | - Clone your fork locally: 22 | ```bash 23 | git clone https://github.com/your-username/[project-name].git 24 | ``` 25 | 26 | 2. **Set up the project**: 27 | - Follow the instructions in the `README.md` to install dependencies and run the project locally. 28 | 29 | 3. **Create a branch**: 30 | - Always create a new branch for your work: 31 | ```bash 32 | git checkout -b feature/your-feature-name 33 | ``` 34 | 35 | --- 36 | 37 | ## How to Report Issues 38 | 39 | 1. **Search for existing issues**: 40 | - Before opening a new issue, check the [Issues](https://github.com/[organization-name]/[project-name]/issues) tab to see if it has already been reported. 41 | 42 | 2. **Create a new issue**: 43 | - If no existing issue matches, open a new one. Include: 44 | - A clear and descriptive title. 45 | - Steps to reproduce (if applicable). 46 | - Expected vs. actual results. 47 | - Screenshots or error logs, if possible. 48 | 49 | 3. **Label the issue**: 50 | - Use relevant labels such as `bug`, `enhancement`, or `documentation`. 51 | 52 | --- 53 | 54 | ## Submitting Changes 55 | 56 | 1. **Write clear, concise commits**: 57 | - Follow this format for commit messages: 58 | ``` 59 | feat: Short description of your feature 60 | fix: Short description of the bug fixed 61 | docs: Update documentation 62 | ``` 63 | 64 | 2. **Push to your branch**: 65 | ```bash 66 | git push origin feature/your-feature-name 67 | 68 | 3. **Open a pull request:** 69 | - Go to your fork on GitHub and click "Compare & Pull Request". 70 | - Include: 71 | - A clear and descriptive title. 72 | - A detailed description of your changes. 73 | - References to related issues (e.g., "Closes #123"). 
74 | 75 | 4. **Wait for review:** 76 | - Address feedback from maintainers promptly. 77 | 78 | ## Style Guide 79 | 80 | 1. Follow project conventions: 81 | - Code must adhere to existing styles and patterns in the project. 82 | 83 | 2. Linting and formatting: 84 | - Run linting tools before committing: 85 | ```sh 86 | cargo fmt && cargo clippy 87 | ``` 88 | 89 | 3. Write tests: 90 | - Ensure new features or fixes are accompanied by appropriate tests. 91 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "pilgrimage" 3 | version = "0.15.0" 4 | edition = "2024" 5 | authors = ["Kenny Miller Song"] 6 | description = "A Kafka-like message broker in Rust" 7 | readme = "README.md" 8 | repository = "https://github.com/mila411/rust-kafka-like" 9 | license = "MIT" 10 | keywords = ["kafka", "message", "broker", "rust"] 11 | categories = ["rust-patterns"] 12 | 13 | [lib] 14 | path = "src/lib.rs" 15 | crate-type = ["rlib"] 16 | 17 | [[bin]] 18 | name = "pilgrimage" 19 | path = "src/bin/pilgrimage.rs" 20 | 21 | [[bin]] 22 | name = "web" 23 | path = "src/main.rs" 24 | 25 | [dependencies] 26 | clap = "3.2.0" 27 | ctrlc = "3.4.5" 28 | serde = { version = "1.0", features = ["derive"] } 29 | serde_json = "1.0" 30 | log = "0.4" 31 | simplelog = "0.9" 32 | aes-gcm = "0.10.3" 33 | rand = "0.8.5" 34 | jsonwebtoken = "8.1.1" 35 | tempfile = "3.2" 36 | flate2 = "1.0.35" 37 | actix-web = "4" 38 | tokio = { version = "1", features = ["full"] } 39 | prometheus = "0.13" 40 | lazy_static = "1.4" 41 | uuid = { version = "1.0", features = ["v4", "serde"] } 42 | chrono = { version = "0.4", features = ["serde"] } 43 | tokio-amqp = "2.0.0" 44 | futures-util = "0.3" 45 | lapin = "2.5.0" 46 | parking_lot = "0.12.4" 47 | glob = "0.3.2" 48 | tracing = "0.1.41" 49 | thiserror = "2.0.12" 50 | reqwest = { version = "0.12.19", features = ["json"] } 51 | 52 | 
[dev-dependencies] 53 | env_logger = "0.10" 54 | criterion = "0.3" 55 | tempfile = "3.2" 56 | assert_cmd = "2" 57 | predicates = "3" 58 | actix-rt = "2.5" 59 | actix-test = "0.1.5" 60 | 61 | [[bench]] 62 | name = "benchmark" 63 | harness = false 64 | 65 | [profile.bench] 66 | opt-level = 3 67 | 68 | [profile.release] 69 | codegen-units = 1 70 | lto = true 71 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Kenny Miller Song 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /benches/benchmark.rs: -------------------------------------------------------------------------------- 1 | use criterion::{BenchmarkId, Criterion, black_box, criterion_group, criterion_main}; 2 | use pilgrimage::broker::Broker; 3 | use pilgrimage::schema::message_schema::MessageSchema; 4 | use std::fs; 5 | use std::sync::{Arc, Mutex}; 6 | use uuid::Uuid; 7 | 8 | /// Common setup for benchmarks 9 | struct BenchmarkSetup { 10 | broker: Arc>, 11 | topic_name: String, 12 | } 13 | 14 | impl BenchmarkSetup { 15 | fn new(broker_id: &str, storage_path: &str) -> Self { 16 | // Clean up any existing storage directory 17 | let storage_dir = std::path::Path::new(storage_path) 18 | .parent() 19 | .unwrap_or(std::path::Path::new(".")); 20 | let _ = fs::remove_dir_all(storage_dir); 21 | 22 | // Create storage directory 23 | fs::create_dir_all(storage_dir).expect("Failed to create storage directory"); 24 | 25 | let broker = Arc::new(Mutex::new(Broker::new(broker_id, 3, 2, storage_path))); 26 | 27 | let topic_name = format!("bench-topic-{}", Uuid::new_v4()); 28 | 29 | // Create test topic 30 | { 31 | let mut broker_instance = broker.lock().unwrap(); 32 | broker_instance 33 | .create_topic(&topic_name, None) 34 | .expect("Failed to create topic"); 35 | } 36 | 37 | BenchmarkSetup { broker, topic_name } 38 | } 39 | 40 | fn create_test_message(&self, content: &str, partition: usize) -> MessageSchema { 41 | MessageSchema::new() 42 | .with_content(content.to_string()) 43 | .with_topic(self.topic_name.clone()) 44 | .with_partition(partition) 45 | } 46 | } 47 | 48 | impl Drop for BenchmarkSetup { 49 | fn drop(&mut self) { 50 | // Clean up storage after benchmark - just remove the storage directory 51 | let _ = fs::remove_dir_all("storage"); 52 | } 53 | } 54 | 55 | /// Benchmark message sending operations 56 | fn benchmark_message_sending(c: &mut Criterion) { 57 | let mut group = 
c.benchmark_group("message_sending"); 58 | 59 | // Test different message sizes 60 | let medium_content = "x".repeat(1024); 61 | let large_content = "x".repeat(10240); 62 | let message_sizes = vec![ 63 | ("small", "Hello World!"), 64 | ("medium", medium_content.as_str()), 65 | ("large", large_content.as_str()), 66 | ]; 67 | 68 | for (size_name, content) in message_sizes { 69 | let setup = BenchmarkSetup::new( 70 | &format!("send-bench-{}", size_name), 71 | &format!("storage/bench-send-{}.log", size_name), 72 | ); 73 | 74 | group.bench_with_input( 75 | BenchmarkId::new("send_message", size_name), 76 | &(setup, content), 77 | |b, (setup, content)| { 78 | b.iter(|| { 79 | let mut broker_instance = setup.broker.lock().unwrap(); 80 | let msg = setup.create_test_message(content, 0); 81 | let _ = black_box(broker_instance.send_message(msg)); 82 | }) 83 | }, 84 | ); 85 | } 86 | 87 | group.finish(); 88 | } 89 | 90 | /// Benchmark message receiving operations 91 | fn benchmark_message_receiving(c: &mut Criterion) { 92 | let mut group = c.benchmark_group("message_receiving"); 93 | 94 | let setup = BenchmarkSetup::new("receive-bench", "storage/bench-receive.log"); 95 | 96 | // Pre-populate with messages 97 | { 98 | let mut broker_instance = setup.broker.lock().unwrap(); 99 | for i in 0..100 { 100 | let msg = setup.create_test_message(&format!("Message {}", i), 0); 101 | broker_instance.send_message(msg).unwrap(); 102 | } 103 | } 104 | 105 | group.bench_function("receive_message", |b| { 106 | b.iter(|| { 107 | let broker_instance = setup.broker.lock().unwrap(); 108 | let _ = black_box(broker_instance.receive_message(&setup.topic_name, 0)); 109 | }) 110 | }); 111 | 112 | group.finish(); 113 | } 114 | 115 | /// Benchmark topic operations 116 | fn benchmark_topic_operations(c: &mut Criterion) { 117 | let mut group = c.benchmark_group("topic_operations"); 118 | 119 | let setup = BenchmarkSetup::new("topic-bench", "storage/bench-topic.log"); 120 | 121 | 
group.bench_function("create_topic", |b| { 122 | b.iter(|| { 123 | let mut broker_instance = setup.broker.lock().unwrap(); 124 | let topic_name = format!("temp-topic-{}", Uuid::new_v4()); 125 | let _ = black_box(broker_instance.create_topic(&topic_name, None)); 126 | }) 127 | }); 128 | 129 | group.bench_function("list_topics", |b| { 130 | b.iter(|| { 131 | let broker_instance = setup.broker.lock().unwrap(); 132 | let _ = black_box(broker_instance.list_topics()); 133 | }) 134 | }); 135 | 136 | group.finish(); 137 | } 138 | 139 | /// Benchmark partition operations 140 | fn benchmark_partition_operations(c: &mut Criterion) { 141 | let mut group = c.benchmark_group("partition_operations"); 142 | 143 | // Test different partition counts 144 | for partition_count in [1usize, 2, 4, 8].iter() { 145 | let setup = BenchmarkSetup::new( 146 | &format!("partition-bench-{}", partition_count), 147 | &format!("storage/bench-partition-{}.log", partition_count), 148 | ); 149 | 150 | group.bench_with_input( 151 | BenchmarkId::new("send_to_partitions", partition_count), 152 | &(setup, *partition_count), 153 | |b, (setup, partition_count)| { 154 | b.iter(|| { 155 | let mut broker_instance = setup.broker.lock().unwrap(); 156 | let partition = (0..*partition_count).collect::>(); 157 | for &p in &partition { 158 | let msg = setup.create_test_message("Partition test", p); 159 | let _ = black_box(broker_instance.send_message(msg)); 160 | } 161 | }) 162 | }, 163 | ); 164 | } 165 | 166 | group.finish(); 167 | } 168 | 169 | /// Benchmark concurrent operations 170 | fn benchmark_concurrent_operations(c: &mut Criterion) { 171 | let mut group = c.benchmark_group("concurrent_operations"); 172 | 173 | let setup = BenchmarkSetup::new("concurrent-bench", "storage/bench-concurrent.log"); 174 | 175 | group.bench_function("concurrent_send_receive", |b| { 176 | b.iter(|| { 177 | // Send a message 178 | { 179 | let mut broker_instance = setup.broker.lock().unwrap(); 180 | let msg = 
setup.create_test_message("Concurrent test", 0); 181 | broker_instance.send_message(msg).unwrap(); 182 | } 183 | 184 | // Immediately try to receive 185 | { 186 | let broker_instance = setup.broker.lock().unwrap(); 187 | let _ = black_box(broker_instance.receive_message(&setup.topic_name, 0)); 188 | } 189 | }) 190 | }); 191 | 192 | group.finish(); 193 | } 194 | 195 | /// Benchmark throughput with batch operations 196 | fn benchmark_throughput(c: &mut Criterion) { 197 | let mut group = c.benchmark_group("throughput"); 198 | group.sample_size(10); // Reduce sample size for batch operations 199 | 200 | let batch_sizes = vec![10, 100, 1000]; 201 | 202 | for batch_size in batch_sizes { 203 | let setup = BenchmarkSetup::new( 204 | &format!("throughput-bench-{}", batch_size), 205 | &format!("storage/bench-throughput-{}.log", batch_size), 206 | ); 207 | 208 | group.bench_with_input( 209 | BenchmarkId::new("batch_send", batch_size), 210 | &(setup, batch_size), 211 | |b, (setup, batch_size)| { 212 | b.iter(|| { 213 | let mut broker_instance = setup.broker.lock().unwrap(); 214 | for i in 0..*batch_size { 215 | let msg = setup.create_test_message(&format!("Batch message {}", i), 0); 216 | broker_instance.send_message(msg).unwrap(); 217 | } 218 | }) 219 | }, 220 | ); 221 | } 222 | 223 | group.finish(); 224 | } 225 | 226 | criterion_group!( 227 | benches, 228 | benchmark_message_sending, 229 | benchmark_message_receiving, 230 | benchmark_topic_operations, 231 | benchmark_partition_operations, 232 | benchmark_concurrent_operations, 233 | benchmark_throughput 234 | ); 235 | criterion_main!(benches); 236 | -------------------------------------------------------------------------------- /examples/ack-transaction.rs: -------------------------------------------------------------------------------- 1 | use chrono::Utc; 2 | use pilgrimage::broker::Broker; 3 | use pilgrimage::message::ack::{AckStatus, MessageAck}; 4 | use pilgrimage::message::message::Message; 5 | use 
std::time::Duration; 6 | use tokio::time::sleep; 7 | 8 | #[tokio::main] 9 | async fn main() -> Result<(), Box> { 10 | let mut broker = Broker::new("broker1", 3, 2, "logs"); 11 | broker.create_topic("test_topic", None)?; 12 | 13 | let mut handles = vec![]; 14 | 15 | // Send multiple messages 16 | for i in 0..5 { 17 | let message = Message::new(format!("Message {}", i)); 18 | println!("Send: ID={}, Content={}", message.id, message.content); 19 | 20 | let msg_clone = message.clone(); 21 | let handle = tokio::spawn(async move { 22 | // Set a different delay for each message 23 | sleep(Duration::from_millis(500 * (i + 1) as u64)).await; 24 | 25 | let ack = MessageAck::new( 26 | msg_clone.id, 27 | Utc::now(), 28 | AckStatus::Processed, 29 | format!("consumer{}", i % 3 + 1), 30 | 0, 31 | ); 32 | println!("ACK issued: {:?}", ack); 33 | }); 34 | handles.push(handle); 35 | } 36 | 37 | // Waiting for all ACKs to be completed 38 | for handle in handles { 39 | handle.await?; 40 | } 41 | 42 | Ok(()) 43 | } 44 | -------------------------------------------------------------------------------- /examples/amqp-send-recv.rs: -------------------------------------------------------------------------------- 1 | use futures_util::StreamExt; 2 | use lapin::{ 3 | BasicProperties, Channel, Connection, ConnectionProperties, 4 | options::{BasicAckOptions, BasicConsumeOptions, BasicPublishOptions, QueueDeclareOptions}, 5 | types::FieldTable, 6 | }; 7 | use std::{error::Error, time::Duration}; 8 | use tokio::{signal, time::timeout}; 9 | 10 | const MESSAGE_COUNT: usize = 5; 11 | 12 | async fn publish_message(channel: &Channel, message: &str) -> Result<(), Box> { 13 | channel 14 | .basic_publish( 15 | "", 16 | "hello", 17 | BasicPublishOptions::default(), 18 | message.as_bytes(), 19 | BasicProperties::default(), 20 | ) 21 | .await? 
22 | .await?; 23 | println!("Sent '{}'", message); 24 | Ok(()) 25 | } 26 | 27 | #[tokio::main] 28 | async fn main() -> Result<(), Box> { 29 | let addr = "amqp://127.0.0.1:5672/%2f"; 30 | let conn = Connection::connect(addr, ConnectionProperties::default()).await?; 31 | let channel = conn.create_channel().await?; 32 | 33 | let _queue = channel 34 | .queue_declare( 35 | "hello", 36 | QueueDeclareOptions::default(), 37 | FieldTable::default(), 38 | ) 39 | .await?; 40 | 41 | // Send multiple messages 42 | for i in 0..MESSAGE_COUNT { 43 | let message = format!("Message {}", i + 1); 44 | publish_message(&channel, &message).await?; 45 | } 46 | 47 | let mut consumer = channel 48 | .basic_consume( 49 | "hello", 50 | "my_consumer", 51 | BasicConsumeOptions::default(), 52 | FieldTable::default(), 53 | ) 54 | .await?; 55 | 56 | println!("Waiting for {} messages...", MESSAGE_COUNT); 57 | let mut received_count = 0; 58 | 59 | loop { 60 | tokio::select! { 61 | _ = signal::ctrl_c() => { 62 | println!("Received Ctrl+C, shutting down..."); 63 | break; 64 | } 65 | message = timeout(Duration::from_secs(5), consumer.next()) => { 66 | match message { 67 | Ok(Some(delivery_result)) => { 68 | if let Ok(delivery) = delivery_result { 69 | if let Ok(message) = std::str::from_utf8(&delivery.data) { 70 | println!("Received message: {}", message); 71 | delivery.ack(BasicAckOptions::default()).await?; 72 | received_count += 1; 73 | if received_count >= MESSAGE_COUNT { 74 | println!("All messages received. 
Shutting down..."); 75 | break; 76 | } 77 | } 78 | } 79 | }, 80 | Ok(None) => break, 81 | Err(_) => continue, 82 | } 83 | } 84 | } 85 | } 86 | 87 | Ok(()) 88 | } 89 | -------------------------------------------------------------------------------- /examples/auth-example.rs: -------------------------------------------------------------------------------- 1 | use pilgrimage::auth::authentication::{Authenticator, BasicAuthenticator}; 2 | use pilgrimage::auth::authorization::{Permission, RoleBasedAccessControl}; 3 | use pilgrimage::auth::token::TokenManager; 4 | 5 | fn main() { 6 | // Authentication Setup 7 | let mut authenticator = BasicAuthenticator::new(); 8 | authenticator.add_user("user1", "password1"); 9 | 10 | // Approval Setup 11 | let mut rbac = RoleBasedAccessControl::new(); 12 | rbac.add_role( 13 | "admin", 14 | vec![Permission::Read, Permission::Write, Permission::Admin], 15 | ); 16 | rbac.add_role("user", vec![Permission::Read]); 17 | rbac.assign_role("user1", "admin"); 18 | 19 | // Token Manager Setup 20 | let token_manager = TokenManager::new(b"secret"); 21 | 22 | // User Authentication 23 | let username = "user1"; 24 | let password = "password1"; 25 | if authenticator.authenticate(username, password).unwrap() { 26 | println!("User {} authenticated successfully", username); 27 | 28 | // Generating Tokens 29 | let roles = vec!["admin".to_string()]; 30 | let token = token_manager 31 | .generate_token(username, roles.clone()) 32 | .unwrap(); 33 | println!("Generated token: {}", token); 34 | 35 | // Token verification 36 | let claims = token_manager.verify_token(&token).unwrap(); 37 | println!("Token verified for user: {}", claims.sub); 38 | 39 | // Approval confirmation 40 | if rbac.has_permission(&claims.sub, &Permission::Admin) { 41 | println!("User {} has admin permission", claims.sub); 42 | } else { 43 | println!("User {} does not have admin permission", claims.sub); 44 | } 45 | } else { 46 | println!("Authentication failed for user {}", username); 47 | 
} 48 | } 49 | -------------------------------------------------------------------------------- /examples/auth-send-recv.rs: -------------------------------------------------------------------------------- 1 | use pilgrimage::auth::authentication::{Authenticator, BasicAuthenticator}; 2 | use pilgrimage::auth::authorization::{Permission, RoleBasedAccessControl}; 3 | use pilgrimage::auth::token::TokenManager; 4 | use pilgrimage::broker::{Broker, MessageSchema}; 5 | use pilgrimage::crypto::Encryptor; 6 | use std::error::Error; 7 | 8 | fn main() -> Result<(), Box> { 9 | let mut authenticator = BasicAuthenticator::new(); 10 | authenticator.add_user("user1", "password1"); 11 | 12 | let mut rbac = RoleBasedAccessControl::new(); 13 | rbac.add_role( 14 | "admin", 15 | vec![Permission::Read, Permission::Write, Permission::Admin], 16 | ); 17 | rbac.assign_role("user1", "admin"); 18 | 19 | let token_manager = TokenManager::new(b"secret"); 20 | let username = "user1"; 21 | let password = "password1"; 22 | 23 | // 32-byte encryption key generation 24 | let key: [u8; 32] = rand::random(); 25 | let encryptor = Encryptor::new(&key); 26 | 27 | // Authentication process 28 | if authenticator.authenticate(username, password)? 
{ 29 | println!("User {} authentication successful", username); 30 | 31 | let roles = vec!["admin".to_string()]; 32 | let _token = token_manager.generate_token(username, roles)?; 33 | 34 | let mut broker = Broker::new("broker1", 1, 5, "storage/secure_broker.log"); 35 | broker.create_topic("secure_messages", None)?; 36 | let message = "Secret Message"; 37 | 38 | // Encrypting and sending messages 39 | let encrypted_data = encryptor.encrypt(message.as_bytes())?; 40 | let encrypted_content = String::from_utf8_lossy(&encrypted_data).to_string(); 41 | 42 | let message = MessageSchema::new() 43 | .with_content(encrypted_content) 44 | .with_topic("secure_messages".to_string()) 45 | .with_partition(0); 46 | 47 | broker.send_message(message)?; 48 | println!("Encrypted message sent."); 49 | } 50 | 51 | Ok(()) 52 | } 53 | -------------------------------------------------------------------------------- /examples/batch-transaction.rs: -------------------------------------------------------------------------------- 1 | use pilgrimage::broker::{Broker, MessageSchema}; 2 | use std::sync::{Arc, Mutex}; 3 | use std::thread; 4 | use std::time::Duration; 5 | 6 | // message receiving handler 7 | fn start_message_receiver(broker: Arc>) -> thread::JoinHandle<()> { 8 | thread::spawn(move || { 9 | loop { 10 | if let Ok(broker) = broker.lock() { 11 | if let Ok(Some(message)) = broker.receive_message("test_topic", 0) { 12 | println!("Received: content={}", message.content); 13 | } 14 | } 15 | thread::sleep(Duration::from_millis(100)); 16 | } 17 | }) 18 | } 19 | 20 | // batch transaction processing 21 | fn process_batch_transaction(broker: &Mutex, messages: Vec) -> Result<(), String> { 22 | let mut broker = broker.lock().map_err(|e| e.to_string())?; 23 | 24 | // Create Topic 25 | let _ = broker.delete_topic("test_topic"); 26 | broker.create_topic("test_topic", None)?; 27 | 28 | // transaction initiation 29 | broker.begin_transaction(); 30 | println!("Transaction start: {} messages", 
messages.len()); 31 | 32 | // Send all messages 33 | for (index, content) in messages.into_iter().enumerate() { 34 | let message = MessageSchema::new() 35 | .with_content(content.clone()) 36 | .with_topic("test_topic".to_string()) 37 | .with_partition(0); 38 | 39 | println!("Message {}: content={}", index + 1, content); 40 | 41 | if let Err(e) = broker.send_message(message) { 42 | // Rollback in case of error 43 | broker.rollback_transaction(); 44 | return Err(format!("Transmission error (message {}): {}", index + 1, e)); 45 | } 46 | } 47 | 48 | // Commit Transaction 49 | if let Err(e) = broker.commit_transaction() { 50 | return Err(format!("Commit error: {}", e)); 51 | } 52 | 53 | println!("transaction complete"); 54 | Ok(()) 55 | } 56 | 57 | fn main() -> Result<(), Box> { 58 | // Broker Initialization 59 | let broker = Arc::new(Mutex::new(Broker::new( 60 | "broker1", 61 | 3, 62 | 2, 63 | "storage/batch_broker.log", 64 | ))); 65 | 66 | // Start message receiving thread 67 | let _receiver = start_message_receiver(Arc::clone(&broker)); 68 | 69 | // Message preparation for batch processing 70 | let messages = vec![ 71 | "Order no: 001, Item: apple, Qty: 5".to_string(), 72 | "Order no: 002, Item: mandarin oranges, Qty: 3".to_string(), 73 | "Order no: 003, Item: bananas, Qty: 2".to_string(), 74 | ]; 75 | 76 | // Execute batch transaction processing 77 | match process_batch_transaction(&broker, messages) { 78 | Ok(_) => println!("Batch processing completed successfully"), 79 | Err(e) => eprintln!("batch processing error: {}", e), 80 | } 81 | 82 | // Wait for receiving thread to complete processing 83 | thread::sleep(Duration::from_secs(2)); 84 | Ok(()) 85 | } 86 | -------------------------------------------------------------------------------- /examples/broker-integration-test.rs: -------------------------------------------------------------------------------- 1 | use pilgrimage::broker::{Broker, MessageSchema}; 2 | use tokio::time::{Duration, sleep}; 3 | 4 | // 
Auxiliary functions to display test results 5 | fn display_test_result(test_name: &str, result: Result<(), String>) { 6 | match result { 7 | Ok(_) => println!("✅ test success: {}", test_name), 8 | Err(e) => println!("❌ test failure: {} - error: {}", test_name, e), 9 | } 10 | } 11 | 12 | // Testing of basic messaging functionality 13 | async fn test_basic_messaging(broker: &mut Broker) -> Result<(), String> { 14 | println!("\n📝 Testing of basic messaging functionality begins...."); 15 | 16 | let topic = "test-topic"; 17 | // Delete and recreate existing topics 18 | let _ = broker.delete_topic(topic); 19 | broker.create_topic(topic, None)?; 20 | println!("Topic '{}' has been created.", topic); 21 | 22 | // Sending a message 23 | let message = MessageSchema::new() 24 | .with_content("test message".to_string()) 25 | .with_topic(topic.to_string()) 26 | .with_partition(0); 27 | 28 | broker.send_message(message)?; 29 | println!("Message sent."); 30 | 31 | // Wait a moment and wait for the message to be processed. 
32 | sleep(Duration::from_millis(100)).await; 33 | 34 | // Attempt to receive a message 35 | if let Ok(Some(received)) = broker.receive_message(topic, 0) { 36 | println!("Message received: {}", received.content); 37 | Ok(()) 38 | } else { 39 | Err("Message not received".to_string()) 40 | } 41 | } 42 | 43 | // Transaction functionality testing 44 | async fn test_transactions(broker: &mut Broker) -> Result<(), String> { 45 | println!("\n💼 Transaction functionality testing begins...."); 46 | 47 | let topic = "test-topic"; 48 | // Delete and recreate existing topics 49 | let _ = broker.delete_topic(topic); 50 | broker.create_topic(topic, None)?; 51 | println!("Topic '{}' has been created.", topic); 52 | 53 | broker.begin_transaction(); 54 | println!("Transaction initiated."); 55 | 56 | // Sending a message 57 | let message1 = MessageSchema::new() 58 | .with_content("Transaction message 1".to_string()) 59 | .with_topic("test-topic".to_string()) 60 | .with_partition(0); 61 | 62 | let message2 = MessageSchema::new() 63 | .with_content("Transaction Message 2".to_string()) 64 | .with_topic("test-topic".to_string()) 65 | .with_partition(0); 66 | 67 | broker.send_message(message1)?; 68 | broker.send_message(message2)?; 69 | println!("Two messages were sent within the transaction"); 70 | 71 | // Commit Transaction 72 | broker.commit_transaction()?; 73 | println!("Transaction committed."); 74 | 75 | // Check that messages are saved correctly 76 | if let Ok(Some(received1)) = broker.receive_message("test-topic", 0) { 77 | println!("Received 1st message: {}", received1.content); 78 | if let Ok(Some(received2)) = broker.receive_message("test-topic", 0) { 79 | println!("Second message received: {}", received2.content); 80 | Ok(()) 81 | } else { 82 | Err("Second message not received".to_string()) 83 | } 84 | } else { 85 | Err("The first message was not received.".to_string()) 86 | } 87 | } 88 | 89 | // Testing the rollback function 90 | async fn test_rollback(broker: &mut Broker) -> 
Result<(), String> { 91 | println!("\n🔄 Started testing rollback function..."); 92 | 93 | let topic = "test-topic"; 94 | // Delete and recreate existing topics 95 | let _ = broker.delete_topic(topic); 96 | broker.create_topic(topic, None)?; 97 | println!("Topic '{}' has been created.", topic); 98 | 99 | // Send a normal message first 100 | let normal_message = MessageSchema::new() 101 | .with_content("Normal message".to_string()) 102 | .with_topic(topic.to_string()) 103 | .with_partition(0); 104 | 105 | broker.send_message(normal_message)?; 106 | 107 | // Start transaction 108 | broker.begin_transaction(); 109 | println!("Transaction initiated."); 110 | 111 | // Send messages within a transaction 112 | let message = MessageSchema::new() 113 | .with_content("Message to be rolled back".to_string()) 114 | .with_topic(topic.to_string()) 115 | .with_partition(0); 116 | 117 | broker.send_message(message)?; 118 | println!("Message sent within transaction"); 119 | 120 | // Transaction rollback 121 | broker.rollback_transaction(); 122 | println!("Transaction rolled back."); 123 | 124 | // Ensure that only normal messages remain 125 | if let Ok(Some(received)) = broker.receive_message(topic, 0) { 126 | if received.content == "Normal message" { 127 | println!("Only non-transactional messages remain"); 128 | Ok(()) 129 | } else { 130 | Err("Unexpected messages remain.".to_string()) 131 | } 132 | } else { 133 | Err("Message not found".to_string()) 134 | } 135 | } 136 | 137 | #[tokio::main] 138 | async fn main() -> Result<(), Box> { 139 | println!("🚀 Begin comprehensive testing of brokers...\n"); 140 | 141 | // Broker Initialization 142 | let mut broker = Broker::new("test-broker", 2, 1, "test_storage/broker.log"); 143 | println!("Broker initialized."); 144 | 145 | // Test execution and result collection for each function 146 | let test_results = vec![ 147 | ("Basic Messaging", test_basic_messaging(&mut broker).await), 148 | ("transaction", test_transactions(&mut broker).await), 
149 | ("rollback", test_rollback(&mut broker).await), 150 | ]; 151 | 152 | // Display of test results 153 | println!("\n📊 Test Result Summary:"); 154 | for (name, result) in test_results { 155 | display_test_result(name, result); 156 | } 157 | 158 | Ok(()) 159 | } 160 | -------------------------------------------------------------------------------- /examples/idempotency-test.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Utc}; 2 | use serde::{Deserialize, Serialize}; 3 | use std::{ 4 | collections::HashMap, 5 | fs::{File, OpenOptions}, 6 | io::{BufReader, BufWriter}, 7 | }; 8 | use tokio::time::sleep; 9 | use uuid::Uuid; 10 | 11 | #[derive(Debug, Serialize, Deserialize)] 12 | struct MessageProcessingState { 13 | processed_at: DateTime, 14 | consumer_id: String, 15 | status: ProcessingStatus, 16 | } 17 | 18 | #[derive(Debug, Serialize, Deserialize)] 19 | enum ProcessingStatus { 20 | Processing, 21 | Completed, 22 | Failed, 23 | } 24 | 25 | #[derive(Debug, Serialize, Deserialize)] 26 | struct ProcessedState { 27 | processed_messages: HashMap, 28 | } 29 | 30 | impl ProcessedState { 31 | fn load(path: &str) -> Self { 32 | let file = OpenOptions::new() 33 | .read(true) 34 | .open(path) 35 | .unwrap_or_else(|_| File::create(path).unwrap()); 36 | let reader = BufReader::new(file); 37 | serde_json::from_reader(reader).unwrap_or_else(|_| ProcessedState { 38 | processed_messages: HashMap::new(), 39 | }) 40 | } 41 | 42 | fn save(&self, path: &str) { 43 | let file = OpenOptions::new() 44 | .write(true) 45 | .truncate(true) 46 | .open(path) 47 | .unwrap(); 48 | let writer = BufWriter::new(file); 49 | serde_json::to_writer(writer, self).unwrap(); 50 | } 51 | 52 | fn is_processed(&self, message_id: &Uuid) -> bool { 53 | self.processed_messages 54 | .contains_key(&message_id.to_string()) 55 | } 56 | 57 | fn mark_processing(&mut self, message_id: Uuid, consumer_id: String) { 58 | let key = message_id.to_string(); 59 
| let state = MessageProcessingState { 60 | processed_at: Utc::now(), 61 | consumer_id, 62 | status: ProcessingStatus::Processing, 63 | }; 64 | self.processed_messages.insert(key, state); 65 | } 66 | 67 | fn mark_completed(&mut self, message_id: Uuid) { 68 | let key = message_id.to_string(); 69 | if let Some(state) = self.processed_messages.get_mut(&key) { 70 | state.status = ProcessingStatus::Completed; 71 | } 72 | } 73 | } 74 | 75 | #[derive(Debug, Clone)] 76 | struct Message { 77 | id: Uuid, 78 | content: String, 79 | } 80 | 81 | impl Message { 82 | fn new(content: String) -> Self { 83 | Message { 84 | id: Uuid::new_v4(), 85 | content, 86 | } 87 | } 88 | } 89 | 90 | #[tokio::main] 91 | async fn main() -> Result<(), Box> { 92 | let state_path = "processed_messages.json"; 93 | let mut state = ProcessedState::load(state_path); 94 | 95 | for i in 0..5 { 96 | let message = Message::new(format!("Message {}", i)); 97 | let message_id = message.id; 98 | 99 | // Determine if it has already been processed 100 | if state.is_processed(&message_id) { 101 | println!( 102 | "Skip: Messages that have already been processed ID={}", 103 | message_id 104 | ); 105 | continue; 106 | } 107 | 108 | state.mark_processing(message_id, format!("consumer{}", i % 3 + 1)); 109 | println!("Send: ID={}, Content={}", message_id, message.content); 110 | 111 | let msg_clone = message.clone(); 112 | let handle = tokio::spawn(async move { 113 | // Simulate message processing 114 | sleep(std::time::Duration::from_secs(1)).await; 115 | println!("ACK: Received for ID={}", msg_clone.id); 116 | }); 117 | 118 | handle.await?; 119 | state.mark_completed(message_id); 120 | println!("ACK: Sent for ID={}", message_id); 121 | } 122 | 123 | // Checking for commutativity 124 | for i in 0..5 { 125 | let message = Message::new(format!("Message {}", i)); 126 | let message_id = message.id; 127 | 128 | if state.is_processed(&message_id) { 129 | println!( 130 | "Checking for idempotence: The message has not been 
reprocessed. ID={}", 131 | message_id 132 | ); 133 | } else { 134 | println!( 135 | "Checking for idempotence: The message is being reprocessed. ID={}", 136 | message_id 137 | ); 138 | } 139 | } 140 | 141 | state.save(state_path); 142 | Ok(()) 143 | } 144 | -------------------------------------------------------------------------------- /examples/improved-transaction.rs: -------------------------------------------------------------------------------- 1 | use pilgrimage::broker::{Broker, MessageSchema}; 2 | use std::sync::{Arc, Mutex}; 3 | use std::thread; 4 | use std::time::Duration; 5 | 6 | // Extract message reception processing as a separate function 7 | fn start_message_receiver(broker: Arc>) -> thread::JoinHandle<()> { 8 | thread::spawn(move || { 9 | loop { 10 | if let Ok(broker) = broker.lock() { 11 | if let Ok(Some(message)) = broker.receive_message("test_topic", 0) { 12 | println!("Received: Contents={}", message.content); 13 | } 14 | } 15 | thread::sleep(Duration::from_millis(100)); 16 | } 17 | }) 18 | } 19 | 20 | // Extract transaction processing as a separate function 21 | fn process_transaction(broker: &Mutex, content: String) -> Result<(), String> { 22 | let mut broker = broker.lock().map_err(|e| e.to_string())?; 23 | 24 | // Create Topic 25 | let _ = broker.delete_topic("test_topic"); 26 | broker.create_topic("test_topic", None)?; 27 | 28 | broker.begin_transaction(); 29 | 30 | // Creating MessageSchema 31 | let message = MessageSchema::new() 32 | .with_content(content.clone()) 33 | .with_topic("test_topic".to_string()) 34 | .with_partition(0); 35 | 36 | println!("Send: Contents={}", content); 37 | 38 | // Transaction processing 39 | if let Err(e) = broker.send_message(message) { 40 | broker.rollback_transaction(); 41 | return Err(format!("transmission error: {}", e)); 42 | } 43 | 44 | if let Err(e) = broker.commit_transaction() { 45 | return Err(format!("commit error: {}", e)); 46 | } 47 | 48 | Ok(()) 49 | } 50 | 51 | fn main() -> Result<(), Box> { 
52 | // Broker Initialization 53 | let broker = Arc::new(Mutex::new(Broker::new( 54 | "broker1", 55 | 3, 56 | 2, 57 | "storage/improved_broker.log", 58 | ))); 59 | 60 | // Start message receiving thread 61 | let _receiver = start_message_receiver(Arc::clone(&broker)); // Creating and Sending Messages 62 | let content = "Hello world!".to_string(); 63 | 64 | // Transaction processing execution 65 | if let Err(e) = process_transaction(&broker, content) { 66 | eprintln!("transaction processing error: {}", e); 67 | } 68 | 69 | // Wait for receiving thread to complete processing 70 | thread::sleep(Duration::from_secs(1)); 71 | Ok(()) 72 | } 73 | -------------------------------------------------------------------------------- /examples/persistent-ack.rs: -------------------------------------------------------------------------------- 1 | use chrono::Utc; 2 | use pilgrimage::broker::Broker; 3 | use pilgrimage::message::ack::{AckStatus, MessageAck}; 4 | use pilgrimage::message::message::Message; 5 | use serde::{Deserialize, Serialize}; 6 | use std::{ 7 | collections::HashSet, 8 | fs::{File, OpenOptions}, 9 | io::{BufReader, BufWriter}, 10 | time::Duration, 11 | }; 12 | use tokio::time::sleep; 13 | use uuid::Uuid; 14 | 15 | #[derive(Debug, Serialize, Deserialize)] 16 | struct ProcessedState { 17 | processed_ids: HashSet, 18 | } 19 | 20 | impl ProcessedState { 21 | fn new() -> Self { 22 | Self { 23 | processed_ids: HashSet::new(), 24 | } 25 | } 26 | 27 | fn load(path: &str) -> Self { 28 | File::open(path) 29 | .ok() 30 | .and_then(|f| { 31 | let reader = BufReader::new(f); 32 | serde_json::from_reader(reader).ok() 33 | }) 34 | .unwrap_or_else(Self::new) 35 | } 36 | 37 | fn save(&self, path: &str) -> std::io::Result<()> { 38 | let file = OpenOptions::new() 39 | .create(true) 40 | .write(true) 41 | .truncate(true) 42 | .open(path)?; 43 | let writer = BufWriter::new(file); 44 | serde_json::to_writer_pretty(writer, self)?; 45 | Ok(()) 46 | } 47 | } 48 | 49 | #[tokio::main] 50 | 
async fn main() -> Result<(), Box> { 51 | let state_path = "processed_messages.json"; 52 | let mut state = ProcessedState::load(state_path); 53 | 54 | let mut broker = Broker::new("broker1", 3, 2, "storage/persistent_broker.log"); 55 | // Create topic (delete existing topic) 56 | let _ = broker.delete_topic("test_topic"); 57 | broker.create_topic("test_topic", None)?; 58 | 59 | let mut handles = vec![]; 60 | 61 | for i in 0..5 { 62 | let message = Message::new(format!("Message {}", i)); 63 | let message_id = message.id; 64 | 65 | if state.processed_ids.contains(&message_id) { 66 | println!( 67 | "Skip: Messages that have already been processed ID={}", 68 | message_id 69 | ); 70 | continue; 71 | } 72 | 73 | println!("Send: ID={}, Content={}", message_id, message.content); 74 | let msg_clone = message.clone(); 75 | 76 | let handle = tokio::spawn(async move { 77 | sleep(Duration::from_millis(500 * (i + 1) as u64)).await; 78 | 79 | let ack = MessageAck::new( 80 | msg_clone.id, 81 | Utc::now(), 82 | AckStatus::Processed, 83 | format!("consumer{}", i % 3 + 1), 84 | 0, 85 | ); 86 | println!("ACK issued: {:?}", ack); 87 | 88 | msg_clone.id 89 | }); 90 | handles.push(handle); 91 | } 92 | 93 | for handle in handles { 94 | let processed_id = handle.await?; 95 | state.processed_ids.insert(processed_id); 96 | } 97 | 98 | state.save(state_path)?; 99 | println!("The processed message has been saved.: {}", state_path); 100 | 101 | Ok(()) 102 | } 103 | -------------------------------------------------------------------------------- /src/amqp_handler.rs: -------------------------------------------------------------------------------- 1 | //! Module for interacting with an AMQP (Advanced Message Queuing Protocol) server. 2 | //! 3 | //! The module contains the `AmqpConnection` struct, 4 | //! which provides methods to send and receive messages, as well as manage the queue. 
5 | 6 | use std::time::Duration; 7 | 8 | use futures_util::StreamExt; 9 | use lapin::{ 10 | BasicProperties, Channel, Connection, ConnectionProperties, 11 | options::{ 12 | BasicAckOptions, BasicConsumeOptions, BasicPublishOptions, QueueDeclareOptions, 13 | QueuePurgeOptions, 14 | }, 15 | types::FieldTable, 16 | }; 17 | use tokio::time::timeout; 18 | 19 | /// The `AmqpConnection` struct provides methods to interact with an 20 | /// AMQP (Advanced Message Queuing Protocol) server. 21 | /// 22 | /// It allows sending and receiving messages, as well as managing the queue. 23 | #[derive(Clone)] 24 | pub struct AmqpConnection { 25 | /// The AMQP channel. 26 | channel: Channel, 27 | /// The name of the queue. 28 | queue_name: String, 29 | } 30 | 31 | impl AmqpConnection { 32 | /// Creates a new `AmqpConnection` instance. 33 | /// 34 | /// It connects to the AMQP server at the specified address and creates a channel. 35 | /// It also declares a queue with the specified name. 36 | /// 37 | /// # Arguments 38 | /// * `addr` - The address of the AMQP server. 39 | /// * `queue_name` - The name of the queue. 40 | /// 41 | /// # Returns 42 | /// A `Result` containing the `AmqpConnection` instance if successful, or an error. 43 | pub async fn new(addr: &str, queue_name: &str) -> lapin::Result { 44 | let conn = Connection::connect(addr, ConnectionProperties::default()).await?; 45 | let channel = conn.create_channel().await?; 46 | 47 | channel 48 | .queue_declare( 49 | queue_name, 50 | QueueDeclareOptions::default(), 51 | FieldTable::default(), 52 | ) 53 | .await?; 54 | 55 | Ok(Self { 56 | channel, 57 | queue_name: queue_name.to_string(), 58 | }) 59 | } 60 | 61 | /// Sends a message to the queue. 62 | /// 63 | /// # Arguments 64 | /// * `message` - The message to send. 65 | /// 66 | /// # Returns 67 | /// * `Ok(())` - If the message is successfully sent. 68 | /// * `Err(lapin::Error)` - If an error occurs during message sending. 
69 | pub async fn send_message(&self, message: &str) -> lapin::Result<()> { 70 | self.channel 71 | .basic_publish( 72 | "", 73 | &self.queue_name, 74 | BasicPublishOptions::default(), 75 | message.as_bytes(), 76 | BasicProperties::default(), 77 | ) 78 | .await? 79 | .await?; 80 | Ok(()) 81 | } 82 | 83 | /// Receives a message from the queue. 84 | /// 85 | /// # Returns 86 | /// * `Ok(String)` - The received message. 87 | /// * `Err(lapin::Error)` - If an error occurs during message receiving. 88 | /// * `Err(lapin::Error::InvalidChannel(0))` - If the channel is invalid. 89 | pub async fn receive_message(&self) -> lapin::Result { 90 | let mut consumer = self 91 | .channel 92 | .basic_consume( 93 | &self.queue_name, 94 | "my_consumer", 95 | BasicConsumeOptions::default(), 96 | FieldTable::default(), 97 | ) 98 | .await?; 99 | 100 | if let Some(delivery_result) = consumer.next().await { 101 | match delivery_result { 102 | Ok(delivery) => { 103 | let message = match std::str::from_utf8(&delivery.data) { 104 | Ok(s) => s.to_string(), 105 | Err(_) => return Err(lapin::Error::InvalidChannel(0)), 106 | }; 107 | delivery 108 | .ack(BasicAckOptions::default()) 109 | .await 110 | .map_err(lapin::Error::from)?; 111 | Ok(message) 112 | } 113 | Err(error) => Err(error), 114 | } 115 | } else { 116 | Err(lapin::Error::InvalidChannel(0)) 117 | } 118 | } 119 | 120 | /// Receives a message from the queue with a specific consumer tag. 121 | /// 122 | /// # Arguments 123 | /// * `consumer_tag` - The consumer tag to use. 124 | /// 125 | /// # Returns 126 | /// * `Ok(String)` - The received message. 127 | /// * `Err(lapin::Error)` - If an error occurs during message receiving. 
128 | pub async fn receive_message_with_tag(&self, consumer_tag: &str) -> lapin::Result { 129 | let mut consumer = self 130 | .channel 131 | .basic_consume( 132 | &self.queue_name, 133 | consumer_tag, 134 | BasicConsumeOptions::default(), 135 | FieldTable::default(), 136 | ) 137 | .await?; 138 | 139 | match timeout(Duration::from_secs(5), consumer.next()).await { 140 | Ok(Some(delivery_result)) => match delivery_result { 141 | Ok(delivery) => { 142 | let message = match std::str::from_utf8(&delivery.data) { 143 | Ok(s) => s.to_string(), 144 | Err(_) => return Err(lapin::Error::InvalidChannel(0)), 145 | }; 146 | delivery.ack(BasicAckOptions::default()).await?; 147 | Ok(message) 148 | } 149 | Err(e) => Err(e), 150 | }, 151 | Ok(None) => Err(lapin::Error::InvalidChannel(0)), 152 | Err(_) => Err(lapin::Error::InvalidChannel(0)), 153 | } 154 | } 155 | 156 | /// Purges the queue, removing all messages from it. 157 | /// 158 | /// # Returns 159 | /// * `Ok(())` - If the queue is successfully purged. 160 | /// * `Err(lapin::Error)` - If an error occurs during queue purging. 161 | pub async fn purge_queue(&self) -> lapin::Result<()> { 162 | self.channel 163 | .queue_purge(&self.queue_name, QueuePurgeOptions::default()) 164 | .await?; 165 | Ok(()) 166 | } 167 | } 168 | 169 | #[cfg(test)] 170 | mod tests { 171 | use super::*; 172 | use std::error::Error; 173 | use uuid::Uuid; 174 | 175 | /// Utility function to generate a unique queue name. 176 | fn generate_unique_queue_name(base: &str) -> String { 177 | format!("{}_{}", base, Uuid::new_v4()) 178 | } 179 | 180 | /// Tests sending and receiving a message. 181 | /// 182 | /// # Purpose 183 | /// The test verifies that a message can be sent to a queue and then received from it. 184 | /// 185 | /// # Steps 186 | /// 1. Create a new `AmqpConnection` instance. 187 | /// 2. Purge the queue to remove any existing messages. 188 | /// 3. Send a message to the queue. 189 | /// 4. Receive a message from the queue. 190 | /// 5. 
Assert that the received message is the same as the sent message. 191 | #[tokio::test] 192 | async fn test_message_send_receive() -> Result<(), Box> { 193 | let queue_name = generate_unique_queue_name("test_send_receive"); 194 | let amqp = AmqpConnection::new("amqp://127.0.0.1:5672/%2f", &queue_name).await?; 195 | amqp.purge_queue().await?; 196 | 197 | amqp.send_message("Hello, Test!").await?; 198 | 199 | let receive_amqp = amqp.clone(); 200 | let received = receive_amqp.receive_message().await?; 201 | assert_eq!(received, "Hello, Test!"); 202 | 203 | Ok(()) 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /src/auth/authentication.rs: -------------------------------------------------------------------------------- 1 | //! Module for user authentication. 2 | //! 3 | //! This module provides functionality for authenticating users based on username and password. 4 | //! 5 | //! The [`Authenticator`] trait defines the interface for authenticating users. 6 | //! 7 | //! The [`BasicAuthenticator`] struct is a simple implementation 8 | //! of the [`Authenticator`] trait that uses a HashMap for storing credentials. 9 | //! The [`BasicAuthenticator`] struct also provides a method for adding new users to the 10 | //! authenticator. 11 | //! 12 | //! # Examples 13 | //! ```rust 14 | //! use pilgrimage::auth::authentication::{Authenticator, BasicAuthenticator}; 15 | //! 16 | //! // Create a new BasicAuthenticator instance 17 | //! let mut authenticator = BasicAuthenticator::new(); 18 | //! 19 | //! // Add some users 20 | //! authenticator.add_user("user1", "password"); 21 | //! authenticator.add_user("user2", "password"); 22 | //! 23 | //! // Authenticate users 24 | //! assert!(authenticator.authenticate("user1", "password").unwrap()); 25 | //! assert!(!authenticator.authenticate("user1", "wrong_password").unwrap()); 26 | //! assert!(!authenticator.authenticate("user3", "password").unwrap()); 27 | //! 
``` 28 | 29 | use std::collections::HashMap; 30 | use std::error::Error; 31 | 32 | /// A trait for authenticating users based on username and password. 33 | pub trait Authenticator { 34 | /// Authenticates a user with the given username and password. 35 | /// 36 | /// # Arguments 37 | /// * `username`: A string slice representing the username. 38 | /// * `password`: A string slice representing the password. 39 | /// 40 | /// # Returns 41 | /// * `Result>`: A result indicating whether the authentication 42 | /// was successful (`true` for success, `false` for failure), or an error if an error occurs. 43 | fn authenticate(&self, username: &str, password: &str) -> Result>; 44 | } 45 | 46 | /// A struct representing a basic authenticator that uses a HashMap for storing credentials. 47 | pub struct BasicAuthenticator { 48 | /// A HashMap containing the username-password pairs. 49 | credentials: std::collections::HashMap, 50 | } 51 | 52 | impl BasicAuthenticator { 53 | /// Creates a new instance of `BasicAuthenticator`. It simply initializes the credential store. 54 | /// 55 | /// # Returns 56 | /// * A new `BasicAuthenticator` instance with an empty credentials store. 57 | pub fn new() -> Self { 58 | Self { 59 | credentials: HashMap::new(), 60 | } 61 | } 62 | 63 | /// Adds a new user with the given username and password to the authenticator. 64 | /// 65 | /// # Arguments 66 | /// * `username`: A string slice representing the username. 67 | /// * `password`: A string slice representing the password. 68 | pub fn add_user(&mut self, username: &str, password: &str) { 69 | self.credentials 70 | .insert(username.to_string(), password.to_string()); 71 | } 72 | } 73 | 74 | impl Default for BasicAuthenticator { 75 | fn default() -> Self { 76 | Self::new() 77 | } 78 | } 79 | 80 | impl Authenticator for BasicAuthenticator { 81 | /// Authenticates a user with the given username and password. 
82 | /// 83 | /// This method checks if the given username exists in the credentials store 84 | /// and if the password matches. 85 | /// # Arguments 86 | /// * `username`: A string slice representing the username. 87 | /// * `password`: A string slice representing the password. 88 | /// 89 | /// # Returns 90 | /// * `Result>`: A result indicating whether the authentication 91 | /// was successful (`true` for success, `false` for failure), or an error if an error occurs. 92 | fn authenticate(&self, username: &str, password: &str) -> Result> { 93 | Ok(self 94 | .credentials 95 | .get(username) 96 | .is_some_and(|stored_password| stored_password == password)) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /src/auth/authorization.rs: -------------------------------------------------------------------------------- 1 | //! Module for managing role-based access control. 2 | //! 3 | //! This module provides functionality for managing roles and permissions. 4 | //! 5 | //! The [`RoleBasedAccessControl`] struct is used to manage roles and permissions. 6 | //! 7 | //! The [`Permission`] enum represents the permissions that can be assigned to a role. 8 | //! 9 | //! # Examples 10 | //! Below we will demonstrate how to use the [`RoleBasedAccessControl`] 11 | //! structure to manage roles and permissions. 12 | //! ``` 13 | //! use crate::pilgrimage::auth::authorization::{RoleBasedAccessControl, Permission}; 14 | //! 15 | //! // Create a new role-based access control instance 16 | //! let mut rbac = RoleBasedAccessControl::new(); 17 | //! 18 | //! // Create a role and a user 19 | //! let role = "guest"; 20 | //! let user = "user1"; 21 | //! 22 | //! // Add your custom role with the required permissions 23 | //! rbac.add_role(role, vec![ 24 | //! Permission::Read 25 | //! ]); 26 | //! 27 | //! // Assign the role to a user 28 | //! rbac.assign_role(user, role); 29 | //! 30 | //! 
// Check if the user has the required permission 31 | //! assert!(rbac.has_permission(user, &Permission::Read)); 32 | //! 33 | //! // Finally, remove the role from the user 34 | //! rbac.remove_role(user, role); 35 | //! 36 | //! // Check if the user still has the required permission 37 | //! assert!(!rbac.has_permission(user, &Permission::Read)); 38 | //! ``` 39 | 40 | use std::collections::HashMap; 41 | 42 | /// Enum representing the permissions that can be assigned to a role. 43 | /// 44 | /// The enum can be cloned and compared for equality. 45 | #[derive(Debug, Clone, PartialEq)] 46 | pub enum Permission { 47 | Read, 48 | Write, 49 | Admin, 50 | } 51 | 52 | /// A struct for managing role-based access control. 53 | /// 54 | /// The struct can be used to add roles with permissions, 55 | /// assign roles to users, and check if a user has a specific permission. 56 | pub struct RoleBasedAccessControl { 57 | /// A HashMap containing the roles and their associated [`Permission`]. 58 | roles: HashMap>, 59 | /// A HashMap containing the users and their assigned roles. 60 | user_roles: HashMap>, 61 | } 62 | 63 | /// The implementation of the `RoleBasedAccessControl` struct offers methods for adding roles, 64 | /// assigning roles to users, and checking permissions. 65 | /// 66 | /// The struct can be created with the [`RoleBasedAccessControl::new`] method, 67 | /// and roles can be added with the [`RoleBasedAccessControl::add_role`] method. 68 | /// 69 | /// The roles that are added can be assigned to users with the 70 | /// [`RoleBasedAccessControl::assign_role`] method. 71 | /// 72 | /// Also, the permissions of a user can be checked with the 73 | /// [`RoleBasedAccessControl::has_permission`] method. 74 | /// 75 | /// Finally, roles can be removed from users with the 76 | /// [`RoleBasedAccessControl::remove_role`] method. 77 | impl RoleBasedAccessControl { 78 | /// Creates a new instance of `RoleBasedAccessControl`. 
79 | /// 80 | /// # Returns 81 | /// * A new `RoleBasedAccessControl` instance with empty roles and user roles stores. 82 | pub fn new() -> Self { 83 | Self { 84 | roles: HashMap::new(), 85 | user_roles: HashMap::new(), 86 | } 87 | } 88 | 89 | /// Adds a new role with the specified permissions. 90 | /// 91 | /// # Arguments 92 | /// * `role`: A string slice representing the role name. 93 | /// Each role should have a unique name, 94 | /// if a role with the same name already exists, it will be overwritten. 95 | /// * `permissions`: A vector of [`Permission`] associated with the role. 96 | /// 97 | /// # Warning 98 | /// If a role with the same name already exists, it will be overwritten. 99 | pub fn add_role(&mut self, role: &str, permissions: Vec) { 100 | self.roles.insert(role.to_string(), permissions); 101 | } 102 | 103 | /// Assigns a role to a user. 104 | /// 105 | /// If the user already has roles assigned, the new role will be added to the existing roles. 106 | /// 107 | /// # Arguments 108 | /// * `username`: A string slice representing the username. 109 | /// * `role`: A string slice representing the role name. 110 | pub fn assign_role(&mut self, username: &str, role: &str) { 111 | self.user_roles 112 | .entry(username.to_string()) 113 | .or_default() 114 | .push(role.to_string()); 115 | } 116 | 117 | /// Removes a role from a user. 118 | /// 119 | /// If the user has multiple roles assigned, only the specified role will be removed. 120 | /// 121 | /// If the user does not have the specified role, nothing will happen. 122 | /// 123 | /// # Arguments 124 | /// * `username`: A string slice representing the username. 125 | /// * `role`: A string slice representing the role name. 126 | pub fn remove_role(&mut self, username: &str, role: &str) { 127 | if let Some(roles) = self.user_roles.get_mut(username) { 128 | roles.retain(|r| r != role); 129 | } 130 | } 131 | 132 | /// Checks if a user has the required permission. 
133 | /// 134 | /// # Arguments 135 | /// * `username`: A string slice representing the username. 136 | /// * `required_permission`: A reference to the [`Permission`] that the user should have. 137 | /// 138 | /// # Returns 139 | /// * `bool`: A boolean indicating whether the user has the required permission. 140 | /// Returns `true` if the user has the required permission, `false` in all other cases. 141 | pub fn has_permission(&self, username: &str, required_permission: &Permission) -> bool { 142 | self.user_roles.get(username).is_some_and(|roles| { 143 | roles.iter().any(|role| { 144 | self.roles 145 | .get(role) 146 | .is_some_and(|permissions| permissions.contains(required_permission)) 147 | }) 148 | }) 149 | } 150 | } 151 | 152 | impl Default for RoleBasedAccessControl { 153 | fn default() -> Self { 154 | Self::new() 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /src/auth/mod.rs: -------------------------------------------------------------------------------- 1 | //! Module for authentication, authorization, and token management. 2 | //! 3 | //! This module provides functionality for user authentication, role-based access control, 4 | //! and token management. 5 | //! * The [`authentication`] module provides functionality for authenticating users 6 | //! based on username and password. 7 | //! * The [`authorization`] module provides functionality for managing roles and permissions. 8 | //! * The [`token`] module provides functionality for managing JWT tokens. 9 | //! 10 | //! # Examples 11 | //! Below we will demonstrate how to use the authentication, authorization, and token modules. 12 | //! ``` 13 | //! use pilgrimage::auth::authentication::{Authenticator, BasicAuthenticator}; 14 | //! use pilgrimage::auth::authorization::{RoleBasedAccessControl, Permission}; 15 | //! use pilgrimage::auth::token::TokenManager; 16 | //! 17 | //! /************************* 18 | //! * Authentication example 19 | //! 
*************************/ 20 | //! // Create a new BasicAuthenticator instance 21 | //! let mut authenticator = BasicAuthenticator::new(); 22 | //! // Add some users 23 | //! authenticator.add_user("user1", "password"); 24 | //! authenticator.add_user("user2", "password"); 25 | //! // Authenticate users 26 | //! assert!(authenticator.authenticate("user1", "password").unwrap()); 27 | //! assert!(!authenticator.authenticate("user1", "wrong_password").unwrap()); 28 | //! assert!(!authenticator.authenticate("user3", "password").unwrap()); 29 | //! 30 | //! /************************* 31 | //! * Authorization example 32 | //! *************************/ 33 | //! // Create a new role-based access control instance 34 | //! let mut rbac = RoleBasedAccessControl::new(); 35 | //! // Create a role and a user 36 | //! let role = "guest"; 37 | //! let user = "user1"; 38 | //! // Add your custom role with the required permissions 39 | //! rbac.add_role(role, vec![ Permission::Read ]); 40 | //! // Assign the role to a user 41 | //! rbac.assign_role(user, role); 42 | //! // Check if the user has the required permission 43 | //! assert!(rbac.has_permission(user, &Permission::Read)); 44 | //! // Finally, remove the role from the user 45 | //! rbac.remove_role(user, role); 46 | //! // Check if the user still has the required permission 47 | //! assert!(!rbac.has_permission(user, &Permission::Read)); 48 | //! 49 | //! /************************** 50 | //! * Token management example 51 | //! **************************/ 52 | //! // Generate a new token manager with the secret key 53 | //! let token_manager = TokenManager::new(b"MySuperSecret"); 54 | //! // Generate a new token for the user "admin" with the role "admin" 55 | //! let token = token_manager.generate_token("admin", vec!["admin".to_string()]).unwrap(); 56 | //! // Verify the token and get the claims 57 | //! let claims = token_manager.verify_token(&token).unwrap(); 58 | //! 
// Check the claims of the token (username, roles) 59 | //! assert_eq!(claims.sub, "admin"); 60 | //! assert_eq!(claims.roles, vec!["admin".to_string()]); 61 | //! ``` 62 | 63 | pub mod authentication; 64 | pub mod authorization; 65 | pub mod token; 66 | 67 | #[cfg(test)] 68 | mod tests; 69 | -------------------------------------------------------------------------------- /src/auth/token.rs: -------------------------------------------------------------------------------- 1 | //! Module for generating and verifying JSON Web Tokens (JWTs). 2 | //! 3 | //! It includes a [`TokenManager`] struct for managing JWT encoding and decoding, 4 | //! and a [`Claims`] struct for representing the claims in a JWT. 5 | //! 6 | //! # Examples 7 | //! Below we will demonstrate how to use the [`TokenManager`] to generate and verify JWT tokens. 8 | //! ``` 9 | //! use crate::pilgrimage::auth::token::TokenManager; 10 | //! 11 | //! // Generate a new token manager with the secret key 12 | //! let token_manager = TokenManager::new(b"MySuperSecret"); 13 | //! 14 | //! // Generate a new token for the user "admin" with the role "admin" 15 | //! let token = token_manager.generate_token("admin", vec!["admin".to_string()]).unwrap(); 16 | //! 17 | //! // Verify the token and get the claims 18 | //! let claims = token_manager.verify_token(&token).unwrap(); 19 | //! 20 | //! // Check the claims of the token (username, roles) 21 | //! assert_eq!(claims.sub, "admin"); 22 | //! assert_eq!(claims.roles, vec!["admin".to_string()]); 23 | //! ``` 24 | 25 | use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header, Validation, decode, encode}; 26 | use serde::{Deserialize, Serialize}; 27 | use std::time::{SystemTime, UNIX_EPOCH}; 28 | 29 | /// A struct representing the [claims of a JWT token][claims]. 30 | /// 31 | /// The struct can be serialized and deserialized. 
32 | /// 33 | /// [claims]: https://auth0.com/docs/secure/tokens/json-web-tokens/json-web-token-claims 34 | #[derive(Debug, Serialize, Deserialize)] 35 | pub struct Claims { 36 | /// The subject of the token, which is the username. 37 | pub sub: String, 38 | /// The expiration time of the token. 39 | pub exp: usize, 40 | /// The roles associated with the token. 41 | pub roles: Vec, 42 | } 43 | 44 | /// A struct responsible for managing the encoding and decoding of JWT tokens. 45 | pub struct TokenManager { 46 | /// The key used to encode (sign) the JWT token. 47 | encoding_key: EncodingKey, 48 | /// The key used to decode (verify) the JWT token. 49 | decoding_key: DecodingKey, 50 | } 51 | 52 | impl TokenManager { 53 | /// Creates a new `TokenManager` with the provided secret key. 54 | /// 55 | /// # Arguments 56 | /// * `secret`: A slice of bytes representing the secret key used for encoding and decoding. 57 | /// The key will be used to sign and verify the JWT token. 58 | /// 59 | /// # Returns 60 | /// * A new `TokenManager` instance. 61 | /// 62 | /// # Example 63 | /// ``` 64 | /// use crate::pilgrimage::auth::token::TokenManager; 65 | /// 66 | /// // Generate a new token manager with the secret key 67 | /// let token_manager = TokenManager::new(b"MySuperSecret"); 68 | /// ``` 69 | pub fn new(secret: &[u8]) -> Self { 70 | Self { 71 | encoding_key: EncodingKey::from_secret(secret), 72 | decoding_key: DecodingKey::from_secret(secret), 73 | } 74 | } 75 | 76 | /// Generates a JWT for the given username and roles. 77 | /// 78 | /// The token will expire in 1 hour. 79 | /// 80 | /// The header of the JWT will contain the default algorithm `HS256`, 81 | /// provided by [Header::default()]. 82 | /// 83 | /// # Arguments 84 | /// * `username`: A string slice representing the username. 85 | /// * `roles`: A vector of strings representing the roles. 
86 | /// 87 | /// # Returns 88 | /// * `Result`: A result containing 89 | /// the encoded JWT as a string, or an error if encoding fails. 90 | /// 91 | /// # Example 92 | /// ``` 93 | /// use crate::pilgrimage::auth::token::TokenManager; 94 | /// 95 | /// // Generate a new token manager with the secret key 96 | /// let token_manager = TokenManager::new(b"MySuperSecret"); 97 | /// 98 | /// // Generate a new token for the user "admin" with the role "admin" 99 | /// let token = token_manager.generate_token("admin", vec!["admin".to_string()]); 100 | /// ``` 101 | pub fn generate_token( 102 | &self, 103 | username: &str, 104 | roles: Vec, 105 | ) -> Result { 106 | let expiration = SystemTime::now() 107 | .duration_since(UNIX_EPOCH) 108 | .unwrap() 109 | .as_secs() as usize 110 | + 3600; 111 | 112 | let claims = Claims { 113 | sub: username.to_string(), 114 | exp: expiration, 115 | roles, 116 | }; 117 | 118 | encode(&Header::default(), &claims, &self.encoding_key) 119 | } 120 | 121 | /// Verifies a JWT token and returns the claims. 122 | /// 123 | /// # Arguments 124 | /// * `token`: A string slice representing the JWT token. 125 | /// 126 | /// # Returns 127 | /// * `Result`: A result containing the decoded claims, 128 | /// or an error if decoding fails. 
129 | /// 130 | /// # Example 131 | /// ``` 132 | /// use crate::pilgrimage::auth::token::TokenManager; 133 | /// 134 | /// // Generate a new token manager with the secret key 135 | /// let token_manager = TokenManager::new(b"MySuperSecret"); 136 | /// 137 | /// // Generate a new token for the user "admin" with the role "admin" 138 | /// let token = token_manager.generate_token("admin", vec!["admin".to_string()]).unwrap(); 139 | /// 140 | /// // Verify the token and get the claims 141 | /// let claims = token_manager.verify_token(&token).unwrap(); 142 | /// ``` 143 | pub fn verify_token(&self, token: &str) -> Result { 144 | let validation = Validation::new(Algorithm::HS256); 145 | decode::(token, &self.decoding_key, &validation).map(|token_data| token_data.claims) 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /src/bin/commands/consume.rs: -------------------------------------------------------------------------------- 1 | use crate::error::{BrokerErrorKind, CliError, CliResult}; 2 | use clap::ArgMatches; 3 | use pilgrimage::Broker; 4 | use std::sync::Arc; 5 | use tokio::sync::Mutex; 6 | 7 | pub async fn handle_consume_command(matches: &ArgMatches) -> CliResult<()> { 8 | let broker_id = matches.value_of("id").ok_or_else(|| CliError::ParseError { 9 | field: "id".to_string(), 10 | message: "Broker ID is not specified".to_string(), 11 | })?; 12 | 13 | let _group_id = matches.value_of("group").unwrap_or("default"); 14 | let partition = matches 15 | .value_of("partition") 16 | .map(|p| p.parse::()) 17 | .transpose() 18 | .map_err(|_| CliError::ParseError { 19 | field: "partition".to_string(), 20 | message: "Invalid partition number".to_string(), 21 | })?; 22 | let topic = matches.value_of("topic").unwrap_or("default"); 23 | let storage_path = format!("/tmp/{}", broker_id); 24 | 25 | println!("Receiving messages from broker {}...", broker_id); 26 | 27 | // Initialize broker instance 28 | let broker = Broker::new( 29 
| broker_id, 30 | 1, // Default number of partitions 31 | 1, // default replication factor 32 | &storage_path, 33 | ); 34 | 35 | let broker = Arc::new(Mutex::new(broker)); 36 | 37 | // Receiving and processing messages 38 | let broker = broker.lock().await; 39 | match broker.receive_message(topic, partition.unwrap_or(0)) { 40 | Ok(Some(message)) => { 41 | println!("Received message: {}", message.content); 42 | Ok(()) 43 | } 44 | Ok(None) => { 45 | println!("No messages available to receive"); 46 | Ok(()) 47 | } 48 | Err(e) => { 49 | if e.to_string().contains("timeout") { 50 | println!("Timeout: Could not receive message"); 51 | Ok(()) 52 | } else { 53 | Err(CliError::BrokerError { 54 | kind: BrokerErrorKind::OperationFailed, 55 | message: format!("Failed to receive message: {}", e), 56 | }) 57 | } 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/bin/commands/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod consume; 2 | pub mod schema; 3 | pub mod send; 4 | pub mod start; 5 | pub mod status; 6 | pub mod stop; 7 | 8 | pub use consume::handle_consume_command; 9 | pub use schema::{handle_schema_list_command, handle_schema_register_command}; 10 | pub use send::handle_send_command; 11 | pub use start::handle_start_command; 12 | pub use status::handle_status_command; 13 | pub use stop::handle_stop_command; 14 | -------------------------------------------------------------------------------- /src/bin/commands/schema.rs: -------------------------------------------------------------------------------- 1 | use crate::error::{CliError, CliResult, SchemaErrorKind}; 2 | use clap::ArgMatches; 3 | use pilgrimage::schema::compatibility::Compatibility; 4 | use pilgrimage::schema::registry::{Schema, SchemaRegistry}; 5 | use pilgrimage::schema::version::SchemaVersion; 6 | use std::fs; 7 | 8 | fn parse_compatibility(value: Option<&str>) -> Compatibility { 9 | 
value.map_or(Compatibility::Backward, |c| match c { 10 | "BACKWARD" => Compatibility::Backward, 11 | "FORWARD" => Compatibility::Forward, 12 | "FULL" => Compatibility::Full, 13 | "NONE" => Compatibility::None, 14 | _ => Compatibility::Backward, 15 | }) 16 | } 17 | 18 | fn get_compatibility_description(comp: &Compatibility) -> &'static str { 19 | match comp { 20 | Compatibility::Backward => "BACKWARD", 21 | Compatibility::Forward => "FORWARD", 22 | Compatibility::Full => "FULL", 23 | Compatibility::None => "NONE", 24 | } 25 | } 26 | 27 | fn validate_schema_compatibility( 28 | registry: &SchemaRegistry, 29 | topic: &str, 30 | schema_content: &str, 31 | compatibility: &Compatibility, 32 | ) -> CliResult<()> { 33 | if let Some(schemas) = registry.get_all_schemas(topic) { 34 | if let Some(latest_schema) = schemas.last() { 35 | let new_schema = Schema { 36 | id: latest_schema.id + 1, 37 | version: SchemaVersion::new(latest_schema.version.major + 1), 38 | definition: schema_content.to_string(), 39 | }; 40 | 41 | if !compatibility.check(&new_schema, latest_schema) { 42 | return Err(CliError::SchemaError { 43 | kind: SchemaErrorKind::IncompatibleChange, 44 | message: format!( 45 | "Schema compatibility validation error:\nTopic.: {}\nRequired Compatibility: {}\nIncompatible with current schema", 46 | topic, 47 | get_compatibility_description(compatibility) 48 | ), 49 | }); 50 | } 51 | } 52 | } 53 | Ok(()) 54 | } 55 | 56 | pub async fn handle_schema_register_command(matches: &ArgMatches) -> CliResult<()> { 57 | // Argument Validation 58 | let topic = matches 59 | .value_of("topic") 60 | .ok_or_else(|| CliError::ParseError { 61 | field: "topic".to_string(), 62 | message: "Topic not specified".to_string(), 63 | })?; 64 | 65 | let schema_file = matches 66 | .value_of("schema") 67 | .ok_or_else(|| CliError::ParseError { 68 | field: "schema".to_string(), 69 | message: "Schema file not specified".to_string(), 70 | })?; 71 | 72 | let schema_content = 
fs::read_to_string(schema_file).map_err(CliError::IoError)?; 73 | 74 | let compatibility = parse_compatibility(matches.value_of("compatibility")); 75 | 76 | let mut registry = SchemaRegistry::new(); 77 | registry.set_compatibility(compatibility); 78 | 79 | // Schema compatibility check 80 | validate_schema_compatibility(&registry, topic, &schema_content, &compatibility)?; 81 | 82 | // Register schema 83 | match registry.register_schema(topic, &schema_content) { 84 | Ok(schema) => { 85 | println!( 86 | "Schema registered:\nTopic: {}\nSchema ID: {}\nVersion: {}", 87 | topic, schema.id, schema.version 88 | ); 89 | Ok(()) 90 | } 91 | Err(e) => Err(CliError::SchemaError { 92 | kind: SchemaErrorKind::RegistryError, 93 | message: format!("Failed to register schema: {}", e), 94 | }), 95 | } 96 | } 97 | 98 | pub async fn handle_schema_list_command(matches: &ArgMatches) -> CliResult<()> { 99 | let topic = matches 100 | .value_of("topic") 101 | .ok_or_else(|| CliError::ParseError { 102 | field: "topic".to_string(), 103 | message: "Topic is not specified".to_string(), 104 | })?; 105 | 106 | let registry = SchemaRegistry::new(); 107 | match registry.get_all_schemas(topic) { 108 | Some(schemas) => { 109 | if schemas.is_empty() { 110 | println!("No schemas registered for topic {}", topic); 111 | } else { 112 | println!("Schema list for topic {}:", topic); 113 | for schema in schemas { 114 | println!( 115 | "ID: {}, Version: {}\nDefinition:\n{}", 116 | schema.id, schema.version, schema.definition 117 | ); 118 | } 119 | } 120 | Ok(()) 121 | } 122 | None => Err(CliError::SchemaError { 123 | kind: SchemaErrorKind::NotFound, 124 | message: format!("Topic {} schema not found", topic), 125 | }), 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/bin/commands/send.rs: -------------------------------------------------------------------------------- 1 | use crate::error::{BrokerErrorKind, CliError, CliResult, SchemaErrorKind}; 2
| use clap::ArgMatches; 3 | use pilgrimage::broker::{Broker, MessageSchema}; 4 | use pilgrimage::schema::registry::SchemaRegistry; 5 | 6 | fn validate_args(matches: &ArgMatches) -> CliResult<(String, String)> { 7 | let topic = matches 8 | .value_of("topic") 9 | .ok_or_else(|| CliError::ParseError { 10 | field: "topic".to_string(), 11 | message: "Topic not specified. Please use the --topic option.".to_string(), 12 | })?; 13 | 14 | if topic.trim().is_empty() { 15 | return Err(CliError::ParseError { 16 | field: "topic".to_string(), 17 | message: "Topic name cannot be empty.".to_string(), 18 | }); 19 | } 20 | 21 | let message = matches 22 | .value_of("message") 23 | .ok_or_else(|| CliError::ParseError { 24 | field: "message".to_string(), 25 | message: "No message specified. Please specify the --message option.".to_string(), 26 | })?; 27 | 28 | if message.trim().is_empty() { 29 | return Err(CliError::ParseError { 30 | field: "message".to_string(), 31 | message: "The message content cannot be empty.".to_string(), 32 | }); 33 | } 34 | 35 | Ok((topic.to_string(), message.to_string())) 36 | } 37 | 38 | fn handle_schema( 39 | registry: &SchemaRegistry, 40 | topic: &str, 41 | schema_file: Option<&str>, 42 | ) -> CliResult { 43 | if let Some(schema_path) = schema_file { 44 | let schema_content = 45 | std::fs::read_to_string(schema_path).map_err(CliError::IoError)?; 46 | 47 | let schema = registry 48 | .register_schema(topic, &schema_content) 49 | .map_err(|e| CliError::SchemaError { 50 | kind: SchemaErrorKind::RegistryError, 51 | message: format!("Schema registration error: {}", e), 52 | })?; 53 | 54 | Ok(MessageSchema::new_with_schema(schema)) 55 | } else { 56 | Ok(MessageSchema::new()) 57 | } 58 | } 59 | 60 | pub async fn handle_send_command(matches: &ArgMatches) -> CliResult<()> { 61 | // Validate arguments 62 | let (topic, message) = validate_args(matches)?; 63 | let schema_file = matches.value_of("schema"); 64 | 65 | // Process schema 66 | let registry =
SchemaRegistry::new(); 67 | let topic_schema = handle_schema(®istry, &topic, schema_file)?; 68 | 69 | // Validate message 70 | if let Err(e) = topic_schema.validate(&message) { 71 | return Err(CliError::SchemaError { 72 | kind: SchemaErrorKind::ValidationFailed, 73 | message: format!( 74 | "The message does not comply with the schema definition.\nTopic: {}\nError: {}", 75 | topic, e 76 | ), 77 | }); 78 | } 79 | 80 | // Execute broker operations 81 | send_message_to_broker(message, topic, schema_file.is_some())?; 82 | 83 | Ok(()) 84 | } 85 | 86 | fn send_message_to_broker( 87 | message: String, 88 | topic: String, 89 | schema_registered: bool, 90 | ) -> CliResult<()> { 91 | // Create message schema 92 | let schema = MessageSchema::new() 93 | .with_content(message.clone()) 94 | .with_topic(topic.clone()) 95 | .with_partition(0); 96 | 97 | // Broker initialization and connection 98 | let mut broker = Broker::new( 99 | "cli-broker", 100 | 1, // Number of partitions 101 | 1, // Replication factor 102 | "storage", 103 | ); 104 | 105 | // Send message and process results 106 | broker.send_message(schema).map_err(|e| { 107 | let error_str = e.to_string().to_lowercase(); 108 | match error_str { 109 | e if e.contains("connection") => CliError::BrokerError { 110 | kind: BrokerErrorKind::ConnectionFailed, 111 | message: format!( 112 | "Failed to connect to broker.\nPlease check:\n- The broker is running\n- The connection port is correct\nError details: {}", 113 | e 114 | ), 115 | }, 116 | e if e.contains("timeout") => CliError::BrokerError { 117 | kind: BrokerErrorKind::Timeout, 118 | message: format!( 119 | "Message sending timed out.\nTopic: {}\nMessage length: {} bytes\nError details: {}", 120 | topic, 121 | message.len(), 122 | e 123 | ), 124 | }, 125 | e if e.contains("topic") => CliError::BrokerError { 126 | kind: BrokerErrorKind::TopicNotFound, 127 | message: format!( 128 | "The specified topic '{}' was not found.\n- Check if the topic name is correct\n- The topic will 
be created automatically if it doesn't exist\nError details: {}", 129 | topic, 130 | e 131 | ), 132 | }, 133 | e if e.contains("partition") => CliError::BrokerError { 134 | kind: BrokerErrorKind::PartitionError, 135 | message: format!( 136 | "A partition-related error occurred.\nTopic: {}\nError details: {}", 137 | topic, 138 | e 139 | ), 140 | }, 141 | e if e.contains("operation") => CliError::BrokerError { 142 | kind: BrokerErrorKind::OperationFailed, 143 | message: format!( 144 | "Message sending operation failed.\nTopic: {}\nMessage length: {} bytes\nError details: {}", 145 | topic, 146 | message.len(), 147 | e 148 | ), 149 | }, 150 | _ => CliError::BrokerError { 151 | kind: BrokerErrorKind::Unknown, 152 | message: format!( 153 | "An unexpected error occurred while sending the message.\nTopic: {}\nError details: {}", 154 | topic, 155 | e 156 | ), 157 | }, 158 | } 159 | })?; 160 | 161 | println!("Message sent successfully."); 162 | if schema_registered { 163 | println!("Schema validation is enabled."); 164 | } 165 | 166 | Ok(()) 167 | } 168 | -------------------------------------------------------------------------------- /src/bin/commands/start.rs: -------------------------------------------------------------------------------- 1 | use crate::error::{BrokerErrorKind, CliError, CliResult}; 2 | use clap::ArgMatches; 3 | use pilgrimage::broker::Broker; 4 | use std::sync::Arc; 5 | use tokio::sync::Mutex; 6 | 7 | fn validate_args(matches: &ArgMatches) -> CliResult<(String, usize, usize, String)> { 8 | let id = matches.value_of("id").ok_or_else(|| CliError::ParseError { 9 | field: "broker_id".to_string(), 10 | message: "Broker ID is not specified. 
Please use the --id option.".to_string(), 11 | })?; 12 | 13 | if id.trim().is_empty() { 14 | return Err(CliError::ParseError { 15 | field: "broker_id".to_string(), 16 | message: "Broker ID cannot be empty.".to_string(), 17 | }); 18 | } 19 | 20 | let partitions: usize = matches 21 | .value_of("partitions") 22 | .ok_or_else(|| CliError::ParseError { 23 | field: "partitions".to_string(), 24 | message: "Number of partitions not specified. Please use the --partitions option." 25 | .to_string(), 26 | })? 27 | .parse() 28 | .map_err(|e| CliError::ParseError { 29 | field: "partitions".to_string(), 30 | message: format!( 31 | "Invalid partition number: {} (Please specify a positive integer)", 32 | e 33 | ), 34 | })?; 35 | 36 | if partitions == 0 { 37 | return Err(CliError::ParseError { 38 | field: "partitions".to_string(), 39 | message: "Number of partitions must be at least 1.".to_string(), 40 | }); 41 | } 42 | 43 | let replication: usize = matches 44 | .value_of("replication") 45 | .ok_or_else(|| CliError::ParseError { 46 | field: "replication".to_string(), 47 | message: "Replication factor not specified. Please use the --replication option." 48 | .to_string(), 49 | })? 50 | .parse() 51 | .map_err(|e| CliError::ParseError { 52 | field: "replication".to_string(), 53 | message: format!( 54 | "Invalid replication factor: {} (Please specify a positive integer)", 55 | e 56 | ), 57 | })?; 58 | 59 | if replication == 0 { 60 | return Err(CliError::ParseError { 61 | field: "replication".to_string(), 62 | message: "Replication factor must be at least 1.".to_string(), 63 | }); 64 | } 65 | 66 | let storage = matches 67 | .value_of("storage") 68 | .ok_or_else(|| CliError::ParseError { 69 | field: "storage".to_string(), 70 | message: "Storage path not specified. 
Please use the --storage option.".to_string(), 71 | })?; 72 | 73 | if storage.trim().is_empty() { 74 | return Err(CliError::ParseError { 75 | field: "storage".to_string(), 76 | message: "Storage path cannot be empty.".to_string(), 77 | }); 78 | } 79 | 80 | // Check if storage path exists and create it 81 | if !std::path::Path::new(storage).exists() { 82 | if let Err(e) = std::fs::create_dir_all(storage) { 83 | return Err(CliError::IoError(e)); 84 | } 85 | println!("Created storage directory: {}", storage); 86 | } 87 | 88 | Ok((id.to_string(), partitions, replication, storage.to_string())) 89 | } 90 | 91 | pub async fn handle_start_command(matches: &ArgMatches) -> CliResult<()> { 92 | let (id, partitions, replication, storage) = validate_args(matches)?; 93 | 94 | println!( 95 | "Starting broker {}... Number of partitions: {}, Replication factor: {}, Storage path: {}", 96 | id, partitions, replication, storage 97 | ); 98 | 99 | // Initialize the broker 100 | let broker = Broker::new(&id, partitions, replication, &storage); 101 | let _broker = Arc::new(Mutex::new(broker)); 102 | 103 | // Create PID file for the broker process 104 | let pid = std::process::id(); // Get current process ID 105 | let pid_file = format!("/tmp/pilgrimage_broker_{}.pid", id); 106 | let pid_path = std::path::Path::new(&pid_file); 107 | 108 | if let Err(e) = std::fs::write(pid_path, pid.to_string()) { 109 | return Err(CliError::BrokerError { 110 | kind: BrokerErrorKind::OperationFailed, 111 | message: format!("Failed to create PID file: {}", e), 112 | }); 113 | } 114 | 115 | println!("Created PID file: {} (PID: {})", pid_file, pid); 116 | 117 | // Wait for initialization to complete 118 | tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; 119 | 120 | // Check if storage file exists 121 | let storage_path = std::path::Path::new(&storage); 122 | if !storage_path.exists() { 123 | // If initialization failed, clean up PID file 124 | if let Err(e) = std::fs::remove_file(pid_path) { 
125 | eprintln!("Warning: Failed to remove PID file: {}", e); 126 | } 127 | 128 | return Err(CliError::BrokerError { 129 | kind: BrokerErrorKind::OperationFailed, 130 | message: format!( 131 | "Failed to initialize broker {}.\nStorage directory {} was not created.", 132 | id, storage 133 | ), 134 | }); 135 | } 136 | 137 | println!("Broker {} started successfully", id); 138 | Ok(()) 139 | } 140 | -------------------------------------------------------------------------------- /src/bin/commands/status.rs: -------------------------------------------------------------------------------- 1 | use crate::error::{CliError, CliResult}; 2 | use clap::ArgMatches; 3 | 4 | pub async fn handle_status_command(matches: &ArgMatches) -> CliResult<()> { 5 | let id = matches.value_of("id").ok_or_else(|| CliError::ParseError { 6 | field: "broker_id".to_string(), 7 | message: "Broker ID is not specified".to_string(), 8 | })?; 9 | 10 | println!("Checking status of broker {}...", id); 11 | 12 | // TODO: Implement actual status check process 13 | // let status = match broker.get_status().await { 14 | // Ok(status) => status, 15 | // Err(e) => return Err(CliError::BrokerError { 16 | // kind: BrokerErrorKind::OperationFailed, 17 | // message: format!("Failed to check status: {}", e) 18 | // }) 19 | // }; 20 | // println!("Status: {:?}", status); 21 | 22 | Ok(()) 23 | } 24 | -------------------------------------------------------------------------------- /src/bin/commands/stop.rs: -------------------------------------------------------------------------------- 1 | use crate::error::{BrokerErrorKind, CliError, CliResult}; 2 | use clap::ArgMatches; 3 | 4 | use std::fs; 5 | use std::path::Path; 6 | use std::process::Command; 7 | 8 | pub async fn handle_stop_command(matches: &ArgMatches) -> CliResult<()> { 9 | let id = matches.value_of("id").ok_or_else(|| CliError::ParseError { 10 | field: "broker_id".to_string(), 11 | message: "Broker ID is not specified".to_string(), 12 | })?; 13 | 14 | 
println!("Stopping broker {}...", id);

    // First try to connect to the web console to stop the broker gracefully.
    let stop_via_api = stop_broker_via_api(id).await;
    if stop_via_api.is_ok() {
        println!("Stopped broker {} via web console API", id);
        return Ok(());
    }

    // If the API call failed, fall back to signalling the broker process
    // whose PID was recorded by the `start` command.
    let pid_file = format!("/tmp/pilgrimage_broker_{}.pid", id);
    let pid_path = Path::new(&pid_file);

    if pid_path.exists() {
        match fs::read_to_string(pid_path) {
            Ok(pid_str) => {
                let pid = pid_str
                    .trim()
                    .parse::<i32>()
                    .map_err(|_| CliError::BrokerError {
                        kind: BrokerErrorKind::OperationFailed,
                        message: format!("Failed to parse PID from file: {}", pid_file),
                    })?;

                // Try to terminate the process.
                match stop_process(pid) {
                    Ok(_) => {
                        // Clean up the PID file; a failure here is only a warning
                        // because the broker itself has already been stopped.
                        if let Err(e) = fs::remove_file(pid_path) {
                            eprintln!("Warning: Failed to remove PID file: {}", e);
                        }

                        println!("Stopped broker {} (PID: {})", id, pid);
                        Ok(())
                    }
                    Err(e) => Err(CliError::BrokerError {
                        kind: BrokerErrorKind::OperationFailed,
                        message: format!("Failed to stop broker process: {}", e),
                    }),
                }
            }
            Err(e) => Err(CliError::BrokerError {
                kind: BrokerErrorKind::OperationFailed,
                message: format!("Failed to read PID file: {}", e),
            }),
        }
    } else {
        Err(CliError::BrokerError {
            kind: BrokerErrorKind::NotFound,
            message: format!("Broker {} is not running or PID file not found", id),
        })
    }
}

/// Try to stop a broker by calling the web console API.
///
/// Returns `Ok(())` on an HTTP 2xx response; any connection failure or
/// non-success status is reported as an `Err` string so the caller can
/// fall back to signalling the process directly.
async fn stop_broker_via_api(id: &str) -> Result<(), String> {
    // Use reqwest to call the web console API.
    let client = reqwest::Client::new();
    let response = client
        .post("http://localhost:8080/stop")
        .json(&serde_json::json!({ "id": id }))
        .send()
        .await;

    match response {
        Ok(res) => {
            if res.status().is_success() {
                Ok(())
            } else {
                Err(format!("API returned error: {:?}", res.status()))
            }
        }
        Err(e) => Err(format!("Failed to connect to web console: {}", e)),
    }
}

/// Stop a process by sending a signal.
///
/// On Unix, sends SIGTERM first, waits briefly, then escalates to SIGKILL
/// if the process is still alive. On Windows, force-kills via `taskkill`.
fn stop_process(pid: i32) -> Result<(), String> {
    #[cfg(target_family = "unix")]
    {
        // Needed for ExitStatus::signal() below.
        use std::os::unix::process::ExitStatusExt;

        // Send SIGTERM first to allow a graceful shutdown.
        let status = Command::new("kill")
            .arg(pid.to_string())
            .status()
            .map_err(|e| format!("Failed to send SIGTERM: {}", e))?;

        if !status.success() {
            return Err(format!(
                "Failed to terminate process with exit code: {:?}",
                status.code().or_else(|| status.signal()).unwrap_or(-1)
            ));
        }

        // Wait a bit for the process to terminate.
        std::thread::sleep(std::time::Duration::from_millis(500));

        // Check whether the process still exists (ps -p fails when it is gone).
        let ps_check = Command::new("ps")
            .arg("-p")
            .arg(pid.to_string())
            .output()
            .map_err(|e| format!("Failed to check if process exists: {}", e))?;

        if !ps_check.status.success() {
            return Ok(()); // Process is no longer running
        }

        // Still running: escalate to SIGKILL.
        let kill_status = Command::new("kill")
            .arg("-9")
            .arg(pid.to_string())
            .status()
            .map_err(|e| format!("Failed to send SIGKILL: {}", e))?;

        if kill_status.success() {
            Ok(())
        } else {
            Err(format!(
                "Failed to kill process with exit code: {:?}",
                kill_status
                    .code()
                    .or_else(|| kill_status.signal())
                    .unwrap_or(-1)
            ))
        }
    }

    #[cfg(target_family = "windows")]
    {
        let status = Command::new("taskkill")
            .arg("/PID")
            .arg(pid.to_string())
            .arg("/F") // Force kill
            .status()
            .map_err(|e| format!("Failed to terminate process: {}", e))?;

        if status.success() {
            Ok(())
        } else {
            Err(format!(
                "Failed to kill process with exit code: {:?}",
                status.code().unwrap_or(-1)
            ))
        }
    }
}

// --------------------------------------------------------------------------
// File: src/bin/error/mod.rs
// --------------------------------------------------------------------------

use std::error::Error;
use std::fmt;

/// Errors surfaced by the `pilgrimage` command-line interface.
#[derive(Debug)]
pub enum CliError {
    /// No subcommand was supplied on the command line.
    NoCommand,
    /// An unrecognized subcommand was supplied.
    UnknownCommand(String),
    /// A recognized subcommand was invoked incorrectly.
    InvalidCommand(String),
    /// A broker-side operation failed; `kind` classifies the failure.
    BrokerError {
        kind: BrokerErrorKind,
        message: String,
    },
    /// A command-line argument could not be parsed.
    ParseError {
        field: String,
        message: String,
    },
    /// An underlying I/O operation failed.
    IoError(std::io::Error),
    /// A schema-related operation failed; `kind` classifies the failure.
    SchemaError {
        kind: SchemaErrorKind,
        message: String,
    },
}

/// Fine-grained classification of broker failures.
#[derive(Debug)]
pub enum BrokerErrorKind {
    ConnectionFailed,
    OperationFailed,
    NotFound,
    Timeout,
    TopicNotFound,
    PartitionError,
    Unknown,
}

/// Fine-grained classification of schema failures.
#[derive(Debug)]
pub enum SchemaErrorKind {
    ValidationFailed,
    IncompatibleChange,
    RegistryError,
    NotFound,
}

impl fmt::Display for CliError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            CliError::NoCommand => write!(f, "No command specified"),
            CliError::UnknownCommand(cmd) => write!(f, "Unknown command: {}", cmd),
            CliError::InvalidCommand(cmd) => write!(f, "Invalid {} command", cmd),
            CliError::BrokerError { kind, message } => {
                let kind_str = match kind {
                    BrokerErrorKind::ConnectionFailed => "Connection error",
                    BrokerErrorKind::OperationFailed => "Operation error",
                    BrokerErrorKind::NotFound => "Broker not found",
                    BrokerErrorKind::Timeout => "Timeout",
                    BrokerErrorKind::TopicNotFound => "Topic not found",
                    BrokerErrorKind::PartitionError => "Partition error",
                    BrokerErrorKind::Unknown => "Unknown error",
                };
                write!(f, "{}: {}", kind_str, message)
            }
            CliError::ParseError { field, message } => {
                write!(f, "Parse error for {}: {}", field, message)
            }
            CliError::IoError(e) => write!(f, "IO error: {}", e),
            CliError::SchemaError { kind, message } => {
                let kind_str = match kind {
                    SchemaErrorKind::ValidationFailed => "Schema validation error",
                    SchemaErrorKind::IncompatibleChange => "Compatibility error",
                    SchemaErrorKind::RegistryError => "Registry error",
                    SchemaErrorKind::NotFound => "Schema not found",
                };
                write!(f, "{}: {}", kind_str, message)
            }
        }
    }
}

impl Error for CliError {}

impl From<std::io::Error> for CliError {
    fn from(err: std::io::Error) -> Self {
        CliError::IoError(err)
    }
}

/// Convenience alias used by all CLI command handlers.
pub type CliResult<T> = Result<T, CliError>;

// --------------------------------------------------------------------------
// File: src/broker/config.rs
// --------------------------------------------------------------------------

/// Per-topic configuration: partition count and replication factor.
#[derive(Clone, Debug, Default)]
pub struct TopicConfig {
    pub num_partitions: usize,
    pub replication_factor: usize,
}

// --------------------------------------------------------------------------
// File: src/broker/consumer/mod.rs
// --------------------------------------------------------------------------

//! Module for the consumer.
//!
//! The consumer module contains the implementation of the consumer struct and its methods.
//!
//! Main submodules:
//! * [`group`] - Contains the implementation of the consumer group.

pub mod group;

// --------------------------------------------------------------------------
// File: src/broker/error.rs
// --------------------------------------------------------------------------

//! Module for handling errors in the broker system.
//!
//! This module provides an enum for categorizing and handling errors that may
//! arise within the broker system, such as issues with topics, partitions,
//! acknowledgments, I/O operations, and scaling.
//!
//! # Examples
//! The following example demonstrates how to create a new `BrokerError` and
//! convert it to a string:
//! ```
//! use pilgrimage::broker::error::BrokerError;
//!
//! let error = BrokerError::TopicError("topic not found".to_string());
//! assert_eq!(format!("{}", error), "Topic error: topic not found");
//! ```

use std::error::Error;
use std::fmt;

/// Represents different types of errors that can occur in the broker.
///
/// This enum is used to categorize and handle errors that may arise within
/// the broker system, such as issues with topics, partitions, acknowledgments,
/// I/O operations, and scaling.
#[derive(Debug)]
pub enum BrokerError {
    /// Represents an error related to a specific topic.
    ///
    /// This error is used when a topic is not found or is invalid.
    TopicError(String),
    /// Represents an error related to a specific partition.
    ///
    /// This error is used when a partition is not found or is invalid.
    PartitionError(String),
    /// Represents an error related to acknowledgments.
    ///
    /// This error is used when an acknowledgment fails or is invalid.
    AckError(String),
    /// Represents an error related to I/O operations.
    ///
    /// This error is used when an I/O operation fails.
    IoError(std::io::Error),
    /// Represents an error related to scaling operations.
    ///
    /// This error is used when a scaling operation fails — for example due to
    /// a lack of resources, a failure to communicate with the scaling system,
    /// or an invalid scaling operation.
    ScalingError(String),
}

impl From<std::io::Error> for BrokerError {
    /// Converts a standard I/O error into a [`BrokerError`].
    ///
    /// # Examples
    ///
    /// ```
    /// use pilgrimage::broker::error::BrokerError;
    /// use std::io;
    ///
    /// let io_error = io::Error::new(io::ErrorKind::Other, "an I/O error");
    /// let broker_error: BrokerError = io_error.into();
    /// if let BrokerError::IoError(err) = broker_error {
    ///     assert_eq!(err.to_string(), "an I/O error");
    /// }
    /// ```
    fn from(error: std::io::Error) -> Self {
        BrokerError::IoError(error)
    }
}

impl fmt::Display for BrokerError {
    /// Formats the BrokerError for display purposes.
    ///
    /// # Examples
    ///
    /// ```
    /// use pilgrimage::broker::error::BrokerError;
    ///
    /// let error = BrokerError::TopicError("topic not found".to_string());
    /// assert_eq!(format!("{}", error), "Topic error: topic not found");
    /// ```
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            BrokerError::TopicError(msg) => write!(f, "Topic error: {}", msg),
            BrokerError::PartitionError(msg) => write!(f, "Partition error: {}", msg),
            BrokerError::AckError(msg) => write!(f, "Acknowledgment error: {}", msg),
            BrokerError::IoError(err) => write!(f, "IO error: {}", err),
            BrokerError::ScalingError(msg) => write!(f, "Scaling Error: {}", msg),
        }
    }
}

impl Error for BrokerError {
    /// Returns the source of the error, if any.
    ///
    /// Only [`BrokerError::IoError`] carries an underlying source; all other
    /// variants return `None`.
    ///
    /// # Examples
    ///
    /// ```
    /// use pilgrimage::broker::error::BrokerError;
    /// use std::error::Error;
    /// use std::io;
    ///
    /// let io_error = io::Error::new(io::ErrorKind::Other, "an I/O error");
    /// let broker_error: BrokerError = io_error.into();
    /// assert!(broker_error.source().is_some());
    /// ```
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        match self {
            BrokerError::IoError(err) => Some(err),
            _ => None,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::io;

    /// Tests the display format for the [`BrokerError::TopicError`] enum.
    #[test]
    fn test_topic_error() {
        let error = BrokerError::TopicError("Test topic error".to_string());
        assert_eq!(format!("{}", error), "Topic error: Test topic error");
    }

    /// Tests the display format for the [`BrokerError::PartitionError`] enum.
    #[test]
    fn test_partition_error() {
        let error = BrokerError::PartitionError("Test partition error".to_string());
        assert_eq!(
            format!("{}", error),
            "Partition error: Test partition error"
        );
    }

    /// Tests the display format for the [`BrokerError::AckError`] enum is correct.
    #[test]
    fn test_ack_error() {
        let error = BrokerError::AckError("Test ack error".to_string());
        assert_eq!(format!("{}", error), "Acknowledgment error: Test ack error");
    }

    /// Tests the display format for the [`BrokerError::IoError`] enum.
    #[test]
    fn test_io_error() {
        let io_error = io::Error::new(io::ErrorKind::Other, "Test IO error");
        let error = BrokerError::IoError(io_error);
        assert_eq!(format!("{}", error), "IO error: Test IO error");
    }

    /// Tests the conversion from an I/O error to a [`BrokerError`].
    #[test]
    fn test_from_io_error() {
        let io_error = io::Error::new(io::ErrorKind::Other, "Test IO error");
        let error: BrokerError = io_error.into();
        assert_eq!(format!("{}", error), "IO error: Test IO error");
    }
}

// --------------------------------------------------------------------------
// File: src/broker/leader/election.rs
// --------------------------------------------------------------------------
// Module for leader election.
//
The `LeaderElection` struct manages the leader election process for a broker. 4 | //! This struct provides methods to create a new leader election instance, 5 | //! start the election process, and request votes from peers. 6 | //! 7 | //! # Example 8 | //! The following example demonstrates how to create a new leader election instance 9 | //! and start the election process. 10 | //! ``` 11 | //! use pilgrimage::broker::leader::election::LeaderElection; 12 | //! use pilgrimage::broker::leader::state::BrokerState; 13 | //! use std::collections::HashMap; 14 | //! 15 | //! let peers = HashMap::new(); 16 | //! let mut election = LeaderElection::new("broker1", peers); 17 | //! let elected = election.start_election(); 18 | //! 19 | //! // Check if the broker was elected as leader 20 | //! // Note: In a single-node setup without peers, the election will succeed 21 | //! assert!(elected); 22 | //! assert_eq!(*election.state.lock().unwrap(), BrokerState::Leader); 23 | //! ``` 24 | 25 | use crate::broker::leader::state::BrokerState; 26 | use std::collections::HashMap; 27 | use std::sync::{Arc, Mutex}; 28 | 29 | #[derive(Clone)] 30 | /// The `LeaderElection` struct manages the leader election process for a broker. 31 | /// 32 | /// This struct provides methods to create a new leader election instance, 33 | /// start the election process, and request votes from peers. 34 | pub struct LeaderElection { 35 | /// The ID of the broker. 36 | pub node_id: String, 37 | /// The current state of the broker. 38 | /// This is shared between the leader election and heartbeat threads. 39 | pub state: Arc>, 40 | /// A hashmap of peer broker IDs and their addresses. 41 | pub votes: Arc>>, 42 | } 43 | 44 | impl LeaderElection { 45 | /// Creates a new leader election instance. 46 | /// 47 | /// # Arguments 48 | /// 49 | /// * `broker_id` - The ID of the broker. 50 | /// * `peers` - A hashmap of peer broker IDs and their addresses. 
51 | /// 52 | /// # Examples 53 | /// 54 | /// ``` 55 | /// use pilgrimage::broker::leader::election::LeaderElection; 56 | /// use std::collections::HashMap; 57 | /// 58 | /// let peers = HashMap::new(); 59 | /// let election = LeaderElection::new("broker1", peers); 60 | /// assert_eq!(election.node_id, "broker1"); 61 | /// ``` 62 | pub fn new(node_id: &str, votes: HashMap) -> Self { 63 | Self { 64 | node_id: node_id.to_string(), 65 | state: Arc::new(Mutex::new(BrokerState::Follower)), 66 | votes: Arc::new(Mutex::new(votes)), 67 | } 68 | } 69 | 70 | /// Starts the leader election process. 71 | /// 72 | /// This method transitions the broker to a candidate state, increments the current term, 73 | /// and requests votes from peers. 74 | /// If the broker receives a majority of votes, it transitions 75 | /// to a leader state and starts the heartbeat thread. 76 | /// 77 | /// # Returns 78 | /// `true` if the broker was elected as leader, `false` otherwise. 79 | /// 80 | /// # Examples 81 | /// 82 | /// ``` 83 | /// use pilgrimage::broker::leader::election::LeaderElection; 84 | /// use std::collections::HashMap; 85 | /// 86 | /// let peers = HashMap::new(); 87 | /// let mut election = LeaderElection::new("broker1", peers); 88 | /// let elected = election.start_election(); 89 | /// assert!(elected); 90 | /// ``` 91 | pub fn start_election(&mut self) -> bool { 92 | *self.state.lock().unwrap() = BrokerState::Candidate; 93 | 94 | // Add your own vote 95 | self.receive_vote(self.node_id.clone(), self.node_id.clone()); 96 | 97 | let votes = self.get_vote_count(); 98 | let total_nodes = { 99 | if let Ok(votes_map) = self.votes.lock() { 100 | std::cmp::max(1, votes_map.len()) // At a minimum, include yourself. 101 | } else { 102 | 1 103 | } 104 | }; 105 | let votes_needed = (total_nodes + 1) / 2; 106 | 107 | if votes >= votes_needed { 108 | self.become_leader(); 109 | true 110 | } else { 111 | false 112 | } 113 | } 114 | 115 | /// Transitions the broker to a leader state. 
116 | pub fn become_leader(&mut self) { 117 | *self.state.lock().unwrap() = BrokerState::Leader; 118 | } 119 | 120 | /// Transitions the broker to a follower state. 121 | pub fn step_down(&mut self) { 122 | *self.state.lock().unwrap() = BrokerState::Follower; 123 | } 124 | 125 | /// Checks if the broker is the leader. 126 | /// 127 | /// # Returns 128 | /// `true` if the broker is the leader, `false` otherwise. 129 | pub fn is_leader(&self) -> bool { 130 | *self.state.lock().unwrap() == BrokerState::Leader 131 | } 132 | 133 | /// Receives a vote from a peer. 134 | /// 135 | /// # Arguments 136 | /// 137 | /// * `voter_id` - The ID of the voter broker. 138 | /// * `vote` - The vote (usually the ID of the broker being voted for). 139 | pub fn receive_vote(&mut self, voter_id: String, vote: String) { 140 | if let Ok(mut votes) = self.votes.lock() { 141 | votes.insert(voter_id, vote); 142 | } 143 | } 144 | 145 | /// Gets the number of votes received. 146 | /// 147 | /// # Returns 148 | /// The number of votes. 149 | pub fn get_vote_count(&self) -> usize { 150 | if let Ok(votes) = self.votes.lock() { 151 | votes.len() 152 | } else { 153 | 0 154 | } 155 | } 156 | 157 | /// Gets the current state of the broker. 158 | /// 159 | /// # Returns 160 | /// The current state of the broker. 161 | pub fn get_state(&self) -> BrokerState { 162 | self.state.lock().unwrap().clone() 163 | } 164 | 165 | /// Sets the state of the broker. 166 | /// 167 | /// # Arguments 168 | /// 169 | /// * `state` - The new state of the broker. 170 | pub fn set_state(&self, state: BrokerState) { 171 | if let Ok(mut current_state) = self.state.lock() { 172 | *current_state = state; 173 | } 174 | } 175 | 176 | /// Gets the votes received from peers. 177 | /// 178 | /// # Returns 179 | /// A vector of votes. 
180 | pub fn get_votes(&self) -> Vec { 181 | if let Ok(votes) = self.votes.lock() { 182 | votes.values().cloned().collect() 183 | } else { 184 | Vec::new() 185 | } 186 | } 187 | } 188 | 189 | #[cfg(test)] 190 | mod tests { 191 | use super::*; 192 | 193 | /// Tests the leader election process. 194 | /// 195 | /// # Purpose 196 | /// This test verifies that a broker can be elected as leader. 197 | /// 198 | /// # Steps 199 | /// 1. Create a new leader election instance. 200 | /// 2. Start the election process. 201 | /// 3. Verify that the broker was elected as leader. 202 | #[test] 203 | fn test_leader_election() { 204 | let mut peers = HashMap::new(); 205 | peers.insert("peer1".to_string(), "localhost:8081".to_string()); 206 | peers.insert("peer2".to_string(), "localhost:8082".to_string()); 207 | 208 | let mut election = LeaderElection::new("broker1", peers); 209 | assert_eq!(*election.state.lock().unwrap(), BrokerState::Follower); 210 | 211 | let elected = election.start_election(); 212 | assert!(elected); 213 | assert_eq!(*election.state.lock().unwrap(), BrokerState::Leader); 214 | } 215 | } 216 | -------------------------------------------------------------------------------- /src/broker/leader/heartbeat.rs: -------------------------------------------------------------------------------- 1 | //! Module for the heartbeat mechanism of the broker leader. 2 | //! 3 | //! The `Heartbeat` struct manages the heartbeat mechanism for a broker. 4 | //! This struct provides methods to create a new heartbeat instance, 5 | //! start the heartbeat mechanism, send heartbeats to peers, and check for timeouts. 6 | //! 7 | //! The heartbeat is sent every 500 milliseconds and monitored every 100 milliseconds. 8 | 9 | use super::election::LeaderElection; 10 | use super::state::BrokerState; 11 | use std::sync::{Arc, Mutex}; 12 | use std::time::{Duration, Instant}; 13 | 14 | /// The `Heartbeat` struct manages the heartbeat mechanism for a broker. 
15 | /// 16 | /// This struct provides methods to create a new heartbeat instance, 17 | /// start the heartbeat mechanism, send heartbeats to peers, and check for timeouts. 18 | pub struct Heartbeat { 19 | pub last_beat: Arc>, 20 | pub timeout: Duration, 21 | } 22 | 23 | impl Heartbeat { 24 | /// Creates a new heartbeat instance. 25 | /// 26 | /// # Arguments 27 | /// 28 | /// * `timeout` - The duration after which the heartbeat times out. 29 | /// 30 | /// # Examples 31 | /// 32 | /// ``` 33 | /// use pilgrimage::broker::leader::heartbeat::Heartbeat; 34 | /// use std::time::Duration; 35 | /// 36 | /// let heartbeat = Heartbeat::new(Duration::from_secs(1)); 37 | /// assert!(heartbeat.last_beat.lock().unwrap().elapsed() < Duration::from_secs(1)); 38 | /// ``` 39 | pub fn new(timeout: Duration) -> Self { 40 | Heartbeat { 41 | last_beat: Arc::new(Mutex::new(Instant::now())), 42 | timeout, 43 | } 44 | } 45 | 46 | /// Starts the heartbeat mechanism. 47 | /// 48 | /// This method starts two threads: 49 | /// 1. A thread to send heartbeats to peers. 50 | /// 2. A thread to monitor the heartbeat and start an election if the heartbeat times out. 51 | /// 52 | /// The heartbeat is sent every 500 milliseconds and monitored every 100 milliseconds. 53 | /// 54 | /// # Arguments 55 | /// 56 | /// * `election` - The leader election instance. 
57 | /// 58 | /// # Examples 59 | /// 60 | /// ``` 61 | /// use pilgrimage::broker::leader::heartbeat::Heartbeat; 62 | /// use pilgrimage::broker::leader::election::LeaderElection; 63 | /// use std::collections::HashMap; 64 | /// use std::time::Duration; 65 | /// use std::sync::{Arc, Mutex}; 66 | /// 67 | /// let peers = HashMap::new(); 68 | /// let election = Arc::new(Mutex::new(LeaderElection::new("broker1", peers))); 69 | /// Heartbeat::start(election); 70 | /// ``` 71 | pub fn start(election: Arc>) { 72 | let heartbeat = Arc::new(Self::new(Duration::from_secs(1))); 73 | 74 | // Heartbeat Transmission Thread 75 | let send_election = election.clone(); 76 | let send_heartbeat = heartbeat.clone(); 77 | std::thread::spawn(move || { 78 | loop { 79 | { 80 | let election_guard = send_election.lock().unwrap(); 81 | if *election_guard.state.lock().unwrap() != BrokerState::Leader { 82 | break; 83 | } 84 | Self::send_heartbeat(&election_guard); 85 | } 86 | *send_heartbeat.last_beat.lock().unwrap() = Instant::now(); 87 | std::thread::sleep(Duration::from_millis(500)); 88 | } 89 | }); 90 | 91 | // Heartbeat monitoring thread 92 | let monitor_election = election; 93 | let monitor_heartbeat = heartbeat; 94 | std::thread::spawn(move || { 95 | loop { 96 | { 97 | let mut election_guard = monitor_election.lock().unwrap(); 98 | if *election_guard.state.lock().unwrap() == BrokerState::Leader { 99 | break; 100 | } 101 | if Self::check_timeout(&monitor_heartbeat) { 102 | election_guard.start_election(); 103 | } 104 | } 105 | std::thread::sleep(Duration::from_millis(100)); 106 | } 107 | }); 108 | } 109 | 110 | /// Sends a heartbeat to the peers. 111 | /// 112 | /// This method sends a heartbeat to all the peers in the leader election. 113 | /// **Unfortunately, the actual network communication is not implemented yet.** 114 | /// 115 | /// # Arguments 116 | /// 117 | /// * `election` - The leader election instance. 
118 | /// 119 | /// # Examples 120 | /// 121 | /// ``` 122 | /// use pilgrimage::broker::leader::heartbeat::Heartbeat; 123 | /// use pilgrimage::broker::leader::election::LeaderElection; 124 | /// use std::collections::HashMap; 125 | /// 126 | /// let peers = HashMap::new(); 127 | /// let election = LeaderElection::new("broker1", peers); 128 | /// Heartbeat::send_heartbeat(&election); 129 | /// ``` 130 | pub fn send_heartbeat(election: &LeaderElection) { 131 | if let Ok(votes) = election.votes.lock() { 132 | for (_peer_id, _) in votes.iter() { 133 | // TODO Implementing actual network communication here 134 | } 135 | } 136 | } 137 | 138 | /// Checks if the heartbeat has timed out. 139 | /// 140 | /// This method checks if the heartbeat has timed out based on the timeout duration. 141 | /// 142 | /// # Returns 143 | /// * `bool` - Returns `true` if the heartbeat has timed out, otherwise `false`. 144 | fn check_timeout(&self) -> bool { 145 | let last = *self.last_beat.lock().unwrap(); 146 | last.elapsed() > self.timeout 147 | } 148 | } 149 | 150 | #[cfg(test)] 151 | mod tests { 152 | use super::*; 153 | 154 | /// Tests the timeout of the heartbeat. 155 | /// 156 | /// # Purpose 157 | /// The purpose of this test is to verify that the heartbeat times out 158 | /// after the specified duration. 159 | /// 160 | /// # Steps 161 | /// 1. Create a new heartbeat instance with a timeout of 100 milliseconds. 162 | /// 2. Sleep for 150 milliseconds. 163 | /// 3. Check if the heartbeat has timed out. 164 | #[test] 165 | fn test_heartbeat_timeout() { 166 | let heartbeat = Heartbeat::new(Duration::from_millis(100)); 167 | std::thread::sleep(Duration::from_millis(150)); 168 | assert!(heartbeat.check_timeout()); 169 | } 170 | 171 | /// Tests the heartbeat within the timeout. 172 | /// 173 | /// # Purpose 174 | /// The purpose of this test is to verify that the heartbeat does not time out 175 | /// within the specified duration. 176 | /// 177 | /// # Steps 178 | /// 1. 
Create a new heartbeat instance with a timeout of 100 milliseconds. 179 | /// 2. Check if the heartbeat has timed out. 180 | #[test] 181 | fn test_heartbeat_within_timeout() { 182 | let heartbeat = Heartbeat::new(Duration::from_millis(100)); 183 | assert!(!heartbeat.check_timeout()); 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /src/broker/leader/mod.rs: -------------------------------------------------------------------------------- 1 | //! Module for leader election and broker leader state. 2 | //! 3 | //! This module contains the implementation of leader election and broker leader state. 4 | 5 | pub mod election; 6 | pub mod heartbeat; 7 | pub mod state; 8 | 9 | use std::collections::HashMap; 10 | use std::sync::{Arc, Mutex}; 11 | 12 | // Re-export types from submodules 13 | pub use election::LeaderElection as Election; 14 | pub use state::BrokerState; 15 | 16 | #[derive(Clone)] 17 | pub struct LeaderElection { 18 | pub node_id: String, 19 | pub state: Arc>, 20 | pub peers: Arc>>, 21 | pub terms: Arc>>, 22 | pub current_term: Arc>, 23 | } 24 | 25 | impl LeaderElection { 26 | pub fn new(node_id: &str, peers: HashMap) -> Self { 27 | Self { 28 | node_id: node_id.to_string(), 29 | state: Arc::new(Mutex::new(BrokerState::Follower)), 30 | peers: Arc::new(Mutex::new(peers)), 31 | terms: Arc::new(Mutex::new(HashMap::new())), 32 | current_term: Arc::new(Mutex::new(0)), 33 | } 34 | } 35 | 36 | pub fn get_term(&self, node_id: &str) -> u64 { 37 | self.terms 38 | .lock() 39 | .unwrap() 40 | .get(node_id) 41 | .copied() 42 | .unwrap_or(0) 43 | } 44 | 45 | pub fn increment_term(&self, node_id: &str) { 46 | let mut terms = self.terms.lock().unwrap(); 47 | let term = terms.entry(node_id.to_string()).or_insert(0); 48 | *term += 1; 49 | } 50 | 51 | pub fn update_term(&self, node_id: &str, term: u64) { 52 | let mut terms = self.terms.lock().unwrap(); 53 | terms.insert(node_id.to_string(), term); 54 | } 55 | } 56 | 
-------------------------------------------------------------------------------- /src/broker/leader/state.rs: -------------------------------------------------------------------------------- 1 | //! Module for the broker leader state. 2 | //! 3 | //! The `BrokerState` enum represents the different states a broker can be in. 4 | //! A broker can be a follower, a candidate in an election, or a leader. 5 | //! 6 | //! The `Term` struct represents the term information for a broker. 7 | //! It contains the current term number and the ID of the broker that received the vote. 8 | 9 | use std::fmt; 10 | use std::sync::atomic::AtomicU64; 11 | 12 | /// The `BrokerState` enum represents the different states a broker can be in. 13 | /// 14 | /// A broker can be a follower, a candidate in an election, or a leader. 15 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 16 | pub enum BrokerState { 17 | /// The broker is a follower, not leading an election or managing state. 18 | Follower, 19 | /// The broker is a candidate in an election, trying to become a leader. 20 | Candidate, 21 | /// The broker is the leader, managing the state and directing the followers. 22 | Leader, 23 | } 24 | 25 | impl fmt::Display for BrokerState { 26 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 27 | match self { 28 | BrokerState::Follower => write!(f, "Follower"), 29 | BrokerState::Candidate => write!(f, "Candidate"), 30 | BrokerState::Leader => write!(f, "Leader"), 31 | } 32 | } 33 | } 34 | 35 | impl Default for BrokerState { 36 | fn default() -> Self { 37 | BrokerState::Follower 38 | } 39 | } 40 | 41 | /// The `Term` struct represents the term information for a broker. 42 | /// 43 | /// It contains the current term number and the ID of the broker that received the vote. 44 | pub struct Term { 45 | /// The current term number. 46 | pub current: AtomicU64, 47 | /// The ID of the broker that received the vote. 
48 | pub voted_for: Option, 49 | } 50 | -------------------------------------------------------------------------------- /src/broker/log_compression.rs: -------------------------------------------------------------------------------- 1 | //! Module for compressing and decompressing log files using gzip. 2 | //! 3 | //! The [`LogCompressor`] struct provides methods for compressing and decompressing log files. 4 | //! 5 | //! It is useful for managing log file sizes and ensuring efficient storage. 6 | 7 | use flate2::Compression; 8 | use flate2::write::{GzDecoder, GzEncoder}; 9 | use std::fs::File; 10 | use std::io::{self, Read, Write}; 11 | use std::path::Path; 12 | 13 | /// A utility for compressing and decompressing log files. 14 | /// 15 | /// It uses the gzip format to compress and decompress files. 16 | /// 17 | /// It is useful for managing log file sizes and ensuring efficient storage. 18 | pub struct LogCompressor; 19 | 20 | impl LogCompressor { 21 | /// Compresses a file using gzip compression. 22 | /// 23 | /// Compresses the contents of the input file and writes the compressed data to the output file. 24 | /// 25 | /// # Arguments 26 | /// * `input_path` - The path to the input file. 27 | /// * `output_path` - The path to the output file. 28 | /// 29 | /// # Returns 30 | /// An `io::Result` indicating the success or failure of the operation. 31 | /// 32 | /// # Errors 33 | /// If the input file cannot be read, 34 | /// or the output file cannot be written to, an error is returned. 
35 | pub fn compress_file, Q: AsRef>( 36 | input_path: P, 37 | output_path: Q, 38 | ) -> io::Result<()> { 39 | let input_file = File::open(input_path)?; 40 | let output_file = File::create(output_path)?; 41 | let mut encoder = GzEncoder::new(output_file, Compression::default()); 42 | let mut buffer = Vec::new(); 43 | input_file.take(1024 * 1024).read_to_end(&mut buffer)?; 44 | encoder.write_all(&buffer)?; 45 | encoder.finish()?; 46 | Ok(()) 47 | } 48 | 49 | /// Decompresses a gzip-compressed file. 50 | /// 51 | /// Reads the compressed data from the input file 52 | /// and writes the decompressed data to the output file. 53 | /// 54 | /// # Arguments 55 | /// * `input_path` - The path to the input file. 56 | /// * `output_path` - The path to the output file. 57 | /// 58 | /// # Returns 59 | /// An `io::Result` indicating the success or failure of the operation. 60 | /// 61 | /// # Errors 62 | /// If the input file cannot be read, 63 | /// or the output file cannot be written to, an error is returned. 64 | pub fn decompress_file>(input_path: P, output_path: P) -> io::Result<()> { 65 | let input_file = File::open(input_path)?; 66 | let output_file = File::create(output_path)?; 67 | let mut decoder = GzDecoder::new(output_file); 68 | let mut buffer = Vec::new(); 69 | input_file.take(1024 * 1024).read_to_end(&mut buffer)?; 70 | decoder.write_all(&buffer)?; 71 | decoder.finish()?; 72 | Ok(()) 73 | } 74 | } 75 | 76 | #[cfg(test)] 77 | mod tests { 78 | use super::*; 79 | use std::fs::{File, remove_file}; 80 | use std::io::{Read, Write}; 81 | use tempfile::tempdir; 82 | 83 | /// Tests for compressing and decompressing files. 84 | /// 85 | /// # Purpose 86 | /// The tests verify that the [`LogCompressor`] can compress and decompress files correctly. 87 | /// 88 | /// # Steps 89 | /// 1. Create a temporary directory. 90 | /// 2. Create an input file with some content. 91 | /// 3. Compress the input file. 92 | /// 4. Decompress the compressed file. 93 | /// 5. 
Verify that the decompressed file has the same content as the input file. 94 | /// 6. Clean up the temporary directory. 95 | #[test] 96 | fn test_compress_and_decompress() { 97 | let dir = tempdir().unwrap(); 98 | let input_path = dir.path().join("input.txt"); 99 | let compressed_path = dir.path().join("output.gz"); 100 | let decompressed_path = dir.path().join("decompressed.txt"); 101 | 102 | { 103 | let mut file = File::create(&input_path).unwrap(); 104 | writeln!(file, "Hello, compression!").unwrap(); 105 | } 106 | 107 | // Normal Flow: compression 108 | LogCompressor::compress_file(&input_path, &compressed_path).unwrap(); 109 | assert!(compressed_path.exists()); 110 | 111 | // Normal Flow: Defrosting 112 | LogCompressor::decompress_file(&compressed_path, &decompressed_path).unwrap(); 113 | assert!(decompressed_path.exists()); 114 | 115 | // Checking the contents 116 | let mut content = String::default(); 117 | let mut file = File::open(&decompressed_path).unwrap(); 118 | file.read_to_string(&mut content).unwrap(); 119 | assert_eq!(content.trim(), "Hello, compression!"); 120 | 121 | let _ = remove_file(&input_path); 122 | let _ = remove_file(&compressed_path); 123 | let _ = remove_file(&decompressed_path); 124 | } 125 | 126 | /// Tests for compressing a file that does not exist. 127 | /// 128 | /// # Purpose 129 | /// The test verifies that the [`LogCompressor::compress_file`] method 130 | /// returns an error when the input file does not exist. 131 | /// 132 | /// # Steps 133 | /// 1. Create a temporary directory. 134 | /// 2. Attempt to compress a file that does not exist. 135 | /// 3. Verify that the method returns an error. 136 | #[test] 137 | fn test_compress_file_not_found() { 138 | let dir = tempdir().unwrap(); 139 | let missing_input = dir.path().join("no_such_file.txt"); 140 | let compressed = dir.path().join("compressed.gz"); 141 | 142 | // Exceptional Flow: The input file does not exist. 
143 | let result = LogCompressor::compress_file(&missing_input, &compressed); 144 | assert!(result.is_err()); 145 | } 146 | 147 | /// Tests for decompressing a file that does not exist. 148 | /// 149 | /// # Purpose 150 | /// The test verifies that the [`LogCompressor::decompress_file`] method 151 | /// returns an error when the input file does not exist. 152 | /// 153 | /// # Steps 154 | /// 1. Create a temporary directory. 155 | /// 2. Attempt to decompress a file that does not exist. 156 | /// 3. Verify that the method returns an error. 157 | #[test] 158 | fn test_decompress_file_not_found() { 159 | let dir = tempdir().unwrap(); 160 | let missing_input = dir.path().join("no_such_file.gz"); 161 | let output = dir.path().join("output.txt"); 162 | 163 | // Exceptional Flow: The input file does not exist. 164 | let result = LogCompressor::decompress_file(&missing_input, &output); 165 | assert!(result.is_err()); 166 | } 167 | 168 | /// Tests for compressing and decompressing an empty file. 169 | /// 170 | /// # Purpose 171 | /// The test verifies that the [`LogCompressor`] can compress and decompress an empty file. 172 | /// 173 | /// # Steps 174 | /// 1. Create a temporary directory. 175 | /// 2. Create an empty file. 176 | /// 3. Compress the empty file. 177 | /// 4. Decompress the compressed file. 178 | /// 5. Verify that the decompressed file is also empty. 179 | /// 6. Clean up the temporary directory. 
180 | #[test] 181 | fn test_compress_and_decompress_empty_file() { 182 | let dir = tempdir().unwrap(); 183 | let input_path = dir.path().join("empty.txt"); 184 | let compressed = dir.path().join("empty.gz"); 185 | let decompressed = dir.path().join("decompressed_empty.txt"); 186 | 187 | // Edge case: Create a blank file 188 | File::create(&input_path).unwrap(); 189 | 190 | LogCompressor::compress_file(&input_path, &compressed).unwrap(); 191 | assert!(compressed.exists()); 192 | 193 | LogCompressor::decompress_file(&compressed, &decompressed).unwrap(); 194 | assert!(decompressed.exists()); 195 | 196 | // Check the contents 197 | let mut buf = String::default(); 198 | let mut file = File::open(&decompressed).unwrap(); 199 | file.read_to_string(&mut buf).unwrap(); 200 | assert_eq!(buf, ""); 201 | 202 | let _ = remove_file(&input_path); 203 | let _ = remove_file(&compressed); 204 | let _ = remove_file(&decompressed); 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /src/broker/metrics.rs: -------------------------------------------------------------------------------- 1 | use std::time::Instant; 2 | 3 | #[derive(Debug, Clone)] 4 | pub struct SystemMetrics { 5 | pub cpu_usage: f32, 6 | pub memory_usage: f32, 7 | pub network_io: f32, 8 | pub timestamp: Instant, 9 | } 10 | 11 | impl SystemMetrics { 12 | pub fn new(cpu_usage: f32, memory_usage: f32, network_io: f32) -> Self { 13 | Self { 14 | cpu_usage, 15 | memory_usage, 16 | network_io, 17 | timestamp: Instant::now(), 18 | } 19 | } 20 | 21 | pub fn is_overloaded(&self, threshold: f32) -> bool { 22 | self.cpu_usage > threshold || self.memory_usage > threshold 23 | } 24 | 25 | pub fn is_underutilized(&self, threshold: f32) -> bool { 26 | self.cpu_usage < threshold && self.memory_usage < threshold 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/broker/node.rs: 
-------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use std::sync::{Arc, Mutex}; 3 | use std::time::Instant; 4 | 5 | #[derive(Debug, Clone)] 6 | pub struct Node { 7 | pub id: String, 8 | pub address: String, 9 | pub data: Arc>>, 10 | pub last_heartbeat: Arc>, 11 | pub is_active: Arc>, 12 | } 13 | 14 | impl Node { 15 | pub fn new(id: &str, address: &str, is_active: bool) -> Self { 16 | Self { 17 | id: id.to_string(), 18 | address: address.to_string(), 19 | data: Arc::new(Mutex::new(Vec::new())), 20 | last_heartbeat: Arc::new(Mutex::new(Instant::now())), 21 | is_active: Arc::new(Mutex::new(is_active)), 22 | } 23 | } 24 | 25 | pub fn create_test_node(id: &str) -> Self { 26 | Self::new(id, "127.0.0.1:8080", true) 27 | } 28 | 29 | pub fn store_data(&self, data: &[u8]) -> Result<(), Box> { 30 | let mut node_data = self.data.lock().map_err(|e| e.to_string())?; 31 | node_data.clear(); 32 | node_data.extend_from_slice(data); 33 | Ok(()) 34 | } 35 | 36 | pub fn update_heartbeat(&self) { 37 | if let Ok(mut last_heartbeat) = self.last_heartbeat.lock() { 38 | *last_heartbeat = Instant::now(); 39 | } 40 | } 41 | 42 | pub fn is_alive(&self) -> bool { 43 | if let Ok(is_active) = self.is_active.lock() { 44 | return *is_active; 45 | } 46 | false 47 | } 48 | 49 | pub fn set_active(&self, active: bool) { 50 | if let Ok(mut is_active) = self.is_active.lock() { 51 | *is_active = active; 52 | } 53 | } 54 | 55 | pub fn get_id(&self) -> &str { 56 | &self.id 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/broker/node_management.rs: -------------------------------------------------------------------------------- 1 | //! Module for managing storage nodes and consumer groups. 2 | //! 3 | //! This module provides functions for checking the health of storage nodes and 4 | //! recovering them when they become unavailable. It also provides functions for 5 | //! 
resetting the assignments of consumer groups when a storage node is recovered. 6 | 7 | use crate::broker::consumer::group::ConsumerGroup; 8 | use crate::broker::storage::Storage; 9 | use std::collections::HashMap; 10 | use std::sync::Mutex; 11 | 12 | /// A type alias for a collection of consumer groups, keyed by their group names. 13 | /// 14 | /// This type is used to store the consumer groups that are managed by the broker. 15 | pub type ConsumerGroups = HashMap; 16 | 17 | /// Checks the health of a storage node. 18 | /// 19 | /// This function locks the provided storage and checks if it is available. 20 | /// 21 | /// # Arguments 22 | /// * `storage` - A reference to a `Mutex` that represents the storage node. 23 | /// 24 | /// # Returns 25 | /// * `true` - If the storage node is available. 26 | /// * `false` - If the storage node is not available. 27 | /// 28 | /// # Examples 29 | /// ``` 30 | /// use std::sync::Mutex; 31 | /// use std::path::PathBuf; 32 | /// use pilgrimage::broker::node_management::check_node_health; 33 | /// use pilgrimage::broker::storage::Storage; 34 | /// 35 | /// // Create a new storage node 36 | /// let storage = Mutex::new(Storage::new(PathBuf::from("test_check_node_health")).unwrap()); 37 | /// 38 | /// // Check the health of the storage node 39 | /// let is_available = check_node_health(&storage); 40 | /// // Assert that the storage node is available 41 | /// assert!(is_available); 42 | /// ``` 43 | pub fn check_node_health(storage: &Mutex) -> bool { 44 | let storage_guard = storage.lock().unwrap(); 45 | storage_guard.is_available() 46 | } 47 | 48 | /// Recovers a storage node and resets the assignments of consumer groups. 49 | /// 50 | /// This function attempts to reinitialize the provided storage and resets the 51 | /// assignments of all consumer groups. 52 | /// 53 | /// # Arguments 54 | /// * `storage` - A reference to a `Mutex` that represents the storage node. 
55 | /// * `consumer_groups` - A reference to a `Mutex` that represents 56 | /// the collection of consumer groups. 57 | /// 58 | /// # Examples 59 | /// ``` 60 | /// use std::sync::Mutex; 61 | /// use std::path::PathBuf; 62 | /// use pilgrimage::broker::node_management::recover_node; 63 | /// use pilgrimage::broker::consumer::group::ConsumerGroup; 64 | /// use pilgrimage::broker::storage::Storage; 65 | /// use std::collections::HashMap; 66 | /// 67 | /// // Create a new storage node 68 | /// let storage = Mutex::new(Storage::new(PathBuf::from("test_recover_node")).unwrap()); 69 | /// 70 | /// // Create a collection of consumer groups 71 | /// let consumer_groups = Mutex::new(HashMap::new()); 72 | /// 73 | /// // Create a consumer group 74 | /// let group = ConsumerGroup::new("test_group"); 75 | /// 76 | /// // Insert the consumer group into the collection 77 | /// consumer_groups.lock().unwrap().insert("test_group".to_string(), group); 78 | /// 79 | /// // Recover the storage node 80 | /// recover_node(&storage, &consumer_groups); 81 | /// // Assert that the storage node is available 82 | /// assert!(storage.lock().unwrap().is_available()); 83 | /// ``` 84 | pub fn recover_node(storage: &Mutex, consumer_groups: &Mutex) { 85 | let mut storage_guard = storage.lock().unwrap(); 86 | if let Err(e) = storage_guard.reinitialize() { 87 | eprintln!("Storage initialization failed.: {}", e); 88 | } 89 | 90 | let mut groups_guard = consumer_groups.lock().unwrap(); 91 | for group in groups_guard.values_mut() { 92 | group.reset_assignments(); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /src/broker/replication.rs: -------------------------------------------------------------------------------- 1 | use log::{debug, error, info, warn}; 2 | use std::collections::HashMap; 3 | use std::sync::{Arc, Mutex}; 4 | use std::time::Instant; 5 | use tokio::sync::mpsc; 6 | 7 | use crate::broker::node::Node; 8 | 9 | #[derive(Debug, Clone)] 
10 | pub struct ReplicationState { 11 | last_replicated: Instant, 12 | status: ReplicationStatus, 13 | replicas: Vec, 14 | } 15 | 16 | #[derive(Debug, Clone)] 17 | pub enum ReplicationStatus { 18 | InProgress, 19 | Complete, 20 | Failed(String), 21 | } 22 | 23 | pub struct ReplicationManager { 24 | states: Arc>>, 25 | nodes: Arc>>>, 26 | replication_factor: usize, 27 | tx: mpsc::Sender, 28 | rx: mpsc::Receiver, 29 | } 30 | 31 | #[derive(Debug)] 32 | pub enum ReplicationEvent { 33 | DataUpdate { 34 | partition_id: String, 35 | data: Vec, 36 | }, 37 | StateUpdate { 38 | partition_id: String, 39 | status: ReplicationStatus, 40 | }, 41 | NodeFailure { 42 | node_id: String, 43 | }, 44 | } 45 | 46 | impl ReplicationManager { 47 | pub fn new(nodes: Arc>>>, replication_factor: usize) -> Self { 48 | let (tx, rx) = mpsc::channel(100); 49 | 50 | Self { 51 | states: Arc::new(Mutex::new(HashMap::new())), 52 | nodes, 53 | replication_factor, 54 | tx, 55 | rx, 56 | } 57 | } 58 | 59 | pub async fn send_data_update( 60 | &self, 61 | partition_id: String, 62 | data: Vec, 63 | ) -> Result<(), String> { 64 | self.tx 65 | .send(ReplicationEvent::DataUpdate { partition_id, data }) 66 | .await 67 | .map_err(|e| format!("Failed to send replication event: {}", e)) 68 | } 69 | 70 | pub async fn send_state_update( 71 | &self, 72 | partition_id: String, 73 | status: ReplicationStatus, 74 | ) -> Result<(), String> { 75 | self.tx 76 | .send(ReplicationEvent::StateUpdate { 77 | partition_id, 78 | status, 79 | }) 80 | .await 81 | .map_err(|e| format!("Failed to send status update event: {}", e)) 82 | } 83 | 84 | pub async fn notify_node_failure(&self, node_id: String) -> Result<(), String> { 85 | self.tx 86 | .send(ReplicationEvent::NodeFailure { node_id }) 87 | .await 88 | .map_err(|e| format!("Failed to send node failure event: {}", e)) 89 | } 90 | 91 | pub async fn start(&mut self) { 92 | info!("Start Replication Manager"); 93 | while let Some(event) = self.rx.recv().await { 94 | match event { 
95 | ReplicationEvent::DataUpdate { partition_id, data } => { 96 | self.handle_data_update(&partition_id, &data).await; 97 | } 98 | ReplicationEvent::StateUpdate { 99 | partition_id, 100 | status, 101 | } => { 102 | self.update_replication_state(&partition_id, status); 103 | } 104 | ReplicationEvent::NodeFailure { node_id } => { 105 | self.handle_node_failure(&node_id).await; 106 | } 107 | } 108 | } 109 | } 110 | 111 | async fn handle_data_update(&self, partition_id: &str, data: &[u8]) { 112 | let nodes = self.nodes.lock().unwrap(); 113 | let mut states = self.states.lock().unwrap(); 114 | 115 | let state = states 116 | .entry(partition_id.to_string()) 117 | .or_insert_with(|| ReplicationState { 118 | last_replicated: Instant::now(), 119 | status: ReplicationStatus::InProgress, 120 | replicas: Vec::new(), 121 | }); 122 | 123 | let healthy_nodes: Vec<_> = nodes.iter().filter(|(_, node)| node.is_alive()).collect(); 124 | 125 | if healthy_nodes.len() < self.replication_factor { 126 | warn!( 127 | "Not enough nodes available. 
Required: {}, Current: {}", 128 | self.replication_factor, 129 | healthy_nodes.len() 130 | ); 131 | } 132 | 133 | for (node_id, node) in healthy_nodes.iter().take(self.replication_factor) { 134 | match node.store_data(data) { 135 | Ok(_) => { 136 | if !state.replicas.contains(node_id) { 137 | state.replicas.push(node_id.to_string()); 138 | } 139 | debug!("Successful replication to node {}", node_id); 140 | } 141 | Err(e) => { 142 | error!("Replication failure to node {}: {}", node_id, e); 143 | state.status = ReplicationStatus::Failed(format!( 144 | "Replication failure to node {}", 145 | node_id 146 | )); 147 | } 148 | } 149 | } 150 | 151 | if state.replicas.len() >= self.replication_factor { 152 | state.status = ReplicationStatus::Complete; 153 | state.last_replicated = Instant::now(); 154 | } 155 | } 156 | 157 | fn update_replication_state(&self, partition_id: &str, status: ReplicationStatus) { 158 | let mut states = self.states.lock().unwrap(); 159 | if let Some(state) = states.get_mut(partition_id) { 160 | state.status = status; 161 | state.last_replicated = Instant::now(); 162 | } 163 | } 164 | 165 | async fn handle_node_failure(&self, failed_node_id: &str) { 166 | info!("Failure of node {} is being handled", failed_node_id); 167 | let mut states = self.states.lock().unwrap(); 168 | 169 | for (partition_id, state) in states.iter_mut() { 170 | if state.replicas.contains(&failed_node_id.to_string()) { 171 | // Find alternative node and re-replicate 172 | self.re_replicate(partition_id).await; 173 | } 174 | } 175 | } 176 | 177 | async fn re_replicate(&self, partition_id: &str) { 178 | debug!("Start re-replication of partition {}", partition_id); 179 | // Implementation: Retrieve data from healthy nodes and replicate to new nodes 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/broker/topic.rs: -------------------------------------------------------------------------------- 1 | //! 
Module for message topics 2 | //! 3 | //! This module provides the `Topic` struct, which represents a topic in the message broker. 4 | //! 5 | //! A topic has a name, a set of partitions, and a list of subscribers. 6 | //! It provides methods to create new topics, add subscribers, and publish messages. 7 | //! 8 | //! # Example 9 | //! The following example demonstrates how to create a new topic, 10 | //! add a subscriber, and publish a message. 11 | //! ``` 12 | //! use pilgrimage::broker::topic::Topic; 13 | //! use pilgrimage::subscriber::types::Subscriber; 14 | //! 15 | //! // Create a new topic 16 | //! let mut topic = Topic::new("test_topic", 3, 2); 17 | //! // Create a subscriber 18 | //! let subscriber = Subscriber::new("sub1", Box::new(|msg: String| { 19 | //! println!("Received message: {}", msg); 20 | //! })); 21 | //! // Add the subscriber to the topic 22 | //! topic.add_subscriber(subscriber); 23 | //! ``` 24 | 25 | use crate::subscriber::types::Subscriber; 26 | use std::fmt; 27 | 28 | /// Represents a message topic in the broker 29 | #[derive(Clone)] 30 | pub struct Topic { 31 | /// The name of the topic 32 | pub name: String, 33 | /// The number of partitions 34 | pub num_partitions: usize, 35 | /// The replication factor 36 | pub replication_factor: usize, 37 | /// The list of subscribers 38 | pub subscribers: Vec, 39 | } 40 | 41 | impl fmt::Debug for Topic { 42 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 43 | f.debug_struct("Topic") 44 | .field("name", &self.name) 45 | .field("num_partitions", &self.num_partitions) 46 | .field("replication_factor", &self.replication_factor) 47 | .field("subscribers_count", &self.subscribers.len()) 48 | .finish() 49 | } 50 | } 51 | 52 | impl Topic { 53 | /// Creates a new topic with the given name, number of partitions, and replication factor 54 | /// 55 | /// # Arguments 56 | /// * `name` - The name of the topic 57 | /// * `num_partitions` - The number of partitions 58 | /// * `replication_factor` - 
The replication factor 59 | /// 60 | /// # Example 61 | /// ``` 62 | /// use pilgrimage::broker::topic::Topic; 63 | /// 64 | /// let topic = Topic::new("test_topic", 3, 2); 65 | /// assert_eq!(topic.name, "test_topic"); 66 | /// assert_eq!(topic.num_partitions, 3); 67 | /// assert_eq!(topic.replication_factor, 2); 68 | /// ``` 69 | pub fn new(name: &str, num_partitions: usize, replication_factor: usize) -> Self { 70 | Self { 71 | name: name.to_string(), 72 | num_partitions, 73 | replication_factor, 74 | subscribers: Vec::new(), 75 | } 76 | } 77 | 78 | /// Adds a subscriber to the topic 79 | /// 80 | /// # Arguments 81 | /// * `subscriber` - The subscriber to add 82 | /// 83 | /// # Example 84 | /// ``` 85 | /// use pilgrimage::broker::topic::Topic; 86 | /// use pilgrimage::subscriber::types::Subscriber; 87 | /// 88 | /// let mut topic = Topic::new("test_topic", 3, 2); 89 | /// let subscriber = Subscriber::new("sub1", Box::new(|msg: String| { 90 | /// println!("Received message: {}", msg); 91 | /// })); 92 | /// topic.add_subscriber(subscriber); 93 | /// assert_eq!(topic.subscribers.len(), 1); 94 | /// ``` 95 | pub fn add_subscriber(&mut self, subscriber: Subscriber) { 96 | self.subscribers.push(subscriber); 97 | } 98 | 99 | /// Removes a subscriber from the topic 100 | /// 101 | /// # Arguments 102 | /// * `id` - The ID of the subscriber to remove 103 | /// 104 | /// # Example 105 | /// ``` 106 | /// use pilgrimage::broker::topic::Topic; 107 | /// use pilgrimage::subscriber::types::Subscriber; 108 | /// 109 | /// let mut topic = Topic::new("test_topic", 3, 2); 110 | /// let subscriber = Subscriber::new("sub1", Box::new(|msg: String| { 111 | /// println!("Received message: {}", msg); 112 | /// })); 113 | /// topic.add_subscriber(subscriber); 114 | /// topic.remove_subscriber("sub1"); 115 | /// assert_eq!(topic.subscribers.len(), 0); 116 | /// ``` 117 | pub fn remove_subscriber(&mut self, id: &str) -> Option { 118 | if let Some(pos) = 
self.subscribers.iter().position(|s| s.id == id) { 119 | Some(self.subscribers.remove(pos)) 120 | } else { 121 | None 122 | } 123 | } 124 | 125 | /// Get the number of subscribers 126 | /// 127 | /// # Example 128 | /// ``` 129 | /// use pilgrimage::broker::topic::Topic; 130 | /// use pilgrimage::subscriber::types::Subscriber; 131 | /// 132 | /// let mut topic = Topic::new("test_topic", 3, 2); 133 | /// let subscriber = Subscriber::new("sub1", Box::new(|msg: String| { 134 | /// println!("Received message: {}", msg); 135 | /// })); 136 | /// topic.add_subscriber(subscriber); 137 | /// assert_eq!(topic.subscriber_count(), 1); 138 | /// ``` 139 | pub fn subscriber_count(&self) -> usize { 140 | self.subscribers.len() 141 | } 142 | 143 | /// Calculates the partition for a message based on a key 144 | /// 145 | /// # Arguments 146 | /// * `key` - The key to use for partitioning 147 | /// 148 | /// # Returns 149 | /// The partition number 150 | /// 151 | /// # Example 152 | /// ``` 153 | /// use pilgrimage::broker::topic::Topic; 154 | /// 155 | /// let topic = Topic::new("test_topic", 3, 2); 156 | /// let partition = topic.get_partition_for_key("test_key"); 157 | /// assert!(partition < topic.num_partitions); 158 | /// ``` 159 | pub fn get_partition_for_key(&self, key: &str) -> usize { 160 | let mut hasher = std::collections::hash_map::DefaultHasher::new(); 161 | std::hash::Hash::hash(key, &mut hasher); 162 | let hash = std::hash::Hasher::finish(&hasher); 163 | (hash % self.num_partitions as u64) as usize 164 | } 165 | 166 | /// Gets the next partition in a round-robin fashion 167 | /// 168 | /// # Arguments 169 | /// * `last_partition` - The last partition used 170 | /// 171 | /// # Returns 172 | /// The next partition to use 173 | /// 174 | /// # Example 175 | /// ``` 176 | /// use pilgrimage::broker::topic::Topic; 177 | /// 178 | /// let topic = Topic::new("test_topic", 3, 2); 179 | /// let partition = topic.get_next_partition(0); 180 | /// assert_eq!(partition, 1); 
181 | /// ``` 182 | pub fn get_next_partition(&self, last_partition: usize) -> usize { 183 | (last_partition + 1) % self.num_partitions 184 | } 185 | } 186 | 187 | #[cfg(test)] 188 | mod tests { 189 | use super::*; 190 | 191 | #[test] 192 | fn test_topic_creation() { 193 | let topic = Topic::new("test_topic", 3, 2); 194 | assert_eq!(topic.name, "test_topic"); 195 | assert_eq!(topic.num_partitions, 3); 196 | assert_eq!(topic.replication_factor, 2); 197 | assert_eq!(topic.subscribers.len(), 0); 198 | } 199 | 200 | #[test] 201 | fn test_add_subscriber() { 202 | let mut topic = Topic::new("test_topic", 3, 2); 203 | let subscriber = Subscriber::new("sub1", Box::new(|_msg: String| {})); 204 | topic.add_subscriber(subscriber); 205 | assert_eq!(topic.subscribers.len(), 1); 206 | } 207 | 208 | #[test] 209 | fn test_remove_subscriber() { 210 | let mut topic = Topic::new("test_topic", 3, 2); 211 | let subscriber = Subscriber::new("sub1", Box::new(|_msg: String| {})); 212 | topic.add_subscriber(subscriber); 213 | let removed = topic.remove_subscriber("sub1"); 214 | assert!(removed.is_some()); 215 | assert_eq!(topic.subscribers.len(), 0); 216 | } 217 | 218 | #[test] 219 | fn test_get_partition_for_key() { 220 | let topic = Topic::new("test_topic", 3, 2); 221 | let partition = topic.get_partition_for_key("test_key"); 222 | assert!(partition < topic.num_partitions); 223 | } 224 | 225 | #[test] 226 | fn test_get_next_partition() { 227 | let topic = Topic::new("test_topic", 3, 2); 228 | assert_eq!(topic.get_next_partition(0), 1); 229 | assert_eq!(topic.get_next_partition(1), 2); 230 | assert_eq!(topic.get_next_partition(2), 0); 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | mod web_console; 2 | 3 | /// The main entry point for the Pilgrimage application. 
4 | /// 5 | /// This function creates and initializes a new HTTP server to provide the user 6 | /// with various commands to manage brokers via REST API (`/start`, `/stop`, `/status`, etc.). 7 | /// For more details about the HTTP server, see [`web_console::run_server`]. 8 | /// 9 | /// The application also uses the [Tokio runtime](https://tokio.rs/), 10 | /// which provides asynchronous support for Rust, 11 | /// enabling the development of high performance network applications. 12 | /// 13 | /// # Returns 14 | /// 15 | /// Returns a `std::io::Result<()>` indicating the success or failure of the 16 | /// web server execution. 17 | #[tokio::main] 18 | async fn main() -> std::io::Result<()> { 19 | web_console::run_server().await 20 | } 21 | -------------------------------------------------------------------------------- /src/message/ack.rs: -------------------------------------------------------------------------------- 1 | //! Module for message acknowledgments. 2 | //! 3 | //! This module provides the [`MessageAck`] struct and associated methods to handle message 4 | //! acknowledgments. 5 | //! 6 | //! The [`MessageAck`] struct is used to acknowledge the receipt or processing status of a message. 7 | //! This includes metadata such as the message ID, timestamp, topic, and partition. It also defines 8 | //! the various statuses an acknowledgment can have. 9 | //! 10 | //! # Example 11 | //! The following example demonstrates how to create a new acknowledgment for a message: 12 | //! ```rust 13 | //! use chrono::{DateTime, Utc}; 14 | //! use uuid::Uuid; 15 | //! use pilgrimage::message::ack::{AckStatus, MessageAck}; 16 | //! 17 | //! // Create a new acknowledgment for a message 18 | //! let message_id = Uuid::new_v4(); 19 | //! let timestamp = Utc::now(); 20 | //! let status = AckStatus::Received; 21 | //! let topic = String::from("test-topic"); 22 | //! let partition = 0; 23 | //! let ack = MessageAck::new( 24 | //! 
message_id, timestamp, status, topic.as_str().parse().unwrap(), partition 25 | //! ); 26 | //! 27 | //! // Check the message ID and topic of the acknowledgment 28 | //! assert_eq!(ack.message_id, message_id); 29 | //! assert_eq!(ack.topic, topic); 30 | //! ``` 31 | 32 | use chrono::{DateTime, Utc}; 33 | use serde::{Deserialize, Serialize}; 34 | use uuid::Uuid; 35 | 36 | /// Represents an acknowledgment for a message. 37 | /// 38 | /// The `MessageAck` struct is used to acknowledge the receipt or processing status 39 | /// of a message, along with additional metadata such as the topic and partition. 40 | #[derive(Debug, Clone, Serialize, Deserialize)] 41 | pub struct MessageAck { 42 | /// The unique identifier of the message (UUID). 43 | pub message_id: Uuid, 44 | /// The timestamp when the acknowledgment was created (UTC). 45 | pub timestamp: DateTime, 46 | /// The status of the acknowledgment. 47 | pub status: AckStatus, 48 | /// The topic of the message. 49 | pub topic: String, 50 | /// The partition of the message. 51 | pub partition: usize, 52 | } 53 | 54 | /// Represents the acknowledgment status. 55 | /// 56 | /// The `AckStatus` enum defines the possible states of an acknowledgment, 57 | /// such as [`AckStatus::Received`], [`AckStatus::Processed`], or [`AckStatus::Failed`] 58 | /// with an error message. 59 | #[derive(Debug, Clone, Serialize, Deserialize)] 60 | pub enum AckStatus { 61 | /// The message was received successfully. 62 | Received, 63 | /// The message was processed successfully. 64 | Processed, 65 | /// The message processing failed with an error message. 66 | Failed(String), 67 | } 68 | 69 | impl MessageAck { 70 | /// Creates a new `MessageAck` instance. 71 | /// 72 | /// This method initializes a new acknowledgment with the given message ID, timestamp, 73 | /// acknowledgment status, topic, and partition. 74 | /// 75 | /// # Parameters 76 | /// * `message_id` - The unique identifier of the message. 
77 | /// * `timestamp` - The timestamp when the acknowledgment was created. 78 | /// * `status` - The status of the acknowledgment. 79 | /// * `topic` - The topic of the message. 80 | /// * `partition` - The partition of the message. 81 | /// 82 | /// # Returns 83 | /// A new `MessageAck` instance with the provided data. 84 | /// 85 | /// # Example 86 | /// ```rust 87 | /// use chrono::{DateTime, Utc}; 88 | /// use uuid::Uuid; 89 | /// use pilgrimage::message::ack::{AckStatus, MessageAck}; 90 | /// 91 | /// // Create a new acknowledgment for a message 92 | /// let message_id = Uuid::new_v4(); 93 | /// let timestamp = Utc::now(); 94 | /// let status = AckStatus::Received; 95 | /// let topic = String::from("test-topic"); 96 | /// let partition = 0; 97 | /// let ack = MessageAck::new( 98 | /// message_id, timestamp, status, topic.as_str().parse().unwrap(), partition 99 | /// ); 100 | /// 101 | /// // Check the message ID and topic of the acknowledgment 102 | /// assert_eq!(ack.message_id, message_id); 103 | /// assert_eq!(ack.topic, topic); 104 | /// ``` 105 | pub fn new( 106 | message_id: Uuid, 107 | timestamp: DateTime, 108 | status: AckStatus, 109 | topic: String, 110 | partition: usize, 111 | ) -> Self { 112 | Self { 113 | message_id, 114 | timestamp, 115 | status, 116 | topic, 117 | partition, 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/message/message.rs: -------------------------------------------------------------------------------- 1 | //! Module for the [`Message`] struct. 2 | //! 3 | //! This module provides the [`Message`] struct and associated method to create and manage messages. 4 | //! 5 | //! The [`Message`] struct encapsulates the ID, content, and timestamp of a message. 6 | //! It includes implementations for creating a new message, converting to and from a string, 7 | //! and displaying the message in a human-readable format. 8 | //! 9 | //! # Example 10 | //! ``` 11 | //! 
use pilgrimage::message::message::Message; 12 | //! use chrono::Utc; 13 | //! 14 | //! // Create a new message with content "Hello, world!" 15 | //! let message = Message::new(String::from("Hello, world!")); 16 | //! 17 | //! // Check the content of the message 18 | //! assert_eq!(message.content, "Hello, world!"); 19 | //! ``` 20 | 21 | use crate::schema::message_schema::MessageSchema; 22 | use chrono::{DateTime, Utc}; 23 | use serde::{Deserialize, Serialize}; 24 | use uuid::Uuid; 25 | 26 | /// Represents a message with an ID, content, and timestamp. 27 | /// 28 | /// The `Message` struct is used to encapsulate a message with a unique identifier (`id`), 29 | /// text content (`content`), and a time (`timestamp`) when the message was created. 30 | #[derive(Debug, Clone, Serialize, Deserialize)] 31 | pub struct Message { 32 | /// The unique identifier of the message (UUID). 33 | pub id: Uuid, 34 | /// The content of the message. 35 | pub content: String, 36 | /// The timestamp when the message was created (UTC). 37 | pub timestamp: DateTime, 38 | /// The ID of the topic this message belongs to. 39 | #[serde(default)] 40 | pub topic_id: String, 41 | /// The ID of the partition this message belongs to. 42 | #[serde(default)] 43 | pub partition_id: usize, 44 | /// The schema of the message, if any. 45 | #[serde(skip_serializing_if = "Option::is_none")] 46 | pub schema: Option, 47 | } 48 | 49 | impl Message { 50 | /// Creates a new `Message` with the given content. 51 | /// 52 | /// This method generates a unique ID and timestamp for the message. 53 | /// 54 | /// ## Example 55 | /// 56 | /// ``` 57 | /// use pilgrimage::message::message::Message; 58 | /// 59 | /// // Create a new message with content "Hello, world!" 
60 | /// let message = Message::new(String::from("Hello, world!")); 61 | /// 62 | /// // Check the content of the message 63 | /// assert_eq!(message.content, "Hello, world!"); 64 | /// ``` 65 | pub fn new(content: String) -> Self { 66 | Self { 67 | id: Uuid::new_v4(), 68 | content, 69 | timestamp: Utc::now(), 70 | topic_id: String::new(), 71 | partition_id: 0, 72 | schema: None, 73 | } 74 | } 75 | 76 | /// Creates a new `Message` with the given content and schema. 77 | /// 78 | /// This method generates a unique ID and timestamp for the message. 79 | /// 80 | /// ## Example 81 | /// 82 | /// ``` 83 | /// use pilgrimage::message::message::Message; 84 | /// use pilgrimage::schema::message_schema::MessageSchema; 85 | /// 86 | /// // Create a schema for the message 87 | /// let schema = MessageSchema::new(); 88 | /// 89 | /// // Create a new message with content "Hello, world!" and the given schema 90 | /// let message = Message::new_with_schema(String::from("Hello, world!"), schema); 91 | /// 92 | /// // Check the content and schema of the message 93 | /// assert_eq!(message.content, "Hello, world!"); 94 | /// ``` 95 | pub fn new_with_schema(content: String, schema: MessageSchema) -> Self { 96 | Self { 97 | id: Uuid::new_v4(), 98 | content, 99 | timestamp: Utc::now(), 100 | topic_id: String::new(), 101 | partition_id: 0, 102 | schema: Some(schema), 103 | } 104 | } 105 | 106 | /// Sets the topic ID for this message. 
107 | /// 108 | /// ## Example 109 | /// 110 | /// ``` 111 | /// use pilgrimage::message::message::Message; 112 | /// 113 | /// // Create a new message 114 | /// let message = Message::new(String::from("Hello, world!")); 115 | /// 116 | /// // Set the topic ID for the message 117 | /// let message = message.with_topic(String::from("topic1")); 118 | /// 119 | /// // Check the topic ID of the message 120 | /// assert_eq!(message.topic_id, "topic1"); 121 | /// ``` 122 | pub fn with_topic(mut self, topic_id: String) -> Self { 123 | self.topic_id = topic_id; 124 | self 125 | } 126 | 127 | /// Sets the partition ID for this message. 128 | /// 129 | /// ## Example 130 | /// 131 | /// ``` 132 | /// use pilgrimage::message::message::Message; 133 | /// 134 | /// // Create a new message 135 | /// let message = Message::new(String::from("Hello, world!")); 136 | /// 137 | /// // Set the partition ID for the message 138 | /// let message = message.with_partition(1); 139 | /// 140 | /// // Check the partition ID of the message 141 | /// assert_eq!(message.partition_id, 1); 142 | /// ``` 143 | pub fn with_partition(mut self, partition_id: usize) -> Self { 144 | self.partition_id = partition_id; 145 | self 146 | } 147 | } 148 | 149 | impl From for Message { 150 | /// Converts a [`String`] into a [`Message`]. 151 | /// 152 | /// This `From` implementation allows creating a [`Message`] directly from a string. 153 | fn from(content: String) -> Self { 154 | Message::new(content) 155 | } 156 | } 157 | 158 | impl From for String { 159 | /// Converts a [`Message`] back into a [`String`]. 160 | /// 161 | /// This `From` implementation allows obtaining the content of a [`Message`] as a string. 162 | fn from(message: Message) -> Self { 163 | message.content 164 | } 165 | } 166 | 167 | impl std::fmt::Display for Message { 168 | /// Formats the [`Message`] for display purposes. 
169 | /// 170 | /// This method returns a string representation of the message in the format: 171 | /// ```text 172 | /// Message[] (at ) 173 | /// ``` 174 | /// Where: 175 | /// - `id` is the unique identifier of the message. 176 | /// - `content` is the text content of the message. 177 | /// - `timestamp` is the time when the message was created. 178 | /// 179 | /// # Parameters 180 | /// * `f`: A mutable reference to a [`std::fmt::Formatter`] instance. 181 | /// 182 | /// # Returns 183 | /// A [`std::fmt::Result`] indicating the success or failure of the operation. 184 | /// 185 | /// # Example 186 | /// ``` 187 | /// use pilgrimage::message::message::Message; 188 | /// use chrono::Utc; 189 | /// use uuid::Uuid; 190 | /// 191 | /// // Create a new message with content "Hello, world!" 192 | /// let message = Message::new(String::from("Hello, world!")); 193 | /// 194 | /// // Format the message for display 195 | /// let formatted = format!("{}", message); 196 | /// 197 | /// // Check the formatted message 198 | /// assert_eq!( 199 | /// formatted, 200 | /// format!("Message[{}] {} (at {})", message.id, message.content, message.timestamp) 201 | /// ); 202 | /// ``` 203 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 204 | write!( 205 | f, 206 | "Message[{}] {} (at {})", 207 | self.id, self.content, self.timestamp 208 | ) 209 | } 210 | } 211 | 212 | impl From for crate::message::metadata::MessageMetadata { 213 | fn from(msg: Message) -> Self { 214 | Self { 215 | id: msg.id.to_string(), 216 | content: msg.content.clone(), 217 | timestamp: msg.timestamp.to_rfc3339(), 218 | topic_id: Some(msg.topic_id), 219 | partition_id: Some(msg.partition_id), 220 | schema: msg.schema, 221 | } 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /src/message/metadata.rs: -------------------------------------------------------------------------------- 1 | use crate::schema::message_schema::MessageSchema; 2 | use 
serde::{Deserialize, Serialize}; 3 | 4 | /// Represents metadata for a message in the broker 5 | #[derive(Debug, Clone, Serialize, Deserialize)] 6 | pub struct MessageMetadata { 7 | /// The unique identifier of the message 8 | pub id: String, 9 | /// The content of the message 10 | pub content: String, 11 | /// The timestamp when the message was created 12 | pub timestamp: String, 13 | /// The topic this message belongs to 14 | pub topic_id: Option, 15 | /// The partition this message belongs to 16 | pub partition_id: Option, 17 | /// The schema of the message, if any 18 | pub schema: Option, 19 | } 20 | 21 | impl MessageMetadata { 22 | /// Creates a new instance of MessageMetadata 23 | pub fn new(id: String, content: String, timestamp: String) -> Self { 24 | Self { 25 | id, 26 | content, 27 | timestamp, 28 | topic_id: None, 29 | partition_id: None, 30 | schema: None, 31 | } 32 | } 33 | 34 | /// Sets the topic ID for this metadata 35 | pub fn with_topic(mut self, topic_id: String) -> Self { 36 | self.topic_id = Some(topic_id); 37 | self 38 | } 39 | 40 | /// Sets the partition ID for this metadata 41 | pub fn with_partition(mut self, partition_id: usize) -> Self { 42 | self.partition_id = Some(partition_id); 43 | self 44 | } 45 | 46 | /// Sets the schema for this metadata 47 | pub fn with_schema(mut self, schema: MessageSchema) -> Self { 48 | self.schema = Some(schema); 49 | self 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/message/mod.rs: -------------------------------------------------------------------------------- 1 | //! Module for handling messages and acknowledgements. 2 | //! 3 | //! This module provides the core components for handling messages and their acknowledgements. 4 | //! 5 | //! It includes the following submodules: 6 | //! * [`message`]: Defines the [`message::Message`] struct, 7 | //! representing individual messages with an ID, content, and timestamp. 8 | //! 
This module provides methods to create and manage messages. 9 | //! * [`ack`]: Defines the [`ack::MessageAck`] struct, 10 | //! used to acknowledge the receipt or processing status of messages. 11 | //! This module includes metadata such as message ID, timestamp, topic, and partition, 12 | //! as well as various acknowledgement statuses. 13 | 14 | pub mod ack; 15 | pub mod message; 16 | pub mod metadata; 17 | 18 | pub use self::ack::{MessageAck, AckStatus}; 19 | pub use self::message::Message; 20 | pub use self::metadata::MessageMetadata; 21 | -------------------------------------------------------------------------------- /src/schema/compatibility/mod.rs: -------------------------------------------------------------------------------- 1 | //! Module containing the compatibility check for schema evolution. 2 | //! 3 | //! The compatibility check ensures that new schemas can read data written by old schemas. 4 | //! 5 | //! # Example 6 | //! The following example demonstrates how to check the compatibility between two schemas. 7 | //! ``` 8 | //! use pilgrimage::schema::compatibility::Compatibility; 9 | //! use pilgrimage::schema::version::SchemaVersion; 10 | //! use pilgrimage::schema::registry::Schema; 11 | //! 12 | //! // Get an old schema (we are using a dummy schema for demonstration purposes) 13 | //! let old_schema = Schema { 14 | //! id: 1, 15 | //! version: SchemaVersion::new(1), 16 | //! definition: r#"{"type":"record","fields":[{"name":"id","type":"string"}]}"#.to_string(), 17 | //! }; 18 | //! 19 | //! // Get a new schema (we are using a dummy schema for demonstration purposes) 20 | //! let new_schema = Schema { 21 | //! id: 2, 22 | //! version: SchemaVersion::new(2), 23 | //! definition: r#"{"type":"record","fields":[{"name":"id","type":"string"},{"name":"value","type":"string","default":""}]}"#.to_string(), 24 | //! }; 25 | //! 26 | //! // Check the compatibility between the new schema and the old schema 27 | //! 
let compatibility = Compatibility::BACKWARD; 28 | //! assert!(compatibility.check(&new_schema, &old_schema)); 29 | //! ``` 30 | 31 | use crate::schema::registry::Schema; 32 | use serde::{Deserialize, Serialize}; 33 | use serde_json::Value; 34 | 35 | /// Enum representing the compatibility modes for schema evolution. 36 | /// 37 | /// # Variants 38 | /// 39 | /// * `BACKWARD` - Ensures that new schemas can read data written by old schemas. 40 | /// * `FORWARD` - Ensures that old schemas can read data written by new schemas. 41 | /// * `FULL` - Ensures both backward and forward compatibility. 42 | /// * `NONE` - No compatibility checks. 43 | #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] 44 | #[serde(rename_all = "UPPERCASE")] 45 | pub enum Compatibility { 46 | Backward, 47 | Forward, 48 | Full, 49 | None, 50 | } 51 | 52 | impl Compatibility { 53 | /// Backward compatibility mode (new schema can read old data) 54 | pub const BACKWARD: Self = Self::Backward; 55 | /// Forward compatibility mode (old schema can read new data) 56 | pub const FORWARD: Self = Self::Forward; 57 | /// Full compatibility mode (guaranteed bi-directional compatibility) 58 | pub const FULL: Self = Self::Full; 59 | /// No compatibility check 60 | pub const NONE: Self = Self::None; 61 | 62 | /// Checks the compatibility between a new schema and an old schema. 63 | /// 64 | /// # Arguments 65 | /// 66 | /// * `new_schema` - The new schema to be checked. 67 | /// * `old_schema` - The existing schema to check against. 68 | /// 69 | /// # Returns 70 | /// 71 | /// * `true` if the schemas are compatible according to the specified mode. 72 | /// * `false` otherwise. 
73 | pub fn check(&self, new_schema: &Schema, old_schema: &Schema) -> bool { 74 | match self { 75 | Compatibility::Backward => Self::check_backward(new_schema, old_schema), 76 | Compatibility::Forward => Self::check_forward(new_schema, old_schema), 77 | Compatibility::Full => { 78 | Self::check_backward(new_schema, old_schema) 79 | && Self::check_forward(new_schema, old_schema) 80 | } 81 | Compatibility::None => true, 82 | } 83 | } 84 | 85 | /// Checks backward compatibility between a new schema and an old schema. 86 | /// 87 | /// # Arguments 88 | /// 89 | /// * `new_schema` - The new schema to be checked. 90 | /// * `old_schema` - The existing schema to check against. 91 | /// 92 | /// # Returns 93 | /// 94 | /// * `true` if the new schema is backward compatible with the old schema. 95 | /// * `false` otherwise. 96 | fn check_backward(new_schema: &Schema, old_schema: &Schema) -> bool { 97 | if let (Ok(new_json), Ok(old_json)) = ( 98 | serde_json::from_str::(&new_schema.definition), 99 | serde_json::from_str::(&old_schema.definition), 100 | ) { 101 | // The new schema must contain all the fields of the old schema. 102 | Self::contains_all_required_fields(&new_json, &old_json) 103 | } else { 104 | false 105 | } 106 | } 107 | 108 | /// Checks forward compatibility between a new schema and an old schema. 109 | /// 110 | /// # Arguments 111 | /// 112 | /// * `new_schema` - The new schema to be checked. 113 | /// * `old_schema` - The existing schema to check against. 114 | /// 115 | /// # Returns 116 | /// 117 | /// * `true` if the new schema is forward compatible with the old schema. 118 | /// * `false` otherwise. 
119 | fn check_forward(new_schema: &Schema, old_schema: &Schema) -> bool { 120 | if let (Ok(new_json), Ok(old_json)) = ( 121 | serde_json::from_str::(&new_schema.definition), 122 | serde_json::from_str::(&old_schema.definition), 123 | ) { 124 | // New fields must be optional 125 | Self::new_fields_are_optional(&new_json, &old_json) 126 | } else { 127 | false 128 | } 129 | } 130 | 131 | /// Ensures that all required fields in the old schema are present in the new schema. 132 | /// 133 | /// # Arguments 134 | /// 135 | /// * `new_schema` - The new schema to be checked. 136 | /// * `old_schema` - The existing schema to check against. 137 | /// 138 | /// # Returns 139 | /// 140 | /// * `true` if all required fields are present. 141 | /// * `false` otherwise. 142 | fn contains_all_required_fields(new_schema: &Value, old_schema: &Value) -> bool { 143 | if let (Some(new_fields), Some(old_fields)) = ( 144 | new_schema.get("fields").and_then(Value::as_array), 145 | old_schema.get("fields").and_then(Value::as_array), 146 | ) { 147 | old_fields.iter().all(|old_field| { 148 | let old_name = old_field.get("name").and_then(Value::as_str); 149 | new_fields.iter().any(|new_field| { 150 | let new_name = new_field.get("name").and_then(Value::as_str); 151 | old_name == new_name 152 | }) 153 | }) 154 | } else { 155 | true // If there is no field, it is determined to be compatible. 156 | } 157 | } 158 | 159 | /// Ensures that new fields in the new schema are optional. 160 | /// 161 | /// # Arguments 162 | /// 163 | /// * `new_schema` - The new schema to be checked. 164 | /// * `old_schema` - The existing schema to check against. 165 | /// 166 | /// # Returns 167 | /// 168 | /// * `true` if new fields are optional. 169 | /// * `false` otherwise. 
170 | fn new_fields_are_optional(new_schema: &Value, old_schema: &Value) -> bool { 171 | if let (Some(new_fields), Some(old_fields)) = ( 172 | new_schema.get("fields").and_then(Value::as_array), 173 | old_schema.get("fields").and_then(Value::as_array), 174 | ) { 175 | new_fields.iter().all(|new_field| { 176 | let new_name = new_field.get("name").and_then(Value::as_str); 177 | old_fields.iter().any(|old_field| { 178 | let old_name = old_field.get("name").and_then(Value::as_str); 179 | new_name == old_name || new_field.get("default").is_some() 180 | }) 181 | }) 182 | } else { 183 | true 184 | } 185 | } 186 | } 187 | 188 | #[cfg(test)] 189 | mod tests { 190 | use super::*; 191 | 192 | /// Tests backward compatibility. 193 | /// 194 | /// # Purpose 195 | /// This test ensures that the new schema is backward compatible with the old schema. 196 | /// 197 | /// # Steps 198 | /// 1. Create an old schema with a single field. 199 | /// 2. Create a new schema with the same field and an additional field. 200 | /// 3. Check if the new schema is backward compatible with the old schema. 
201 | #[test] 202 | fn test_backward_compatibility() { 203 | let old_schema = Schema { 204 | id: 1, 205 | version: crate::schema::version::SchemaVersion::new(1), 206 | definition: r#"{"type":"record","fields":[{"name":"id","type":"string"}]}"#.to_string(), 207 | }; 208 | 209 | let new_schema = Schema { 210 | id: 2, 211 | version: crate::schema::version::SchemaVersion::new(2), 212 | definition: r#"{"type":"record","fields":[{"name":"id","type":"string"},{"name":"value","type":"string","default":""}]}"#.to_string(), 213 | }; 214 | 215 | let compatibility = Compatibility::BACKWARD; 216 | assert!(compatibility.check(&new_schema, &old_schema)); 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /src/schema/error.rs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mila411/pilgrimage/f86012c5bf6c1de69f6de7fad4b31206d0e50119/src/schema/error.rs -------------------------------------------------------------------------------- /src/schema/message_schema.rs: -------------------------------------------------------------------------------- 1 | use crate::schema::compatibility::Compatibility; 2 | use crate::schema::registry::Schema; 3 | use crate::schema::version::SchemaVersion; 4 | use serde::{Deserialize, Serialize}; 5 | use serde_json::Value; 6 | use std::collections::HashMap; 7 | 8 | #[derive(Debug, Clone, Serialize, Deserialize)] 9 | pub struct MessageSchema { 10 | pub id: u32, 11 | pub definition: String, 12 | pub version: SchemaVersion, 13 | pub compatibility: Compatibility, 14 | #[serde(skip_serializing_if = "Option::is_none")] 15 | pub metadata: Option>, 16 | #[serde(skip_serializing_if = "Option::is_none")] 17 | pub topic_id: Option, 18 | #[serde(skip_serializing_if = "Option::is_none")] 19 | pub partition_id: Option, 20 | } 21 | 22 | impl MessageSchema { 23 | pub fn new() -> Self { 24 | MessageSchema { 25 | id: 0, 26 | definition: String::new(), 27 | 
version: SchemaVersion::new(1), 28 | compatibility: Compatibility::Backward, 29 | metadata: None, 30 | topic_id: None, 31 | partition_id: None, 32 | } 33 | } 34 | 35 | pub fn new_with_definition(definition: String) -> Self { 36 | MessageSchema { 37 | id: 0, 38 | definition, 39 | version: SchemaVersion::new(1), 40 | compatibility: Compatibility::Backward, 41 | metadata: None, 42 | topic_id: None, 43 | partition_id: None, 44 | } 45 | } 46 | 47 | pub fn new_with_schema(schema: Schema) -> Self { 48 | MessageSchema { 49 | id: schema.id, 50 | definition: schema.definition, 51 | version: schema.version, 52 | compatibility: Compatibility::Backward, 53 | metadata: None, 54 | topic_id: None, 55 | partition_id: None, 56 | } 57 | } 58 | 59 | /// Verify that messages conform to the schema 60 | pub fn validate(&self, message: &str) -> Result<(), String> { 61 | let value = serde_json::from_str::(message) 62 | .map_err(|e| format!("Message is not valid JSON: {}", e))?; 63 | 64 | let schema = serde_json::from_str::(&self.definition) 65 | .map_err(|e| format!("Schema is not valid JSON: {}", e))?; 66 | 67 | if self.validate_against_schema(&value, &schema) { 68 | Ok(()) 69 | } else { 70 | Err("Message does not conform to schema".to_string()) 71 | } 72 | } 73 | 74 | fn validate_against_schema(&self, value: &Value, schema: &Value) -> bool { 75 | match (schema, value) { 76 | (Value::Object(schema_obj), Value::Object(value_obj)) => { 77 | if let Some(Value::Array(required)) = schema_obj.get("required") { 78 | for field in required { 79 | if let Value::String(field_name) = field { 80 | if !value_obj.contains_key(field_name) { 81 | return false; 82 | } 83 | } 84 | } 85 | } 86 | 87 | if let Some(Value::Object(properties)) = schema_obj.get("properties") { 88 | for (key, schema_type) in properties { 89 | if let Some(value) = value_obj.get(key) { 90 | if !self.validate_type(value, schema_type) { 91 | return false; 92 | } 93 | } 94 | } 95 | } 96 | true 97 | } 98 | _ => false, 99 | } 100 | } 101 
| 102 | fn validate_type(&self, value: &Value, schema_type: &Value) -> bool { 103 | match schema_type { 104 | Value::Object(type_obj) => { 105 | if let Some(Value::String(type_name)) = type_obj.get("type") { 106 | match type_name.as_str() { 107 | "string" => matches!(value, Value::String(_)), 108 | "number" => matches!(value, Value::Number(_)), 109 | "boolean" => matches!(value, Value::Bool(_)), 110 | "null" => matches!(value, Value::Null), 111 | "array" => { 112 | if let Value::Array(items) = value { 113 | if let Some(item_schema) = type_obj.get("items") { 114 | items 115 | .iter() 116 | .all(|item| self.validate_type(item, item_schema)) 117 | } else { 118 | true 119 | } 120 | } else { 121 | false 122 | } 123 | } 124 | "object" => { 125 | if let Value::Object(_) = value { 126 | self.validate_against_schema(value, schema_type) 127 | } else { 128 | false 129 | } 130 | } 131 | _ => false, 132 | } 133 | } else { 134 | false 135 | } 136 | } 137 | _ => false, 138 | } 139 | } 140 | 141 | pub fn with_content(mut self, content: String) -> Self { 142 | self.definition = content; 143 | self 144 | } 145 | 146 | pub fn with_topic(mut self, topic: String) -> Self { 147 | self.topic_id = Some(topic); 148 | self 149 | } 150 | 151 | pub fn with_partition(mut self, partition: usize) -> Self { 152 | self.partition_id = Some(partition); 153 | self 154 | } 155 | 156 | pub fn update_version(&mut self, version: SchemaVersion) { 157 | self.version = version; 158 | } 159 | 160 | pub fn set_compatibility(&mut self, compatibility: Compatibility) { 161 | self.compatibility = compatibility; 162 | } 163 | 164 | pub fn add_metadata(&mut self, key: String, value: String) { 165 | let metadata = self.metadata.get_or_insert_with(HashMap::new); 166 | metadata.insert(key, value); 167 | } 168 | 169 | pub fn get_metadata(&self, key: &str) -> Option<&String> { 170 | self.metadata.as_ref().and_then(|m| m.get(key)) 171 | } 172 | } 173 | 174 | impl From for MessageSchema { 175 | fn from(schema: Schema) -> 
Self {
        MessageSchema {
            id: schema.id,
            definition: schema.definition,
            version: schema.version,
            compatibility: Compatibility::Backward,
            metadata: None,
            topic_id: None,
            partition_id: None,
        }
    }
}

impl From<MessageSchema> for Schema {
    /// Narrows a [`MessageSchema`] to its registry form, dropping
    /// compatibility, metadata, and routing information.
    fn from(schema: MessageSchema) -> Self {
        Schema {
            id: schema.id,
            definition: schema.definition,
            version: schema.version,
        }
    }
}
--------------------------------------------------------------------------------
/src/schema/mod.rs:
--------------------------------------------------------------------------------
//! Module for schema related functionality.
//!
//! This module contains the following submodules:
//! * [`compatibility`] - Module for schema compatibility checks.
//! * [`message_schema`] - Module for the message-level schema type.
//! * [`registry`] - Module for schema registry functionality.
//! * [`version`] - Module for schema versioning.

pub mod compatibility;
pub mod message_schema;
pub mod registry;
pub mod version;

pub use self::message_schema::MessageSchema;
pub use self::registry::Schema;
--------------------------------------------------------------------------------
/src/schema/version/mod.rs:
--------------------------------------------------------------------------------
//! Module containing the schema version struct.
//!
//! The schema version is a three-part version number that follows the
//! format `major.minor.patch`; each part is an unsigned 32-bit integer.
//!
//! # Example
//! The following example demonstrates how to create a new schema version.
//! ```
//! use pilgrimage::schema::version::SchemaVersion;
//!
//! let version = SchemaVersion::new(1);
//! assert_eq!(version.major, 1);
//! assert_eq!(version.minor, 0);
//! assert_eq!(version.patch, 0);
``` 17 | 18 | use core::fmt; 19 | use serde::{Deserialize, Serialize}; 20 | use std::cmp::Ordering; 21 | 22 | /// Struct representing a schema version. It follows the [Semantic Versioning][SV] format. 23 | /// 24 | /// A schema version is a three-part version number that follows the format `major.minor.patch`. 25 | /// 26 | /// Each part of the version number is an unsigned 32-bit integer. 27 | /// 28 | /// [SV]: https://en.wikipedia.org/wiki/Software_versioning#Semantic_versioning 29 | #[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] 30 | pub struct SchemaVersion { 31 | /// The major version number. 32 | pub major: u32, 33 | /// The minor version number. 34 | pub minor: u32, 35 | /// The patch version number. 36 | pub patch: u32, 37 | } 38 | 39 | impl SchemaVersion { 40 | /// Creates a new `SchemaVersion` with the specified major version. 41 | /// 42 | /// # Arguments 43 | /// 44 | /// * `version` - The major version number. 45 | /// 46 | /// # Examples 47 | /// 48 | /// ``` 49 | /// use pilgrimage::schema::version::SchemaVersion; 50 | /// 51 | /// let version = SchemaVersion::new(1); 52 | /// assert_eq!(version.major, 1); 53 | /// assert_eq!(version.minor, 0); 54 | /// assert_eq!(version.patch, 0); 55 | /// ``` 56 | pub fn new(version: u32) -> Self { 57 | SchemaVersion { 58 | major: version, 59 | minor: 0, 60 | patch: 0, 61 | } 62 | } 63 | 64 | /// Creates a new `SchemaVersion` with the specified major, minor, and patch versions. 65 | /// 66 | /// # Arguments 67 | /// 68 | /// * `major` - The major version number. 69 | /// * `minor` - The minor version number. 70 | /// * `patch` - The patch version number. 
71 | /// 72 | /// # Examples 73 | /// 74 | /// ``` 75 | /// use pilgrimage::schema::version::SchemaVersion; 76 | /// 77 | /// let version = SchemaVersion::new_with_version(1, 2, 3); 78 | /// assert_eq!(version.major, 1); 79 | /// assert_eq!(version.minor, 2); 80 | /// assert_eq!(version.patch, 3); 81 | /// ``` 82 | pub fn new_with_version(major: u32, minor: u32, patch: u32) -> Self { 83 | SchemaVersion { 84 | major, 85 | minor, 86 | patch, 87 | } 88 | } 89 | 90 | /// Increments the major version, resetting minor and patch versions to 0. 91 | /// 92 | /// # Examples 93 | /// 94 | /// ``` 95 | /// use pilgrimage::schema::version::SchemaVersion; 96 | /// 97 | /// let mut version = SchemaVersion::new(1); 98 | /// version.increment_major(); 99 | /// assert_eq!(version.major, 2); 100 | /// assert_eq!(version.minor, 0); 101 | /// assert_eq!(version.patch, 0); 102 | /// ``` 103 | pub fn increment_major(&mut self) { 104 | self.major += 1; 105 | self.minor = 0; 106 | self.patch = 0; 107 | } 108 | 109 | /// Increments the minor version, resetting the patch version to 0. 110 | /// 111 | /// # Examples 112 | /// 113 | /// ``` 114 | /// use pilgrimage::schema::version::SchemaVersion; 115 | /// 116 | /// let mut version = SchemaVersion::new(1); 117 | /// version.increment_minor(); 118 | /// assert_eq!(version.minor, 1); 119 | /// assert_eq!(version.patch, 0); 120 | /// ``` 121 | pub fn increment_minor(&mut self) { 122 | self.minor += 1; 123 | self.patch = 0; 124 | } 125 | 126 | /// Increments the patch version. 127 | /// 128 | /// # Examples 129 | /// 130 | /// ``` 131 | /// use pilgrimage::schema::version::SchemaVersion; 132 | /// 133 | /// let mut version = SchemaVersion::new(1); 134 | /// version.increment_patch(); 135 | /// assert_eq!(version.patch, 1); 136 | /// ``` 137 | pub fn increment_patch(&mut self) { 138 | self.patch += 1; 139 | } 140 | } 141 | 142 | impl fmt::Display for SchemaVersion { 143 | /// Formats the schema version as a string. 
144 | /// 145 | /// The format is `major.minor.patch`. 146 | /// 147 | /// # Examples 148 | /// ``` 149 | /// use pilgrimage::schema::version::SchemaVersion; 150 | /// 151 | /// let version = SchemaVersion::new_with_version(1, 2, 3); 152 | /// assert_eq!(version.to_string(), "1.2.3"); 153 | /// ``` 154 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 155 | write!(f, "{}.{}.{}", self.major, self.minor, self.patch) 156 | } 157 | } 158 | 159 | impl PartialOrd for SchemaVersion { 160 | /// Compares two schema versions. 161 | /// 162 | /// # Arguments 163 | /// * `other` - The other schema version to compare. 164 | /// 165 | /// # Returns 166 | /// * `Some(Ordering)` if the comparison is possible. 167 | /// The ordering is based on the major, minor, and patch versions. 168 | /// * `None` if the comparison is not possible. 169 | fn partial_cmp(&self, other: &Self) -> Option { 170 | Some(self.cmp(other)) 171 | } 172 | } 173 | 174 | impl Ord for SchemaVersion { 175 | /// Compares two schema versions. 176 | /// 177 | /// The ordering is based on the major, minor, and patch versions. 178 | /// 179 | /// # Arguments 180 | /// * `other` - The other schema version to compare. 181 | /// 182 | /// # Returns 183 | /// * `Ordering` based on the major, minor, and patch versions. 
184 | /// 185 | /// # Examples 186 | /// ``` 187 | /// use pilgrimage::schema::version::SchemaVersion; 188 | /// use std::cmp::Ordering; 189 | /// 190 | /// let v1 = SchemaVersion::new_with_version(1, 0, 0); 191 | /// let v2 = SchemaVersion::new_with_version(1, 1, 0); 192 | /// let v3 = SchemaVersion::new_with_version(2, 0, 0); 193 | /// 194 | /// assert!(v1 < v2); 195 | /// assert!(v2 < v3); 196 | /// assert!(v1 < v3); 197 | /// ``` 198 | fn cmp(&self, other: &Self) -> Ordering { 199 | match self.major.cmp(&other.major) { 200 | Ordering::Equal => match self.minor.cmp(&other.minor) { 201 | Ordering::Equal => self.patch.cmp(&other.patch), 202 | ord => ord, 203 | }, 204 | ord => ord, 205 | } 206 | } 207 | } 208 | 209 | #[cfg(test)] 210 | mod tests { 211 | use super::*; 212 | 213 | /// Tests the creation of a new schema version. 214 | /// 215 | /// # Purpose 216 | /// This test ensures that a new schema version can be created. 217 | /// 218 | /// # Steps 219 | /// 1. Create a new schema version with a major version. 220 | /// 2. Verify the major, minor, and patch versions. 221 | #[test] 222 | fn test_version_creation() { 223 | let version = SchemaVersion::new(1); 224 | assert_eq!(version.major, 1); 225 | assert_eq!(version.minor, 0); 226 | assert_eq!(version.patch, 0); 227 | } 228 | 229 | /// Tests the incrementing of a schema version. 230 | /// 231 | /// # Purpose 232 | /// This test ensures that a schema version can be incremented. 233 | /// 234 | /// # Steps 235 | /// 1. Create a new schema version. 236 | /// 2. Increment the major version. 237 | /// 3. Increment the minor version. 238 | /// 4. Increment the patch version. 239 | /// 5. Verify the major, minor, and patch versions. 
240 | #[test] 241 | fn test_version_increment() { 242 | let mut version = SchemaVersion::new(1); 243 | version.increment_minor(); 244 | assert_eq!(version.to_string(), "1.1.0"); 245 | version.increment_patch(); 246 | assert_eq!(version.to_string(), "1.1.1"); 247 | version.increment_major(); 248 | assert_eq!(version.to_string(), "2.0.0"); 249 | } 250 | 251 | /// Tests the comparison of schema versions. 252 | /// 253 | /// # Purpose 254 | /// This test ensures that schema versions can be compared. 255 | /// 256 | /// # Steps 257 | /// 1. Create three schema versions. 258 | /// 2. Compare the schema versions. 259 | /// 3. Verify the ordering of the schema versions. 260 | #[test] 261 | fn test_version_comparison() { 262 | let v1 = SchemaVersion::new_with_version(1, 0, 0); 263 | let v2 = SchemaVersion::new_with_version(1, 1, 0); 264 | let v3 = SchemaVersion::new_with_version(2, 0, 0); 265 | 266 | assert!(v1 < v2); 267 | assert!(v2 < v3); 268 | assert!(v1 < v3); 269 | } 270 | } 271 | -------------------------------------------------------------------------------- /src/subscriber/mod.rs: -------------------------------------------------------------------------------- 1 | //! Module for the subscriber entity and its implementation. 2 | //! 3 | //! The module contains the following submodules: 4 | //! * [`types`] - Defines the subscriber struct and its implementation. 5 | 6 | pub mod types; 7 | -------------------------------------------------------------------------------- /src/subscriber/types.rs: -------------------------------------------------------------------------------- 1 | //! Module that defines the subscriber struct and its implementation. 2 | //! 3 | //! The [`Subscriber`] struct is used to manage subscribers with unique identifiers (`id`) 4 | //! and callback functions (`callback`). When a message is published to the topic a subscriber 5 | //! is subscribed to, the callback function is executed. 6 | //! 7 | //! 
The subscriber struct is implemented to be thread-safe by wrapping the callback function 8 | //! in an [`Arc`] and implementing the [`Send`] and [`Sync`] traits. 9 | //! 10 | //! # Example 11 | //! ```rust 12 | //! use pilgrimage::subscriber::types::Subscriber; 13 | //! 14 | //! // Create a new subscriber with the id "example-id" and 15 | //! // a callback function that prints the message 16 | //! let subscriber = Subscriber::new("example-id", Box::new(|message| { 17 | //! println!("Received message: {}", message); 18 | //! })); 19 | //! 20 | //! // Check if the subscriber was created successfully 21 | //! assert_eq!(subscriber.id, "example-id"); 22 | //! ``` 23 | 24 | use std::fmt::{self, Debug}; 25 | use std::sync::Arc; 26 | 27 | /// Represents a subscriber to a topic. 28 | /// 29 | /// A subscriber is a struct that contains an id and a callback function. 30 | /// * The `id` is used to identify the subscriber; 31 | /// * The `callback` function is called when a message is published to the topic 32 | /// the subscriber is subscribed to. 33 | /// 34 | /// The `callback` function takes a string as an argument, which is the message that was published. 35 | /// 36 | /// # Under the hood 37 | /// The `callback` function is wrapped in an [`Arc`] to allow the subscriber to be cloned 38 | /// and shared between threads safely. 39 | /// 40 | /// Also, data races are prevented by implementing the [`Send`] and [`Sync`] traits. 41 | /// 42 | /// The subscriber struct implements: 43 | /// * The [`Clone`] trait to allow it to be cloned. 44 | /// * The [`Debug`] trait is implemented to allow the subscriber 45 | /// to be printed for debugging purposes. 46 | pub struct Subscriber { 47 | /// Unique identifier for the subscriber. 48 | pub id: String, 49 | /// Callback function to be executed when a message is published. 50 | pub callback: Arc>, 51 | } 52 | 53 | impl Clone for Subscriber { 54 | /// Clones the subscriber struct. 
55 | /// 56 | /// This method makes a deep copy of the [`Subscriber`]'s ID and 57 | /// a clone of the callback function. 58 | fn clone(&self) -> Self { 59 | Self { 60 | id: self.id.clone(), 61 | callback: Arc::clone(&self.callback), 62 | } 63 | } 64 | } 65 | 66 | impl Debug for Subscriber { 67 | /// Formats the subscriber struct for debugging purposes. 68 | /// 69 | /// This method returns a string representation of the subscriber struct: 70 | /// ```text 71 | /// Subscriber { 72 | /// id: "example-id", 73 | /// } 74 | /// ``` 75 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 76 | f.debug_struct("Subscriber").field("id", &self.id).finish() 77 | } 78 | } 79 | 80 | impl Subscriber { 81 | /// Creates a new subscriber with the given id and callback function. 82 | /// 83 | /// The callback function is wrapped in an [`Arc`] to guarantee thread safety. 84 | /// 85 | /// # Arguments 86 | /// * `id` - A unique identifier for the subscriber. 87 | /// * `callback` - A callback function to be executed when a message is published. 88 | /// The callback function is executed when a message is published to the topic 89 | /// the subscriber is subscribed to. 90 | /// It takes a string as an argument, which is the message that was published. 91 | /// 92 | /// # Returns 93 | /// A new subscriber with the given id and callback function. 
94 | /// 95 | /// # Example 96 | /// ```rust 97 | /// use pilgrimage::subscriber::types::Subscriber; 98 | /// 99 | /// // Create a new subscriber with the id "example-id" and 100 | /// // a callback function that prints the message 101 | /// let subscriber = Subscriber::new("example-id", Box::new(|message| { 102 | /// println!("Received message: {}", message); 103 | /// })); 104 | /// 105 | /// // Check if the subscriber was created successfully 106 | /// assert_eq!(subscriber.id, "example-id"); 107 | /// ``` 108 | pub fn new>(id: S, callback: Box) -> Self { 109 | Self { 110 | id: id.into(), 111 | callback: Arc::new(callback), 112 | } 113 | } 114 | 115 | /// Notify the subscriber with a message 116 | pub fn notify(&self, message: &str) -> Result<(), String> { 117 | (self.callback)(message.to_string()); 118 | Ok(()) 119 | } 120 | } 121 | 122 | #[cfg(test)] 123 | mod tests { 124 | use super::*; 125 | 126 | /// Tests the [`Subscriber::new`] method. 127 | /// 128 | /// # Purpose 129 | /// The test checks if the subscriber is created successfully. 130 | /// 131 | /// # Steps 132 | /// 1. Create a new subscriber with the id `test-id` and a callback function. 133 | /// 2. Check if the subscriber was created successfully. 134 | #[test] 135 | fn test_subscriber_new() { 136 | let message_received = Arc::new(std::sync::Mutex::new(false)); 137 | let message_clone = message_received.clone(); 138 | 139 | let callback = Box::new(move |_| { 140 | let mut received = message_clone.lock().unwrap(); 141 | *received = true; 142 | }); 143 | 144 | let subscriber = Subscriber::new("test-id", callback); 145 | assert_eq!(subscriber.id, "test-id"); 146 | } 147 | 148 | /// Tests the [`Subscriber::clone`] method. 149 | /// 150 | /// # Purpose 151 | /// The test checks if the subscriber is cloned successfully. 152 | /// 153 | /// # Steps 154 | /// 1. Create a new subscriber with the id `test-id` and a callback function. 155 | /// 2. Clone the subscriber. 156 | /// 3. 
Check if the subscriber was cloned successfully. 157 | #[test] 158 | fn test_subscriber_clone() { 159 | let subscriber = Subscriber::new("test-id", Box::new(|_| {})); 160 | let cloned = subscriber.clone(); 161 | 162 | assert_eq!(subscriber.id, cloned.id); 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /src/tests/broker_tests.rs: -------------------------------------------------------------------------------- 1 | use crate::broker::Broker; 2 | use crate::message::Message; 3 | use crate::message::MessageMetadata; 4 | use crate::schema::message_schema::MessageSchema; 5 | use std::time::Duration; 6 | use tempfile::tempdir; 7 | 8 | #[tokio::test] 9 | async fn test_message_send_receive() { 10 | let dir = tempdir().unwrap(); 11 | let storage_path = dir.path().to_str().unwrap(); 12 | let broker = Broker::new("test-broker", 3, 2, storage_path); 13 | 14 | // Send a test message 15 | let content = "Hello, World!"; 16 | let metadata = MessageMetadata { 17 | id: uuid::Uuid::new_v4().to_string(), 18 | content: content.to_string(), 19 | timestamp: chrono::Utc::now().to_rfc3339(), 20 | topic_id: Some("test-topic".to_string()), 21 | partition_id: Some(0), 22 | schema: None, 23 | }; 24 | 25 | broker.send_message(metadata).unwrap(); 26 | 27 | // Receive the message 28 | let received = broker.receive_message("test-topic", 0).unwrap(); 29 | assert_eq!(received.content, content); 30 | } 31 | 32 | #[tokio::test] 33 | async fn test_message_with_schema() { 34 | let dir = tempdir().unwrap(); 35 | let storage_path = dir.path().to_str().unwrap(); 36 | let broker = Broker::new("test-broker", 3, 2, storage_path); 37 | 38 | // Create a schema 39 | let mut schema = MessageSchema::new(); 40 | schema.definition = r#"{"type":"object","properties":{"name":{"type":"string"}}}"#.to_string(); 41 | 42 | // Send a test message with schema 43 | let content = r#"{"name":"test"}"#; 44 | let metadata = MessageMetadata { 45 | id: 
uuid::Uuid::new_v4().to_string(),
        content: content.to_string(),
        timestamp: chrono::Utc::now().to_rfc3339(),
        topic_id: Some("test-topic".to_string()),
        partition_id: Some(0),
        schema: Some(schema),
    };

    broker.send_message(metadata).unwrap();

    // Read the message back and confirm the payload survived storage untouched.
    let received = broker.receive_message("test-topic", 0).unwrap();
    assert_eq!(received.content, content);
}

/// Sending with an acknowledgment must yield an ack that references the
/// original message's id within the allowed timeout.
#[tokio::test]
async fn test_message_acknowledgment() {
    let dir = tempdir().unwrap();
    let storage_path = dir.path().to_str().unwrap();
    let broker = Broker::new("test-broker", 3, 2, storage_path);

    // Build a message bound to a topic/partition, then send it and wait for the ack.
    let message = Message::new("Test message".to_string())
        .with_topic("test-topic".to_string())
        .with_partition(0);

    let timeout = Duration::from_secs(5);
    let result = broker.send_message_with_ack(message.clone(), timeout).await;

    assert!(result.is_ok());
    // The acknowledgment must point back at the message we sent.
    assert_eq!(result.unwrap().message_id, message.id);
}
--------------------------------------------------------------------------------
/src/tests/schema_tests.rs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mila411/pilgrimage/f86012c5bf6c1de69f6de7fad4b31206d0e50119/src/tests/schema_tests.rs
--------------------------------------------------------------------------------
/tests/simple_message_test.rs:
--------------------------------------------------------------------------------
use pilgrimage::broker::Broker;
use pilgrimage::schema::message_schema::MessageSchema;

/// Happy-path integration test: create a topic, send one message, receive it
/// exactly once, and verify a second receive yields nothing.
#[test]
fn test_simple_send_receive() {
    // Derive a unique storage path from the current time so concurrent
    // test runs never collide on the same backing file.
    use std::time::{SystemTime, UNIX_EPOCH};
    let nanos = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    let storage_path = format!("test_storage_{}", nanos);

    let mut broker = Broker::new("test-broker", 4, 2, &storage_path);

    // Create Topic
    broker
        .create_topic("test-topic", None)
        .expect("Topic creation failed");

    // Send a message
    let message = MessageSchema::new()
        .with_content("test message".to_string())
        .with_topic("test-topic".to_string())
        .with_partition(0);

    println!("Sending message...");
    broker
        .send_message(message)
        .expect("Failed to send message");

    println!("Receiving messages...");
    match broker.receive_message("test-topic", 0) {
        Err(e) => panic!("Error in receiving message: {:?}", e),
        Ok(None) => panic!("Message not found"),
        Ok(Some(message)) => {
            println!("Message received.: {}", message.content);
            assert!(message.content.contains("test message"));
        }
    }

    // Attempt to receive again (should not get the same message)
    println!("Receiving message again...");
    match broker.receive_message("test-topic", 0) {
        Err(e) => panic!("Error in receiving message: {:?}", e),
        Ok(Some(_)) => panic!("Already consumed message retrieved again"),
        Ok(None) => {
            println!("As expected, messages already consumed were not retrieved");
        }
    }

    // Clean up test file
    let _ = std::fs::remove_file(&storage_path);
}
--------------------------------------------------------------------------------