├── .github
├── CODEOWNERS
├── dependabot.yaml
├── labels.json
├── test_build_release.yml
└── workflows
│ ├── all_build.yml
│ ├── build.yml
│ └── rust.yml
├── .gitignore
├── .idea
├── .gitignore
├── jsLibraryMappings.xml
├── modules.xml
├── torrust-axum.iml
└── vcs.xml
├── CODE_OF_CONDUCT.md
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── README.md
├── build.rs
├── docker
├── Dockerfile
├── docker-compose.yml
├── healthcheck
├── healthcheck.py
└── init.sh
├── icon.ico
├── sonar-project.properties
└── src
├── api
├── api.rs
├── api_blacklists.rs
├── api_keys.rs
├── api_stats.rs
├── api_torrents.rs
├── api_users.rs
├── api_whitelists.rs
├── mod.rs
├── structs.rs
└── structs
│ ├── api_service_data.rs
│ └── query_token.rs
├── common
├── common.rs
├── impls.rs
├── impls
│ └── custom_error.rs
├── mod.rs
├── structs.rs
└── structs
│ ├── custom_error.rs
│ ├── get_torrent_api.rs
│ ├── get_torrents_api.rs
│ ├── number_of_bytes.rs
│ └── number_of_bytes_def.rs
├── config
├── enums.rs
├── enums
│ └── configuration_error.rs
├── impls.rs
├── impls
│ ├── configuration.rs
│ └── configuration_error.rs
├── mod.rs
├── structs.rs
└── structs
│ ├── api_trackers_config.rs
│ ├── configuration.rs
│ ├── database_config.rs
│ ├── database_structure_config.rs
│ ├── database_structure_config_blacklist.rs
│ ├── database_structure_config_keys.rs
│ ├── database_structure_config_torrents.rs
│ ├── database_structure_config_users.rs
│ ├── database_structure_config_whitelist.rs
│ ├── http_trackers_config.rs
│ ├── sentry_config.rs
│ ├── tracker_config.rs
│ └── udp_trackers_config.rs
├── database
├── enums.rs
├── enums
│ └── database_drivers.rs
├── impls.rs
├── impls
│ ├── database_connector.rs
│ ├── database_connector_mysql.rs
│ ├── database_connector_pgsql.rs
│ └── database_connector_sqlite.rs
├── mod.rs
├── structs.rs
└── structs
│ ├── database_connector.rs
│ ├── database_connector_mysql.rs
│ ├── database_connector_pgsql.rs
│ └── database_connector_sqlite.rs
├── http
├── enums.rs
├── http.rs
├── impls.rs
├── mod.rs
├── structs.rs
├── structs
│ └── http_service_data.rs
└── types.rs
├── lib.rs
├── main.rs
├── openapi.json
├── stats
├── enums.rs
├── enums
│ └── stats_event.rs
├── impls.rs
├── impls
│ └── torrent_tracker.rs
├── mod.rs
├── structs.rs
├── structs
│ ├── stats.rs
│ └── stats_atomics.rs
└── tests.rs
├── structs.rs
├── tracker
├── enums.rs
├── enums
│ ├── announce_event.rs
│ ├── announce_event_def.rs
│ ├── torrent_peers_type.rs
│ └── updates_action.rs
├── impls.rs
├── impls
│ ├── announce_event.rs
│ ├── info_hash.rs
│ ├── info_hash_visitor.rs
│ ├── peer_id.rs
│ ├── peer_id_visitor.rs
│ ├── torrent_entry.rs
│ ├── torrent_peer.rs
│ ├── torrent_sharding.rs
│ ├── torrent_tracker.rs
│ ├── torrent_tracker_cert_gen.rs
│ ├── torrent_tracker_export.rs
│ ├── torrent_tracker_handlers.rs
│ ├── torrent_tracker_import.rs
│ ├── torrent_tracker_keys.rs
│ ├── torrent_tracker_keys_updates.rs
│ ├── torrent_tracker_peers.rs
│ ├── torrent_tracker_torrents.rs
│ ├── torrent_tracker_torrents_blacklist.rs
│ ├── torrent_tracker_torrents_blacklist_updates.rs
│ ├── torrent_tracker_torrents_updates.rs
│ ├── torrent_tracker_torrents_whitelist.rs
│ ├── torrent_tracker_torrents_whitelist_updates.rs
│ ├── torrent_tracker_users.rs
│ ├── torrent_tracker_users_updates.rs
│ ├── user_id.rs
│ └── user_id_visitor.rs
├── mod.rs
├── structs.rs
├── structs
│ ├── announce_query_request.rs
│ ├── info_hash.rs
│ ├── info_hash_visitor.rs
│ ├── peer_id.rs
│ ├── peer_id_visitor.rs
│ ├── scrape_query_request.rs
│ ├── torrent_entry.rs
│ ├── torrent_peer.rs
│ ├── torrent_peers.rs
│ ├── torrent_sharding.rs
│ ├── torrent_tracker.rs
│ ├── user_entry_item.rs
│ ├── user_id.rs
│ └── user_id_visitor.rs
├── tests.rs
├── types.rs
└── types
│ ├── keys_updates.rs
│ ├── torrents_updates.rs
│ └── users_updates.rs
└── udp
├── enums.rs
├── enums
├── request.rs
├── request_parse_error.rs
├── response.rs
└── server_error.rs
├── impls.rs
├── impls
├── ipv4_addr.rs
├── ipv6_addr.rs
├── request.rs
├── request_parse_error.rs
├── response.rs
└── udp_server.rs
├── mod.rs
├── structs.rs
├── structs
├── announce_interval.rs
├── announce_request.rs
├── announce_response.rs
├── connect_request.rs
├── connect_response.rs
├── connection_id.rs
├── error_response.rs
├── number_of_downloads.rs
├── number_of_peers.rs
├── peer_key.rs
├── port.rs
├── response_peer.rs
├── scrape_request.rs
├── scrape_response.rs
├── torrent_scrape_statistics.rs
├── transaction_id.rs
└── udp_server.rs
├── traits.rs
└── udp.rs
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | /.github/**/* @power2all/maintainers
--------------------------------------------------------------------------------
/.github/dependabot.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: github-actions
4 | directory: /
5 | schedule:
6 | interval: daily
7 | target-branch: "develop"
8 | labels:
9 | - "Continuous Integration"
10 | - "Dependencies"
11 |
12 | - package-ecosystem: cargo
13 | directory: /
14 | schedule:
15 | interval: daily
16 | target-branch: "develop"
17 | labels:
18 | - "Build | Project System"
19 | - "Dependencies"
--------------------------------------------------------------------------------
/.github/labels.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "- Admin -",
4 | "color": "FFFFFF",
5 | "description": "Enjoyable to Install and Setup our Software",
6 | "aliases": []
7 | },
8 | {
9 | "name": "- Contributor -",
10 | "color": "FFFFFF",
11 | "description": "Nice to support Torrust",
12 | "aliases": []
13 | },
14 | {
15 | "name": "- Developer -",
16 | "color": "FFFFFF",
17 | "description": "Torrust Improvement Experience",
18 | "aliases": []
19 | },
20 | {
21 | "name": "- User -",
22 | "color": "FFFFFF",
23 | "description": "Enjoyable to Use our Software",
24 | "aliases": []
25 | },
26 | {
27 | "name": "Blocked",
28 | "color": "000000",
29 | "description": "Has Unsatisfied Dependency",
30 | "aliases": []
31 | },
32 | {
33 | "name": "Bug",
34 | "color": "a80506",
35 | "description": "Incorrect Behavior",
36 | "aliases": []
37 | },
38 | {
39 | "name": "Build | Project System",
40 | "color": "99AAAA",
41 | "description": "Compiling and Packaging",
42 | "aliases": ["Rust"]
43 | },
44 | {
45 | "name": "Cannot Reproduce",
46 | "color": "D3D3D3",
47 | "description": "Inconsistent Observations",
48 | "aliases": []
49 | },
50 | {
51 | "name": "Code Cleanup / Refactoring",
52 | "color": "055a8b",
53 | "description": "Tidying and Making Neat",
54 | "aliases": []
55 | },
56 | {
57 | "name": "Continuous Integration",
58 | "color": "41c6b3",
59 | "description": "Workflows and Automation",
60 | "aliases": []
61 | },
62 | {
63 | "name": "Dependencies",
64 | "color": "d4f8f6",
65 | "description": "Related to Dependencies",
66 | "aliases": []
67 | },
68 | {
69 | "name": "Documentation",
70 | "color": "3d2133",
71 | "description": "Improves Instructions, Guides, and Notices",
72 | "aliases": []
73 | },
74 | {
75 | "name": "Duplicate",
76 | "color": "cfd3d7",
77 | "description": "Not Unique",
78 | "aliases": []
79 | },
80 | {
81 | "name": "Easy",
82 | "color": "f0cff0",
83 | "description": "Good for Newcomers",
84 | "aliases": []
85 | },
86 | {
87 | "name": "Enhancement / Feature Request",
88 | "color": "c9ecbf",
89 | "description": "Something New",
90 | "aliases": []
91 | },
92 | {
93 | "name": "External Tools",
94 | "color": "a6006b",
95 | "description": "3rd Party Systems",
96 | "aliases": []
97 | },
98 | {
99 | "name": "First Time Contribution",
100 | "color": "f1e0e6",
101 | "description": "Welcome to Torrust",
102 | "aliases": []
103 | },
104 | {
105 | "name": "Fixed",
106 | "color": "8e4c42",
107 | "description": "Not a Concern Anymore",
108 | "aliases": []
109 | },
110 | {
111 | "name": "Hard",
112 | "color": "2c2c2c",
113 | "description": "Non-Trivial",
114 | "aliases": []
115 | },
116 | {
117 | "name": "Help Wanted",
118 | "color": "00896b",
119 | "description": "More Contributions are Appreciated",
120 | "aliases": []
121 | },
122 | {
123 | "name": "High Priority",
124 | "color": "ba3fbc",
125 | "description": "Focus Required",
126 | "aliases": []
127 | },
128 | {
129 | "name": "Hold Merge",
130 | "color": "9aafbe",
131 | "description": "We are not Ready Yet",
132 | "aliases": []
133 | },
134 | {
135 | "name": "Installer | Package",
136 | "color": "ed8b24",
137 | "description": "Distribution to Users",
138 | "aliases": []
139 | },
140 | {
141 | "name": "Invalid",
142 | "color": "c1c1c1",
143 | "description": "This doesn't seem right",
144 | "aliases": []
145 | },
146 | {
147 | "name": "Legal",
148 | "color": "463e60",
149 | "description": "Licenses and other Official Documents",
150 | "aliases": []
151 | },
152 | {
153 | "name": "Low Priority",
154 | "color": "43536b",
155 | "description": "Not our Focus Now",
156 | "aliases": []
157 | },
158 | {
159 | "name": "Needs Feedback",
160 | "color": "d6946c",
161 | "description": "What dose the Community Think?",
162 | "aliases": []
163 | },
164 | {
165 | "name": "Needs Rebase",
166 | "color": "FBC002",
167 | "description": "Base Branch has Incompatibilities",
168 | "aliases": []
169 | },
170 | {
171 | "name": "Needs Research",
172 | "color": "4bc021",
173 | "description": "We Need to Know More About This",
174 | "aliases": []
175 | },
176 | {
177 | "name": "Optimization",
178 | "color": "faeba8",
179 | "description": "Make it Faster",
180 | "aliases": []
181 | },
182 | {
183 | "name": "Portability",
184 | "color": "95de82",
185 | "description": "Distribution to More Places",
186 | "aliases": []
187 | },
188 | {
189 | "name": "Postponed",
190 | "color": "dadada",
191 | "description": "For Later",
192 | "aliases": []
193 | },
194 | {
195 | "name": "Quality & Assurance",
196 | "color": "eea2e8",
197 | "description": "Relates to QA, Testing, and CI",
198 | "aliases": []
199 | },
200 | {
201 | "name": "Question / Discussion",
202 | "color": "f89d00",
203 | "description": "Community Feedback",
204 | "aliases": []
205 | },
206 | {
207 | "name": "Regression",
208 | "color": "d10588",
209 | "description": "It dose not work anymore",
210 | "aliases": []
211 | },
212 | {
213 | "name": "Reviewed",
214 | "color": "f4f4ea",
215 | "description": "This Looks Good",
216 | "aliases": []
217 | },
218 | {
219 | "name": "Security",
220 | "color": "650606",
221 | "description": "Publicly Connected to Security",
222 | "aliases": []
223 | },
224 | {
225 | "name": "Testing",
226 | "color": "c5def5",
227 | "description": "Checking Torrust",
228 | "aliases": []
229 | },
230 | {
231 | "name": "Translations",
232 | "color": "0c86af",
233 | "description": "Localization and Cultural Adaptions",
234 | "aliases": []
235 | },
236 | {
237 | "name": "Trivial",
238 | "color": "5f9685",
239 | "description": "Something Easy",
240 | "aliases": []
241 | },
242 | {
243 | "name": "Won't Fix",
244 | "color": "070003",
245 | "description": "Something Not Relevant",
246 | "aliases": []
247 | },
248 | {
249 | "name": "Workaround Possible",
250 | "color": "eae3e7",
251 | "description": "You can still do it another way",
252 | "aliases": []
253 | },
254 | {
255 | "name": "good first issue",
256 | "color": "b0fc38",
257 | "description": "Feel free to seek assistance when needed",
258 | "aliases": []
259 | }
260 | ]
--------------------------------------------------------------------------------
/.github/test_build_release.yml:
--------------------------------------------------------------------------------
name: CI

# Test on every push and pull request; build and release only for
# version tags pushed to main (the `if` gate on the build job below).
on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    env:
      CARGO_TERM_COLOR: always
    steps:
      - uses: actions/checkout@v4
      # actions-rs/toolchain is archived and unmaintained; use
      # dtolnay/rust-toolchain instead, matching all_build.yml.
      - uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
      - name: Run tests
        run: cargo test

  build:
    needs: test
    if: |
      github.event_name == 'push' &&
      github.event.base_ref == 'refs/heads/main' &&
      startsWith(github.ref, 'refs/tags/v')
    runs-on: ubuntu-latest
    env:
      CARGO_TERM_COLOR: always
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
      - name: Build Torrust-Actix Tracker
        run: cargo build --release
      - name: Upload build artifact
        # upload-artifact@v2 is deprecated and disabled on GitHub-hosted
        # runners; v4 is the supported release.
        uses: actions/upload-artifact@v4
        with:
          name: torrust-actix tracker
          path: ./target/release/torrust-actix

  release:
    # Skipped automatically when `build` is skipped (needs propagates).
    needs: build
    runs-on: ubuntu-latest
    steps:
      - name: Download build artifact
        # Must match the upload-artifact major version (v4 artifacts are
        # not readable by download-artifact@v2).
        uses: actions/download-artifact@v4
        with:
          name: torrust-actix tracker
      - name: Release
        uses: softprops/action-gh-release@v1
--------------------------------------------------------------------------------
/.github/workflows/all_build.yml:
--------------------------------------------------------------------------------
1 | name: All Build
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 | env:
9 | CARGO_TERM_COLOR: always
10 |
11 | jobs:
12 | upgrade:
13 | name: Build on ${{ matrix.os }} (${{ matrix.toolchain }})
14 | runs-on: ${{ matrix.os }}
15 |
16 | strategy:
17 | matrix:
18 | os: [ubuntu-latest, macos-latest, windows-latest]
19 | toolchain: [nightly, stable]
20 |
21 | steps:
22 | - name: Checkout code
23 | uses: actions/checkout@v4
24 |
25 | - id: setup
26 | name: Setup Toolchain
27 | uses: dtolnay/rust-toolchain@stable
28 | with:
29 | toolchain: ${{ matrix.toolchain }}
30 |
31 | - name: Build project
32 | run: cargo build --release --verbose
33 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Build
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 |
9 | jobs:
10 | build:
11 | name: Build
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v4
15 | with:
16 | fetch-depth: 0 # Shallow clones should be disabled for a better relevancy of analysis
17 | - uses: sonarsource/sonarqube-scan-action@master
18 | env:
19 | SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
20 | SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }}
21 | # If you wish to fail your job when the Quality Gate is red, uncomment the
22 | # following lines. This would typically be used to fail a deployment.
23 | # - uses: sonarsource/sonarqube-quality-gate-action@master
24 | # timeout-minutes: 5
25 | # env:
26 | # SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
27 |
--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
1 | name: Rust
2 |
3 | on:
4 | push:
5 | branches: [ "master" ]
6 | pull_request:
7 | branches: [ "master" ]
8 |
9 | env:
10 | CARGO_TERM_COLOR: always
11 |
12 | jobs:
13 | build:
14 |
15 | runs-on: ubuntu-latest
16 |
17 | steps:
18 | - uses: actions/checkout@v4
19 | - name: Build
20 | run: cargo build --release --verbose
21 | - name: Run tests
22 | run: cargo test --verbose
23 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/jsLibraryMappings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/torrust-axum.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at
63 | GitHub Message.
64 | All complaints will be reviewed and investigated promptly and fairly.
65 |
66 | All community leaders are obligated to respect the privacy and security of the
67 | reporter of any incident.
68 |
69 | ## Enforcement Guidelines
70 |
71 | Community leaders will follow these Community Impact Guidelines in determining
72 | the consequences for any action they deem in violation of this Code of Conduct:
73 |
74 | ### 1. Correction
75 |
76 | **Community Impact**: Use of inappropriate language or other behavior deemed
77 | unprofessional or unwelcome in the community.
78 |
79 | **Consequence**: A private, written warning from community leaders, providing
80 | clarity around the nature of the violation and an explanation of why the
81 | behavior was inappropriate. A public apology may be requested.
82 |
83 | ### 2. Warning
84 |
85 | **Community Impact**: A violation through a single incident or series
86 | of actions.
87 |
88 | **Consequence**: A warning with consequences for continued behavior. No
89 | interaction with the people involved, including unsolicited interaction with
90 | those enforcing the Code of Conduct, for a specified period of time. This
91 | includes avoiding interactions in community spaces as well as external channels
92 | like social media. Violating these terms may lead to a temporary or
93 | permanent ban.
94 |
95 | ### 3. Temporary Ban
96 |
97 | **Community Impact**: A serious violation of community standards, including
98 | sustained inappropriate behavior.
99 |
100 | **Consequence**: A temporary ban from any sort of interaction or public
101 | communication with the community for a specified period of time. No public or
102 | private interaction with the people involved, including unsolicited interaction
103 | with those enforcing the Code of Conduct, is allowed during this period.
104 | Violating these terms may lead to a permanent ban.
105 |
106 | ### 4. Permanent Ban
107 |
108 | **Community Impact**: Demonstrating a pattern of violation of community
109 | standards, including sustained inappropriate behavior, harassment of an
110 | individual, or aggression toward or disparagement of classes of individuals.
111 |
112 | **Consequence**: A permanent ban from any sort of public interaction within
113 | the community.
114 |
115 | ## Attribution
116 |
117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118 | version 2.0, available at
119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120 |
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
122 | enforcement ladder](https://github.com/mozilla/diversity).
123 |
124 | [homepage]: https://www.contributor-covenant.org
125 |
126 | For answers to common questions about this code of conduct, see the FAQ at
127 | https://www.contributor-covenant.org/faq. Translations are available at
128 | https://www.contributor-covenant.org/translations.
129 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "torrust-actix"
3 | version = "4.0.11"
4 | edition = "2024"
5 | license = "AGPL-3.0"
6 | authors = [
7 | "Jasper Lingers ",
8 | "Mick van Dijke "
9 | ]
10 | description = "A rich, fast and efficient Bittorrent Tracker."
11 |
12 | [profile.release]
13 | opt-level = 'z'
14 | debug = false
15 | debug-assertions = false
16 | overflow-checks = false
17 | lto = true
18 | panic = 'abort'
19 | incremental = false
20 | codegen-units = 1
21 | rpath = false
22 | strip = "debuginfo"
23 |
24 | [dependencies]
25 | actix-cors = "^0.7"
26 | actix-web = { version = "^4", features = ["rustls-0_23"] }
27 | async-std = "^1"
28 | binascii = "^0.1"
29 | bip_bencode = "^0.4"
30 | byteorder = "^1.5"
31 | chrono = "^0.4"
32 | clap = { version = "^4.5", features = ["derive"] }
33 | fern = { version = "^0.7", features = ["colored"] }
34 | futures-util = "^0.3"
35 | hex = "^0.4"
36 | log = "^0.4"
37 | parking_lot = { version = "^0.12", features = ["arc_lock", "hardware-lock-elision", "serde", "deadlock_detection"] }
38 | percent-encoding = "^2.3"
39 | rcgen = "^0.13"
40 | regex = "^1.11"
41 | rustls = { version = "^0.23", default-features = false, features = ["std", "ring"] }
42 | rustls-pemfile = "^2.2"
43 | sentry = { version = "^0.37", default-features = false, features = ["rustls", "backtrace", "contexts", "panic", "transport", "debug-images", "reqwest"] }
44 | sentry-actix = "^0.37"
45 | serde = { version = "^1.0", features = ["derive"] }
46 | serde_json = { version = "^1.0", features = ["preserve_order"] }
47 | serde_millis = "^0.1"
48 | sha1 = "^0.10"
49 | sqlx = { version = "^0.8", features = ["mysql", "postgres", "sqlite", "runtime-tokio-rustls"] }
50 | thiserror = "^2.0"
51 | tokio = { version = "^1.44", features = ["full"] }
52 | tokio-shutdown = "^0.1"
53 | toml = "^0.8"
54 | tracing = "^0.1"
55 | utoipa = { version = "^5", features = ["actix_extras"] }
56 | utoipa-swagger-ui = { version = "^9", features = ["actix-web"] }
57 |
58 | [target.'cfg(windows)'.build-dependencies]
59 | winres = "^0.1"
60 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2024 Power2All
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in all
11 | copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 |
--------------------------------------------------------------------------------
/build.rs:
--------------------------------------------------------------------------------
1 | use std::io;
2 | #[cfg(windows)] use winres::WindowsResource;
3 |
/// Build-script entry point.
///
/// On Windows targets it embeds `icon.ico` into the produced binary via the
/// `winres` resource compiler; on every other platform the body is compiled
/// out and the script is a no-op.
fn main() -> io::Result<()> {
    // Whole resource step only exists on Windows — `winres` is a
    // Windows-only build dependency (see Cargo.toml).
    #[cfg(windows)]
    {
        let mut resource = winres::WindowsResource::new();
        resource.set_icon("icon.ico");
        resource.compile()?;
    }

    Ok(())
}
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
FROM rust:alpine

# Toolchain and linker dependencies for building the tracker on musl.
RUN apk add git musl-dev curl pkgconfig openssl-dev openssl-libs-static

# Fetch the source and pin it to the release tag this image ships.
RUN git clone https://github.com/Power2All/torrust-actix.git /app/torrust-actix
RUN cd /app/torrust-actix && git checkout tags/v4.0.11
WORKDIR /app/torrust-actix
# (removed: a redundant `RUN cd /app/torrust-actix` layer — each RUN starts
# a fresh shell, so a lone `cd` is a no-op; WORKDIR above already applies)

# Release build, then drop intermediate artifacts to keep the image small.
RUN cargo build --release && rm -Rf target/release/.fingerprint target/release/build target/release/deps target/release/examples target/release/incremental

# Install the entrypoint and the healthcheck script.
COPY init.sh /app/torrust-actix/target/release/init.sh
COPY healthcheck.py /app/torrust-actix/healthcheck
RUN chmod +x /app/torrust-actix/target/release/init.sh
# Fixed: chmod must target the COPY destination ("healthcheck"), not the
# source file name ("healthcheck.py"), which does not exist inside the
# image — the old command failed the build and left the HEALTHCHECK
# script without the execute bit.
RUN chmod +x /app/torrust-actix/healthcheck

EXPOSE 8080/tcp
EXPOSE 6969/tcp
EXPOSE 6969/udp
CMD /app/torrust-actix/target/release/init.sh
HEALTHCHECK CMD /app/torrust-actix/healthcheck || exit 1
--------------------------------------------------------------------------------
/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | torrust_actix:
3 | image: power2all/torrust-actix:latest
4 | build: .
5 | environment:
6 | LOG_LEVEL: "debug"
7 | TRACKER__API_KEY: "MyVerySecureAPIKey"
8 | TRACKER__SWAGGER: "true"
9 | DATABASE__PERSISTENT: "false"
10 | DATABASE__ENGINE: "sqlite3"
11 | DATABASE__PATH: "sqlite://:memory:"
12 | container_name: "torrust_actix"
13 | hostname: "torrust_actix"
14 | ports:
15 | - "8080:8080/tcp" # API
16 | - "6969:6969/tcp" # TCP Tracker
17 | - "6969:6969/udp" # UDP Tracker
--------------------------------------------------------------------------------
/docker/healthcheck:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Power2All/torrust-actix/55622882ecfbd6612fc172f130a677b8a83a65b4/docker/healthcheck
--------------------------------------------------------------------------------
/docker/healthcheck.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | import tomllib
4 | import sys
5 | import re
6 | from socket import *
7 | from urllib.parse import urlparse
8 | from http.client import HTTPConnection, HTTPSConnection
9 | from datetime import datetime
10 | from enum import Enum
11 |
class messageType(Enum):
    """Severity levels for console output; each value is the one-letter
    prefix printed at the start of a log line by consoleLog()."""
    INFO = "I"
    ERROR = "E"
    WARN = "W"
    DEBUG = "D"
17 |
def check_udp_port(host, port):
    """Probe whether a UDP port is already taken.

    Attempts to bind a throwaway UDP socket to (host, port).

    Returns:
        True  -- the bind failed (port already in use by the tracker, or
                 the address is otherwise unusable), i.e. "port is taken".
        False -- the bind succeeded, i.e. nothing is listening there.
    """
    s = None
    try:
        s = socket(AF_INET, SOCK_DGRAM)
        s.bind((host, port))
        return False
    except OSError:
        # Catch OSError (EADDRINUSE, bad address, ...) instead of a bare
        # `except:`, which would also swallow KeyboardInterrupt/SystemExit.
        return True
    finally:
        # Always release the probe socket, even when bind() raises —
        # the original leaked the descriptor on the failure path.
        if s is not None:
            s.close()
26 |
def consoleLog(messageType, message):
    """Print one timestamped log line: "<level> [<YYYY-MM-DD HH:MM:SS>] <msg>".

    messageType -- a messageType enum member; its .value supplies the
                   one-letter severity prefix.
    message     -- the text to log.
    """
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print(f"{messageType.value} [{stamp}] {message}")
29 |
30 | ##############################################
31 | # Get the DATA from the config file to parse #
32 | ##############################################
33 |
34 | # Load the TOML configuration
35 | with open("./target/release/config.toml", "rb") as f:
36 | data = tomllib.load(f)
37 |
38 | # Get the Enabled API blocks
39 | api_binds = []
40 | for block in data['api_server']:
41 | if block['enabled'] == True:
42 | ipv4_search = re.search(r"((?:(?:25[0-5]|(?:2[0-4]|1\d|[1-9]|)\d)\.?\b){4})\:([0-9]+)", block['bind_address'])
43 | ipv6_search = re.search(r"\[(.+)\]\:([0-9]+)", block['bind_address'])
44 | if ipv4_search != None:
45 | if ipv4_search[1] == "0.0.0.0":
46 | api_binds.append({ "ip": "127.0.0.1", "port": int(ipv4_search[2]), "ssl": block['ssl'] })
47 | else:
48 | api_binds.append({ "ip": ipv4_search[1], "port": int(ipv4_search[2]), "ssl": block['ssl'] })
49 | if ipv6_search != None:
50 | if ipv6_search[1] == "::":
51 | api_binds.append({ "ip": "::1", "port": int(ipv6_search[2]), "ssl": block['ssl'] })
52 | else:
53 | api_binds.append({ "ip": f"[{ipv6_search[1]}]", "port": int(ipv6_search[2]), "ssl": block['ssl'] })
54 |
55 | # Get the Enabled TCP blocks
56 | http_binds = []
57 | for block in data['http_server']:
58 | if block['enabled'] == True:
59 | ipv4_search = re.search(r"((?:(?:25[0-5]|(?:2[0-4]|1\d|[1-9]|)\d)\.?\b){4})\:([0-9]+)", block['bind_address'])
60 | ipv6_search = re.search(r"\[(.+)\]\:([0-9]+)", block['bind_address'])
61 | if ipv4_search != None:
62 | if ipv4_search[1] == "0.0.0.0":
63 | http_binds.append({ "ip": "127.0.0.1", "port": int(ipv4_search[2]), "ssl": block['ssl'] })
64 | else:
65 | http_binds.append({ "ip": ipv4_search[1], "port": int(ipv4_search[2]), "ssl": block['ssl'] })
66 | if ipv6_search != None:
67 | if ipv6_search[1] == "::":
68 | http_binds.append({ "ip": "::1", "port": int(ipv6_search[2]), "ssl": block['ssl'] })
69 | else:
70 | http_binds.append({ "ip": f"[{ipv6_search[1]}]", "port": int(ipv6_search[2]), "ssl": block['ssl'] })
71 |
# Get the Enabled UDP blocks
udp_binds = []
for block in data['udp_server']:
    if block['enabled']:
        ipv4_search = re.search(r"((?:(?:25[0-5]|(?:2[0-4]|1\d|[1-9]|)\d)\.?\b){4})\:([0-9]+)", block['bind_address'])
        ipv6_search = re.search(r"\[(.+)\]\:([0-9]+)", block['bind_address'])
        # NOTE(review): unlike the API/HTTP sections, a 0.0.0.0 / :: wildcard bind
        # is NOT remapped to loopback here — confirm check_udp_port handles that.
        if ipv4_search is not None:
            udp_binds.append({ "ip": ipv4_search[1], "port": int(ipv4_search[2]) })
        if ipv6_search is not None:
            udp_binds.append({ "ip": f"[{ipv6_search[1]}]", "port": int(ipv6_search[2]) })
82 |
#####################################
# Check if the ports are accessible #
#####################################

ERROR_FOUND = False

# Validate API
for api_bind in api_binds:
    consoleLog(messageType.INFO, "Checking API(S) binding %s:%s SSL=%s" % (api_bind['ip'], api_bind['port'], api_bind['ssl']))
    if api_bind['ssl']:
        try:
            # Double quotes inside the f-string: same-type nested quotes are a
            # syntax error on Python < 3.12.
            HTTPS_URL = f'https://{api_bind["ip"]}:{api_bind["port"]}'
            HTTPS_URL = urlparse(HTTPS_URL)
            connection = HTTPSConnection(HTTPS_URL.netloc, timeout=2)
            connection.request('HEAD', HTTPS_URL.path)
            if connection.getresponse():
                consoleLog(messageType.INFO, "Connection is available")
            else:
                ERROR_FOUND = True
                consoleLog(messageType.ERROR, "Connection is unavailable")
        except Exception:
            # Refused / timed-out / TLS-failed connections all count as unavailable.
            ERROR_FOUND = True
            consoleLog(messageType.ERROR, "Connection is unavailable")
    else:
        try:
            HTTP_URL = f'http://{api_bind["ip"]}:{api_bind["port"]}'
            HTTP_URL = urlparse(HTTP_URL)
            connection = HTTPConnection(HTTP_URL.netloc, timeout=2)
            connection.request('HEAD', HTTP_URL.path)
            if connection.getresponse():
                consoleLog(messageType.INFO, "Connection is available")
            else:
                ERROR_FOUND = True
                consoleLog(messageType.ERROR, "Connection is unavailable")
        except Exception:
            ERROR_FOUND = True
            consoleLog(messageType.ERROR, "Connection is unavailable")
121 |
# Validate TCP
for http_bind in http_binds:
    consoleLog(messageType.INFO, "Checking HTTP(S) binding %s:%s SSL=%s" % (http_bind['ip'], http_bind['port'], http_bind['ssl']))
    if http_bind['ssl']:
        try:
            # Double quotes inside the f-string: same-type nested quotes are a
            # syntax error on Python < 3.12.
            HTTPS_URL = f'https://{http_bind["ip"]}:{http_bind["port"]}'
            HTTPS_URL = urlparse(HTTPS_URL)
            connection = HTTPSConnection(HTTPS_URL.netloc, timeout=2)
            connection.request('HEAD', HTTPS_URL.path)
            if connection.getresponse():
                consoleLog(messageType.INFO, "Connection is available")
            else:
                ERROR_FOUND = True
                consoleLog(messageType.ERROR, "Connection is unavailable")
        except Exception:
            # Refused / timed-out / TLS-failed connections all count as unavailable.
            ERROR_FOUND = True
            consoleLog(messageType.ERROR, "Connection is unavailable")
    else:
        try:
            HTTP_URL = f'http://{http_bind["ip"]}:{http_bind["port"]}'
            HTTP_URL = urlparse(HTTP_URL)
            connection = HTTPConnection(HTTP_URL.netloc, timeout=2)
            connection.request('HEAD', HTTP_URL.path)
            if connection.getresponse():
                consoleLog(messageType.INFO, "Connection is available")
            else:
                ERROR_FOUND = True
                consoleLog(messageType.ERROR, "Connection is unavailable")
        except Exception:
            ERROR_FOUND = True
            consoleLog(messageType.ERROR, "Connection is unavailable")
153 |
# Validate UDP
for udp_bind in udp_binds:
    consoleLog(messageType.INFO, "Checking UDP binding %s:%s" % (udp_bind['ip'], udp_bind['port']))
    # check_udp_port returns a boolean; a False return marks the bind unavailable.
    # (Dead commented-out try/except wrapper removed.)
    if check_udp_port(udp_bind['ip'], int(udp_bind['port'])):
        consoleLog(messageType.INFO, "Connection is available")
    else:
        ERROR_FOUND = True
        consoleLog(messageType.ERROR, "Connection is unavailable")

# Exit non-zero when any bind failed so the caller (health check) sees failure.
if ERROR_FOUND:
    consoleLog(messageType.ERROR, "Exit Code 1")
    sys.exit(1)
else:
    consoleLog(messageType.INFO, "Exit Code 0")
    sys.exit(0)
--------------------------------------------------------------------------------
/docker/init.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Abort if the release directory is missing instead of running from the
# wrong working directory (ShellCheck SC2164).
cd /app/torrust-actix/target/release || exit 1

# First start: generate a default configuration file.
if [ ! -f "config.toml" ]
then
    ./torrust-actix --create-config
fi

# exec replaces the shell so the tracker becomes PID 1 and receives
# container stop signals directly.
exec ./torrust-actix
--------------------------------------------------------------------------------
/icon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Power2All/torrust-actix/55622882ecfbd6612fc172f130a677b8a83a65b4/icon.ico
--------------------------------------------------------------------------------
/sonar-project.properties:
--------------------------------------------------------------------------------
1 | sonar.projectKey=Power2All_torrust-actix_AZJrfvP7OZVfI2Bvjojo
2 |
--------------------------------------------------------------------------------
/src/api/api_blacklists.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::sync::Arc;
3 | use actix_web::{web, HttpRequest, HttpResponse};
4 | use actix_web::http::header::ContentType;
5 | use actix_web::web::Data;
6 | use serde_json::json;
7 | use crate::api::api::{api_parse_body, api_service_token, api_validation};
8 | use crate::api::structs::api_service_data::ApiServiceData;
9 | use crate::api::structs::query_token::QueryToken;
10 | use crate::common::common::hex2bin;
11 | use crate::tracker::enums::updates_action::UpdatesAction;
12 | use crate::tracker::structs::info_hash::InfoHash;
13 |
14 | #[tracing::instrument(level = "debug")]
15 | pub async fn api_service_blacklist_get(request: HttpRequest, path: web::Path, data: Data>) -> HttpResponse
16 | {
17 | // Validate client
18 | if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
19 |
20 | // Parse the Params
21 | let params = web::Query::::from_query(request.query_string()).unwrap();
22 | if let Some(response) = api_service_token(params.token.clone(), data.torrent_tracker.config.clone()).await { return response; }
23 |
24 | let info = path.into_inner();
25 | if info.len() == 40 {
26 | let info_hash = match hex2bin(info.clone()) {
27 | Ok(hash) => { InfoHash(hash) }
28 | Err(_) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": format!("invalid info_hash {}", info)})); }
29 | };
30 |
31 | match data.torrent_tracker.check_blacklist(info_hash) {
32 | true => { return HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})); }
33 | false => { return HttpResponse::NotFound().content_type(ContentType::json()).json(json!({"status": format!("unknown info_hash {}", info)})); }
34 | }
35 | }
36 |
37 | HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}))
38 | }
39 |
40 | #[tracing::instrument(skip(payload), level = "debug")]
41 | pub async fn api_service_blacklists_get(request: HttpRequest, payload: web::Payload, data: Data>) -> HttpResponse
42 | {
43 | // Validate client
44 | if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
45 |
46 | // Parse the Params
47 | let params = web::Query::::from_query(request.query_string()).unwrap();
48 | if let Some(response) = api_service_token(params.token.clone(), data.torrent_tracker.config.clone()).await { return response; }
49 |
50 | let body = match api_parse_body(payload).await {
51 | Ok(data) => { data }
52 | Err(error) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})); }
53 | };
54 |
55 | let blacklists = match serde_json::from_slice::>(&body) {
56 | Ok(data) => { data }
57 | Err(_) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})); }
58 | };
59 |
60 | let mut blacklist_output = HashMap::new();
61 | for blacklist in blacklists {
62 | if blacklist.len() == 40 {
63 | let blacklist_hash = match hex2bin(blacklist.clone()) {
64 | Ok(hash) => { InfoHash(hash) }
65 | Err(_) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": format!("invalid info_hash {}", blacklist)})) }
66 | };
67 |
68 | blacklist_output.insert(blacklist_hash, data.torrent_tracker.check_blacklist(blacklist_hash));
69 | }
70 | }
71 |
72 | HttpResponse::Ok().content_type(ContentType::json()).json(json!({
73 | "status": "ok",
74 | "blacklists": blacklist_output
75 | }))
76 | }
77 |
78 | #[tracing::instrument(level = "debug")]
79 | pub async fn api_service_blacklist_post(request: HttpRequest, path: web::Path, data: Data>) -> HttpResponse
80 | {
81 | // Validate client
82 | if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
83 |
84 | // Parse the Params
85 | let params = web::Query::::from_query(request.query_string()).unwrap();
86 | if let Some(response) = api_service_token(params.token.clone(), data.torrent_tracker.config.clone()).await { return response; }
87 |
88 | let info = path.into_inner();
89 | if info.len() == 40 {
90 | let info_hash = match hex2bin(info.clone()) {
91 | Ok(hash) => { InfoHash(hash) }
92 | Err(_) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": format!("invalid info_hash {}", info)})); }
93 | };
94 |
95 | if data.torrent_tracker.config.database.clone().persistent {
96 | let _ = data.torrent_tracker.add_blacklist_update(info_hash, UpdatesAction::Add);
97 | }
98 |
99 | return match data.torrent_tracker.add_blacklist(info_hash) {
100 | true => { HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})) }
101 | false => { HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": format!("info_hash updated {}", info)})) }
102 | }
103 | }
104 |
105 | HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}))
106 | }
107 |
108 | #[tracing::instrument(skip(payload), level = "debug")]
109 | pub async fn api_service_blacklists_post(request: HttpRequest, payload: web::Payload, data: Data>) -> HttpResponse
110 | {
111 | // Validate client
112 | if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
113 |
114 | // Parse the Params
115 | let params = web::Query::::from_query(request.query_string()).unwrap();
116 | if let Some(response) = api_service_token(params.token.clone(), data.torrent_tracker.config.clone()).await { return response; }
117 |
118 | let body = match api_parse_body(payload).await {
119 | Ok(data) => { data }
120 | Err(error) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})); }
121 | };
122 |
123 | let blacklists = match serde_json::from_slice::>(&body) {
124 | Ok(data) => { data }
125 | Err(_) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})); }
126 | };
127 |
128 | let mut blacklists_output = HashMap::new();
129 | for info in blacklists {
130 | if info.len() == 40 {
131 | let info_hash = match hex2bin(info.clone()) {
132 | Ok(hash) => { InfoHash(hash) }
133 | Err(_) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": format!("invalid info_hash {}", info)})) }
134 | };
135 |
136 | if data.torrent_tracker.config.database.clone().persistent {
137 | let _ = data.torrent_tracker.add_blacklist_update(info_hash, UpdatesAction::Add);
138 | }
139 |
140 | match data.torrent_tracker.add_blacklist(info_hash) {
141 | true => { blacklists_output.insert(info_hash, json!({"status": "ok"})); }
142 | false => { blacklists_output.insert(info_hash, json!({"status": "info_hash updated"})); }
143 | }
144 | }
145 | }
146 |
147 | HttpResponse::Ok().content_type(ContentType::json()).json(json!({
148 | "status": "ok",
149 | "blacklists": blacklists_output
150 | }))
151 | }
152 |
153 | #[tracing::instrument(level = "debug")]
154 | pub async fn api_service_blacklist_delete(request: HttpRequest, path: web::Path, data: Data>) -> HttpResponse
155 | {
156 | // Validate client
157 | if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
158 |
159 | // Parse the Params
160 | let params = web::Query::::from_query(request.query_string()).unwrap();
161 | if let Some(response) = api_service_token(params.token.clone(), data.torrent_tracker.config.clone()).await { return response; }
162 |
163 | let info = path.into_inner();
164 | if info.len() == 40 {
165 | let info_hash = match hex2bin(info.clone()) {
166 | Ok(hash) => { InfoHash(hash) }
167 | Err(_) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": format!("invalid info_hash {}", info)})); }
168 | };
169 |
170 | if data.torrent_tracker.config.database.clone().persistent {
171 | let _ = data.torrent_tracker.add_blacklist_update(info_hash, UpdatesAction::Remove);
172 | }
173 |
174 | return match data.torrent_tracker.remove_blacklist(info_hash) {
175 | true => { HttpResponse::Ok().content_type(ContentType::json()).json(json!({"status": "ok"})) }
176 | false => { HttpResponse::NotModified().content_type(ContentType::json()).json(json!({"status": format!("unknown info_hash {}", info)})) }
177 | }
178 | }
179 |
180 | HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad info_hash"}))
181 | }
182 |
183 | #[tracing::instrument(skip(payload), level = "debug")]
184 | pub async fn api_service_blacklists_delete(request: HttpRequest, payload: web::Payload, data: Data>) -> HttpResponse
185 | {
186 | // Validate client
187 | if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
188 |
189 | // Parse the Params
190 | let params = web::Query::::from_query(request.query_string()).unwrap();
191 | if let Some(response) = api_service_token(params.token.clone(), data.torrent_tracker.config.clone()).await { return response; }
192 |
193 | let body = match api_parse_body(payload).await {
194 | Ok(data) => { data }
195 | Err(error) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": error.to_string()})); }
196 | };
197 |
198 | let blacklists = match serde_json::from_slice::>(&body) {
199 | Ok(data) => { data }
200 | Err(_) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": "bad json body"})); }
201 | };
202 |
203 | let mut blacklists_output = HashMap::new();
204 | for info in blacklists {
205 | if info.len() == 40 {
206 | let info_hash = match hex2bin(info.clone()) {
207 | Ok(hash) => { InfoHash(hash) }
208 | Err(_) => { return HttpResponse::BadRequest().content_type(ContentType::json()).json(json!({"status": format!("invalid info_hash {}", info)})) }
209 | };
210 |
211 | if data.torrent_tracker.config.database.clone().persistent {
212 | let _ = data.torrent_tracker.add_blacklist_update(info_hash, UpdatesAction::Remove);
213 | }
214 |
215 | match data.torrent_tracker.remove_blacklist(info_hash) {
216 | true => { blacklists_output.insert(info_hash, json!({"status": "ok"})); }
217 | false => { blacklists_output.insert(info_hash, json!({"status": "unknown info_hash"})); }
218 | }
219 | }
220 | }
221 |
222 | HttpResponse::Ok().content_type(ContentType::json()).json(json!({
223 | "status": "ok",
224 | "blacklists": blacklists_output
225 | }))
226 | }
--------------------------------------------------------------------------------
/src/api/api_stats.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 | use actix_web::{web, HttpRequest, HttpResponse};
3 | use actix_web::http::header::ContentType;
4 | use actix_web::web::Data;
5 | use crate::api::api::{api_service_token, api_validation};
6 | use crate::api::structs::api_service_data::ApiServiceData;
7 | use crate::api::structs::query_token::QueryToken;
8 |
9 | #[tracing::instrument(level = "debug")]
10 | pub async fn api_service_stats_get(request: HttpRequest, data: Data>) -> HttpResponse
11 | {
12 | // Validate client
13 | if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
14 |
15 | // Parse the Params
16 | let params = web::Query::::from_query(request.query_string()).unwrap();
17 | if let Some(response) = api_service_token(params.token.clone(), data.torrent_tracker.config.clone()).await { return response; }
18 |
19 | HttpResponse::Ok().content_type(ContentType::json()).json(data.torrent_tracker.get_stats())
20 | }
21 |
22 | #[tracing::instrument(level = "debug")]
23 | pub async fn api_service_prom_get(request: HttpRequest, data: Data>) -> HttpResponse
24 | {
25 | // Validate client
26 | if let Some(error_return) = api_validation(&request, &data).await { return error_return; }
27 |
28 | // Parse the Params
29 | let params = web::Query::::from_query(request.query_string()).unwrap();
30 | if let Some(response) = api_service_token(params.token.clone(), data.torrent_tracker.config.clone()).await { return response; }
31 |
32 | // Get stats
33 | let stats = data.torrent_tracker.get_stats();
34 |
35 | // Build Prometheus Output
36 | let prometheus_id = &data.torrent_tracker.config.tracker_config.prometheus_id;
37 | let mut string_output = vec![];
38 |
39 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "torrents", stats.torrents, true, Some(format!("{} gauge metrics", prometheus_id).as_str())));
40 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "torrents_updates", stats.torrents_updates, false, None));
41 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "users", stats.users, false, None));
42 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "users_updates", stats.users_updates, false, None));
43 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "seeds", stats.seeds, false, None));
44 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "peers", stats.peers, false, None));
45 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "completed", stats.completed, false, None));
46 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "whitelist", stats.whitelist, false, None));
47 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "whitelist_updates", stats.whitelist_updates, false, None));
48 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "blacklist", stats.blacklist, false, None));
49 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "blacklist_updates", stats.blacklist_updates, false, None));
50 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "keys", stats.keys, false, None));
51 | string_output.extend(api_service_prom_generate_line(prometheus_id, "gauge", "keys_updates", stats.keys_updates, false, None));
52 |
53 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp4_not_found", stats.tcp4_not_found, true, Some(format!("{} counter metrics", prometheus_id).as_str())));
54 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp4_failure", stats.tcp4_failure, false, None));
55 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp4_connections_handled", stats.tcp4_connections_handled, false, None));
56 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp4_api_handled", stats.tcp4_api_handled, false, None));
57 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp4_announces_handled", stats.tcp4_announces_handled, false, None));
58 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp4_scrapes_handled", stats.tcp4_scrapes_handled, false, None));
59 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp6_not_found", stats.tcp6_not_found, true, Some(format!("{} counter metrics", prometheus_id).as_str())));
60 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp6_failure", stats.tcp6_failure, false, None));
61 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp6_connections_handled", stats.tcp6_connections_handled, false, None));
62 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp6_api_handled", stats.tcp6_api_handled, false, None));
63 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp6_announces_handled", stats.tcp6_announces_handled, false, None));
64 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "tcp6_scrapes_handled", stats.tcp6_scrapes_handled, false, None));
65 |
66 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp4_bad_request", stats.udp4_bad_request, true, Some(format!("{} counter metrics", prometheus_id).as_str())));
67 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp4_invalid_request", stats.udp4_invalid_request, false, None));
68 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp4_connections_handled", stats.udp4_connections_handled, false, None));
69 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp4_announces_handled", stats.udp4_announces_handled, false, None));
70 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp4_scrapes_handled", stats.udp4_scrapes_handled, false, None));
71 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp6_bad_request", stats.udp6_bad_request, true, Some(format!("{} counter metrics", prometheus_id).as_str())));
72 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp6_invalid_request", stats.udp6_invalid_request, false, None));
73 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp6_connections_handled", stats.udp6_connections_handled, false, None));
74 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp6_announces_handled", stats.udp6_announces_handled, false, None));
75 | string_output.extend(api_service_prom_generate_line(prometheus_id, "counter", "udp6_scrapes_handled", stats.udp6_scrapes_handled, false, None));
76 |
77 | HttpResponse::Ok().content_type(ContentType::plaintext()).body(string_output.join("\n"))
78 | }
79 |
/// Renders one Prometheus exposition line for `metric`, optionally preceded
/// by `# HELP` / `# TYPE` header lines.
///
/// NOTE(review): the flag reads inverted — `without_header == true` is the
/// branch that EMITS the headers; kept as-is because callers rely on it.
/// Panics if the header branch is taken with `description == None`.
pub fn api_service_prom_generate_line(id: &str, type_metric: &str, metric: &str, value: i64, without_header: bool, description: Option<&str>) -> Vec<String>
{
    // `format!` already yields String; the previous `.to_string()` calls were redundant.
    if without_header {
        return vec![
            format!("# HELP {}_{} {}", id, type_metric, description.unwrap()),
            format!("# TYPE {}_{} {}", id, type_metric, type_metric),
            format!("{}_{}{{metric=\"{}\"}} {}", id, type_metric, metric, value),
        ];
    }
    vec![
        format!("{}_{}{{metric=\"{}\"}} {}", id, type_metric, metric, value),
    ]
}
--------------------------------------------------------------------------------
/src/api/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod structs;
2 | #[allow(clippy::module_inception)]
3 | pub mod api;
4 | pub mod api_blacklists;
5 | pub mod api_keys;
6 | pub mod api_torrents;
7 | pub mod api_users;
8 | pub mod api_whitelists;
9 | pub mod api_stats;
--------------------------------------------------------------------------------
/src/api/structs.rs:
--------------------------------------------------------------------------------
1 | pub mod query_token;
2 | pub mod api_service_data;
--------------------------------------------------------------------------------
/src/api/structs/api_service_data.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 | use crate::config::structs::api_trackers_config::ApiTrackersConfig;
3 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
4 |
5 | #[derive(Debug)]
6 | pub struct ApiServiceData {
7 | pub(crate) torrent_tracker: Arc,
8 | pub(crate) api_trackers_config: Arc
9 | }
--------------------------------------------------------------------------------
/src/api/structs/query_token.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
3 | #[derive(Debug, Serialize, Deserialize)]
4 | pub struct QueryToken {
5 | pub(crate) token: Option,
6 | }
--------------------------------------------------------------------------------
/src/common/common.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::fmt;
3 | use std::fmt::Formatter;
4 | use std::io::Cursor;
5 | use std::time::{Duration, SystemTime};
6 | use async_std::future;
7 | use byteorder::{BigEndian, ReadBytesExt};
8 | use fern::colors::{Color, ColoredLevelConfig};
9 | use log::info;
10 | use tokio_shutdown::Shutdown;
11 | use crate::common::structs::custom_error::CustomError;
12 | use crate::config::structs::configuration::Configuration;
13 |
14 | pub fn parse_query(query: Option) -> Result>>, CustomError> {
15 | let mut queries: HashMap>> = HashMap::new();
16 | match query {
17 | None => {}
18 | Some(result) => {
19 | let split_raw_query: Vec<&str> = result.split('&').collect();
20 | for query_item in split_raw_query {
21 | if !query_item.is_empty() {
22 | if query_item.contains('=') {
23 | let key_name_raw = query_item.split('=').collect::>()[0];
24 | let key_name = percent_encoding::percent_decode_str(key_name_raw).decode_utf8_lossy().to_lowercase();
25 | if !key_name.is_empty() {
26 | let value_data_raw = query_item.split('=').collect::>()[1];
27 | let value_data = percent_encoding::percent_decode_str(value_data_raw).collect::>();
28 | match queries.get(&key_name) {
29 | None => {
30 | let query: Vec> = vec![value_data];
31 | let _ = queries.insert(key_name, query);
32 | }
33 | Some(result) => {
34 | let mut result_mut = result.clone();
35 | result_mut.push(value_data);
36 | let _ = queries.insert(key_name, result_mut);
37 | }
38 | }
39 | }
40 | } else {
41 | let key_name = percent_encoding::percent_decode_str(query_item).decode_utf8_lossy().to_lowercase();
42 | if !key_name.is_empty() {
43 | match queries.get(&key_name) {
44 | None => {
45 | let query: Vec> = vec![];
46 | let _ = queries.insert(key_name, query);
47 | }
48 | Some(result) => {
49 | let mut result_mut = result.clone();
50 | result_mut.push(vec![]);
51 | let _ = queries.insert(key_name, result.clone());
52 | }
53 | }
54 | }
55 | }
56 | }
57 | }
58 | }
59 | }
60 |
61 | Ok(queries)
62 | }
63 |
64 | pub fn udp_check_host_and_port_used(bind_address: String) {
65 | if cfg!(target_os = "windows") {
66 | match std::net::UdpSocket::bind(&bind_address) {
67 | Ok(e) => e,
68 | Err(data) => {
69 | sentry::capture_error(&data);
70 | panic!("Unable to bind to {} ! Exiting...", &bind_address);
71 | }
72 | };
73 | }
74 | }
75 |
/// Writes the 20-byte hash as 40 lowercase hex characters into the formatter.
pub(crate) fn bin2hex(data: &[u8; 20], f: &mut Formatter) -> fmt::Result {
    // Hand-rolled hex removes the binascii dependency, the intermediate
    // 40-byte buffer, and the expect() that could only fire on an
    // impossible buffer-size mismatch.
    for byte in data.iter() {
        write!(f, "{:02x}", byte)?;
    }
    Ok(())
}
81 |
82 | pub fn hex2bin(data: String) -> Result<[u8; 20], CustomError>
83 | {
84 | match hex::decode(data) {
85 | Ok(hash_result) => { Ok(<[u8; 20]>::try_from(hash_result[0..20].as_ref()).unwrap()) }
86 | Err(data) => {
87 | sentry::capture_error(&data);
88 | Err(CustomError::new("error converting hex to bin"))
89 | }
90 | }
91 | }
92 |
/// Debug helper: prints the compile-time type name of the referenced value.
/// (`<T>` restored — the generic parameter was stripped by text extraction.)
pub fn print_type<T>(_: &T) {
    // `{:?}` wraps the &str type name in quotes; kept for output compatibility.
    println!("{:?}", std::any::type_name::<T>());
}
96 |
/// Debug helper: returns the compile-time type name of the referenced value,
/// Debug-quoted (e.g. `"i32"` including the quotes).
/// (`<T>` restored — the generic parameter was stripped by text extraction.)
pub fn return_type<T>(_: &T) -> String {
    format!("{:?}", std::any::type_name::<T>())
}
100 |
/// Debug helper: compares `source` against the Debug rendering of `check`,
/// printing both when they differ.
///
/// NOTE(review): `format!("{:?}", check)` wraps the value in quotes, so
/// equality only holds when `source` itself contains the quoted form
/// (e.g. source == "\"abc\"") — confirm this asymmetry is intended.
pub fn equal_string_check(source: &String, check: &String) -> bool
{
    // `*source.to_string()` is a no-op round-trip; left untouched here.
    if *source.to_string() == format!("{:?}", check) {
        return true;
    }
    println!("Source: {}", source);
    println!("Check: {:?}", check);
    false
}
110 |
/// Initializes global fern/log output from the configured `log_level`.
///
/// # Panics
/// Panics on an unrecognized `log_level` string or if the fern dispatcher
/// has already been installed.
pub fn setup_logging(config: &Configuration)
{
    // Map the configured textual level onto log's LevelFilter; any other
    // value is a configuration error and aborts startup.
    let level = match config.log_level.as_str() {
        "off" => log::LevelFilter::Off,
        "trace" => log::LevelFilter::Trace,
        "debug" => log::LevelFilter::Debug,
        "info" => log::LevelFilter::Info,
        "warn" => log::LevelFilter::Warn,
        "error" => log::LevelFilter::Error,
        _ => {
            panic!("Unknown log level encountered: '{}'", config.log_level.as_str());
        }
    };

    // Per-level ANSI colors for the console output.
    let colors = ColoredLevelConfig::new()
        .trace(Color::Cyan)
        .debug(Color::Magenta)
        .info(Color::Green)
        .warn(Color::Yellow)
        .error(Color::Red);

    // "<timestamp> [LEVEL][target] message" with nanosecond timestamps;
    // the level column is padded to 5 characters.
    if let Err(_err) = fern::Dispatch::new()
        .format(move |out, message, record| {
            out.finish(format_args!(
                "{} [{:width$}][{}] {}",
                chrono::Local::now().format("%Y-%m-%d %H:%M:%S%.9f"),
                colors.color(record.level()),
                record.target(),
                message,
                width = 5
            ))
        })
        .level(level)
        .chain(std::io::stdout())
        .apply()
    {
        panic!("Failed to initialize logging.")
    }
    info!("logging initialized.");
}
151 |
/// Returns the current Unix timestamp in whole seconds.
pub async fn current_time() -> u64 {
    let since_epoch = SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .unwrap();
    since_epoch.as_secs()
}
157 |
/// Encodes `number` big-endian, trimmed to the minimal number of bytes
/// (1..=8) needed to represent it.
pub async fn convert_int_to_bytes(number: &u64) -> Vec<u8> {
    let mut return_data: Vec<u8> = Vec::new();
    for i in 1..8 {
        if number < &256u64.pow(i) {
            // Keep only the i low-order bytes of the 8-byte big-endian encoding.
            // (Was `16usize - i`, which indexes past the u64 buffer and panics.)
            let start: usize = 8usize - i as usize;
            return_data.extend(number.to_be_bytes()[start..8].iter());
            return return_data;
        }
    }
    // number >= 256^7 needs the full 8 bytes (previously returned empty).
    return_data.extend(number.to_be_bytes().iter());
    return_data
}
169 |
/// Decodes a big-endian byte slice (at most 8 bytes) into a u64, left-padding
/// with zeros. (`Vec<u8>` generics restored after extraction stripping.)
///
/// # Panics
/// Panics if `array` is longer than 8 bytes (the subtraction underflows).
pub async fn convert_bytes_to_int(array: &Vec<u8>) -> u64 {
    let mut array_fixed: Vec<u8> = Vec::new();
    let size = 8 - array.len();
    array_fixed.resize(size, 0);
    array_fixed.extend(array);
    // std's from_be_bytes replaces the byteorder Cursor round-trip;
    // the length is exactly 8 by construction, so try_into cannot fail.
    u64::from_be_bytes(array_fixed.try_into().unwrap())
}
178 |
179 | pub async fn shutdown_waiting(timeout: Duration, shutdown_handler: Shutdown) -> bool
180 | {
181 | match future::timeout(timeout, shutdown_handler.handle()).await {
182 | Ok(_) => {
183 | true
184 | }
185 | Err(_) => {
186 | false
187 | }
188 | }
189 | }
--------------------------------------------------------------------------------
/src/common/impls.rs:
--------------------------------------------------------------------------------
1 | pub mod custom_error;
--------------------------------------------------------------------------------
/src/common/impls/custom_error.rs:
--------------------------------------------------------------------------------
1 | use std::error::Error;
2 | use std::fmt;
3 | use std::fmt::Formatter;
4 | use crate::common::structs::custom_error::CustomError;
5 |
6 | impl CustomError {
7 | pub fn new(msg: &str) -> CustomError {
8 | CustomError { message: msg.to_string() }
9 | }
10 | }
11 |
12 | impl fmt::Display for CustomError {
13 | fn fmt(&self, f: &mut Formatter) -> fmt::Result {
14 | write!(f, "{}", self.message)
15 | }
16 | }
17 |
impl Error for CustomError {
    // NOTE(review): Error::description has been deprecated since Rust 1.42
    // in favour of Display; kept because callers may still invoke
    // description() directly and removing it would change what they receive.
    fn description(&self) -> &str {
        &self.message
    }
}
--------------------------------------------------------------------------------
/src/common/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod structs;
2 | #[allow(clippy::module_inception)]
3 | pub mod common;
4 | pub mod impls;
--------------------------------------------------------------------------------
/src/common/structs.rs:
--------------------------------------------------------------------------------
1 | pub mod custom_error;
2 | pub mod number_of_bytes;
3 | pub mod number_of_bytes_def;
4 | pub mod get_torrents_api;
5 | pub mod get_torrent_api;
--------------------------------------------------------------------------------
/src/common/structs/custom_error.rs:
--------------------------------------------------------------------------------
/// Simple string-message error type used across the crate
/// (constructed via `CustomError::new`, rendered via `Display`).
#[derive(Debug)]
pub struct CustomError {
    pub(crate) message: String,
}
--------------------------------------------------------------------------------
/src/common/structs/get_torrent_api.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | use serde_json::Value;
3 |
4 | #[derive(Serialize, Deserialize, Clone)]
5 | pub struct GetTorrentApi {
6 | pub info_hash: String,
7 | pub completed: i64,
8 | pub seeders: i64,
9 | pub leechers: i64,
10 | pub peers: Vec,
11 | }
--------------------------------------------------------------------------------
/src/common/structs/get_torrents_api.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// API list-entry payload for a torrent: counters only, no peer details
/// (`GetTorrentApi` is the single-torrent variant that also carries peers).
#[derive(Serialize, Deserialize, Clone)]
pub struct GetTorrentsApi {
    pub info_hash: String,
    pub completed: i64,
    pub seeders: i64,
    pub leechers: i64,
}
--------------------------------------------------------------------------------
/src/common/structs/number_of_bytes.rs:
--------------------------------------------------------------------------------
/// Newtype over a signed 64-bit byte count (negative values are representable;
/// semantics depend on the caller).
#[derive(PartialEq, PartialOrd, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfBytes(pub i64);
--------------------------------------------------------------------------------
/src/common/structs/number_of_bytes_def.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | use crate::common::structs::number_of_bytes::NumberOfBytes;
3 |
/// Serde "remote derive" mirror of [`NumberOfBytes`], whose own definition has
/// no serde derives. Fields of type `NumberOfBytes` opt in via
/// `#[serde(with = "NumberOfBytesDef")]`.
#[derive(Serialize, Deserialize)]
#[serde(remote = "NumberOfBytes")]
pub struct NumberOfBytesDef(pub i64);
--------------------------------------------------------------------------------
/src/config/enums.rs:
--------------------------------------------------------------------------------
1 | pub mod configuration_error;
--------------------------------------------------------------------------------
/src/config/enums/configuration_error.rs:
--------------------------------------------------------------------------------
/// Failure modes when loading the TOML configuration.
#[derive(Debug)]
pub enum ConfigurationError {
    // Reading the configuration file from disk failed.
    IOError(std::io::Error),
    // The file was read but could not be parsed as the expected TOML schema.
    ParseError(toml::de::Error),
}
--------------------------------------------------------------------------------
/src/config/impls.rs:
--------------------------------------------------------------------------------
1 | pub mod configuration;
2 | pub mod configuration_error;
--------------------------------------------------------------------------------
/src/config/impls/configuration_error.rs:
--------------------------------------------------------------------------------
1 | use crate::config::enums::configuration_error::ConfigurationError;
2 |
3 | impl std::fmt::Display for ConfigurationError {
4 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
5 | match self {
6 | ConfigurationError::IOError(e) => e.fmt(f),
7 | ConfigurationError::ParseError(e) => e.fmt(f)
8 | }
9 | }
10 | }
11 |
12 | impl std::error::Error for ConfigurationError {}
--------------------------------------------------------------------------------
/src/config/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod enums;
2 | pub mod structs;
3 | pub mod impls;
--------------------------------------------------------------------------------
/src/config/structs.rs:
--------------------------------------------------------------------------------
1 | pub mod api_trackers_config;
2 | pub mod configuration;
3 | pub mod database_structure_config;
4 | pub mod http_trackers_config;
5 | pub mod udp_trackers_config;
6 | pub mod database_structure_config_blacklist;
7 | pub mod database_structure_config_keys;
8 | pub mod database_structure_config_torrents;
9 | pub mod database_structure_config_users;
10 | pub mod database_structure_config_whitelist;
11 | pub mod database_config;
12 | pub mod tracker_config;
13 | pub mod sentry_config;
--------------------------------------------------------------------------------
/src/config/structs/api_trackers_config.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Settings for one API server listener (the config allows several).
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ApiTrackersConfig {
    pub enabled: bool,
    // Socket address to bind, e.g. "0.0.0.0:8080".
    pub bind_address: String,
    // Presumably the header/source used to determine the client's real IP
    // behind a proxy — confirm against the request-handling code.
    pub real_ip: String,
    // Timeouts/keep-alive; units not visible here (TODO confirm: seconds).
    pub keep_alive: u64,
    pub request_timeout: u64,
    pub disconnect_timeout: u64,
    pub max_connections: u64,
    pub threads: u64,
    // TLS: when `ssl` is true, `ssl_key`/`ssl_cert` name the PEM files.
    pub ssl: bool,
    pub ssl_key: String,
    pub ssl_cert: String,
    pub tls_connection_rate: u64
}
--------------------------------------------------------------------------------
/src/config/structs/configuration.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | use crate::config::structs::api_trackers_config::ApiTrackersConfig;
3 | use crate::config::structs::database_config::DatabaseConfig;
4 | use crate::config::structs::database_structure_config::DatabaseStructureConfig;
5 | use crate::config::structs::http_trackers_config::HttpTrackersConfig;
6 | use crate::config::structs::sentry_config::SentryConfig;
7 | use crate::config::structs::tracker_config::TrackerConfig;
8 | use crate::config::structs::udp_trackers_config::UdpTrackersConfig;
9 |
10 | #[derive(Serialize, Deserialize, Debug, Clone)]
11 | pub struct Configuration {
12 | pub log_level: String,
13 | pub log_console_interval: u64,
14 | pub tracker_config: TrackerConfig,
15 | pub sentry_config: SentryConfig,
16 | pub database: DatabaseConfig,
17 | pub database_structure: DatabaseStructureConfig,
18 | pub http_server: Vec,
19 | pub udp_server: Vec,
20 | pub api_server: Vec
21 | }
--------------------------------------------------------------------------------
/src/config/structs/database_config.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | use crate::database::enums::database_drivers::DatabaseDrivers;
3 |
/// Database backend selection and persistence behavior.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DatabaseConfig {
    // Which backend to use (sqlite3 / mysql / pgsql).
    pub engine: DatabaseDrivers,
    // Connection path/URI; exact format depends on the chosen engine.
    pub path: String,
    // Whether in-memory state is periodically flushed to the database.
    pub persistent: bool,
    // Flush interval; units not visible here (TODO confirm: seconds).
    pub persistent_interval: u64,
    pub insert_vacant: bool,
    pub remove_action: bool,
    pub update_completed: bool,
    pub update_peers: bool
}
--------------------------------------------------------------------------------
/src/config/structs/database_structure_config.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | use crate::config::structs::database_structure_config_blacklist::DatabaseStructureConfigBlacklist;
3 | use crate::config::structs::database_structure_config_keys::DatabaseStructureConfigKeys;
4 | use crate::config::structs::database_structure_config_torrents::DatabaseStructureConfigTorrents;
5 | use crate::config::structs::database_structure_config_users::DatabaseStructureConfigUsers;
6 | use crate::config::structs::database_structure_config_whitelist::DatabaseStructureConfigWhitelist;
7 |
/// Maps each logical data set to its configurable table/column layout,
/// letting the tracker run against an existing database schema.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DatabaseStructureConfig {
    pub torrents: DatabaseStructureConfigTorrents,
    pub whitelist: DatabaseStructureConfigWhitelist,
    pub blacklist: DatabaseStructureConfigBlacklist,
    pub keys: DatabaseStructureConfigKeys,
    pub users: DatabaseStructureConfigUsers
}
--------------------------------------------------------------------------------
/src/config/structs/database_structure_config_blacklist.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Table/column layout for the blacklist data set.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DatabaseStructureConfigBlacklist {
    pub table_name: String,
    pub column_infohash: String,
    // When true the info-hash column holds binary data rather than hex text.
    pub bin_type_infohash: bool
}
--------------------------------------------------------------------------------
/src/config/structs/database_structure_config_keys.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Table/column layout for the access-keys data set.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DatabaseStructureConfigKeys {
    pub table_name: String,
    pub column_hash: String,
    // When true the key-hash column holds binary data rather than hex text.
    pub bin_type_hash: bool,
    pub column_timeout: String
}
--------------------------------------------------------------------------------
/src/config/structs/database_structure_config_torrents.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Table/column layout for the torrents data set.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DatabaseStructureConfigTorrents {
    pub table_name: String,
    pub column_infohash: String,
    // When true the info-hash column holds binary data rather than hex text.
    pub bin_type_infohash: bool,
    pub column_seeds: String,
    pub column_peers: String,
    pub column_completed: String
}
--------------------------------------------------------------------------------
/src/config/structs/database_structure_config_users.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Table/column layout for the users data set.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DatabaseStructureConfigUsers {
    pub table_name: String,
    // Selects whether users are keyed by UUID (`column_uuid`) or by a
    // numeric id (`column_id`) — presumably; confirm against the user queries.
    pub id_uuid: bool,
    pub column_uuid: String,
    pub column_id: String,
    pub column_key: String,
    // When true the key column holds binary data rather than hex text.
    pub bin_type_key: bool,
    pub column_uploaded: String,
    pub column_downloaded: String,
    pub column_completed: String,
    pub column_updated: String,
    pub column_active: String
}
--------------------------------------------------------------------------------
/src/config/structs/database_structure_config_whitelist.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Table/column layout for the whitelist data set.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct DatabaseStructureConfigWhitelist {
    pub table_name: String,
    pub column_infohash: String,
    // When true the info-hash column holds binary data rather than hex text.
    pub bin_type_infohash: bool
}
--------------------------------------------------------------------------------
/src/config/structs/http_trackers_config.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Settings for one HTTP tracker listener (same shape as `ApiTrackersConfig`).
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct HttpTrackersConfig {
    pub enabled: bool,
    // Socket address to bind, e.g. "0.0.0.0:6969".
    pub bind_address: String,
    // Presumably the header/source used to determine the client's real IP
    // behind a proxy — confirm against the request-handling code.
    pub real_ip: String,
    // Timeouts/keep-alive; units not visible here (TODO confirm: seconds).
    pub keep_alive: u64,
    pub request_timeout: u64,
    pub disconnect_timeout: u64,
    pub max_connections: u64,
    pub threads: u64,
    // TLS: when `ssl` is true, `ssl_key`/`ssl_cert` name the PEM files.
    pub ssl: bool,
    pub ssl_key: String,
    pub ssl_cert: String,
    pub tls_connection_rate: u64
}
--------------------------------------------------------------------------------
/src/config/structs/sentry_config.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Sentry error-reporting options; field names mirror the Sentry client's
/// standard configuration knobs.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct SentryConfig {
    pub enabled: bool,
    // Sentry project DSN; events are sent here when `enabled` is true.
    pub dsn: String,
    pub debug: bool,
    pub sample_rate: f32,
    pub max_breadcrumbs: usize,
    pub attach_stacktrace: bool,
    pub send_default_pii: bool,
    pub traces_sample_rate: f32
}
--------------------------------------------------------------------------------
/src/config/structs/tracker_config.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Core tracker behavior: feature toggles, announce intervals and
/// cleanup scheduling.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct TrackerConfig {
    // Token required by the management API.
    pub api_key: String,
    pub whitelist_enabled: bool,
    pub blacklist_enabled: bool,
    pub keys_enabled: bool,
    pub keys_cleanup_interval: u64,
    pub users_enabled: bool,
    // Announce interval advertised to clients; units not visible here
    // (TODO confirm: seconds).
    pub request_interval: u64,
    pub request_interval_minimum: u64,
    pub peers_timeout: u64,
    pub peers_cleanup_interval: u64,
    pub peers_cleanup_threads: u64,
    pub total_downloads: u64,
    // Expose the Swagger/OpenAPI UI on the API server.
    pub swagger: bool,
    pub prometheus_id: String,
}
--------------------------------------------------------------------------------
/src/config/structs/udp_trackers_config.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Settings for one UDP tracker listener.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct UdpTrackersConfig {
    pub enabled: bool,
    // Socket address to bind, e.g. "0.0.0.0:6969".
    pub bind_address: String,
    pub threads: u64
}
--------------------------------------------------------------------------------
/src/database/enums.rs:
--------------------------------------------------------------------------------
1 | pub mod database_drivers;
--------------------------------------------------------------------------------
/src/database/enums/database_drivers.rs:
--------------------------------------------------------------------------------
1 | use clap::ValueEnum;
2 | use serde::{Deserialize, Serialize};
3 |
/// Supported database backends. Lowercase variant names double as the
/// spelling used in `config.toml` and on the CLI (`ValueEnum`), hence the
/// `non_camel_case_types` allow.
#[allow(non_camel_case_types)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
pub enum DatabaseDrivers {
    sqlite3,
    mysql,
    pgsql,
}
--------------------------------------------------------------------------------
/src/database/impls.rs:
--------------------------------------------------------------------------------
1 | pub mod database_connector;
2 | pub mod database_connector_sqlite;
3 | pub mod database_connector_mysql;
4 | pub mod database_connector_pgsql;
--------------------------------------------------------------------------------
/src/database/impls/database_connector.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use std::sync::Arc;
3 | use sqlx::Error;
4 | use crate::config::structs::configuration::Configuration;
5 | use crate::database::enums::database_drivers::DatabaseDrivers;
6 | use crate::database::structs::database_connector::DatabaseConnector;
7 | use crate::database::structs::database_connector_mysql::DatabaseConnectorMySQL;
8 | use crate::database::structs::database_connector_pgsql::DatabaseConnectorPgSQL;
9 | use crate::database::structs::database_connector_sqlite::DatabaseConnectorSQLite;
10 | use crate::tracker::enums::updates_action::UpdatesAction;
11 | use crate::tracker::structs::info_hash::InfoHash;
12 | use crate::tracker::structs::torrent_entry::TorrentEntry;
13 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
14 | use crate::tracker::structs::user_entry_item::UserEntryItem;
15 | use crate::tracker::structs::user_id::UserId;
16 |
17 | impl DatabaseConnector {
18 | #[tracing::instrument(level = "debug")]
19 | pub async fn new(config: Arc, create_database: bool) -> DatabaseConnector
20 | {
21 | match &config.database.engine {
22 | DatabaseDrivers::sqlite3 => { DatabaseConnectorSQLite::database_connector(config, create_database).await }
23 | DatabaseDrivers::mysql => { DatabaseConnectorMySQL::database_connector(config, create_database).await }
24 | DatabaseDrivers::pgsql => { DatabaseConnectorPgSQL::database_connector(config, create_database).await }
25 | }
26 | }
27 |
28 | #[tracing::instrument(level = "debug")]
29 | pub async fn load_torrents(&self, tracker: Arc) -> Result<(u64, u64), Error>
30 | {
31 | if self.engine.is_some() {
32 | return match self.engine.clone().unwrap() {
33 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().load_torrents(tracker.clone()).await }
34 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().load_torrents(tracker.clone()).await }
35 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().load_torrents(tracker.clone()).await }
36 | };
37 | }
38 |
39 | Err(Error::RowNotFound)
40 | }
41 |
42 | #[tracing::instrument(level = "debug")]
43 | pub async fn load_whitelist(&self, tracker: Arc) -> Result
44 | {
45 | if self.engine.is_some() {
46 | return match self.engine.clone().unwrap() {
47 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().load_whitelist(tracker.clone()).await }
48 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().load_whitelist(tracker.clone()).await }
49 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().load_whitelist(tracker.clone()).await }
50 | };
51 | }
52 |
53 | Err(Error::RowNotFound)
54 | }
55 |
56 | #[tracing::instrument(level = "debug")]
57 | pub async fn load_blacklist(&self, tracker: Arc) -> Result
58 | {
59 | if self.engine.is_some() {
60 | return match self.engine.clone().unwrap() {
61 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().load_blacklist(tracker.clone()).await }
62 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().load_blacklist(tracker.clone()).await }
63 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().load_blacklist(tracker.clone()).await }
64 | };
65 | }
66 |
67 | Err(Error::RowNotFound)
68 | }
69 |
70 | #[tracing::instrument(level = "debug")]
71 | pub async fn load_keys(&self, tracker: Arc) -> Result
72 | {
73 | if self.engine.is_some() {
74 | return match self.engine.clone().unwrap() {
75 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().load_keys(tracker.clone()).await }
76 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().load_keys(tracker.clone()).await }
77 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().load_keys(tracker.clone()).await }
78 | };
79 | }
80 |
81 | Err(Error::RowNotFound)
82 | }
83 |
84 | #[tracing::instrument(level = "debug")]
85 | pub async fn load_users(&self, tracker: Arc) -> Result
86 | {
87 | if self.engine.is_some() {
88 | return match self.engine.clone().unwrap() {
89 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().load_users(tracker.clone()).await }
90 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().load_users(tracker.clone()).await }
91 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().load_users(tracker.clone()).await }
92 | };
93 | }
94 |
95 | Err(Error::RowNotFound)
96 | }
97 |
98 | #[tracing::instrument(level = "debug")]
99 | pub async fn save_whitelist(&self, tracker: Arc, whitelists: Vec<(InfoHash, UpdatesAction)>) -> Result
100 | {
101 | if self.engine.is_some() {
102 | return match self.engine.clone().unwrap() {
103 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().save_whitelist(tracker.clone(), whitelists).await }
104 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().save_whitelist(tracker.clone(), whitelists).await }
105 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().save_whitelist(tracker.clone(), whitelists).await }
106 | };
107 | }
108 |
109 | Err(Error::RowNotFound)
110 | }
111 |
112 | #[tracing::instrument(level = "debug")]
113 | pub async fn save_blacklist(&self, tracker: Arc, blacklists: Vec<(InfoHash, UpdatesAction)>) -> Result
114 | {
115 | if self.engine.is_some() {
116 | return match self.engine.clone().unwrap() {
117 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().save_blacklist(tracker.clone(), blacklists).await }
118 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().save_blacklist(tracker.clone(), blacklists).await }
119 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().save_blacklist(tracker.clone(), blacklists).await }
120 | };
121 | }
122 |
123 | Err(Error::RowNotFound)
124 | }
125 |
126 | #[tracing::instrument(level = "debug")]
127 | pub async fn save_keys(&self, tracker: Arc, keys: BTreeMap) -> Result
128 | {
129 | if self.engine.is_some() {
130 | return match self.engine.clone().unwrap() {
131 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().save_keys(tracker.clone(), keys).await }
132 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().save_keys(tracker.clone(), keys).await }
133 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().save_keys(tracker.clone(), keys).await }
134 | };
135 | }
136 |
137 | Err(Error::RowNotFound)
138 | }
139 |
140 | #[tracing::instrument(level = "debug")]
141 | pub async fn save_torrents(&self, tracker: Arc, torrents: BTreeMap) -> Result<(), Error>
142 | {
143 | if self.engine.is_some() {
144 | return match self.engine.clone().unwrap() {
145 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().save_torrents(tracker.clone(), torrents).await }
146 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().save_torrents(tracker.clone(), torrents).await }
147 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().save_torrents(tracker.clone(), torrents).await }
148 | };
149 | }
150 |
151 | Err(Error::RowNotFound)
152 | }
153 |
154 | #[tracing::instrument(level = "debug")]
155 | pub async fn save_users(&self, tracker: Arc, users: BTreeMap) -> Result<(), Error>
156 | {
157 | if self.engine.is_some() {
158 | return match self.engine.clone().unwrap() {
159 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().save_users(tracker.clone(), users).await }
160 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().save_users(tracker.clone(), users).await }
161 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().save_users(tracker.clone(), users).await }
162 | };
163 | }
164 |
165 | Err(Error::RowNotFound)
166 | }
167 |
168 | #[tracing::instrument(level = "debug")]
169 | pub async fn reset_seeds_peers(&self, tracker: Arc) -> Result<(), Error>
170 | {
171 | if self.engine.is_some() {
172 | return match self.engine.clone().unwrap() {
173 | DatabaseDrivers::sqlite3 => { self.sqlite.clone().unwrap().reset_seeds_peers(tracker.clone()).await }
174 | DatabaseDrivers::mysql => { self.mysql.clone().unwrap().reset_seeds_peers(tracker.clone()).await }
175 | DatabaseDrivers::pgsql => { self.pgsql.clone().unwrap().reset_seeds_peers(tracker.clone()).await }
176 | };
177 | }
178 |
179 | Err(Error::RowNotFound)
180 | }
181 | }
--------------------------------------------------------------------------------
/src/database/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod enums;
2 | pub mod impls;
3 | pub mod structs;
--------------------------------------------------------------------------------
/src/database/structs.rs:
--------------------------------------------------------------------------------
1 | pub mod database_connector;
2 | pub mod database_connector_sqlite;
3 | pub mod database_connector_mysql;
4 | pub mod database_connector_pgsql;
--------------------------------------------------------------------------------
/src/database/structs/database_connector.rs:
--------------------------------------------------------------------------------
1 | use crate::database::enums::database_drivers::DatabaseDrivers;
2 | use crate::database::structs::database_connector_mysql::DatabaseConnectorMySQL;
3 | use crate::database::structs::database_connector_pgsql::DatabaseConnectorPgSQL;
4 | use crate::database::structs::database_connector_sqlite::DatabaseConnectorSQLite;
5 |
6 | #[derive(Debug, Clone)]
7 | pub struct DatabaseConnector {
8 | pub(crate) mysql: Option,
9 | pub(crate) sqlite: Option,
10 | pub(crate) pgsql: Option,
11 | pub(crate) engine: Option,
12 | }
--------------------------------------------------------------------------------
/src/database/structs/database_connector_mysql.rs:
--------------------------------------------------------------------------------
1 | use sqlx::{MySql, Pool};
2 |
3 | #[derive(Debug, Clone)]
4 | pub struct DatabaseConnectorMySQL {
5 | pub(crate) pool: Pool,
6 | }
--------------------------------------------------------------------------------
/src/database/structs/database_connector_pgsql.rs:
--------------------------------------------------------------------------------
1 | use sqlx::{Pool, Postgres};
2 |
3 | #[derive(Debug, Clone)]
4 | pub struct DatabaseConnectorPgSQL {
5 | pub(crate) pool: Pool,
6 | }
--------------------------------------------------------------------------------
/src/database/structs/database_connector_sqlite.rs:
--------------------------------------------------------------------------------
1 | use sqlx::{Pool, Sqlite};
2 |
3 | #[derive(Debug, Clone)]
4 | pub struct DatabaseConnectorSQLite {
5 | pub(crate) pool: Pool,
6 | }
--------------------------------------------------------------------------------
/src/http/enums.rs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Power2All/torrust-actix/55622882ecfbd6612fc172f130a677b8a83a65b4/src/http/enums.rs
--------------------------------------------------------------------------------
/src/http/impls.rs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Power2All/torrust-actix/55622882ecfbd6612fc172f130a677b8a83a65b4/src/http/impls.rs
--------------------------------------------------------------------------------
/src/http/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod enums;
2 | pub mod structs;
3 | pub mod impls;
4 | pub mod types;
5 | #[allow(clippy::module_inception)]
6 | pub mod http;
--------------------------------------------------------------------------------
/src/http/structs.rs:
--------------------------------------------------------------------------------
1 | pub mod http_service_data;
--------------------------------------------------------------------------------
/src/http/structs/http_service_data.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 | use crate::config::structs::http_trackers_config::HttpTrackersConfig;
3 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
4 |
5 | #[derive(Debug)]
6 | pub struct HttpServiceData {
7 | pub(crate) torrent_tracker: Arc,
8 | pub(crate) http_trackers_config: Arc
9 | }
--------------------------------------------------------------------------------
/src/http/types.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use actix_web::HttpResponse;
3 |
4 | pub type HttpServiceQueryHashingMapOk = HashMap>>;
5 | pub type HttpServiceQueryHashingMapErr = HttpResponse;
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod common;
2 | pub mod config;
3 | pub mod database;
4 | pub mod tracker;
5 | pub mod stats;
6 | pub mod api;
7 | pub mod http;
8 | pub mod udp;
9 | pub mod structs;
--------------------------------------------------------------------------------
/src/stats/enums.rs:
--------------------------------------------------------------------------------
1 | pub mod stats_event;
--------------------------------------------------------------------------------
/src/stats/enums/stats_event.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Identifies which statistics counter an update targets; the variants mirror
/// the fields of `Stats`/`StatsAtomics` one-to-one.
#[derive(Debug, Serialize, Deserialize, Clone, Copy)]
pub enum StatsEvent {
    // Gauges for tracked entities and pending update queues.
    Torrents,
    TorrentsUpdates,
    Users,
    UsersUpdates,
    // Timestamps of the last maintenance runs.
    TimestampSave,
    TimestampTimeout,
    TimestampConsole,
    TimestampKeysTimeout,
    Seeds,
    Peers,
    Completed,
    // Whitelist / blacklist / key bookkeeping.
    WhitelistEnabled,
    Whitelist,
    WhitelistUpdates,
    BlacklistEnabled,
    Blacklist,
    BlacklistUpdates,
    Key,
    KeyUpdates,
    // Per-protocol request counters: TCP over IPv4.
    Tcp4NotFound,
    Tcp4Failure,
    Tcp4ConnectionsHandled,
    Tcp4ApiHandled,
    Tcp4AnnouncesHandled,
    Tcp4ScrapesHandled,
    // TCP over IPv6.
    Tcp6NotFound,
    Tcp6Failure,
    Tcp6ConnectionsHandled,
    Tcp6ApiHandled,
    Tcp6AnnouncesHandled,
    Tcp6ScrapesHandled,
    // UDP over IPv4.
    Udp4BadRequest,
    Udp4InvalidRequest,
    Udp4ConnectionsHandled,
    Udp4AnnouncesHandled,
    Udp4ScrapesHandled,
    // UDP over IPv6.
    Udp6BadRequest,
    Udp6InvalidRequest,
    Udp6ConnectionsHandled,
    Udp6AnnouncesHandled,
    Udp6ScrapesHandled
}
--------------------------------------------------------------------------------
/src/stats/impls.rs:
--------------------------------------------------------------------------------
1 | pub mod torrent_tracker;
--------------------------------------------------------------------------------
/src/stats/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod enums;
2 | pub mod impls;
3 | pub mod structs;
4 | pub mod tests;
--------------------------------------------------------------------------------
/src/stats/structs.rs:
--------------------------------------------------------------------------------
1 | pub mod stats;
2 | pub mod stats_atomics;
--------------------------------------------------------------------------------
/src/stats/structs/stats.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
/// Plain-value snapshot of the tracker statistics, suitable for serialization;
/// the live counters are kept in `StatsAtomics`.
#[derive(Serialize, Deserialize, Clone, Copy)]
pub struct Stats {
    // Unix timestamp of tracker start — presumably; confirm where it is set.
    pub started: i64,
    // Timestamps of the last maintenance runs.
    pub timestamp_run_save: i64,
    pub timestamp_run_timeout: i64,
    pub timestamp_run_console: i64,
    pub timestamp_run_keys_timeout: i64,
    // Entity gauges and pending update-queue sizes.
    pub torrents: i64,
    pub torrents_updates: i64,
    pub users: i64,
    pub users_updates: i64,
    pub seeds: i64,
    pub peers: i64,
    pub completed: i64,
    pub whitelist_enabled: bool,
    pub whitelist: i64,
    pub whitelist_updates: i64,
    pub blacklist_enabled: bool,
    pub blacklist: i64,
    pub blacklist_updates: i64,
    pub keys_enabled: bool,
    pub keys: i64,
    pub keys_updates: i64,
    // Per-protocol request counters (TCP/UDP x IPv4/IPv6).
    pub tcp4_not_found: i64,
    pub tcp4_failure: i64,
    pub tcp4_connections_handled: i64,
    pub tcp4_api_handled: i64,
    pub tcp4_announces_handled: i64,
    pub tcp4_scrapes_handled: i64,
    pub tcp6_not_found: i64,
    pub tcp6_failure: i64,
    pub tcp6_connections_handled: i64,
    pub tcp6_api_handled: i64,
    pub tcp6_announces_handled: i64,
    pub tcp6_scrapes_handled: i64,
    pub udp4_bad_request: i64,
    pub udp4_invalid_request: i64,
    pub udp4_connections_handled: i64,
    pub udp4_announces_handled: i64,
    pub udp4_scrapes_handled: i64,
    pub udp6_bad_request: i64,
    pub udp6_invalid_request: i64,
    pub udp6_connections_handled: i64,
    pub udp6_announces_handled: i64,
    pub udp6_scrapes_handled: i64,
}
--------------------------------------------------------------------------------
/src/stats/structs/stats_atomics.rs:
--------------------------------------------------------------------------------
1 | use std::sync::atomic::{AtomicBool, AtomicI64};
2 | use serde::{Deserialize, Serialize};
3 |
/// Live tracker statistics, field-for-field the atomic counterpart of `Stats`
/// so counters can be updated concurrently without a lock.
#[derive(Debug, Serialize, Deserialize)]
pub struct StatsAtomics {
    pub started: AtomicI64,
    // Timestamps of the last maintenance runs.
    pub timestamp_run_save: AtomicI64,
    pub timestamp_run_timeout: AtomicI64,
    pub timestamp_run_console: AtomicI64,
    pub timestamp_run_keys_timeout: AtomicI64,
    // Entity gauges and pending update-queue sizes.
    pub torrents: AtomicI64,
    pub torrents_updates: AtomicI64,
    pub users: AtomicI64,
    pub users_updates: AtomicI64,
    pub seeds: AtomicI64,
    pub peers: AtomicI64,
    pub completed: AtomicI64,
    pub whitelist_enabled: AtomicBool,
    pub whitelist: AtomicI64,
    pub whitelist_updates: AtomicI64,
    pub blacklist_enabled: AtomicBool,
    pub blacklist: AtomicI64,
    pub blacklist_updates: AtomicI64,
    pub keys_enabled: AtomicBool,
    pub keys: AtomicI64,
    pub keys_updates: AtomicI64,
    // Per-protocol request counters (TCP/UDP x IPv4/IPv6).
    pub tcp4_not_found: AtomicI64,
    pub tcp4_failure: AtomicI64,
    pub tcp4_connections_handled: AtomicI64,
    pub tcp4_api_handled: AtomicI64,
    pub tcp4_announces_handled: AtomicI64,
    pub tcp4_scrapes_handled: AtomicI64,
    pub tcp6_not_found: AtomicI64,
    pub tcp6_failure: AtomicI64,
    pub tcp6_connections_handled: AtomicI64,
    pub tcp6_api_handled: AtomicI64,
    pub tcp6_announces_handled: AtomicI64,
    pub tcp6_scrapes_handled: AtomicI64,
    pub udp4_bad_request: AtomicI64,
    pub udp4_invalid_request: AtomicI64,
    pub udp4_connections_handled: AtomicI64,
    pub udp4_announces_handled: AtomicI64,
    pub udp4_scrapes_handled: AtomicI64,
    pub udp6_bad_request: AtomicI64,
    pub udp6_invalid_request: AtomicI64,
    pub udp6_connections_handled: AtomicI64,
    pub udp6_announces_handled: AtomicI64,
    pub udp6_scrapes_handled: AtomicI64,
}
--------------------------------------------------------------------------------
/src/stats/tests.rs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Power2All/torrust-actix/55622882ecfbd6612fc172f130a677b8a83a65b4/src/stats/tests.rs
--------------------------------------------------------------------------------
/src/structs.rs:
--------------------------------------------------------------------------------
1 | use clap::Parser;
2 |
3 | #[derive(Debug, Parser)]
4 | #[command(author, version, about, long_about = None)]
5 | pub struct Cli {
6 | /// Create config.toml file if not exists or is broken
7 | #[arg(long)]
8 | pub create_config: bool,
9 | /// Create the database for the engine that is used in the config.toml
10 | #[arg(long)]
11 | pub create_database: bool,
12 | /// Create a development self-signed key and certificate file in PEM format
13 | #[arg(long)]
14 | pub create_selfsigned: bool,
15 |
16 | /// Add an extra domain/subdomain into the certificate, for development
17 | #[arg(long, requires("create_selfsigned"), default_value = "localhost")]
18 | pub selfsigned_domain: String,
19 | /// Give the filename of the key file of the certificate, default key.pem
20 | #[arg(long, requires("create_selfsigned"), default_value = "key.pem")]
21 | pub selfsigned_keyfile: String,
22 | /// Give the filename of the certificate file, default cert.pem
23 | #[arg(long, requires("create_selfsigned"), default_value = "cert.pem")]
24 | pub selfsigned_certfile: String,
25 |
26 | /// Create export files of the data from the database, useful for migration or backup
27 | #[arg(long)]
28 | pub export: bool,
29 | /// Give the filename of the JSON file for torrents, default torrents.json
30 | #[arg(long, requires("export"), default_value = "torrents.json")]
31 | pub export_file_torrents: String,
32 | /// Give the filename of the JSON file for whitelists, default whitelists.json
33 | #[arg(long, requires("export"), default_value = "whitelists.json")]
34 | pub export_file_whitelists: String,
35 | /// Give the filename of the JSON file for blacklists, default blacklists.json
36 | #[arg(long, requires("export"), default_value = "blacklists.json")]
37 | pub export_file_blacklists: String,
38 | /// Give the filename of the JSON file for keys, default keys.json
39 | #[arg(long, requires("export"), default_value = "keys.json")]
40 | pub export_file_keys: String,
41 | /// Give the filename of the JSON file for users, default users.json
42 | #[arg(long, requires("export"), default_value = "users.json")]
43 | pub export_file_users: String,
44 |
45 | /// Import data from JSON files
46 | #[arg(long)]
47 | pub import: bool,
48 | /// Give the filename of the JSON file for torrents, default torrents.json
49 | #[arg(long, requires("export"), default_value = "torrents.json")]
50 | pub import_file_torrents: String,
51 | /// Give the filename of the JSON file for whitelists, default whitelists.json
52 | #[arg(long, requires("export"), default_value = "whitelists.json")]
53 | pub import_file_whitelists: String,
54 | /// Give the filename of the JSON file for blacklists, default blacklists.json
55 | #[arg(long, requires("export"), default_value = "blacklists.json")]
56 | pub import_file_blacklists: String,
57 | /// Give the filename of the JSON file for keys, default keys.json
58 | #[arg(long, requires("export"), default_value = "keys.json")]
59 | pub import_file_keys: String,
60 | /// Give the filename of the JSON file for users, default users.json
61 | #[arg(long, requires("export"), default_value = "users.json")]
62 | pub import_file_users: String,
63 | }
--------------------------------------------------------------------------------
/src/tracker/enums.rs:
--------------------------------------------------------------------------------
// Enum definitions used by the tracker core.
pub mod announce_event;       // announce `event` field values
pub mod announce_event_def;   // serde remote-derive mirror of `AnnounceEvent`
pub mod torrent_peers_type;   // All / IPv4 / IPv6 peer selection
pub mod updates_action;       // Add / Remove / Update queue actions
--------------------------------------------------------------------------------
/src/tracker/enums/announce_event.rs:
--------------------------------------------------------------------------------
1 | use serde::Deserialize;
2 |
/// Announce `event` values; the explicit discriminants (0-3) match the
/// integer encoding used by `from_i32`/`to_i32` elsewhere in the tracker
/// (same values as the UDP tracker announce `event` field).
#[derive(Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum AnnounceEvent {
    None = 0,
    Completed = 1,
    Started = 2,
    Stopped = 3
}
--------------------------------------------------------------------------------
/src/tracker/enums/announce_event_def.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | use crate::tracker::enums::announce_event::AnnounceEvent;
3 |
/// serde "remote derive" mirror of [`AnnounceEvent`], for use with
/// `#[serde(with = "AnnounceEventDef")]` on fields holding the remote type.
#[derive(Serialize, Deserialize)]
#[serde(remote = "AnnounceEvent")]
pub enum AnnounceEventDef {
    // NOTE(review): variant order here differs from `AnnounceEvent`'s own
    // declaration order. Matching is by name, but the order fixes the
    // variant indices used by index-based formats (e.g. bincode) — confirm
    // this ordering is intentional.
    Started,
    Stopped,
    Completed,
    None,
}
--------------------------------------------------------------------------------
/src/tracker/enums/torrent_peers_type.rs:
--------------------------------------------------------------------------------
1 |
/// Address-family filter applied when collecting a torrent's peers.
#[derive(Debug)]
pub enum TorrentPeersType {
    All,
    IPv4,
    IPv6
}
--------------------------------------------------------------------------------
/src/tracker/enums/updates_action.rs:
--------------------------------------------------------------------------------
1 | use serde::Deserialize;
2 |
/// Action attached to an entry in the in-memory update queues, telling the
/// database layer what to do with the record when the queue is flushed.
#[derive(Deserialize, PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum UpdatesAction {
    Add,
    Remove,
    Update,
}
--------------------------------------------------------------------------------
/src/tracker/impls.rs:
--------------------------------------------------------------------------------
// Implementation modules of the tracker: each file contributes `impl`
// blocks (inherent methods or trait impls) for the types declared under
// the sibling `structs`/`enums` modules.
pub mod info_hash;
pub mod info_hash_visitor;
pub mod peer_id;
pub mod peer_id_visitor;
pub mod torrent_entry;
pub mod torrent_peer;
pub mod torrent_tracker;
pub mod torrent_tracker_keys;
pub mod torrent_tracker_peers;
pub mod torrent_tracker_torrents;
pub mod torrent_tracker_handlers;
pub mod torrent_tracker_torrents_blacklist;
pub mod torrent_tracker_torrents_updates;
pub mod torrent_tracker_torrents_whitelist;
pub mod torrent_tracker_users;
pub mod torrent_tracker_users_updates;
pub mod user_id;
pub mod user_id_visitor;
pub mod announce_event;
pub mod torrent_sharding;
pub mod torrent_tracker_import;
pub mod torrent_tracker_export;
pub mod torrent_tracker_cert_gen;
pub mod torrent_tracker_torrents_blacklist_updates;
pub mod torrent_tracker_torrents_whitelist_updates;
pub mod torrent_tracker_keys_updates;
--------------------------------------------------------------------------------
/src/tracker/impls/announce_event.rs:
--------------------------------------------------------------------------------
1 | use crate::tracker::enums::announce_event::AnnounceEvent;
2 |
3 | impl AnnounceEvent {
4 | #[inline]
5 | pub fn from_i32(i: i32) -> Self {
6 | match i {
7 | 1 => Self::Completed,
8 | 2 => Self::Started,
9 | 3 => Self::Stopped,
10 | _ => Self::None,
11 | }
12 | }
13 |
14 | #[inline]
15 | pub fn to_i32(&self) -> i32 {
16 | match self {
17 | AnnounceEvent::None => 0,
18 | AnnounceEvent::Completed => 1,
19 | AnnounceEvent::Started => 2,
20 | AnnounceEvent::Stopped => 3,
21 | }
22 | }
23 | }
--------------------------------------------------------------------------------
/src/tracker/impls/info_hash.rs:
--------------------------------------------------------------------------------
1 | use std::fmt;
2 | use std::fmt::Formatter;
3 | use crate::common::common::bin2hex;
4 | use crate::tracker::structs::info_hash::InfoHash;
5 | use crate::tracker::structs::info_hash_visitor::InfoHashVisitor;
6 |
// Formats the info-hash as hex via the shared `bin2hex` helper.
impl fmt::Display for InfoHash {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        bin2hex(&self.0, f)
    }
}
12 |
13 | impl std::str::FromStr for InfoHash {
14 | type Err = binascii::ConvertError;
15 |
16 | fn from_str(s: &str) -> Result {
17 | let mut i = Self([0u8; 20]);
18 | if s.len() != 40 {
19 | return Err(binascii::ConvertError::InvalidInputLength);
20 | }
21 | binascii::hex2bin(s.as_bytes(), &mut i.0)?;
22 | Ok(i)
23 | }
24 | }
25 |
26 | impl From<&[u8]> for InfoHash {
27 | fn from(data: &[u8]) -> InfoHash {
28 | assert_eq!(data.len(), 20);
29 | let mut ret = InfoHash([0u8; 20]);
30 | ret.0.clone_from_slice(data);
31 | ret
32 | }
33 | }
34 |
// Infallible wrapper construction from the exact-size byte array.
impl From<[u8; 20]> for InfoHash {
    fn from(data: [u8; 20]) -> Self {
        InfoHash(data)
    }
}
40 |
41 | impl serde::ser::Serialize for InfoHash {
42 | fn serialize(&self, serializer: S) -> Result {
43 | let mut buffer = [0u8; 40];
44 | let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap();
45 | let str_out = std::str::from_utf8(bytes_out).unwrap();
46 | serializer.serialize_str(str_out)
47 | }
48 | }
49 |
50 | impl<'de> serde::de::Deserialize<'de> for InfoHash {
51 | fn deserialize>(des: D) -> Result {
52 | des.deserialize_str(InfoHashVisitor)
53 | }
54 | }
--------------------------------------------------------------------------------
/src/tracker/impls/info_hash_visitor.rs:
--------------------------------------------------------------------------------
1 | use std::fmt;
2 | use std::fmt::Formatter;
3 | use crate::tracker::structs::info_hash::InfoHash;
4 | use crate::tracker::structs::info_hash_visitor::InfoHashVisitor;
5 |
6 | impl serde::de::Visitor<'_> for InfoHashVisitor {
7 | type Value = InfoHash;
8 |
9 | fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
10 | write!(formatter, "a 40 character long hash")
11 | }
12 |
13 | fn visit_str(self, v: &str) -> Result {
14 | if v.len() != 40 {
15 | return Err(serde::de::Error::invalid_value(
16 | serde::de::Unexpected::Str(v),
17 | &"expected a 40 character long string",
18 | ));
19 | }
20 |
21 | let mut res = InfoHash([0u8; 20]);
22 |
23 | if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() {
24 | Err(serde::de::Error::invalid_value(
25 | serde::de::Unexpected::Str(v),
26 | &"expected a hexadecimal string",
27 | ))
28 | } else {
29 | Ok(res)
30 | }
31 | }
32 | }
--------------------------------------------------------------------------------
/src/tracker/impls/peer_id.rs:
--------------------------------------------------------------------------------
1 | use std::fmt;
2 | use std::fmt::Formatter;
3 | use serde::Serialize;
4 | use crate::common::common::bin2hex;
5 | use crate::tracker::structs::peer_id::PeerId;
6 | use crate::tracker::structs::peer_id_visitor::PeerIdVisitor;
7 |
// Formats the peer-id as hex via the shared `bin2hex` helper.
impl fmt::Display for PeerId {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        bin2hex(&self.0, f)
    }
}
13 |
14 | impl PeerId {
15 | pub fn get_client_name(&self) -> Option<&'static str> {
16 | if self.0[0] == b'M' {
17 | return Some("BitTorrent");
18 | }
19 | if self.0[0] == b'-' {
20 | let name = match &self.0[1..3] {
21 | b"AG" => "Ares",
22 | b"A~" => "Ares",
23 | b"AR" => "Arctic",
24 | b"AV" => "Avicora",
25 | b"AX" => "BitPump",
26 | b"AZ" => "Azureus",
27 | b"BB" => "BitBuddy",
28 | b"BC" => "BitComet",
29 | b"BF" => "Bitflu",
30 | b"BG" => "BTG (uses Rasterbar libtorrent)",
31 | b"BR" => "BitRocket",
32 | b"BS" => "BTSlave",
33 | b"BX" => "~Bittorrent X",
34 | b"CD" => "Enhanced CTorrent",
35 | b"CT" => "CTorrent",
36 | b"DE" => "DelugeTorrent",
37 | b"DP" => "Propagate Data Client",
38 | b"EB" => "EBit",
39 | b"ES" => "electric sheep",
40 | b"FT" => "FoxTorrent",
41 | b"FW" => "FrostWire",
42 | b"FX" => "Freebox BitTorrent",
43 | b"GS" => "GSTorrent",
44 | b"HL" => "Halite",
45 | b"HN" => "Hydranode",
46 | b"KG" => "KGet",
47 | b"KT" => "KTorrent",
48 | b"LH" => "LH-ABC",
49 | b"LP" => "Lphant",
50 | b"LT" => "libtorrent",
51 | b"lt" => "libTorrent",
52 | b"LW" => "LimeWire",
53 | b"MO" => "MonoTorrent",
54 | b"MP" => "MooPolice",
55 | b"MR" => "Miro",
56 | b"MT" => "MoonlightTorrent",
57 | b"NX" => "Net Transport",
58 | b"PD" => "Pando",
59 | b"PI" => "PicoTorrent",
60 | b"qB" => "qBittorrent",
61 | b"QD" => "QQDownload",
62 | b"QT" => "Qt 4 Torrent example",
63 | b"RT" => "Retriever",
64 | b"S~" => "Shareaza alpha/beta",
65 | b"SB" => "~Swiftbit",
66 | b"SS" => "SwarmScope",
67 | b"ST" => "SymTorrent",
68 | b"st" => "sharktorrent",
69 | b"SZ" => "Shareaza",
70 | b"TN" => "TorrentDotNET",
71 | b"TR" => "Transmission",
72 | b"TS" => "Torrentstorm",
73 | b"TT" => "TuoTu",
74 | b"UL" => "uLeecher!",
75 | b"UT" => "µTorrent",
76 | b"UW" => "µTorrent Web",
77 | b"VG" => "Vagaa",
78 | b"WD" => "WebTorrent Desktop",
79 | b"WT" => "BitLet",
80 | b"WW" => "WebTorrent",
81 | b"WY" => "FireTorrent",
82 | b"XL" => "Xunlei",
83 | b"XT" => "XanTorrent",
84 | b"XX" => "Xtorrent",
85 | b"ZT" => "ZipTorrent",
86 | _ => return None,
87 | };
88 | Some(name)
89 | } else {
90 | None
91 | }
92 | }
93 | }
94 |
95 | impl Serialize for PeerId {
96 | fn serialize(&self, serializer: S) -> Result
97 | where
98 | S: serde::Serializer, {
99 | let buff_size = self.0.len() * 2;
100 | let mut tmp: Vec = vec![0; buff_size];
101 | binascii::bin2hex(&self.0, &mut tmp).unwrap();
102 | let id = std::str::from_utf8(&tmp).ok();
103 |
104 | #[derive(Serialize)]
105 | struct PeerIdInfo<'a> {
106 | id: Option<&'a str>,
107 | client: Option<&'a str>,
108 | }
109 |
110 | let obj = PeerIdInfo {
111 | id,
112 | client: self.get_client_name(),
113 | };
114 | obj.serialize(serializer)
115 | }
116 | }
117 |
118 | impl std::str::FromStr for PeerId {
119 | type Err = binascii::ConvertError;
120 |
121 | fn from_str(s: &str) -> Result {
122 | let mut i = Self([0u8; 20]);
123 | if s.len() != 40 {
124 | return Err(binascii::ConvertError::InvalidInputLength);
125 | }
126 | binascii::hex2bin(s.as_bytes(), &mut i.0)?;
127 | Ok(i)
128 | }
129 | }
130 |
131 | impl From<&[u8]> for PeerId {
132 | fn from(data: &[u8]) -> PeerId {
133 | assert_eq!(data.len(), 20);
134 | let mut ret = PeerId([0u8; 20]);
135 | ret.0.clone_from_slice(data);
136 | ret
137 | }
138 | }
139 |
140 | impl<'de> serde::de::Deserialize<'de> for PeerId {
141 | fn deserialize>(des: D) -> Result {
142 | des.deserialize_str(PeerIdVisitor)
143 | }
144 | }
--------------------------------------------------------------------------------
/src/tracker/impls/peer_id_visitor.rs:
--------------------------------------------------------------------------------
1 | use std::fmt;
2 | use std::fmt::Formatter;
3 | use crate::tracker::structs::peer_id::PeerId;
4 | use crate::tracker::structs::peer_id_visitor::PeerIdVisitor;
5 |
6 | impl serde::de::Visitor<'_> for PeerIdVisitor {
7 | type Value = PeerId;
8 |
9 | fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
10 | write!(formatter, "a 40 character long hash")
11 | }
12 |
13 | fn visit_str(self, v: &str) -> Result {
14 | if v.len() != 40 {
15 | return Err(serde::de::Error::invalid_value(
16 | serde::de::Unexpected::Str(v),
17 | &"expected a 40 character long string",
18 | ));
19 | }
20 |
21 | let mut res = PeerId([0u8; 20]);
22 |
23 | if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() {
24 | Err(serde::de::Error::invalid_value(
25 | serde::de::Unexpected::Str(v),
26 | &"expected a hexadecimal string",
27 | ))
28 | } else {
29 | Ok(res)
30 | }
31 | }
32 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_entry.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use crate::tracker::structs::torrent_entry::TorrentEntry;
3 |
impl TorrentEntry {
    /// Creates an empty torrent entry: no seeds, no peers, zero completed
    /// count, with the `updated` timestamp set to "now".
    pub fn new() -> TorrentEntry {
        TorrentEntry {
            peers: BTreeMap::new(),
            seeds: BTreeMap::new(),
            completed: 0u64,
            updated: std::time::Instant::now(),
        }
    }
}

// `Default` is simply an empty entry.
impl Default for TorrentEntry {
    fn default() -> Self {
        Self::new()
    }
}
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_peer.rs:
--------------------------------------------------------------------------------
1 | use std::net::{IpAddr, SocketAddr};
2 | use crate::tracker::structs::torrent_peer::TorrentPeer;
3 |
impl TorrentPeer {
    /// Builds the peer's socket address from the observed remote IP and the
    /// announced port.
    // NOTE(review): the name mentions an "opt host ip", but no such
    // parameter exists here — the remote IP is always used as-is. Possibly
    // a leftover from an earlier signature; confirm before renaming.
    pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, port: u16) -> SocketAddr {
        SocketAddr::new(remote_ip, port)
    }
}
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker.rs:
--------------------------------------------------------------------------------
1 | use std::collections::{BTreeMap, HashMap};
2 | use std::sync::Arc;
3 | use std::sync::atomic::{AtomicBool, AtomicI64};
4 | use chrono::Utc;
5 | use parking_lot::RwLock;
6 | use crate::config::structs::configuration::Configuration;
7 | use crate::database::structs::database_connector::DatabaseConnector;
8 | use crate::stats::structs::stats_atomics::StatsAtomics;
9 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
10 |
11 | impl TorrentTracker {
12 | #[tracing::instrument(level = "debug")]
13 | pub async fn new(config: Arc, create_database: bool) -> TorrentTracker
14 | {
15 | TorrentTracker {
16 | config: config.clone(),
17 | torrents_sharding: Arc::new(Default::default()),
18 | torrents_updates: Arc::new(RwLock::new(HashMap::new())),
19 | torrents_whitelist: Arc::new(RwLock::new(Vec::new())),
20 | torrents_whitelist_updates: Arc::new(RwLock::new(HashMap::new())),
21 | torrents_blacklist: Arc::new(RwLock::new(Vec::new())),
22 | torrents_blacklist_updates: Arc::new(RwLock::new(HashMap::new())),
23 | keys: Arc::new(RwLock::new(BTreeMap::new())),
24 | keys_updates: Arc::new(RwLock::new(HashMap::new())),
25 | stats: Arc::new(StatsAtomics {
26 | started: AtomicI64::new(Utc::now().timestamp()),
27 | timestamp_run_save: AtomicI64::new(0),
28 | timestamp_run_timeout: AtomicI64::new(0),
29 | timestamp_run_console: AtomicI64::new(0),
30 | timestamp_run_keys_timeout: AtomicI64::new(0),
31 | torrents: AtomicI64::new(0),
32 | torrents_updates: AtomicI64::new(0),
33 | users: AtomicI64::new(0),
34 | users_updates: AtomicI64::new(0),
35 | seeds: AtomicI64::new(0),
36 | peers: AtomicI64::new(0),
37 | completed: AtomicI64::new(0),
38 | whitelist_enabled: AtomicBool::new(config.tracker_config.clone().whitelist_enabled),
39 | whitelist: AtomicI64::new(0),
40 | whitelist_updates: AtomicI64::new(0),
41 | blacklist_enabled: AtomicBool::new(config.tracker_config.clone().blacklist_enabled),
42 | blacklist: AtomicI64::new(0),
43 | blacklist_updates: AtomicI64::new(0),
44 | keys_enabled: AtomicBool::new(config.tracker_config.clone().keys_enabled),
45 | keys: AtomicI64::new(0),
46 | keys_updates: AtomicI64::new(0),
47 | tcp4_connections_handled: AtomicI64::new(0),
48 | tcp4_api_handled: AtomicI64::new(0),
49 | tcp4_announces_handled: AtomicI64::new(0),
50 | tcp4_scrapes_handled: AtomicI64::new(0),
51 | tcp4_not_found: AtomicI64::new(0),
52 | tcp4_failure: AtomicI64::new(0),
53 | tcp6_connections_handled: AtomicI64::new(0),
54 | tcp6_api_handled: AtomicI64::new(0),
55 | tcp6_announces_handled: AtomicI64::new(0),
56 | tcp6_scrapes_handled: AtomicI64::new(0),
57 | tcp6_not_found: AtomicI64::new(0),
58 | tcp6_failure: AtomicI64::new(0),
59 | udp4_invalid_request: AtomicI64::new(0),
60 | udp4_bad_request: AtomicI64::new(0),
61 | udp4_connections_handled: AtomicI64::new(0),
62 | udp4_announces_handled: AtomicI64::new(0),
63 | udp4_scrapes_handled: AtomicI64::new(0),
64 | udp6_invalid_request: AtomicI64::new(0),
65 | udp6_bad_request: AtomicI64::new(0),
66 | udp6_connections_handled: AtomicI64::new(0),
67 | udp6_announces_handled: AtomicI64::new(0),
68 | udp6_scrapes_handled: AtomicI64::new(0),
69 | }),
70 | users: Arc::new(RwLock::new(BTreeMap::new())),
71 | users_updates: Arc::new(RwLock::new(HashMap::new())),
72 | sqlx: DatabaseConnector::new(config.clone(), create_database).await,
73 | }
74 | }
75 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_cert_gen.rs:
--------------------------------------------------------------------------------
1 | use std::fs;
2 | use std::process::exit;
3 | use log::{error, info};
4 | use rcgen::{generate_simple_self_signed, CertifiedKey};
5 | use crate::structs::Cli;
6 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
7 |
impl TorrentTracker {
    /// Generates a development self-signed key/certificate pair in PEM
    /// format, writes them to the filenames from `args`, and exits the
    /// process with status 0. A failed write panics.
    #[tracing::instrument(level = "debug")]
    pub async fn cert_gen(&self, args: &Cli)
    {
        info!("[CERTGEN] Requesting to generate a self-signed key and certificate file");

        // Set localhost and optional domain if given.
        let mut subject_alt_names = vec![
            String::from("localhost")
        ];
        if args.selfsigned_domain != *"localhost" {
            subject_alt_names.push(args.selfsigned_domain.clone());
        }

        // Generate X.509 key and cert file.
        let CertifiedKey { cert, key_pair} = generate_simple_self_signed(subject_alt_names).unwrap();

        // Write the key and cert file.
        match fs::write(args.selfsigned_keyfile.as_str(), key_pair.serialize_pem()) {
            Ok(_) => {
                info!("[CERTGEN] The key file {} has been generated", args.selfsigned_keyfile.as_str());
            }
            Err(error) => {
                error!("[CERTGEN] The key file {} could not be generated!", args.selfsigned_keyfile.as_str());
                panic!("[CERTGEN] {}", error)
            }
        }
        match fs::write(args.selfsigned_certfile.as_str(), cert.pem()) {
            Ok(_) => {
                info!("[CERTGEN] The cert file {} has been generated", args.selfsigned_certfile.as_str());
            }
            Err(error) => {
                error!("[CERTGEN] The cert file {} could not be generated!", args.selfsigned_certfile.as_str());
                panic!("[CERTGEN] {}", error)
            }
        }

        info!("[CERTGEN] The files {} and {} has been generated, use them only for development reasons", args.selfsigned_keyfile.as_str(), args.selfsigned_certfile.as_str());
        exit(0)
    }
}
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_export.rs:
--------------------------------------------------------------------------------
1 | use std::fs;
2 | use std::process::exit;
3 | use std::sync::Arc;
4 | use log::{error, info};
5 | use crate::structs::Cli;
6 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
7 |
8 | impl TorrentTracker {
9 | #[tracing::instrument(level = "debug")]
10 | pub async fn export(&self, args: &Cli, tracker: Arc)
11 | {
12 | info!("[EXPORT] Requesting to export data");
13 |
14 | info!("[EXPORT] Exporting torrents to file {}", args.export_file_torrents.as_str());
15 | match fs::write(args.export_file_torrents.as_str(), serde_json::to_vec(&tracker.clone().torrents_sharding.get_all_content()).unwrap()) {
16 | Ok(_) => {
17 | info!("[EXPORT] The torrents have been exported");
18 | }
19 | Err(error) => {
20 | error!("[EXPORT] The torrents file {} could not be generated!", args.export_file_torrents.as_str());
21 | panic!("[EXPORT] {}", error)
22 | }
23 | }
24 |
25 | if tracker.config.tracker_config.clone().whitelist_enabled {
26 | info!("[EXPORT] Exporting whitelists to file {}", args.export_file_whitelists.as_str());
27 | match fs::write(args.export_file_whitelists.as_str(), serde_json::to_vec(&tracker.clone().get_whitelist()).unwrap()) {
28 | Ok(_) => {
29 | info!("[EXPORT] The whitelists have been exported");
30 | }
31 | Err(error) => {
32 | error!("[EXPORT] The whitelists file {} could not be generated!", args.export_file_whitelists.as_str());
33 | panic!("[EXPORT] {}", error)
34 | }
35 | }
36 | }
37 |
38 | if tracker.config.tracker_config.clone().blacklist_enabled {
39 | info!("[EXPORT] Exporting blacklists to file {}", args.export_file_blacklists.as_str());
40 | match fs::write(args.export_file_blacklists.as_str(), serde_json::to_vec(&tracker.clone().get_blacklist()).unwrap()) {
41 | Ok(_) => {
42 | info!("[EXPORT] The blacklists have been exported");
43 | }
44 | Err(error) => {
45 | error!("[EXPORT] The blacklists file {} could not be generated!", args.export_file_blacklists.as_str());
46 | panic!("[EXPORT] {}", error)
47 | }
48 | }
49 | }
50 |
51 | if tracker.config.tracker_config.clone().keys_enabled {
52 | info!("[EXPORT] Exporting keys to file {}", args.export_file_keys.as_str());
53 | match fs::write(args.export_file_keys.as_str(), serde_json::to_vec(&tracker.clone().get_keys()).unwrap()) {
54 | Ok(_) => {
55 | info!("[EXPORT] The keys have been exported");
56 | }
57 | Err(error) => {
58 | error!("[EXPORT] The keys file {} could not be generated!", args.export_file_keys.as_str());
59 | panic!("[EXPORT] {}", error)
60 | }
61 | }
62 | }
63 |
64 | if tracker.config.tracker_config.clone().users_enabled {
65 | info!("[EXPORT] Exporting users to file {}", args.export_file_users.as_str());
66 | match fs::write(args.export_file_users.as_str(), serde_json::to_vec(&tracker.clone().get_users()).unwrap()) {
67 | Ok(_) => {
68 | info!("[EXPORT] The users have been exported");
69 | }
70 | Err(error) => {
71 | error!("[EXPORT] The users file {} could not be generated!", args.export_file_users.as_str());
72 | panic!("[EXPORT] {}", error)
73 | }
74 | }
75 | }
76 |
77 | info!("[EXPORT] Exporting of data completed");
78 | exit(0)
79 | }
80 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_import.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use std::fs;
3 | use std::process::exit;
4 | use std::sync::Arc;
5 | use log::{error, info};
6 | use serde_json::Value;
7 | use crate::structs::Cli;
8 | use crate::tracker::enums::updates_action::UpdatesAction;
9 | use crate::tracker::structs::info_hash::InfoHash;
10 | use crate::tracker::structs::torrent_entry::TorrentEntry;
11 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
12 | use crate::tracker::structs::user_entry_item::UserEntryItem;
13 | use crate::tracker::structs::user_id::UserId;
14 |
15 | impl TorrentTracker {
16 | #[tracing::instrument(level = "debug")]
17 | pub async fn import(&self, args: &Cli, tracker: Arc)
18 | {
19 | info!("[IMPORT] Requesting to import data");
20 |
21 | info!("[IMPORT] Importing torrents to memory {}", args.import_file_torrents.as_str());
22 | match fs::read(args.import_file_torrents.as_str()) {
23 | Ok(data) => {
24 | let torrents: Value = serde_json::from_str(String::from_utf8(data).unwrap().as_str()).unwrap();
25 | for (key, value) in torrents.as_object().unwrap() {
26 | let completed = match value["completed"].as_u64() {
27 | None => { panic!("[IMPORT] 'completed' field doesn't exist or is missing!"); }
28 | Some(completed) => { completed }
29 | };
30 | let info_hash = match hex::decode(key) {
31 | Ok(hash_result) => { InfoHash(<[u8; 20]>::try_from(hash_result[0..20].as_ref()).unwrap()) }
32 | Err(_) => { panic!("[IMPORT] Torrent hash is not hex or invalid!"); }
33 | };
34 | let _ = tracker.add_torrent_update(info_hash, TorrentEntry {
35 | seeds: Default::default(),
36 | peers: Default::default(),
37 | completed,
38 | updated: std::time::Instant::now(),
39 | }, UpdatesAction::Add);
40 | }
41 | match tracker.save_torrent_updates(tracker.clone()).await {
42 | Ok(_) => {}
43 | Err(_) => {
44 | panic!("[IMPORT] Unable to save torrents to the database!");
45 | }
46 | }
47 | }
48 | Err(error) => {
49 | error!("[IMPORT] The torrents file {} could not be imported!", args.import_file_torrents.as_str());
50 | panic!("[IMPORT] {}", error)
51 | }
52 | }
53 |
54 | if tracker.config.tracker_config.clone().whitelist_enabled {
55 | info!("[IMPORT] Importing whitelists to memory {}", args.import_file_whitelists.as_str());
56 | match fs::read(args.import_file_whitelists.as_str()) {
57 | Ok(data) => {
58 | let whitelists: Value = serde_json::from_str(String::from_utf8(data).unwrap().as_str()).unwrap();
59 | for value in whitelists.as_array().unwrap() {
60 | let info_hash = match hex::decode(value.as_str().unwrap()) {
61 | Ok(hash_result) => { InfoHash(<[u8; 20]>::try_from(hash_result[0..20].as_ref()).unwrap()) }
62 | Err(_) => { panic!("[IMPORT] Torrent hash is not hex or invalid!"); }
63 | };
64 | tracker.add_whitelist_update(info_hash, UpdatesAction::Add);
65 | }
66 | match tracker.save_whitelist_updates(tracker.clone()).await {
67 | Ok(_) => {}
68 | Err(_) => {
69 | panic!("[IMPORT] Unable to save whitelist to the database!");
70 | }
71 | }
72 | }
73 | Err(error) => {
74 | error!("[IMPORT] The whitelists file {} could not be imported!", args.import_file_whitelists.as_str());
75 | panic!("[IMPORT] {}", error)
76 | }
77 | }
78 | }
79 |
80 | if tracker.config.tracker_config.clone().blacklist_enabled {
81 | info!("[IMPORT] Importing blacklists to memory {}", args.import_file_blacklists.as_str());
82 | match fs::read(args.import_file_blacklists.as_str()) {
83 | Ok(data) => {
84 | let blacklists: Value = serde_json::from_str(String::from_utf8(data).unwrap().as_str()).unwrap();
85 | for value in blacklists.as_array().unwrap() {
86 | let info_hash = match hex::decode(value.as_str().unwrap()) {
87 | Ok(hash_result) => { InfoHash(<[u8; 20]>::try_from(hash_result[0..20].as_ref()).unwrap()) }
88 | Err(_) => { panic!("[IMPORT] Torrent hash is not hex or invalid!"); }
89 | };
90 | tracker.add_blacklist_update(info_hash, UpdatesAction::Add);
91 | }
92 | match tracker.save_blacklist_updates(tracker.clone()).await {
93 | Ok(_) => {}
94 | Err(_) => { panic!("[IMPORT] Unable to save blacklist to the database!"); }
95 | }
96 | }
97 | Err(error) => {
98 | error!("[IMPORT] The blacklists file {} could not be imported!", args.import_file_blacklists.as_str());
99 | panic!("[IMPORT] {}", error)
100 | }
101 | }
102 | }
103 |
104 | if tracker.config.tracker_config.clone().keys_enabled {
105 | info!("[IMPORT] Importing keys to memory {}", args.import_file_keys.as_str());
106 | match fs::read(args.import_file_keys.as_str()) {
107 | Ok(data) => {
108 | let keys: Value = serde_json::from_str(String::from_utf8(data).unwrap().as_str()).unwrap();
109 | for (key, value) in keys.as_object().unwrap() {
110 | let timeout = match value.as_i64() {
111 | None => { panic!("[IMPORT] timeout value doesn't exist or is missing!"); }
112 | Some(timeout) => { timeout }
113 | };
114 | let hash = match hex::decode(key.as_str()) {
115 | Ok(hash_result) => { InfoHash(<[u8; 20]>::try_from(hash_result[0..20].as_ref()).unwrap()) }
116 | Err(_) => { panic!("[IMPORT] Key hash is not hex or invalid!"); }
117 | };
118 | tracker.add_key_update(hash, timeout, UpdatesAction::Add);
119 | }
120 | match tracker.save_key_updates(tracker.clone()).await {
121 | Ok(_) => {}
122 | Err(_) => { panic!("[IMPORT] Unable to save keys to the database!"); }
123 | }
124 | }
125 | Err(error) => {
126 | error!("[IMPORT] The keys file {} could not be imported!", args.import_file_keys.as_str());
127 | panic!("[IMPORT] {}", error)
128 | }
129 | }
130 | }
131 |
132 | if tracker.config.tracker_config.clone().users_enabled {
133 | info!("[IMPORT] Importing users to memory {}", args.import_file_users.as_str());
134 | match fs::read(args.import_file_users.as_str()) {
135 | Ok(data) => {
136 | let users: Value = serde_json::from_str(String::from_utf8(data).unwrap().as_str()).unwrap();
137 | for (key, value) in users.as_object().unwrap() {
138 | let user_hash = match hex::decode(key.as_str()) {
139 | Ok(hash_result) => { UserId(<[u8; 20]>::try_from(hash_result[0..20].as_ref()).unwrap()) }
140 | Err(_) => { panic!("[IMPORT] User hash is not hex or invalid!"); }
141 | };
142 | let key_hash = match hex::decode(value["key"].as_str().unwrap()) {
143 | Ok(hash_result) => { UserId(<[u8; 20]>::try_from(hash_result[0..20].as_ref()).unwrap()) }
144 | Err(_) => { panic!("[IMPORT] Key hash is not hex or invalid!"); }
145 | };
146 | let user_id = value["user_id"].as_u64();
147 | let user_uuid = value["user_uuid"].as_str().map(String::from);
148 | let uploaded = match value["uploaded"].as_u64() {
149 | None => { panic!("[IMPORT] 'uploaded' field doesn't exist or is missing!"); }
150 | Some(uploaded) => { uploaded }
151 | };
152 | let downloaded = match value["downloaded"].as_u64() {
153 | None => { panic!("[IMPORT] 'downloaded' field doesn't exist or is missing!"); }
154 | Some(downloaded) => { downloaded }
155 | };
156 | let completed = match value["completed"].as_u64() {
157 | None => { panic!("[IMPORT] 'completed' field doesn't exist or is missing!"); }
158 | Some(completed) => { completed }
159 | };
160 | let updated = match value["updated"].as_u64() {
161 | None => { panic!("[IMPORT] 'updated' field doesn't exist or is missing!"); }
162 | Some(updated) => { updated }
163 | };
164 | let active = match value["active"].as_u64() {
165 | None => { panic!("[IMPORT] 'active' field doesn't exist or is missing!"); }
166 | Some(active) => { active as u8 }
167 | };
168 | let _ = tracker.add_user_update(user_hash, UserEntryItem {
169 | key: key_hash,
170 | user_id,
171 | user_uuid,
172 | uploaded,
173 | downloaded,
174 | completed,
175 | updated,
176 | active,
177 | torrents_active: BTreeMap::new()
178 | }, UpdatesAction::Add);
179 | }
180 | match tracker.save_user_updates(tracker.clone()).await {
181 | Ok(_) => {}
182 | Err(_) => {
183 | panic!("[IMPORT] Unable to save users to the database!");
184 | }
185 | }
186 | }
187 | Err(error) => {
188 | error!("[IMPORT] The users file {} could not be imported!", args.import_file_users.as_str());
189 | panic!("[IMPORT] {}", error)
190 | }
191 | }
192 | }
193 |
194 | info!("[IMPORT] Importing of data completed");
195 | exit(0)
196 | }
197 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_keys.rs:
--------------------------------------------------------------------------------
1 | use std::collections::btree_map::Entry;
2 | use std::collections::BTreeMap;
3 | use std::sync::Arc;
4 | use std::time::{SystemTime, UNIX_EPOCH};
5 | use chrono::{TimeZone, Utc};
6 | use log::{error, info};
7 | use crate::stats::enums::stats_event::StatsEvent;
8 | use crate::tracker::enums::updates_action::UpdatesAction;
9 | use crate::tracker::structs::info_hash::InfoHash;
10 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
11 |
12 | impl TorrentTracker {
13 | #[tracing::instrument(level = "debug")]
14 | pub async fn load_keys(&self, tracker: Arc)
15 | {
16 | if let Ok(keys) = self.sqlx.load_keys(tracker.clone()).await {
17 | info!("Loaded {} keys", keys);
18 | }
19 | }
20 |
21 | #[tracing::instrument(level = "debug")]
22 | pub async fn save_keys(&self, tracker: Arc, keys: BTreeMap) -> Result<(), ()>
23 | {
24 | match self.sqlx.save_keys(tracker.clone(), keys.clone()).await {
25 | Ok(keys_count) => {
26 | info!("[SYNC KEYS] Synced {} keys", keys_count);
27 | Ok(())
28 | }
29 | Err(_) => {
30 | error!("[SYNC KEYS] Unable to sync {} keys", keys.len());
31 | Err(())
32 | }
33 | }
34 | }
35 |
36 | #[tracing::instrument(level = "debug")]
37 | pub fn add_key(&self, hash: InfoHash, timeout: i64) -> bool
38 | {
39 | let map = self.keys.clone();
40 | let mut lock = map.write();
41 | let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
42 | let timeout_unix = timestamp.as_secs() as i64 + timeout;
43 | match lock.entry(hash) {
44 | Entry::Vacant(v) => {
45 | self.update_stats(StatsEvent::Key, 1);
46 | v.insert(timeout_unix);
47 | true
48 | }
49 | Entry::Occupied(mut o) => {
50 | o.insert(timeout_unix);
51 | false
52 | }
53 | }
54 | }
55 |
56 | #[tracing::instrument(level = "debug")]
57 | pub fn get_key(&self, hash: InfoHash) -> Option<(InfoHash, i64)>
58 | {
59 | let map = self.keys.clone();
60 | let lock = map.read_recursive();
61 | lock.get(&hash).map(|data| (hash, *data))
62 | }
63 |
64 | #[tracing::instrument(level = "debug")]
65 | pub fn get_keys(&self) -> BTreeMap
66 | {
67 | let map = self.keys.clone();
68 | let lock = map.read_recursive();
69 | lock.clone()
70 | }
71 |
72 | #[tracing::instrument(level = "debug")]
73 | pub fn remove_key(&self, hash: InfoHash) -> bool
74 | {
75 | let map = self.keys.clone();
76 | let mut lock = map.write();
77 | match lock.remove(&hash) {
78 | None => {
79 | false
80 | }
81 | Some(_) => {
82 | self.update_stats(StatsEvent::Key, -1);
83 | true
84 | }
85 | }
86 | }
87 |
88 | #[tracing::instrument(level = "debug")]
89 | pub fn check_key(&self, hash: InfoHash) -> bool
90 | {
91 | let map = self.keys.clone();
92 | let lock = map.read_recursive();
93 | match lock.get(&hash) {
94 | None => {
95 | false
96 | }
97 | Some(key) => {
98 | let time = SystemTime::from(Utc.timestamp_opt(*key, 0).unwrap());
99 | match time.duration_since(SystemTime::now()) {
100 | Ok(_) => {
101 | true
102 | }
103 | Err(_) => {
104 | false
105 | }
106 | }
107 | }
108 | }
109 | }
110 |
111 | #[tracing::instrument(level = "debug")]
112 | pub fn clear_keys(&self)
113 | {
114 | let map = self.keys.clone();
115 | let mut lock = map.write();
116 | lock.clear();
117 | self.set_stats(StatsEvent::Key, 0);
118 | }
119 |
120 | #[tracing::instrument(level = "debug")]
121 | pub fn clean_keys(&self)
122 | {
123 | let keys = self.get_keys();
124 | for (hash, key_time) in keys.iter() {
125 | let time = SystemTime::from(Utc.timestamp_opt(*key_time, 0).unwrap());
126 | if time.duration_since(SystemTime::now()).is_err() {
127 | self.remove_key(*hash);
128 | }
129 | }
130 | }
131 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_keys_updates.rs:
--------------------------------------------------------------------------------
1 | use std::collections::{BTreeMap, HashMap};
2 | use std::collections::hash_map::Entry;
3 | use std::sync::Arc;
4 | use std::time::SystemTime;
5 | use log::{error, info};
6 | use crate::stats::enums::stats_event::StatsEvent;
7 | use crate::tracker::enums::updates_action::UpdatesAction;
8 | use crate::tracker::structs::info_hash::InfoHash;
9 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
10 |
11 | impl TorrentTracker {
12 | #[tracing::instrument(level = "debug")]
13 | pub fn add_key_update(&self, info_hash: InfoHash, timeout: i64, updates_action: UpdatesAction) -> bool
14 | {
15 | let map = self.keys_updates.clone();
16 | let mut lock = map.write();
17 | match lock.insert(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos(), (info_hash, timeout, updates_action)) {
18 | None => {
19 | self.update_stats(StatsEvent::KeyUpdates, 1);
20 | true
21 | }
22 | Some(_) => {
23 | false
24 | }
25 | }
26 | }
27 |
28 | #[tracing::instrument(level = "debug")]
29 | pub fn get_key_updates(&self) -> HashMap
30 | {
31 | let map = self.keys_updates.clone();
32 | let lock = map.read_recursive();
33 | lock.clone()
34 | }
35 |
36 | #[tracing::instrument(level = "debug")]
37 | pub fn remove_key_update(&self, timestamp: &u128) -> bool
38 | {
39 | let map = self.keys_updates.clone();
40 | let mut lock = map.write();
41 | match lock.remove(timestamp) {
42 | None => { false }
43 | Some(_) => {
44 | self.update_stats(StatsEvent::KeyUpdates, -1);
45 | true
46 | }
47 | }
48 | }
49 |
50 | #[tracing::instrument(level = "debug")]
51 | pub fn clear_key_updates(&self)
52 | {
53 | let map = self.keys_updates.clone();
54 | let mut lock = map.write();
55 | lock.clear();
56 | self.set_stats(StatsEvent::KeyUpdates, 0);
57 | }
58 |
59 | #[tracing::instrument(level = "debug")]
60 | pub async fn save_key_updates(&self, torrent_tracker: Arc) -> Result<(), ()>
61 | {
62 | let mut mapping: HashMap = HashMap::new();
63 | for (timestamp, (info_hash, timeout, updates_action)) in self.get_key_updates().iter() {
64 | match mapping.entry(*info_hash) {
65 | Entry::Occupied(mut o) => {
66 | o.insert((o.get().0, *timeout, *updates_action));
67 | self.remove_key_update(timestamp);
68 | }
69 | Entry::Vacant(v) => {
70 | v.insert((*timestamp, *timeout, *updates_action));
71 | }
72 | }
73 | }
74 | match self.save_keys(torrent_tracker.clone(), mapping.clone().into_iter().map(|(info_hash, (_, timeout, updates_action))| {
75 | (info_hash, (timeout, updates_action))
76 | }).collect::>()).await {
77 | Ok(_) => {
78 | info!("[SYNC KEY UPDATES] Synced {} keys", mapping.len());
79 | for (_, (timestamp, _, _)) in mapping.into_iter() {
80 | self.remove_key_update(×tamp);
81 | }
82 | Ok(())
83 | }
84 | Err(_) => {
85 | error!("[SYNC KEY UPDATES] Unable to sync {} keys", mapping.len());
86 | Err(())
87 | }
88 | }
89 | }
90 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_torrents.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use std::collections::btree_map::Entry;
3 | use std::sync::Arc;
4 | use log::{error, info};
5 | use crate::stats::enums::stats_event::StatsEvent;
6 | use crate::tracker::enums::updates_action::UpdatesAction;
7 | use crate::tracker::structs::info_hash::InfoHash;
8 | use crate::tracker::structs::torrent_entry::TorrentEntry;
9 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
10 |
11 | impl TorrentTracker {
12 | #[tracing::instrument(level = "debug")]
13 | pub async fn load_torrents(&self, tracker: Arc)
14 | {
15 | if let Ok((torrents, completes)) = self.sqlx.load_torrents(tracker.clone()).await {
16 | info!("Loaded {} torrents with {} completes", torrents, completes);
17 | }
18 | }
19 |
20 | #[tracing::instrument(level = "debug")]
21 | pub async fn save_torrents(&self, tracker: Arc, torrents: BTreeMap) -> Result<(), ()>
22 | {
23 | match self.sqlx.save_torrents(tracker.clone(), torrents.clone()).await {
24 | Ok(_) => {
25 | info!("[SYNC TORRENTS] Synced {} torrents", torrents.len());
26 | Ok(())
27 | }
28 | Err(_) => {
29 | error!("[SYNC TORRENTS] Unable to sync {} torrents", torrents.len());
30 | Err(())
31 | }
32 | }
33 | }
34 |
35 | #[tracing::instrument(level = "debug")]
36 | pub async fn reset_seeds_peers(&self, tracker: Arc) -> bool
37 | {
38 | match self.sqlx.reset_seeds_peers(tracker.clone()).await {
39 | Ok(_) => {
40 | info!("[RESET SEEDS PEERS] Completed");
41 | true
42 | }
43 | Err(_) => {
44 | error!("[RESET SEEDS PEERS] Unable to reset the seeds and peers");
45 | false
46 | }
47 | }
48 | }
49 |
50 | #[tracing::instrument(level = "debug")]
51 | pub fn add_torrent(&self, info_hash: InfoHash, torrent_entry: TorrentEntry) -> (TorrentEntry, bool)
52 | {
53 | let shard = self.torrents_sharding.clone().get_shard(info_hash.0[0]).unwrap();
54 | let mut lock = shard.write();
55 | match lock.entry(info_hash) {
56 | Entry::Vacant(v) => {
57 | self.update_stats(StatsEvent::Torrents, 1);
58 | self.update_stats(StatsEvent::Completed, torrent_entry.completed as i64);
59 | self.update_stats(StatsEvent::Seeds, torrent_entry.seeds.len() as i64);
60 | self.update_stats(StatsEvent::Peers, torrent_entry.peers.len() as i64);
61 | (v.insert(torrent_entry).clone(), true)
62 | }
63 | Entry::Occupied(mut o) => {
64 | self.update_stats(StatsEvent::Completed, 0i64 - o.get().completed as i64);
65 | self.update_stats(StatsEvent::Completed, torrent_entry.completed as i64);
66 | o.get_mut().completed = torrent_entry.completed;
67 | self.update_stats(StatsEvent::Seeds, 0i64 - o.get().seeds.len() as i64);
68 | self.update_stats(StatsEvent::Seeds, torrent_entry.seeds.len() as i64);
69 | o.get_mut().seeds = torrent_entry.seeds.clone();
70 | self.update_stats(StatsEvent::Peers, 0i64 - o.get().peers.len() as i64);
71 | self.update_stats(StatsEvent::Peers, torrent_entry.peers.len() as i64);
72 | o.get_mut().peers = torrent_entry.peers.clone();
73 | o.get_mut().updated = torrent_entry.updated;
74 | (torrent_entry.clone(), false)
75 | }
76 | }
77 | }
78 |
79 | #[tracing::instrument(level = "debug")]
80 | pub fn add_torrents(&self, hashes: BTreeMap) -> BTreeMap
81 | {
82 | let mut returned_data = BTreeMap::new();
83 | for (info_hash, torrent_entry) in hashes.iter() {
84 | returned_data.insert(*info_hash, self.add_torrent(*info_hash, torrent_entry.clone()));
85 | }
86 | returned_data
87 | }
88 |
89 | #[tracing::instrument(level = "debug")]
90 | pub fn get_torrent(&self, info_hash: InfoHash) -> Option
91 | {
92 | let shard = self.torrents_sharding.clone().get_shard(info_hash.0[0]).unwrap();
93 | let lock = shard.read_recursive();
94 | lock.get(&info_hash).map(|torrent| TorrentEntry {
95 | seeds: torrent.seeds.clone(),
96 | peers: torrent.peers.clone(),
97 | completed: torrent.completed,
98 | updated: torrent.updated
99 | })
100 | }
101 |
102 | #[tracing::instrument(level = "debug")]
103 | pub fn get_torrents(&self, hashes: Vec) -> BTreeMap>
104 | {
105 | let mut returned_data = BTreeMap::new();
106 | for info_hash in hashes.iter() {
107 | returned_data.insert(*info_hash, self.get_torrent(*info_hash));
108 | }
109 | returned_data
110 | }
111 |
112 | #[tracing::instrument(level = "debug")]
113 | pub fn remove_torrent(&self, info_hash: InfoHash) -> Option
114 | {
115 | if !self.torrents_sharding.contains_torrent(info_hash) { return None; }
116 | let shard = self.torrents_sharding.clone().get_shard(info_hash.0[0]).unwrap();
117 | let mut lock = shard.write();
118 | match lock.remove(&info_hash) {
119 | None => { None }
120 | Some(data) => {
121 | self.update_stats(StatsEvent::Torrents, -1);
122 | self.update_stats(StatsEvent::Seeds, data.seeds.len() as i64);
123 | self.update_stats(StatsEvent::Peers, data.peers.len() as i64);
124 | Some(data)
125 | }
126 | }
127 | }
128 |
129 | #[tracing::instrument(level = "debug")]
130 | pub fn remove_torrents(&self, hashes: Vec) -> BTreeMap>
131 | {
132 | let mut returned_data = BTreeMap::new();
133 | for info_hash in hashes.iter() {
134 | returned_data.insert(*info_hash, match self.remove_torrent(*info_hash) {
135 | None => { None }
136 | Some(torrent) => {
137 | self.update_stats(StatsEvent::Torrents, -1);
138 | self.update_stats(StatsEvent::Seeds, torrent.seeds.len() as i64);
139 | self.update_stats(StatsEvent::Peers, torrent.peers.len() as i64);
140 | Some(torrent)
141 | }
142 | });
143 | }
144 | returned_data
145 | }
146 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_torrents_blacklist.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 | use log::{error, info};
3 | use crate::stats::enums::stats_event::StatsEvent;
4 | use crate::tracker::enums::updates_action::UpdatesAction;
5 | use crate::tracker::structs::info_hash::InfoHash;
6 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
7 |
8 | impl TorrentTracker {
9 | #[tracing::instrument(level = "debug")]
10 | pub async fn load_blacklist(&self, tracker: Arc)
11 | {
12 | if let Ok(blacklist) = self.sqlx.load_blacklist(tracker.clone()).await {
13 | info!("Loaded {} blacklists", blacklist);
14 | }
15 | }
16 |
17 | #[tracing::instrument(level = "debug")]
18 | pub async fn save_blacklist(&self, tracker: Arc, hashes: Vec<(InfoHash, UpdatesAction)>) -> Result<(), ()>
19 | {
20 | match self.sqlx.save_blacklist(tracker.clone(), hashes.clone()).await {
21 | Ok(_) => {
22 | info!("[SYNC BLACKLIST] Synced {} blacklists", hashes.len());
23 | Ok(())
24 | }
25 | Err(_) => {
26 | error!("[SYNC BLACKLIST] Unable to sync {} blacklists", hashes.len());
27 | Err(())
28 | }
29 | }
30 | }
31 |
32 | #[tracing::instrument(level = "debug")]
33 | pub fn add_blacklist(&self, info_hash: InfoHash) -> bool
34 | {
35 | let map = self.torrents_blacklist.clone();
36 | let mut lock = map.write();
37 | if !lock.contains(&info_hash) {
38 | lock.push(info_hash);
39 | self.update_stats(StatsEvent::Blacklist, 1);
40 | return true;
41 | }
42 | false
43 | }
44 |
45 | #[tracing::instrument(level = "debug")]
46 | pub fn get_blacklist(&self) -> Vec
47 | {
48 | let map = self.torrents_blacklist.clone();
49 | let lock = map.read_recursive();
50 | lock.clone()
51 | }
52 |
53 | #[tracing::instrument(level = "debug")]
54 | pub fn check_blacklist(&self, info_hash: InfoHash) -> bool
55 | {
56 | let map = self.torrents_blacklist.clone();
57 | let lock = map.read_recursive();
58 | if lock.contains(&info_hash) {
59 | return true;
60 | }
61 | false
62 | }
63 |
64 | #[tracing::instrument(level = "debug")]
65 | pub fn remove_blacklist(&self, info_hash: InfoHash) -> bool
66 | {
67 | let map = self.torrents_blacklist.clone();
68 | let mut lock = map.write();
69 | match lock.iter().position(|r| *r == info_hash) {
70 | None => { false }
71 | Some(index) => {
72 | lock.remove(index);
73 | self.update_stats(StatsEvent::Blacklist, -1);
74 | true
75 | }
76 | }
77 | }
78 |
79 | #[tracing::instrument(level = "debug")]
80 | pub fn clear_blacklist(&self)
81 | {
82 | let map = self.torrents_blacklist.clone();
83 | let mut lock = map.write();
84 | lock.clear();
85 | }
86 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_torrents_blacklist_updates.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::collections::hash_map::Entry;
3 | use std::sync::Arc;
4 | use std::time::SystemTime;
5 | use log::{error, info};
6 | use crate::stats::enums::stats_event::StatsEvent;
7 | use crate::tracker::enums::updates_action::UpdatesAction;
8 | use crate::tracker::structs::info_hash::InfoHash;
9 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
10 |
11 | impl TorrentTracker {
12 | #[tracing::instrument(level = "debug")]
13 | pub fn add_blacklist_update(&self, info_hash: InfoHash, updates_action: UpdatesAction) -> bool
14 | {
15 | let map = self.torrents_blacklist_updates.clone();
16 | let mut lock = map.write();
17 | match lock.insert(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos(), (info_hash, updates_action)) {
18 | None => {
19 | self.update_stats(StatsEvent::BlacklistUpdates, 1);
20 | true
21 | }
22 | Some(_) => {
23 | false
24 | }
25 | }
26 | }
27 |
28 | #[tracing::instrument(level = "debug")]
29 | pub fn add_blacklist_updates(&self, hashes: Vec<(InfoHash, UpdatesAction)>) -> Vec<(InfoHash, bool)>
30 | {
31 | let mut returned_data = Vec::new();
32 | for (info_hash, updates_action) in hashes {
33 | returned_data.push((info_hash, self.add_blacklist_update(info_hash, updates_action)));
34 | }
35 | returned_data
36 | }
37 |
38 | #[tracing::instrument(level = "debug")]
39 | pub fn get_blacklist_updates(&self) -> HashMap
40 | {
41 | let map = self.torrents_blacklist_updates.clone();
42 | let lock = map.read_recursive();
43 | lock.clone()
44 | }
45 |
46 | #[tracing::instrument(level = "debug")]
47 | pub fn remove_blacklist_update(&self, timestamp: &u128) -> bool
48 | {
49 | let map = self.torrents_blacklist_updates.clone();
50 | let mut lock = map.write();
51 | match lock.remove(timestamp) {
52 | None => { false }
53 | Some(_) => {
54 | self.update_stats(StatsEvent::BlacklistUpdates, -1);
55 | true
56 | }
57 | }
58 | }
59 |
60 | #[tracing::instrument(level = "debug")]
61 | pub fn clear_blacklist_updates(&self)
62 | {
63 | let map = self.torrents_blacklist_updates.clone();
64 | let mut lock = map.write();
65 | lock.clear();
66 | self.set_stats(StatsEvent::BlacklistUpdates, 0);
67 | }
68 |
69 | #[tracing::instrument(level = "debug")]
70 | pub async fn save_blacklist_updates(&self, torrent_tracker: Arc) -> Result<(), ()>
71 | {
72 | let mut mapping: HashMap = HashMap::new();
73 | for (timestamp, (info_hash, updates_action)) in self.get_blacklist_updates().iter() {
74 | match mapping.entry(*info_hash) {
75 | Entry::Occupied(mut o) => {
76 | o.insert((o.get().0, *updates_action));
77 | self.remove_blacklist_update(timestamp);
78 | }
79 | Entry::Vacant(v) => {
80 | v.insert((*timestamp, *updates_action));
81 | }
82 | }
83 | }
84 | match self.save_blacklist(torrent_tracker.clone(), mapping.clone().into_iter().map(|(info_hash, (_, updates_action))| {
85 | (info_hash, updates_action)
86 | }).collect::>()).await {
87 | Ok(_) => {
88 | info!("[SYNC BLACKLIST UPDATES] Synced {} blacklists", mapping.len());
89 | for (_, (timestamp, _)) in mapping.into_iter() {
90 | self.remove_blacklist_update(×tamp);
91 | }
92 | Ok(())
93 | }
94 | Err(_) => {
95 | error!("[SYNC BLACKLIST UPDATES] Unable to sync {} blacklists", mapping.len());
96 | Err(())
97 | }
98 | }
99 | }
100 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_torrents_updates.rs:
--------------------------------------------------------------------------------
1 | use std::collections::{BTreeMap, HashMap};
2 | use std::collections::hash_map::Entry;
3 | use std::sync::Arc;
4 | use std::time::SystemTime;
5 | use log::{error, info};
6 | use crate::stats::enums::stats_event::StatsEvent;
7 | use crate::tracker::enums::updates_action::UpdatesAction;
8 | use crate::tracker::structs::info_hash::InfoHash;
9 | use crate::tracker::structs::torrent_entry::TorrentEntry;
10 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
11 |
12 | impl TorrentTracker {
13 | #[tracing::instrument(level = "debug")]
14 | pub fn add_torrent_update(&self, info_hash: InfoHash, torrent_entry: TorrentEntry, updates_action: UpdatesAction) -> bool
15 | {
16 | let map = self.torrents_updates.clone();
17 | let mut lock = map.write();
18 | match lock.insert(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos(), (info_hash, torrent_entry.clone(), updates_action)) {
19 | None => {
20 | self.update_stats(StatsEvent::TorrentsUpdates, 1);
21 | true
22 | }
23 | Some(_) => {
24 | false
25 | }
26 | }
27 | }
28 |
29 | #[tracing::instrument(level = "debug")]
30 | pub fn add_torrent_updates(&self, hashes: HashMap) -> BTreeMap
31 | {
32 | let mut returned_data = BTreeMap::new();
33 | for (timestamp, (info_hash, torrent_entry, updates_action)) in hashes.iter() {
34 | returned_data.insert(*info_hash, self.add_torrent_update(*info_hash, torrent_entry.clone(), *updates_action));
35 | let _ = self.remove_torrent_update(timestamp);
36 | }
37 | returned_data
38 | }
39 |
40 | #[tracing::instrument(level = "debug")]
41 | pub fn get_torrent_updates(&self) -> HashMap
42 | {
43 | let map = self.torrents_updates.clone();
44 | let lock = map.read_recursive();
45 | lock.clone()
46 | }
47 |
48 | #[tracing::instrument(level = "debug")]
49 | pub fn remove_torrent_update(&self, timestamp: &u128) -> bool
50 | {
51 | let map = self.torrents_updates.clone();
52 | let mut lock = map.write();
53 | match lock.remove(timestamp) {
54 | None => { false }
55 | Some(_) => {
56 | self.update_stats(StatsEvent::TorrentsUpdates, -1);
57 | true
58 | }
59 | }
60 | }
61 |
62 | #[tracing::instrument(level = "debug")]
63 | pub fn clear_torrent_updates(&self)
64 | {
65 | let map = self.torrents_updates.clone();
66 | let mut lock = map.write();
67 | lock.clear();
68 | self.set_stats(StatsEvent::TorrentsUpdates, 0);
69 | }
70 |
71 | #[tracing::instrument(level = "debug")]
72 | pub async fn save_torrent_updates(&self, torrent_tracker: Arc) -> Result<(), ()>
73 | {
74 | let mut mapping: HashMap = HashMap::new();
75 | for (timestamp, (info_hash, torrent_entry, updates_action)) in self.get_torrent_updates().iter() {
76 | match mapping.entry(*info_hash) {
77 | Entry::Occupied(mut o) => {
78 | o.insert((o.get().0, torrent_entry.clone(), *updates_action));
79 | self.remove_torrent_update(timestamp);
80 | }
81 | Entry::Vacant(v) => {
82 | v.insert((*timestamp, torrent_entry.clone(), *updates_action));
83 | }
84 | }
85 | }
86 | match self.save_torrents(torrent_tracker.clone(), mapping.clone().into_iter().map(|(info_hash, (_, torrent_entry, updates_action))| {
87 | (info_hash, (torrent_entry.clone(), updates_action))
88 | }).collect::>()).await {
89 | Ok(_) => {
90 | info!("[SYNC TORRENT UPDATES] Synced {} torrents", mapping.len());
91 | for (_, (timestamp, _, _)) in mapping.into_iter() {
92 | self.remove_torrent_update(×tamp);
93 | }
94 | Ok(())
95 | }
96 | Err(_) => {
97 | error!("[SYNC TORRENT UPDATES] Unable to sync {} torrents", mapping.len());
98 | Err(())
99 | }
100 | }
101 | }
102 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_torrents_whitelist.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 | use log::{error, info};
3 | use crate::stats::enums::stats_event::StatsEvent;
4 | use crate::tracker::enums::updates_action::UpdatesAction;
5 | use crate::tracker::structs::info_hash::InfoHash;
6 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
7 |
8 | impl TorrentTracker {
9 | #[tracing::instrument(level = "debug")]
10 | pub async fn load_whitelist(&self, tracker: Arc)
11 | {
12 | if let Ok(whitelist) = self.sqlx.load_whitelist(tracker.clone()).await {
13 | info!("Loaded {} whitelists", whitelist);
14 | }
15 | }
16 |
17 | #[tracing::instrument(level = "debug")]
18 | pub async fn save_whitelist(&self, tracker: Arc, hashes: Vec<(InfoHash, UpdatesAction)>) -> Result<(), ()>
19 | {
20 | match self.sqlx.save_whitelist(tracker.clone(), hashes.clone()).await {
21 | Ok(_) => {
22 | info!("[SYNC WHITELIST] Synced {} whitelists", hashes.len());
23 | Ok(())
24 | }
25 | Err(_) => {
26 | error!("[SYNC WHITELIST] Unable to sync {} whitelists", hashes.len());
27 | Err(())
28 | }
29 | }
30 | }
31 |
32 | #[tracing::instrument(level = "debug")]
33 | pub fn add_whitelist(&self, info_hash: InfoHash) -> bool
34 | {
35 | let map = self.torrents_whitelist.clone();
36 | let mut lock = map.write();
37 | if !lock.contains(&info_hash) {
38 | lock.push(info_hash);
39 | self.update_stats(StatsEvent::Whitelist, 1);
40 | return true;
41 | }
42 | false
43 | }
44 |
45 | #[tracing::instrument(level = "debug")]
46 | pub fn get_whitelist(&self) -> Vec
47 | {
48 | let map = self.torrents_whitelist.clone();
49 | let lock = map.read_recursive();
50 | lock.clone()
51 | }
52 |
53 | #[tracing::instrument(level = "debug")]
54 | pub fn check_whitelist(&self, info_hash: InfoHash) -> bool
55 | {
56 | let map = self.torrents_whitelist.clone();
57 | let lock = map.read_recursive();
58 | if lock.contains(&info_hash) {
59 | return true;
60 | }
61 | false
62 | }
63 |
64 | #[tracing::instrument(level = "debug")]
65 | pub fn remove_whitelist(&self, info_hash: InfoHash) -> bool
66 | {
67 | let map = self.torrents_whitelist.clone();
68 | let mut lock = map.write();
69 | match lock.iter().position(|r| *r == info_hash) {
70 | None => { false }
71 | Some(index) => {
72 | lock.remove(index);
73 | self.update_stats(StatsEvent::Whitelist, -1);
74 | true
75 | }
76 | }
77 | }
78 |
79 | #[tracing::instrument(level = "debug")]
80 | pub fn clear_whitelist(&self)
81 | {
82 | let map = self.torrents_whitelist.clone();
83 | let mut lock = map.write();
84 | lock.clear();
85 | }
86 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_torrents_whitelist_updates.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::collections::hash_map::Entry;
3 | use std::sync::Arc;
4 | use std::time::SystemTime;
5 | use log::{error, info};
6 | use crate::stats::enums::stats_event::StatsEvent;
7 | use crate::tracker::enums::updates_action::UpdatesAction;
8 | use crate::tracker::structs::info_hash::InfoHash;
9 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
10 |
11 | impl TorrentTracker {
12 | #[tracing::instrument(level = "debug")]
13 | pub fn add_whitelist_update(&self, info_hash: InfoHash, updates_action: UpdatesAction) -> bool
14 | {
15 | let map = self.torrents_whitelist_updates.clone();
16 | let mut lock = map.write();
17 | match lock.insert(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos(), (info_hash, updates_action)) {
18 | None => {
19 | self.update_stats(StatsEvent::WhitelistUpdates, 1);
20 | true
21 | }
22 | Some(_) => {
23 | false
24 | }
25 | }
26 | }
27 |
28 | #[tracing::instrument(level = "debug")]
29 | pub fn add_whitelist_updates(&self, hashes: Vec<(InfoHash, UpdatesAction)>) -> Vec<(InfoHash, bool)>
30 | {
31 | let mut returned_data = Vec::new();
32 | for (info_hash, updates_action) in hashes {
33 | returned_data.push((info_hash, self.add_whitelist_update(info_hash, updates_action)));
34 | }
35 | returned_data
36 | }
37 |
38 | #[tracing::instrument(level = "debug")]
39 | pub fn get_whitelist_updates(&self) -> HashMap
40 | {
41 | let map = self.torrents_whitelist_updates.clone();
42 | let lock = map.read_recursive();
43 | lock.clone()
44 | }
45 |
46 | #[tracing::instrument(level = "debug")]
47 | pub fn remove_whitelist_update(&self, timestamp: &u128) -> bool
48 | {
49 | let map = self.torrents_whitelist_updates.clone();
50 | let mut lock = map.write();
51 | match lock.remove(timestamp) {
52 | None => { false }
53 | Some(_) => {
54 | self.update_stats(StatsEvent::WhitelistUpdates, -1);
55 | true
56 | }
57 | }
58 | }
59 |
60 | #[tracing::instrument(level = "debug")]
61 | pub fn clear_whitelist_updates(&self)
62 | {
63 | let map = self.torrents_whitelist_updates.clone();
64 | let mut lock = map.write();
65 | lock.clear();
66 | self.set_stats(StatsEvent::WhitelistUpdates, 0);
67 | }
68 |
69 | #[tracing::instrument(level = "debug")]
70 | pub async fn save_whitelist_updates(&self, torrent_tracker: Arc) -> Result<(), ()>
71 | {
72 | let mut mapping: HashMap = HashMap::new();
73 | for (timestamp, (info_hash, updates_action)) in self.get_whitelist_updates().iter() {
74 | match mapping.entry(*info_hash) {
75 | Entry::Occupied(mut o) => {
76 | o.insert((o.get().0, *updates_action));
77 | self.remove_whitelist_update(timestamp);
78 | }
79 | Entry::Vacant(v) => {
80 | v.insert((*timestamp, *updates_action));
81 | }
82 | }
83 | }
84 | match self.save_whitelist(torrent_tracker.clone(), mapping.clone().into_iter().map(|(info_hash, (_, updates_action))| {
85 | (info_hash, updates_action)
86 | }).collect::>()).await {
87 | Ok(_) => {
88 | info!("[SYNC WHITELIST UPDATES] Synced {} whitelists", mapping.len());
89 | for (_, (timestamp, _)) in mapping.into_iter() {
90 | self.remove_whitelist_update(×tamp);
91 | }
92 | Ok(())
93 | }
94 | Err(_) => {
95 | error!("[SYNC WHITELIST UPDATES] Unable to sync {} whitelists", mapping.len());
96 | Err(())
97 | }
98 | }
99 | }
100 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_users.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use std::collections::btree_map::Entry;
3 | use std::sync::Arc;
4 | use std::time::{Duration, SystemTime, UNIX_EPOCH};
5 | use chrono::{TimeZone, Utc};
6 | use log::{error, info};
7 | use crate::stats::enums::stats_event::StatsEvent;
8 | use crate::tracker::enums::updates_action::UpdatesAction;
9 | use crate::tracker::structs::info_hash::InfoHash;
10 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
11 | use crate::tracker::structs::user_entry_item::UserEntryItem;
12 | use crate::tracker::structs::user_id::UserId;
13 |
14 | impl TorrentTracker {
15 | #[tracing::instrument(level = "debug")]
16 | pub async fn load_users(&self, tracker: Arc)
17 | {
18 | if let Ok(users) = self.sqlx.load_users(tracker.clone()).await {
19 | info!("Loaded {} users", users);
20 | }
21 | }
22 |
23 | #[tracing::instrument(level = "debug")]
24 | pub async fn save_users(&self, tracker: Arc, users: BTreeMap) -> Result<(), ()>
25 | {
26 | match self.sqlx.save_users(tracker.clone(), users.clone()).await {
27 | Ok(_) => {
28 | info!("[SYNC USERS] Synced {} users", users.len());
29 | Ok(())
30 | }
31 | Err(_) => {
32 | error!("[SYNC USERS] Unable to sync {} users", users.len());
33 | Err(())
34 | }
35 | }
36 | }
37 |
38 | #[tracing::instrument(level = "debug")]
39 | pub fn add_user(&self, user_id: UserId, user_entry_item: UserEntryItem) -> bool
40 | {
41 | let map = self.users.clone();
42 | let mut lock = map.write();
43 | match lock.entry(user_id) {
44 | Entry::Vacant(v) => {
45 | self.update_stats(StatsEvent::Users, 1);
46 | v.insert(user_entry_item);
47 | true
48 | }
49 | Entry::Occupied(mut o) => {
50 | o.insert(user_entry_item);
51 | false
52 | }
53 | }
54 | }
55 |
56 | #[tracing::instrument(level = "debug")]
57 | pub fn add_user_active_torrent(&self, user_id: UserId, info_hash: InfoHash) -> bool
58 | {
59 | let map = self.users.clone();
60 | let mut lock = map.write();
61 | match lock.entry(user_id) {
62 | Entry::Vacant(_) => {
63 | false
64 | }
65 | Entry::Occupied(mut o) => {
66 | let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
67 | let timestamp_unix = timestamp.as_secs();
68 | o.get_mut().torrents_active.insert(info_hash, timestamp_unix);
69 | true
70 | }
71 | }
72 | }
73 |
74 | #[tracing::instrument(level = "debug")]
75 | pub fn get_user(&self, id: UserId) -> Option
76 | {
77 | let map = self.users.clone();
78 | let lock = map.read_recursive();
79 | lock.get(&id).cloned()
80 | }
81 |
82 | #[tracing::instrument(level = "debug")]
83 | pub fn get_users(&self) -> BTreeMap
84 | {
85 | let map = self.users.clone();
86 | let lock = map.read_recursive();
87 | lock.clone()
88 | }
89 |
90 | #[tracing::instrument(level = "debug")]
91 | pub fn remove_user(&self, user_id: UserId) -> Option
92 | {
93 | let map = self.users.clone();
94 | let mut lock = map.write();
95 | match lock.remove(&user_id) {
96 | None => { None }
97 | Some(data) => {
98 | self.update_stats(StatsEvent::Users, -1);
99 | Some(data)
100 | }
101 | }
102 | }
103 |
104 | #[tracing::instrument(level = "debug")]
105 | pub fn remove_user_active_torrent(&self, user_id: UserId, info_hash: InfoHash) -> bool
106 | {
107 | let map = self.users.clone();
108 | let mut lock = map.write();
109 | match lock.entry(user_id) {
110 | Entry::Vacant(_) => {
111 | false
112 | }
113 | Entry::Occupied(mut o) => {
114 | match o.get_mut().torrents_active.remove(&info_hash) {
115 | None => { false }
116 | Some(_) => { true }
117 | }
118 | }
119 | }
120 | }
121 |
122 | #[tracing::instrument(level = "debug")]
123 | pub fn check_user_key(&self, key: UserId) -> Option
124 | {
125 | let map = self.users.clone();
126 | let lock = map.read_recursive();
127 | for (user_id, user_entry_item) in lock.iter() {
128 | if user_entry_item.key == key {
129 | return Some(*user_id);
130 | }
131 | }
132 | None
133 | }
134 |
135 | #[tracing::instrument(level = "debug")]
136 | pub fn clean_user_active_torrents(&self, peer_timeout: Duration)
137 | {
138 | let mut torrents_cleaned = 0u64;
139 | let mut remove_active_torrents = vec![];
140 | let map = self.users.clone();
141 | let lock = map.read_recursive();
142 | info!("[USERS] Scanning {} users with dead active torrents", lock.len());
143 | for (user_id, user_entry_item) in lock.iter() {
144 | let torrents_active = user_entry_item.torrents_active.clone();
145 | for (info_hash, updated) in torrents_active.iter() {
146 | let time = SystemTime::from(Utc.timestamp_opt(*updated as i64 + peer_timeout.as_secs() as i64, 0).unwrap());
147 | if time.duration_since(SystemTime::now()).is_err() {
148 | remove_active_torrents.push((*user_id, *info_hash));
149 | }
150 | }
151 | }
152 | for (user_id, info_hash) in remove_active_torrents {
153 | self.remove_user_active_torrent(user_id, info_hash);
154 | torrents_cleaned += 1;
155 | }
156 | info!("[USERS] Removed {} active torrents in users", torrents_cleaned);
157 | }
158 | }
--------------------------------------------------------------------------------
/src/tracker/impls/torrent_tracker_users_updates.rs:
--------------------------------------------------------------------------------
1 | use std::collections::{BTreeMap, HashMap};
2 | use std::collections::hash_map::Entry;
3 | use std::sync::Arc;
4 | use std::time::SystemTime;
5 | use log::{error, info};
6 | use crate::stats::enums::stats_event::StatsEvent;
7 | use crate::tracker::enums::updates_action::UpdatesAction;
8 | use crate::tracker::structs::torrent_tracker::TorrentTracker;
9 | use crate::tracker::structs::user_entry_item::UserEntryItem;
10 | use crate::tracker::structs::user_id::UserId;
11 |
12 | impl TorrentTracker {
13 | #[tracing::instrument(level = "debug")]
14 | pub fn add_user_update(&self, user_id: UserId, user_entry_item: UserEntryItem, updates_action: UpdatesAction) -> (UserEntryItem, bool)
15 | {
16 | let map = self.users_updates.clone();
17 | let mut lock = map.write();
18 | match lock.insert(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_nanos(), (user_id, user_entry_item.clone(), updates_action)) {
19 | None => {
20 | self.update_stats(StatsEvent::UsersUpdates, 1);
21 | (user_entry_item, true)
22 | }
23 | Some(_) => {
24 | (user_entry_item, false)
25 | }
26 | }
27 | }
28 |
29 | #[tracing::instrument(level = "debug")]
30 | pub fn get_user_updates(&self) -> HashMap
31 | {
32 | let map = self.users_updates.clone();
33 | let lock = map.read_recursive();
34 | lock.clone()
35 | }
36 |
37 | #[tracing::instrument(level = "debug")]
38 | pub fn remove_user_update(&self, timestamp: &u128) -> bool
39 | {
40 | let map = self.users_updates.clone();
41 | let mut lock = map.write();
42 | match lock.remove(timestamp) {
43 | None => { false }
44 | Some(_) => {
45 | self.update_stats(StatsEvent::UsersUpdates, -1);
46 | true
47 | }
48 | }
49 | }
50 |
51 | #[tracing::instrument(level = "debug")]
52 | pub fn clear_user_updates(&self)
53 | {
54 | let map = self.users_updates.clone();
55 | let mut lock = map.write();
56 | lock.clear();
57 | self.set_stats(StatsEvent::UsersUpdates, 0);
58 | }
59 |
60 | #[tracing::instrument(level = "debug")]
61 | pub async fn save_user_updates(&self, torrent_tracker: Arc) -> Result<(), ()>
62 | {
63 | let mut mapping: HashMap = HashMap::new();
64 | for (timestamp, (user_id, user_entry_item, updates_action)) in self.get_user_updates().iter() {
65 | match mapping.entry(*user_id) {
66 | Entry::Occupied(mut o) => {
67 | o.insert((o.get().0, user_entry_item.clone(), *updates_action));
68 | self.remove_user_update(timestamp);
69 | }
70 | Entry::Vacant(v) => {
71 | v.insert((*timestamp, user_entry_item.clone(), *updates_action));
72 | }
73 | }
74 | }
75 | match self.save_users(torrent_tracker.clone(), mapping.clone().into_iter().map(|(user_id, (_, user_entry_item, updates_action))| {
76 | (user_id, (user_entry_item.clone(), updates_action))
77 | }).collect::>()).await {
78 | Ok(_) => {
79 | info!("[SYNC USER UPDATES] Synced {} users", mapping.len());
80 | for (_, (timestamp, _, _)) in mapping.into_iter() {
81 | self.remove_user_update(×tamp);
82 | }
83 | Ok(())
84 | }
85 | Err(_) => {
86 | error!("[SYNC USER UPDATES] Unable to sync {} users", mapping.len());
87 | Err(())
88 | }
89 | }
90 | }
91 | }
--------------------------------------------------------------------------------
/src/tracker/impls/user_id.rs:
--------------------------------------------------------------------------------
1 | use std::fmt;
2 | use std::fmt::Formatter;
3 | use crate::common::common::bin2hex;
4 | use crate::tracker::structs::user_id::UserId;
5 | use crate::tracker::structs::user_id_visitor::UserIdVisitor;
6 |
impl fmt::Display for UserId {
    // Writes the 20 raw bytes as hex via the shared `bin2hex` helper.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        bin2hex(&self.0, f)
    }
}
12 |
13 | impl std::str::FromStr for UserId {
14 | type Err = binascii::ConvertError;
15 |
16 | fn from_str(s: &str) -> Result {
17 | let mut i = Self([0u8; 20]);
18 | if s.len() != 40 {
19 | return Err(binascii::ConvertError::InvalidInputLength);
20 | }
21 | binascii::hex2bin(s.as_bytes(), &mut i.0)?;
22 | Ok(i)
23 | }
24 | }
25 |
26 | impl From<&[u8]> for UserId {
27 | fn from(data: &[u8]) -> UserId {
28 | assert_eq!(data.len(), 20);
29 | let mut ret = UserId([0u8; 20]);
30 | ret.0.clone_from_slice(data);
31 | ret
32 | }
33 | }
34 |
35 | impl From<[u8; 20]> for UserId {
36 | fn from(data: [u8; 20]) -> Self {
37 | UserId(data)
38 | }
39 | }
40 |
41 | impl serde::ser::Serialize for UserId {
42 | fn serialize(&self, serializer: S) -> Result {
43 | let mut buffer = [0u8; 40];
44 | let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap();
45 | let str_out = std::str::from_utf8(bytes_out).unwrap();
46 | serializer.serialize_str(str_out)
47 | }
48 | }
49 |
50 | impl<'de> serde::de::Deserialize<'de> for UserId {
51 | fn deserialize>(des: D) -> Result {
52 | des.deserialize_str(UserIdVisitor)
53 | }
54 | }
--------------------------------------------------------------------------------
/src/tracker/impls/user_id_visitor.rs:
--------------------------------------------------------------------------------
1 | use std::fmt;
2 | use std::fmt::Formatter;
3 | use crate::tracker::structs::user_id::UserId;
4 | use crate::tracker::structs::user_id_visitor::UserIdVisitor;
5 |
6 | impl serde::de::Visitor<'_> for UserIdVisitor {
7 | type Value = UserId;
8 |
9 | fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
10 | write!(formatter, "a 40 character long hash")
11 | }
12 |
13 | fn visit_str(self, v: &str) -> Result {
14 | if v.len() != 40 {
15 | return Err(serde::de::Error::invalid_value(
16 | serde::de::Unexpected::Str(v),
17 | &"expected a 40 character long string",
18 | ));
19 | }
20 |
21 | let mut res = UserId([0u8; 20]);
22 |
23 | if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() {
24 | Err(serde::de::Error::invalid_value(
25 | serde::de::Unexpected::Str(v),
26 | &"expected a hexadecimal string",
27 | ))
28 | } else {
29 | Ok(res)
30 | }
31 | }
32 | }
--------------------------------------------------------------------------------
/src/tracker/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod enums;
2 | pub mod impls;
3 | pub mod structs;
4 | pub mod types;
5 | pub mod tests;
--------------------------------------------------------------------------------
/src/tracker/structs.rs:
--------------------------------------------------------------------------------
1 | pub mod torrent_tracker;
2 | pub mod announce_query_request;
3 | pub mod info_hash;
4 | pub mod info_hash_visitor;
5 | pub mod peer_id;
6 | pub mod peer_id_visitor;
7 | pub mod scrape_query_request;
8 | pub mod torrent_entry;
9 | pub mod torrent_peer;
10 | pub mod user_entry_item;
11 | pub mod user_id;
12 | pub mod user_id_visitor;
13 | pub mod torrent_peers;
14 | pub mod torrent_sharding;
--------------------------------------------------------------------------------
/src/tracker/structs/announce_query_request.rs:
--------------------------------------------------------------------------------
1 | use std::net::IpAddr;
2 | use serde::Deserialize;
3 | use crate::tracker::enums::announce_event::AnnounceEvent;
4 | use crate::tracker::structs::info_hash::InfoHash;
5 | use crate::tracker::structs::peer_id::PeerId;
6 |
/// A parsed announce request from a peer (info-hash, peer identity,
/// transfer counters, and the address the request arrived from).
#[derive(Deserialize, Clone, Debug)]
#[allow(dead_code)]
pub struct AnnounceQueryRequest {
    pub(crate) info_hash: InfoHash,
    pub(crate) peer_id: PeerId,
    pub(crate) port: u16,
    pub(crate) uploaded: u64,
    pub(crate) downloaded: u64,
    pub(crate) left: u64,
    pub(crate) compact: bool,
    pub(crate) no_peer_id: bool,
    pub(crate) event: AnnounceEvent,
    pub(crate) remote_addr: IpAddr,
    // Number of peers the client asked for in the response.
    pub(crate) numwant: u64,
}
--------------------------------------------------------------------------------
/src/tracker/structs/info_hash.rs:
--------------------------------------------------------------------------------
/// 20-byte BitTorrent info-hash newtype.
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)]
pub struct InfoHash(pub [u8; 20]);
3 |
--------------------------------------------------------------------------------
/src/tracker/structs/info_hash_visitor.rs:
--------------------------------------------------------------------------------
// Serde visitor used to deserialize an `InfoHash` from its hex string form.
pub(crate) struct InfoHashVisitor;
--------------------------------------------------------------------------------
/src/tracker/structs/peer_id.rs:
--------------------------------------------------------------------------------
/// 20-byte BitTorrent peer-id newtype.
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug, PartialOrd, Ord)]
pub struct PeerId(pub [u8; 20]);
--------------------------------------------------------------------------------
/src/tracker/structs/peer_id_visitor.rs:
--------------------------------------------------------------------------------
// Serde visitor used to deserialize a `PeerId` from its hex string form.
pub(crate) struct PeerIdVisitor;
--------------------------------------------------------------------------------
/src/tracker/structs/scrape_query_request.rs:
--------------------------------------------------------------------------------
1 | use serde::Deserialize;
2 | use crate::tracker::structs::info_hash::InfoHash;
3 |
4 | #[derive(Deserialize, Clone, Debug)]
5 | #[allow(dead_code)]
6 | pub struct ScrapeQueryRequest {
7 | pub(crate) info_hash: Vec,
8 | }
--------------------------------------------------------------------------------
/src/tracker/structs/torrent_entry.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use serde::Serialize;
3 | use crate::tracker::structs::peer_id::PeerId;
4 | use crate::tracker::structs::torrent_peer::TorrentPeer;
5 |
6 | #[derive(Serialize, Clone, Debug)]
7 | pub struct TorrentEntry {
8 | #[serde(skip_serializing)]
9 | pub seeds: BTreeMap,
10 | #[serde(skip_serializing)]
11 | pub peers: BTreeMap,
12 | pub completed: u64,
13 | #[serde(with = "serde_millis")]
14 | pub updated: std::time::Instant
15 | }
--------------------------------------------------------------------------------
/src/tracker/structs/torrent_peer.rs:
--------------------------------------------------------------------------------
1 | use std::net::SocketAddr;
2 | use serde::Serialize;
3 | use crate::common::structs::number_of_bytes::NumberOfBytes;
4 | use crate::common::structs::number_of_bytes_def::NumberOfBytesDef;
5 | use crate::tracker::enums::announce_event::AnnounceEvent;
6 | use crate::tracker::enums::announce_event_def::AnnounceEventDef;
7 | use crate::tracker::structs::peer_id::PeerId;
8 |
/// One peer's announce state for a torrent: identity, socket address,
/// transfer counters, and the last announce event.
#[derive(PartialEq, Eq, Debug, Clone, Serialize)]
pub struct TorrentPeer {
    pub peer_id: PeerId,
    pub peer_addr: SocketAddr,
    #[serde(with = "serde_millis")]
    pub updated: std::time::Instant,
    #[serde(with = "NumberOfBytesDef")]
    pub uploaded: NumberOfBytes,
    #[serde(with = "NumberOfBytesDef")]
    pub downloaded: NumberOfBytes,
    #[serde(with = "NumberOfBytesDef")]
    pub left: NumberOfBytes,
    #[serde(with = "AnnounceEventDef")]
    pub event: AnnounceEvent,
}
--------------------------------------------------------------------------------
/src/tracker/structs/torrent_peers.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use serde::Serialize;
3 | use crate::tracker::structs::peer_id::PeerId;
4 | use crate::tracker::structs::torrent_peer::TorrentPeer;
5 |
6 | #[derive(Serialize, Debug)]
7 | pub struct TorrentPeers {
8 | pub(crate) seeds_ipv4: BTreeMap,
9 | pub(crate) seeds_ipv6: BTreeMap,
10 | pub(crate) peers_ipv4: BTreeMap,
11 | pub(crate) peers_ipv6: BTreeMap,
12 | }
--------------------------------------------------------------------------------
/src/tracker/structs/torrent_tracker.rs:
--------------------------------------------------------------------------------
1 | use std::collections::{BTreeMap, HashMap};
2 | use std::sync::Arc;
3 | use parking_lot::RwLock;
4 | use crate::config::structs::configuration::Configuration;
5 | use crate::database::structs::database_connector::DatabaseConnector;
6 | use crate::stats::structs::stats_atomics::StatsAtomics;
7 | use crate::tracker::enums::updates_action::UpdatesAction;
8 | use crate::tracker::structs::info_hash::InfoHash;
9 | use crate::tracker::structs::torrent_sharding::TorrentSharding;
10 | use crate::tracker::structs::user_entry_item::UserEntryItem;
11 | use crate::tracker::structs::user_id::UserId;
12 | use crate::tracker::types::keys_updates::KeysUpdates;
13 | use crate::tracker::types::torrents_updates::TorrentsUpdates;
14 | use crate::tracker::types::users_updates::UsersUpdates;
15 |
16 |
17 | #[derive(Debug)]
18 | pub struct TorrentTracker {
19 | pub config: Arc,
20 | pub sqlx: DatabaseConnector,
21 | pub torrents_sharding: Arc,
22 | pub torrents_updates: TorrentsUpdates,
23 | pub torrents_whitelist: Arc>>,
24 | pub torrents_whitelist_updates: Arc>>,
25 | pub torrents_blacklist: Arc>>,
26 | pub torrents_blacklist_updates: Arc>>,
27 | pub keys: Arc>>,
28 | pub keys_updates: KeysUpdates,
29 | pub users: Arc>>,
30 | pub users_updates: UsersUpdates,
31 | pub stats: Arc,
32 | }
--------------------------------------------------------------------------------
/src/tracker/structs/user_entry_item.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | use serde::{Deserialize, Serialize};
3 | use crate::tracker::structs::info_hash::InfoHash;
4 | use crate::tracker::structs::user_id::UserId;
5 |
6 | #[derive(Serialize, Deserialize, Clone, Debug)]
7 | pub struct UserEntryItem {
8 | pub key: UserId,
9 | pub user_id: Option,
10 | pub user_uuid: Option,
11 | pub uploaded: u64,
12 | pub downloaded: u64,
13 | pub completed: u64,
14 | pub updated: u64,
15 | pub active: u8,
16 | pub torrents_active: BTreeMap
17 | }
--------------------------------------------------------------------------------
/src/tracker/structs/user_id.rs:
--------------------------------------------------------------------------------
/// 20-byte user key newtype (hex-encoded as 40 characters in APIs).
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)]
pub struct UserId(pub [u8; 20]);
--------------------------------------------------------------------------------
/src/tracker/structs/user_id_visitor.rs:
--------------------------------------------------------------------------------
// Serde visitor used to deserialize a `UserId` from its hex string form.
pub(crate) struct UserIdVisitor;
--------------------------------------------------------------------------------
/src/tracker/tests.rs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Power2All/torrust-actix/55622882ecfbd6612fc172f130a677b8a83a65b4/src/tracker/tests.rs
--------------------------------------------------------------------------------
/src/tracker/types.rs:
--------------------------------------------------------------------------------
1 | pub mod torrents_updates;
2 | pub mod keys_updates;
3 | pub mod users_updates;
--------------------------------------------------------------------------------
/src/tracker/types/keys_updates.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::sync::Arc;
3 | use parking_lot::RwLock;
4 | use crate::tracker::enums::updates_action::UpdatesAction;
5 | use crate::tracker::structs::info_hash::InfoHash;
6 |
7 | pub type KeysUpdates = Arc>>;
--------------------------------------------------------------------------------
/src/tracker/types/torrents_updates.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::sync::Arc;
3 | use parking_lot::RwLock;
4 | use crate::tracker::enums::updates_action::UpdatesAction;
5 | use crate::tracker::structs::info_hash::InfoHash;
6 | use crate::tracker::structs::torrent_entry::TorrentEntry;
7 |
8 | pub type TorrentsUpdates = Arc>>;
--------------------------------------------------------------------------------
/src/tracker/types/users_updates.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::sync::Arc;
3 | use parking_lot::RwLock;
4 | use crate::tracker::enums::updates_action::UpdatesAction;
5 | use crate::tracker::structs::user_entry_item::UserEntryItem;
6 | use crate::tracker::structs::user_id::UserId;
7 |
8 | pub type UsersUpdates = Arc>>;
--------------------------------------------------------------------------------
/src/udp/enums.rs:
--------------------------------------------------------------------------------
1 | pub mod request_parse_error;
2 | pub mod request;
3 | pub mod response;
4 | pub mod server_error;
--------------------------------------------------------------------------------
/src/udp/enums/request.rs:
--------------------------------------------------------------------------------
1 | use crate::udp::structs::announce_request::AnnounceRequest;
2 | use crate::udp::structs::connect_request::ConnectRequest;
3 | use crate::udp::structs::scrape_request::ScrapeRequest;
4 |
/// A decoded UDP tracker request (BEP 15 actions 0-2).
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Request {
    Connect(ConnectRequest),
    Announce(AnnounceRequest),
    Scrape(ScrapeRequest),
}
--------------------------------------------------------------------------------
/src/udp/enums/request_parse_error.rs:
--------------------------------------------------------------------------------
1 | use std::io;
2 | use actix_web::Either;
3 | use crate::udp::structs::connection_id::ConnectionId;
4 | use crate::udp::structs::transaction_id::TransactionId;
5 |
6 | #[derive(Debug)]
7 | pub enum RequestParseError {
8 | Sendable {
9 | connection_id: ConnectionId,
10 | transaction_id: TransactionId,
11 | err: Either,
12 | },
13 | Unsendable {
14 | err: Either,
15 | },
16 | }
--------------------------------------------------------------------------------
/src/udp/enums/response.rs:
--------------------------------------------------------------------------------
1 | use std::net::{Ipv4Addr, Ipv6Addr};
2 | use crate::udp::structs::announce_response::AnnounceResponse;
3 | use crate::udp::structs::connect_response::ConnectResponse;
4 | use crate::udp::structs::error_response::ErrorResponse;
5 | use crate::udp::structs::scrape_response::ScrapeResponse;
6 |
7 | #[derive(PartialEq, Eq, Clone, Debug)]
8 | pub enum Response {
9 | Connect(ConnectResponse),
10 | AnnounceIpv4(AnnounceResponse),
11 | AnnounceIpv6(AnnounceResponse),
12 | Scrape(ScrapeResponse),
13 | Error(ErrorResponse),
14 | }
--------------------------------------------------------------------------------
/src/udp/enums/server_error.rs:
--------------------------------------------------------------------------------
1 | use thiserror::Error;
2 |
/// User-visible tracker errors; the `#[error]` strings are sent to peers.
#[derive(Error, Debug)]
pub enum ServerError {
    #[error("internal server error")]
    InternalServerError,

    #[error("info_hash is either missing or invalid")]
    InvalidInfoHash,

    #[error("info_hash unknown")]
    UnknownInfoHash,

    #[error("could not find remote address")]
    AddressNotFound,

    #[error("torrent has no peers")]
    NoPeersFound,

    #[error("torrent not on whitelist")]
    TorrentNotWhitelisted,

    #[error("torrent blacklist")]
    TorrentBlacklisted,

    #[error("unknown key")]
    UnknownKey,

    #[error("peer not authenticated")]
    PeerNotAuthenticated,

    #[error("invalid authentication key")]
    PeerKeyNotValid,

    #[error("exceeded info_hash limit")]
    ExceededInfoHashLimit,

    #[error("bad request")]
    BadRequest,

    #[error("maintenance mode enabled, please try again later")]
    MaintenanceMode,
}
--------------------------------------------------------------------------------
/src/udp/impls.rs:
--------------------------------------------------------------------------------
1 | pub mod request_parse_error;
2 | pub mod request;
3 | pub mod response;
4 | pub mod ipv4_addr;
5 | pub mod ipv6_addr;
6 | pub mod udp_server;
--------------------------------------------------------------------------------
/src/udp/impls/ipv4_addr.rs:
--------------------------------------------------------------------------------
1 | use std::net::Ipv4Addr;
2 | use crate::udp::traits::Ip;
3 |
// Marker-trait impl: IPv4 addresses are valid peer IP types.
impl Ip for Ipv4Addr {}
--------------------------------------------------------------------------------
/src/udp/impls/ipv6_addr.rs:
--------------------------------------------------------------------------------
1 | use std::net::Ipv6Addr;
2 | use crate::udp::traits::Ip;
3 |
// Marker-trait impl: IPv6 addresses are valid peer IP types.
impl Ip for Ipv6Addr {}
--------------------------------------------------------------------------------
/src/udp/impls/request.rs:
--------------------------------------------------------------------------------
1 | use std::io;
2 | use std::io::{Cursor, Read, Write};
3 | use std::net::Ipv4Addr;
4 | use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
5 | use crate::common::structs::number_of_bytes::NumberOfBytes;
6 | use crate::tracker::enums::announce_event::AnnounceEvent;
7 | use crate::tracker::structs::info_hash::InfoHash;
8 | use crate::tracker::structs::peer_id::PeerId;
9 | use crate::udp::enums::request::Request;
10 | use crate::udp::enums::request_parse_error::RequestParseError;
11 | use crate::udp::structs::announce_request::AnnounceRequest;
12 | use crate::udp::structs::connect_request::ConnectRequest;
13 | use crate::udp::structs::connection_id::ConnectionId;
14 | use crate::udp::structs::number_of_peers::NumberOfPeers;
15 | use crate::udp::structs::peer_key::PeerKey;
16 | use crate::udp::structs::port::Port;
17 | use crate::udp::structs::scrape_request::ScrapeRequest;
18 | use crate::udp::structs::transaction_id::TransactionId;
19 | use crate::udp::udp::PROTOCOL_IDENTIFIER;
20 |
21 | impl From for Request {
22 | fn from(r: ConnectRequest) -> Self {
23 | Self::Connect(r)
24 | }
25 | }
26 |
27 | impl From for Request {
28 | fn from(r: AnnounceRequest) -> Self {
29 | Self::Announce(r)
30 | }
31 | }
32 |
33 | impl From for Request {
34 | fn from(r: ScrapeRequest) -> Self {
35 | Self::Scrape(r)
36 | }
37 | }
38 |
39 | impl Request {
40 | #[tracing::instrument(skip(bytes), level = "debug")]
41 | pub fn write(self, bytes: &mut impl Write) -> Result<(), io::Error> {
42 | match self {
43 | Request::Connect(r) => {
44 | bytes.write_i64::(PROTOCOL_IDENTIFIER)?;
45 | bytes.write_i32::(0)?;
46 | bytes.write_i32::(r.transaction_id.0)?;
47 | }
48 |
49 | Request::Announce(r) => {
50 | bytes.write_i64::(r.connection_id.0)?;
51 | bytes.write_i32::(1)?;
52 | bytes.write_i32::(r.transaction_id.0)?;
53 |
54 | bytes.write_all(&r.info_hash.0)?;
55 | bytes.write_all(&r.peer_id.0)?;
56 |
57 | bytes.write_i64::(r.bytes_downloaded.0)?;
58 | bytes.write_i64::(r.bytes_left.0)?;
59 | bytes.write_i64::(r.bytes_uploaded.0)?;
60 |
61 | bytes.write_i32::(r.event.to_i32())?;
62 |
63 | bytes.write_all(&r.ip_address.map_or([0; 4], |ip| ip.octets()))?;
64 |
65 | bytes.write_u32::(r.key.0)?;
66 | bytes.write_i32::(r.peers_wanted.0)?;
67 | bytes.write_u16::(r.port.0)?;
68 | }
69 |
70 | Request::Scrape(r) => {
71 | bytes.write_i64::(r.connection_id.0)?;
72 | bytes.write_i32::(2)?;
73 | bytes.write_i32::(r.transaction_id.0)?;
74 |
75 | for info_hash in r.info_hashes {
76 | bytes.write_all(&info_hash.0)?;
77 | }
78 | }
79 | }
80 |
81 | Ok(())
82 | }
83 |
84 | #[tracing::instrument(level = "debug")]
85 | pub fn from_bytes(bytes: &[u8], max_scrape_torrents: u8) -> Result {
86 | let mut cursor = Cursor::new(bytes);
87 |
88 | let connection_id = cursor
89 | .read_i64::()
90 | .map_err(RequestParseError::unsendable_io)?;
91 | let action = cursor
92 | .read_i32::()
93 | .map_err(RequestParseError::unsendable_io)?;
94 | let transaction_id = cursor
95 | .read_i32::()
96 | .map_err(RequestParseError::unsendable_io)?;
97 |
98 | match action {
99 | // Connect
100 | 0 => {
101 | if connection_id == PROTOCOL_IDENTIFIER {
102 | Ok((ConnectRequest {
103 | transaction_id: TransactionId(transaction_id),
104 | })
105 | .into())
106 | } else {
107 | Err(RequestParseError::unsendable_text(
108 | "Protocol identifier missing",
109 | ))
110 | }
111 | }
112 |
113 | // Announce
114 | 1 => {
115 | let mut info_hash = [0; 20];
116 | let mut peer_id = [0; 20];
117 | let mut ip = [0; 4];
118 |
119 | cursor.read_exact(&mut info_hash).map_err(|err| {
120 | RequestParseError::sendable_io(err, connection_id, transaction_id)
121 | })?;
122 | cursor.read_exact(&mut peer_id).map_err(|err| {
123 | RequestParseError::sendable_io(err, connection_id, transaction_id)
124 | })?;
125 |
126 | let bytes_downloaded = cursor.read_i64::().map_err(|err| {
127 | RequestParseError::sendable_io(err, connection_id, transaction_id)
128 | })?;
129 | let bytes_left = cursor.read_i64::().map_err(|err| {
130 | RequestParseError::sendable_io(err, connection_id, transaction_id)
131 | })?;
132 | let bytes_uploaded = cursor.read_i64::().map_err(|err| {
133 | RequestParseError::sendable_io(err, connection_id, transaction_id)
134 | })?;
135 | let event = cursor.read_i32::().map_err(|err| {
136 | RequestParseError::sendable_io(err, connection_id, transaction_id)
137 | })?;
138 |
139 | cursor.read_exact(&mut ip).map_err(|err| {
140 | RequestParseError::sendable_io(err, connection_id, transaction_id)
141 | })?;
142 |
143 | let key = cursor.read_u32::().map_err(|err| {
144 | RequestParseError::sendable_io(err, connection_id, transaction_id)
145 | })?;
146 | let peers_wanted = cursor.read_i32::().map_err(|err| {
147 | RequestParseError::sendable_io(err, connection_id, transaction_id)
148 | })?;
149 | let port = cursor.read_u16::().map_err(|err| {
150 | RequestParseError::sendable_io(err, connection_id, transaction_id)
151 | })?;
152 |
153 | let opt_ip = if ip == [0; 4] {
154 | None
155 | } else {
156 | Some(Ipv4Addr::from(ip))
157 | };
158 |
159 | let option_byte = cursor.read_u8();
160 | let option_size = cursor.read_u8();
161 | let mut path: &str = "";
162 | let mut path_array = vec![];
163 |
164 | let option_byte_value = option_byte.unwrap_or_default();
165 | let option_size_value = option_size.unwrap_or_default();
166 | if option_byte_value == 2 {
167 | path_array.resize(option_size_value as usize, 0u8);
168 | cursor.read_exact(&mut path_array).map_err(|err| {
169 | RequestParseError::sendable_io(err, connection_id, transaction_id)
170 | })?;
171 | path = std::str::from_utf8(&path_array).unwrap_or_default();
172 | }
173 |
174 | Ok((AnnounceRequest {
175 | connection_id: ConnectionId(connection_id),
176 | transaction_id: TransactionId(transaction_id),
177 | info_hash: InfoHash(info_hash),
178 | peer_id: PeerId(peer_id),
179 | bytes_downloaded: NumberOfBytes(bytes_downloaded),
180 | bytes_uploaded: NumberOfBytes(bytes_uploaded),
181 | bytes_left: NumberOfBytes(bytes_left),
182 | event: AnnounceEvent::from_i32(event),
183 | ip_address: opt_ip,
184 | key: PeerKey(key),
185 | peers_wanted: NumberOfPeers(peers_wanted),
186 | port: Port(port),
187 | path: path.to_string(),
188 | })
189 | .into())
190 | }
191 |
192 | // Scrape
193 | 2 => {
194 | let position = cursor.position() as usize;
195 | let inner = cursor.into_inner();
196 |
197 | let info_hashes: Vec = inner[position..]
198 | .chunks_exact(20)
199 | .take(max_scrape_torrents as usize)
200 | .map(|chunk| InfoHash(chunk.try_into().unwrap()))
201 | .collect();
202 |
203 | if info_hashes.is_empty() {
204 | Err(RequestParseError::sendable_text(
205 | "Full scrapes are not allowed",
206 | connection_id,
207 | transaction_id,
208 | ))
209 | } else {
210 | Ok((ScrapeRequest {
211 | connection_id: ConnectionId(connection_id),
212 | transaction_id: TransactionId(transaction_id),
213 | info_hashes,
214 | })
215 | .into())
216 | }
217 | }
218 |
219 | _ => Err(RequestParseError::sendable_text(
220 | "Invalid action",
221 | connection_id,
222 | transaction_id,
223 | )),
224 | }
225 | }
226 | }
--------------------------------------------------------------------------------
/src/udp/impls/request_parse_error.rs:
--------------------------------------------------------------------------------
1 | use std::io;
2 | use actix_web::Either;
3 | use crate::udp::enums::request_parse_error::RequestParseError;
4 | use crate::udp::structs::connection_id::ConnectionId;
5 | use crate::udp::structs::transaction_id::TransactionId;
6 |
impl RequestParseError {
    // Constructors below pair an error payload with how much of the request
    // header was successfully read (Sendable = replyable, Unsendable = not).

    /// I/O error after the header was parsed; a reply can be addressed.
    #[tracing::instrument(level = "debug")]
    pub fn sendable_io(err: io::Error, connection_id: i64, transaction_id: i32) -> Self {
        Self::Sendable {
            connection_id: ConnectionId(connection_id),
            transaction_id: TransactionId(transaction_id),
            err: Either::Left(err),
        }
    }
    /// Static-text error after the header was parsed; a reply can be addressed.
    #[tracing::instrument(level = "debug")]
    pub fn sendable_text(text: &'static str, connection_id: i64, transaction_id: i32) -> Self {
        Self::Sendable {
            connection_id: ConnectionId(connection_id),
            transaction_id: TransactionId(transaction_id),
            err: Either::Right(text),
        }
    }
    /// I/O error before the header was readable; no reply can be addressed.
    #[tracing::instrument(level = "debug")]
    pub fn unsendable_io(err: io::Error) -> Self {
        Self::Unsendable {
            err: Either::Left(err),
        }
    }
    /// Static-text error before the header was readable; no reply can be addressed.
    #[tracing::instrument(level = "debug")]
    pub fn unsendable_text(text: &'static str) -> Self {
        Self::Unsendable {
            err: Either::Right(text),
        }
    }
}
--------------------------------------------------------------------------------
/src/udp/impls/response.rs:
--------------------------------------------------------------------------------
1 | use std::convert::TryInto;
2 | use std::io;
3 | use std::io::{Cursor, Write};
4 | use std::net::{Ipv4Addr, Ipv6Addr};
5 | use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
6 | use crate::udp::enums::response::Response;
7 | use crate::udp::structs::announce_interval::AnnounceInterval;
8 | use crate::udp::structs::announce_response::AnnounceResponse;
9 | use crate::udp::structs::connect_response::ConnectResponse;
10 | use crate::udp::structs::connection_id::ConnectionId;
11 | use crate::udp::structs::error_response::ErrorResponse;
12 | use crate::udp::structs::number_of_downloads::NumberOfDownloads;
13 | use crate::udp::structs::number_of_peers::NumberOfPeers;
14 | use crate::udp::structs::port::Port;
15 | use crate::udp::structs::response_peer::ResponsePeer;
16 | use crate::udp::structs::scrape_response::ScrapeResponse;
17 | use crate::udp::structs::torrent_scrape_statistics::TorrentScrapeStatistics;
18 | use crate::udp::structs::transaction_id::TransactionId;
19 |
20 | impl From for Response {
21 | fn from(r: ConnectResponse) -> Self {
22 | Self::Connect(r)
23 | }
24 | }
25 |
26 | impl From> for Response {
27 | fn from(r: AnnounceResponse) -> Self {
28 | Self::AnnounceIpv4(r)
29 | }
30 | }
31 |
32 | impl From> for Response {
33 | fn from(r: AnnounceResponse) -> Self {
34 | Self::AnnounceIpv6(r)
35 | }
36 | }
37 |
38 | impl From for Response {
39 | fn from(r: ScrapeResponse) -> Self {
40 | Self::Scrape(r)
41 | }
42 | }
43 |
44 | impl From for Response {
45 | fn from(r: ErrorResponse) -> Self {
46 | Self::Error(r)
47 | }
48 | }
49 |
50 | impl Response {
51 | #[tracing::instrument(skip(bytes), level = "debug")]
52 | #[inline]
53 | pub fn write(&self, bytes: &mut impl Write) -> Result<(), io::Error> {
54 | match self {
55 | Response::Connect(r) => {
56 | bytes.write_i32::(0)?;
57 | bytes.write_i32::(r.transaction_id.0)?;
58 | bytes.write_i64::(r.connection_id.0)?;
59 | }
60 | Response::AnnounceIpv4(r) => {
61 | bytes.write_i32::(1)?;
62 | bytes.write_i32::(r.transaction_id.0)?;
63 | bytes.write_i32::(r.announce_interval.0)?;
64 | bytes.write_i32::(r.leechers.0)?;
65 | bytes.write_i32::(r.seeders.0)?;
66 |
67 | for peer in r.peers.iter() {
68 | bytes.write_all(&peer.ip_address.octets())?;
69 | bytes.write_u16::(peer.port.0)?;
70 | }
71 | }
72 | Response::AnnounceIpv6(r) => {
73 | bytes.write_i32::(1)?;
74 | bytes.write_i32::(r.transaction_id.0)?;
75 | bytes.write_i32::(r.announce_interval.0)?;
76 | bytes.write_i32::(r.leechers.0)?;
77 | bytes.write_i32::(r.seeders.0)?;
78 |
79 | for peer in r.peers.iter() {
80 | bytes.write_all(&peer.ip_address.octets())?;
81 | bytes.write_u16::(peer.port.0)?;
82 | }
83 | }
84 | Response::Scrape(r) => {
85 | bytes.write_i32::(2)?;
86 | bytes.write_i32::(r.transaction_id.0)?;
87 |
88 | for torrent_stat in r.torrent_stats.iter() {
89 | bytes.write_i32::(torrent_stat.seeders.0)?;
90 | bytes.write_i32::(torrent_stat.completed.0)?;
91 | bytes.write_i32::(torrent_stat.leechers.0)?;
92 | }
93 | }
94 | Response::Error(r) => {
95 | bytes.write_i32::(3)?;
96 | bytes.write_i32::(r.transaction_id.0)?;
97 |
98 | bytes.write_all(r.message.as_bytes())?;
99 | }
100 | }
101 |
102 | Ok(())
103 | }
104 |
105 | #[tracing::instrument(level = "debug")]
106 | #[inline]
107 | pub fn from_bytes(bytes: &[u8], ipv4: bool) -> Result {
108 | let mut cursor = Cursor::new(bytes);
109 |
110 | let action = cursor.read_i32::()?;
111 | let transaction_id = cursor.read_i32::()?;
112 |
113 | match action {
114 | // Connect
115 | 0 => {
116 | let connection_id = cursor.read_i64::()?;
117 |
118 | Ok((ConnectResponse {
119 | connection_id: ConnectionId(connection_id),
120 | transaction_id: TransactionId(transaction_id),
121 | })
122 | .into())
123 | }
124 | // Announce
125 | 1 if ipv4 => {
126 | let announce_interval = cursor.read_i32::()?;
127 | let leechers = cursor.read_i32::()?;
128 | let seeders = cursor.read_i32::()?;
129 |
130 | let position = cursor.position() as usize;
131 | let inner = cursor.into_inner();
132 |
133 | let peers = inner[position..]
134 | .chunks_exact(6)
135 | .map(|chunk| {
136 | let ip_bytes: [u8; 4] = (&chunk[..4]).try_into().unwrap();
137 | let ip_address = Ipv4Addr::from(ip_bytes);
138 | let port = (&chunk[4..]).read_u16::().unwrap();
139 |
140 | ResponsePeer {
141 | ip_address,
142 | port: Port(port),
143 | }
144 | })
145 | .collect();
146 |
147 | Ok((AnnounceResponse {
148 | transaction_id: TransactionId(transaction_id),
149 | announce_interval: AnnounceInterval(announce_interval),
150 | leechers: NumberOfPeers(leechers),
151 | seeders: NumberOfPeers(seeders),
152 | peers,
153 | })
154 | .into())
155 | }
156 | 1 if !ipv4 => {
157 | let announce_interval = cursor.read_i32::()?;
158 | let leechers = cursor.read_i32::()?;
159 | let seeders = cursor.read_i32::()?;
160 |
161 | let position = cursor.position() as usize;
162 | let inner = cursor.into_inner();
163 |
164 | let peers = inner[position..]
165 | .chunks_exact(18)
166 | .map(|chunk| {
167 | let ip_bytes: [u8; 16] = (&chunk[..16]).try_into().unwrap();
168 | let ip_address = Ipv6Addr::from(ip_bytes);
169 | let port = (&chunk[16..]).read_u16::