├── .formatter.exs
├── .github
    ├── FUNDING.yml
    ├── extracker-logo.png
    └── workflows
    │   ├── build-on-push.yml
    │   ├── docker-release.yml
    │   └── test-on-push.yml
├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── config
    ├── config.exs
    ├── dev.exs
    ├── prod.exs
    ├── runtime.exs
    └── test.exs
├── docker-compose.yml
├── lib
    ├── ex_tracker.ex
    └── ex_tracker
    │   ├── accesslist.ex
    │   ├── application.ex
    │   ├── backup.ex
    │   ├── cmd.ex
    │   ├── config
    │       └── system_env.ex
    │   ├── http
    │       ├── multiparam_plug.ex
    │       ├── reverse_proxy_plug.ex
    │       └── router.ex
    │   ├── processors
    │       ├── announce.ex
    │       └── scrape.ex
    │   ├── swarm.ex
    │   ├── swarm_cleaner.ex
    │   ├── swarm_finder.ex
    │   ├── swarm_printout.ex
    │   ├── telemetry
    │       ├── basic_reporter.ex
    │       ├── plug.ex
    │       ├── router.ex
    │       └── telemetry.ex
    │   ├── types
    │       ├── announce_request.ex
    │       ├── announce_response.ex
    │       ├── peer.ex
    │       ├── scrape_request.ex
    │       └── scrape_response.ex
    │   ├── udp
    │       ├── router.ex
    │       └── supervisor.ex
    │   └── utils.ex
├── mix.exs
├── mix.lock
├── rel
    ├── env.bat.eex
    ├── env.sh.eex
    ├── remote.vm.args.eex
    └── vm.args.eex
└── test
    ├── ex_tracker_test.exs
    ├── test_helper.exs
    └── utils_test.exs


/.formatter.exs:
--------------------------------------------------------------------------------
1 | # Used by "mix format"
2 | [
3 |   inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"]
4 | ]
5 | 


--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 | # https://docs.github.com/es/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/displaying-a-sponsor-button-in-your-repository
3 | github: Dahrkael
4 | 


--------------------------------------------------------------------------------
/.github/extracker-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Dahrkael/ExTracker/33a512cb68ff12d6fdd5fd90951240c616c29835/.github/extracker-logo.png


--------------------------------------------------------------------------------
/.github/workflows/build-on-push.yml:
--------------------------------------------------------------------------------
 1 | # This workflow uses actions that are not certified by GitHub.
 2 | # They are provided by a third-party and are governed by
 3 | # separate terms of service, privacy policy, and support
 4 | # documentation.
 5 | 
 6 | name: Build
 7 | 
 8 | on:
 9 |   push:
10 |     branches: [ "master" ]
11 |   pull_request:
12 |     branches: [ "master" ]
13 | 
14 | permissions:
15 |   contents: read
16 | 
17 | jobs:
18 |   test:
19 |     runs-on: ubuntu-24.04
20 |     name: Build and test with OTP ${{matrix.otp}} / Elixir ${{matrix.elixir}}
21 |     strategy:
22 |       matrix:
23 |         otp: ['25', '26', '27']
24 |         elixir: ['1.17', '1.18']
25 |     steps:
26 |       - uses: actions/checkout@v4
27 |       - name: Set up Elixir
28 |         uses: erlef/setup-beam@v1
29 |         with:
30 |           otp-version: ${{matrix.otp}}
31 |           elixir-version: ${{matrix.elixir}}
32 |       - name: Install dependencies
33 |         run: mix deps.get
34 |       - name: Compile project
35 |         run: mix compile
36 | 


--------------------------------------------------------------------------------
/.github/workflows/docker-release.yml:
--------------------------------------------------------------------------------
 1 | #
 2 | name: Docker Latest
 3 | 
 4 | on:
 5 |   workflow_run:
 6 |     workflows: ['Build']
 7 |     types:
 8 |       - completed
 9 | 
10 | env:
11 |   REGISTRY: ghcr.io
12 |   IMAGE_NAME: ${{ github.repository }}
13 | 
14 | jobs:
15 |   build-and-push-image:
16 |     runs-on: ubuntu-24.04
17 |     permissions:
18 |       contents: read
19 |       packages: write
20 |       attestations: write
21 |       id-token: write
22 |       #
23 |     steps:
24 |       - name: lowercase github.repository
25 |         run: |
26 |           echo "IMAGE_NAME=${GITHUB_REPOSITORY@L}" >> ${GITHUB_ENV}
27 |       - name: Checkout repository
28 |         uses: actions/checkout@v4
29 |       - name: Log in to the Container registry
30 |         uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
31 |         with:
32 |           registry: ${{ env.REGISTRY }}
33 |           username: ${{ github.actor }}
34 |           password: ${{ secrets.GITHUB_TOKEN }}
35 |       - name: Build and push Docker image
36 |         id: push
37 |         uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
38 |         with:
39 |           context: .
40 |           push: true
41 |           build-args: |
42 |             RELEASE_NAME=extracker
43 |           tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
44 | 


--------------------------------------------------------------------------------
/.github/workflows/test-on-push.yml:
--------------------------------------------------------------------------------
 1 | # This workflow uses actions that are not certified by GitHub.
 2 | # They are provided by a third-party and are governed by
 3 | # separate terms of service, privacy policy, and support
 4 | # documentation.
 5 | 
 6 | name: Tests
 7 | 
 8 | on:
 9 |   workflow_run:
10 |     workflows: ['Build']
11 |     types:
12 |       - completed
13 | 
14 | permissions:
15 |   contents: read
16 | 
17 | jobs:
18 |   test:
19 |     runs-on: ubuntu-24.04
20 |     name: Build and test with OTP ${{matrix.otp}} / Elixir ${{matrix.elixir}}
21 |     strategy:
22 |       matrix:
23 |         otp: ['27']
24 |         elixir: ['1.18']
25 |     steps:
26 |       - uses: actions/checkout@v4
27 |       - name: Set up Elixir
28 |         uses: erlef/setup-beam@v1
29 |         with:
30 |           otp-version: ${{matrix.otp}}
31 |           elixir-version: ${{matrix.elixir}}
32 |       - name: Install dependencies
33 |         run: mix deps.get
34 |       - name: Run tests
35 |         run: mix test
36 | 


--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | # The directory Mix will write compiled artifacts to.
 2 | /_build/
 3 | 
 4 | # If you run "mix test --cover", coverage assets end up here.
 5 | /cover/
 6 | 
 7 | # The directory Mix downloads your dependencies sources to.
 8 | /deps/
 9 | 
10 | # Where third-party dependencies like ExDoc output generated docs.
11 | /doc/
12 | 
13 | # Ignore .fetch files in case you like to edit your project deps locally.
14 | /.fetch
15 | 
16 | # If the VM crashes, it generates a dump, let's ignore it too.
17 | erl_crash.dump
18 | 
19 | # Also ignore archive artifacts (built via "mix archive.build").
20 | *.ez
21 | 
22 | # Ignore package tarball (built via "mix hex.build").
23 | extracker-*.tar
24 | 
25 | # Temporary files, for example, from tests.
26 | /tmp/
27 | 


--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
 1 | # Stage 1: builder
 2 | FROM elixir:1.18-otp-27-alpine AS builder
 3 | 
 4 | ARG RELEASE_NAME
 5 | ENV MIX_ENV=prod
 6 | RUN apk update && apk upgrade
 7 | WORKDIR /app
 8 | 
 9 | # Copy mix files and config to leverage caching
10 | COPY mix.exs mix.lock ./
11 | COPY config/ config/
12 | 
13 | # Install Hex and Rebar and compile dependencies
14 | RUN mix local.hex --force && mix local.rebar --force
15 | RUN mix deps.get && mix deps.compile
16 | 
17 | # Copy the rest of the files
18 | #COPY . .
19 | COPY ./config ./config
20 | COPY ./lib ./lib
21 | COPY ./rel ./rel
22 | COPY ./test ./test
23 | COPY ./LICENSE ./LICENSE
24 | COPY ./README.md ./README.md
25 | 
26 | # Compile the project and generate the release
27 | RUN mix compile
28 | RUN mix release ${RELEASE_NAME}
29 | 
30 | 
31 | # Stage 2: runtime
32 | FROM alpine:latest
33 | 
34 | # add labels
35 | LABEL org.opencontainers.image.source="https://github.com/Dahrkael/ExTracker"
36 | LABEL org.opencontainers.image.authors="Dahrkael <dahrkael@outlook.com>"
37 | LABEL org.opencontainers.image.title="ExTracker"
38 | LABEL org.opencontainers.image.description="ExTracker BitTorrent Tracker"
39 | LABEL org.opencontainers.image.licenses="Apache-2.0"
40 | 
41 | ARG RELEASE_NAME
42 | ENV MIX_ENV=prod
43 | WORKDIR /app
44 | 
45 | # Update the system
46 | RUN apk update && apk upgrade
47 | RUN apk update && apk add openssl ncurses-libs libgcc libstdc++
48 | 
49 | # setup the user
50 | RUN addgroup -S extracker
51 | RUN adduser -S extracker -G extracker
52 | 
53 | # create a volume for the backups
54 | RUN mkdir /backups
55 | VOLUME /backups
56 | 
57 | # create a volume for the config files (white/blacklists, etc)
58 | RUN mkdir /config
59 | VOLUME /config
60 | 
61 | # copy the built release from the builder stage
62 | COPY --from=builder /app/_build/prod/rel/${RELEASE_NAME} ./
63 | 
64 | # set permissions once all files are in place
65 | RUN chown -R extracker:extracker /app
66 | RUN chown -R extracker:extracker /backups
67 | RUN chown -R extracker:extracker /config
68 | 
69 | # Expose the default ports
70 | EXPOSE 6969/tcp
71 | EXPOSE 6969/udp
72 | EXPOSE 7070/tcp
73 | 
74 | # set the non-root user
75 | USER extracker
76 | 
77 | # build args are not available at runtime
78 | ENV EXTRACKER_RELEASE_NAME=${RELEASE_NAME}
79 | 
80 | # Run the release
81 | RUN chmod +x ./bin/${RELEASE_NAME}
82 | CMD ["sh", "-c", "bin/${EXTRACKER_RELEASE_NAME} start"]
83 | 
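Note: the release workflow and the compose file both build this Dockerfile with RELEASE_NAME=extracker. A local build/run sketch consistent with those defaults (the extracker:local tag is illustrative):

    docker build --build-arg RELEASE_NAME=extracker -t extracker:local .
    docker run --rm -p 6969:6969/tcp -p 6969:6969/udp -p 7070:7070/tcp extracker:local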


--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
  1 |                                  Apache License
  2 |                            Version 2.0, January 2004
  3 |                         http://www.apache.org/licenses/
  4 | 
  5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
  6 | 
  7 |    1. Definitions.
  8 | 
  9 |       "License" shall mean the terms and conditions for use, reproduction,
 10 |       and distribution as defined by Sections 1 through 9 of this document.
 11 | 
 12 |       "Licensor" shall mean the copyright owner or entity authorized by
 13 |       the copyright owner that is granting the License.
 14 | 
 15 |       "Legal Entity" shall mean the union of the acting entity and all
 16 |       other entities that control, are controlled by, or are under common
 17 |       control with that entity. For the purposes of this definition,
 18 |       "control" means (i) the power, direct or indirect, to cause the
 19 |       direction or management of such entity, whether by contract or
 20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
 21 |       outstanding shares, or (iii) beneficial ownership of such entity.
 22 | 
 23 |       "You" (or "Your") shall mean an individual or Legal Entity
 24 |       exercising permissions granted by this License.
 25 | 
 26 |       "Source" form shall mean the preferred form for making modifications,
 27 |       including but not limited to software source code, documentation
 28 |       source, and configuration files.
 29 | 
 30 |       "Object" form shall mean any form resulting from mechanical
 31 |       transformation or translation of a Source form, including but
 32 |       not limited to compiled object code, generated documentation,
 33 |       and conversions to other media types.
 34 | 
 35 |       "Work" shall mean the work of authorship, whether in Source or
 36 |       Object form, made available under the License, as indicated by a
 37 |       copyright notice that is included in or attached to the work
 38 |       (an example is provided in the Appendix below).
 39 | 
 40 |       "Derivative Works" shall mean any work, whether in Source or Object
 41 |       form, that is based on (or derived from) the Work and for which the
 42 |       editorial revisions, annotations, elaborations, or other modifications
 43 |       represent, as a whole, an original work of authorship. For the purposes
 44 |       of this License, Derivative Works shall not include works that remain
 45 |       separable from, or merely link (or bind by name) to the interfaces of,
 46 |       the Work and Derivative Works thereof.
 47 | 
 48 |       "Contribution" shall mean any work of authorship, including
 49 |       the original version of the Work and any modifications or additions
 50 |       to that Work or Derivative Works thereof, that is intentionally
 51 |       submitted to Licensor for inclusion in the Work by the copyright owner
 52 |       or by an individual or Legal Entity authorized to submit on behalf of
 53 |       the copyright owner. For the purposes of this definition, "submitted"
 54 |       means any form of electronic, verbal, or written communication sent
 55 |       to the Licensor or its representatives, including but not limited to
 56 |       communication on electronic mailing lists, source code control systems,
 57 |       and issue tracking systems that are managed by, or on behalf of, the
 58 |       Licensor for the purpose of discussing and improving the Work, but
 59 |       excluding communication that is conspicuously marked or otherwise
 60 |       designated in writing by the copyright owner as "Not a Contribution."
 61 | 
 62 |       "Contributor" shall mean Licensor and any individual or Legal Entity
 63 |       on behalf of whom a Contribution has been received by Licensor and
 64 |       subsequently incorporated within the Work.
 65 | 
 66 |    2. Grant of Copyright License. Subject to the terms and conditions of
 67 |       this License, each Contributor hereby grants to You a perpetual,
 68 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 69 |       copyright license to reproduce, prepare Derivative Works of,
 70 |       publicly display, publicly perform, sublicense, and distribute the
 71 |       Work and such Derivative Works in Source or Object form.
 72 | 
 73 |    3. Grant of Patent License. Subject to the terms and conditions of
 74 |       this License, each Contributor hereby grants to You a perpetual,
 75 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 76 |       (except as stated in this section) patent license to make, have made,
 77 |       use, offer to sell, sell, import, and otherwise transfer the Work,
 78 |       where such license applies only to those patent claims licensable
 79 |       by such Contributor that are necessarily infringed by their
 80 |       Contribution(s) alone or by combination of their Contribution(s)
 81 |       with the Work to which such Contribution(s) was submitted. If You
 82 |       institute patent litigation against any entity (including a
 83 |       cross-claim or counterclaim in a lawsuit) alleging that the Work
 84 |       or a Contribution incorporated within the Work constitutes direct
 85 |       or contributory patent infringement, then any patent licenses
 86 |       granted to You under this License for that Work shall terminate
 87 |       as of the date such litigation is filed.
 88 | 
 89 |    4. Redistribution. You may reproduce and distribute copies of the
 90 |       Work or Derivative Works thereof in any medium, with or without
 91 |       modifications, and in Source or Object form, provided that You
 92 |       meet the following conditions:
 93 | 
 94 |       (a) You must give any other recipients of the Work or
 95 |           Derivative Works a copy of this License; and
 96 | 
 97 |       (b) You must cause any modified files to carry prominent notices
 98 |           stating that You changed the files; and
 99 | 
100 |       (c) You must retain, in the Source form of any Derivative Works
101 |           that You distribute, all copyright, patent, trademark, and
102 |           attribution notices from the Source form of the Work,
103 |           excluding those notices that do not pertain to any part of
104 |           the Derivative Works; and
105 | 
106 |       (d) If the Work includes a "NOTICE" text file as part of its
107 |           distribution, then any Derivative Works that You distribute must
108 |           include a readable copy of the attribution notices contained
109 |           within such NOTICE file, excluding those notices that do not
110 |           pertain to any part of the Derivative Works, in at least one
111 |           of the following places: within a NOTICE text file distributed
112 |           as part of the Derivative Works; within the Source form or
113 |           documentation, if provided along with the Derivative Works; or,
114 |           within a display generated by the Derivative Works, if and
115 |           wherever such third-party notices normally appear. The contents
116 |           of the NOTICE file are for informational purposes only and
117 |           do not modify the License. You may add Your own attribution
118 |           notices within Derivative Works that You distribute, alongside
119 |           or as an addendum to the NOTICE text from the Work, provided
120 |           that such additional attribution notices cannot be construed
121 |           as modifying the License.
122 | 
123 |       You may add Your own copyright statement to Your modifications and
124 |       may provide additional or different license terms and conditions
125 |       for use, reproduction, or distribution of Your modifications, or
126 |       for any such Derivative Works as a whole, provided Your use,
127 |       reproduction, and distribution of the Work otherwise complies with
128 |       the conditions stated in this License.
129 | 
130 |    5. Submission of Contributions. Unless You explicitly state otherwise,
131 |       any Contribution intentionally submitted for inclusion in the Work
132 |       by You to the Licensor shall be under the terms and conditions of
133 |       this License, without any additional terms or conditions.
134 |       Notwithstanding the above, nothing herein shall supersede or modify
135 |       the terms of any separate license agreement you may have executed
136 |       with Licensor regarding such Contributions.
137 | 
138 |    6. Trademarks. This License does not grant permission to use the trade
139 |       names, trademarks, service marks, or product names of the Licensor,
140 |       except as required for reasonable and customary use in describing the
141 |       origin of the Work and reproducing the content of the NOTICE file.
142 | 
143 |    7. Disclaimer of Warranty. Unless required by applicable law or
144 |       agreed to in writing, Licensor provides the Work (and each
145 |       Contributor provides its Contributions) on an "AS IS" BASIS,
146 |       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 |       implied, including, without limitation, any warranties or conditions
148 |       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 |       PARTICULAR PURPOSE. You are solely responsible for determining the
150 |       appropriateness of using or redistributing the Work and assume any
151 |       risks associated with Your exercise of permissions under this License.
152 | 
153 |    8. Limitation of Liability. In no event and under no legal theory,
154 |       whether in tort (including negligence), contract, or otherwise,
155 |       unless required by applicable law (such as deliberate and grossly
156 |       negligent acts) or agreed to in writing, shall any Contributor be
157 |       liable to You for damages, including any direct, indirect, special,
158 |       incidental, or consequential damages of any character arising as a
159 |       result of this License or out of the use or inability to use the
160 |       Work (including but not limited to damages for loss of goodwill,
161 |       work stoppage, computer failure or malfunction, or any and all
162 |       other commercial damages or losses), even if such Contributor
163 |       has been advised of the possibility of such damages.
164 | 
165 |    9. Accepting Warranty or Additional Liability. While redistributing
166 |       the Work or Derivative Works thereof, You may choose to offer,
167 |       and charge a fee for, acceptance of support, warranty, indemnity,
168 |       or other liability obligations and/or rights consistent with this
169 |       License. However, in accepting such obligations, You may act only
170 |       on Your own behalf and on Your sole responsibility, not on behalf
171 |       of any other Contributor, and only if You agree to indemnify,
172 |       defend, and hold each Contributor harmless for any liability
173 |       incurred by, or claims asserted against, such Contributor by reason
174 |       of your accepting any such warranty or additional liability.
175 | 
176 |    END OF TERMS AND CONDITIONS
177 | 
178 |    APPENDIX: How to apply the Apache License to your work.
179 | 
180 |       To apply the Apache License to your work, attach the following
181 |       boilerplate notice, with the fields enclosed by brackets "[]"
182 |       replaced with your own identifying information. (Don't include
183 |       the brackets!)  The text should be enclosed in the appropriate
184 |       comment syntax for the file format. We also recommend that a
185 |       file or class name and description of purpose be included on the
186 |       same "printed page" as the copyright notice for easier
187 |       identification within third-party archives.
188 | 
189 |    Copyright [yyyy] [name of copyright owner]
190 | 
191 |    Licensed under the Apache License, Version 2.0 (the "License");
192 |    you may not use this file except in compliance with the License.
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | 


--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | ![ExTracker](.github/extracker-logo.png)
 2 | The BitTorrent Tracker made in Elixir
 3 | 
 4 | [![CI](https://github.com/Dahrkael/ExTracker/actions/workflows/build-on-push.yml/badge.svg)](https://github.com/Dahrkael/ExTracker/actions/workflows/build-on-push.yml)
 5 | [![CI](https://github.com/Dahrkael/ExTracker/actions/workflows/test-on-push.yml/badge.svg)](https://github.com/Dahrkael/ExTracker/actions/workflows/test-on-push.yml)
 6 | [![CI](https://github.com/Dahrkael/ExTracker/actions/workflows/docker-release.yml/badge.svg)](https://github.com/Dahrkael/ExTracker/actions/workflows/docker-release.yml)
 7 | 
 8 | 👷‍♂️ This project is a Work In Progress. While not yet ready for full production use, it does work.  
 9 | There is a testing instance running at [extracker.dahrkael.net:6969](http://extracker.dahrkael.net:6969/about) with all current features enabled ([Live statistics](http://extracker.dahrkael.net:9568/tracker-stats.html)).
10 | 
11 | ## Features
12 | Implementation Legend: 
13 | 🔲 Not Yet 🔰 Partially ✅ Done ❌ Won't do
14 | 
15 | ### Important Features
16 | - ✅ High performance (uses ALL the available cores, in-memory storage)
17 | - ✅ Low memory usage (~200 MB of RAM per 1,000,000 peers)
18 | - ✅ Zero setup (launch it and it just works)
19 | 
20 | ### Tracker-related BitTorrent Enhancement Proposals
21 | 
22 | #### Final and Active Process BEPs
23 | - ✅ **BEP 3:** [The BitTorrent Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html)
24 | #### Accepted BEPs
25 | - ✅ **BEP 15:** [UDP Tracker Protocol](https://www.bittorrent.org/beps/bep_0015.html)
26 | - ✅ **BEP 23:** [Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html)
27 | - 🔲 **BEP 27:** [Private Torrents](https://www.bittorrent.org/beps/bep_0027.html)
28 | #### Draft BEPs
29 | - ✅ **BEP 7:** [IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html)
30 | - ✅ **BEP 21:** [Extension for partial seeds](https://www.bittorrent.org/beps/bep_0021.html)
31 | - ✅ **BEP 24:** [Tracker Returns External IP](https://www.bittorrent.org/beps/bep_0024.html)
32 | - 🔲 **BEP 31:** [Tracker Failure Retry Extension](https://www.bittorrent.org/beps/bep_0031.html)
33 | - ✅ **BEP 41:** [UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html)
34 | - ✅ **BEP 48:** [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html)
35 | - ✅ **BEP 52:** [The BitTorrent Protocol Specification v2](https://www.bittorrent.org/beps/bep_0052.html)
36 | #### Deferred BEPs
37 | - ❌ **BEP 8:** [Tracker Peer Obfuscation](https://www.bittorrent.org/beps/bep_0008.html)
38 | 
39 | ### Other Features
40 | - ✅ HTTPS support
41 | - ✅ Database backups to disk
42 | - ❌ WebTorrent
43 | - 🔰 Infohash whitelist/blacklist
44 | - 🔰 Peer management (interval enforcement, cleanup, banning, etc)
45 | - 🔰 Metrics
46 | - 🔰 GeoIP support (statistics, peer restrictions)
47 | - **Feel free to propose features in the [Issues](https://github.com/Dahrkael/ExTracker/issues)**
48 | 
49 | ## Setup
50 | There are currently three main ways of running ExTracker:
51 | 
52 | ### Straight from source code
53 | For this method to work, you need to have **Erlang** and **Elixir** installed on your system
54 | - Clone the repository: `git clone https://github.com/Dahrkael/ExTracker.git && cd ExTracker`
55 | - If needed, modify the configuration in [config/runtime.exs](https://github.com/Dahrkael/ExTracker/blob/master/config/runtime.exs)
56 | - Run `MIX_ENV=prod iex -S mix`
57 | 
58 | ### From Releases
59 | Currently there are no official releases built (soon™️). You can, however, make your own and deploy it where needed:
60 | - Clone the repository: `git clone https://github.com/Dahrkael/ExTracker.git && cd ExTracker`
61 | - Run `MIX_ENV=prod mix release extracker` for Linux or `MIX_ENV=prod mix release extrackerw` for Windows
62 | - Find the release files inside the *_build/prod/rel/extracker* folder (if deploying to a different machine, make sure the OS and architecture are the same!)
63 | - Copy the folder to its final destination
64 | - If needed, modify the configuration in [releases/{VERSION}/runtime.exs](https://github.com/Dahrkael/ExTracker/blob/master/config/runtime.exs)
65 | - Run `bin/extracker start`
66 | 
67 | ### Docker
68 | For this method you can directly run the [available docker image](https://github.com/Dahrkael/ExTracker/pkgs/container/extracker/422008654?tag=latest): `docker run ghcr.io/dahrkael/extracker:latest`  
69 | or use it as part of docker-compose. There's an [example compose file](https://github.com/Dahrkael/ExTracker/blob/master/docker-compose.yml) available.
70 | 
71 | > [!NOTE]
72 | > Since modifying the [runtime.exs](https://github.com/Dahrkael/ExTracker/blob/master/config/runtime.exs) file to tune the configuration inside the container is not easy, you can also configure it using **Environment Variables**; see the example compose file for the complete list.
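> A minimal inline sketch (illustrative values; any `EXTRACKER_*` variable from the compose file can be passed the same way):
>
> ```
> docker run -e EXTRACKER_HTTP_PORT=6969 -e EXTRACKER_SCRAPE_ENABLED=true ghcr.io/dahrkael/extracker:latest
> ```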
73 | 
74 | ## Copyright and license
75 | 
76 | Copyright (c) Dahrkael \<dahrkael at outlook dot com\>  
77 | Distributed under the terms of the Apache License, Version 2.0. Please refer to the [LICENSE file](https://github.com/Dahrkael/ExTracker/blob/master/LICENSE) in the repository root directory for details.
78 | 


--------------------------------------------------------------------------------
/config/config.exs:
--------------------------------------------------------------------------------
1 | import Config
2 | 
3 | import_config "#{Mix.env()}.exs"
4 | 


--------------------------------------------------------------------------------
/config/dev.exs:
--------------------------------------------------------------------------------
 1 | import Config
 2 | 
 3 | config :extracker,
 4 |   compress_lookups: true,
 5 |   named_lookups: true,
 6 |   ipv4_enabled: true,
 7 |   ipv4_bind_address: "0.0.0.0",
 8 |   ipv6_enabled: true,
 9 |   ipv6_bind_address: "::",
10 |   http_enabled: true,
11 |   http_port: 6969,
12 |   https_enabled: false,
13 |   https_port: 7070,
14 |   https_keyfile: "",
15 |   udp_enabled: true,
16 |   udp_port: 6969,
17 |   udp_routers: -1,
18 |   udp_recbuf_size: -1,
19 |   udp_sndbuf_size: -1,
20 |   udp_buffer_size: -1,
21 |   connection_id_secret: 87178291199,
22 |   scrape_enabled: true,
23 |   force_compact_peers: false,
24 |   return_external_ip: true,
25 |   max_peers_returned: 25,
26 |   announce_interval: 60,
27 |   announce_interval_min: 10,
28 |   restrict_hashes: "blacklist",
29 |   restrict_hashes_file: "~/hashes.txt",
30 |   restrict_useragents: "blacklist",
31 |   restrict_useragents_file: "~/useragents.txt",
32 |   cleaning_interval: 1_000,
33 |   swarm_clean_delay: (15 * 1_000),
34 |   peer_cleanup_delay: (30 * 1_000),
35 |   backup_auto_enabled: true,
36 |   backup_auto_load_on_startup: true,
37 |   backup_auto_interval: 60_000,
38 |   backup_auto_path: "~/extracker.bck",
39 |   backup_display_stats: true,
40 |   geoip_enabled: false,
41 |   geoip_license_key: "",
42 |   telemetry_enabled: true,
43 |   telemetry_port: 9568,
44 |   telemetry_basic: true,
45 |   telemetry_prometheus: true,
46 |   reverse_proxy_address: "",
47 |   debug: true
48 | 
49 | config :logger,
50 |   level: :debug
51 | 


--------------------------------------------------------------------------------
/config/prod.exs:
--------------------------------------------------------------------------------
 1 | import Config
 2 | 
 3 | config :extracker,
 4 |   compress_lookups: true,
 5 |   named_lookups: false,
 6 |   ipv4_enabled: true,
 7 |   ipv4_bind_address: "0.0.0.0",
 8 |   ipv6_enabled: true,
 9 |   ipv6_bind_address: "::",
10 |   http_enabled: true,
11 |   http_port: 6969,
12 |   https_enabled: false,
13 |   https_port: 7070,
14 |   https_keyfile: "",
15 |   udp_enabled: true,
16 |   udp_port: 6969,
17 |   udp_routers: -1,
18 |   udp_recbuf_size: -1,
19 |   udp_sndbuf_size: -1,
20 |   udp_buffer_size: -1,
21 |   connection_id_secret: 87178291199,
22 |   scrape_enabled: false,
23 |   force_compact_peers: false,
24 |   return_external_ip: false,
25 |   max_peers_returned: 100,
26 |   announce_interval: (60 * 30),
27 |   announce_interval_min: 60,
28 |   restrict_hashes: false,
29 |   restrict_hashes_file: "~/hashes.txt",
30 |   restrict_useragents: false,
31 |   restrict_useragents_file: "~/useragents.txt",
32 |   cleaning_interval: (60 * 1000),
33 |   swarm_clean_delay: (10 * 60 * 1_000),
34 |   peer_cleanup_delay: (60 * 60 * 1_000),
35 |   backup_auto_enabled: false,
36 |   backup_auto_load_on_startup: false,
37 |   backup_auto_interval: (60 * 60 * 1000),
38 |   backup_auto_path: "~/extracker.bck",
39 |   backup_display_stats: true,
40 |   geoip_enabled: false,
41 |   geoip_license_key: "",
42 |   telemetry_enabled: false,
43 |   telemetry_port: 9568,
44 |   telemetry_basic: false,
45 |   telemetry_prometheus: true,
46 |   reverse_proxy_address: "",
47 |   debug: false
48 | 
49 | config :logger,
50 |   level: :notice
51 | 


--------------------------------------------------------------------------------
/config/runtime.exs:
--------------------------------------------------------------------------------
 1 | import Config
 2 | 
 3 | if config_env() in [:prod] do
 4 | 
 5 |   config :extracker,
 6 |     ipv4_enabled: true, # listen or not on IPv4
 7 |     ipv4_bind_address: "0.0.0.0", # IP to bind to when IPv4 is enabled. "0.0.0.0" listens on every address
 8 |     ipv6_enabled: true, # listen or not on IPv6
 9 |     ipv6_bind_address: "::", # IP to bind to when IPv6 is enabled. "::" listens on every address
10 |     http_enabled: true, # enable the HTTP endpoint to fulfill client requests
11 |     http_port: 6969, # port used by the HTTP endpoint if enabled
12 |     https_enabled: false, # enable the TLS endpoint to fulfill client requests
13 |     https_port: 7070, # port used by the TLS endpoint if enabled
14 |     https_keyfile: "", # path to the certificate file for TLS
15 |     udp_enabled: true, # enable the UDP endpoint to fulfill client requests
16 |     udp_port: 6969, # port used by the UDP endpoint if enabled
17 |     udp_routers: -1, # amount of processes listening to UDP requests. -1 means one per VM scheduler
18 |     udp_recbuf_size: -1, # kernel receive buffer size for the UDP socket. 512_000 is a good number, -1 means the OS decides
19 |     udp_sndbuf_size: -1, # kernel send buffer size for the UDP socket. 512_000 is a good number, -1 means the OS decides
20 |     udp_buffer_size: -1, # buffer size for the UDP socket. 1_048_576 is a good number, -1 means the OS decides
21 |     connection_id_secret: 87178291199, # prime used as salt for connection id generation
22 |     scrape_enabled: false, # allow scrape requests on all enabled endpoints
23 |     force_compact_peers: false, # always respond to HTTP(S) requests with a compact peer list
24 |     return_external_ip: false, # return the client's visible IP as per BEP24
25 |     max_peers_returned: 100, # maximum number of peers sent to announcing clients (non-negative)
26 |     announce_interval: (30 * 60), # seconds that clients SHOULD wait before announcing again
27 |     announce_interval_min: 60, # seconds that clients HAVE to wait before announcing again; requests more frequent than this are ignored
28 |     restrict_hashes: false, # optionally filter hashes using access lists (valid options: "whitelist", "blacklist", "none"/false/empty)
29 |     restrict_hashes_file: "~/hashes.txt", # file from which the (dis)allowed hash list is loaded
30 |     restrict_useragents: false, # optionally filter user-agents (in the HTTP(S) port) using access lists (valid options: "whitelist", "blacklist", "none"/false/empty)
31 |     restrict_useragents_file: "~/useragents.txt", # file from which the (dis)allowed user-agent list is loaded
32 |     cleaning_interval: (60 * 1000), # milliseconds between cleaning passes
33 |     swarm_clean_delay: (10 * 60 * 1_000), # milliseconds after a swarm is marked as 'needs cleaning'
34 |     peer_cleanup_delay: (60 * 60 * 1_000), # milliseconds after a peer is considered stale and removed
35 |     compress_lookups: true, # compressed lookup tables take less space while losing some performance
36 |     named_lookups: false, # identify each swarm lookup table as "swarm_HASH" instead of just "swarm". Will exhaust the atom table at some point
37 |     backup_auto_enabled: false, # enable automatic backups of current swarms and peers creation
38 |     backup_auto_load_on_startup: false, # load the backup file specified in backup_auto_path when the tracker starts
39 |     backup_auto_interval: (60 * 60 * 1000), # milliseconds between automatic backups
40 |     backup_auto_path: "~/extracker.bck", # file path used for automatic backups
41 |     backup_display_stats: true, # log how many peers and swarms are registered when a backup triggers
42 |     geoip_enabled: false, # lookup and store the country of each peer
43 |     geoip_license_key: "", # MaxMind's license key. Required for the geoip features
44 |     telemetry_enabled: false, # enable telemetry events gathering
45 |     telemetry_port: 9568, # port in which telemetry endpoints are served (via HTTP)
46 |     telemetry_basic: false, # expose a simple HTML stats endpoint at '/tracker-stats.html'
47 |     telemetry_prometheus: true, # expose a Prometheus scrape endpoint at '/prometheus'
48 |     reverse_proxy_address: "", # specify the address of a reverse proxy if present (caddy, nginx, apache, etc)
49 |     debug: false # enable extra debug logs and checks
50 | 
51 |   config :logger, level: :notice # log minimum level. info and debug may get spammy
52 | 
53 | end
54 | 
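Note: the options above can also be overridden through EXTRACKER_* environment variables (see docker-compose.yml for the full list); the mapping is implemented in lib/ex_tracker/config/system_env.ex. A minimal sketch of the idea, with hypothetical names:

    # Hypothetical sketch: apply one EXTRACKER_* override to the :extracker config.
    defmodule EnvOverrideSketch do
      def apply_override(key, env_name, parser \\ &Function.identity/1) do
        case System.get_env(env_name) do
          nil -> :ok
          raw -> Application.put_env(:extracker, key, parser.(raw))
        end
      end
    end

    # e.g. EnvOverrideSketch.apply_override(:http_port, "EXTRACKER_HTTP_PORT", &String.to_integer/1)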


--------------------------------------------------------------------------------
/config/test.exs:
--------------------------------------------------------------------------------
 1 | import Config
 2 | 
 3 | config :extracker,
 4 |   compress_lookups: true,
 5 |   named_lookups: false,
 6 |   ipv4_enabled: true,
 7 |   ipv4_bind_address: "0.0.0.0",
 8 |   ipv6_enabled: true,
 9 |   ipv6_bind_address: "::",
10 |   http_enabled: true,
11 |   http_port: 6969,
12 |   https_enabled: false,
13 |   https_port: 7070,
14 |   https_keyfile: "",
15 |   udp_enabled: true,
16 |   udp_port: 6969,
17 |   udp_routers: -1,
18 |   udp_recbuf_size: -1,
19 |   udp_sndbuf_size: -1,
20 |   udp_buffer_size: -1,
21 |   connection_id_secret: 87178291199,
22 |   scrape_enabled: false,
23 |   force_compact_peers: false,
24 |   return_external_ip: false,
25 |   max_peers_returned: 100,
26 |   announce_interval: (60 * 30),
27 |   announce_interval_min: 60,
28 |   restrict_hashes: false,
29 |   restrict_hashes_file: "~/hashes.txt",
30 |   cleaning_interval: (60 * 1000),
31 |   swarm_clean_delay: (10 * 60 * 1_000),
32 |   peer_cleanup_delay: (60 * 60 * 1_000),
33 |   backup_auto_enabled: false,
34 |   backup_auto_load_on_startup: false,
35 |   backup_auto_interval: (60 * 60 * 1000),
36 |   backup_auto_path: "~/extracker.bck",
37 |   backup_display_stats: true,
38 |   geoip_enabled: false,
39 |   geoip_license_key: "",
40 |   telemetry_enabled: false,
41 |   telemetry_port: 9568,
42 |   telemetry_basic: false,
43 |   telemetry_prometheus: true,
44 |   reverse_proxy_address: "",
45 |   debug: false
46 | 
47 | config :logger,
48 |   level: :debug
49 | 
50 | config :locus,
51 |   license_key: ""
52 | 


--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
 1 | services:
 2 |   extracker:
 3 |     image: ghcr.io/dahrkael/extracker:latest
 4 |     container_name: extracker
 5 |     build:
 6 |       context: .
 7 |       dockerfile: Dockerfile
 8 |       args:
 9 |         RELEASE_NAME: "extracker"
10 |     restart: unless-stopped
11 |     ports:
12 |       - "6969:6969/tcp"  # HTTP port
13 |       - "6969:6969/udp"  # UDP port
14 |       - "7070:7070/tcp"  # HTTPS port
15 |     volumes:
16 |       - extracker_backups:/backups
17 |       - extracker_config:/config
18 |     environment:
19 | #      - EXTRACKER_IPV4_ENABLED=
20 | #      - EXTRACKER_IPV4_BIND_ADDRESS=
21 | #      - EXTRACKER_IPV6_ENABLED=
22 | #      - EXTRACKER_IPV6_BIND_ADDRESS=
23 | #      - EXTRACKER_HTTP_ENABLED=
24 | #      - EXTRACKER_HTTP_PORT=
25 | #      - EXTRACKER_HTTPS_ENABLED=
26 | #      - EXTRACKER_HTTPS_PORT=
27 | #      - EXTRACKER_HTTPS_KEYFILE=
28 | #      - EXTRACKER_UDP_ENABLED=
29 | #      - EXTRACKER_UDP_PORT=
30 | #      - EXTRACKER_UDP_ROUTERS=
31 | #      - EXTRACKER_UDP_BUFFER_SIZE=
32 | #      - EXTRACKER_UDP_RECBUF_SIZE=
33 | #      - EXTRACKER_UDP_SNDBUF_SIZE=
34 | #      - EXTRACKER_SCRAPE_ENABLED=
35 | #      - EXTRACKER_CONNECTION_ID_SECRET=
36 | #      - EXTRACKER_FORCE_COMPACT_PEERS=
37 | #      - EXTRACKER_RETURN_EXTERNAL_IP=
38 | #      - EXTRACKER_MAX_PEERS_RETURNED=
39 | #      - EXTRACKER_ANNOUNCE_INTERVAL=
40 | #      - EXTRACKER_ANNOUNCE_INTERVAL_MIN=
41 | #      - EXTRACKER_RESTRICT_HASHES=
42 | #      - EXTRACKER_RESTRICT_HASHES_FILE=/config/hashes.txt
43 | #      - EXTRACKER_RESTRICT_USERAGENTS=
44 | #      - EXTRACKER_RESTRICT_USERAGENTS_FILE=/config/useragents.txt
45 | #      - EXTRACKER_CLEANING_INTERVAL=
46 | #      - EXTRACKER_SWARM_CLEAN_DELAY=
47 | #      - EXTRACKER_PEER_CLEANUP_DELAY=
48 | #      - EXTRACKER_COMPRESS_LOOKUPS=
49 | #      - EXTRACKER_NAMED_LOOKUPS=
50 | #      - EXTRACKER_BACKUP_AUTO_ENABLED=
51 | #      - EXTRACKER_BACKUP_AUTO_LOAD_ON_STARTUP=
52 | #      - EXTRACKER_BACKUP_AUTO_INTERVAL=
53 | #      - EXTRACKER_BACKUP_AUTO_PATH=/backups/extracker.bck
54 | #      - EXTRACKER_BACKUP_DISPLAY_STATS=
55 | #      - EXTRACKER_GEOIP_ENABLED=
56 | #      - EXTRACKER_GEOIP_LICENSE_KEY=
57 | #      - EXTRACKER_TELEMETRY_ENABLED=
58 | #      - EXTRACKER_TELEMETRY_PORT=
59 | #      - EXTRACKER_TELEMETRY_BASIC=
60 | #      - EXTRACKER_TELEMETRY_PROMETHEUS=
61 | #      - EXTRACKER_REVERSE_PROXY_ADDRESS=
62 | #      - EXTRACKER_DEBUG=false
63 | 
64 | volumes:
65 |   extracker_backups:
66 |   extracker_config:
67 | 


--------------------------------------------------------------------------------
/lib/ex_tracker.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker do
 2 | 
 3 |   def hello() do
 4 |     :world
 5 |   end
 6 | 
 7 |   def version do
 8 |     "0.7.0"
 9 |   end
10 | 
11 |   def web_about do
12 |     "<p>ExTracker #{ExTracker.version()}</p><a href=\"https://github.com/Dahrkael/ExTracker\">https://github.com/Dahrkael/ExTracker</a>"
13 |   end
14 | 
15 |   def console_about() do
16 |     "--------------------------------------\n" <>
17 |     " ExTracker #{ExTracker.version()}\n" <>
18 |     " https://github.com/Dahrkael/ExTracker\n" <>
19 |     "--------------------------------------"
20 |   end
21 | 
22 |   def debug_enabled() do
23 |     Application.get_env(:extracker, :debug, false)
24 |   end
25 | end
26 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/accesslist.ex:
--------------------------------------------------------------------------------
  1 | # ExTracker.Accesslist is a simple MapSet-like implementation using ETS so each
  2 | # process can do the lookups on its own
  3 | defmodule ExTracker.Accesslist do
  4 | 
  5 |   @table_prefix :accesslist
  6 | 
  7 |   use GenServer
  8 |   require Logger
  9 | 
 10 |   def start_link(args) do
 11 |     name = Keyword.get(args, :name, __MODULE__)
 12 |     GenServer.start_link(__MODULE__, args, name: name)
 13 |   end
 14 | 
 15 |   #==========================================================================
 16 |   # Client
 17 |   #==========================================================================
 18 | 
 19 |   def contains(name, entry) do
 20 |     table = :"#{@table_prefix}_#{name}"
 21 |     case :ets.lookup(table, entry) do
 22 |       [{^entry}] -> true
 23 |       _ -> false
 24 |     end
 25 |   end
 26 | 
 27 |   def add(name, entry), do: GenServer.cast(name, {:add, entry})
 28 |   def remove(name, entry), do: GenServer.cast(name, {:remove, entry})
 29 |   def from_file(name, path), do: GenServer.call(name, {:load_file, path})
 30 | 
 31 |   #==========================================================================
 32 |   # Server (callbacks)
 33 |   #==========================================================================
 34 | 
 35 |   @impl true
 36 |   def init(args) do
 37 |     name = Keyword.get(args, :name, __MODULE__)
 38 |     table = :"#{@table_prefix}_#{name}"
 39 |     ets_args = [:set, :named_table, :protected]
 40 |     :ets.new(table, ets_args)
 41 | 
 42 |     state = %{table: table}
 43 | 
 44 |     case Keyword.get(args, :file) do
 45 |       nil -> :ok
 46 |       path -> load_file(path, state)
 47 |     end
 48 | 
 49 |     {:ok, state}
 50 |   end
 51 | 
 52 |   @impl true
 53 |   def terminate(_reason, _state) do
 54 |   end
 55 | 
 56 |   @impl true
 57 |   def handle_info(_msg, state) do
 58 |     {:noreply, state}
 59 |   end
 60 | 
 61 |   @impl true
 62 |   def handle_cast({:add, entry}, state) do
 63 |     case :ets.insert_new(state.table, {entry}) do
 64 |       true ->
 65 |         Logger.debug("accesslist #{state.table}: added entry '#{entry}'")
 66 |       false ->
 67 |         Logger.debug("accesslist #{state.table}: entry '#{entry}' already exists")
 68 |     end
 69 |     {:noreply, state}
 70 |   end
 71 | 
 72 |   @impl true
 73 |   def handle_cast({:remove, entry}, state) do
 74 |     with [{^entry}] <- :ets.lookup(state.table, entry),
 75 |       true <- :ets.delete(state.table, entry) do
 76 |         Logger.debug("accesslist #{state.table}: removed entry '#{entry}'")
 77 |     else
 78 |       _ ->
 79 |         Logger.debug("accesslist #{state.table}: missing entry '#{entry}'")
 80 |     end
 81 |     {:noreply, state}
 82 |   end
 83 | 
 84 |   @impl true
 85 |   def handle_call({:load_file, path}, _from, state) do
 86 |     load_file(path, state)
 87 |     {:reply, :ok, state}
 88 |   end
 89 | 
 90 |   defp load_file(path, state) do
 91 |     absolute_path = path |> Path.expand()
 92 |     # TODO could use File.Stream! with proper error handling if the list grows too big
 93 |     list = case File.read(absolute_path) do
 94 |       {:ok, data} ->
 95 |         list = data
 96 |         |> String.split("\n", trim: true)
 97 |         |> Enum.map(&String.trim/1)
 98 |         |> Enum.reject(&(&1 == ""))
 99 |         |> MapSet.new()
100 | 
101 |         Logger.notice("loaded accesslist from file #{absolute_path} containing #{MapSet.size(list)} entries")
102 |         list
103 |       {:error, error} ->
104 |         Logger.error("failed to load access list from file '#{absolute_path}': #{:file.format_error(error)}")
105 |         MapSet.new()
106 |     end
107 | 
108 |     # clean the table first and then insert the data
109 |     # TODO error handling, data races?
110 |     :ets.delete_all_objects(state.table)
111 |     list |> Enum.each(&(:ets.insert_new(state.table, {&1})))
112 |   end
113 | end
114 | 
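Note: ETS objects must be tuples, so entries are stored wrapped as {entry}. A minimal usage sketch for the module above, assuming a standalone start (in the application it runs under the supervisor with ids such as :whitelist_hashes; see application.ex):

    # start a named access list and add an entry (add/2 is an async cast)
    {:ok, _pid} = ExTracker.Accesslist.start_link(name: :whitelist_hashes)
    ExTracker.Accesslist.add(:whitelist_hashes, "aabbccddeeff00112233445566778899aabbccdd")

    # contains/2 reads the ETS table directly, with no GenServer round-trip;
    # it returns true once the cast above has been processed
    ExTracker.Accesslist.contains(:whitelist_hashes, "aabbccddeeff00112233445566778899aabbccdd")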


--------------------------------------------------------------------------------
/lib/ex_tracker/application.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.Application do
  2 |   # See https://hexdocs.pm/elixir/Application.html
  3 |   # for more information on OTP Applications
  4 |   @moduledoc false
  5 |   require Logger
  6 | 
  7 |   use Application
  8 |   alias ExTracker.Utils
  9 | 
 10 |   @impl true
 11 |   def start(_type, _args) do
 12 |     # override the configuration with whatever environment variables are set
 13 |     Extracker.Config.SystemEnvironment.load()
 14 | 
 15 |     # move the MaxMind's license from extracker to locus
 16 |     Application.put_env(:locus, :license_key, Application.get_env(:extracker, :geoip_license_key, ""))
 17 | 
 18 |     # print out the configuration to be sure what values are being used after reading everything
 19 |     IO.puts(ExTracker.console_about())
 20 |     print_current_config()
 21 | 
 22 |     # check before spawning anything if the provided bind ips are valid
 23 |     if !check_ipv4() or !check_ipv6() do
 24 |       # the user should fix the config instead of trying to boot an erroneous server
 25 |       exit(:misconfigured_address)
 26 |     end
 27 | 
 28 |     required_children = [
 29 |       { ExTracker.SwarmFinder, {}},
 30 |       { ExTracker.SwarmCleaner, {}},
 31 |       { ExTracker.Backup, {}}
 32 |     ]
 33 | 
 34 |     optional_children = []
 35 |       ++ get_geoip_children()
 36 |       ++ get_telemetry_children()
 37 |       ++ get_accesslist_children()
 38 | 
 39 |     ipv4_optional_children = case Application.get_env(:extracker, :ipv4_enabled) do
 40 |       true ->
 41 |         Logger.notice("IPv4 enabled on address #{inspect(Application.get_env(:extracker, :ipv4_bind_address))}")
 42 |         []
 43 |           ++ get_http_children(:inet)
 44 |           ++ get_https_children(:inet)
 45 |           ++ get_udp_children(:inet)
 46 |       _ -> []
 47 |     end
 48 | 
 49 |     ipv6_optional_children = case Application.get_env(:extracker, :ipv6_enabled) do
 50 |       true ->
 51 |         Logger.notice("IPv6 enabled on address #{inspect(Application.get_env(:extracker, :ipv6_bind_address))}")
 52 |         []
 53 |           ++ get_http_children(:inet6)
 54 |           ++ get_https_children(:inet6)
 55 |           ++ get_udp_children(:inet6)
 56 |       _ -> []
 57 |     end
 58 | 
 59 |     children = Enum.concat([optional_children, required_children, ipv4_optional_children, ipv6_optional_children])
 60 | 
 61 |     # See https://hexdocs.pm/elixir/Supervisor.html
 62 |     # for other strategies and supported options
 63 |     opts = [strategy: :one_for_one, name: ExTracker.Supervisor]
 64 |     Supervisor.start_link(children, opts)
 65 |   end
 66 | 
 67 |   defp print_current_config() do
 68 |     config =
 69 |       Application.get_all_env(:extracker)
 70 |       |> Enum.sort_by(fn {key, _value} -> key end)
 71 |       |> Enum.map(fn {key, value} -> "#{Atom.to_string(key)}: #{inspect(value)}" end)
 72 |       |> Enum.join("\n")
 73 | 
 74 |     IO.puts(["configuration to be used:\n"] ++ config)
 75 |   end
 76 | 
 77 |   defp check_ipv4() do
 78 |     case Application.get_env(:extracker, :ipv4_enabled, false) do
 79 |       true ->
 80 |         case Application.get_env(:extracker, :ipv4_bind_address) do
 81 |           nil ->
 82 |             Logger.error("ipv4 mode is enabled but there's no configured ipv4 bind address")
 83 |             false
 84 |           addr ->
 85 |             case :inet.parse_ipv4_address(to_charlist(addr)) do
 86 |               {:ok, _parsed} ->
 87 |                 true
 88 |               {:error, :einval} ->
 89 |                 Logger.error("configured ipv4 bind address is not a valid v4 address")
 90 |                 false
 91 |             end
 92 |         end
 93 |       _ -> true
 94 |     end
 95 |   end
 96 | 
 97 |   defp check_ipv6() do
 98 |     case Application.get_env(:extracker, :ipv6_enabled, false) do
 99 |       true ->
100 |         case Application.get_env(:extracker, :ipv6_bind_address) do
101 |           nil ->
102 |             Logger.error("ipv6 mode is enabled but there's no configured ipv6 bind address")
103 |             false
104 |           addr ->
105 |             case :inet.parse_ipv6_address(to_charlist(addr)) do
106 |               {:ok, _parsed} ->
107 |                 true
108 |               {:error, :einval} ->
109 |                 Logger.error("configured ipv6 bind address is not a valid v6 address")
110 |                 false
110 |             end
111 |         end
113 |       _ -> true
114 |     end
115 |   end
116 | 
117 |   defp get_geoip_children() do
118 |     case Application.get_env(:extracker, :geoip_enabled) do
119 |       true ->
120 |         Logger.notice("GeoIP lookup enabled")
121 |         [:locus.loader_child_spec(:country, {:maxmind, "GeoLite2-Country"})]
122 |       _ -> []
123 |     end
124 |   end
125 | 
126 |   defp get_telemetry_children() do
127 |     case Application.get_env(:extracker, :telemetry_enabled) do
128 |       true ->
129 |         Logger.notice("Telemetry enabled")
130 |         [{ ExTracker.Telemetry, {}}]
131 |       _ ->
132 |         Logger.notice("Telemetry disabled")
133 |         []
134 |     end
135 |   end
136 | 
137 |   defp get_accesslist_children() do
138 |     [:hashes, :useragents]
139 |     |> Enum.map(fn name ->
140 |       title = Atom.to_string(name) |> String.capitalize()
141 |       file = Application.get_env(:extracker, :"restrict_#{name}_file", "")
142 |       case Application.get_env(:extracker, :"restrict_#{name}", false) do
143 |         "whitelist" ->
144 |           Logger.notice("#{title} Whitelist enabled")
145 |           id = :"whitelist_#{name}"
146 |           [Supervisor.child_spec({ExTracker.Accesslist, [name: id, file: file]}, id: id)]
147 |         "blacklist" ->
148 |           Logger.notice("#{title} Blacklist enabled")
149 |           id = :"blacklist_#{name}"
150 |           [Supervisor.child_spec({ExTracker.Accesslist, [name: id, file: file]}, id: id)]
151 |         _ ->
152 |           Logger.notice("#{title} Accesslist disabled")
153 |           []
154 |       end
155 |     end)
156 |     |> List.flatten()
157 |   end
158 | 
159 |   defp get_http_children(family) do
160 |     case Application.get_env(:extracker, :http_enabled) do
161 |       true ->
162 |         ip = case family do
163 |           :inet -> Utils.get_configured_ipv4()
164 |           :inet6 -> Utils.get_configured_ipv6()
165 |         end
166 |         port = Application.get_env(:extracker, :http_port)
167 | 
168 |         http_spec = Supervisor.child_spec(
169 |           {Plug.Cowboy, scheme: :http, plug: ExTracker.HTTP.Router, options: [
170 |             net: family,
171 |             ip: ip,
172 |             port: port,
173 |             compress: true,
174 |             ref: "http_router_#{to_string(family)}",
175 |             dispatch: dispatch(),
176 |             transport_options: [
177 |               num_acceptors: 100,
178 |               max_connections: 100_000,
179 |             ]
180 |           ] ++ (if family == :inet6, do: [ipv6_v6only: true], else: [])
181 |           },
182 |           id: :"http_supervisor_#{family}"
183 |         )
184 | 
185 |         Logger.notice("HTTP mode enabled on port #{port}")
186 |         #if Application.ensure_started(:ranch) do
187 |         #  IO.inspect(:ranch.info(http_spec.id), label: "HTTP info")
188 |         #end
189 | 
190 |         [http_spec]
191 |       false ->
192 |         Logger.notice("HTTP mode disabled")
193 |         []
194 |     end
195 |   end
196 | 
197 |   defp get_https_children(family) do
198 |     case Application.get_env(:extracker, :https_enabled) do
199 |       true ->
200 |         ip = case family do
201 |           :inet -> Utils.get_configured_ipv4()
202 |           :inet6 -> Utils.get_configured_ipv6()
203 |         end
204 |         port = Application.get_env(:extracker, :https_port)
205 |         keyfile = Application.get_env(:extracker, :https_keyfile, "") |> Path.expand()
206 | 
207 |         https_spec = Supervisor.child_spec(
208 |           {Plug.Cowboy, scheme: :https, plug: ExTracker.HTTP.Router, options: [
209 |             net: family,
210 |             ip: ip,
211 |             port: port,
212 |             keyfile: keyfile,
213 |             compress: true,
214 |             ref: "https_router_#{to_string(family)}",
215 |             dispatch: dispatch(),
216 |             transport_options: [
217 |               num_acceptors: 100,
218 |               max_connections: 100_000,
219 |             ]
220 |           ] ++ (if family == :inet6, do: [ipv6_v6only: true], else: [])
221 |           },
222 |           id: :"https_supervisor_#{family}"
223 |         )
224 | 
225 |         Logger.notice("HTTPS mode enabled on port #{port}")
226 |         #if Application.ensure_started(:ranch) do
227 |         #  IO.inspect(:ranch.info(https_spec.id), label: "HTTPS info")
228 |         #end
229 | 
230 |         [https_spec]
231 |       false ->
232 |         Logger.notice("HTTPS mode disabled")
233 |         []
234 |     end
235 |   end
236 | 
237 |   defp get_udp_children(family) do
238 |     case Application.get_env(:extracker, :udp_enabled) do
239 |       true ->
240 |         n = case Application.get_env(:extracker, :udp_routers, -1) do
241 |           -1 -> 1..System.schedulers_online()
242 |           n -> 1..n
243 |         end
244 | 
245 |         port = Application.get_env(:extracker, :udp_port)
246 |         Logger.notice("UDP mode enabled on port #{port} using #{Enum.count(n)} routers")
247 | 
248 |         Enum.map(n, fn index ->
249 |           Supervisor.child_spec(
250 |             {ExTracker.UDP.Supervisor, [family: family, port: port, index: index - 1]},
251 |             id: :"udp_supervisor_#{family}_#{index}")
252 |         end)
253 | 
254 |       false ->
255 |         Logger.notice("UDP mode disabled")
256 |         []
257 |     end
258 |   end
259 | 
260 |   defp dispatch() do
261 |     [
262 |       { :_, [
263 |         #{ "/ws", ExTracker.Websocket, [] },
264 |         { :_, Plug.Cowboy.Handler, { ExTracker.HTTP.Router, [] } }
265 |       ] }
266 |     ]
267 |   end
268 | end
269 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/backup.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.Backup do
  2 | 
  3 |   use GenServer
  4 |   require Logger
  5 | 
  6 |   alias ExTracker.SwarmFinder
  7 | 
  8 |   def start_link(args) do
  9 |     GenServer.start_link(__MODULE__, args, name: __MODULE__)
 10 |   end
 11 | 
 12 |   #==========================================================================
 13 |   # Client
 14 |   #==========================================================================
 15 | 
 16 |   def make(path) do
 17 |     GenServer.cast(__MODULE__, {:make, path})
 18 |   end
 19 | 
 20 |   def make_sync(path) do
 21 |     GenServer.call(__MODULE__, {:make, path}, :infinity)
 22 |   end
 23 | 
 24 |   def restore(path) do
 25 |     GenServer.cast(__MODULE__, {:restore, path})
 26 |   end
 27 | 
 28 |   #==========================================================================
 29 |   # Server (callbacks)
 30 |   #==========================================================================
 31 | 
 32 |   @impl true
 33 |   def init(_args) do
 34 |     if Application.get_env(:extracker, :backup_auto_load_on_startup) do
 35 |       Application.get_env(:extracker, :backup_auto_path) |> restore()
 36 |     end
 37 | 
 38 |     schedule_backup()
 39 |     {:ok, {}}
 40 |   end
 41 | 
 42 |   @impl true
 43 |   def terminate(_reason, _state) do
 44 |   end
 45 | 
 46 |   defp schedule_backup() do
 47 |     Process.send_after(self(), :auto, Application.get_env(:extracker, :backup_auto_interval))
 48 |   end
 49 | 
 50 |   @impl true
 51 |   def handle_info(:auto, state) do
 52 |     if Application.get_env(:extracker, :backup_auto_enabled) do
 53 |       Logger.notice("auto backup triggered")
 54 |       Application.get_env(:extracker, :backup_auto_path) |> save()
 55 |     end
 56 | 
 57 |     # schedule the backup even if it's disabled right now, as it may be enabled at runtime
 58 |     schedule_backup()
 59 |     {:noreply, state}
 60 |   end
 61 | 
 62 |   @impl true
 63 |   def handle_info(_msg, state) do
 64 |     {:noreply, state}
 65 |   end
 66 | 
 67 |   @impl true
 68 |   def handle_call({:make, path}, _from, state) do
 69 |     save(path)
 70 |     {:reply, :ok, state}
 71 |   end
 72 | 
 73 |   @impl true
 74 |   def handle_cast({:make, path}, state) do
 75 |     save(path)
 76 |     {:noreply, state}
 77 |   end
 78 | 
 79 |   @impl true
 80 |   def handle_cast({:restore, path}, state) do
 81 |     load(path)
 82 |     {:noreply, state}
 83 |   end
 84 | 
 85 |   defp save(file_path) do
 86 |     file_path = Path.expand(file_path)
 87 |     case create_path(file_path) do
 88 |       :ok ->
 89 |         Logger.notice("creating backup in #{file_path}")
 90 |         # retrieve all the existing swarms from the index
 91 |         swarm_entries = :ets.tab2list(SwarmFinder.swarms_table_name())
 92 |         # merge the actual swarm data (all the peers) with the index data
 93 |         swarms_backup = swarm_entries
 94 |           |> Task.async_stream(fn {hash, table, created_at, _last_cleaned} ->
 95 |             swarm_data = try do
 96 |               :ets.tab2list(table)
 97 |             rescue
 98 |               e in ArgumentError ->
 99 |                 Logger.debug("Backup.save/1: #{Exception.message(e)}")
100 |                 []
101 |             end
102 | 
103 |             {hash, swarm_data, created_at}
104 |           end)
105 |           |> Enum.map(&elem(&1, 1))
106 | 
107 |         backup = %{
108 |           swarms: swarms_backup
109 |         }
110 | 
111 |         File.write(file_path, :erlang.term_to_binary(backup))
112 | 
113 |         if Application.get_env(:extracker, :backup_display_stats) do
114 |           ExTracker.Cmd.show_peer_count(:all)
115 |           ExTracker.Cmd.show_swarm_count()
116 |         end
117 | 
118 |         Logger.notice("backup created")
119 |       :error ->
120 |         Logger.error("backup failed")
121 |     end
122 |   end
123 | 
124 |   defp load(file_path) do
125 |     file_path = Path.expand(file_path)
126 |     Logger.notice("restoring backup from #{file_path}")
127 |     if Application.get_env(:extracker, :backup_display_stats) do
128 |       ExTracker.Cmd.show_peer_count(:all)
129 |       ExTracker.Cmd.show_swarm_count()
130 |     end
131 | 
132 |     backup =
133 |       case File.read(file_path) do
134 |         {:ok, binary} ->
135 |           :erlang.binary_to_term(binary)
136 |         {:error, reason} ->
137 |           Logger.error("backup loading failed: #{reason}")
138 |           %{}
139 |       end
140 | 
141 |     case Map.fetch(backup, :swarms) do
142 |       {:ok, swarms} ->
143 |         swarms
144 |         |> Task.async_stream(fn {hash, swarm_data, created_at} ->
145 |           # recreate the swarm table
146 |           {:ok, swarm} = SwarmFinder.find_or_create(hash) # FIXME this may fail if control list changes
147 |           # put the correct creation date
148 |           SwarmFinder.restore_creation_timestamp(hash, created_at)
149 |           # insert all the missing peers
150 |           Enum.each(swarm_data, fn peer -> :ets.insert_new(swarm, peer) end)
151 |         end)
152 |         |> Stream.run()
153 | 
154 |         Logger.notice("backup restored")
155 |         if Application.get_env(:extracker, :backup_display_stats) do
156 |           ExTracker.Cmd.show_peer_count(:all)
157 |           ExTracker.Cmd.show_swarm_count()
158 |         end
159 |       :error -> :ok
160 |     end
161 |     :ok
162 |   end
163 | 
164 |   # when the path has no directory component, default to the user's home directory
165 |   defp create_path(path) do
166 |     folder = case Path.split(path) do
167 |       [_filename] -> Path.expand("~")
168 |       parts -> Path.join(Enum.drop(parts, -1))
169 |     end
170 | 
171 |     case File.mkdir_p(folder) do
172 |       :ok -> :ok
173 |       {:error, reason} ->
174 |         Logger.error("error creating backup folder '#{folder}': #{inspect(reason)}")
175 |         :error
176 |     end
177 |   end
178 | end
179 | 
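A hypothetical IEx session against the client API above; the path is an example only:

```elixir
# asynchronous backup via cast; returns immediately
ExTracker.Backup.make("~/extracker.backup")
# synchronous backup via call; blocks until the file is written
ExTracker.Backup.make_sync("~/extracker.backup")
# repopulate the swarm tables from a previous backup
ExTracker.Backup.restore("~/extracker.backup")
```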


--------------------------------------------------------------------------------
/lib/ex_tracker/cmd.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.Cmd do
  2 |   require Logger
  3 |   alias ExTracker.Types.PeerID
  4 | 
  5 |   def shutdown() do
  6 |     # make a backup before shutting down
  7 |     if Application.get_env(:extracker, :backup_auto_enabled) do
  8 |       Application.get_env(:extracker, :backup_auto_path) |> ExTracker.Backup.make_sync()
  9 |     end
 10 | 
 11 |     Logger.critical("shutting down!")
 12 |     System.stop(0)
 13 |   end
 14 | 
 15 |   def show_swarm_list(show_peers) do
 16 |     swarms = ExTracker.SwarmFinder.get_swarm_list()
 17 |     Enum.each(swarms, fn swarm ->
 18 |       {hash, table, created_at, _last_cleaned} = swarm
 19 |       created = DateTime.from_unix!(created_at, :millisecond)
 20 | 
 21 |       info = %{
 22 |         "hash" => String.downcase(Base.encode16(hash)),
 23 |         "created" => DateTime.to_string(created),
 24 |         "total_memory" => (:ets.info(table, :memory) * :erlang.system_info(:wordsize)),
 25 |       }
 26 | 
 27 |       info = case show_peers do
 28 |         true -> Map.put(info, "peers", ExTracker.Swarm.get_all_peers(table, false))
 29 |         false -> Map.put(info, "peer_count", ExTracker.Swarm.get_peer_count(table))
 30 |       end
 31 | 
 32 |       IO.inspect(info, label: "Swarm", limit: :infinity)
 33 |     end)
 34 |     :ok
 35 |   end
 36 | 
 37 |   def show_biggest_swarms(count) do
 38 |     ExTracker.SwarmFinder.get_swarm_list()
 39 |       |> Task.async_stream(fn {hash, table, created_at, _last_cleaned} ->
 40 |         {hash, table, created_at, ExTracker.Swarm.get_peer_count(table)}
 41 |       end, ordered: false)
 42 |       |> Stream.filter(&match?({:ok, _}, &1))
 43 |       |> Enum.map(&elem(&1, 1))
 44 |       |> Enum.sort_by(&elem(&1, 3), :desc) # order by peer count
 45 |       |> Enum.take(count)
 46 |       |> Task.async_stream(fn {hash, table, created_at, peer_count} ->
 47 |         created = DateTime.from_unix!(created_at, :millisecond)
 48 |         %{
 49 |           "hash" => String.downcase(Base.encode16(hash)),
 50 |           "created" => DateTime.to_string(created),
 51 |           "total_memory" => (:ets.info(table, :memory) * :erlang.system_info(:wordsize)),
 52 |           "peer_count" => peer_count
 53 |         }
 54 |       end, ordered: false)
 55 |       |> Stream.filter(&match?({:ok, _}, &1))
 56 |       |> Enum.map(&elem(&1, 1))
 57 |       |> SwarmPrintout.print_table()
 58 |     :ok
 59 |   end
 60 | 
 61 |   def show_swarm_count() do
 62 |     count = ExTracker.SwarmFinder.get_swarm_count()
 63 |     IO.inspect(count, label: "Registered swarm count")
 64 |     :ok
 65 |   end
 66 | 
 67 |   def show_swarm_total_memory() do
 68 |     swarms = ExTracker.SwarmFinder.get_swarm_list()
 69 |     memory = Enum.reduce(swarms, 0, fn swarm, acc ->
 70 |       {_hash, table, _created_at, _last_cleaned} = swarm
 71 |       usage = (:ets.info(table, :memory) * :erlang.system_info(:wordsize))
 72 |       acc + usage
 73 |     end)
 74 |     IO.inspect(memory, label: "Total memory used by swarms")
 75 |     :ok
 76 |   end
 77 | 
 78 |   def show_pretty_swarm_list() do
 79 |     data =
 80 |       ExTracker.SwarmFinder.get_swarm_list()
 81 |       |> Task.async_stream(fn swarm ->
 82 |         {hash, table, created_at, _last_cleaned} = swarm
 83 |         created = DateTime.from_unix!(created_at, :millisecond)
 84 | 
 85 |         %{
 86 |           "hash" => String.downcase(Base.encode16(hash)),
 87 |           "created" => DateTime.to_string(created),
 88 |           "total_memory" => (:ets.info(table, :memory) * :erlang.system_info(:wordsize)),
 89 |           "peer_count" => ExTracker.Swarm.get_peer_count(table)
 90 |         }
 91 | 
 92 |       end, ordered: false)
 93 |       |> Stream.filter(&match?({:ok, _}, &1))
 94 |       |> Enum.map(&elem(&1, 1))
 95 | 
 96 |     SwarmPrintout.print_table(data)
 97 |     :ok
 98 |   end
 99 | 
100 |   def show_swarm_info(info_hash) do
101 |     with {:ok, hash} <- ExTracker.Utils.validate_hash(info_hash),
102 |       {:ok, swarm} <- get_swarm(hash)
103 |       do
104 |         memory = :ets.info(swarm, :memory) * :erlang.system_info(:wordsize)
105 | 
106 |         info = %{
107 |           "swarm" => String.downcase(Base.encode16(hash)),
108 |           "total_memory" => memory,
109 |           "peer_memory" => (memory / :ets.info(swarm, :size)),
110 |           "peers" => %{
111 |             "all" => %{
112 |               "count" => ExTracker.Swarm.get_peer_count(swarm),
113 |               "total" => ExTracker.Swarm.get_peer_count(swarm, :all),
114 |               "leechers" => ExTracker.Swarm.get_leecher_count(swarm, :all),
115 |               "seeders" => ExTracker.Swarm.get_seeder_count(swarm, :all)
116 |             },
117 |             "ipv4" => %{
118 |               "total" => ExTracker.Swarm.get_peer_count(swarm, :inet),
119 |               "leechers" => ExTracker.Swarm.get_leecher_count(swarm, :inet),
120 |               "seeders" => ExTracker.Swarm.get_seeder_count(swarm, :inet)
121 |             },
122 |             "ipv6" => %{
123 |               "total" => ExTracker.Swarm.get_peer_count(swarm, :inet6),
124 |               "leechers" => ExTracker.Swarm.get_leecher_count(swarm, :inet6),
125 |               "seeders" => ExTracker.Swarm.get_seeder_count(swarm, :inet6)
126 |             },
127 |           }
128 |         }
129 | 
130 |         IO.inspect(info)
131 |       end
132 |       :ok
133 |   end
134 | 
135 |   defp get_swarm(hash) do
136 |     case ExTracker.SwarmFinder.find(hash) do
137 |       :error -> {:error, "swarm does not exist"}
138 |       swarm -> {:ok, swarm}
139 |     end
140 |   end
141 | 
142 |   def show_peer_count(family) do
143 |     total = ExTracker.SwarmFinder.get_swarm_list()
144 |     |> Task.async_stream(fn {_hash, table, _created_at, _last_cleaned} ->
145 |       ExTracker.Swarm.get_peer_count(table, family)
146 |     end, ordered: false)
147 |     |> Stream.reject(&match?({_, :undefined}, &1))
148 |     |> Stream.map(&elem(&1, 1))
149 |     |> Enum.sum()
150 | 
151 |     IO.inspect(total, label: "Total peers (family: #{to_string(family)})")
152 |     :ok
153 |   end
154 | 
155 |   def show_leecher_count(family) do
156 |     total = ExTracker.SwarmFinder.get_swarm_list()
157 |     |> Task.async_stream(fn {_hash, table, _created_at, _last_cleaned} ->
158 |       ExTracker.Swarm.get_leecher_count(table, family)
159 |     end, ordered: false)
160 |     |> Stream.reject(&match?({_, :undefined}, &1))
161 |     |> Stream.map(&elem(&1, 1))
162 |     |> Enum.sum()
163 | 
164 |     IO.inspect(total, label: "Total leechers (family: #{to_string(family)})")
165 |     :ok
166 |   end
167 | 
168 |   def show_seeder_count(family) do
169 |     total = ExTracker.SwarmFinder.get_swarm_list()
170 |     |> Task.async_stream(fn {_hash, table, _created_at, _last_cleaned} ->
171 |       ExTracker.Swarm.get_seeder_count(table, family)
172 |     end, ordered: false)
173 |     |> Stream.reject(&match?({_, :undefined}, &1))
174 |     |> Stream.map(&elem(&1, 1))
175 |     |> Enum.sum()
176 | 
177 |     IO.inspect(total, label: "Total seeders (family: #{to_string(family)})")
178 |     :ok
179 |   end
180 | 
181 |   def show_countries(family) do
182 |     countries = ExTracker.SwarmFinder.get_swarm_list()
183 |     |> Task.async_stream(fn {_hash, table, _created_at, _last_cleaned} ->
184 |       ExTracker.Swarm.get_peers(table, :all, :all, family, true)
185 |     end, ordered: false)
186 |     |> Stream.reject(&match?({_, :undefined}, &1))
187 |     |> Stream.map(&elem(&1, 1))
188 |     |> Enum.to_list()
189 |     |> List.flatten()
190 |     |> Enum.group_by(fn {_id, data} -> data.country end)
191 |     |> Enum.map(fn {country, peers} -> {country, length(peers)} end)
192 |     |> Enum.sort_by(fn {_country, sum} -> sum end, :desc)
193 | 
194 |     IO.inspect(countries, label: "Total peers by country (family: #{to_string(family)})", limit: :infinity)
195 |     :ok
196 |   end
197 | 
198 |   def create_fake_swarms(swarm_count, peer_count) do
199 |     start = System.monotonic_time(:millisecond)
200 |     Enum.map(1..swarm_count, fn _s ->
201 |       # create random hash
202 |       hash = :crypto.strong_rand_bytes(20)
203 |       # create swarm
204 |       {:ok, swarm} = ExTracker.SwarmFinder.find_or_create(hash)
205 |       # fill it with fake peers
206 |       Enum.map(1..peer_count, fn _p ->
207 |         # create random peer data
208 |         <<a, b, c, d>> = :crypto.strong_rand_bytes(4)
209 |         ip = {a, b, c, d}
210 |         port = Enum.random(1024..65535)
211 |         # add the peers
212 |         ExTracker.Swarm.add_peer(swarm, PeerID.new(ip, port))
213 |       end)
214 |     end)
215 |     finish = System.monotonic_time(:millisecond)
216 |     Logger.debug("created #{swarm_count} fake swarms with #{peer_count} fake peers each in #{finish - start}ms")
217 |   end
218 | end
219 | 
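A sketch of how these diagnostics might be driven from an IEx session; the printed shapes depend on the tracker's current state:

```elixir
ExTracker.Cmd.show_swarm_count()          # prints the number of registered swarms
ExTracker.Cmd.show_peer_count(:all)       # sums peers across every swarm
ExTracker.Cmd.show_biggest_swarms(10)     # table of the ten largest swarms
ExTracker.Cmd.create_fake_swarms(5, 100)  # 5 swarms x 100 random peers, for load testing
```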


--------------------------------------------------------------------------------
/lib/ex_tracker/config/system_env.ex:
--------------------------------------------------------------------------------
 1 | defmodule Extracker.Config.SystemEnvironment do
 2 |   @moduledoc """
 3 |   Module to dynamically override the application configuration of :extracker
 4 |   using environment variables. For each configuration key defined,
 5 |   the corresponding environment variable is generated using the prefix
 6 |   "EXTRACKER_". For example, the key :http_port will be mapped to EXTRACKER_HTTP_PORT
 7 |   """
 8 | 
 9 |   @app :extracker
10 |   @prefix "EXTRACKER_"
11 | 
12 |   @doc """
13 |   Loads environment variables for the :extracker configuration and overrides
14 |   its values if corresponding environment variables are defined.
15 |   """
16 |   def load() do
17 |     Application.get_all_env(@app)
18 |     |> Enum.each(fn {key, default} ->
19 |       env_name = generate_env_var_name(key)
20 | 
21 |       case System.get_env(env_name) do
22 |         nil -> :ok
23 |         env_val -> Application.put_env(@app, key, convert_value(env_val, default))
24 |       end
25 |     end)
26 | 
27 |     :ok
28 |   end
29 | 
30 |   defp generate_env_var_name(key) when is_atom(key) do
31 |     @prefix <> (Atom.to_string(key) |> String.upcase())
32 |   end
33 | 
34 |   defp convert_value(val, default) when is_boolean(default) do
35 |     String.trim(String.downcase(val)) == "true"
36 |   end
37 | 
38 |   defp convert_value(val, default) when is_integer(default) do
39 |     case Integer.parse(String.trim(val)) do
40 |       {int_val, _} -> int_val
41 |       :error -> default
42 |     end
43 |   end
44 | 
45 |   defp convert_value(val, default) when is_float(default) do
46 |     case Float.parse(String.trim(val)) do
47 |       {float_val, _} -> float_val
48 |       :error -> default
49 |     end
50 |   end
51 | 
52 |   defp convert_value(val, default) when is_tuple(default) do
53 |     # Assume a tuple is used for an IP, e.g. "0.0.0.0"
54 |     parts = String.split(String.trim(val), ".")
55 |     if length(parts) == tuple_size(default) do
56 |       parts
57 |       |> Enum.map(&String.to_integer/1)
58 |       |> List.to_tuple()
59 |     else
60 |       default
61 |     end
62 |   end
63 | 
64 |   defp convert_value(val, default) when is_binary(default), do: val
65 |   defp convert_value(val, _default), do: val
66 | end
67 | 
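A minimal sketch of the override flow, assuming the default config defines `:http_port` as an integer (the key is only an example):

```elixir
# the EXTRACKER_ prefix plus the upcased key selects the variable;
# the value is converted with Integer.parse/1 because the default is an integer
System.put_env("EXTRACKER_HTTP_PORT", "8080")
Extracker.Config.SystemEnvironment.load()
Application.get_env(:extracker, :http_port)
# => 8080
```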


--------------------------------------------------------------------------------
/lib/ex_tracker/http/multiparam_plug.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.HTTP.MultiParamParser do
 2 | 
 3 |   def init(opts), do: opts
 4 | 
 5 |   def call(conn, _opts) do
 6 |     #URI.query_decoder("foo=1&bar=2") |> Enum.to_list()
 7 |     multi_params =
 8 |       conn.query_string
 9 |       |> URI.query_decoder()
10 |       |> Enum.to_list()
11 |       |> Enum.group_by(fn {k, _v} -> k end, fn {_k, v} -> v end)
12 | 
13 |     Plug.Conn.assign(conn, :multi_query_params, multi_params)
14 |   end
15 | end
16 | 
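A standalone sketch of the grouping performed above: repeated keys, as in multi-hash `/scrape` requests, are collected into lists instead of collapsing to the last value the way `conn.query_params` would:

```elixir
"info_hash=aaa&info_hash=bbb&compact=1"
|> URI.query_decoder()
|> Enum.to_list()
|> Enum.group_by(fn {k, _v} -> k end, fn {_k, v} -> v end)
# => %{"compact" => ["1"], "info_hash" => ["aaa", "bbb"]}
```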


--------------------------------------------------------------------------------
/lib/ex_tracker/http/reverse_proxy_plug.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.HTTP.HandleReverseProxy do
 2 |   require Logger
 3 | 
 4 |   @behaviour Plug
 5 | 
 6 |   def init(_opts) do
 7 |     # return the parsed keyword list so call/2 can read :proxy from its opts
 8 |     case Application.get_env(:extracker, :reverse_proxy_address, "") do
 9 |       nil -> [] # no proxy set
10 |       "" -> [] # no proxy set
11 |       address ->
12 |         case :inet.parse_address(String.to_charlist(address)) do
13 |           {:ok, proxy_ip} ->
14 |             Logger.notice("Reverse Proxy address set to #{:inet.ntoa(proxy_ip)}")
15 |             [proxy: proxy_ip]
16 |           _ ->
17 |             Logger.error("specified reverse proxy address is not a valid IP")
18 |             []
19 |         end
20 |     end
21 |   end
22 | 
23 |   def call(conn, opts) do
24 |     # handle proxy headers only if a reverse proxy is specified in the config
25 |     case Keyword.get(opts, :proxy) do
26 |       nil -> conn
27 |       proxy_ip -> handle_proxy(conn, proxy_ip)
28 |     end
29 |   end
30 | 
31 |   defp handle_proxy(conn, proxy_ip) do
32 |     # the remote ip must match the specified proxy, otherwise ignore the header
33 |     case conn.remote_ip do
34 |       ^proxy_ip ->
35 |         # TODO throw a warning/error if the reverse proxy doesn't add the header?
36 |         header = Plug.Conn.get_req_header(conn, "x-forwarded-for") |> List.first()
37 |         case parse_forwarded_ip(header) do
38 |           nil -> conn
39 |           real_ip -> %{conn | remote_ip: real_ip}
40 |         end
41 |       _ -> conn
42 |     end
43 |   end
44 | 
45 |   defp parse_forwarded_ip(nil), do: nil
46 |   defp parse_forwarded_ip(header) do
47 |     header
48 |     |> String.split(",", trim: true)
49 |     |> List.first()
50 |     |> String.trim()
51 |     |> String.to_charlist()
52 |     |> :inet.parse_address()
53 |     |> case do
54 |          {:ok, ip} -> ip
55 |          _ -> nil
56 |        end
57 |   end
58 | end
59 | 
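A sketch of the header handling above: only the first (client) entry of a comma-separated `X-Forwarded-For` chain is used, and unparsable input yields `nil`, leaving `remote_ip` untouched:

```elixir
"203.0.113.7, 10.0.0.2, 10.0.0.1"
|> String.split(",", trim: true)
|> List.first()
|> String.trim()
|> String.to_charlist()
|> :inet.parse_address()
# => {:ok, {203, 0, 113, 7}}
```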


--------------------------------------------------------------------------------
/lib/ex_tracker/http/router.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.HTTP.Router do
  2 |   use Plug.Router
  3 |   if Mix.env == :dev, do: use Plug.Debugger
  4 | 
  5 |   @assets_folder Application.app_dir(:extracker, "priv/static/assets")
  6 | 
  7 |   plug ExTracker.HTTP.HandleReverseProxy
  8 |   plug Plug.Logger
  9 |   plug ExTracker.Telemetry.Plug
 10 |   plug Plug.Static, at: "/assets", from: @assets_folder
 11 |   plug :match
 12 |   #plug Plug.Parsers, parsers: [:urlencoded, :multipart], pass: ["*/*"], validate_utf8: false
 13 |   plug Plug.Parsers, parsers: [], pass: ["text/html"], validate_utf8: false
 14 |   plug ExTracker.HTTP.MultiParamParser
 15 |   plug :dispatch
 16 | 
 17 |   # client announcements
 18 |   get "/announce" do
 19 |     {status, result} = case check_allowed_useragent(conn) do
 20 |       true ->
 21 |         ExTracker.Processors.Announce.process(conn.remote_ip, conn.query_params)
 22 |       false ->
 23 |         {403, %{ "failure reason" => "User-Agent not allowed"}}
 24 |     end
 25 | 
 26 |     # bencoded response
 27 |     response = result |> Bento.encode!() |> IO.iodata_to_binary()
 28 | 
 29 |     # send telemetry about this request
 30 |     send_telemetry(conn.remote_ip, "announce", status, byte_size(response))
 31 | 
 32 |     conn
 33 |     |> put_resp_content_type("application/octet-stream", nil)
 34 |     #|> put_resp_content_type("text/plain", nil)
 35 |     |> put_resp_header("cache-control", "no-cache")
 36 |     |> send_resp(200, response)
 37 | 
 38 |   end
 39 | 
 40 |   get "/scrape" do
 41 |     {status, result} =
 42 |       case Application.get_env(:extracker, :scrape_enabled) do
 43 |         true ->
 44 |           case check_allowed_useragent(conn) do
 45 |             true ->
 46 |               case Map.fetch(conn.assigns[:multi_query_params], "info_hash") do
 47 |                 :error -> # only malformed queries should fall here
 48 |                   ExTracker.Processors.Scrape.process(conn.remote_ip, conn.query_params)
 49 |                 {:ok, [_hash]} -> # just one hash, may fail so needs special handle
 50 |                   ExTracker.Processors.Scrape.process(conn.remote_ip, conn.query_params)
 51 |                 {:ok, list} -> # multiple hashes in one request
 52 |                   # TODO use chunked response
 53 |                   successes =
 54 |                     list
 55 |                     # process each info_hash on its own
 56 |                     |> Enum.map(fn hash ->
 57 |                       params = %{conn.query_params | "info_hash" => hash}
 58 |                       case ExTracker.Processors.Scrape.process(conn.remote_ip, params) do
 59 |                         {:ok, response} ->
 60 |                           {:ok, byte_hash} = ExTracker.Utils.validate_hash(hash)
 61 |                           %{byte_hash => response}
 62 |                         {:error, _response} -> nil
 63 |                       end
 64 |                     end)
 65 |                     # discard failed requests
 66 |                     |> Enum.reject(&(&1 == nil))
 67 |                     # combine the rest into one map
 68 |                     |> Enum.reduce( %{}, fn success, acc ->
 69 |                       Map.merge(acc, success)
 70 |                     end)
 71 | 
 72 |                     # return a failure reason if all hashes failed to process
 73 |                     case Kernel.map_size(successes) do
 74 |                       0 ->
 75 |                         {400, %{"failure reason" => "all requested hashes failed to be scraped"}}
 76 |                       _ ->
 77 |                         #  wrap the map as per BEP 48
 78 |                         {200, ExTracker.Types.ScrapeResponse.generate_success_http_envelope(successes)}
 79 |                     end
 80 |               end
 81 |             false ->
 82 |               {403, %{ "failure reason" => "User-Agent not allowed"}}
 83 |           end
 84 |         _ ->
 85 |           {404, %{"failure reason" => "scraping is disabled"}}
 86 |       end
 87 | 
 88 |     # bencoded response
 89 |     response = result |> Bento.encode!() |> IO.iodata_to_binary()
 90 | 
 91 |     # send telemetry about this request
 92 |     send_telemetry(conn.remote_ip, "scrape", status, byte_size(response))
 93 | 
 94 |     conn
 95 |     |> put_resp_content_type("application/octet-stream", nil)
 96 |     #|> put_resp_content_type("text/plain", nil)
 97 |     |> put_resp_header("cache-control", "no-cache")
 98 |     |> send_resp(200, response)
 99 |   end
100 | 
101 |   match _ do
102 |     conn
103 |     |> put_resp_content_type("text/html")
104 |     |> send_resp(200, ExTracker.web_about())
105 |   end
106 | 
107 |   defp send_telemetry(ip, action, status, response_size) do
108 |     # send outgoing bandwidth and the request result here instead of ExTracker.Telemetry.Plug
109 |     # because we don't know the result and response size until the very end
110 |     endpoint = "http"
111 | 
112 |     result = case status do
113 |       :ok -> :success
114 |       :error -> :failure
115 |       code when code in 200..299 -> :success
116 |       code when code in 400..499 -> :failure
117 |       code when code in 500..599 -> :error
118 |     end
119 | 
120 |     family = case tuple_size(ip) do
121 |       4 -> "inet"
122 |       8 -> "inet6"
123 |     end
124 | 
125 |     :telemetry.execute([:extracker, :request, result], %{}, %{endpoint: endpoint, action: action, family: family})
126 |     :telemetry.execute([:extracker, :bandwidth, :out], %{value: response_size})
127 |   end
128 | 
129 |   defp get_useragent(conn) do
130 |     conn |> Plug.Conn.get_req_header("user-agent") |> List.first()
131 |   end
132 | 
133 |   defp check_allowed_useragent(conn) do
134 |     case Application.get_env(:extracker, :restrict_useragents, false) do
135 |       "whitelist" -> ExTracker.Accesslist.contains(:whitelist_useragents, get_useragent(conn))
136 |       "blacklist" -> !ExTracker.Accesslist.contains(:blacklist_useragents, get_useragent(conn))
137 |       _ -> true
138 |     end
139 |   end
140 | end
141 | 
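Tracker errors travel inside the bencoded body rather than the HTTP status line, which is why the announce and scrape routes above always `send_resp(200, ...)`. A sketch of what a failure body looks like on the wire:

```elixir
%{"failure reason" => "User-Agent not allowed"}
|> Bento.encode!()
|> IO.iodata_to_binary()
# => "d14:failure reason22:User-Agent not allowede"
```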


--------------------------------------------------------------------------------
/lib/ex_tracker/processors/announce.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.Processors.Announce do
  2 | 
  3 |   import ExTracker.Utils
  4 |   require Logger
  5 |   alias ExTracker.Types.AnnounceResponse
  6 |   alias ExTracker.Types.AnnounceRequest
  7 |   alias ExTracker.Types.PeerID
  8 |   alias ExTracker.Types.PeerData
  9 | 
 10 |   # entrypoint for client's "/announce" requests
 11 |   def process(source_ip, params) do
 12 |     case AnnounceRequest.parse(params) do
 13 |       {:ok, request} ->
 14 |         client = PeerID.new(source_ip, request.port)
 15 | 
 16 |         with {:ok, event} <- process_event(request.event), # check event first as its the simplest
 17 |           {:ok, swarm} <- get_swarm(request.info_hash), # find swarm based on info_hash
 18 |           {:ok, peer_data} <- get_peer(swarm, client), # retrieve or create peer data
 19 |           :ok <- check_announce_interval(client, peer_data), # is the peer respecting the provided intervals?
 20 |           {:ok, peer_data} <- update_stats(swarm, client, peer_data, request), # update peer stats
 21 |           {:ok, peer_list} <- generate_peer_list(swarm, client, peer_data, event, request), # generate peer list
 22 |           {:ok, totals} <- get_total_peers(swarm, client) # get number of seeders and leechers for this swarm
 23 |         do
 24 |           generate_success_response(client.family, request.compact, peer_list, totals, source_ip)
 25 |         else
 26 |           {:error, error} ->
 27 |             Logger.info("peer #{client} received an error: #{error}")
 28 |             generate_failure_response(error)
 29 |           _ -> {:error, "unknown internal error"}
 30 |         end
 31 |       {:error, error} ->
 32 |         Logger.info("peer #{inspect(source_ip)} sent an invalid announce: #{error}")
 33 |         generate_failure_response(error)
 34 |     end
 35 |   end
 36 | 
 37 |   defp process_event(event) do
 38 |     case event do
 39 |       :invalid -> {:error, "invalid event"}
 40 |       _ -> {:ok, event}
 41 |     end
 42 |   end
 43 | 
 44 |   defp get_swarm(hash) do
 45 |     ExTracker.SwarmFinder.find_or_create(hash)
 46 |   end
 47 | 
 48 |   defp get_peer(swarm, client) do
 49 |     case ExTracker.Swarm.find_peer(swarm, client) do
 50 |       {:ok, data} ->
 51 |         {:ok, data}
 52 |       :notfound ->
 53 |         case ExTracker.Swarm.add_peer(swarm, client) do
 54 |           {:ok, data} -> {:ok, data}
 55 |           {:error, error} -> {:error, error}
 56 |         end
 57 |     end
 58 |   end
 59 | 
 60 |   defp check_announce_interval(client, peer_data) do
 61 |     if peer_data.state != :fresh do
 62 |       now = System.system_time(:millisecond)
 63 |       elapsed = now - peer_data.last_updated
 64 |       elapsed = elapsed + 1000 # some clients like to announce a few milliseconds early, so give them some wiggle room
 65 |       cond do
 66 |         elapsed < (Application.get_env(:extracker, :announce_interval_min) * 1000) ->
 67 |           {:error, "didn't respect minimal announcement interval"}
 68 |         elapsed < (Application.get_env(:extracker, :announce_interval) * 1000) ->
 69 |           Logger.info("peer #{client} is announcing too soon (#{elapsed / 1000} seconds since last time)")
 70 |           # TODO should we take an automatic action in this case?
 71 |           :ok
 72 |         true ->
 73 |           :ok
 74 |       end
 75 |     else
 76 |       :ok
 77 |     end
 78 |   end
 79 | 
 80 |   defp update_stats(swarm, client, peer_data, request) do
 81 |     updated_data = peer_data
 82 |       |> PeerData.set_id(request.peer_id)
 83 |       #|> PeerData.set_key(request.key)
 84 |       |> PeerData.update_uploaded(request.uploaded)
 85 |       |> PeerData.update_downloaded(request.downloaded)
 86 |       |> PeerData.update_left(request.left)
 87 |       |> PeerData.update_last_event(request.event)
 88 | 
 89 |     if peer_data.left > 0 && request.left == 0 do
 90 |       # TODO increase swarm downloads counter if 'left' reaches zero
 91 |     end
 92 | 
 93 |     # update peer internal state based on the provided event
 94 |     updated_data =
 95 |       case request.event do
 96 |         :started -> PeerData.update_state(updated_data, :active)
 97 |         :stopped -> PeerData.update_state(updated_data, :gone)
 98 |         :updated -> PeerData.update_state(updated_data, :active)
 99 |         :completed -> PeerData.update_state(updated_data, :active)
100 |         :paused -> PeerData.update_state(updated_data, :active)
101 |       end
102 | 
103 |     if updated_data.state == :gone do
104 |       # remove the peer as it has abandoned the swarm
105 |       ExTracker.Swarm.remove_peer(swarm, client)
106 |     else
107 |       # update the peer info in the swarm
108 |       ExTracker.Swarm.update_peer(swarm, client, updated_data)
109 |     end
110 |     {:ok, updated_data}
111 |   end
112 | 
113 |   # the stopped event means the peer is done with the torrent, so it doesn't need more peers
114 |   defp generate_peer_list(_swarm, _client, _peer_data, :stopped, request) do
115 |     case request.compact do
116 |       true -> {:ok, IO.iodata_to_binary([])}
117 |       false -> {:ok, []}
118 |     end
119 |   end
120 | 
121 |   defp generate_peer_list(swarm, client, peer_data, _event, request) do
122 |     need_peer_data = !request.compact
123 |     max_peers = Application.get_env(:extracker, :max_peers_returned, 25)
124 |     desired_total = if request.numwant > max_peers or request.numwant < 0, do: max_peers, else: request.numwant
125 | 
126 |     is_seeder = cond do
127 |       peer_data.left == 0 -> true # full seeders
128 |       request.event == :paused -> true # partial seeders
129 |       true -> false # leechers
130 |     end
131 | 
132 |     peer_list = case is_seeder do
133 |       true ->
134 |         # peer is seeding so try to give it leechers
135 |         leechers = ExTracker.Swarm.get_leechers(swarm, :all, client.family, need_peer_data)
136 |         case length(leechers) do
137 |           length when length == desired_total ->
138 |             # if there's just enough peers to fill the list that's great
139 |             leechers
140 |           length when length > desired_total ->
141 |             # if there are more peers than requested then take a random subset
142 |             Enum.take_random(leechers, desired_total)
143 |           length when length < desired_total ->
144 |             # not enough leechers: keep them all and fill up with some random seeders
145 |             leechers ++ (ExTracker.Swarm.get_seeders(swarm, :all, client.family, need_peer_data) |> Enum.take_random(desired_total - length))
146 |         end
147 |       false ->
148 |         # peer is leeching so try to give it seeders
149 |         seeders = ExTracker.Swarm.get_seeders(swarm, :all, client.family, need_peer_data)
150 |         case length(seeders) do
151 |           length when length == desired_total ->
152 |             # if there's just enough peers to fill the list that's great
153 |             seeders
154 |           length when length > desired_total ->
155 |             # if there are more peers than requested then take a random subset
156 |             Enum.take_random(seeders, desired_total)
157 |           length when length < desired_total ->
158 |             # not enough seeders: keep them all and fill up with some random leechers
159 |             seeders ++ (ExTracker.Swarm.get_leechers(swarm, :all, client.family, need_peer_data) |> Enum.take_random(desired_total - length))
160 |         end
161 |     end
162 | 
163 |     # convert the peers to the expected representation for delivery
164 |     peer_list = Enum.map(peer_list, fn peer ->
165 |         case request.compact do
166 |           true -> ip_to_bytes(peer.ip) <> port_to_bytes(peer.port)
167 |           false ->
168 |             {id, data} = peer
169 |             result = %{"ip" => id.ip, "port" => id.port}
170 |             case request.no_peer_id do
171 |               false -> Map.put(result, "peer id", data.id)
172 |               _ -> result
173 |             end
174 |         end
175 |       end)
176 | 
177 |     case request.compact do
178 |       true -> {:ok, IO.iodata_to_binary(peer_list)}
179 |       false -> {:ok, peer_list}
180 |     end
181 |   end
182 | 
183 |   defp get_total_peers(swarm, client) do
184 |     seeders = ExTracker.Swarm.get_seeder_count(swarm, client.family)
185 |     leechers = ExTracker.Swarm.get_leecher_count(swarm, client.family)
186 |     {:ok, {seeders, leechers}}
187 |   end
188 | 
189 |   defp generate_success_response(family, compact, peer_list, totals, source_ip) do
190 |     {total_seeders, total_leechers} = totals
191 |     response =
192 |       AnnounceResponse.generate_success(family, compact, peer_list, total_seeders, total_leechers)
193 |       |> AnnounceResponse.append_external_ip(source_ip)
194 |     {:ok, response}
195 |   end
196 | 
197 |   defp generate_failure_response(reason) do
198 |     response = AnnounceResponse.generate_failure(reason)
199 |     {:error, response}
200 |   end
201 | end
202 | 
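A sketch of the compact peer format assembled above (BEP 23): each IPv4 peer is 4 address bytes plus a 2-byte big-endian port. `ExTracker.Utils.ip_to_bytes/1` and `port_to_bytes/1` are assumed to produce this layout:

```elixir
ip = {203, 0, 113, 7}
port = 51413

# 4 bytes of address followed by the port as an unsigned big-endian 16-bit integer
entry = :binary.list_to_bin(Tuple.to_list(ip)) <> <<port::16>>
byte_size(entry)
# => 6
```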


--------------------------------------------------------------------------------
/lib/ex_tracker/processors/scrape.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.Processors.Scrape do
 2 | 
 3 |   alias ExTracker.Types.ScrapeResponse
 4 |   alias ExTracker.Types.ScrapeRequest
 5 | 
 6 |   # entrypoint for client's "/scrape" requests
 7 |   def process(_source_ip, params) do
 8 |     case ScrapeRequest.parse(params) do
 9 |       {:ok, request} ->
10 |         with {:ok, swarm} <- get_swarm(request.info_hash), # find swarm based on info_hash
11 |         {:ok, seeders} <- get_total_seeders(swarm), # get number of seeders for this swarm
12 |         {:ok, partial_seeders} <- get_total_partial_seeders(swarm), # get number of partial seeders for this swarm
13 |         {:ok, leechers} <- get_total_leechers(swarm), # get number of leechers for this swarm
14 |         {:ok, downloads} <- get_total_downloads(swarm) # get absolute number of downloads for this swarm
15 |         do
16 |           generate_success_response(seeders, partial_seeders, leechers, downloads)
17 |         else
18 |           {:error, error} -> generate_failure_response(error)
19 |           _ -> {:error, "unknown internal error"}
20 |         end
21 |       {:error, error} ->
22 |         generate_failure_response(error)
23 |     end
24 |   end
25 | 
26 |   defp get_swarm(hash) do
27 |     case ExTracker.SwarmFinder.find(hash) do
28 |       :error -> {:error, "torrent not found"}
29 |       swarm -> {:ok, swarm}
30 |     end
31 |   end
32 | 
33 |   def get_total_seeders(swarm) do
34 |     {:ok, ExTracker.Swarm.get_seeder_count(swarm, :all)}
35 |   end
36 | 
37 |   def get_total_partial_seeders(swarm) do
38 |     {:ok, ExTracker.Swarm.get_partial_seeder_count(swarm, :all)}
39 |   end
40 | 
41 |   def get_total_leechers(swarm) do
42 |     {:ok, ExTracker.Swarm.get_leecher_count(swarm, :all)}
43 |   end
44 | 
45 |   def get_total_downloads(_swarm) do
46 |     {:ok, 0} #TODO
47 |   end
48 | 
49 |   defp generate_success_response(seeders, partial_seeders, leechers, downloads) do
50 |     response = ScrapeResponse.generate_success(seeders, partial_seeders, leechers, downloads)
51 |     {:ok, response}
52 |   end
53 | 
54 |   defp generate_failure_response(reason) do
55 |     response = ScrapeResponse.generate_failure(reason)
56 |     {:error, response}
57 |   end
58 | end
59 | 
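For reference, a conventional scrape entry (BEP 48) is a small dictionary like the sketch below; the exact keys emitted, including how partial seeders are reported, live in `ExTracker.Types.ScrapeResponse` and are assumed here:

```elixir
%{
  "complete" => 12,    # seeders
  "incomplete" => 34,  # leechers
  "downloaded" => 0    # completed downloads; always 0 until the TODO above is resolved
}
```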


--------------------------------------------------------------------------------
/lib/ex_tracker/swarm.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.Swarm do
  2 |   require Logger
  3 |   alias ExTracker.Types.PeerData
  4 | 
  5 |   # try to find and retrieve a peer registered in the specified swarm
  6 |   @spec find_peer(swarm :: any(), id :: PeerID) :: {:ok, PeerData} | :notfound
  7 |   def find_peer(swarm, id) do
  8 |     case :ets.lookup(swarm, id) do
  9 |       [{_, data}] -> {:ok, data}
 10 |       _ -> :notfound
 11 |     end
 12 |   end
 13 | 
 14 |   # add a new peer to the specified swarm
 15 |   @spec add_peer(swarm :: any(), id :: PeerID) :: {:ok, PeerData} | {:error, any()}
 16 |   def add_peer(swarm, id) do
 17 |     data = %PeerData{
 18 |       country: geoip_lookup_country(id.ip),
 19 |       last_updated: System.system_time(:millisecond)
 20 |     }
 21 | 
 22 |     peer = {id, data}
 23 |     case :ets.insert_new(swarm, peer) do
 24 |       true ->
 25 |         :telemetry.execute([:extracker, :peer, :added], %{}, %{ family: id.family})
 26 |         {:ok, data}
 27 |       false -> {:error, "peer already exists"}
 28 |     end
 29 |   end
 30 | 
 31 |   defp geoip_lookup_country(ip) do
 32 |     case Application.get_env(:extracker, :geoip_enabled, false) do
 33 |       true ->
 34 |         case :locus.lookup(:country, ip) do
 35 |           {:ok, data} -> data["country"]["iso_code"]
 36 |           _ -> ""
 37 |         end
 38 |       false -> ""
 39 |     end
 40 |   end
 41 | 
 42 |   # remove an existing peer from the specified swarm
 43 |   @spec remove_peer(swarm :: any(), id :: PeerID) :: :ok | :notfound
 44 |   def remove_peer(swarm, id) do
 45 |     with [{_, _data}] <- :ets.lookup(swarm, id), true <- :ets.delete(swarm, id) do
 46 |       :telemetry.execute([:extracker, :peer, :removed], %{}, %{ family: id.family})
 47 |       :ok
 48 |     else
 49 |       _ -> :notfound
 50 |     end
 51 |   end
 52 | 
 53 |   @spec update_peer(swarm :: any(), id :: PeerID, data :: PeerData) :: {:ok, PeerData} | {:error, any()}
 54 |   def update_peer(swarm, id, data) do
 55 |     # reflect when the last update happened
 56 |     timestamp = System.system_time(:millisecond)
 57 |     data = PeerData.update_last_updated(data, timestamp)
 58 |     case find_peer(swarm, id) do
 59 |       {:ok, _current} ->
 60 |         case :ets.insert(swarm, {id, data}) do
 61 |           true -> {:ok, data}
 62 |           false -> {:error, "peer insertion failed"}
 63 |         end
 64 |       :notfound -> {:error, "peer not found in swarm"}
 65 |     end
 66 |   end
 67 | 
 68 |   # get the total number of peers registered in the specified swarm
 69 |   def get_peer_count(swarm) do
 70 |     try do
 71 |       :ets.info(swarm, :size)
 72 |     rescue
 73 |       e in ArgumentError ->
 74 |         Logger.debug("get_peer_count/1: #{Exception.message(e)}")
 75 |         0
 76 |     end
 77 |   end
 78 | 
 79 |   # get the total number of peers registered in the specified swarm filtered by ipv4 or ipv6
 80 |   def get_peer_count(swarm, family) do
 81 |     get_peers(swarm, :all, :all, family, false) |> length()
 82 |   end
 83 | 
 84 |   # get the total number of leechers registered in the specified swarm
 85 |   def get_leecher_count(swarm, family) do
 86 |     get_leechers(swarm, :all, family, false) |> length()
 87 |   end
 88 | 
 89 |   # get the total number of seeders registered in the specified swarm
 90 |   def get_seeder_count(swarm, family) do
 91 |     get_seeders(swarm, :all, family, false) |> length()
 92 |   end
 93 | 
 94 |   # get the total number of partial seeders registered in the specified swarm
 95 |   def get_partial_seeder_count(swarm, family) do
 96 |     get_partial_seeders(swarm, :all, family, false) |> length()
 97 |   end
 98 | 
 99 |   # return a list of the peers registered in the swarm up to 'count'; optionally includes their associated data
100 |   def get_peers(swarm, count, type, family, include_data) do
101 |     spec_condition_type = case type do
102 |       :leechers -> {:>, {:map_get, :left, :"$2"}, 0} # data.left > 0
103 |       :seeders -> {:==, {:map_get, :left, :"$2"}, 0} # data.left == 0
104 |       :partial_seeders -> {:==, {:map_get, :last_event, :"$2"}, :paused} # data.last_event == :paused
105 |       :all -> nil # no condition
106 |     end
107 | 
108 |     spec_condition_family = case family do
109 |       :inet -> {:==, {:map_get, :family, :"$1"}, :inet} # id.family == :inet
110 |       :inet6 -> {:==, {:map_get, :family, :"$1"}, :inet6} # id.family == :inet6
111 |       :all -> nil # no condition
112 |     end
113 | 
114 |     # [{:andalso,{:>, {:map_get, :left, :"$2"}, 0},{:==, {:map_get, :family, :"$1"}, :inet}}]
115 |     spec_condition = case {spec_condition_type, spec_condition_family} do
116 |       {nil, nil} -> []
117 |       {cond1, nil} -> [cond1]
118 |       {nil, cond2} -> [cond2]
119 |       {cond1, cond2} -> [{:andalso, cond1, cond2}]
120 |     end
121 | 
122 |     spec_match = case include_data do
123 |       false -> [:"$1"] # peer.id
124 |       true -> [:"$_"] # peer
125 |     end
126 | 
127 |     # make the whole spec with the pieces
128 |     spec = [{{:"$1", :"$2"}, spec_condition, spec_match}]
129 | 
130 |     # execute the specified request
131 |     try do
132 |       case count do
133 |         :all -> :ets.select(swarm, spec)
134 |         integer -> :ets.select(swarm, spec, integer)
135 |       end
136 |     rescue
137 |       # the swarm table may be gone while the query reaches this point
138 |       e in ArgumentError ->
139 |         Logger.debug("get_peers/5: #{Exception.message(e)}")
140 |         []
141 |     end
142 |   end
143 | 
144 |   def get_all_peers(swarm, include_data) do
145 |     get_peers(swarm, :all, :all, :all, include_data)
146 |   end
147 | 
148 |   def get_leechers(swarm, count, family, include_data) do
149 |     get_peers(swarm, count, :leechers, family, include_data)
150 |   end
151 | 
152 |   def get_seeders(swarm, count, family, include_data) do
153 |     get_peers(swarm, count, :seeders, family, include_data)
154 |   end
155 | 
156 |   def get_partial_seeders(swarm, count, family, include_data) do
157 |     get_peers(swarm, count, :partial_seeders, family, include_data)
158 |   end
159 | 
160 |   def get_stale_peers(swarm, timestamp) do
161 |     #spec = :ets.fun2ms(fn {id, data} = peer when data.last_updated < timestamp -> peer end)
162 |     spec = [{{:"$1", :"$2"}, [{:<, {:map_get, :last_updated, :"$2"}, timestamp}], [:"$_"]}]
163 |     :ets.select(swarm, spec)
164 |   end
165 | end
166 | 
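A self-contained sketch of the match specs built in `get_peers/5`, run against a throwaway table: `$1` binds the key and `$2` the data, and `:map_get` guards filter on their fields:

```elixir
table = :ets.new(:demo_swarm, [:set, :public])
:ets.insert(table, {%{family: :inet}, %{left: 0}})    # an IPv4 seeder
:ets.insert(table, {%{family: :inet6}, %{left: 100}}) # an IPv6 leecher

# select IPv4 seeders (left == 0), returning keys only
spec = [{{:"$1", :"$2"},
         [{:andalso, {:==, {:map_get, :left, :"$2"}, 0},
                     {:==, {:map_get, :family, :"$1"}, :inet}}],
         [:"$1"]}]
:ets.select(table, spec)
# => [%{family: :inet}]
```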


--------------------------------------------------------------------------------
/lib/ex_tracker/swarm_cleaner.ex:
--------------------------------------------------------------------------------
  1 | # ExTracker.SwarmCleaner is the process responsible for periodically removing old peers and swarms
  2 | # in the future swarms could be saved to disk instead of being wiped
  3 | defmodule ExTracker.SwarmCleaner do
  4 | 
  5 |     use GenServer
  6 |     require Logger
  7 | 
  8 |     alias ExTracker.Swarm
  9 |     alias ExTracker.SwarmFinder
 10 |     alias ExTracker.Utils
 11 | 
 12 |     def start_link(args) do
 13 |       GenServer.start_link(__MODULE__, args, name: __MODULE__)
 14 |     end
 15 | 
 16 |     #==========================================================================
 17 |     # Client
 18 |     #==========================================================================
 19 | 
 20 |     def clean(hash) do
 21 |       GenServer.cast(__MODULE__, {:clean, hash})
 22 |     end
 23 | 
 24 |     def clean_all() do
 25 |       GenServer.cast(__MODULE__, :clean_all)
 26 |     end
 27 | 
 28 |     #==========================================================================
 29 |     # Server (callbacks)
 30 |     #==========================================================================
 31 | 
 32 |     @impl true
 33 |     def init(_args) do
 34 |       schedule_clean()
 35 |       {:ok, {}}
 36 |     end
 37 | 
 38 |     @impl true
 39 |     def terminate(_reason, _state) do
 40 |     end
 41 | 
 42 |     defp schedule_clean() do
 43 |       Process.send_after(self(), :clean, Application.get_env(:extracker, :cleaning_interval))
 44 |     end
 45 | 
 46 |     @impl true
 47 |     def handle_info(:clean, state) do
 48 |       # select all the tables that are due for a clean up
 49 |       now = System.system_time(:millisecond)
 50 |       swarm_timeout = now - Application.get_env(:extracker, :swarm_clean_delay)
 51 |       peer_timeout = now - Application.get_env(:extracker, :peer_cleanup_delay)
 52 | 
 53 |       start = System.monotonic_time(:millisecond)
 54 |       #spec = :ets.fun2ms(fn {hash, table, created_at, last_cleaned} = swarm when last_cleaned < swarm_timeout  -> swarm end)
 55 |       spec = [{{:"$1", :"$2", :"$3", :"$4"}, [{:<, :"$4", swarm_timeout}], [:"$_"]}]
 56 |       entries = :ets.select(SwarmFinder.swarms_table_name(), spec)
 57 |       elapsed = System.monotonic_time(:millisecond) - start
 58 | 
 59 |       entry_count = length(entries)
 60 |       if (entry_count > 0) do
 61 |         Logger.debug("swarm cleaner found #{entry_count} swarms pending cleaning in #{elapsed}ms")
 62 |       end
 63 | 
 64 |       # retrieve the peers inside every matching swarm in parallel
 65 |       entries
 66 |       |> Task.async_stream(fn entry ->
 67 |         {hash, table, _created_at, _last_cleaned} = entry
 68 |         Swarm.get_stale_peers(table, peer_timeout)
 69 |         |> (fn stale_peers ->
 70 |           peer_count = length(stale_peers)
 71 |           if peer_count > 0 do
 72 |             Logger.debug("removing #{peer_count} stale peers from swarm #{Utils.hash_to_string(hash)}")
 73 |           end
 74 |           stale_peers
 75 |         end).()
 76 |         # remove the stale ones
 77 |         |> Enum.each(fn peer ->
 78 |           {id, _data} = peer
 79 |           Swarm.remove_peer(table, id)
 80 |         end)
 81 | 
 82 |         case Swarm.get_peer_count(table) do
 83 |           0 ->
 84 |             # empty swarms are deleted right away
 85 |             SwarmFinder.remove(hash)
 86 |           _ ->
 87 |             # flag the swarm as clean
 88 |             SwarmFinder.mark_as_clean(hash)
 89 |         end
 90 | 
 91 | 
 92 |       end)
 93 |       |> Stream.run()
 94 | 
 95 |       schedule_clean()
 96 |       {:noreply, state}
 97 |     end
 98 | 
 99 |     @impl true
100 |     def handle_info(_msg, state) do
101 |       {:noreply, state}
102 |     end
103 | 
104 |     @impl true
105 |     def handle_cast({:clean, _hash}, state) do
106 |       # TODO
107 |       {:noreply, state}
108 |     end
109 | 
110 |     @impl true
111 |     def handle_cast(:clean_all, state) do
112 |       # TODO
113 |       {:noreply, state}
114 |     end
115 |   end
116 | 
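A sketch of the staleness window computed in `handle_info(:clean, ...)` above; the 30-minute delay is only an assumed value for illustration:

```elixir
now = System.system_time(:millisecond)
peer_timeout = now - 30 * 60 * 1000 # assuming a :peer_cleanup_delay of 30 minutes

# a peer qualifies for removal once its last_updated falls behind the window
stale? = fn %{last_updated: t} -> t < peer_timeout end
stale?.(%{last_updated: now - 3_600_000})
# => true (last announce was an hour ago)
```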


--------------------------------------------------------------------------------
/lib/ex_tracker/swarm_finder.ex:
--------------------------------------------------------------------------------
  1 | # ExTracker.SwarmFinder is the process responsible for keeping track of all the swarms (torrents) using ETS
  2 | # tables are created and looked up here but the actual updates happen in ExTracker.Swarm
  3 | defmodule ExTracker.SwarmFinder do
  4 | 
  5 |   # ETS table to store the index for every swarm table containing the actual data
  6 |   @swarms_table_name :swarms
  7 |   def swarms_table_name, do: @swarms_table_name
  8 | 
  9 |   use GenServer
 10 |   require Logger
 11 | 
 12 |   alias ExTracker.Utils
 13 | 
 14 |   def start_link(args) do
 15 |     GenServer.start_link(__MODULE__, args, name: __MODULE__)
 16 |   end
 17 | 
 18 |   #==========================================================================
 19 |   # Client
 20 |   #==========================================================================
 21 | 
 22 |   def find_or_create(hash) do
 23 |     case :ets.lookup(@swarms_table_name, hash) do
 24 |       [{^hash, table, _created_at, _last_cleaned}] -> {:ok, table}
 25 |       _ -> create(hash)
 26 |     end
 27 |   end
 28 | 
 29 |   def find(hash) do
 30 |     case :ets.lookup(@swarms_table_name, hash) do
 31 |       [{^hash, table, _created_at, _last_cleaned}] -> table
 32 |       _ -> :error
 33 |     end
 34 |   end
 35 | 
 36 |   def remove(hash) do
 37 |     case :ets.lookup(@swarms_table_name, hash) do
 38 |       [{^hash, _table, _created_at, _last_cleaned}] -> destroy(hash)
 39 |       _ -> :error
 40 |     end
 41 |   end
 42 | 
 43 |   def mark_as_clean(hash) do
 44 |     case :ets.lookup(@swarms_table_name, hash) do
 45 |       [{^hash, _table, _created_at, _last_cleaned}] -> clean(hash)
 46 |       _ -> :error
 47 |     end
 48 |   end
 49 | 
 50 |   def restore_creation_timestamp(hash, timestamp) do
 51 |     case :ets.lookup(@swarms_table_name, hash) do
 52 |       [{^hash, _table, _created_at, _last_cleaned}] -> restore(hash, timestamp)
 53 |       _ -> :error
 54 |     end
 55 |   end
 56 | 
 57 |   def get_swarm_list() do
 58 |     :ets.tab2list(@swarms_table_name)
 59 |   end
 60 | 
 61 |   def get_swarm_count() do
 62 |     :ets.info(@swarms_table_name, :size)
 63 |   end
 64 | 
 65 |   defp create(hash) do
 66 |     GenServer.call(__MODULE__, {:create, hash})
 67 |   end
 68 | 
 69 |   defp destroy(hash) do
 70 |     GenServer.cast(__MODULE__, {:destroy, hash})
 71 |   end
 72 | 
 73 |   defp clean(hash) do
 74 |     GenServer.cast(__MODULE__, {:clean, hash})
 75 |   end
 76 | 
 77 |   defp restore(hash, created_at) do
 78 |     GenServer.cast(__MODULE__, {:restore, hash, created_at})
 79 |   end
 80 | 
 81 |   #==========================================================================
 82 |   # Server (callbacks)
 83 |   #==========================================================================
 84 | 
 85 |   defp get_ets_compression_arg() do
 86 |     if Application.get_env(:extracker, :compress_lookups, true) do
 87 |       [:compressed]
 88 |     else
 89 |       []
 90 |     end
 91 |   end
 92 | 
 93 |   @impl true
 94 |   def init(_args) do
 95 |     ets_args = [:set, :named_table, :protected] ++ get_ets_compression_arg()
 96 |     :ets.new(@swarms_table_name, ets_args)
 97 | 
 98 |     state = %{}
 99 |     {:ok, state}
100 |   end
101 | 
102 |   @impl true
103 |   def terminate(_reason, _state) do
104 |   end
105 | 
106 |   @impl true
107 |   def handle_call({:create, hash}, _from, state) do
108 |     result = case check_allowed_hash(hash) do
109 |       true ->
110 |         table = create_swarm_checked(hash)
111 |         {:ok, table}
112 |       false ->
113 |         {:error, :hash_not_allowed}
114 |     end
115 | 
116 |     {:reply, result, state}
117 |   end
118 | 
119 |   @impl true
120 |   def handle_cast({:destroy, hash}, state) do
121 |     destroy_swarm(hash)
122 |     {:noreply, state}
123 |   end
124 | 
125 |   @impl true
126 |   def handle_cast({:clean, hash}, state) do
127 |     timestamp = System.system_time(:millisecond)
128 |     case :ets.update_element(@swarms_table_name, hash, [{4, timestamp}]) do
129 |       true -> :ok
130 |       false -> Logger.warning("failed to mark entry #{Utils.hash_to_string(hash)} as clean")
131 |     end
132 |     {:noreply, state}
133 |   end
134 | 
135 |   @impl true
136 |   def handle_cast({:restore, hash, created_at}, state) do
137 |     case :ets.update_element(@swarms_table_name, hash, [{3, created_at}]) do
138 |       true -> :ok
139 |       false -> Logger.warning("failed to update creation time for entry #{Utils.hash_to_string(hash)}")
140 |     end
141 |     {:noreply, state}
142 |   end
143 | 
144 |   @impl true
145 |   def handle_info(_msg, state) do
146 |     {:noreply, state}
147 |   end
148 | 
149 |   defp check_allowed_hash(hash) do
150 |     case Application.get_env(:extracker, :restrict_hashes, false) do
151 |       "whitelist" -> ExTracker.Accesslist.contains(:whitelist_hashes, hash)
152 |       "blacklist" -> !ExTracker.Accesslist.contains(:blacklist_hashes, hash)
153 |       _ -> true
154 |     end
155 |   end
156 | 
157 |   # create a table for the new swarm if it doesnt already exist
158 |   defp create_swarm_checked(hash) do
159 |     case :ets.lookup(@swarms_table_name, hash) do
160 |       [{^hash, table, _created_at, _last_cleaned}] -> table
161 |       _ -> create_swarm(hash)
162 |     end
163 |   end
164 | 
165 |   # create a table for the new swarm and index it
166 |   defp create_swarm(hash) do
167 |     # atom count has an upper limit so better make it optional for debug mostly
168 |     table_name = case Application.get_env(:extracker, :named_lookups, ExTracker.debug_enabled()) do
169 |       true -> :"swarm_#{hash |> Base.encode16() |> String.downcase()}"
170 |       false -> :swarm
171 |     end
172 | 
173 |     ets_args = [:set, :public] ++ get_ets_compression_arg()
174 |     table = :ets.new(table_name, ets_args)
175 | 
176 |     timestamp = System.system_time(:millisecond)
177 |     :ets.insert(@swarms_table_name, {hash, table, timestamp, timestamp})
178 | 
179 |     :telemetry.execute([:extracker, :swarm, :created], %{})
180 |     Logger.debug("created table #{inspect(table_name)} for hash #{hash |> Base.encode16() |> String.downcase()}")
181 | 
182 |     table
183 |   end
184 | 
185 |   defp destroy_swarm(hash) do
186 |     case :ets.lookup(@swarms_table_name, hash) do
187 |       [{^hash, table, _created_at, _last_cleaned}] ->
188 |         # delete the index entry
189 |         :ets.delete(@swarms_table_name, hash)
190 |         # delete the swarm table
191 |         :ets.delete(table)
192 | 
193 |         :telemetry.execute([:extracker, :swarm, :destroyed], %{})
194 |         Logger.debug("destroyed swarm for hash #{hash |> Base.encode16() |> String.downcase()}")
195 |       _ -> :notfound
196 |     end
197 |   end
198 | end
199 | 
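A sketch of one entry in the `:swarms` index table; positions 3 and 4 are the fields patched with `:ets.update_element/3` by the `:restore` and `:clean` casts above:

```elixir
hash = :crypto.strong_rand_bytes(20)      # 1: key (info_hash)
table = :ets.new(:swarm, [:set, :public]) # 2: ETS table holding the peers
now = System.system_time(:millisecond)
entry = {hash, table, now, now}           # 3: created_at, 4: last_cleaned
```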


--------------------------------------------------------------------------------
/lib/ex_tracker/swarm_printout.ex:
--------------------------------------------------------------------------------
 1 | # this module is purely to pretty-print the info returned by ExTracker.Cmd.show_swarm_list(false)
 2 | # pure throwaway code
 3 | defmodule SwarmPrintout do
 4 |   def format_memory(bytes) when is_integer(bytes) and bytes >= 0 do
 5 |     units = ["B", "KB", "MB", "GB", "TB"]
 6 |     format_memory(bytes * 1.0, 0, units)
 7 |   end
 8 | 
 9 |   defp format_memory(value, idx, units) when idx == length(units) - 1 do
10 |     formatted = :io_lib.format("~.2f", [value]) |> IO.iodata_to_binary()
11 |     "#{formatted} #{Enum.at(units, idx)}"
12 |   end
13 | 
14 |   defp format_memory(value, idx, units) do
15 |     if value < 1024 do
16 |       formatted = :io_lib.format("~.2f", [value]) |> IO.iodata_to_binary()
17 |       "#{formatted} #{Enum.at(units, idx)}"
18 |     else
19 |       format_memory(value / 1024, idx + 1, units)
20 |     end
21 |   end
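  # illustrative usage (not part of the module): each recursion step divides
  # by 1024 until the value fits the current unit
  #
  #   format_memory(512)             #=> "512.00 B"
  #   format_memory(2048)            #=> "2.00 KB"
  #   format_memory(3 * 1024 * 1024) #=> "3.00 MB"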
22 | 
23 |   def print_table(swarms) when is_list(swarms) do
24 |     header = ["Created", "Hash", "Peer Count", "Total Memory"]
25 | 
26 |     rows =
27 |       Enum.map(swarms, fn swarm ->
28 |         created = swarm["created"]
29 |         hash = swarm["hash"]
30 |         peer_count = Integer.to_string(swarm["peer_count"])
31 |         total_memory = format_memory(swarm["total_memory"])
32 |         [created, hash, peer_count, total_memory]
33 |       end)
34 | 
35 |     all_rows = [header | rows]
36 |     num_cols = length(header)
37 | 
38 |     col_widths =
39 |       for col <- 0..(num_cols - 1) do
40 |         all_rows
41 |         |> Enum.map(fn row -> String.length(Enum.at(row, col)) end)
42 |         |> Enum.max()
43 |       end
44 | 
45 |     row_format =
46 |       col_widths
47 |       |> Enum.map(fn width -> "~-" <> Integer.to_string(width) <> "s" end)
48 |       |> Enum.join(" | ")
49 | 
50 |     total_width = Enum.sum(col_widths) + 3 * (num_cols - 1)
51 |     separator = String.duplicate("-", total_width)
52 | 
53 |     IO.puts(separator)
54 |     IO.puts(:io_lib.format(row_format, header) |> IO.iodata_to_binary())
55 |     IO.puts(separator)
56 |     for row <- rows do
57 |       IO.puts(:io_lib.format(row_format, row) |> IO.iodata_to_binary())
58 |     end
59 |     IO.puts(separator)
60 |   end
61 | end
62 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/telemetry/basic_reporter.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.Telemetry.BasicReporter do
  2 |     use GenServer
  3 |     require Logger
  4 |     alias ExTracker.Utils
  5 | 
  6 |     def start_link(args) do
  7 |       GenServer.start_link(__MODULE__, args, name: __MODULE__)
  8 |     end
  9 | 
 10 |     #==========================================================================
 11 |     # Client
 12 |     #==========================================================================
 13 | 
 14 |     def handle_event(_event_name, measurements, metadata, metrics) do
 15 |       metrics |> Enum.map(&handle_metric(&1, measurements, metadata))
 16 |     end
 17 | 
 18 |     defp handle_metric(%Telemetry.Metrics.Counter{} = metric, _measurements, metadata) do
 19 |       GenServer.cast(__MODULE__, {:counter, metric.name, metadata})
 20 |     end
 21 | 
 22 |     defp handle_metric(%Telemetry.Metrics.Sum{} = metric, values, metadata) do
 23 |       values |> Enum.each(fn {_key, value} ->
 24 |         GenServer.cast(__MODULE__, {:sum, metric.name, value, metadata})
 25 |       end)
 26 |     end
 27 | 
 28 |     defp handle_metric(%Telemetry.Metrics.LastValue{} = metric, values, metadata) do
 29 |       values |> Enum.each(fn {_key, value} ->
 30 |         GenServer.cast(__MODULE__, {:last, metric.name, value, metadata})
 31 |       end)
 32 |     end
 33 | 
 34 |     defp handle_metric(metric, _measurements, _metadata) do
 35 |       Logger.error("Unsupported metric: #{metric.__struct__}. #{inspect(metric.event_name)}")
 36 |     end
 37 | 
 38 |     def render_metrics_html() do
 39 |       metrics = GenServer.call(__MODULE__, {:get_metrics})
 40 | 
 41 |       total_swarms = get_in(metrics, [[:extracker, :swarms, :total, :value], :default]) || 0
 42 |       bandwidth_in = get_in(metrics, [[:extracker, :bandwidth, :in, :value], :default, :rate]) || 0
 43 |       bandwidth_out = get_in(metrics, [[:extracker, :bandwidth, :out, :value], :default, :rate]) || 0
 44 | 
 45 |       peers_total_all = get_in(metrics, [[:extracker, :peers, :total, :value], %{family: "all"}]) || 0
 46 |       peers_total_ipv4 = get_in(metrics, [[:extracker, :peers, :total, :value], %{family: "inet"}]) || 0
 47 |       peers_total_ipv6 = get_in(metrics, [[:extracker, :peers, :total, :value], %{family: "inet6"}]) || 0
 48 |       peers_seeders_all = get_in(metrics, [[:extracker, :peers, :seeders, :value], %{family: "all"}]) || 0
 49 |       peers_seeders_ipv4 = get_in(metrics, [[:extracker, :peers, :seeders, :value], %{family: "inet"}]) || 0
 50 |       peers_seeders_ipv6 = get_in(metrics, [[:extracker, :peers, :seeders, :value], %{family: "inet6"}]) || 0
 51 |       peers_leechers_all = get_in(metrics, [[:extracker, :peers, :leechers, :value], %{family: "all"}]) || 0
 52 |       peers_leechers_ipv4 = get_in(metrics, [[:extracker, :peers, :leechers, :value], %{family: "inet"}]) || 0
 53 |       peers_leechers_ipv6 = get_in(metrics, [[:extracker, :peers, :leechers, :value], %{family: "inet6"}]) || 0
 54 | 
 55 |       http_announce_rate_ipv4 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet", action: "announce", endpoint: "http"}, :rate]) || 0
 56 |       http_announce_rate_ipv6 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet6", action: "announce", endpoint: "http"}, :rate]) || 0
 57 |       http_announce_rate_all = http_announce_rate_ipv4 + http_announce_rate_ipv6
 58 | 
 59 |       http_scrape_rate_ipv4 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet", action: "scrape", endpoint: "http"}, :rate]) || 0
 60 |       http_scrape_rate_ipv6 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet6", action: "scrape", endpoint: "http"}, :rate]) || 0
 61 |       http_scrape_rate_all = http_scrape_rate_ipv4 + http_scrape_rate_ipv6
 62 | 
 63 |       http_failure_rate_all =
 64 |         Map.get(metrics, [:extracker, :request, :failure, :count], %{})
 65 |         |> Enum.filter(fn {key, _value} -> key[:endpoint] == "http" end)
 66 |         |> Enum.map(fn {_key, value} -> value[:rate] end)
 67 |         |> Enum.sum()
 68 | 
 69 |       http_failure_rate_ipv4 =
 70 |         Map.get(metrics, [:extracker, :request, :failure, :count], %{})
 71 |         |> Enum.filter(fn {key, _value} -> key[:endpoint] == "http" and key[:family] == "inet" end)
 72 |         |> Enum.map(fn {_key, value} -> value[:rate] end)
 73 |         |> Enum.sum()
 74 | 
 75 |       http_failure_rate_ipv6 =
 76 |         Map.get(metrics, [:extracker, :request, :failure, :count], %{})
 77 |         |> Enum.filter(fn {key, _value} -> key[:endpoint] == "http" and key[:family] == "inet6" end)
 78 |         |> Enum.map(fn {_key, value} -> value[:rate] end)
 79 |         |> Enum.sum()
 80 | 
 81 |       udp_connect_rate_ipv4 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet", action: "connect", endpoint: "udp"}, :rate]) || 0
 82 |       udp_connect_rate_ipv6 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet6", action: "connect", endpoint: "udp"}, :rate]) || 0
 83 |       udp_connect_rate_all = udp_connect_rate_ipv4 + udp_connect_rate_ipv6
 84 | 
 85 |       udp_announce_rate_ipv4 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet", action: "announce", endpoint: "udp"}, :rate]) || 0
 86 |       udp_announce_rate_ipv6 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet6", action: "announce", endpoint: "udp"}, :rate]) || 0
 87 |       udp_announce_rate_all = udp_announce_rate_ipv4 + udp_announce_rate_ipv6
 88 | 
 89 |       udp_scrape_rate_ipv4 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet", action: "scrape", endpoint: "udp"}, :rate]) || 0
 90 |       udp_scrape_rate_ipv6 = get_in(metrics, [[:extracker, :request, :processing_time, :count], %{family: "inet6", action: "scrape", endpoint: "udp"}, :rate]) || 0
 91 |       udp_scrape_rate_all = udp_scrape_rate_ipv4 + udp_scrape_rate_ipv6
 92 | 
 93 |       udp_failure_rate_all =
 94 |         Map.get(metrics, [:extracker, :request, :failure, :count], %{})
 95 |         |> Enum.filter(fn {key, _value} -> key[:endpoint] == "udp" end)
 96 |         |> Enum.map(fn {_key, value} -> value[:rate] end)
 97 |         |> Enum.sum()
 98 | 
 99 |       udp_failure_rate_ipv4 =
100 |         Map.get(metrics, [:extracker, :request, :failure, :count], %{})
101 |         |> Enum.filter(fn {key, _value} -> key[:endpoint] == "udp" and key[:family] == "inet" end)
102 |         |> Enum.map(fn {_key, value} -> value[:rate] end)
103 |         |> Enum.sum()
104 | 
105 |       udp_failure_rate_ipv6 =
106 |         Map.get(metrics, [:extracker, :request, :failure, :count], %{})
107 |         |> Enum.filter(fn {key, _value} -> key[:endpoint] == "udp" and key[:family] == "inet6" end)
108 |         |> Enum.map(fn {_key, value} -> value[:rate] end)
109 |         |> Enum.sum()
110 | 
111 |       html = """
112 | <!DOCTYPE html>
113 | <html lang="en">
114 | <head>
115 |   <meta charset="UTF-8">
116 |   <meta http-equiv="refresh" content="60">
117 |   <title>ExTracker Statistics</title>
118 |   <style>
119 |     .cyan { color: cyan; text-shadow: 1px 1px 2px black; }
120 |     .fuchsia { color: fuchsia; text-shadow: 1px 1px 2px black; }
121 |     table { border-collapse: collapse; margin-bottom: 20px; }
122 |     th, td { border: 1px solid #ccc; padding: 8px; text-align: left; }
123 |     th { background-color: #f2f2f2; }
124 |   </style>
125 | </head>
126 | <body>
127 |   <h1><span class="fuchsia">Ex</span><span class="cyan">Tracker</span> Statistics</h1>
128 | 
129 |   <table>
130 |     <thead><tr><th colspan="2">Swarms (Torrents)</th></tr></thead>
131 |     <tbody>
132 |       <tr><td>Current total</td><td>#{total_swarms}</td></tr>
133 |     </tbody>
134 |   </table>
135 | 
136 |   <table>
137 |     <thead><tr><th colspan="4">Peers</th></tr></thead>
138 |     <thead><tr><th>Type</th><th>Total</th><th>IPv4</th><th>IPv6</th></tr></thead>
139 |     <tbody>
140 |     <tr>
141 |         <td>All</td>
142 |         <td>#{peers_total_all}</td>
143 |         <td>#{peers_total_ipv4}</td>
144 |         <td>#{peers_total_ipv6}</td>
145 |       </tr>
146 |       <tr>
147 |         <td>Seeders</td>
148 |         <td>#{peers_seeders_all}</td>
149 |         <td>#{peers_seeders_ipv4}</td>
150 |         <td>#{peers_seeders_ipv6}</td>
151 |       </tr>
152 |       <tr>
153 |         <td>Leechers</td>
154 |         <td>#{peers_leechers_all}</td>
155 |         <td>#{peers_leechers_ipv4}</td>
156 |         <td>#{peers_leechers_ipv6}</td>
157 |       </tr>
158 |     </tbody>
159 |   </table>
160 | 
161 |   <table>
162 |     <thead><tr><th colspan="4">HTTP Responses (per second)</th></tr></thead>
163 |     <thead><tr><th>Action</th><th>Total</th><th>IPv4</th><th>IPv6</th></tr></thead>
164 |     <tbody>
165 |       <tr>
166 |         <td>announce</td>
167 |         <td>#{trunc(http_announce_rate_all)}</td>
168 |         <td>#{trunc(http_announce_rate_ipv4)}</td>
169 |         <td>#{trunc(http_announce_rate_ipv6)}</td>
170 |       </tr>
171 |       <tr>
172 |         <td>scrape</td>
173 |         <td>#{trunc(http_scrape_rate_all)}</td>
174 |         <td>#{trunc(http_scrape_rate_ipv4)}</td>
175 |         <td>#{trunc(http_scrape_rate_ipv6)}</td>
176 |       </tr>
177 | 
178 |       <tr>
179 |         <td>failure</td>
180 |         <td>#{trunc(http_failure_rate_all)}</td>
181 |         <td>#{trunc(http_failure_rate_ipv4)}</td>
182 |         <td>#{trunc(http_failure_rate_ipv6)}</td>
183 |       </tr>
184 |     </tbody>
185 |   </table>
186 | 
187 |   <table>
188 |     <thead><tr><th colspan="4">UDP Responses (per second)</th></tr></thead>
189 |     <thead><tr><th>Action</th><th>Total</th><th>IPv4</th><th>IPv6</th></tr></thead>
190 |     <tbody>
191 |       <tr>
192 |         <td>connect</td>
193 |         <td>#{trunc(udp_connect_rate_all)}</td>
194 |         <td>#{trunc(udp_connect_rate_ipv4)}</td>
195 |         <td>#{trunc(udp_connect_rate_ipv6)}</td>
196 |       </tr>
197 |       <tr>
198 |         <td>announce</td>
199 |         <td>#{trunc(udp_announce_rate_all)}</td>
200 |         <td>#{trunc(udp_announce_rate_ipv4)}</td>
201 |         <td>#{trunc(udp_announce_rate_ipv6)}</td>
202 |       </tr>
203 |       <tr>
204 |         <td>scrape</td>
205 |         <td>#{trunc(udp_scrape_rate_all)}</td>
206 |         <td>#{trunc(udp_scrape_rate_ipv4)}</td>
207 |         <td>#{trunc(udp_scrape_rate_ipv6)}</td>
208 |       </tr>
209 | 
210 |       <tr>
211 |         <td>failure</td>
212 |         <td>#{trunc(udp_failure_rate_all)}</td>
213 |         <td>#{trunc(udp_failure_rate_ipv4)}</td>
214 |         <td>#{trunc(udp_failure_rate_ipv6)}</td>
215 |       </tr>
216 |     </tbody>
217 |   </table>
218 | 
219 |   <table>
220 |     <thead>
221 |       <tr>
222 |         <th colspan="2">Bandwidth (per second)</th>
223 |       </tr>
224 |     </thead>
225 |     <tbody>
226 |       <tr>
227 |         <td>RX (In)</td>
228 |         <td>#{Utils.format_bits_as_string(bandwidth_in)}</td>
229 |       </tr>
230 |       <tr>
231 |         <td>TX (Out)</td>
232 |         <td>#{Utils.format_bits_as_string(bandwidth_out)}</td>
233 |       </tr>
234 |     </tbody>
235 |   </table>
236 | </body>
237 | </html>
238 | """
239 | 
240 |       html
241 |       #"#{inspect(metrics)}"
242 |     end
243 | 
244 |     #==========================================================================
245 |     # Server (callbacks)
246 |     #==========================================================================
247 | 
248 |     @impl true
249 |     def init(args) do
250 |       Process.flag(:trap_exit, true)
251 |       metrics = Keyword.get(args, :metrics, [])
252 |       groups = Enum.group_by(metrics, & &1.event_name)
253 | 
254 |       for {event, metrics} <- groups do
255 |         id = {__MODULE__, event, self()}
256 |         :telemetry.attach(id, event, &__MODULE__.handle_event/4, metrics)
257 |       end
258 | 
259 |       state = Enum.map(groups, fn {_event, metrics} ->
260 |         Enum.map(metrics, fn metric -> {metric.name, %{}} end)
261 |       end)
262 |       |> List.flatten()
263 |       |> Map.new()
264 | 
265 |       {:ok, state}
266 |     end
267 | 
268 |     @impl true
269 |     def terminate(_, _state) do
270 |       # detach every handler attached in init/1; the ids have the shape {__MODULE__, event, pid}
271 |       :telemetry.list_handlers([])
272 |       |> Enum.filter(fn %{id: id} -> match?({__MODULE__, _, _}, id) end)
273 |       |> Enum.each(fn %{id: id} -> :telemetry.detach(id) end)
274 |       :ok
275 |     end
276 | 
277 |     @impl true
278 |     def handle_call({:get_metrics}, _from, state) do
279 |       {:reply, state, state}
280 |     end
281 | 
282 |     @impl true
283 |     def handle_cast({:counter, metric, metadata}, state) do
284 |       data = Map.get(state, metric)
285 |       key = get_target_key(metadata)
286 |       now  = System.monotonic_time(:second)
287 | 
288 |       new_entry = case Map.get(data, key) do
289 |         nil ->
290 |           %{prev: 0, value: 1, ts: now, rate: 0}
291 |         %{prev: prev, value: current, ts: ts, rate: _rate} = entry ->
292 |           new_value = current + 1
293 |           elapsed = now - ts
294 |           if elapsed >= 1 do
295 |             delta = new_value - prev
296 |             rate = delta / elapsed
297 |             %{prev: current, value: new_value, ts: now, rate: rate}
298 |           else
299 |             Map.put(entry, :value, new_value)
300 |           end
301 |       end
302 | 
303 |       updated = Map.put(data, key, new_entry)
304 | 
305 |       Logger.debug("counter updated: #{inspect(metric)}/#{inspect(metadata)} - value: #{new_entry[:value]}")
306 |       {:noreply, Map.put(state, metric, updated)}
307 |     end
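    # worked example for the rate computation above (hypothetical numbers):
    # with prev = 100 and the counter reaching 160 after an elapsed window of
    # 2 seconds, rate = (160 - 100) / 2 = 30.0 events per second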
308 | 
309 |     @impl true
310 |     def handle_cast({:sum, metric, value, metadata}, state) do
311 |       data = Map.get(state, metric)
312 |       key = get_target_key(metadata)
313 |       now  = System.monotonic_time(:second)
314 | 
315 |       new_entry = case Map.get(data, key) do
316 |         nil ->
317 |           %{prev: 0, value: value, ts: now, rate: 0}
318 |         %{prev: prev, value: current, ts: ts, rate: _rate} = entry ->
319 |           new_value = current + value
320 |           elapsed = now - ts
321 |           if elapsed >= 1 do
322 |             delta = new_value - prev
323 |             rate = delta / elapsed
324 |             %{prev: current, value: new_value, ts: now, rate: rate}
325 |           else
326 |             Map.put(entry, :value, new_value)
327 |           end
328 |       end
329 | 
330 |       updated = Map.put(data, key, new_entry)
331 | 
332 |       Logger.debug("sum updated: #{inspect(metric)}/#{inspect(metadata)} - value: #{new_entry[:value]}")
333 |       {:noreply, Map.put(state, metric, updated)}
334 |     end
335 | 
336 |     @impl true
337 |     def handle_cast({:last, metric, value, metadata}, state) do
338 |       data = Map.get(state, metric)
339 |       key = get_target_key(metadata)
340 | 
341 |       updated = Map.put(data, key, value)
342 | 
343 |       Logger.debug("lastValue updated: #{inspect(metric)} - value: #{value}")
344 |       {:noreply, Map.put(state, metric, updated)}
345 |     end
346 | 
347 |     defp get_target_key(metadata) do
348 |       case Kernel.map_size(metadata) do
349 |         0 -> :default
350 |         _n -> metadata
351 |       end
352 |     end
353 | end
354 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/telemetry/plug.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.Telemetry.Plug do
 2 |   @behaviour Plug
 3 | 
 4 |   def init(opts), do: opts
 5 | 
 6 |   def call(conn, _opts) do
 7 |     start_time = System.monotonic_time(:microsecond)
 8 | 
 9 |     Plug.Conn.register_before_send(conn, fn conn ->
10 |       elapsed = System.monotonic_time(:microsecond) - start_time
11 | 
12 |       # incoming bandwidth
13 |       size = estimate_request_size(conn)
14 |       :telemetry.execute([:extracker, :bandwidth, :in], %{value: size})
15 | 
16 |       # request processing time
17 |       endpoint = "http"
18 |       family = case tuple_size(conn.remote_ip) do
19 |         4 -> "inet"
20 |         8 -> "inet6"
21 |       end
22 | 
23 |       action = case conn.request_path do
24 |         "/announce" -> "announce"
25 |         "/scrape" -> "scrape"
26 |         _ -> nil
27 |       end
28 | 
29 |       if action != nil do
30 |         :telemetry.execute([:extracker, :request], %{processing_time: elapsed}, %{endpoint: endpoint, action: action, family: family})
31 |       end
32 | 
33 |       conn
34 |     end)
35 |   end
36 | 
37 |   defp estimate_request_size(conn) do
38 |     method = conn.method
39 |     path = conn.request_path
40 | 
41 |     request_line_size = byte_size("#{method} #{path}") + 11 # " HTTP/1.1\r\n" (space + version + CRLF)
42 | 
43 |     headers_size =
44 |       conn.req_headers
45 |       |> Enum.map(fn {k, v} -> byte_size(k) + byte_size(v) + 4 end)  # ": " and "\r\n"
46 |       |> Enum.sum()
47 | 
48 |     request_line_size + headers_size + 2 # "\r\n"
49 |   end
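  # worked example (hypothetical request): "GET /announce" with the single
  # header {"host", "example.com"} estimates to
  #   request line: byte_size("GET /announce") + 11 = 24
  #   headers:      4 + 11 + 4                      = 19
  #   total:        24 + 19 + 2                     = 45 bytes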
50 | end
51 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/telemetry/router.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.Telemetry.Router do
 2 |   use Plug.Router
 3 |   if Mix.env == :dev, do: use Plug.Debugger
 4 | 
 5 |   @assets_folder Application.app_dir(:extracker, "priv/static/assets")
 6 | 
 7 |   plug Plug.Logger
 8 |   plug Plug.Static, at: "/assets", from: @assets_folder
 9 |   plug :match
10 |   #plug Plug.Parsers, parsers: [:urlencoded, :multipart], pass: ["*/*"], validate_utf8: false
11 |   plug Plug.Parsers, parsers: [], pass: ["text/html"], validate_utf8: false
12 |   plug :dispatch
13 | 
14 |   # basic telemetry
15 |   get "/tracker-stats.html" do
16 |     response = ExTracker.Telemetry.BasicReporter.render_metrics_html()
17 |     conn
18 |     |> put_resp_content_type("text/html")
19 |     |> put_resp_header("cache-control", "no-cache")
20 |     |> send_resp(200, response)
21 | 
22 |   end
23 | 
24 |   # prometheus scrape
25 |   get "/prometheus" do
26 |     metrics = TelemetryMetricsPrometheus.Core.scrape()
27 | 
28 |     conn
29 |     |> put_resp_content_type("text/plain")
30 |     |> send_resp(200, metrics)
31 |   end
32 | 
33 |   match _ do
34 |     conn
35 |     |> put_resp_content_type("text/html")
36 |     |> send_resp(200, ExTracker.web_about())
37 |   end
38 | end
39 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/telemetry/telemetry.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.Telemetry do
  2 |   use Supervisor
  3 |   require Logger
  4 |   import Telemetry.Metrics
  5 | 
  6 |   def start_link(args) do
  7 |     Supervisor.start_link(__MODULE__, args, name: __MODULE__)
  8 |   end
  9 | 
 10 |   def init(_arg) do
 11 |     children = [
 12 |       {:telemetry_poller,
 13 |         measurements: poller_measurements(),
 14 |         period: 60_000,
 15 |         init_delay: 10_000
 16 |       }
 17 |     ]
 18 |       ++ get_http_children()
 19 |       ++ get_basic_children()
 20 |       ++ get_prometheus_children()
 21 | 
 22 |     Supervisor.init(children, strategy: :one_for_one)
 23 |   end
 24 | 
 25 |   defp get_http_children() do
 26 |     v4 = case Application.get_env(:extracker, :ipv4_enabled) do
 27 |       true ->
 28 |         [Supervisor.child_spec({Plug.Cowboy,
 29 |           scheme: :http,
 30 |           plug: ExTracker.Telemetry.Router,
 31 |           options: [
 32 |             net: :inet,
 33 |             ip: ExTracker.Utils.get_configured_ipv4(),
 34 |             port: Application.get_env(:extracker, :telemetry_port),
 35 |             compress: true,
36 |             ref: "telemetry_router_inet"
 37 |           ]},
 38 |           id: :telemetry_supervisor_inet
 39 |         )]
 40 |       false -> []
 41 |     end
 42 | 
 43 |     v6 = case Application.get_env(:extracker, :ipv6_enabled) do
 44 |       true ->
 45 |         [Supervisor.child_spec({Plug.Cowboy,
 46 |           scheme: :http,
 47 |           plug: ExTracker.Telemetry.Router,
 48 |           options: [
 49 |             net: :inet6,
 50 |             ip: ExTracker.Utils.get_configured_ipv6(),
 51 |             port: Application.get_env(:extracker, :telemetry_port),
 52 |             compress: true,
 53 |             ref: "telemetry_router_inet6",
 54 |             ipv6_v6only: true
 55 |           ]},
 56 |           id: :telemetry_supervisor_inet6
 57 |         )]
 58 |       false -> []
 59 |     end
 60 | 
 61 |     v4 ++ v6
 62 |   end
 63 | 
 64 |   defp get_basic_children() do
 65 |     case Application.get_env(:extracker, :telemetry_basic) do
 66 |       true ->
 67 |         Logger.notice("Telemetry Basic endpoint enabled")
 68 |         [{ExTracker.Telemetry.BasicReporter, metrics: metrics()}]
 69 |       _ -> []
 70 |     end
 71 |   end
 72 | 
 73 |   defp get_prometheus_children() do
 74 |      case Application.get_env(:extracker, :telemetry_prometheus) do
 75 |       true ->
 76 |         Logger.notice("Telemetry Prometheus endpoint enabled")
 77 |         [{TelemetryMetricsPrometheus.Core, metrics: metrics()}]
 78 |       _ -> []
 79 |     end
 80 |   end
 81 | 
 82 |   defp metrics do
 83 |     [
 84 |       # :telemetry.execute([:extracker, :request], %{processing_time: 0}, %{endpoint: "udp", action: "announce", family: "inet"})
 85 |       counter("extracker.request.processing_time.count", event_name: [:extracker, :request], measurement: :processing_time, tags: [:endpoint, :action, :family], unit: :microsecond),
 86 |       sum("extracker.request.processing_time.sum", event_name: [:extracker, :request], measurement: :processing_time, tags: [:endpoint, :action, :family], unit: :microsecond),
 87 |       # :telemetry.execute([:extracker, :request, :success], %{}, %{endpoint: "udp", action: "announce", family: "inet"})
 88 |       counter("extracker.request.success.count", tags: [:endpoint, :action, :family]),
 89 |       # :telemetry.execute([:extracker, :request, :failure], %{}, %{endpoint: "udp", action: "announce", family: "inet"})
 90 |       counter("extracker.request.failure.count", tags: [:endpoint, :action, :family]),
 91 |       # :telemetry.execute([:extracker, :request, :error], %{}, %{endpoint: "udp", action: "announce", family: "inet"})
 92 |       counter("extracker.request.error.count", tags: [:endpoint, :action, :family]),
 93 | 
 94 |       # :telemetry.execute([:extracker, :peer, :added], %{}, %{ family: "inet"})
 95 |       counter("extracker.peer.added.count", tags: [:family]),
 96 |       # :telemetry.execute([:extracker, :peer, :removed], %{}, %{ family: "inet"})
 97 |       counter("extracker.peer.removed.count", tags: [:family]),
 98 | 
 99 |       # :telemetry.execute([:extracker, :swarm, :created], %{})
100 |       counter("extracker.swarm.created.count"),
101 |       # :telemetry.execute([:extracker, :swarm, :destroyed], %{})
102 |       counter("extracker.swarm.destroyed.count"),
103 | 
104 |       # :telemetry.execute([:extracker, :peers, :total], %{value: 0}, %{family: "inet"})
105 |       last_value("extracker.peers.total.value", tags: [:family]),
106 |       # :telemetry.execute([:extracker, :peers, :seeders], %{value: 0}, %{family: "inet"})
107 |       last_value("extracker.peers.seeders.value", tags: [:family]),
108 |       # :telemetry.execute([:extracker, :peers, :leechers], %{value: 0}, %{family: "inet"})
109 |       last_value("extracker.peers.leechers.value", tags: [:family]),
110 | 
111 |       #:telemetry.execute([:extracker, :swarms, :total], %{value: 0})
112 |       last_value("extracker.swarms.total.value"),
113 | 
114 |       # :telemetry.execute([:extracker, :bandwidth, :in], %{value: 0})
115 |       sum("extracker.bandwidth.in.value"),
116 |       # :telemetry.execute([:extracker, :bandwidth, :out], %{value: 0})
117 |       sum("extracker.bandwidth.out.value"),
118 | 
119 |       #last_value("extracker.system.memory")
120 |     ]
121 |   end
122 | 
123 |   defp poller_measurements() do
124 |     [
125 |       #{:process_info, event: [:extracker, :system], name: ExTracker.Telemetry.Poller, keys: [:memory]},
126 | 
127 |       {ExTracker.Telemetry, :measure_swarms_totals, []},
128 |       {ExTracker.Telemetry, :measure_peer_totals, []},
129 |       {ExTracker.Telemetry, :measure_peer_seeders, []},
130 |       {ExTracker.Telemetry, :measure_peer_leechers, []},
131 |       {ExTracker.Telemetry, :reset_bandwidth, []}
132 |     ]
133 |   end
134 | 
135 |   def measure_peer_totals() do
136 |     [:all, :inet, :inet6]
137 |     |> Enum.each( fn family ->
138 |       total = ExTracker.SwarmFinder.get_swarm_list()
139 |       |> Task.async_stream(fn {_hash, table, _created_at, _last_cleaned} ->
140 |         ExTracker.Swarm.get_peer_count(table, family)
141 |       end, ordered: false)
142 |       |> Stream.reject(&match?({_, :undefined}, &1))
143 |       |> Stream.map(&elem(&1, 1))
144 |       |> Enum.sum()
145 | 
146 |       :telemetry.execute([:extracker, :peers, :total], %{value: total}, %{family: Atom.to_string(family)})
147 |     end)
148 |   end
149 | 
150 |   def measure_peer_seeders() do
151 |     [:all, :inet, :inet6]
152 |     |> Enum.each( fn family ->
153 |       total = ExTracker.SwarmFinder.get_swarm_list()
154 |       |> Task.async_stream(fn {_hash, table, _created_at, _last_cleaned} ->
155 |         ExTracker.Swarm.get_seeder_count(table, family)
156 |       end, ordered: false)
157 |       |> Stream.reject(&match?({_, :undefined}, &1))
158 |       |> Stream.map(&elem(&1, 1))
159 |       |> Enum.sum()
160 | 
161 |       :telemetry.execute([:extracker, :peers, :seeders], %{value: total}, %{family: Atom.to_string(family)})
162 |     end)
163 |   end
164 | 
165 |   def measure_peer_leechers() do
166 |     [:all, :inet, :inet6]
167 |     |> Enum.each( fn family ->
168 |       total = ExTracker.SwarmFinder.get_swarm_list()
169 |       |> Task.async_stream(fn {_hash, table, _created_at, _last_cleaned} ->
170 |         ExTracker.Swarm.get_leecher_count(table, family)
171 |       end, ordered: false)
172 |       |> Stream.reject(&match?({_, :undefined}, &1))
173 |       |> Stream.map(&elem(&1, 1))
174 |       |> Enum.sum()
175 | 
176 |       :telemetry.execute([:extracker, :peers, :leechers], %{value: total}, %{family: Atom.to_string(family)})
177 |     end)
178 |   end
179 | 
180 |   def measure_swarms_totals() do
181 |     total = ExTracker.SwarmFinder.get_swarm_count()
182 |     :telemetry.execute([:extracker, :swarms, :total], %{value: total})
183 |   end
184 | 
185 |   def reset_bandwidth() do
186 |     :telemetry.execute([:extracker, :bandwidth, :in], %{value: 0})
187 |     :telemetry.execute([:extracker, :bandwidth, :out], %{value: 0})
188 |   end
189 | end
190 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/types/announce_request.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.Types.AnnounceRequest do
  2 | 
  3 |   require Logger
  4 | 
  5 |   def parse(params) do
  6 |     # mandatory fields
  7 |     with {:ok, info_hash} <- fetch_field_info_hash(params),
  8 |       {:ok, peer_id} <- fetch_field_peer_id(params),
  9 |       {:ok, port} <- fetch_field_port(params),
 10 |       {:ok, uploaded} <- fetch_field_uploaded(params),
 11 |       {:ok, downloaded} <- fetch_field_downloaded(params),
 12 |       {:ok, left} <- fetch_field_left(params)
 13 |     do
 14 |       mandatories = %{
 15 |         info_hash: info_hash, peer_id: peer_id, port: port,
 16 |         uploaded: uploaded, downloaded: downloaded, left: left
 17 |       }
 18 | 
 19 |       # optional fields
 20 |       optionals = %{}
 21 |         |> add_field_compact(params)
 22 |         |> add_field_event(params)
 23 |         |> add_field_no_peer_id(params)
 24 |         |> add_field_numwant(params)
 25 |         |> add_field_ip(params)
 26 |         #|> add_field_key(params)
 27 |         #|> add_field_trackerid(params)
 28 | 
 29 |       request = Map.merge(mandatories, optionals)
 30 |       {:ok, request}
 31 |     else
 32 |       {:error, message} -> {:error, message}
 33 |       _ -> {:error, "unknown error"}
 34 |     end
 35 |   end
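  # illustrative sketch (hypothetical values, assuming Utils.validate_hash/1
  # accepts a raw 20-byte hash): a minimal valid announce decodes as
  #
  #   params = %{
  #     "info_hash" => <<0::160>>, "peer_id" => "-EX0001-123456789012",
  #     "port" => "6881", "uploaded" => "0", "downloaded" => "0", "left" => "100"
  #   }
  #   {:ok, request} = parse(params)
  #   request.port    #=> 6881
  #   request.compact #=> true (the default when the field is absent)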
 36 | 
 37 |   #==========================================================================
 38 |   # Mandatory Fields
 39 |   #==========================================================================
 40 | 
 41 |   # info_hash: urlencoded 20-byte SHA1 hash of the value of the info key from the Metainfo file.
 42 |   defp fetch_field_info_hash(params) do
 43 |     case Map.fetch(params, "info_hash") do
 44 |       {:ok, info_hash} ->
 45 |         case ExTracker.Utils.validate_hash(info_hash) do
 46 |           {:ok, decoded_hash} -> {:ok, decoded_hash}
 47 |           {:error, error} ->
 48 |             Logger.warning("invalid 'info_hash' parameter: size: #{byte_size(info_hash)} value: #{inspect(info_hash)}")
 49 |             {:error, "invalid 'info_hash' parameter: #{error}"}
 50 |         end
 51 |       :error -> {:error, "missing 'info_hash' parameter"}
 52 |     end
 53 |   end
 54 | 
 55 |   # peer_id: urlencoded 20-byte string used as a unique ID for the client, generated by the client at startup. This is allowed to be any value, and may be binary data.
 56 |   defp fetch_field_peer_id(params) do
 57 |     case Map.fetch(params, "peer_id") do
 58 |       {:ok, peer_id} ->
 59 |         case byte_size(peer_id) do
 60 |           20 -> {:ok,peer_id}
 61 |           _ ->
 62 |             Logger.warning("invalid 'peer_id' parameter: size: #{byte_size(peer_id)} value: #{inspect(peer_id)}")
 63 |             {:error, "invalid 'peer_id' parameter"}
 64 |         end
 65 |       :error -> {:error, "missing 'peer_id' parameter"}
 66 |     end
 67 |   end
 68 | 
 69 |   # port: The port number that the client is listening on. Ports reserved for BitTorrent are typically 6881-6889.
 70 |   defp fetch_field_port(params) do
 71 |     case Map.fetch(params, "port") do
 72 |       {:ok, port} when is_integer(port) ->
 73 |         {:ok, port}
 74 |       {:ok, port} ->
 75 |         case Integer.parse(port) do
 76 |           {number, _rest} when number >= 0 and number <= 65535 -> {:ok, number}
77 |           _ -> {:error, "invalid 'port' parameter"} # :error or out-of-range values
 78 |         end
 79 |       :error -> {:error, "missing 'port' parameter"}
 80 |     end
 81 |   end
 82 | 
 83 |   # downloaded: The total amount downloaded (since the client sent the 'started' event to the tracker) in base ten ASCII.
 84 |   # While not explicitly stated in the official specification, the consensus is that this should be the total number of bytes downloaded.
 85 |   defp fetch_field_downloaded(params) do
 86 |     case Map.fetch(params, "downloaded") do
 87 |       {:ok, downloaded} when is_integer(downloaded) ->
 88 |         {:ok, downloaded}
 89 |       {:ok, downloaded} ->
 90 |         case Integer.parse(downloaded) do
 91 |           {number, _rest} when number >= 0 -> {:ok, number}
92 |           _ -> {:error, "invalid 'downloaded' parameter"} # :error or negative values
 93 |         end
 94 |       :error -> {:error, "missing 'downloaded' parameter"}
 95 |     end
 96 |   end
 97 | 
 98 |   # uploaded: The total amount uploaded (since the client sent the 'started' event to the tracker) in base ten ASCII.
 99 |   # While not explicitly stated in the official specification, the consensus is that this should be the total number of bytes uploaded.
100 |   defp fetch_field_uploaded(params) do
101 |     case Map.fetch(params, "uploaded") do
102 |       {:ok, uploaded} when is_integer(uploaded) ->
103 |         {:ok, uploaded}
104 |       {:ok, uploaded} ->
105 |         case Integer.parse(uploaded) do
106 |           {number, _rest} when number >= 0 -> {:ok, number}
107 |           _ -> {:error, "invalid 'uploaded' parameter"} # :error or negative values
108 |         end
109 |       :error -> {:error, "missing 'uploaded' parameter"}
110 |     end
111 |   end
112 | 
113 |   # left: The number of bytes this client still has to download in base ten ASCII.
114 |   # Clarification: The number of bytes needed to download to be 100% complete and get all the included files in the torrent.
115 |   defp fetch_field_left(params) do
116 |     case Map.fetch(params, "left") do
117 |       {:ok, left} when is_integer(left) ->
118 |         {:ok, left}
119 |       {:ok, left} ->
120 |         case Integer.parse(left) do
121 |           {number, _rest} when number >= 0 -> {:ok, number}
122 |           _ -> {:error, "invalid 'left' parameter"} # :error or negative values
123 |         end
124 |       :error -> {:error, "missing 'left' parameter"}
125 |     end
126 |   end
127 | 
128 |   #==========================================================================
129 |   # Optional Fields
130 |   #==========================================================================
131 | 
132 |   # compact: Setting this to 1 indicates that the client accepts a compact response. The peers list is replaced by a peers string with 6 bytes per peer.
133 |   # The first four bytes are the host (in network byte order), the last two bytes are the port (in network byte order).
134 |   # It should be noted that some trackers only support compact responses (for saving bandwidth) and either refuse requests without "compact=1"
135 |   # or simply send a compact response unless the request contains "compact=0" (in which case they will refuse the request.)
136 |   defp add_field_compact(request, params) do
137 |     case Map.fetch(params, "compact") do
138 |       {:ok, compact} -> Map.put(request, :compact, compact != "0")
139 |       :error -> Map.put(request, :compact, true)
140 |     end
141 |   end
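  # illustrative sketch (not part of this module): packing one IPv4 peer into
  # the 6-byte compact representation described above
  #
  #   ip = {192, 168, 0, 1}
  #   port = 6881
  #   <<192, 168, 0, 1, port::unsigned-16>> # 4 address bytes + 2 port bytes, network order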
142 | 
143 |   # no_peer_id: Indicates that the tracker can omit peer id field in peers dictionary. This option is ignored if compact is enabled.
144 |   defp add_field_no_peer_id(request, params) do
145 |     case Map.fetch(params, "no_peer_id") do
146 |       {:ok, no_peer_id} -> Map.put(request, :no_peer_id, no_peer_id == "1")
147 |       :error -> Map.put(request, :no_peer_id, false)
148 |     end
149 |   end
150 | 
151 |   # event: If specified, must be one of started, completed, stopped, (or empty which is the same as not being specified). If not specified, then this request is one performed at regular intervals.
152 |   #   started: The first request to the tracker must include the event key with this value.
153 |   #   stopped: Must be sent to the tracker if the client is shutting down gracefully.
154 |   #   completed: Must be sent to the tracker when the download completes. However, must not be sent if the download was already 100% complete when the client started.
155 |   #              Presumably, this is to allow the tracker to increment the "completed downloads" metric based solely on this event.
156 |   defp add_field_event(request, params) do
157 |     case Map.fetch(params, "event") do
158 |       {:ok, "started"} -> Map.put(request, :event, :started)
159 |       {:ok, "stopped"} -> Map.put(request, :event, :stopped)
160 |       {:ok, "completed"} -> Map.put(request, :event, :completed)
161 |       {:ok, "paused"} -> Map.put(request, :event, :paused)
162 |       {:ok, "unknown"} -> Map.put(request, :event, :updated) # this one is annoying in the wild
163 |       {:ok, ""} -> Map.put(request, :event, :updated)
164 |       {:ok, other} ->
165 |         Logger.warning("invalid 'event' parameter: size: #{byte_size(other)} value: #{inspect(other)}")
166 |         Map.put(request, :event, :updated) #:invalid
167 |       :error -> Map.put(request, :event, :updated)
168 |     end
169 |   end
170 | 
171 |   # numwant: Optional. Number of peers that the client would like to receive from the tracker. This value is permitted to be zero. If omitted, typically defaults to 50 peers.
172 |   defp add_field_numwant(request, params) do
173 |     case Map.fetch(params, "numwant") do
174 |       {:ok, numwant} when is_integer(numwant) ->
175 |         Map.put(request, :numwant, numwant)
176 |       {:ok, numwant} ->
177 |         case Integer.parse(numwant) do
178 |           {number, _rest} -> Map.put(request, :numwant, number)
179 |           :error -> Map.put(request, :numwant, 25)
180 |         end
181 |       :error -> Map.put(request, :numwant, 25)
182 |     end
183 |   end
184 | 
185 |   # ip: Optional. The true IP address of the client machine, in dotted quad format or rfc3513 defined hexed IPv6 address.
186 |   # In general this parameter is not necessary as the address of the client can be determined from the IP address from which the HTTP request came.
187 |   # The parameter is only needed in the case where the IP address that the request came in on is not the IP address of the client.
188 |   # This happens if the client is communicating to the tracker through a proxy (or a transparent web proxy/cache.)
189 |   # It also is necessary when both the client and the tracker are on the same local side of a NAT gateway.
190 |   # The reason for this is that otherwise the tracker would give out the internal (RFC1918) address of the client, which is not routable.
191 |   # Therefore the client must explicitly state its (external, routable) IP address to be given out to external peers.
192 |   # Various trackers treat this parameter differently. Some honor it only if the IP address that the request came in on is in RFC1918 space.
193 |   # Others honor it unconditionally, while others ignore it completely. In the case of an IPv6 address (e.g. 2001:db8:1:2::100) it indicates only that the client can communicate via IPv6.
194 |   defp add_field_ip(request, params) do
195 |     case Map.fetch(params, "ip") do
196 |       {:ok, requested_ip} -> Map.put(request, :ip, requested_ip)
197 |       :error -> Map.put(request, :ip, nil)
198 |     end
199 |   end
200 | 
201 |   # key: Optional. An additional identification that is not shared with any other peers. It is intended to allow a client to prove their identity should their IP address change.
202 |   defp add_field_key(request, params) do
203 |     case Map.fetch(params, "key") do
204 |       {:ok, key} -> Map.put(request, :key, key)
205 |       :error -> Map.put(request, :key, nil)
206 |     end
207 |   end
208 | 
209 |   # trackerid: Optional. If a previous announce contained a tracker id, it should be set here.
210 |   defp add_field_trackerid(request, params) do
211 |     case Map.fetch(params, "trackerid") do
212 |       {:ok, trackerid} -> Map.put(request, :trackerid, trackerid)
213 |       :error -> Map.put(request, :trackerid, nil)
214 |     end
215 |   end
216 | end
217 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/types/announce_response.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.Types.AnnounceResponse do
 2 | 
 3 |   def generate_success(family, compact, peer_list, total_seeders, total_leechers) do
 4 |     response = %{
 5 |       #warning message: (new, optional) Similar to failure reason, but the response still gets processed normally. The warning message is shown just like an error.
 6 |       #interval: Interval in seconds that the client should wait between sending regular requests to the tracker.
 7 |       "interval" => Application.get_env(:extracker, :announce_interval),
 8 |       #tracker id: A string that the client should send back on its next announcements. If absent and a previous announce sent a tracker id, do not discard the old value; keep using it.
 9 |       #"tracker id" => "",
10 |       #complete: number of peers with the entire file, i.e. seeders (integer)
11 |       "complete" => total_seeders,
12 |       #incomplete: number of non-seeder peers, aka "leechers" (integer)
13 |       "incomplete" => total_leechers,
14 |       #peers: (dictionary model) The value is a list of dictionaries, each with the following keys:
15 |       #    peer id: peer's self-selected ID, as described above for the tracker request (string)
16 |       #    ip: peer's IP address either IPv6 (hexed) or IPv4 (dotted quad) or DNS name (string)
17 |       #    port: peer's port number (integer)
18 |       #peers: (binary model) Instead of using the dictionary model described above, the peers value may be a string consisting of multiples of 6 bytes. First 4 bytes are the IP address and last 2 bytes are the port number. All in network (big endian) notation.
19 |       "peers" => peer_list
20 |     }
21 | 
22 |     response = case {compact, family} do
23 |       # if it's compact and ipv6 use 'peers6'
24 |       {true, :inet6} -> Map.put(response, "peers6", peer_list)
25 |       # in all other cases just use 'peers'
26 |       {_, _} -> Map.put(response, "peers", peer_list)
27 |     end
28 | 
29 |     #min interval: (optional) Minimum announce interval. If present clients must not reannounce more frequently than this.
30 |     response = case Application.fetch_env(:extracker, :announce_interval_min) do
31 |       {:ok, value} -> Map.put(response, "min interval", value)
32 |       :error -> response
33 |     end
34 | 
35 |     response
36 |   end
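  # illustrative result (hypothetical numbers, assuming announce_interval is
  # configured as 1800 and announce_interval_min as 900): a non-compact IPv4
  # announce with 3 seeders and 1 leecher yields
  #
  #   %{"interval" => 1800, "min interval" => 900, "complete" => 3,
  #     "incomplete" => 1, "peers" => peer_list}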
37 | 
38 |   # BEP 24 'Tracker Returns External IP' extra field
39 |   def append_external_ip(response, ip) do
40 |     case Application.get_env(:extracker, :return_external_ip) do
41 |       true -> Map.put(response, "external ip", ExTracker.Utils.ip_to_bytes(ip))
42 |       _ -> response
43 |     end
44 |   end
45 | 
46 |   def generate_failure(reason) do
47 |     text = cond do
48 |       is_atom(reason) -> Atom.to_string(reason)
49 |       true -> reason
50 |     end
51 | 
52 |     %{ "failure reason" => text }
53 |   end
54 | end
55 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/types/peer.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.Types.PeerID do
  2 |   alias ExTracker.Types.PeerID
  3 | 
  4 |   @enforce_keys [:ip, :port, :family]
  5 |   defstruct [:ip, :port, :family]
  6 | 
  7 |   def new(ip, port) do
  8 |     family = cond do
  9 |       tuple_size(ip) == 4 -> :inet
 10 |       tuple_size(ip) == 8 -> :inet6
 11 |     end
 12 | 
 13 |     %PeerID{ip: ip, port: port, family: family}
 14 |   end
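  # e.g. new({10, 0, 0, 1}, 6881)
  #   #=> %PeerID{ip: {10, 0, 0, 1}, port: 6881, family: :inet}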
 15 | 
 16 |   def is_ipv4(%PeerID{family: family}) do
 17 |     family == :inet
 18 |   end
 19 | 
 20 |   def is_ipv6(%PeerID{family: family}) do
 21 |     family == :inet6
 22 |   end
 23 | end
 24 | 
 25 | defimpl String.Chars, for: ExTracker.Types.PeerID do
26 |   def to_string(%ExTracker.Types.PeerID{ip: ip, port: port}) do
27 |     # :inet.ntoa/1 renders both IPv4 and IPv6 tuples correctly
28 |     "#{:inet.ntoa(ip)}:#{port}"
29 |   end
 30 | end
 31 | 
 32 | defmodule ExTracker.Types.PeerData do
 33 |   alias ExTracker.Types.PeerData
 34 | 
 35 |   @type peer_state :: :fresh | :gone | :active
 36 | 
 37 |   defstruct [
 38 |     id: nil,
 39 |     key: nil,
 40 |     uploaded: 0,
 41 |     downloaded: 0,
 42 |     left: 0,
 43 |     country: "",
 44 |     last_event: nil,
 45 |     last_updated: 0,
 46 |     state: :fresh
 47 |   ]
 48 | 
 49 |   def set_id(peer_data, id) when byte_size(id) == 20 do
 50 |     %PeerData{peer_data | id: id}
 51 |   end
 52 | 
 53 |   def validate_key(peer_data, key) do
 54 |       cond do
 55 |         peer_data.key == nil -> true
 56 |         peer_data.key == key -> true
 57 |         true -> false
 58 |       end
 59 |   end
 60 | 
 61 |   def set_key(peer_data, new_key) do
 62 |     cond do
 63 |       peer_data.key == nil -> %PeerData{peer_data | key: new_key}
 64 |       peer_data.key == new_key -> peer_data
 65 |       true -> {:error, "different key already set"}
 66 |     end
 67 |   end
 68 | 
 69 |   def update_uploaded(peer_data, value) when is_integer(value) do
 70 |     case peer_data.uploaded < value do
 71 |       true -> %PeerData{peer_data | uploaded: value}
 72 |       false -> peer_data
 73 |     end
 74 |   end
 75 | 
 76 |   def update_downloaded(peer_data, value) when is_integer(value) do
 77 |     case peer_data.downloaded < value do
 78 |       true -> %PeerData{peer_data | downloaded: value}
 79 |       false -> peer_data
 80 |     end
 81 |   end
 82 | 
 83 |   def update_left(peer_data, value) when is_integer(value) do
 84 |     %PeerData{peer_data | left: value}
 85 |   end
 86 | 
 87 |   def update_country(peer_data, country) when is_binary(country) do
 88 |     %PeerData{peer_data | country: country}
 89 |   end
 90 | 
 91 |   def update_last_event(peer_data, event) when is_atom(event) do
 92 |     %PeerData{peer_data | last_event: event}
 93 |   end
 94 | 
 95 |   def update_last_updated(peer_data, timestamp) do
 96 |     %PeerData{peer_data | last_updated: timestamp}
 97 |   end
 98 | 
99 |   @spec update_state(peer_data :: %PeerData{}, state :: peer_state()) :: %PeerData{}
100 |   def update_state(peer_data, state) do
101 |     %PeerData{peer_data | state: state}
102 |   end
103 | end
104 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/types/scrape_request.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.Types.ScrapeRequest do
 2 | 
 3 |   def parse(params) do
 4 |     # mandatory fields
 5 |     with {:ok, info_hash} <- fetch_field_info_hash(params)
 6 |     do
 7 |       request = %{ info_hash: info_hash }
 8 |       {:ok, request}
 9 |     else
10 |       {:error, message} -> {:error, message}
11 |       _ -> {:error, "unknown error"}
12 |     end
13 |   end
14 | 
15 |   #==========================================================================
16 |   # Mandatory Fields
17 |   #==========================================================================
18 | 
19 |   # info_hash: urlencoded 20-byte SHA1 hash of the value of the info key from the Metainfo file.
20 |   defp fetch_field_info_hash(params) do
21 |     case Map.fetch(params, "info_hash") do
22 |       {:ok, info_hash} ->
23 |         case ExTracker.Utils.validate_hash(info_hash) do
24 |           {:ok, decoded_hash} -> {:ok, decoded_hash}
25 |           {:error, error} -> {:error, "invalid 'info_hash' parameter: #{error}"}
26 |         end
27 |       :error -> {:error, "missing 'info_hash' parameter"}
28 |     end
29 |   end
30 | end
31 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/types/scrape_response.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.Types.ScrapeResponse do
 2 | 
 3 |   # The response to a successful request is a bencoded dictionary containing one key-value pair:
 4 |   # the key files with the value being a dictionary of the 20-byte string representation of an infohash paired with a dictionary of swarm metadata.
 5 |   def generate_success_http_envelope(successes) when is_map(successes) do
 6 |     %{
 7 |       "files" => successes
 8 |     }
 9 |   end
10 | 
11 |   def generate_success(seeders, partial_seeders, leechers, downloads) do
12 |     # The fields found in the swarm metadata dictionary are as follows:
13 |     %{
14 |       # complete: The number of active peers that have completed downloading.
15 |       "complete" => seeders,
16 |       # incomplete: The number of active peers that have not completed downloading.
17 |       "incomplete" => leechers,
18 |       # downloaded: The number of peers that have ever completed downloading.
19 |       "downloaded" => downloads,
20 |       # downloaders: The number of active peers that have not completed downloading, not including partial seeds.
21 |       "downloaders" => leechers - partial_seeders
22 |     }
23 |   end
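  # illustrative result (hypothetical numbers): 10 seeders, 2 partial seeds,
  # 5 leechers and 42 recorded downloads yield
  #
  #   %{"complete" => 10, "incomplete" => 5, "downloaded" => 42, "downloaders" => 3}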
24 | 
25 |   def generate_failure(reason) do
26 |     text = cond do
27 |       is_atom(reason) -> Atom.to_string(reason)
28 |       true -> reason
29 |     end
30 | 
31 |     %{ "failure reason" => text }
32 |   end
33 | end
34 | 


--------------------------------------------------------------------------------
/lib/ex_tracker/udp/router.ex:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.UDP.Router do
  2 |   require Logger
  3 |   use GenServer
  4 |   import Bitwise
  5 |   alias ExTracker.Utils
  6 | 
  7 |   @protocol_magic   0x41727101980
  8 |   @action_connect   0
  9 |   @action_announce  1
 10 |   @action_scrape    2
 11 |   @action_error     3
 12 | 
 13 |   @doc """
 14 |   Starts a UDP Router on the given port
 15 |   """
 16 |   def start_link(args) do
 17 |     name = Keyword.get(args, :name, __MODULE__)
 18 |     GenServer.start_link(__MODULE__, args, name: name)
 19 |   end
 20 | 
 21 |   @impl true
 22 |   def init(args) do
 23 |     index = Keyword.get(args, :index, 0)
 24 |     name = Keyword.get(args, :name, __MODULE__)
 25 |     port = Keyword.get(args, :port, -1)
 26 |     family = Keyword.get(args, :family, :inet)
 27 | 
 28 |     Process.put(:index, index)
 29 |     Process.put(:name, name)
 30 |     Process.put(:family, family)
 31 | 
 32 |     # open the UDP socket in binary mode, active, and allow address (and if needed, port) reuse
 33 |     case :gen_udp.open(port,
 34 |     [
 35 |       family,
 36 |       :binary,
 37 |       active: :once,
 38 |       reuseaddr: true
 39 |     ]
 40 |     ++ set_binding_address()
 41 |     ++ set_socket_buffer()
 42 |     ++ set_reuseport()
 43 |     ) do
 44 |       {:ok, socket} ->
 45 |         Logger.info("#{Process.get(:name)} started in mode #{to_string(family)}, on port #{port}")
 46 | 
 47 |         set_receive_buffer(socket)
 48 |         set_send_buffer(socket)
 49 | 
 50 |         {:ok, %{socket: socket, port: port}}
 51 | 
 52 |       {:error, reason} ->
 53 |         Logger.error("#{Process.get(:name)} startup error: #{inspect(reason)}")
 54 |         {:stop, reason}
 55 |     end
 56 |   end
 57 | 
 58 |   defp set_binding_address() do
 59 |     case Process.get(:family) do
 60 |       :inet -> [ip: Utils.get_configured_ipv4()]
 61 |       :inet6 -> [ip: Utils.get_configured_ipv6(), ipv6_v6only: true]
 62 |       other ->
 63 |         Logger.error("unknown internet family: #{inspect(other)}")
 64 |         exit(:unknown_family)
 65 |     end
 66 |   end
 67 | 
 68 |   defp set_reuseport() do
 69 |     case Application.get_env(:extracker, :udp_routers, -1) do
 70 |       0 -> []
 71 |       1 -> []
 72 |       _ -> [reuseport: true]
 73 |     end
 74 |   end
 75 | 
 76 |   defp set_socket_buffer() do
 77 |     case Application.get_env(:extracker, :udp_buffer_size, -1) do
 78 |       -1 -> []
 79 |       value -> [buffer: value]
 80 |     end
 81 |   end
 82 | 
 83 |   defp set_receive_buffer(socket) do
 84 |     case Application.get_env(:extracker, :udp_recbuf_size, -1) do
 85 |       -1 -> :ok
 86 |       value ->
 87 |         case :inet.setopts(socket, [{:recbuf, value}]) do
 88 |           :ok ->
 89 |             Logger.debug("#{Process.get(:name)} set receive buffer size to #{value}")
 90 |           {:error, _error} ->
 91 |             Logger.error("#{Process.get(:name)} failed to change receive buffer size ")
 92 |         end
 93 |     end
 94 |   end
 95 | 
 96 |   defp set_send_buffer(socket) do
 97 |     case Application.get_env(:extracker, :udp_sndbuf_size, -1) do
 98 |       -1 -> :ok
 99 |       value ->
100 |         case :inet.setopts(socket, [{:sndbuf, value}]) do
101 |           :ok ->
102 |             Logger.debug("#{Process.get(:name)} set send buffer size to #{value}")
103 |           {:error, _error} ->
104 |             Logger.error("#{Process.get(:name)} failed to change send buffer size ")
105 |         end
106 |     end
107 |   end
108 | 
109 |   @impl true
110 |   def handle_info({:udp, socket, ip, port, data}, state) do
111 |     # delegate message handling to a Task under the associated supervisor
112 |     supervisor = ExTracker.UDP.Supervisor.get_task_supervisor_name(Process.get(:index), Process.get(:family))
113 |     Task.Supervisor.start_child(supervisor, fn ->
114 |       :telemetry.execute([:extracker, :bandwidth, :in], %{value: byte_size(data)})
115 |       process_packet(Process.get(:name), socket, ip, port, data)
116 |     end)
117 | 
118 |     :inet.setopts(socket, active: :once)
119 |     {:noreply, state}
120 |   end
121 | 
122 |   @impl true
123 |   def handle_info(_msg, state) do
124 |     {:noreply, state}
125 |   end
126 | 
127 |   defp match_connection_id(connection_id, ip, port) do
128 |     <<t::integer-unsigned-8, _s::integer-unsigned-56>> = Utils.pad_to_8_bytes(:binary.encode_unsigned(connection_id))
129 |     case generate_connection_id(t, ip, port) do
130 |       ^connection_id ->
131 |         case expired_connection_id(t) do
132 |           true -> {:error, "connection id expired"}
133 |           false -> :ok
134 |         end
135 |       _ -> {:error, "connection id mismatch"}
136 |     end
137 |   end
138 | 
139 |   defp expired_connection_id(t) do
140 |     current_t = (System.monotonic_time(:second) >>> 6) &&& 0xFF
141 |     cond do
142 |       current_t < t -> true  # t overflowed already (edge case here)
143 |       current_t > (t + 1) -> true # t increased at least two times
144 |       true -> false
145 |     end
146 |   end
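  # worked example: at monotonic second 1_000_000,
  #   1_000_000 >>> 6 = 15_625 and 15_625 &&& 0xFF = 9
  # so an id stays valid for one to two 64-second buckets before expiring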
147 | 
148 |   # the connection id is derived from known data so we don't need to store it on the tracker side
149 |   defp generate_connection_id(t, ip, port) do
150 |     secret = Application.get_env(:extracker, :connection_id_secret)
151 |     # generate s from the time, ip and port of the client and a runtime secret
152 |     data = :erlang.term_to_binary({t, ip, port, secret})
153 |     # compute the SHA-256 hash of the input data and retrieve the first 56 bits
154 |     hash = :crypto.hash(:sha256, data)
155 |     <<s::integer-unsigned-56, _rest::binary>> = hash
156 | 
157 |     # make a 64bit integer out of both
158 |     :binary.decode_unsigned(<<t::integer-unsigned-8, s::integer-unsigned-56>>)
159 |   end
160 | 
161 |   defp generate_connection_id(ip, port) do
162 |     # get the current monotonic time, reduce its resolution to 64 seconds (>>> 6)
163 |     # and fit it in 8 bits; the result is already a plain 0..255 integer
164 |     t = (System.monotonic_time(:second) >>> 6) &&& 0xFF
165 |     generate_connection_id(t, ip, port)
166 |   end
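  # the resulting 64-bit id has the layout <<t::8, s::56>>, where t is the
  # coarse time bucket and s is the first 56 bits of sha256({t, ip, port, secret});
  # match_connection_id/3 recomputes it instead of looking anything up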
167 | 
168 |   defp process_packet(name, _socket, ip, 0, _packet) do
169 |     family = case tuple_size(ip) do
170 |       4 -> "inet"
171 |       8 -> "inet6"
172 |     end
173 | 
174 |     :telemetry.execute([:extracker, :request], %{processing_time: 0}, %{endpoint: "udp", action: "connect", family: family})
175 |     :telemetry.execute([:extracker, :request, :error], %{}, %{endpoint: "udp", action: "connect", family: family})
176 |     Logger.debug("#{name}: message from #{inspect(ip)} ignored because source port is zero")
177 |     :ok
178 |   end
179 | 
180 |   defp process_packet(name, socket, ip, port, packet) do
181 |     start = System.monotonic_time(:microsecond)
182 |     {result, action} = process_message(socket, ip, port, packet)
183 |     finish = System.monotonic_time(:microsecond)
184 | 
185 |     elapsed = finish - start
186 | 
187 |     # send telemetry about this request
188 |     endpoint = "udp"
189 |     action_str = Atom.to_string(action)
190 |     family = case tuple_size(ip) do
191 |       4 -> "inet"
192 |       8 -> "inet6"
193 |     end
194 | 
195 |     :telemetry.execute([:extracker, :request], %{processing_time: elapsed}, %{endpoint: endpoint, action: action_str, family: family})
196 |     :telemetry.execute([:extracker, :request, result], %{}, %{endpoint: endpoint, action: action_str, family: family})
197 | 
198 |     if elapsed < 1_000 do
199 |       Logger.debug("#{name}: message processed in #{elapsed}µs")
200 |     else
201 |       ms = System.convert_time_unit(elapsed, :microsecond, :millisecond)
202 |       Logger.debug("#{name}: message processed in #{ms}ms")
203 |     end
204 |     :ok
205 |   end
206 | 
207 |   # connect request
208 |   defp process_message(socket, ip, port, <<@protocol_magic::integer-unsigned-64, @action_connect::integer-unsigned-32, transaction_id::integer-unsigned-32>>) do
209 |     # connect response
210 |     response = <<
211 |       @action_connect::integer-unsigned-32,
212 |       transaction_id::integer-unsigned-32,
213 |       generate_connection_id(ip, port)::integer-unsigned-64
214 |     >>
215 | 
216 |     response = IO.iodata_to_binary(response)
217 |     case :gen_udp.send(socket, ip, port, response) do
218 |       :ok ->
219 |         :telemetry.execute([:extracker, :bandwidth, :out], %{value: byte_size(response)})
220 |         {:success, :connect}
221 |       {:error, reason} ->
222 |         Logger.error("[connect] udp send failed. reason: #{inspect(reason)} ip: #{inspect(ip)} port: #{inspect(port)} response: #{inspect(response)}")
223 |         {:error, :connect}
224 |     end
225 |   end
226 | 
227 |   # announce request
228 |   defp process_message(socket, ip, port, <<connection_id::integer-unsigned-64, @action_announce::integer-unsigned-32, transaction_id::integer-unsigned-32, data::binary>>) do
229 |     {ret, response} = with :ok <- match_connection_id(connection_id, ip, port), # check connection id first
230 |     params <- read_announce(data), # convert the binary fields to a map for the processor to understand
231 |     params <- handle_zero_port(params, port), # if port is zero, use the socket port
232 |     {:ok, result} <- ExTracker.Processors.Announce.process(ip, params),
233 |     {:ok, interval} <- Map.fetch(result, "interval"),
234 |     {:ok, leechers} <- Map.fetch(result, "incomplete"),
235 |     {:ok, seeders} <- Map.fetch(result, "complete"),
236 |     {:ok, peers} <- retrieve_announce_peers(result)
237 |     do
238 |       data = <<
239 |         # 32-bit integer  action
240 |         @action_announce::integer-unsigned-32,
241 |         # 32-bit integer  transaction_id
242 |         transaction_id::integer-unsigned-32,
243 |         # 32-bit integer  interval
244 |         interval::integer-unsigned-32,
245 |         # 32-bit integer  leechers
246 |         leechers::integer-unsigned-32,
247 |         # 32-bit integer  seeders
248 |         seeders::integer-unsigned-32,
249 |         # 32-bit or 128-bit integer  IP address
250 |         # 16-bit integer  TCP port
251 |         # 6 * N
252 |         peers::binary
253 |       >>
254 |       {:success, data}
255 |     else
256 |       # processor failure
257 |       {:error, %{"failure reason" => reason}} ->
258 |         {:failure, [<<@action_error::integer-unsigned-32, transaction_id::integer-unsigned-32>>, reason]}
259 |       # general error
260 |       {:error, reason} ->
261 |         {:failure, [<<@action_error::integer-unsigned-32, transaction_id::integer-unsigned-32>>, reason]}
262 |       # some response key is missing (shouldn't happen)
263 |       :error ->
264 |         {:failure, [<<@action_error::integer-unsigned-32, transaction_id::integer-unsigned-32>>, "internal error"]}
265 |     end
266 | 
267 |     # send a response in all (expected) cases
268 |     response = IO.iodata_to_binary(response)
269 |     case :gen_udp.send(socket, ip, port, response) do
270 |       :ok ->
271 |         :telemetry.execute([:extracker, :bandwidth, :out], %{value: byte_size(response)})
272 |         {ret, :announce}
273 |       {:error, reason} ->
274 |         Logger.error("[announce] udp send failed. reason: #{inspect(reason)} ip: #{inspect(ip)} port: #{inspect(port)} response: #{inspect(response)}")
275 |         {:error, :announce}
276 |     end
277 |   end
278 | 
279 |   # scrape request
280 |   defp process_message(socket, ip, port, <<connection_id::integer-unsigned-64, @action_scrape::integer-unsigned-32, transaction_id::integer-unsigned-32, data::binary>>) do
281 |     {ret, response} =
282 |       with :ok <- check_scrape_enabled(),
283 |       :ok <- match_connection_id(connection_id, ip, port), # check connection id first
284 |       hashes when hashes != [] <- read_info_hashes(data) # then extract the hashes and make sure there's at least one
285 |     do
286 |       # TODO: with explicit recursion we could return early on the first failure
287 |       # instead of traversing the list twice
288 | 
289 |       # process each info_hash on its own
290 |       results = Enum.map(hashes, fn hash ->
291 |         ExTracker.Processors.Scrape.process(ip, %{info_hash: hash})
292 |       end)
293 | 
294 |       # check if any of them failed and return the first error message
295 |       # craft a response based on the requests result
296 |       case Enum.find(results, fn
297 |         {:error, _reason} -> true
298 |         _ -> false
299 |       end) do
300 |         {:error, %{"failure reason" => reason}} ->
301 |           {:failure, [<<@action_error::integer-unsigned-32, transaction_id::integer-unsigned-32>>, reason]}
302 |         {:error, failure} ->
303 |           {:failure, [<<@action_error::integer-unsigned-32, transaction_id::integer-unsigned-32>>, failure]}
304 |         nil ->
305 |           # convert the results to binaries
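          # results arrive in reverse packet order (read_info_hash/2 prepends);
          # prepending again here restores the original request order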
306 |           binaries = Enum.reduce(results, [], fn {:ok, result}, acc ->
307 |             binary = <<
308 |               Map.fetch!(result, "seeders")::integer-unsigned-32,
309 |               Map.fetch!(result, "completed")::integer-unsigned-32,
310 |               Map.fetch!(result, "leechers")::integer-unsigned-32
311 |             >>
312 |             [binary | acc]
313 |           end)
314 | 
315 |           # concatenate the header and all the resulting binaries as response
316 |           header = <<
317 |             @action_scrape::integer-unsigned-32,
318 |             transaction_id::integer-unsigned-32
319 |           >>
320 |           {:success, IO.iodata_to_binary([header | binaries])}
321 |       end
322 |     else
323 |       # general error
324 |       {:error, %{"failure reason" => reason}} ->
325 |         {:failure, [<<@action_error::integer-unsigned-32, transaction_id::integer-unsigned-32>>, reason]}
326 |       {:error, reason} ->
327 |         {:failure, [<<@action_error::integer-unsigned-32, transaction_id::integer-unsigned-32>>, reason]}
328 |       # hashes list is empty
329 |       [] ->
330 |         {:failure, [<<@action_error::integer-unsigned-32, transaction_id::integer-unsigned-32>>, "no info_hash provided"]}
331 |     end
332 | 
333 |     # send a response in all (expected) cases
334 |     response = IO.iodata_to_binary(response)
335 |     case :gen_udp.send(socket, ip, port, response) do
336 |       :ok ->
337 |         :telemetry.execute([:extracker, :bandwidth, :out], %{value: byte_size(response)})
338 |         {ret, :scrape}
339 |       {:error, reason} ->
340 |         Logger.error("[scrape] udp send failed. reason: #{inspect(reason)} ip: #{inspect(ip)} port: #{inspect(port)} response: #{inspect(response)}")
341 |         {:error, :scrape}
342 |     end
343 |   end
344 | 
345 |   # unexpected request
346 |   defp process_message(_socket, _ip, _port, _data) do
347 |     {:error, :unknown}
348 |   end
349 | 
350 |   defp check_scrape_enabled() do
351 |     case Application.get_env(:extracker, :scrape_enabled) do
352 |       true -> :ok
353 |       _ -> {:error, "scraping is disabled"}
354 |     end
355 |   end
356 | 
357 |   defp retrieve_announce_peers(result) do
358 |     case Map.fetch(result, "peers") do
359 |       {:ok, peers} -> {:ok, peers}
360 |       :error -> Map.fetch(result, "peers6")
361 |     end
362 |   end
363 | 
364 |   # scrape requests can hold up to 72 info hashes
365 |   defp read_info_hashes(data) when is_binary(data), do: read_info_hash(data, [])
366 |   defp read_info_hash(<<>>, acc), do: acc
367 |   defp read_info_hash(data, acc) when is_binary(data) and byte_size(data) < 20, do: acc # ignore incomplete hashes
368 |   defp read_info_hash(<<hash::binary-size(20), rest::binary>>, acc), do: read_info_hash(rest, [hash | acc])
369 | 
370 |   # read and convert announce message to a map
371 |   defp read_announce(data) when is_binary(data) do
372 |     <<
373 |       # 20-byte string  info_hash
374 |       info_hash::binary-size(20),
375 |       # 20-byte string  peer_id
376 |       peer_id::binary-size(20),
377 |       # 64-bit integer  downloaded
378 |       downloaded::integer-unsigned-64,
379 |       # 64-bit integer  left
380 |       left::integer-unsigned-64,
381 |       # 64-bit integer  uploaded
382 |       uploaded::integer-unsigned-64,
383 |       # 32-bit integer  event           0 // 0: none; 1: completed; 2: started; 3: stopped
384 |       event::integer-unsigned-32,
385 |       # 32-bit integer  IP address      0 // default
386 |       ip::integer-unsigned-32,
387 |       # 32-bit integer  key
388 |       key::integer-unsigned-32,
389 |       # 32-bit integer  num_want        -1 // default
390 |       num_want::integer-signed-32,
391 |       # 16-bit integer  port
392 |       port::integer-unsigned-16,
393 |       # remaining may be empty or BEP41 options
394 |       remaining::binary
395 |     >> = data
396 | 
397 |     # read options if any after the standard announce data
398 |     options = read_options(remaining)
399 | 
400 |     # TODO should be able to use atoms directly
401 |     event_str = case event do
402 |       0 -> ""
403 |       1 -> "completed"
404 |       2 -> "started"
405 |       3 -> "stopped"
406 |       _ -> "unknown"
407 |     end
408 | 
409 |     %{
410 |       "info_hash" => info_hash,
411 |       "peer_id" => peer_id,
412 |       "downloaded" => downloaded,
413 |       "left" => left,
414 |       "uploaded" => uploaded,
415 |       "event" => event_str,
416 |       "ip" => ip,
417 |       "key" => key,
418 |       "numwant" => num_want,
419 |       "port" => port,
420 |       "compact" => 1, # udp is always compact
421 |       "options" => options
422 |     }
423 |   end
424 | 
425 |   # https://stackoverflow.com/questions/32075418/bittorrent-peers-with-zero-ports
426 |   def handle_zero_port(params, port) do
427 |     case Map.get(params, "port", port) do
428 |       0 -> Map.put(params, "port", port)
429 |       _other -> params
430 |     end
431 |   end
432 | 
433 |   # read BEP41-defined variable-length options and return a map with them
434 |   defp read_options(data) when is_binary(data), do: read_option(data, %{})
435 | 
436 |   defp read_option(<<>>, options), do: options
437 |   # EndOfOptions: <Option-Type 0x0>
438 |   # A special case option that has a fixed-length of one byte. It is not followed by a length field, or associated data.
439 |   # Option parsing continues until either the end of the packet is reached, or an EndOfOptions option is encountered.
440 |   defp read_option(<<0x00::integer-unsigned-8, _rest::binary>>, options), do: options
441 | 
442 |   # NOP: <Option-Type 0x1>
443 |   # A special case option that has a fixed-length of one byte. It is not followed by a length field, or associated data.
444 |   # A NOP has no effect on option parsing. It is used only if optional padding is necessary in the future.
445 |   defp read_option(<<0x01::integer-unsigned-8, rest::binary>>, options), do: read_option(rest, options)
446 | 
447 |   # URLData: <Option-Type 0x2>, <Length Byte>, <Variable-Length URL Data>
448 |   # A variable-length option, followed by a length byte and variable-length data.
449 |   # The data field contains the concatenated PATH and QUERY portion of the UDP tracker URL. If this option appears more than once, the data fields are concatenated.
450 |   # This allows clients to send PATH and QUERY strings that are longer than 255 bytes, chunked into blocks of no larger than 255 bytes.
451 |   defp read_option(<<0x02::integer-unsigned-8, length::integer-unsigned-8, rest::binary>>, options) when byte_size(rest) >= length do
452 |     <<url_data::binary-size(length), remaining::binary>> = rest
453 |     options = case Map.fetch(options, :urldata) do
454 |       :error -> Map.put(options, :urldata, url_data)
455 |       {:ok, current} -> Map.put(options, :urldata, current <> url_data)
456 |     end
457 |     read_option(remaining, options)
458 |   end
459 | 
460 |   # unknown Option-Type with length, just skip it
461 |   defp read_option(<<unknown::integer-unsigned-8, length::integer-unsigned-8, rest::binary>>, options) when byte_size(rest) >= length do
462 |     <<data::binary-size(length), remaining::binary>> = rest
463 |     Logger.debug("udp announce unknown option: #{inspect(unknown)}. data: #{inspect(data)}")
464 |     read_option(remaining, options)
465 |   end
466 | 
467 |   defp read_option(<<malformed::binary>>, options) do
468 |     Logger.debug("udp announce malformed option: #{inspect(malformed)}. options: #{inspect(options)}")
469 |     options
470 |   end
471 | end
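
A minimal sketch of a client-side announce payload that read_announce/1 above would
accept (all values hypothetical; per BEP 15 the full announce packet is the 16-byte
header of connection_id, action and transaction_id, followed by this 82-byte fixed
body, plus optional BEP 41 options):

    info_hash = :crypto.strong_rand_bytes(20)
    peer_id = "-EX0001-" <> :crypto.strong_rand_bytes(12)  # hypothetical client prefix

    announce_body = <<
      info_hash::binary-size(20),                      # 20-byte info_hash
      peer_id::binary-size(20),                        # 20-byte peer_id
      0::integer-unsigned-64,                          # downloaded
      1_000::integer-unsigned-64,                      # left
      0::integer-unsigned-64,                          # uploaded
      2::integer-unsigned-32,                          # event (2 = started)
      0::integer-unsigned-32,                          # IP address (0 = use source address)
      :rand.uniform(0xFFFFFFFF)::integer-unsigned-32,  # key
      -1::integer-signed-32,                           # num_want (-1 = default)
      6881::integer-unsigned-16                        # port
    >>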
472 | 
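The BEP 41 parser above is private; this standalone sketch (same clause structure,
simplified to URLData only, ignoring unknown/malformed options) shows how chunked
URLData options concatenate across a sample payload:

    defmodule OptionsSketch do
      # stop on end-of-packet or EndOfOptions (0x00), skip NOP (0x01),
      # concatenate URLData (0x02) chunks
      def read(<<>>, acc), do: acc
      def read(<<0x00, _rest::binary>>, acc), do: acc
      def read(<<0x01, rest::binary>>, acc), do: read(rest, acc)
      def read(<<0x02, len, rest::binary>>, acc) when byte_size(rest) >= len do
        <<chunk::binary-size(len), remaining::binary>> = rest
        read(remaining, acc <> chunk)
      end
    end

    # "/ann" arrives split into two chunks with a NOP in between
    payload = <<0x02, 2, "/a", 0x01, 0x02, 2, "nn", 0x00>>
    OptionsSketch.read(payload, "") #=> "/ann"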


--------------------------------------------------------------------------------
/lib/ex_tracker/udp/supervisor.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.UDP.Supervisor do
 2 |   use Supervisor
 3 | 
 4 |   @doc """
 5 |   starts the UDP Supervisor that integrates the UDP Router and the Supervisor for its tasks
 6 |   """
 7 |   def start_link(args \\ []) do
 8 |     index = Keyword.get(args, :index, 0)
 9 |     family = Keyword.get(args, :family)
10 |     Supervisor.start_link(__MODULE__, args, name: :"udp_supervisor_#{family}_#{index}")
11 |   end
12 | 
13 |   @impl true
14 |   def init(args) do
15 |     index = Keyword.get(args, :index, 0)
16 |     port = Keyword.get(args, :port)
17 |     family = Keyword.get(args, :family)
18 | 
19 |     children = [
20 |       # Task Supervisor for concurrently processing incoming UDP messages
21 |       {Task.Supervisor, name: get_task_supervisor_name(index, family)},
22 |       # the UDP Router that listens for UDP messages
23 |       {ExTracker.UDP.Router, index: index, family: family, port: port, name: get_router_name(index, family)}
24 |     ]
25 | 
26 |     # if the router fails the tasks won't be able to use the socket so restart them all
27 |     Supervisor.init(children, strategy: :one_for_all)
28 |   end
29 | 
30 |   def get_router_name(index, family) do
31 |     :"udp_router_#{family}_#{index}"
32 |   end
33 | 
34 |   def get_task_supervisor_name(index, family) do
35 |     :"udp_task_supervisor_#{family}_#{index}"
36 |   end
37 | end
38 | 
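For reference, a sketch of how one of these trees is addressed at runtime (values
hypothetical; the family atom is whatever the application passes in, e.g. :inet
or :inet6, and the port must be free):

    {:ok, _sup} = ExTracker.UDP.Supervisor.start_link(index: 0, family: :inet, port: 6969)

    ExTracker.UDP.Supervisor.get_router_name(0, :inet)
    #=> :udp_router_inet_0
    ExTracker.UDP.Supervisor.get_task_supervisor_name(0, :inet)
    #=> :udp_task_supervisor_inet_0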


--------------------------------------------------------------------------------
/lib/ex_tracker/utils.ex:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.Utils do
 2 | 
 3 |   def pad_to_8_bytes(bin) when byte_size(bin) < 8 do
 4 |     padding = :binary.copy(<<0>>, 8 - byte_size(bin))
 5 |     padding <> bin
 6 |   end
 7 |   def pad_to_8_bytes(bin), do: bin
 8 | 
 9 |   def hash_to_string(hash) do
10 |     String.downcase(Base.encode16(hash))
11 |   end
12 | 
13 |   def format_bits_as_string(bits) when is_integer(bits) or is_float(bits) do
14 |     cond do
15 |       bits < 1_000 -> "#{bits} bits"
16 |       bits < 1_000_000 -> "#{bits / 1_000} kilobits"
17 |       true -> "#{bits / 1_000_000} megabits"
18 |     end
19 |   end
20 | 
21 |   def ip_to_bytes(ip) when is_tuple(ip) and tuple_size(ip) == 4 do
22 |     ip |> Tuple.to_list() |> :binary.list_to_bin()
23 |   end
24 | 
25 |   def ip_to_bytes(ip) when is_tuple(ip) and tuple_size(ip) == 8 do
26 |     ip |> Tuple.to_list() |> Enum.map(fn num -> <<num::16>> end) |> IO.iodata_to_binary()
27 |   end
28 | 
29 |   def ipv4_to_bytes(ip) do
30 |     ip |> String.split(".") |> Enum.map(&String.to_integer/1) |> :binary.list_to_bin()
31 |   end
32 | 
33 |   def port_to_bytes(port) do
34 |     <<port::16>>
35 |   end
36 | 
37 |   def get_configured_ipv4() do
38 |     {:ok, address} =
39 |       Application.get_env(:extracker, :ipv4_bind_address)
40 |       |> to_charlist()
41 |       |> :inet.parse_ipv4_address()
42 |     address
43 |   end
44 | 
45 |   def get_configured_ipv6() do
46 |     {:ok, address} =
47 |       Application.get_env(:extracker, :ipv6_bind_address)
48 |       |> to_charlist()
49 |       |> :inet.parse_ipv6_address()
50 |     address
51 |   end
52 | 
53 |   # v1 hex-string hash (40 bytes, SHA-1)
54 |   def validate_hash(hash) when is_binary(hash) and byte_size(hash) == 40 do
55 |     with true <- String.valid?(hash, :fast_ascii),
56 |     {:ok, decoded} <- Base.decode16(String.upcase(hash))
57 |     do
58 |       {:ok, decoded}
59 |     else
60 |       _ -> {:error, "invalid hex-string hash"}
61 |     end
62 |   end
63 | 
64 |   # v1 base32 hash (32 bytes)
65 |   def validate_hash(hash) when is_binary(hash) and byte_size(hash) == 32 do
66 |     case Base.decode32(hash, case: :upper) do
67 |       {:ok, decoded} -> {:ok, decoded}
68 |       :error -> {:error, "invalid base32 hash"}
69 |     end
70 |   end
71 | 
72 |   # v1 binary hash (20 bytes, SHA-1) or v2 truncated binary hash (32 bytes, SHA-256)
73 |   def validate_hash(hash) when is_binary(hash) and byte_size(hash) == 20, do: {:ok, hash}
74 |   def validate_hash(hash) when is_list(hash), do: hash |> :erlang.list_to_binary() |> validate_hash()
75 |   def validate_hash(_hash), do: {:error, "invalid hash"}
76 | end
77 | 
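These helpers compose into the compact peer format used in the UDP announce
response above (6 bytes per IPv4 peer, 18 per IPv6); a quick sketch with
hypothetical peers:

    alias ExTracker.Utils

    peers = [{{10, 0, 0, 1}, 6881}, {{10, 0, 0, 2}, 51413}]

    compact =
      peers
      |> Enum.map(fn {ip, port} -> [Utils.ip_to_bytes(ip), Utils.port_to_bytes(port)] end)
      |> IO.iodata_to_binary()

    byte_size(compact) #=> 12 (2 peers * 6 bytes each)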


--------------------------------------------------------------------------------
/mix.exs:
--------------------------------------------------------------------------------
 1 | defmodule ExTracker.MixProject do
 2 |   use Mix.Project
 3 | 
 4 |   def version() do
 5 |     "0.7.0"
 6 |   end
 7 | 
 8 |   def project do
 9 |     [
10 |       app: :extracker,
11 |       version: version(),
12 |       elixir: "~> 1.15",
13 |       start_permanent: Mix.env() == :prod,
14 |       deps: deps(),
15 |       releases: [
16 |         extracker: [
17 |           include_executables_for: [:unix],
18 |           version: {:from_app, :extracker}
19 |         ],
20 |         extrackerw: [
21 |           include_executables_for: [:windows],
22 |           version: {:from_app, :extracker}
23 |         ]
24 |       ]
25 |     ]
26 |   end
27 | 
28 |   # Run "mix help compile.app" to learn about applications.
29 |   def application do
30 |     [
31 |       extra_applications: [:logger],
32 |       mod: {ExTracker.Application, []}
33 |     ]
34 |   end
35 | 
36 |   # Run "mix help deps" to learn about dependencies.
37 |   defp deps do
38 |     [
39 |       {:plug_cowboy, "~> 2.6"},
40 |       {:bento, "~> 1.0"},
41 |       {:locus, "~> 2.3"},
42 |       {:telemetry, "~> 1.3"},
43 |       {:telemetry_metrics, "~> 1.1"},
44 |       {:telemetry_poller, "~> 1.2"},
45 |       {:telemetry_metrics_prometheus_core, "~> 1.2"}
46 |     ]
47 |   end
48 | end
49 | 


--------------------------------------------------------------------------------
/mix.lock:
--------------------------------------------------------------------------------
 1 | %{
 2 |   "bento": {:hex, :bento, "1.0.0", "5097e6f02e4980b72d08bf0270026f6f5c9bf5d8ca606b43d41321b549d49de8", [:mix], [], "hexpm", "b921b335a555f7570adfac0cc41864c76377f6852617cf1f25fbb88578b993c8"},
 3 |   "cowboy": {:hex, :cowboy, "2.13.0", "09d770dd5f6a22cc60c071f432cd7cb87776164527f205c5a6b0f24ff6b38990", [:make, :rebar3], [{:cowlib, ">= 2.14.0 and < 3.0.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, ">= 1.8.0 and < 3.0.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "e724d3a70995025d654c1992c7b11dbfea95205c047d86ff9bf1cda92ddc5614"},
 4 |   "cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"},
 5 |   "cowlib": {:hex, :cowlib, "2.15.0", "3c97a318a933962d1c12b96ab7c1d728267d2c523c25a5b57b0f93392b6e9e25", [:make, :rebar3], [], "hexpm", "4f00c879a64b4fe7c8fcb42a4281925e9ffdb928820b03c3ad325a617e857532"},
 6 |   "locus": {:hex, :locus, "2.3.11", "ddfab230e3fb8b45f47416ed0fb8776c6d6d00f38687f6d37647ed7502c33d8e", [:rebar3], [{:tls_certificate_check, "~> 1.9", [hex: :tls_certificate_check, repo: "hexpm", optional: false]}], "hexpm", "ad855e9b998adc6ec5c57b9d0e5130b0e40a927be7b50d8e104df245c60ede1a"},
 7 |   "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"},
 8 |   "plug": {:hex, :plug, "1.17.0", "a0832e7af4ae0f4819e0c08dd2e7482364937aea6a8a997a679f2cbb7e026b2e", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f6692046652a69a00a5a21d0b7e11fcf401064839d59d6b8787f23af55b1e6bc"},
 9 |   "plug_cowboy": {:hex, :plug_cowboy, "2.7.3", "1304d36752e8bdde213cea59ef424ca932910a91a07ef9f3874be709c4ddb94b", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "77c95524b2aa5364b247fa17089029e73b951ebc1adeef429361eab0bb55819d"},
10 |   "plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"},
11 |   "ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], "hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"},
12 |   "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"},
13 |   "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"},
14 |   "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"},
15 |   "telemetry_metrics_prometheus_core": {:hex, :telemetry_metrics_prometheus_core, "1.2.1", "c9755987d7b959b557084e6990990cb96a50d6482c683fb9622a63837f3cd3d8", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5e2c599da4983c4f88a33e9571f1458bf98b0cf6ba930f1dc3a6e8cf45d5afb6"},
16 |   "telemetry_poller": {:hex, :telemetry_poller, "1.2.0", "ba82e333215aed9dd2096f93bd1d13ae89d249f82760fcada0850ba33bac154b", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7216e21a6c326eb9aa44328028c34e9fd348fb53667ca837be59d0aa2a0156e8"},
17 |   "tls_certificate_check": {:hex, :tls_certificate_check, "1.27.0", "2c1c7fc922a329b9eb45ddf39113c998bbdeb28a534219cd884431e2aee1811e", [:rebar3], [{:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "51a5ad3dbd72d4694848965f3b5076e8b55d70eb8d5057fcddd536029ab8a23c"},
18 | }
19 | 


--------------------------------------------------------------------------------
/rel/env.bat.eex:
--------------------------------------------------------------------------------
1 | @echo off
2 | rem Set the release to load code on demand (interactive) instead of preloading (embedded).
3 | rem set RELEASE_MODE=interactive
4 | 
5 | rem Set the release to work across nodes.
6 | rem RELEASE_DISTRIBUTION must be sname (local), name (distributed) or none.
7 | rem set RELEASE_DISTRIBUTION=name
8 | rem set RELEASE_NODE=<%= @release.name %>
9 | 


--------------------------------------------------------------------------------
/rel/env.sh.eex:
--------------------------------------------------------------------------------
 1 | #!/bin/sh
 2 | 
 3 | # # Sets and enables heart (recommended only in daemon mode)
 4 | # case $RELEASE_COMMAND in
 5 | #   daemon*)
 6 | #     HEART_COMMAND="$RELEASE_ROOT/bin/$RELEASE_NAME $RELEASE_COMMAND"
 7 | #     export HEART_COMMAND
 8 | #     export ELIXIR_ERL_OPTIONS="-heart"
 9 | #     ;;
10 | #   *)
11 | #     ;;
12 | # esac
13 | 
14 | # # Set the release to load code on demand (interactive) instead of preloading (embedded).
15 | # export RELEASE_MODE=interactive
16 | 
17 | # # Set the release to work across nodes.
18 | # # RELEASE_DISTRIBUTION must be "sname" (local), "name" (distributed) or "none".
19 | # export RELEASE_DISTRIBUTION=name
20 | # export RELEASE_NODE=<%= @release.name %>
21 | 


--------------------------------------------------------------------------------
/rel/remote.vm.args.eex:
--------------------------------------------------------------------------------
 1 | ## Customize flags given to the VM: https://www.erlang.org/doc/man/erl.html
 2 | ## -mode/-name/-sname/-setcookie are configured via env vars, do not set them here
 3 | 
 4 | ## Increase number of concurrent ports/sockets
 5 | ##+Q 65536
 6 | 
 7 | ## Tweak GC to run more often
 8 | ##-env ERL_FULLSWEEP_AFTER 10
 9 | 
10 | ## Enable deployment without epmd
11 | ## (requires changing both vm.args and remote.vm.args)
12 | ##-start_epmd false -erl_epmd_port 6789 -dist_listen false
13 | 


--------------------------------------------------------------------------------
/rel/vm.args.eex:
--------------------------------------------------------------------------------
 1 | ## Customize flags given to the VM: https://www.erlang.org/doc/man/erl.html
 2 | ## -mode/-name/-sname/-setcookie are configured via env vars, do not set them here
 3 | 
 4 | ## Increase number of concurrent ports/sockets
 5 | ##+Q 65536
 6 | 
 7 | ## Tweak GC to run more often
 8 | ##-env ERL_FULLSWEEP_AFTER 10
 9 | 
10 | ## Enable deployment without epmd
11 | ## (requires changing both vm.args and remote.vm.args)
12 | ##-start_epmd false -erl_epmd_port 6789
13 | 
14 | # use address order best fit strategy for the ETS allocator to reduce fragmentation and memory usage
15 | +MEas aobf
16 | # enable the use of single-mapped RWX memory for JIT code to avoid issues in virtualized/containerized environments (e.g. Docker)
17 | +JMsingle true
18 | 


--------------------------------------------------------------------------------
/test/ex_tracker_test.exs:
--------------------------------------------------------------------------------
1 | defmodule ExTrackerTest do
2 |   use ExUnit.Case
3 |   doctest ExTracker
4 | 
5 |   test "greets the world" do
6 |     assert ExTracker.hello() == :world
7 |   end
8 | end
9 | 


--------------------------------------------------------------------------------
/test/test_helper.exs:
--------------------------------------------------------------------------------
1 | ExUnit.start()
2 | 


--------------------------------------------------------------------------------
/test/utils_test.exs:
--------------------------------------------------------------------------------
  1 | defmodule ExTracker.UtilsTest do
  2 |   use ExUnit.Case, async: true
  3 | 
  4 |   alias ExTracker.Utils
  5 | 
  6 |   describe "pad_to_8_bytes/1" do
  7 |     test "add padding when the binary size is less than 8 bytes" do
  8 |       input = "cat"  # 3 bytes
  9 |       result = Utils.pad_to_8_bytes(input)
 10 |       assert byte_size(result) == 8
 11 |       assert result == <<0, 0, 0, 0, 0>> <> input
 12 |     end
 13 | 
 14 |     test "leave the binary as is if the size is 8 bytes" do
 15 |       input = "12345678" # 8 bytes
 16 |       result = Utils.pad_to_8_bytes(input)
 17 |       assert result == input
 18 |     end
 19 | 
 20 |     test "leave the binary as is if the size is bigger than 8 bytes" do
 21 |       input = "123456789"  # 9 bytes
 22 |       result = Utils.pad_to_8_bytes(input)
 23 |       assert result == input
 24 |     end
 25 |   end
 26 | 
 27 |   describe "hash_to_string/1" do
 28 |     test "convert a binary hash into a downcase hexadecimal string" do
 29 |       hash = <<0, 15, 255>>
 30 |       expected = String.downcase(Base.encode16(hash))
 31 |       result = Utils.hash_to_string(hash)
 32 |       assert result == expected
 33 |     end
 34 |   end
 35 | 
 36 |   describe "ip_to_bytes/1" do
 37 |     test "convert an IPv4 tuple into a binary" do
 38 |       ip = {127, 0, 0, 1}
 39 |       result = Utils.ip_to_bytes(ip)
 40 |       assert result == <<127, 0, 0, 1>>
 41 |     end
 42 | 
 43 |     test "convert an IPv6 tuple into a binary" do
 44 |       ip = {9225, 35413, 38466, 7920, 14778, 38138, 22855, 51913}
 45 |       result = Utils.ip_to_bytes(ip)
 46 |       assert result == <<36, 9, 138, 85, 150, 66, 30, 240, 57, 186, 148, 250, 89, 71, 202, 201>>
 47 |     end
 48 |   end
 49 | 
 50 |   describe "ipv4_to_bytes/1" do
 51 |     test "convert an IPv4 string to a binary" do
 52 |       ip_str = "192.168.1.1"
 53 |       result = Utils.ipv4_to_bytes(ip_str)
 54 |       assert result == <<192, 168, 1, 1>>
 55 |     end
 56 |   end
 57 | 
 58 |   describe "port_to_bytes/1" do
 59 |     test "convert network port number into a big-endian 16bit binary" do
 60 |       port = 8080
 61 |       result = Utils.port_to_bytes(port)
 62 |       expected = <<port::16>>
 63 |       assert result == expected
 64 |     end
 65 |   end
 66 | 
 67 |   describe "get_configured_ipv4/0" do
 68 |     setup do
 69 |       Application.put_env(:extracker, :ipv4_bind_address, "127.0.0.1")
 70 |       :ok
 71 |     end
 72 | 
 73 |     test "return the ip defined in :ipv4_bind_address as a tuple" do
 74 |       result = Utils.get_configured_ipv4()
 75 |       assert result == {127, 0, 0, 1}
 76 |     end
 77 |   end
 78 | 
 79 |   describe "get_configured_ipv6/0" do
 80 |     setup do
 81 |       Application.put_env(:extracker, :ipv6_bind_address, "::1")
 82 |       :ok
 83 |     end
 84 | 
 85 |     test "return the ip defined in :ipv6_bind_address as a tuple" do
 86 |       result = Utils.get_configured_ipv6()
 87 |       assert result == {0, 0, 0, 0, 0, 0, 0, 1}
 88 |     end
 89 |   end
 90 | 
 91 |   describe "validate_hash/1" do
 92 |     test "validate a 40 byte hexadecimal string" do
 93 |       valid_binary = :crypto.strong_rand_bytes(20)
 94 |       valid_hex = Base.encode16(valid_binary) |> String.downcase()
 95 |       assert byte_size(valid_hex) == 40
 96 |       assert Utils.validate_hash(valid_hex) == {:ok, valid_binary}
 97 |     end
 98 | 
 99 |     test "return an error when the hash is not valid hexadecimal" do
100 |       invalid_hex = "zz" <> String.duplicate("0", 38)
101 |       assert Utils.validate_hash(invalid_hex) == {:error, "invalid hex-string hash"}
102 |     end
103 | 
104 |     test "validate a 32 byte base32 hash" do
105 |       valid_binary = :crypto.strong_rand_bytes(20)
106 |       valid_base32 = Base.encode32(valid_binary, case: :upper)
107 |       assert byte_size(valid_base32) == 32
108 |       assert Utils.validate_hash(valid_base32) == {:ok, valid_binary}
109 |     end
110 | 
111 |     test "return an error for invalid base32 hash" do
112 |       invalid_base32 = String.slice("INVALIDBASE32HASHVALUE12345678" <> "AAAAAA", 0, 32)
113 |       assert Utils.validate_hash(invalid_base32) == {:error, "invalid base32 hash"}
114 |     end
115 | 
116 |     test "validate a 20 byte binary hash" do
117 |       valid_binary = :crypto.strong_rand_bytes(20)
118 |       assert Utils.validate_hash(valid_binary) == {:ok, valid_binary}
119 |     end
120 | 
121 |     test "validate a hash as a 20 item list" do
122 |       valid_binary = :crypto.strong_rand_bytes(20)
123 |       valid_list = :erlang.binary_to_list(valid_binary)
124 |       assert Utils.validate_hash(valid_list) == {:ok, valid_binary}
125 |     end
126 | 
127 |     test "return an error if the hash size is invalid" do
128 |       assert Utils.validate_hash(:crypto.strong_rand_bytes(5)) == {:error, "invalid hash"}
129 |       assert Utils.validate_hash(:crypto.strong_rand_bytes(50)) == {:error, "invalid hash"}
130 |     end
131 |   end
132 | end
133 | 


--------------------------------------------------------------------------------