├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── Tonutils Storage.postman_collection.json
├── api
└── api.go
├── cli
└── main.go
├── config
├── config.go
└── fallback.go
├── db
├── entity.go
├── fs.go
├── fscache.go
├── fscache_test.go
├── remove-dir.go
└── storage.go
├── go.mod
├── go.sum
├── provider
└── provider.go
└── storage
├── client.go
├── conn.go
├── create.go
├── create_test.go
├── download.go
├── fetch.go
├── peer.go
├── server.go
├── storage.go
├── torrent.go
└── torrent_test.go
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .idea
3 | base
4 | build
5 | tonutils-storage-db*
6 | *.json
7 | payments-db
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: all build
2 |
3 | ver := $(shell git describe --tags --always --dirty)
4 |
5 | build:
6 | go build -ldflags "-w -s -X main.GitCommit=$(ver)" -o build/tonutils-storage cli/main.go
7 |
8 | all:
9 | GOOS=linux GOARCH=amd64 go build -ldflags "-w -s -X main.GitCommit=$(ver)" -o build/tonutils-storage-linux-amd64 cli/main.go
10 | GOOS=linux GOARCH=arm64 go build -ldflags "-w -s -X main.GitCommit=$(ver)" -o build/tonutils-storage-linux-arm64 cli/main.go
11 | GOOS=darwin GOARCH=arm64 go build -ldflags "-w -s -X main.GitCommit=$(ver)" -o build/tonutils-storage-mac-arm64 cli/main.go
12 | GOOS=darwin GOARCH=amd64 go build -ldflags "-w -s -X main.GitCommit=$(ver)" -o build/tonutils-storage-mac-amd64 cli/main.go
13 | GOOS=windows GOARCH=amd64 go build -ldflags "-w -s -X main.GitCommit=$(ver)" -o build/tonutils-storage-x64.exe cli/main.go
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Tonutils Storage
2 | [![Based on TON][ton-svg]][ton] [![Join our group][join-svg]][tg]
3 |
4 | Alternative TON Storage implementation based on [tonutils-go](https://github.com/xssnick/tonutils-go), with [HTTP API](#http-api).
5 |
6 | You can freely use this storage in any kind of project.
7 |
8 | If you want to support this project and help us to develop projects like this 100% free and open source, we would be grateful if you donate any amount of TON or Jettons to `EQBx6tZZWa2Tbv6BvgcvegoOQxkRrVaBVwBOoW85nbP37_Go` ☺️
9 |
10 |
11 |
12 | ## Quick start
13 |
14 | 1. Download precompiled version:
15 | * [Linux AMD64](https://github.com/xssnick/tonutils-storage/releases/latest/download/tonutils-storage-linux-amd64)
16 | * [Linux ARM64](https://github.com/xssnick/tonutils-storage/releases/latest/download/tonutils-storage-linux-arm64)
17 | * [Windows x64](https://github.com/xssnick/tonutils-storage/releases/latest/download/tonutils-storage-x64.exe)
18 | * [Mac Intel](https://github.com/xssnick/tonutils-storage/releases/latest/download/tonutils-storage-mac-amd64)
19 | * [Mac Apple Silicon](https://github.com/xssnick/tonutils-storage/releases/latest/download/tonutils-storage-mac-arm64)
20 | 2. Run
21 | `./tonutils-storage`
22 | 3. Try `download 85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f`
23 | 4. Use `list` command to check progress
24 |
25 | ## CLI
26 |
27 | At the moment, 4 commands are available:
28 |
29 | * Create bag: `create [path] [description]`
30 | * Download bag: `download [bag_id]`
31 | * List bags: `list`
32 | * Display help: `help`
33 |
34 | On first start you will see something like `Using port checker tonutils.com at 31.172.68.159`.
35 | Storage will try to resolve your external IP address. If it fails, you will need to manually specify your IP in config.json inside the db folder in order to seed bags.
36 |
37 | ### Minimum requirements
38 |
39 | * RAM: **512 MB**
40 | * CPU: **2 Cores**
41 | * Enough disk space to host your files
42 | * Internet connection
43 |
44 | ### HTTP API
45 |
46 | When running with the flag `--api ip:port`, you can access and control the storage using the HTTP API.
47 |
48 | If you want to enable HTTP Basic Auth, you can use the additional flags `--api-login [login] --api-password [password]`
49 |
50 | Example: `./tonutils-storage --api 127.0.0.1:8192 --api-login admin --api-password 123456`
51 |
52 | You can [download the Postman collection](https://github.com/xssnick/tonutils-storage/blob/master/Tonutils%20Storage.postman_collection.json) or check the examples below.
53 |
54 | #### POST /api/v1/add
55 |
56 | Download bag by id. If `download_all` is false and files are empty, only header will be downloaded.
57 |
58 | After adding, you could call `GET /api/v1/details?bag_id=[id]`, when header is available you will see the list of files. Call `add` again with required files ids.
59 |
60 | Request:
61 | ```json
62 | {
63 | "bag_id": "85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f",
64 | "path": "/root/downloads",
65 | "files": [0,1,2],
66 | "download_all": false
67 | }
68 | ```
69 | Response:
70 | ```json
71 | {
72 | "ok": true
73 | }
74 | ```
75 |
76 | #### GET /api/v1/list
77 |
78 | Response:
79 | ```json
80 | {
81 | "bags": [
82 | {
83 | "bag_id": "6d791040957b5efa0311ef14f4278d92143b4c8369ad55d969ae6c1a6840ade8",
84 | "description": "Some Stuff",
85 | "downloaded": 150126947,
86 | "size": 150126947,
87 | "peers": 0,
88 | "download_speed": 0,
89 | "upload_speed": 0,
90 | "files_count": 17,
91 | "dir_name": "torrent/",
92 | "completed": true,
93 | "header_loaded": true,
94 | "info_loaded": true,
95 | "active": true,
96 | "seeding": true
97 | },
98 | {
99 | "bag_id": "85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f",
100 | "description": "FunnyPack",
101 | "downloaded": 188249739,
102 | "size": 188249739,
103 | "peers": 0,
104 | "download_speed": 0,
105 | "upload_speed": 0,
106 | "files_count": 3,
107 | "dir_name": "video/",
108 | "completed": false,
109 | "header_loaded": true,
110 | "info_loaded": true,
111 | "active": false,
112 | "seeding": false
113 | }
114 | ]
115 | }
116 | ```
117 |
118 | * Size in bytes and speed in bytes per second
119 |
120 | #### GET /api/v1/details?bag_id=[id]
121 | Response:
122 | ```json
123 | {
124 | "bag_id": "85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f",
125 | "description": "FunnyPack",
126 | "downloaded": 130936,
127 | "size": 188249739,
128 | "download_speed": 0,
129 | "upload_speed": 0,
130 | "files_count": 3,
131 | "dir_name": "video/",
132 | "completed": false,
133 | "header_loaded": true,
134 | "info_loaded": true,
135 | "active": true,
136 | "seeding": true,
137 | "piece_size": 131072,
138 | "bag_size": 46749448209,
139 | "merkle_hash": "acaaf3306ce628b18c62bd074b263c2354b1fd156eab189d4398db02f40ed09c",
140 | "path": "/root/admin/downloads",
141 | "files": [
142 | {
143 | "index": 0,
144 | "name": "200px-Feels_good_man.jpg",
145 | "size": 13768
146 | },
147 | {
148 | "index": 1,
149 | "name": "kek/1.txt",
150 | "size": 22
151 | },
152 | {
153 | "index": 2,
154 | "name": "videoplayback.mp4",
155 | "size": 188235949
156 | }
157 | ],
158 | "peers": [
159 | {
160 | "addr": "31.172.68.159:17555",
161 | "id": "bec28d6ff140884d7304b2698630cf84b9b4d14f1c55b3b504205bebf1c37133",
162 | "upload_speed": 0,
163 | "download_speed": 0
164 | },
165 | {
166 | "addr": "185.18.52.220:17555",
167 | "id": "f546878e8e4bd3885cc623ab0440f05abb12138d4701cee998e4f073ec9ade7f",
168 | "upload_speed": 0,
169 | "download_speed": 0
170 | },
171 | {
172 | "addr": "185.195.69.60:13333",
173 | "id": "04e7276cc1d3d480c70c83b0fb66d88412e34a5734b15a412155b1e9b5ff17a7",
174 | "upload_speed": 0,
175 | "download_speed": 0
176 | }
177 | ]
178 | }
179 | ```
180 |
181 | #### POST /api/v1/create
182 | Request:
183 | ```json
184 | {
185 | "description": "Some Stuff",
186 | "path": "/Users/admin/some-dir"
187 | }
188 | ```
189 |
190 | Response:
191 | ```json
192 | {
193 | "bag_id": "6d791040957b5efa0311ef14f4278d92143b4c8369ad55d969ae6c1a6840ade8"
194 | }
195 | ```
196 |
197 | #### POST /api/v1/remove
198 | Request:
199 | ```json
200 | {
201 | "bag_id": "85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f",
202 | "with_files": false
203 | }
204 | ```
205 |
206 | Response:
207 | ```json
208 | {
209 | "ok": true
210 | }
211 | ```
212 |
213 | #### POST /api/v1/stop
214 | Request:
215 | ```json
216 | {
217 | "bag_id": "85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f"
218 | }
219 | ```
220 |
221 | Response:
222 | ```json
223 | {
224 | "ok": true
225 | }
226 | ```
227 |
228 | #### GET /api/v1/piece/proof?bag_id=[bag_id]&piece=[piece_index]
229 |
230 | Response:
231 | ```json
232 | {
233 | "proof": "te6ccgECGAEAAhIACUYDHTW6QVztpEzC31WcBwM8yBMvSGXQd3MD+wAhTsHW0xIACwEiAAMCKEgBAc7bhbyT2s90nYhAtDWBlX9L8/vk1RRSnBoStVbcYP9yAAoiAAUEKEgBAU3BESCG4LO5vLWDqEeHHaVoNfsW4uB6ulW98Ig+0F14AAkiAAcGKEgBAUTJbcOij+kzzAfzGdAVE+dqUkVeEh8k0Fa4Er17mqv0AAgiAAkIKEgBAem7eQT+47rVFPOw2xTXStiPmmvp8qnTRoJU4ytEVjKBAAciAAsKKEgBARYLz8mwWc5C7/m2pxPyuvHyHCJxYYERZpIVLFOfeQ+QAAYiAA0MKEgBAWPbB3lTLu3TmzksGONbNpq0B9ZzIkIngurqakszU2VOAAUiAA8OKEgBAZ1yE9s5xTNpnnPznOY9ec7CoWth9ss7zx+BpPooewEeAAQiABEQKEgBAXZvDBuAa212EubKLPaSX62+k36vechsm9D50Qo2cTDiAAMiABMSKEgBAbhbC8Eeb4JXZSowetpDFDnu6kiV98PUmtc1LnTvY5JXAAIiABUUKEgBAUzzpEPNmZLfP8J+90j7cniw7f3eiKvvJWTGx3sX82yvAAECABcWAED6S6nWCQpkeziAkJwbLPvFX6hjRdFFPlXF6dQXqauQOwBAixDSw4tIVZgM4DvkN6Juu2Mu5FA3yuTtitTe60WOzVE="
234 | }
235 | ```
236 |
237 |
238 | [ton-svg]: https://img.shields.io/badge/Based%20on-TON-blue
239 | [join-svg]: https://img.shields.io/badge/Join%20-Telegram-blue
240 | [ton]: https://ton.org
241 | [tg]: https://t.me/tonrh
242 |
243 |
--------------------------------------------------------------------------------
/Tonutils Storage.postman_collection.json:
--------------------------------------------------------------------------------
1 | {
2 | "info": {
3 | "_postman_id": "ebbdc51e-67b9-4d8c-a042-6464b527de2c",
4 | "name": "Tonutils Storage",
5 | "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json",
6 | "_exporter_id": "14189790"
7 | },
8 | "item": [
9 | {
10 | "name": "/api/v1/add",
11 | "request": {
12 | "method": "POST",
13 | "header": [],
14 | "body": {
15 | "mode": "raw",
16 | "raw": "{\n \"bag_id\": \"85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f\",\n \"path\": \"./db/files\",\n \"files\": [1]\n}",
17 | "options": {
18 | "raw": {
19 | "language": "json"
20 | }
21 | }
22 | },
23 | "url": {
24 | "raw": "http://127.0.0.1:8192/api/v1/add",
25 | "protocol": "http",
26 | "host": [
27 | "127",
28 | "0",
29 | "0",
30 | "1"
31 | ],
32 | "port": "8192",
33 | "path": [
34 | "api",
35 | "v1",
36 | "add"
37 | ]
38 | }
39 | },
40 | "response": [
41 | {
42 | "name": "/api/v1/add",
43 | "originalRequest": {
44 | "method": "POST",
45 | "header": [],
46 | "body": {
47 | "mode": "raw",
48 | "raw": "{\n \"bag_id\": \"85D0998DCF325B6FEE4F529D4DCF66FB253FC39C59687C82A0EF7FC96FED4C9F\",\n \"path\": \"./db/files\",\n \"files\": [2]\n}",
49 | "options": {
50 | "raw": {
51 | "language": "json"
52 | }
53 | }
54 | },
55 | "url": {
56 | "raw": "http://127.0.0.1:8192/api/v1/add",
57 | "protocol": "http",
58 | "host": [
59 | "127",
60 | "0",
61 | "0",
62 | "1"
63 | ],
64 | "port": "8192",
65 | "path": [
66 | "api",
67 | "v1",
68 | "add"
69 | ]
70 | }
71 | },
72 | "status": "OK",
73 | "code": 200,
74 | "_postman_previewlanguage": "json",
75 | "header": [
76 | {
77 | "key": "Content-Type",
78 | "value": "application/json"
79 | },
80 | {
81 | "key": "Date",
82 | "value": "Mon, 12 Jun 2023 12:02:33 GMT"
83 | },
84 | {
85 | "key": "Content-Length",
86 | "value": "12"
87 | }
88 | ],
89 | "cookie": [],
90 | "body": "{\n \"ok\": true\n}"
91 | }
92 | ]
93 | },
94 | {
95 | "name": "/api/v1/stop",
96 | "request": {
97 | "method": "POST",
98 | "header": [],
99 | "body": {
100 | "mode": "raw",
101 | "raw": "{\n \"bag_id\": \"85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f\"\n}",
102 | "options": {
103 | "raw": {
104 | "language": "json"
105 | }
106 | }
107 | },
108 | "url": {
109 | "raw": "http://127.0.0.1:8192/api/v1/stop",
110 | "protocol": "http",
111 | "host": [
112 | "127",
113 | "0",
114 | "0",
115 | "1"
116 | ],
117 | "port": "8192",
118 | "path": [
119 | "api",
120 | "v1",
121 | "stop"
122 | ]
123 | }
124 | },
125 | "response": [
126 | {
127 | "name": "/api/v1/stop",
128 | "originalRequest": {
129 | "method": "POST",
130 | "header": [],
131 | "body": {
132 | "mode": "raw",
133 | "raw": "{\n \"bag_id\": \"85D0998DCF325B6FEE4F529D4DCF66FB253FC39C59687C82A0EF7FC96FED4C9F\"\n}",
134 | "options": {
135 | "raw": {
136 | "language": "json"
137 | }
138 | }
139 | },
140 | "url": {
141 | "raw": "http://127.0.0.1:8192/api/v1/stop",
142 | "protocol": "http",
143 | "host": [
144 | "127",
145 | "0",
146 | "0",
147 | "1"
148 | ],
149 | "port": "8192",
150 | "path": [
151 | "api",
152 | "v1",
153 | "stop"
154 | ]
155 | }
156 | },
157 | "status": "OK",
158 | "code": 200,
159 | "_postman_previewlanguage": "json",
160 | "header": [
161 | {
162 | "key": "Content-Type",
163 | "value": "application/json"
164 | },
165 | {
166 | "key": "Date",
167 | "value": "Mon, 12 Jun 2023 12:03:06 GMT"
168 | },
169 | {
170 | "key": "Content-Length",
171 | "value": "12"
172 | }
173 | ],
174 | "cookie": [],
175 | "body": "{\n \"ok\": true\n}"
176 | }
177 | ]
178 | },
179 | {
180 | "name": "/api/v1/remove",
181 | "request": {
182 | "method": "POST",
183 | "header": [],
184 | "body": {
185 | "mode": "raw",
186 | "raw": "{\n \"bag_id\": \"85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f\",\n \"with_files\": false\n}",
187 | "options": {
188 | "raw": {
189 | "language": "json"
190 | }
191 | }
192 | },
193 | "url": {
194 | "raw": "http://127.0.0.1:8192/api/v1/remove",
195 | "protocol": "http",
196 | "host": [
197 | "127",
198 | "0",
199 | "0",
200 | "1"
201 | ],
202 | "port": "8192",
203 | "path": [
204 | "api",
205 | "v1",
206 | "remove"
207 | ]
208 | }
209 | },
210 | "response": [
211 | {
212 | "name": "/api/v1/remove",
213 | "originalRequest": {
214 | "method": "POST",
215 | "header": [],
216 | "body": {
217 | "mode": "raw",
218 | "raw": "{\n \"bag_id\": \"85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f\",\n \"with_files\": false\n}",
219 | "options": {
220 | "raw": {
221 | "language": "json"
222 | }
223 | }
224 | },
225 | "url": {
226 | "raw": "http://127.0.0.1:8192/api/v1/remove",
227 | "protocol": "http",
228 | "host": [
229 | "127",
230 | "0",
231 | "0",
232 | "1"
233 | ],
234 | "port": "8192",
235 | "path": [
236 | "api",
237 | "v1",
238 | "remove"
239 | ]
240 | }
241 | },
242 | "status": "OK",
243 | "code": 200,
244 | "_postman_previewlanguage": "json",
245 | "header": [
246 | {
247 | "key": "Content-Type",
248 | "value": "application/json"
249 | },
250 | {
251 | "key": "Date",
252 | "value": "Mon, 12 Jun 2023 12:12:45 GMT"
253 | },
254 | {
255 | "key": "Content-Length",
256 | "value": "12"
257 | }
258 | ],
259 | "cookie": [],
260 | "body": "{\n \"ok\": true\n}"
261 | }
262 | ]
263 | },
264 | {
265 | "name": "/api/v1/list",
266 | "request": {
267 | "method": "GET",
268 | "header": [],
269 | "url": {
270 | "raw": "http://127.0.0.1:8192/api/v1/list",
271 | "protocol": "http",
272 | "host": [
273 | "127",
274 | "0",
275 | "0",
276 | "1"
277 | ],
278 | "port": "8192",
279 | "path": [
280 | "api",
281 | "v1",
282 | "list"
283 | ]
284 | }
285 | },
286 | "response": [
287 | {
288 | "name": "/api/v1/list",
289 | "originalRequest": {
290 | "method": "GET",
291 | "header": [],
292 | "url": {
293 | "raw": "http://127.0.0.1:8192/api/v1/list",
294 | "protocol": "http",
295 | "host": [
296 | "127",
297 | "0",
298 | "0",
299 | "1"
300 | ],
301 | "port": "8192",
302 | "path": [
303 | "api",
304 | "v1",
305 | "list"
306 | ]
307 | }
308 | },
309 | "status": "OK",
310 | "code": 200,
311 | "_postman_previewlanguage": "json",
312 | "header": [
313 | {
314 | "key": "Content-Type",
315 | "value": "application/json"
316 | },
317 | {
318 | "key": "Date",
319 | "value": "Mon, 12 Jun 2023 12:05:58 GMT"
320 | },
321 | {
322 | "key": "Content-Length",
323 | "value": "642"
324 | }
325 | ],
326 | "cookie": [],
327 | "body": "{\n \"Bags\": [\n {\n \"bag_id\": \"6d791040957b5efa0311ef14f4278d92143b4c8369ad55d969ae6c1a6840ade8\",\n \"description\": \"Some Stuff\",\n \"downloaded\": 150126947,\n \"size\": 150126947,\n \"peers\": 0,\n \"download_speed\": 0,\n \"upload_speed\": 0,\n \"files_count\": 17,\n \"dir_name\": \"torrent/\",\n \"completed\": true,\n \"header_loaded\": true,\n \"info_loaded\": true,\n \"active\": true,\n \"seeding\": true\n },\n {\n \"bag_id\": \"85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f\",\n \"description\": \"FunnyPack\",\n \"downloaded\": 188350328,\n \"size\": 188249739,\n \"peers\": 0,\n \"download_speed\": 0,\n \"upload_speed\": 0,\n \"files_count\": 3,\n \"dir_name\": \"video/\",\n \"completed\": false,\n \"header_loaded\": true,\n \"info_loaded\": true,\n \"active\": false,\n \"seeding\": false\n }\n ]\n}"
328 | }
329 | ]
330 | },
331 | {
332 | "name": "/api/v1/details",
333 | "request": {
334 | "method": "GET",
335 | "header": [],
336 | "url": {
337 | "raw": "http://127.0.0.1:8192/api/v1/details?bag_id=85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f",
338 | "protocol": "http",
339 | "host": [
340 | "127",
341 | "0",
342 | "0",
343 | "1"
344 | ],
345 | "port": "8192",
346 | "path": [
347 | "api",
348 | "v1",
349 | "details"
350 | ],
351 | "query": [
352 | {
353 | "key": "bag_id",
354 | "value": "85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f"
355 | }
356 | ]
357 | }
358 | },
359 | "response": [
360 | {
361 | "name": "/api/v1/details",
362 | "originalRequest": {
363 | "method": "GET",
364 | "header": [],
365 | "url": {
366 | "raw": "http://127.0.0.1:8192/api/v1/details?bag_id=85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f",
367 | "protocol": "http",
368 | "host": [
369 | "127",
370 | "0",
371 | "0",
372 | "1"
373 | ],
374 | "port": "8192",
375 | "path": [
376 | "api",
377 | "v1",
378 | "details"
379 | ],
380 | "query": [
381 | {
382 | "key": "bag_id",
383 | "value": "85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f"
384 | }
385 | ]
386 | }
387 | },
388 | "status": "OK",
389 | "code": 200,
390 | "_postman_previewlanguage": "json",
391 | "header": [
392 | {
393 | "key": "Content-Type",
394 | "value": "application/json"
395 | },
396 | {
397 | "key": "Date",
398 | "value": "Thu, 15 Jun 2023 06:29:58 GMT"
399 | },
400 | {
401 | "key": "Content-Length",
402 | "value": "1178"
403 | }
404 | ],
405 | "cookie": [],
406 | "body": "{\n \"bag_id\": \"85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f\",\n \"description\": \"FunnyPack\",\n \"downloaded\": 188249739,\n \"size\": 188249739,\n \"download_speed\": 0,\n \"upload_speed\": 0,\n \"files_count\": 3,\n \"dir_name\": \"video/\",\n \"completed\": true,\n \"header_loaded\": true,\n \"info_loaded\": true,\n \"active\": true,\n \"seeding\": true,\n \"bag_pieces_num\": 1437,\n \"has_pieces_mask\": \"//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////8f\",\n \"files\": [\n {\n \"index\": 0,\n \"name\": \"200px-Feels_good_man.jpg\",\n \"size\": 13768\n },\n {\n \"index\": 1,\n \"name\": \"kek/1.txt\",\n \"size\": 22\n },\n {\n \"index\": 2,\n \"name\": \"videoplayback.mp4\",\n \"size\": 188235949\n }\n ],\n \"peers\": [\n {\n \"addr\": \"185.195.69.60:13333\",\n \"id\": \"04e7276cc1d3d480c70c83b0fb66d88412e34a5734b15a412155b1e9b5ff17a7\",\n \"upload_speed\": 0,\n \"download_speed\": 0\n },\n {\n \"addr\": \"31.172.68.159:17555\",\n \"id\": \"bec28d6ff140884d7304b2698630cf84b9b4d14f1c55b3b504205bebf1c37133\",\n \"upload_speed\": 0,\n \"download_speed\": 0\n },\n {\n \"addr\": \"185.18.52.220:17555\",\n \"id\": \"f546878e8e4bd3885cc623ab0440f05abb12138d4701cee998e4f073ec9ade7f\",\n \"upload_speed\": 0,\n \"download_speed\": 0\n }\n ]\n}"
407 | }
408 | ]
409 | },
410 | {
411 | "name": "/api/v1/piece/proof",
412 | "request": {
413 | "method": "GET",
414 | "header": [],
415 | "url": {
416 | "raw": "http://127.0.0.1:8192/api/v1/piece/proof?bag_id=85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f&piece=0",
417 | "protocol": "http",
418 | "host": [
419 | "127",
420 | "0",
421 | "0",
422 | "1"
423 | ],
424 | "port": "8192",
425 | "path": [
426 | "api",
427 | "v1",
428 | "piece",
429 | "proof"
430 | ],
431 | "query": [
432 | {
433 | "key": "bag_id",
434 | "value": "85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f"
435 | },
436 | {
437 | "key": "piece",
438 | "value": "0"
439 | }
440 | ]
441 | }
442 | },
443 | "response": [
444 | {
445 | "name": "/api/v1/piece/proof",
446 | "originalRequest": {
447 | "method": "GET",
448 | "header": [],
449 | "url": {
450 | "raw": "http://127.0.0.1:8192/api/v1/piece/proof?bag_id=85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f&piece=0",
451 | "protocol": "http",
452 | "host": [
453 | "127",
454 | "0",
455 | "0",
456 | "1"
457 | ],
458 | "port": "8192",
459 | "path": [
460 | "api",
461 | "v1",
462 | "piece",
463 | "proof"
464 | ],
465 | "query": [
466 | {
467 | "key": "bag_id",
468 | "value": "85d0998dcf325b6fee4f529d4dcf66fb253fc39c59687c82a0ef7fc96fed4c9f"
469 | },
470 | {
471 | "key": "piece",
472 | "value": "0"
473 | }
474 | ]
475 | }
476 | },
477 | "status": "OK",
478 | "code": 200,
479 | "_postman_previewlanguage": "json",
480 | "header": [
481 | {
482 | "key": "Content-Type",
483 | "value": "application/json"
484 | },
485 | {
486 | "key": "Date",
487 | "value": "Thu, 15 Jun 2023 06:31:41 GMT"
488 | },
489 | {
490 | "key": "Content-Length",
491 | "value": "737"
492 | }
493 | ],
494 | "cookie": [],
495 | "body": "{\n \"proof\": \"te6ccgECGAEAAhIACUYDHTW6QVztpEzC31WcBwM8yBMvSGXQd3MD+wAhTsHW0xIACwEiAAMCKEgBAc7bhbyT2s90nYhAtDWBlX9L8/vk1RRSnBoStVbcYP9yAAoiAAUEKEgBAU3BESCG4LO5vLWDqEeHHaVoNfsW4uB6ulW98Ig+0F14AAkiAAcGKEgBAUTJbcOij+kzzAfzGdAVE+dqUkVeEh8k0Fa4Er17mqv0AAgiAAkIKEgBAem7eQT+47rVFPOw2xTXStiPmmvp8qnTRoJU4ytEVjKBAAciAAsKKEgBARYLz8mwWc5C7/m2pxPyuvHyHCJxYYERZpIVLFOfeQ+QAAYiAA0MKEgBAWPbB3lTLu3TmzksGONbNpq0B9ZzIkIngurqakszU2VOAAUiAA8OKEgBAZ1yE9s5xTNpnnPznOY9ec7CoWth9ss7zx+BpPooewEeAAQiABEQKEgBAXZvDBuAa212EubKLPaSX62+k36vechsm9D50Qo2cTDiAAMiABMSKEgBAbhbC8Eeb4JXZSowetpDFDnu6kiV98PUmtc1LnTvY5JXAAIiABUUKEgBAUzzpEPNmZLfP8J+90j7cniw7f3eiKvvJWTGx3sX82yvAAECABcWAED6S6nWCQpkeziAkJwbLPvFX6hjRdFFPlXF6dQXqauQOwBAixDSw4tIVZgM4DvkN6Juu2Mu5FA3yuTtitTe60WOzVE=\"\n}"
496 | }
497 | ]
498 | },
499 | {
500 | "name": "/api/v1/create",
501 | "request": {
502 | "method": "POST",
503 | "header": [],
504 | "body": {
505 | "mode": "raw",
506 | "raw": "{\n \"description\": \"Some Stuff\",\n \"path\": \"/Users/admin/some-dir\"\n}",
507 | "options": {
508 | "raw": {
509 | "language": "json"
510 | }
511 | }
512 | },
513 | "url": {
514 | "raw": "http://127.0.0.1:8192/api/v1/create",
515 | "protocol": "http",
516 | "host": [
517 | "127",
518 | "0",
519 | "0",
520 | "1"
521 | ],
522 | "port": "8192",
523 | "path": [
524 | "api",
525 | "v1",
526 | "create"
527 | ]
528 | }
529 | },
530 | "response": [
531 | {
532 | "name": "/api/v1/create",
533 | "originalRequest": {
534 | "method": "POST",
535 | "header": [],
536 | "body": {
537 | "mode": "raw",
538 | "raw": "{\n \"description\": \"Some Stuff\",\n \"path\": \"/Users/admin/some-dir\"\n}",
539 | "options": {
540 | "raw": {
541 | "language": "json"
542 | }
543 | }
544 | },
545 | "url": {
546 | "raw": "http://127.0.0.1:8192/api/v1/create",
547 | "protocol": "http",
548 | "host": [
549 | "127",
550 | "0",
551 | "0",
552 | "1"
553 | ],
554 | "port": "8192",
555 | "path": [
556 | "api",
557 | "v1",
558 | "create"
559 | ]
560 | }
561 | },
562 | "status": "OK",
563 | "code": 200,
564 | "_postman_previewlanguage": "json",
565 | "header": [
566 | {
567 | "key": "Content-Type",
568 | "value": "application/json"
569 | },
570 | {
571 | "key": "Date",
572 | "value": "Mon, 12 Jun 2023 12:05:46 GMT"
573 | },
574 | {
575 | "key": "Content-Length",
576 | "value": "78"
577 | }
578 | ],
579 | "cookie": [],
580 | "body": "{\n \"bag_id\": \"6d791040957b5efa0311ef14f4278d92143b4c8369ad55d969ae6c1a6840ade8\"\n}"
581 | }
582 | ]
583 | }
584 | ]
585 | }
--------------------------------------------------------------------------------
/api/api.go:
--------------------------------------------------------------------------------
1 | package api
2 |
import (
	"crypto/ed25519"
	"crypto/subtle"
	"encoding/hex"
	"encoding/json"
	"math/bits"
	"net/http"
	"path/filepath"
	"strconv"

	"github.com/pterm/pterm"
	"github.com/xssnick/tonutils-go/tl"
	"github.com/xssnick/tonutils-storage-provider/pkg/transport"
	"github.com/xssnick/tonutils-storage/db"
	"github.com/xssnick/tonutils-storage/storage"
)
16 |
// Error is the JSON envelope returned for any failed API request.
type Error struct {
	Error string `json:"error"`
}

// Ok is a generic success/failure response body.
type Ok struct {
	Ok bool `json:"ok"`
}

// ADNLProofResponse is returned by /api/v1/sign/provider: the node's
// public key and the signature over the serialized proof scheme.
type ADNLProofResponse struct {
	Key       []byte `json:"key"`
	Signature []byte `json:"signature"`
}

// ProofResponse carries a serialized proof for a single piece,
// returned by /api/v1/piece/proof.
type ProofResponse struct {
	Proof []byte `json:"proof"`
}

// File describes a single file inside a bag.
type File struct {
	Index uint32 `json:"index"`
	Name  string `json:"name"`
	Size  uint64 `json:"size"`
}

// Peer describes one connected peer and its current transfer speeds.
type Peer struct {
	Addr          string `json:"addr"`
	ID            string `json:"id"`
	UploadSpeed   uint64 `json:"upload_speed"`
	DownloadSpeed uint64 `json:"download_speed"`
}

// BagDetailed extends Bag with piece-, file- and peer-level details;
// returned by /api/v1/details.
type BagDetailed struct {
	Bag
	BagPiecesNum  uint32 `json:"bag_pieces_num"`
	HasPiecesMask []byte `json:"has_pieces_mask"`
	Files         []File `json:"files"`
	Peers         []Peer `json:"peers"`

	PieceSize  uint32 `json:"piece_size"`
	BagSize    uint64 `json:"bag_size"`
	MerkleHash string `json:"merkle_hash"`
	Path       string `json:"path"`
}

// Bag is the summary view of a torrent, used by /api/v1/list and
// embedded in BagDetailed.
type Bag struct {
	BagID         string `json:"bag_id"`
	Description   string `json:"description"`
	Downloaded    uint64 `json:"downloaded"`
	Size          uint64 `json:"size"`
	HeaderSize    uint64 `json:"header_size"`
	Peers         uint64 `json:"peers"`
	DownloadSpeed uint64 `json:"download_speed"`
	UploadSpeed   uint64 `json:"upload_speed"`
	FilesCount    uint64 `json:"files_count"`
	DirName       string `json:"dir_name"`
	Completed     bool   `json:"completed"`
	HeaderLoaded  bool   `json:"header_loaded"`
	InfoLoaded    bool   `json:"info_loaded"`
	Active        bool   `json:"active"`
	Seeding       bool   `json:"seeding"`
}

// List is the response body of /api/v1/list.
type List struct {
	Bags []Bag `json:"bags"`
}

// Created is the response body of /api/v1/create.
type Created struct {
	BagID string `json:"bag_id"`
}

// Credentials holds the HTTP Basic auth login/password pair for the API.
type Credentials struct {
	Login    string
	Password string
}

// Server is the HTTP API server. credentials may be nil, in which case
// requests are not authenticated (see withAuth).
type Server struct {
	credentials *Credentials
	connector   storage.NetConnector
	store       *db.Storage
}
96 |
97 | func NewServer(connector storage.NetConnector, store *db.Storage) *Server {
98 | return &Server{
99 | connector: connector,
100 | store: store,
101 | }
102 | }
103 |
104 | func (s *Server) SetCredentials(credentials *Credentials) {
105 | s.credentials = credentials
106 | }
107 |
108 | func (s *Server) Start(addr string) error {
109 | m := http.NewServeMux()
110 | m.HandleFunc("/api/v1/details", s.withAuth(s.handleDetails))
111 | m.HandleFunc("/api/v1/add", s.withAuth(s.handleAdd))
112 | m.HandleFunc("/api/v1/create", s.withAuth(s.handleCreate))
113 | m.HandleFunc("/api/v1/remove", s.withAuth(s.handleRemove))
114 | m.HandleFunc("/api/v1/stop", s.withAuth(s.handleStop))
115 | m.HandleFunc("/api/v1/list", s.withAuth(s.handleList))
116 | m.HandleFunc("/api/v1/piece/proof", s.withAuth(s.handlePieceProof))
117 | m.HandleFunc("/api/v1/sign/provider", s.withAuth(s.handleSignProvider))
118 |
119 | return http.ListenAndServe(addr, m)
120 | }
121 |
122 | func (s *Server) handleAdd(w http.ResponseWriter, r *http.Request) {
123 | req := struct {
124 | BagID string `json:"bag_id"`
125 | Path string `json:"path"`
126 | DownloadAll bool `json:"download_all"`
127 | Files []uint32 `json:"files"`
128 | }{}
129 | if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
130 | response(w, http.StatusBadRequest, Error{err.Error()})
131 | return
132 | }
133 |
134 | bag, err := hex.DecodeString(req.BagID)
135 | if err != nil {
136 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
137 | return
138 | }
139 | if len(bag) != 32 {
140 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
141 | return
142 | }
143 |
144 | tor := s.store.GetTorrent(bag)
145 | if tor == nil {
146 | tor = storage.NewTorrent(req.Path+"/"+hex.EncodeToString(bag), s.store, s.connector)
147 | tor.BagID = bag
148 |
149 | if err = tor.Start(true, req.DownloadAll, false); err != nil {
150 | pterm.Error.Println("Failed to start:", err.Error())
151 | response(w, http.StatusInternalServerError, Error{"Failed to start download:" + err.Error()})
152 | return
153 | }
154 |
155 | err = s.store.SetTorrent(tor)
156 | if err != nil {
157 | pterm.Error.Println("Failed to set storage:", err.Error())
158 | response(w, http.StatusInternalServerError, Error{"Failed to save to db:" + err.Error()})
159 | return
160 | }
161 | pterm.Success.Println("Bag added", hex.EncodeToString(bag))
162 | } else {
163 | if err = tor.Start(true, req.DownloadAll, false); err != nil {
164 | pterm.Error.Println("Failed to start:", err.Error())
165 | response(w, http.StatusInternalServerError, Error{"Failed to start download:" + err.Error()})
166 | return
167 | }
168 | pterm.Success.Println("Bag state updated", hex.EncodeToString(bag), "download all:", req.DownloadAll)
169 | }
170 |
171 | if len(req.Files) > 0 {
172 | if err = tor.SetActiveFilesIDs(req.Files); err != nil {
173 | pterm.Error.Println("Failed to set active files:", err.Error())
174 | response(w, http.StatusInternalServerError, Error{"Failed to set active files:" + err.Error()})
175 | return
176 | }
177 | pterm.Success.Println("Bag active files updated", hex.EncodeToString(bag))
178 | }
179 |
180 | response(w, http.StatusOK, Ok{true})
181 | }
182 |
183 | func (s *Server) handleCreate(w http.ResponseWriter, r *http.Request) {
184 | req := struct {
185 | Path string `json:"path"`
186 | Description string `json:"description"`
187 | KeepOnlyPaths []string `json:"keep_only_paths"`
188 | }{}
189 | if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
190 | response(w, http.StatusBadRequest, Error{err.Error()})
191 | return
192 | }
193 |
194 | var only map[string]bool
195 | if len(req.KeepOnlyPaths) > 0 {
196 | only = make(map[string]bool)
197 | for _, p := range req.KeepOnlyPaths {
198 | only[p] = true
199 | }
200 | }
201 |
202 | rootPath, dirName, files, err := s.store.DetectFileRefs(req.Path, only)
203 | if err != nil {
204 | pterm.Error.Println("Failed to read file refs:", err.Error())
205 | response(w, http.StatusInternalServerError, Error{err.Error()})
206 | return
207 | }
208 |
209 | it, err := storage.CreateTorrent(r.Context(), rootPath, dirName, req.Description, s.store, s.connector, files, nil)
210 | if err != nil {
211 | pterm.Error.Println("Failed to create bag:", err.Error())
212 | response(w, http.StatusInternalServerError, Error{err.Error()})
213 | return
214 | }
215 |
216 | if err = it.Start(true, true, false); err != nil {
217 | pterm.Error.Println("Failed to start bag:", err.Error())
218 | response(w, http.StatusInternalServerError, Error{err.Error()})
219 | return
220 | }
221 |
222 | if err = s.store.SetTorrent(it); err != nil {
223 | pterm.Error.Println("Failed to save bag to db:", err.Error())
224 | response(w, http.StatusInternalServerError, Error{err.Error()})
225 | return
226 | }
227 |
228 | pterm.Success.Println("Bag created", hex.EncodeToString(it.BagID))
229 | response(w, http.StatusOK, Created{BagID: hex.EncodeToString(it.BagID)})
230 | }
231 |
232 | func (s *Server) handlePieceProof(w http.ResponseWriter, r *http.Request) {
233 | bag, err := hex.DecodeString(r.URL.Query().Get("bag_id"))
234 | if err != nil {
235 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
236 | return
237 | }
238 | if len(bag) != 32 {
239 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
240 | return
241 | }
242 |
243 | piece, err := strconv.ParseUint(r.URL.Query().Get("piece"), 10, 32)
244 | if err != nil {
245 | response(w, http.StatusBadRequest, Error{"Invalid piece"})
246 | return
247 | }
248 |
249 | if tor := s.store.GetTorrent(bag); tor != nil {
250 | proof, err := tor.GetPieceProof(uint32(piece))
251 | if err == nil {
252 | response(w, http.StatusOK, ProofResponse{proof})
253 | return
254 | }
255 | }
256 | response(w, http.StatusNotFound, Ok{Ok: false})
257 | }
258 |
259 | func (s *Server) handleRemove(w http.ResponseWriter, r *http.Request) {
260 | req := struct {
261 | BagID string `json:"bag_id"`
262 | WithFiles bool `json:"with_files"`
263 | }{}
264 | if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
265 | response(w, http.StatusBadRequest, Error{err.Error()})
266 | return
267 | }
268 |
269 | bag, err := hex.DecodeString(req.BagID)
270 | if err != nil {
271 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
272 | return
273 | }
274 | if len(bag) != 32 {
275 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
276 | return
277 | }
278 |
279 | if tor := s.store.GetTorrent(bag); tor != nil {
280 | if err = s.store.RemoveTorrent(tor, req.WithFiles); err != nil {
281 | pterm.Error.Println("Failed to remove bag from db:", err.Error())
282 | response(w, http.StatusInternalServerError, Error{err.Error()})
283 | return
284 | }
285 | pterm.Success.Println("Bag removed", hex.EncodeToString(tor.BagID))
286 | response(w, http.StatusOK, Ok{Ok: true})
287 | return
288 | }
289 | response(w, http.StatusNotFound, Ok{Ok: false})
290 | }
291 |
292 | func (s *Server) handleList(w http.ResponseWriter, r *http.Request) {
293 | var bags []Bag
294 | for _, t := range s.store.GetAll() {
295 | bags = append(bags, s.getBag(t, true).Bag)
296 | }
297 | response(w, http.StatusOK, List{Bags: bags})
298 | }
299 |
300 | func (s *Server) handleSignProvider(w http.ResponseWriter, r *http.Request) {
301 | req := struct {
302 | ProviderID string `json:"provider_id"`
303 | }{}
304 | if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
305 | response(w, http.StatusBadRequest, Error{err.Error()})
306 | return
307 | }
308 |
309 | providerId, err := hex.DecodeString(req.ProviderID)
310 | if err != nil {
311 | response(w, http.StatusBadRequest, Error{"Invalid provider id"})
312 | return
313 | }
314 |
315 | if len(providerId) != 32 {
316 | response(w, http.StatusBadRequest, Error{"Invalid provider id"})
317 | return
318 | }
319 |
320 | res, err := tl.Serialize(transport.ADNLProofScheme{
321 | Key: providerId,
322 | }, true)
323 | if err != nil {
324 | response(w, http.StatusBadRequest, Error{"Invalid provider id, cannot serialize scheme"})
325 | return
326 | }
327 |
328 | key := s.connector.GetADNLPrivateKey()
329 | response(w, http.StatusOK, ADNLProofResponse{
330 | Key: key.Public().(ed25519.PublicKey),
331 | Signature: ed25519.Sign(key, res),
332 | })
333 | }
334 |
335 | func (s *Server) handleDetails(w http.ResponseWriter, r *http.Request) {
336 | bag, err := hex.DecodeString(r.URL.Query().Get("bag_id"))
337 | if err != nil {
338 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
339 | return
340 | }
341 | if len(bag) != 32 {
342 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
343 | return
344 | }
345 |
346 | if tor := s.store.GetTorrent(bag); tor != nil {
347 | response(w, http.StatusOK, s.getBag(tor, false))
348 | return
349 | }
350 | response(w, http.StatusNotFound, Ok{Ok: false})
351 | }
352 |
353 | func (s *Server) handleStop(w http.ResponseWriter, r *http.Request) {
354 | req := struct {
355 | BagID string `json:"bag_id"`
356 | }{}
357 | if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
358 | response(w, http.StatusBadRequest, Error{err.Error()})
359 | return
360 | }
361 |
362 | bag, err := hex.DecodeString(req.BagID)
363 | if err != nil {
364 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
365 | return
366 | }
367 | if len(bag) != 32 {
368 | response(w, http.StatusBadRequest, Error{"Invalid bag id"})
369 | return
370 | }
371 |
372 | if tor := s.store.GetTorrent(bag); tor != nil {
373 | tor.Stop()
374 | response(w, http.StatusOK, Ok{Ok: true})
375 | return
376 | }
377 | response(w, http.StatusNotFound, Ok{Ok: false})
378 |
379 | }
380 |
381 | func (s *Server) withAuth(next func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
382 | return func(w http.ResponseWriter, r *http.Request) {
383 | if crs := s.credentials; crs != nil {
384 | login, password, ok := r.BasicAuth()
385 | if !ok || login != crs.Login || password != crs.Password {
386 | response(w, http.StatusUnauthorized, Error{
387 | "Invalid credentials",
388 | })
389 | return
390 | }
391 | }
392 | next(w, r)
393 | }
394 | }
395 |
396 | func response(w http.ResponseWriter, status int, result any) {
397 | w.Header().Set("Content-Type", "application/json")
398 | w.WriteHeader(status)
399 | _ = json.NewEncoder(w).Encode(result)
400 | }
401 |
// getBag assembles the API view of a torrent. With short == true only the
// embedded Bag summary is filled (peer list, file list and pieces mask are
// skipped) — used by /api/v1/list; /api/v1/details uses the full form.
func (s *Server) getBag(t *storage.Torrent, short bool) BagDetailed {
	res := BagDetailed{
		Files: []File{},
		Peers: []Peer{},
	}

	// Aggregate transfer speeds and count over all connected peers.
	var dow, upl, num uint64
	for id, p := range t.GetPeers() {
		dow += p.GetDownloadSpeed()
		upl += p.GetUploadSpeed()
		num++

		if !short {
			res.Peers = append(res.Peers, Peer{
				Addr:          p.Addr,
				ID:            id,
				UploadSpeed:   p.GetUploadSpeed(),
				DownloadSpeed: p.GetDownloadSpeed(),
			})
		}
	}

	var desc, dirName string
	var headerSz, full, downloaded, filesCount uint64
	completed, infoLoaded, headerLoaded := false, false, false
	if t.Info != nil {
		infoLoaded = true
		// Count downloaded pieces from the per-piece bitmask.
		mask := t.PiecesMask()
		downloadedPieces := 0
		for _, b := range mask {
			downloadedPieces += bits.OnesCount8(b)
		}

		// Payload bytes downloaded = piece bytes minus the header prefix.
		downloaded = uint64(downloadedPieces*int(t.Info.PieceSize)) - t.Info.HeaderSize
		if uint64(downloadedPieces*int(t.Info.PieceSize)) < t.Info.HeaderSize { // 0 if header not fully downloaded
			downloaded = 0
		}

		headerSz = t.Info.HeaderSize
		full = t.Info.FileSize - t.Info.HeaderSize
		if downloaded > full { // cut not full last piece
			downloaded = full
		}
		completed = downloaded == full

		// Partial download: "completed" then means all *selected* files
		// are present, and "downloaded" is capped at their total size.
		if !completed && !t.IsDownloadAll() {
			var wantSz uint64
			files := t.GetActiveFilesIDs()
			for _, f := range files {
				off, err := t.GetFileOffsetsByID(f)
				if err == nil {
					wantSz += off.Size
				}
			}

			if downloaded > wantSz { // cut not full last piece
				downloaded = wantSz
			}
			completed = downloaded == wantSz
		}

		if !short {
			res.BagPiecesNum = t.Info.PiecesNum()
			res.HasPiecesMask = t.PiecesMask()
		}

		desc = t.Info.Description.Value
		// The header (dir name, file table) downloads separately from Info.
		if t.Header != nil {
			headerLoaded = true
			dirName = string(t.Header.DirName)
			filesCount = uint64(t.Header.FilesCount)

			if !short {
				list, err := t.ListFiles()
				if err == nil {
					for _, fl := range list {
						fi, err := t.GetFileOffsets(fl)
						if err != nil {
							// Skip files whose offsets can't be resolved.
							continue
						}

						res.Files = append(res.Files, File{
							Index: fi.Index,
							Name:  fi.Name,
							Size:  fi.Size,
						})
					}
				}
			}
		}

		res.BagSize = t.Info.FileSize
		res.PieceSize = t.Info.PieceSize
		res.MerkleHash = hex.EncodeToString(t.Info.RootHash)
	}

	res.Path = t.Path
	active, seeding := t.IsActive()
	res.Bag = Bag{
		BagID:         hex.EncodeToString(t.BagID),
		Description:   desc,
		Downloaded:    downloaded,
		Size:          full,
		HeaderSize:    headerSz,
		Peers:         num,
		DownloadSpeed: dow,
		UploadSpeed:   upl,
		FilesCount:    filesCount,
		DirName:       dirName,
		Completed:     completed,
		HeaderLoaded:  headerLoaded,
		InfoLoaded:    infoLoaded,
		Active:        active,
		Seeding:       seeding,
	}

	return res
}
520 |
--------------------------------------------------------------------------------
/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "context"
5 | "crypto/ed25519"
6 | "encoding/json"
7 | "errors"
8 | "github.com/pterm/pterm"
9 | tunnelConfig "github.com/ton-blockchain/adnl-tunnel/config"
10 | "log"
11 | "net"
12 | "os"
13 | "time"
14 | )
15 |
// Config is the node configuration persisted as config.json in the db dir.
type Config struct {
	Key              ed25519.PrivateKey // node's ed25519 private key, generated on first run
	ListenAddr       string             // listen address, default "0.0.0.0:17555"
	ExternalIP       string             // public IPv4 for seeding; empty when unreachable from outside
	DownloadsPath    string             // default directory for downloads, default "./downloads/"
	NetworkConfigUrl string             // URL of the TON global network config
	Version          uint               // config schema version; migrations key off this
	TunnelConfig     *tunnelConfig.ClientConfig
}
25 |
// checkIPAddress validates ip as an IPv4 address and returns its
// canonical dotted-quad form, or "" when it is not valid IPv4.
//
// Fix: the previous version logged len(p), which is always 0 on both
// failure paths (p is nil) — log the offending input instead.
func checkIPAddress(ip string) string {
	p := net.ParseIP(ip)
	if p == nil {
		log.Println("bad ip:", ip)
		return ""
	}
	p = p.To4()
	if p == nil {
		// Parsed, but not representable as IPv4 (e.g. a plain IPv6 address).
		log.Println("bad ip, not v4:", ip)
		return ""
	}

	return p.String()
}
40 |
// checkCanSeed probes whether this node is reachable from the internet.
// It listens on TCP 18889, asks the tonutils.com port-checker service
// (TCP 9099) to dial back, and expects the checker to send our public IP
// as plain text. Returns the detected IPv4 and true on success, or ""
// and false on failure / 5-second timeout.
func checkCanSeed() (string, bool) {
	ch := make(chan bool, 1)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	ip := ""
	// Accept one inbound connection from the checker and read the IP it
	// observed for us. ip is written before the send on ch, so reading
	// it after <-ch below is race-free.
	go func() {
		defer func() {
			ch <- ip != ""
		}()

		listen, err := net.Listen("tcp", "0.0.0.0:18889") // port the checker dials back to
		if err != nil {
			log.Println("listen err", err.Error())
			return
		}
		defer listen.Close()

		conn, err := listen.Accept()
		if err != nil {
			log.Println("accept err", err.Error())
			return
		}

		ipData := make([]byte, 256)
		n, err := conn.Read(ipData)
		if err != nil {
			log.Println("read err", err.Error())
			return
		}

		ip = string(ipData[:n])
		ip = checkIPAddress(ip) // "" unless a valid IPv4
		_ = conn.Close()
	}()

	sp, _ := pterm.DefaultSpinner.Start("Resolving port checker...")
	ips, err := net.LookupIP("tonutils.com")
	if err != nil || len(ips) == 0 {
		sp.Fail("Port is not resolved, you can download, but no-one can download from you, unless you specify your ip manually in config.json")
		return "", false
	}
	sp.Success("Port checker resolved.")

	sp, _ = pterm.DefaultSpinner.Start("Using port checker tonutils.com at ", ips[0].String())
	conn, err := net.Dial("tcp", ips[0].String()+":9099")
	if err != nil {
		// NOTE(review): the spinner is left running on this and the next
		// error path, and conn is never closed on success — consider
		// sp.Fail(...) here and a deferred conn.Close().
		return "", false
	}

	_, err = conn.Write([]byte("ME")) // ask the checker to dial us back
	if err != nil {
		return "", false
	}
	ok := false
	select {
	case k := <-ch:
		ok = k
		sp.Success("Ports are open, public ip is ", ip, " Seeding is available, bags can be downloaded from you.")
	case <-ctx.Done():
		// No callback within the timeout: assume inbound is blocked.
		_ = sp.Stop()
		pterm.Warning.Println("No request from port checker, looks like it cannot reach you, so ports are probably closed. You can download, " +
			"but no-one can download from you, unless you specify your ip manually in db's config.json")
	}

	return ip, ok
}
109 |
110 | func LoadConfig(dir string) (*Config, error) {
111 | _, err := os.Stat(dir)
112 | if err != nil {
113 | if errors.Is(err, os.ErrNotExist) {
114 | err = os.MkdirAll(dir, os.ModePerm)
115 | }
116 | if err != nil {
117 | return nil, err
118 | }
119 | }
120 |
121 | var cfg *Config
122 | path := dir + "/config.json"
123 | _, err = os.Stat(path)
124 | if os.IsNotExist(err) {
125 | _, priv, err := ed25519.GenerateKey(nil)
126 | if err != nil {
127 | return nil, err
128 | }
129 |
130 | cfg = &Config{
131 | Version: 1,
132 | Key: priv,
133 | ListenAddr: "0.0.0.0:17555",
134 | ExternalIP: "",
135 | DownloadsPath: "./downloads/",
136 | NetworkConfigUrl: "https://ton-blockchain.github.io/global.config.json",
137 | }
138 |
139 | cfg.TunnelConfig, err = tunnelConfig.GenerateClientConfig()
140 | if err != nil {
141 | return nil, err
142 | }
143 |
144 | ip, seed := checkCanSeed()
145 | if seed {
146 | cfg.ExternalIP = ip
147 | }
148 |
149 | err = SaveConfig(cfg, dir)
150 | if err != nil {
151 | return nil, err
152 | }
153 |
154 | return cfg, nil
155 | } else if err == nil {
156 | data, err := os.ReadFile(path)
157 | if err != nil {
158 | return nil, err
159 | }
160 |
161 | err = json.Unmarshal(data, &cfg)
162 | if err != nil {
163 | return nil, err
164 | }
165 | }
166 |
167 | if cfg.Version < 1 {
168 | cfg.Version = 1
169 | cfg.TunnelConfig, err = tunnelConfig.GenerateClientConfig()
170 | if err != nil {
171 | return nil, err
172 | }
173 |
174 | err = SaveConfig(cfg, dir)
175 | if err != nil {
176 | return nil, err
177 | }
178 | }
179 |
180 | return cfg, nil
181 | }
182 |
183 | func SaveConfig(cfg *Config, dir string) error {
184 | path := dir + "/config.json"
185 |
186 | data, err := json.MarshalIndent(cfg, "", "\t")
187 | if err != nil {
188 | return err
189 | }
190 |
191 | err = os.WriteFile(path, data, 0766)
192 | if err != nil {
193 | return err
194 | }
195 | return nil
196 | }
197 |
--------------------------------------------------------------------------------
/config/fallback.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | const FallbackNetworkConfig = `{
4 | "@type": "config.global",
5 | "dht": {
6 | "@type": "dht.config.global",
7 | "k": 6,
8 | "a": 3,
9 | "static_nodes": {
10 | "@type": "dht.nodes",
11 | "nodes": [
12 | {
13 | "@type": "dht.node",
14 | "id": {
15 | "@type": "pub.ed25519",
16 | "key": "6PGkPQSbyFp12esf1NqmDOaLoFA8i9+Mp5+cAx5wtTU="
17 | },
18 | "addr_list": {
19 | "@type": "adnl.addressList",
20 | "addrs": [
21 | {
22 | "@type": "adnl.address.udp",
23 | "ip": -1185526007,
24 | "port": 22096
25 | }
26 | ],
27 | "version": 0,
28 | "reinit_date": 0,
29 | "priority": 0,
30 | "expire_at": 0
31 | },
32 | "version": -1,
33 | "signature": "L4N1+dzXLlkmT5iPnvsmsixzXU0L6kPKApqMdcrGP5d9ssMhn69SzHFK+yIzvG6zQ9oRb4TnqPBaKShjjj2OBg=="
34 | },
35 | {
36 | "@type": "dht.node",
37 | "id": {
38 | "@type": "pub.ed25519",
39 | "key": "4R0C/zU56k+x2HGMsLWjX2rP/SpoTPIHSSAmidGlsb8="
40 | },
41 | "addr_list": {
42 | "@type": "adnl.addressList",
43 | "addrs": [
44 | {
45 | "@type": "adnl.address.udp",
46 | "ip": -1952265919,
47 | "port": 14395
48 | }
49 | ],
50 | "version": 0,
51 | "reinit_date": 0,
52 | "priority": 0,
53 | "expire_at": 0
54 | },
55 | "version": -1,
56 | "signature": "0uwWyCFn2KjPnnlbSFYXLZdwIakaSgI9WyRo87J3iCGwb5TvJSztgA224A9kNAXeutOrXMIPYv1b8Zt8ImsrCg=="
57 | },
58 | {
59 | "@type": "dht.node",
60 | "id": {
61 | "@type": "pub.ed25519",
62 | "key": "/YDNd+IwRUgL0mq21oC0L3RxrS8gTu0nciSPUrhqR78="
63 | },
64 | "addr_list": {
65 | "@type": "adnl.addressList",
66 | "addrs": [
67 | {
68 | "@type": "adnl.address.udp",
69 | "ip": -1402455171,
70 | "port": 14432
71 | }
72 | ],
73 | "version": 0,
74 | "reinit_date": 0,
75 | "priority": 0,
76 | "expire_at": 0
77 | },
78 | "version": -1,
79 | "signature": "6+oVk6HDtIFbwYi9khCc8B+fTFceBUo1PWZDVTkb4l84tscvr5QpzAkdK7sS5xGzxM7V7YYQ6gUQPrsP9xcLAw=="
80 | },
81 | {
82 | "@type": "dht.node",
83 | "id": {
84 | "@type": "pub.ed25519",
85 | "key": "DA0H568bb+LoO2LGY80PgPee59jTPCqqSJJzt1SH+KE="
86 | },
87 | "addr_list": {
88 | "@type": "adnl.addressList",
89 | "addrs": [
90 | {
91 | "@type": "adnl.address.udp",
92 | "ip": -1402397332,
93 | "port": 14583
94 | }
95 | ],
96 | "version": 0,
97 | "reinit_date": 0,
98 | "priority": 0,
99 | "expire_at": 0
100 | },
101 | "version": -1,
102 | "signature": "cL79gDTrixhaM9AlkCdZWccCts7ieQYQBmPxb/R7d7zHw3bEHL8Le96CFJoB1KHu8C85iDpFK8qlrGl1Yt/ZDg=="
103 | },
104 | {
105 | "@type": "dht.node",
106 | "id": {
107 | "@type": "pub.ed25519",
108 | "key": "MJr8xja0xpu9DoisFXBrkNHNx1XozR7HHw9fJdSyEdo="
109 | },
110 | "addr_list": {
111 | "@type": "adnl.addressList",
112 | "addrs": [
113 | {
114 | "@type": "adnl.address.udp",
115 | "ip": -2018147130,
116 | "port": 6302
117 | }
118 | ],
119 | "version": 0,
120 | "reinit_date": 0,
121 | "priority": 0,
122 | "expire_at": 0
123 | },
124 | "version": -1,
125 | "signature": "XcR5JaWcf4QMdI8urLSc1zwv5+9nCuItSE1EDa0dSwYF15R/BtJoKU5YHA4/T8SiO18aVPQk2SL1pbhevuMrAQ=="
126 | },
127 | {
128 | "@type": "dht.node",
129 | "id": {
130 | "@type": "pub.ed25519",
131 | "key": "Fhldu4zlnb20/TUj9TXElZkiEmbndIiE/DXrbGKu+0c="
132 | },
133 | "addr_list": {
134 | "@type": "adnl.addressList",
135 | "addrs": [
136 | {
137 | "@type": "adnl.address.udp",
138 | "ip": -2018147075,
139 | "port": 6302
140 | }
141 | ],
142 | "version": 0,
143 | "reinit_date": 0,
144 | "priority": 0,
145 | "expire_at": 0
146 | },
147 | "version": -1,
148 | "signature": "nUGB77UAkd2+ZAL5PgInb3TvtuLLXJEJ2icjAUKLv4qIGB3c/O9k/v0NKwSzhsMP0ljeTGbcIoMDw24qf3goCg=="
149 | },
150 | {
151 | "@type": "dht.node",
152 | "id": {
153 | "@type": "pub.ed25519",
154 | "key": "gzUNJnBJhdpooYCE8juKZo2y4tYDIQfoCvFm0yBr7y0="
155 | },
156 | "addr_list": {
157 | "@type": "adnl.addressList",
158 | "addrs": [
159 | {
160 | "@type": "adnl.address.udp",
161 | "ip": 89013260,
162 | "port": 54390
163 | }
164 | ],
165 | "version": 0,
166 | "reinit_date": 0,
167 | "priority": 0,
168 | "expire_at": 0
169 | },
170 | "version": -1,
171 | "signature": "LCrCkjmkMn6AZHW2I+oRm1gHK7CyBPfcb6LwsltskCPpNECyBl1GxZTX45n0xZtLgyBd/bOqMPBfawpQwWt1BA=="
172 | },
173 | {
174 | "@type": "dht.node",
175 | "id": {
176 | "@type": "pub.ed25519",
177 | "key": "jXiLaOQz1HPayilWgBWhV9xJhUIqfU95t+KFKQPIpXg="
178 | },
179 | "addr_list": {
180 | "@type": "adnl.addressList",
181 | "addrs": [
182 | {
183 | "@type": "adnl.address.udp",
184 | "ip": 94452896,
185 | "port": 12485
186 | }
187 | ],
188 | "version": 0,
189 | "reinit_date": 0,
190 | "priority": 0,
191 | "expire_at": 0
192 | },
193 | "version": -1,
194 | "signature": "fKSZh9nXMx+YblkQXn3I/bndTD0JZ1yAtK/tXPIGruNglpe9sWMXR+8fy3YogPhLJMdjNiMom1ya+tWG7qvBAQ=="
195 | },
196 | {
197 | "@type": "dht.node",
198 | "id": {
199 | "@type": "pub.ed25519",
200 | "key": "vhFPq+tgjJi+4ZbEOHBo4qjpqhBdSCzNZBdgXyj3NK8="
201 | },
202 | "addr_list": {
203 | "@type": "adnl.addressList",
204 | "addrs": [
205 | {
206 | "@type": "adnl.address.udp",
207 | "ip": 85383775,
208 | "port": 36752
209 | }
210 | ],
211 | "version": 0,
212 | "reinit_date": 0,
213 | "priority": 0,
214 | "expire_at": 0
215 | },
216 | "version": -1,
217 | "signature": "kBwAIgJVkz8AIOGoZcZcXWgNmWq8MSBWB2VhS8Pd+f9LLPIeeFxlDTtwAe8Kj7NkHDSDC+bPXLGQZvPv0+wHCg=="
218 | },
219 | {
220 | "@type": "dht.node",
221 | "id": {
222 | "@type": "pub.ed25519",
223 | "key": "sbsuMcdyYFSRQ0sG86/n+ZQ5FX3zOWm1aCVuHwXdgs0="
224 | },
225 | "addr_list": {
226 | "@type": "adnl.addressList",
227 | "addrs": [
228 | {
229 | "@type": "adnl.address.udp",
230 | "ip": 759132846,
231 | "port": 50187
232 | }
233 | ],
234 | "version": 0,
235 | "reinit_date": 0,
236 | "priority": 0,
237 | "expire_at": 0
238 | },
239 | "version": -1,
240 | "signature": "9FJwbFw3IECRFkb9bA54YaexjDmlNBArimWkh+BvW88mjm3K2i5V2uaBPS3GubvXWOwdHLE2lzQBobgZRGMyCg=="
241 | },
242 | {
243 | "@type": "dht.node",
244 | "id": {
245 | "@type": "pub.ed25519",
246 | "key": "aeMgdMdkkbkfAS4+n4BEGgtqhkf2/zXrVWWECOJ/h3A="
247 | },
248 | "addr_list": {
249 | "@type": "adnl.addressList",
250 | "addrs": [
251 | {
252 | "@type": "adnl.address.udp",
253 | "ip": -1481887565,
254 | "port": 25975
255 | }
256 | ],
257 | "version": 0,
258 | "reinit_date": 0,
259 | "priority": 0,
260 | "expire_at": 0
261 | },
262 | "version": -1,
263 | "signature": "z5ogivZWpQchkS4UR4wB7i2pfOpMwX9Nd/USxinL9LvJPa+/Aw3F1AytR9FX0BqDftxIYvblBYAB5JyAmlj+AA=="
264 | },
265 | {
266 | "@type": "dht.node",
267 | "id": {
268 | "@type": "pub.ed25519",
269 | "key": "rNzhnAlmtRn9rTzW6o2568S6bbOXly7ddO1olDws5wM="
270 | },
271 | "addr_list": {
272 | "@type": "adnl.addressList",
273 | "addrs": [
274 | {
275 | "@type": "adnl.address.udp",
276 | "ip": -2134428422,
277 | "port": 45943
278 | }
279 | ],
280 | "version": 0,
281 | "reinit_date": 0,
282 | "priority": 0,
283 | "expire_at": 0
284 | },
285 | "version": -1,
286 | "signature": "sn/+ZfkfCSw2bHnEnv04AXX/Goyw7+StHBPQOdPr+wvdbaJ761D7hyiMNdQGbuZv2Ep2cXJpiwylnZItrwdUDg=="
287 | }
288 | ]
289 | }
290 | },
291 | "liteservers": [
292 | {
293 | "ip": 84478511,
294 | "port": 19949,
295 | "id": {
296 | "@type": "pub.ed25519",
297 | "key": "n4VDnSCUuSpjnCyUk9e3QOOd6o0ItSWYbTnW3Wnn8wk="
298 | }
299 | },
300 | {
301 | "ip": 84478479,
302 | "port": 48014,
303 | "id": {
304 | "@type": "pub.ed25519",
305 | "key": "3XO67K/qi+gu3T9v8G2hx1yNmWZhccL3O7SoosFo8G0="
306 | }
307 | },
308 | {
309 | "ip": -2018135749,
310 | "port": 53312,
311 | "id": {
312 | "@type": "pub.ed25519",
313 | "key": "aF91CuUHuuOv9rm2W5+O/4h38M3sRm40DtSdRxQhmtQ="
314 | }
315 | },
316 | {
317 | "ip": -2018145068,
318 | "port": 13206,
319 | "id": {
320 | "@type": "pub.ed25519",
321 | "key": "K0t3+IWLOXHYMvMcrGZDPs+pn58a17LFbnXoQkKc2xw="
322 | }
323 | },
324 | {
325 | "ip": -2018145059,
326 | "port": 46995,
327 | "id": {
328 | "@type": "pub.ed25519",
329 | "key": "wQE0MVhXNWUXpWiW5Bk8cAirIh5NNG3cZM1/fSVKIts="
330 | }
331 | },
332 | {
333 | "ip": 1091931625,
334 | "port": 30131,
335 | "id": {
336 | "@type": "pub.ed25519",
337 | "key": "wrQaeIFispPfHndEBc0s0fx7GSp8UFFvebnytQQfc6A="
338 | }
339 | },
340 | {
341 | "ip": 1091931590,
342 | "port": 47160,
343 | "id": {
344 | "@type": "pub.ed25519",
345 | "key": "vOe1Xqt/1AQ2Z56Pr+1Rnw+f0NmAA7rNCZFIHeChB7o="
346 | }
347 | },
348 | {
349 | "ip": 1091931623,
350 | "port": 17728,
351 | "id": {
352 | "@type": "pub.ed25519",
353 | "key": "BYSVpL7aPk0kU5CtlsIae/8mf2B/NrBi7DKmepcjX6Q="
354 | }
355 | },
356 | {
357 | "ip": 1091931589,
358 | "port": 13570,
359 | "id": {
360 | "@type": "pub.ed25519",
361 | "key": "iVQH71cymoNgnrhOT35tl/Y7k86X5iVuu5Vf68KmifQ="
362 | }
363 | },
364 | {
365 | "ip": -1539021362,
366 | "port": 52995,
367 | "id": {
368 | "@type": "pub.ed25519",
369 | "key": "QnGFe9kihW+TKacEvvxFWqVXeRxCB6ChjjhNTrL7+/k="
370 | }
371 | },
372 | {
373 | "ip": -1539021936,
374 | "port": 20334,
375 | "id": {
376 | "@type": "pub.ed25519",
377 | "key": "gyLh12v4hBRtyBygvvbbO2HqEtgl+ojpeRJKt4gkMq0="
378 | }
379 | },
380 | {
381 | "ip": -1136338705,
382 | "port": 19925,
383 | "id": {
384 | "@type": "pub.ed25519",
385 | "key": "ucho5bEkufbKN1JR1BGHpkObq602whJn3Q3UwhtgSo4="
386 | }
387 | },
388 | {
389 | "ip": 868465979,
390 | "port": 19434,
391 | "id": {
392 | "@type": "pub.ed25519",
393 | "key": "J5CwYXuCZWVPgiFPW+NY2roBwDWpRRtANHSTYTRSVtI="
394 | }
395 | },
396 | {
397 | "ip": 868466060,
398 | "port": 23067,
399 | "id": {
400 | "@type": "pub.ed25519",
401 | "key": "vX8d0i31zB0prVuZK8fBkt37WnEpuEHrb7PElk4FJ1o="
402 | }
403 | },
404 | {
405 | "ip": -2018147130,
406 | "port": 53560,
407 | "id": {
408 | "@type": "pub.ed25519",
409 | "key": "NlYhh/xf4uQpE+7EzgorPHqIaqildznrpajJTRRH2HU="
410 | }
411 | },
412 | {
413 | "ip": -2018147075,
414 | "port": 46529,
415 | "id": {
416 | "@type": "pub.ed25519",
417 | "key": "jLO6yoooqUQqg4/1QXflpv2qGCoXmzZCR+bOsYJ2hxw="
418 | }
419 | },
420 | {
421 | "ip": 908566172,
422 | "port": 51565,
423 | "id": {
424 | "@type": "pub.ed25519",
425 | "key": "TDg+ILLlRugRB4Kpg3wXjPcoc+d+Eeb7kuVe16CS9z8="
426 | }
427 | },
428 | {
429 | "ip": -1185526007,
430 | "port": 4701,
431 | "id": {
432 | "@type": "pub.ed25519",
433 | "key": "G6cNAr6wXBBByWDzddEWP5xMFsAcp6y13fXA8Q7EJlM="
434 | }
435 | }
436 | ],
437 | "validator": {
438 | "@type": "validator.config.global",
439 | "zero_state": {
440 | "workchain": -1,
441 | "shard": -9223372036854775808,
442 | "seqno": 0,
443 | "root_hash": "F6OpKZKqvqeFp6CQmFomXNMfMj2EnaUSOXN+Mh+wVWk=",
444 | "file_hash": "XplPz01CXAps5qeSWUtxcyBfdAo5zVb1N979KLSKD24="
445 | },
446 | "init_block": {
447 | "workchain": -1,
448 | "shard": -9223372036854775808,
449 | "seqno": 43792209,
450 | "root_hash": "AvaCaskwbUZCAOkULgK9zqtArFavpqg/KMTu9qO4M1U=",
451 | "file_hash": "mlMbjSH0ahszX7yNIQOOawmCJxzofpaTeqlsrQspqUk="
452 | },
453 | "hardforks": [
454 | {
455 | "workchain": -1,
456 | "shard": -9223372036854775808,
457 | "seqno": 8536841,
458 | "root_hash": "08Kpc9XxrMKC6BF/FeNHPS3MEL1/Vi/fQU/C9ELUrkc=",
459 | "file_hash": "t/9VBPODF7Zdh4nsnA49dprO69nQNMqYL+zk5bCjV/8="
460 | }
461 | ]
462 | }
463 | }
464 | `
465 |
--------------------------------------------------------------------------------
/db/entity.go:
--------------------------------------------------------------------------------
1 | package db
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "github.com/syndtr/goleveldb/leveldb/util"
7 | "github.com/xssnick/tonutils-storage/storage"
8 | )
9 |
10 | func (s *Storage) SetActiveFiles(bagId []byte, ids []uint32) error {
11 | if len(bagId) != 32 {
12 | panic("invalid bag id len, should be 32")
13 | }
14 |
15 | k := make([]byte, 3+32)
16 | copy(k, "ai:")
17 | copy(k[3:3+32], bagId)
18 |
19 | v := make([]byte, len(ids)*4)
20 | for i := 0; i < len(ids); i++ {
21 | binary.LittleEndian.PutUint32(v[i*4:], ids[i])
22 | }
23 | defer s.notify(EventTorrentUpdated)
24 |
25 | return s.db.Put(k, v, nil)
26 | }
27 |
28 | func (s *Storage) GetActiveFiles(bagId []byte) ([]uint32, error) {
29 | if len(bagId) != 32 {
30 | panic("invalid bag id len, should be 32")
31 | }
32 |
33 | k := make([]byte, 3+32)
34 | copy(k, "ai:")
35 | copy(k[3:3+32], bagId)
36 |
37 | res, err := s.db.Get(k, nil)
38 | if err != nil {
39 | return nil, err
40 | }
41 |
42 | var files = make([]uint32, len(res)/4)
43 | for i := 0; i < len(res)/4; i++ {
44 | files[i] = binary.LittleEndian.Uint32(res[i*4:])
45 | }
46 | return files, nil
47 | }
48 |
49 | func (s *Storage) GetPiece(bagId []byte, id uint32) (*storage.PieceInfo, error) {
50 | if len(bagId) != 32 {
51 | panic("invalid bag id len, should be 32")
52 | }
53 |
54 | k := make([]byte, 3+32+4)
55 | copy(k, "pc:")
56 | copy(k[3:3+32], bagId)
57 | binary.LittleEndian.PutUint32(k[3+32:], id)
58 |
59 | res, err := s.db.Get(k, nil)
60 | if err != nil {
61 | return nil, err
62 | }
63 |
64 | return &storage.PieceInfo{
65 | StartFileIndex: binary.LittleEndian.Uint32(res),
66 | Proof: res[4:],
67 | }, nil
68 | }
69 |
70 | func (s *Storage) RemovePiece(bagId []byte, id uint32) error {
71 | if len(bagId) != 32 {
72 | panic("invalid bag id len, should be 32")
73 | }
74 |
75 | k := make([]byte, 3+32+4)
76 | copy(k, "pc:")
77 | copy(k[3:3+32], bagId)
78 | binary.LittleEndian.PutUint32(k[3+32:], id)
79 |
80 | defer s.notify(EventTorrentUpdated)
81 | return s.db.Delete(k, nil)
82 | }
83 |
84 | func (s *Storage) SetPiece(bagId []byte, id uint32, p *storage.PieceInfo) error {
85 | if len(bagId) != 32 {
86 | panic("invalid bag id len, should be 32")
87 | }
88 |
89 | k := make([]byte, 3+32+4)
90 | copy(k, "pc:")
91 | copy(k[3:3+32], bagId)
92 | binary.LittleEndian.PutUint32(k[3+32:], id)
93 |
94 | v := make([]byte, 4+len(p.Proof))
95 | binary.LittleEndian.PutUint32(v, p.StartFileIndex)
96 | copy(v[4:], p.Proof)
97 |
98 | defer s.notify(EventTorrentUpdated)
99 | return s.db.Put(k, v, nil)
100 | }
101 |
102 | func (s *Storage) PiecesMask(bagId []byte, num uint32) []byte {
103 | if len(bagId) != 32 {
104 | panic("invalid bag id len, should be 32")
105 | }
106 |
107 | k := make([]byte, 3+32)
108 | copy(k, "pc:")
109 | copy(k[3:3+32], bagId)
110 |
111 | p := num / 8
112 | if num%8 != 0 {
113 | p++
114 | }
115 |
116 | mask := make([]byte, p)
117 | iter := s.db.NewIterator(&util.Range{Start: k}, nil)
118 | for iter.Next() {
119 | key := iter.Key()
120 | if !bytes.HasPrefix(key, k) {
121 | break
122 | }
123 | id := binary.LittleEndian.Uint32(key[len(k):])
124 | mask[id/8] |= 1 << (id % 8)
125 | }
126 | return mask
127 | }
128 |
--------------------------------------------------------------------------------
/db/fs.go:
--------------------------------------------------------------------------------
1 | package db
2 |
3 | import (
4 | "fmt"
5 | "github.com/xssnick/tonutils-storage/storage"
6 | "io"
7 | "os"
8 | "path/filepath"
9 | "strings"
10 | )
11 |
// OsFs is an operating-system-backed file system implementation; removals
// (and, elsewhere, cached reads) are routed through the FSController.
type OsFs struct {
	ctrl storage.FSController // descriptor cache / removal controller
}
15 |
// GetController returns the FSController backing this file system.
func (o *OsFs) GetController() storage.FSController {
	return o.ctrl
}
19 |
20 | func (o *OsFs) Open(name string, mode storage.OpenMode) (storage.FSFile, error) {
21 | if mode == storage.OpenModeWrite {
22 | if err := os.MkdirAll(filepath.Dir(name), os.ModePerm); err != nil {
23 | return nil, err
24 | }
25 |
26 | f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0666)
27 | if err != nil {
28 | return nil, fmt.Errorf("failed to open/create file %s: %w", name, err)
29 | }
30 | return f, nil
31 | }
32 | panic("unsupported mode")
33 | }
34 |
35 | func (o *OsFs) Exists(name string) bool {
36 | _, err := os.Stat(name)
37 | return err == nil
38 | }
39 |
// Delete removes the named file via the FSController, so any cached
// descriptor for it is handled by the controller as well.
func (o *OsFs) Delete(name string) error {
	return o.ctrl.RemoveFile(name)
}
43 |
// GetFS returns the OS-backed file system used by this storage.
func (s *Storage) GetFS() storage.FS {
	return &s.fs
}
47 |
// fileInfo is a storage.FileRef implementation describing one on-disk file.
type fileInfo struct {
	name string // path relative to the bag root, unix-style separators
	size uint64 // size in bytes, determined by seeking to the file end
	path string // absolute path on disk
}
53 |
// GetName returns the file's path relative to the bag root (unix-style).
func (f fileInfo) GetName() string {
	return f.name
}
57 |
// GetSize returns the file size in bytes.
func (f fileInfo) GetSize() uint64 {
	return f.size
}
61 |
62 | func (f fileInfo) CreateReader() (io.ReaderAt, func() error, error) {
63 | fl, err := os.Open(f.path)
64 | if err != nil {
65 | return nil, nil, fmt.Errorf("failed to open file %s: %w", f.path, err)
66 | }
67 | return fl, fl.Close, nil
68 | }
69 |
70 | func (s *Storage) DetectFileRefs(path string, only ...map[string]bool) (rootPath string, dirName string, _ []storage.FileRef, _ error) {
71 | path, err := filepath.Abs(path)
72 | if err != nil {
73 | return "", "", nil, err
74 | }
75 |
76 | fi, err := os.Stat(path)
77 | if err != nil {
78 | return "", "", nil, fmt.Errorf("failed to stat file %s: %w", path, err)
79 | }
80 |
81 | if fi.IsDir() {
82 | files, err := s.GetAllFilesRefsInDir(path, only...)
83 | if err != nil {
84 | return "", "", nil, err
85 | }
86 |
87 | dir := filepath.Base(path) + "/"
88 | if strings.HasPrefix(dir, ".") || strings.HasPrefix(dir, "/") {
89 | // fallback to empty name
90 | dir = ""
91 | }
92 |
93 | return filepath.Dir(path), dir, files, nil
94 | }
95 |
96 | if only != nil && only[0] != nil && !only[0][path] {
97 | return filepath.Dir(path), "", []storage.FileRef{}, nil
98 | }
99 |
100 | file, err := s.GetSingleFileRef(path)
101 | if err != nil {
102 | return "", "", nil, err
103 | }
104 |
105 | return filepath.Dir(path), "", []storage.FileRef{file}, nil
106 | }
107 |
108 | func (s *Storage) GetSingleFileRef(path string) (storage.FileRef, error) {
109 | path, err := filepath.Abs(path)
110 | if err != nil {
111 | return nil, err
112 | }
113 |
114 | // stat is not always gives the right file size, so we open file and find the end
115 | fl, err := os.Open(path)
116 | if err != nil {
117 | return nil, fmt.Errorf("failed to open file %s: %w", path, err)
118 | }
119 | defer fl.Close()
120 |
121 | sz, err := fl.Seek(0, io.SeekEnd)
122 | if err != nil {
123 | return nil, fmt.Errorf("failed to seek file end %s: %w", path, err)
124 | }
125 |
126 | return fileInfo{
127 | name: filepath.Base(path),
128 | size: uint64(sz),
129 | path: path,
130 | }, nil
131 | }
132 |
133 | func (s *Storage) GetAllFilesRefsInDir(path string, only ...map[string]bool) ([]storage.FileRef, error) {
134 | path, err := filepath.Abs(path)
135 | if err != nil {
136 | return nil, err
137 | }
138 |
139 | var keepOnly map[string]bool
140 | if len(only) > 0 && only[0] != nil {
141 | keepOnly = only[0]
142 | }
143 |
144 | var files []storage.FileRef
145 | err = filepath.Walk(path, func(filePath string, f os.FileInfo, err error) error {
146 | if err != nil {
147 | return err
148 | }
149 |
150 | if f.IsDir() {
151 | return nil
152 | }
153 |
154 | if keepOnly != nil && !keepOnly[filePath] {
155 | // excluded by filters
156 | return nil
157 | }
158 |
159 | name := filePath[len(path)+1:]
160 | name = strings.ReplaceAll(name, "\\", "/") // to unix style
161 |
162 | nameLower := strings.ToLower(name)
163 | if name == ".DS_Store" || nameLower == "desktop.ini" || nameLower == "thumbs.db" {
164 | // exclude OS-created files that can be modified automatically and thus break some torrent pieces
165 | return nil
166 | }
167 |
168 | // stat is not always gives the right file size, so we open file and find the end
169 | fl, err := os.Open(filePath)
170 | if err != nil {
171 | return fmt.Errorf("failed to open file %s: %w", filePath, err)
172 | }
173 |
174 | sz, err := fl.Seek(0, io.SeekEnd)
175 | fl.Close()
176 | if err != nil {
177 | return fmt.Errorf("failed to seek file end %s: %w", filePath, err)
178 | }
179 |
180 | files = append(files, fileInfo{
181 | name: name,
182 | size: uint64(sz),
183 | path: filePath,
184 | })
185 | return nil
186 | })
187 | if err != nil {
188 | err = fmt.Errorf("failed to scan directory '%s': %w", path, err)
189 | return nil, err
190 | }
191 | return files, nil
192 | }
193 |
--------------------------------------------------------------------------------
/db/fscache.go:
--------------------------------------------------------------------------------
1 | package db
2 |
3 | import (
4 | "log"
5 | "os"
6 | "path/filepath"
7 | "runtime"
8 | "sync"
9 | "time"
10 | )
11 |
// FDescCache is one cached open file descriptor plus the bookkeeping used
// for least-recently-used eviction.
type FDescCache struct {
	file   *os.File   // open handle; set right after a successful os.Open
	usedAt time.Time  // last use time, compared by clean() to pick an eviction victim
	path   string     // file path; also the key in FSControllerCache.dsc
	mx     sync.Mutex // held for the duration of each read and during eviction/removal
}
18 |
// FSControllerCache caches file descriptors to avoid unnecessary open/close of most used files
type FSControllerCache struct {
	dsc      map[string]*FDescCache // cached descriptors keyed by file path
	mx       sync.RWMutex           // guards dsc
	noRemove bool                   // when set, RemoveFile only logs and does nothing
}

// CachedFDLimit is the maximum number of descriptors kept open at once;
// AcquireRead evicts an unused entry before opening a new file past this limit.
var CachedFDLimit = 800
27 |
28 | func NewFSControllerCache(noRemove bool) *FSControllerCache {
29 | return &FSControllerCache{
30 | dsc: map[string]*FDescCache{},
31 | noRemove: noRemove,
32 | }
33 | }
34 |
// AcquireRead reads len(p) bytes at offset off from the file at path,
// reusing a cached descriptor when one exists, otherwise opening the file
// and caching it (evicting an unused entry first when the cache is full).
// The descriptor's own mutex is held for the duration of the read.
func (f *FSControllerCache) AcquireRead(path string, p []byte, off int64) (n int, err error) {
	// fast path: already cached; returns with desc.mx held and usedAt refreshed
	desc := f.acquire(path)

	if desc == nil {
		f.mx.Lock()
		// re-check under the write lock: another goroutine may have inserted
		// the descriptor between acquire() returning nil and here
		desc = f.dsc[path]
		if desc == nil {
			if len(f.dsc) >= CachedFDLimit {
				for !f.clean() {
					// Retry till we clean something
					runtime.Gosched()
				}
			}

			desc = &FDescCache{
				path:   path,
				usedAt: time.Now(),
			}
			// lock before publishing in the map, so other goroutines that find
			// this entry block until the file is actually opened below
			desc.mx.Lock()

			f.dsc[path] = desc
			f.mx.Unlock()

			fl, err := os.Open(path)
			if err != nil {
				f.mx.Lock()
				// unlikely, rare case
				delete(f.dsc, path)
				f.mx.Unlock()

				desc.mx.Unlock()
				return 0, err
			}
			desc.file = fl
		} else {
			// someone else created it; wait for it to become available
			f.mx.Unlock()
			desc.mx.Lock()
		}
		desc.usedAt = time.Now()
	}
	defer desc.mx.Unlock()

	return desc.file.ReadAt(p, off)
}
79 |
// acquire returns the cached descriptor for path with its mutex locked and
// usedAt refreshed, or nil when the path is not cached.
// NOTE(review): desc.mx is taken only after f.mx is released, so a
// concurrent clean() could evict and close the entry in that window,
// leaving the caller reading from a closed file — confirm whether this
// window is acceptable.
func (f *FSControllerCache) acquire(path string) *FDescCache {
	f.mx.RLock()
	desc, ok := f.dsc[path]
	f.mx.RUnlock()
	if ok {
		desc.mx.Lock()
		desc.usedAt = time.Now()
		return desc
	}
	return nil
}
91 |
// clean removes the oldest and currently unused file descriptor without sorting
// It reports whether an entry was evicted; its only caller (AcquireRead)
// invokes it while holding f.mx, which is what protects the map scan.
// NOTE(review): usedAt is read here without holding desc.mx, so the scan may
// race with readers refreshing usedAt — confirm this is tolerated.
func (f *FSControllerCache) clean() bool {
	var oldest *FDescCache

	// Find the oldest descriptor
	for _, desc := range f.dsc {
		if oldest == nil || desc.usedAt.Before(oldest.usedAt) {
			oldest = desc
		}
	}

	// TryLock fails for a descriptor that is mid-read, so in-use files are
	// skipped and the caller retries
	if oldest != nil && oldest.mx.TryLock() {
		defer oldest.mx.Unlock()
		_ = oldest.file.Close()
		delete(f.dsc, oldest.path)
		return true
	}
	return false
}
111 |
112 | func (f *FSControllerCache) RemoveFile(path string) (err error) {
113 | if f.noRemove {
114 | log.Println("attempt to remove file skipped because no-remove flag is set, file", path)
115 | return nil
116 | }
117 |
118 | f.mx.Lock()
119 | defer f.mx.Unlock()
120 |
121 | if desc := f.dsc[path]; desc != nil {
122 | desc.mx.Lock()
123 | _ = desc.file.Close()
124 | desc.mx.Unlock()
125 | delete(f.dsc, path)
126 | }
127 |
128 | if runtime.GOOS == "windows" {
129 | if err = os.Remove(filepath.Clean(path)); err == nil {
130 | return
131 | }
132 |
133 | log.Println("first delete attempt failed, retrying async in 50ms, file", path)
134 | go func() {
135 | // windows can still hold a file for some time, so we retry
136 | for i := 0; i < 4; i++ {
137 | err = os.Remove(filepath.Clean(path))
138 | if err == nil {
139 | log.Println("removed asynchronously", path)
140 | return
141 | }
142 | time.Sleep(50 * time.Millisecond)
143 | }
144 | log.Println("async removal failed, file", path)
145 | }()
146 |
147 | return nil
148 | }
149 |
150 | return os.Remove(filepath.Clean(path))
151 | }
152 |
--------------------------------------------------------------------------------
/db/fscache_test.go:
--------------------------------------------------------------------------------
1 | package db
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "testing"
7 |
8 | "golang.org/x/sync/errgroup"
9 | )
10 |
// testCaseAcquire describes one TestAcquire scenario: a named set of
// temporary file paths to read concurrently through the descriptor cache.
type testCaseAcquire struct {
	name  string   // sub-test name
	paths []string // temp files created up front; removed by cleanTmpFiles
}
15 |
16 | func TestAcquire(t *testing.T) {
17 | tcs := genTestCasesAcquire()
18 | for _, tc := range tcs {
19 | t.Run(tc.name, func(t *testing.T) {
20 | defer cleanTmpFiles(tc.paths)
21 | err := testAcquire(tc.paths)
22 | if err != nil {
23 | t.Fatal(fmt.Sprintf("error acquiring error: %s", err.Error()))
24 | }
25 | })
26 | }
27 | }
28 |
29 | func BenchmarkAcquire(b *testing.B) {
30 | paths := createTmpFiles(2000)
31 | defer cleanTmpFiles(paths)
32 | for i := 0; i < b.N; i++ {
33 | testAcquire(paths)
34 | }
35 | }
36 |
37 | func testAcquire(paths []string) error {
38 | fs := NewFSControllerCache(false)
39 | // acquire all the fd in tc.paths
40 | eg := new(errgroup.Group)
41 | for _, p := range paths {
42 | p := p
43 | eg.Go(func() error {
44 | _, err := fs.AcquireRead(p, make([]byte, 1), 0)
45 | return err
46 | })
47 | }
48 |
49 | return eg.Wait()
50 | }
51 |
52 | func genTestCasesAcquire() []testCaseAcquire {
53 | return []testCaseAcquire{
54 | {
55 | name: "acquiring 3 file descriptors",
56 | paths: createTmpFiles(3),
57 | },
58 | {
59 | name: "acquiring 797 file descriptors",
60 | paths: createTmpFiles(797),
61 | },
62 | {
63 | name: "acquiring _FDLimit file descriptors",
64 | paths: createTmpFiles(CachedFDLimit),
65 | },
66 | {
67 | name: "acquiring 2000 file descriptors",
68 | paths: createTmpFiles(2000),
69 | },
70 | }
71 | }
72 |
73 | func createTmpFiles(n int) []string {
74 | paths := make([]string, n)
75 | for i := 0; i < n; i++ {
76 | f, _ := os.CreateTemp("", "dump_")
77 | f.WriteString(fmt.Sprint(i))
78 | p := f.Name()
79 | f.Close()
80 | paths[i] = p
81 | }
82 |
83 | return paths
84 | }
85 |
86 | func cleanTmpFiles(paths []string) {
87 | // removing temporary files from system
88 | for _, p := range paths {
89 | os.Remove(p)
90 | }
91 | }
92 |
--------------------------------------------------------------------------------
/db/remove-dir.go:
--------------------------------------------------------------------------------
1 | package db
2 |
3 | import (
4 | "github.com/xssnick/tonutils-storage/storage"
5 | "log"
6 | "os"
7 | "path/filepath"
8 | )
9 |
// Node is one file-system entry in the tree built by buildTreeFromDir.
type Node struct {
	Id       string  // path of this entry (baseDir joined with child names)
	Children []*Node // direct entries; populated only for scanned directories
}
14 |
15 | func buildTreeFromDir(baseDir string) *Node {
16 | _, err := os.ReadDir(baseDir)
17 | if err != nil {
18 | return nil
19 | }
20 | root := &Node{
21 | Id: baseDir,
22 | }
23 | queue := make(chan *Node, 100)
24 | queue <- root
25 | for {
26 | if len(queue) == 0 {
27 | break
28 | }
29 | data, ok := <-queue
30 | if ok {
31 | // Iterate all the contents in the dir
32 | curDir := (*data).Id
33 | if ifDir(curDir) {
34 | contents, _ := os.ReadDir(curDir)
35 |
36 | data.Children = make([]*Node, len(contents))
37 | for i, content := range contents {
38 | node := new(Node)
39 | node.Id = filepath.Join(curDir, content.Name())
40 | data.Children[i] = node
41 | if content.IsDir() {
42 | queue <- node
43 | }
44 | }
45 | }
46 | }
47 | }
48 | return root
49 | }
50 |
51 | func ifDir(path string) bool {
52 | file, err := os.Open(path)
53 | if err != nil {
54 | panic(err)
55 | }
56 | defer file.Close()
57 | info, err := file.Stat()
58 | if err != nil {
59 | panic(err)
60 | }
61 | if info.IsDir() {
62 | return true
63 | }
64 | return false
65 | }
66 |
67 | func recursiveEmptyDelete(root *Node, fs storage.FSController) {
68 | // If the current root is not pointing to any dir
69 | if root == nil {
70 | return
71 | }
72 | for _, each := range root.Children {
73 | recursiveEmptyDelete(each, fs)
74 | }
75 | if !ifDir(root.Id) {
76 | return
77 | } else if content, _ := os.ReadDir(root.Id); len(content) != 0 {
78 | log.Println("skip remove of", root.Id, "contains unknown files")
79 |
80 | return
81 | }
82 | if err := fs.RemoveFile(root.Id); err != nil {
83 | log.Println("failed to remove", root.Id, err.Error())
84 | return
85 | }
86 | }
87 |
--------------------------------------------------------------------------------
/db/storage.go:
--------------------------------------------------------------------------------
1 | package db
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "encoding/hex"
7 | "encoding/json"
8 | "errors"
9 | "fmt"
10 | "github.com/syndtr/goleveldb/leveldb"
11 | "github.com/syndtr/goleveldb/leveldb/util"
12 | "github.com/xssnick/tonutils-go/adnl"
13 | "github.com/xssnick/tonutils-go/tl"
14 | "github.com/xssnick/tonutils-storage/storage"
15 | "path/filepath"
16 | "sort"
17 | "sync"
18 | "time"
19 | )
20 |
// Event is a storage notification kind delivered through the notifier channel.
type Event int

const (
	// EventTorrentUpdated signals that a torrent or its pieces/active files changed.
	EventTorrentUpdated Event = iota
	// EventUploadUpdated signals that upload statistics of a torrent changed.
	EventUploadUpdated
)
27 |
// Storage manages the set of known torrents, their persistence in leveldb
// and access to the OS file system.
type Storage struct {
	torrents        map[string]*storage.Torrent // keyed by raw bag id bytes
	torrentsOverlay map[string]*storage.Torrent // keyed by overlay id (hash of overlay pubkey)
	connector       storage.NetConnector
	fs              OsFs
	skipVerify      bool
	forcePieceSize  uint32 // from constructor flag; presumably 0 means "not forced" — confirm at call sites

	notifyCh chan Event // optional; events are dropped when full, see notify()
	db       *leveldb.DB
	mx       sync.RWMutex // guards torrents and torrentsOverlay
}
40 |
41 | func NewStorage(db *leveldb.DB, connector storage.NetConnector, forcePieceSize int, startWithoutActiveFilesToo bool, skipVerify bool, noRemove bool, notifier chan Event) (*Storage, error) {
42 | if forcePieceSize < 0 {
43 | return nil, fmt.Errorf("invalid piece size flag")
44 | }
45 |
46 | s := &Storage{
47 | torrents: map[string]*storage.Torrent{},
48 | torrentsOverlay: map[string]*storage.Torrent{},
49 | db: db,
50 | connector: connector,
51 | fs: OsFs{
52 | ctrl: NewFSControllerCache(noRemove),
53 | },
54 | notifyCh: notifier,
55 | skipVerify: skipVerify,
56 | forcePieceSize: uint32(forcePieceSize),
57 | }
58 |
59 | err := s.loadTorrents(startWithoutActiveFilesToo)
60 | if err != nil {
61 | return nil, err
62 | }
63 |
64 | return s, nil
65 | }
66 |
// VerifyOnStartup reports whether data should be verified on startup
// (true unless the skip-verify option was set at construction).
func (s *Storage) VerifyOnStartup() bool {
	return !s.skipVerify
}
70 |
// GetForcedPieceSize returns the piece size configured via the constructor
// flag (0 when none was set).
func (s *Storage) GetForcedPieceSize() uint32 {
	return s.forcePieceSize
}
74 |
// GetTorrent returns the torrent with the given bag id, or nil if unknown.
func (s *Storage) GetTorrent(hash []byte) *storage.Torrent {
	s.mx.RLock()
	defer s.mx.RUnlock()

	return s.torrents[string(hash)]
}
81 |
// GetTorrentByOverlay returns the torrent registered under the given overlay
// id, or nil if unknown.
func (s *Storage) GetTorrentByOverlay(overlay []byte) *storage.Torrent {
	s.mx.RLock()
	defer s.mx.RUnlock()

	return s.torrentsOverlay[string(overlay)]
}
88 |
89 | func (s *Storage) GetAll() []*storage.Torrent {
90 | s.mx.RLock()
91 | defer s.mx.RUnlock()
92 |
93 | res := make([]*storage.Torrent, 0, len(s.torrents))
94 | for _, t := range s.torrents {
95 | res = append(res, t)
96 | }
97 | sort.Slice(res, func(i, j int) bool {
98 | return res[i].CreatedAt.Unix() > res[j].CreatedAt.Unix()
99 | })
100 | return res
101 | }
102 |
103 | func (s *Storage) SetSpeedLimits(download, upload uint64) error {
104 | k := make([]byte, 13)
105 | copy(k, "speed_limits:")
106 |
107 | data := make([]byte, 16)
108 | binary.LittleEndian.PutUint64(data, download)
109 | binary.LittleEndian.PutUint64(data[8:], upload)
110 |
111 | return s.db.Put(k, data, nil)
112 | }
113 |
114 | func (s *Storage) GetSpeedLimits() (download uint64, upload uint64, err error) {
115 | k := make([]byte, 13)
116 | copy(k, "speed_limits:")
117 |
118 | var data []byte
119 | data, err = s.db.Get(k, nil)
120 | if err != nil {
121 | if errors.Is(err, leveldb.ErrNotFound) {
122 | return 0, 0, nil
123 | }
124 | return 0, 0, err
125 | }
126 | return binary.LittleEndian.Uint64(data), binary.LittleEndian.Uint64(data[8:]), nil
127 | }
128 |
// RemoveTorrent stops the torrent, unregisters it from the in-memory
// indexes, deletes its db records (bag entry, upload stats, piece records)
// and, when withFiles is set, removes its files and then any directories
// left empty by that removal.
func (s *Storage) RemoveTorrent(t *storage.Torrent, withFiles bool) error {
	// overlay id is the hash of the overlay public key derived from the bag id,
	// same derivation as in addTorrent
	id, err := tl.Hash(adnl.PublicKeyOverlay{Key: t.BagID})
	if err != nil {
		return err
	}
	s.mx.Lock()
	delete(s.torrents, string(t.BagID))
	delete(s.torrentsOverlay, string(id))
	s.mx.Unlock()

	t.Stop()

	b := &leveldb.Batch{}
	b.Delete(append([]byte("bags:"), t.BagID...))
	b.Delete(append([]byte("upl_stat:"), t.BagID...))

	if err = s.db.Write(b, nil); err != nil {
		return err
	}

	if t.Header != nil {
		if withFiles {
			list, err := t.ListFiles()
			if err == nil {
				for _, f := range list {
					path := filepath.Clean(t.Path + "/" + string(t.Header.DirName) + "/" + f)
					// best-effort: keep removing the remaining files even if one fails
					if errR := s.fs.GetController().RemoveFile(path); errR != nil {
						println("remove err, skip", path, errR.Error())
					}
				}
			}
			// drop directories that became empty after the file removal above
			recursiveEmptyDelete(buildTreeFromDir(t.Path+"/"+string(t.Header.DirName)), s.fs.GetController())
		}
	}

	if t.Info != nil {
		// wipe the per-piece records of this bag; errors are ignored since
		// missing pieces are expected for partially downloaded bags
		num := t.Info.PiecesNum()
		for i := uint32(0); i < num; i++ {
			_ = s.RemovePiece(t.BagID, i)
		}
	}
	s.notify(EventTorrentUpdated)
	return nil
}
173 |
174 | func (s *Storage) SetTorrent(t *storage.Torrent) error {
175 | activeDownload, activeUpload := t.IsActiveRaw()
176 | data, err := json.Marshal(&TorrentStored{
177 | BagID: t.BagID,
178 | Path: t.Path,
179 | Info: t.Info,
180 | Header: t.Header,
181 | CreatedAt: t.CreatedAt,
182 | ActiveUpload: activeUpload,
183 | ActiveDownload: activeDownload,
184 | DownloadAll: t.IsDownloadAll(),
185 | DownloadOrdered: t.IsDownloadOrdered(),
186 | CreatedLocally: t.CreatedLocally,
187 | })
188 | if err != nil {
189 | return err
190 | }
191 |
192 | k := make([]byte, 5+32)
193 | copy(k, "bags:")
194 | copy(k[5:], t.BagID)
195 |
196 | err = s.db.Put(k, data, nil)
197 | if err != nil {
198 | return err
199 | }
200 |
201 | return s.addTorrent(t)
202 | }
203 |
204 | func (s *Storage) addTorrent(t *storage.Torrent) error {
205 | id, err := tl.Hash(adnl.PublicKeyOverlay{Key: t.BagID})
206 | if err != nil {
207 | return err
208 | }
209 |
210 | s.mx.Lock()
211 | s.torrents[string(t.BagID)] = t
212 | s.torrentsOverlay[string(id)] = t
213 | s.mx.Unlock()
214 | s.notify(EventTorrentUpdated)
215 | return nil
216 | }
217 |
// TorrentStored is the JSON-serializable snapshot of a torrent persisted
// under the "bags:" keyspace; see SetTorrent/loadTorrents.
type TorrentStored struct {
	BagID          []byte
	Path           string
	Info           *storage.TorrentInfo
	Header         *storage.TorrentHeader
	CreatedAt      time.Time
	CreatedLocally bool

	// activity flags captured at save time, used to resume state on load
	ActiveUpload    bool
	ActiveDownload  bool
	DownloadAll     bool
	DownloadOrdered bool
}
231 |
232 | func (s *Storage) loadTorrents(startWithoutActiveFilesToo bool) error {
233 | iter := s.db.NewIterator(&util.Range{Start: []byte("bags:")}, nil)
234 | for iter.Next() {
235 | if !bytes.HasPrefix(iter.Key(), []byte("bags:")) {
236 | break
237 | }
238 |
239 | var tr TorrentStored
240 | err := json.Unmarshal(iter.Value(), &tr)
241 | if err != nil {
242 | return fmt.Errorf("failed to load %s from db: %w", hex.EncodeToString(iter.Key()[5:]), err)
243 | }
244 |
245 | t := storage.NewTorrent(tr.Path, s, s.connector)
246 | t.Info = tr.Info
247 | t.Header = tr.Header
248 | t.BagID = tr.BagID
249 | t.CreatedAt = tr.CreatedAt
250 | t.CreatedLocally = tr.CreatedLocally
251 |
252 | uplStat, err := s.db.Get(append([]byte("upl_stat:"), t.BagID...), nil)
253 | if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
254 | return fmt.Errorf("failed to load upload stats of %s from db: %w", hex.EncodeToString(iter.Key()[5:]), err)
255 | }
256 |
257 | if t.Info != nil {
258 | t.InitMask()
259 | // cache header
260 | /*err = t.BuildCache(int(t.Info.HeaderSize/uint64(t.Info.PieceSize)) + 1)
261 | if err != nil {
262 | log.Printf("failed to build cache for %s: %s", hex.EncodeToString(t.BagID), err.Error())
263 | continue
264 | }*/
265 | _ = t.LoadActiveFilesIDs()
266 |
267 | if len(uplStat) == 8 {
268 | t.SetUploadStats(binary.LittleEndian.Uint64(uplStat))
269 | }
270 | }
271 |
272 | if tr.ActiveDownload {
273 | if startWithoutActiveFilesToo || len(t.GetActiveFilesIDs()) > 0 {
274 | err = t.Start(tr.ActiveUpload, tr.DownloadAll, tr.DownloadOrdered)
275 | if err != nil {
276 | return fmt.Errorf("failed to startd download %s: %w", hex.EncodeToString(iter.Key()[5:]), err)
277 | }
278 | }
279 | }
280 |
281 | err = s.addTorrent(t)
282 | if err != nil {
283 | return fmt.Errorf("failed to add torrent %s from db: %w", hex.EncodeToString(t.BagID), err)
284 | }
285 | }
286 |
287 | return nil
288 | }
289 |
290 | func (s *Storage) UpdateUploadStats(bagId []byte, val uint64) error {
291 | k := make([]byte, 9+32)
292 | copy(k, "upl_stat:")
293 | copy(k[9:], bagId)
294 |
295 | data := make([]byte, 8)
296 | binary.LittleEndian.PutUint64(data, val)
297 |
298 | if err := s.db.Put(k, data, nil); err != nil {
299 | return err
300 | }
301 | s.notify(EventUploadUpdated)
302 | return nil
303 | }
304 |
305 | func (s *Storage) notify(e Event) {
306 | if s.notifyCh != nil {
307 | select {
308 | case s.notifyCh <- e:
309 | default:
310 | }
311 | }
312 | }
313 |
314 | func (s *Storage) Close() {
315 | s.mx.Lock()
316 | defer s.mx.Unlock()
317 |
318 | for _, t := range s.torrents {
319 | t.Stop()
320 | }
321 |
322 | s.db.Close()
323 | }
324 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/xssnick/tonutils-storage
2 |
3 | go 1.23.3
4 |
5 | require (
6 | github.com/kevinms/leakybucket-go v0.0.0-20200115003610-082473db97ca
7 | github.com/pterm/pterm v0.12.80
8 | github.com/rs/zerolog v1.34.0
9 | github.com/syndtr/goleveldb v1.0.0
10 | github.com/ton-blockchain/adnl-tunnel v0.1.3
11 | github.com/xssnick/tonutils-go v1.13.0
12 | github.com/xssnick/tonutils-storage-provider v0.3.6
13 | golang.org/x/sync v0.14.0
14 | )
15 |
16 | require (
17 | atomicgo.dev/schedule v0.1.0 // indirect
18 | github.com/beorn7/perks v1.0.1 // indirect
19 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
20 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
21 | github.com/prometheus/client_golang v1.22.0 // indirect
22 | github.com/prometheus/client_model v0.6.1 // indirect
23 | github.com/prometheus/common v0.62.0 // indirect
24 | github.com/prometheus/procfs v0.15.1 // indirect
25 | google.golang.org/protobuf v1.36.5 // indirect
26 | )
27 |
28 | require (
29 | atomicgo.dev/cursor v0.2.0 // indirect
30 | atomicgo.dev/keyboard v0.2.9 // indirect
31 | github.com/containerd/console v1.0.4 // indirect
32 | github.com/golang/snappy v1.0.0 // indirect
33 | github.com/gookit/color v1.5.4 // indirect
34 | github.com/lithammer/fuzzysearch v1.1.8 // indirect
35 | github.com/mattn/go-colorable v0.1.14 // indirect
36 | github.com/mattn/go-isatty v0.0.20 // indirect
37 | github.com/mattn/go-runewidth v0.0.16 // indirect
38 | github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a // indirect
39 | github.com/rivo/uniseg v0.4.7 // indirect
40 | github.com/sigurn/crc16 v0.0.0-20240131213347-83fcde1e29d1 // indirect
41 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
42 | github.com/xssnick/raptorq v1.0.0 // indirect
43 | github.com/xssnick/ton-payment-network v0.2.1 // indirect
44 | golang.org/x/crypto v0.38.0 // indirect
45 | golang.org/x/sys v0.33.0 // indirect
46 | golang.org/x/term v0.32.0 // indirect
47 | 	golang.org/x/text v0.25.0 // indirect
48 | )
49 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | atomicgo.dev/assert v0.0.2 h1:FiKeMiZSgRrZsPo9qn/7vmr7mCsh5SZyXY4YGYiYwrg=
2 | atomicgo.dev/assert v0.0.2/go.mod h1:ut4NcI3QDdJtlmAxQULOmA13Gz6e2DWbSAS8RUOmNYQ=
3 | atomicgo.dev/cursor v0.2.0 h1:H6XN5alUJ52FZZUkI7AlJbUc1aW38GWZalpYRPpoPOw=
4 | atomicgo.dev/cursor v0.2.0/go.mod h1:Lr4ZJB3U7DfPPOkbH7/6TOtJ4vFGHlgj1nc+n900IpU=
5 | atomicgo.dev/keyboard v0.2.9 h1:tOsIid3nlPLZ3lwgG8KZMp/SFmr7P0ssEN5JUsm78K8=
6 | atomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtExQ=
7 | atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs=
8 | atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU=
9 | github.com/MarvinJWendt/testza v0.1.0/go.mod h1:7AxNvlfeHP7Z/hDQ5JtE3OKYT3XFUeLCDE2DQninSqs=
10 | github.com/MarvinJWendt/testza v0.2.1/go.mod h1:God7bhG8n6uQxwdScay+gjm9/LnO4D3kkcZX4hv9Rp8=
11 | github.com/MarvinJWendt/testza v0.2.8/go.mod h1:nwIcjmr0Zz+Rcwfh3/4UhBp7ePKVhuBExvZqnKYWlII=
12 | github.com/MarvinJWendt/testza v0.2.10/go.mod h1:pd+VWsoGUiFtq+hRKSU1Bktnn+DMCSrDrXDpX2bG66k=
13 | github.com/MarvinJWendt/testza v0.2.12/go.mod h1:JOIegYyV7rX+7VZ9r77L/eH6CfJHHzXjB69adAhzZkI=
14 | github.com/MarvinJWendt/testza v0.3.0/go.mod h1:eFcL4I0idjtIx8P9C6KkAuLgATNKpX4/2oUqKc6bF2c=
15 | github.com/MarvinJWendt/testza v0.4.2/go.mod h1:mSdhXiKH8sg/gQehJ63bINcCKp7RtYewEjXsvsVUPbE=
16 | github.com/MarvinJWendt/testza v0.5.2 h1:53KDo64C1z/h/d/stCYCPY69bt/OSwjq5KpFNwi+zB4=
17 | github.com/MarvinJWendt/testza v0.5.2/go.mod h1:xu53QFE5sCdjtMCKk8YMQ2MnymimEctc4n3EjyIYvEY=
18 | github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
19 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
20 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
21 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
22 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
23 | github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
24 | github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
25 | github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
26 | github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
27 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
28 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
29 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
30 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
31 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
32 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
33 | github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
34 | github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
35 | github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
36 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
37 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
38 | github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
39 | github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
40 | github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0=
41 | github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w=
42 | github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
43 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
44 | github.com/kevinms/leakybucket-go v0.0.0-20200115003610-082473db97ca h1:qNtd6alRqd3qOdPrKXMZImV192ngQ0WSh1briEO33Tk=
45 | github.com/kevinms/leakybucket-go v0.0.0-20200115003610-082473db97ca/go.mod h1:ph+C5vpnCcQvKBwJwKLTK3JLNGnBXYlG7m7JjoC/zYA=
46 | github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
47 | github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
48 | github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
49 | github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
50 | github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
51 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
52 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
53 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
54 | github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4=
55 | github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
56 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
57 | github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
58 | github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
59 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
60 | github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
61 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
62 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
63 | github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
64 | github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
65 | github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
66 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
67 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
68 | github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a h1:dlRvE5fWabOchtH7znfiFCcOvmIYgOeAS5ifBXBlh9Q=
69 | github.com/oasisprotocol/curve25519-voi v0.0.0-20230904125328-1f23a7beb09a/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s=
70 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
71 | github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
72 | github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
73 | github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
74 | github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
75 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
76 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
77 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
78 | github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
79 | github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
80 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
81 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
82 | github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
83 | github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
84 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
85 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
86 | github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
87 | github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=
88 | github.com/pterm/pterm v0.12.30/go.mod h1:MOqLIyMOgmTDz9yorcYbcw+HsgoZo3BQfg2wtl3HEFE=
89 | github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEejaWgXU=
90 | github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE=
91 | github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
92 | github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
93 | github.com/pterm/pterm v0.12.80 h1:mM55B+GnKUnLMUSqhdINe4s6tOuVQIetQ3my8JGyAIg=
94 | github.com/pterm/pterm v0.12.80/go.mod h1:c6DeF9bSnOSeFPZlfs4ZRAFcf5SCoTwvwQ5xaKGQlHo=
95 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
96 | github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
97 | github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
98 | github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
99 | github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
100 | github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
101 | github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
102 | github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
103 | github.com/sigurn/crc16 v0.0.0-20240131213347-83fcde1e29d1 h1:NVK+OqnavpyFmUiKfUMHrpvbCi2VFoWTrcpI7aDaJ2I=
104 | github.com/sigurn/crc16 v0.0.0-20240131213347-83fcde1e29d1/go.mod h1:9/etS5gpQq9BJsJMWg1wpLbfuSnkm8dPF6FdW2JXVhA=
105 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
106 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
107 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
108 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
109 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
110 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
111 | github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
112 | github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
113 | github.com/ton-blockchain/adnl-tunnel v0.1.3 h1:dnruA+PavLX3xHUzPkcB132WptLlGogpI8xSl1KCu74=
114 | github.com/ton-blockchain/adnl-tunnel v0.1.3/go.mod h1:2IFRabvTHXNbHDNTGvBS70pvf95BZtIHtL2NrX/FLjc=
115 | github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
116 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
117 | github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
118 | github.com/xssnick/raptorq v1.0.0 h1:l77lntIV/W/SV9rZjF4wRpIhikQm8nBHtB3h+qiu2cM=
119 | github.com/xssnick/raptorq v1.0.0/go.mod h1:kgEVVsZv2hP+IeV7C7985KIFsDdvYq2ARW234SBA9Q4=
120 | github.com/xssnick/ton-payment-network v0.2.1 h1:ellYKcIvVb5BFlKuTmToTVkSlr9OCSIdcSpDFguk7jE=
121 | github.com/xssnick/ton-payment-network v0.2.1/go.mod h1:MyVTgwXbVqtUVduMPj047dyTpNqd9h76HyosOPqhMOc=
122 | github.com/xssnick/tonutils-go v1.13.0 h1:LV2JzB+CuuWaLQiYNolK+YI3NRQOpS0W+T+N+ctF6VQ=
123 | github.com/xssnick/tonutils-go v1.13.0/go.mod h1:EDe/9D/HZpAenbR+WPMQHICOF0BZWAe01TU5+Vpg08k=
124 | github.com/xssnick/tonutils-storage-provider v0.3.6 h1:ciYicZNb3Bynav9Ew+AKBkcMSDTZr+1ffbkFCuhor48=
125 | github.com/xssnick/tonutils-storage-provider v0.3.6/go.mod h1:uheEbAIomxxoqDwdlUH7JxegzPKRZYBlgniBr+kGA+E=
126 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
127 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
128 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
129 | golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
130 | golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
131 | golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
132 | golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
133 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
134 | golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
135 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
136 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
137 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
138 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
139 | golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
140 | golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
141 | golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
142 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
143 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
144 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
145 | golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
146 | golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
147 | golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
148 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
149 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
150 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
151 | golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
152 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
153 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
154 | golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
155 | golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
156 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
157 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
158 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
159 | golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
160 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
161 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
162 | golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
163 | golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
164 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
165 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
166 | golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
167 | golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
168 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
169 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
170 | golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
171 | golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
172 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
173 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
174 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
175 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
176 | golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
177 | golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
178 | golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
179 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
180 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
181 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
182 | golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
183 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
184 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
185 | google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
186 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
187 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
188 | gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
189 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
190 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
191 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
192 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
193 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
194 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
195 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
196 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
197 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
198 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
199 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
200 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
201 |
--------------------------------------------------------------------------------
/provider/provider.go:
--------------------------------------------------------------------------------
1 | package provider
2 |
3 | import (
4 | "context"
5 | "encoding/hex"
6 | "errors"
7 | "fmt"
8 | "github.com/xssnick/tonutils-go/address"
9 | "github.com/xssnick/tonutils-go/tlb"
10 | "github.com/xssnick/tonutils-go/ton"
11 | "github.com/xssnick/tonutils-go/tvm/cell"
12 | "github.com/xssnick/tonutils-storage-provider/pkg/contract"
13 | "github.com/xssnick/tonutils-storage-provider/pkg/transport"
14 | "github.com/xssnick/tonutils-storage/db"
15 | "log"
16 | "math"
17 | "math/big"
18 | "math/rand"
19 | "strings"
20 | "sync"
21 | "time"
22 | )
23 |
24 | var Logger = log.Println
25 |
26 | type NewProviderData struct {
27 | Address *address.Address
28 | MaxSpan uint32
29 | PricePerMBDay tlb.Coins
30 | }
31 |
32 | type ProviderContractData struct {
33 | Size uint64
34 | Address *address.Address
35 | Providers []contract.ProviderDataV1
36 | Balance tlb.Coins
37 | }
38 |
39 | type ProviderRates struct {
40 | Available bool
41 | RatePerMBDay tlb.Coins
42 | MinBounty tlb.Coins
43 | SpaceAvailableMB uint64
44 | MinSpan uint32
45 | MaxSpan uint32
46 |
47 | Size uint64
48 | }
49 |
50 | type ProviderStorageInfo struct {
51 | StorageADNL string
52 | Status string
53 | Reason string
54 | Progress float64
55 |
56 | Context context.Context
57 | FetchedAt time.Time
58 | }
59 |
60 | type Client struct {
61 | storage *db.Storage
62 | api ton.APIClientWrapped
63 | provider *transport.Client
64 |
65 | adnlInfo map[string][]byte
66 | infoCache map[string]*ProviderStorageInfo
67 | mx sync.RWMutex
68 | }
69 |
70 | func NewClient(storage *db.Storage, api ton.APIClientWrapped, provider *transport.Client) *Client {
71 | return &Client{
72 | storage: storage,
73 | api: api,
74 | provider: provider,
75 | adnlInfo: map[string][]byte{},
76 | infoCache: map[string]*ProviderStorageInfo{},
77 | }
78 | }
79 |
80 | func (c *Client) FetchProviderContract(ctx context.Context, torrentHash []byte, owner *address.Address) (*ProviderContractData, error) {
81 | t := c.storage.GetTorrent(torrentHash)
82 | if t == nil {
83 | return nil, fmt.Errorf("torrent is not found")
84 | }
85 | if t.Info == nil {
86 | return nil, fmt.Errorf("info is not downloaded")
87 | }
88 |
89 | addr, _, _, err := contract.PrepareV1DeployData(torrentHash, t.Info.RootHash, t.Info.FileSize, t.Info.PieceSize, owner, nil)
90 | if err != nil {
91 | return nil, fmt.Errorf("failed to calc contract addr: %w", err)
92 | }
93 |
94 | master, err := c.api.CurrentMasterchainInfo(ctx)
95 | if err != nil {
96 | return nil, fmt.Errorf("failed to fetch master block: %w", err)
97 | }
98 |
99 | list, balance, err := contract.GetProvidersV1(ctx, c.api, master, addr)
100 | if err != nil {
101 | if errors.Is(err, contract.ErrNotDeployed) {
102 | return nil, contract.ErrNotDeployed
103 | }
104 | return nil, fmt.Errorf("failed to fetch providers list: %w", err)
105 | }
106 |
107 | return &ProviderContractData{
108 | Size: t.Info.FileSize,
109 | Address: addr,
110 | Providers: list,
111 | Balance: balance,
112 | }, nil
113 | }
114 |
115 | func (c *Client) BuildAddProviderTransaction(ctx context.Context, torrentHash []byte, owner *address.Address, providers []NewProviderData) (addr *address.Address, bodyData, stateInit []byte, err error) {
116 | t := c.storage.GetTorrent(torrentHash)
117 | if t == nil {
118 | return nil, nil, nil, fmt.Errorf("torrent is not found")
119 | }
120 | if t.Info == nil {
121 | return nil, nil, nil, fmt.Errorf("info is not downloaded")
122 | }
123 |
124 | var prs []contract.ProviderV1
125 | for _, p := range providers {
126 | prs = append(prs, contract.ProviderV1{
127 | Address: p.Address,
128 | MaxSpan: p.MaxSpan,
129 | PricePerMBDay: p.PricePerMBDay,
130 | })
131 | }
132 |
133 | addr, si, body, err := contract.PrepareV1DeployData(torrentHash, t.Info.RootHash, t.Info.FileSize, t.Info.PieceSize, owner, prs)
134 | if err != nil {
135 | return nil, nil, nil, fmt.Errorf("failed to prepare contract data: %w", err)
136 | }
137 |
138 | siCell, err := tlb.ToCell(si)
139 | if err != nil {
140 | return nil, nil, nil, fmt.Errorf("serialize state init: %w", err)
141 | }
142 | return addr, body.ToBOC(), siCell.ToBOC(), nil
143 | }
144 |
145 | func (c *Client) BuildWithdrawalTransaction(torrentHash []byte, owner *address.Address) (addr *address.Address, bodyData []byte, err error) {
146 | t := c.storage.GetTorrent(torrentHash)
147 | if t == nil {
148 | return nil, nil, fmt.Errorf("torrent is not found")
149 | }
150 | if t.Info == nil {
151 | return nil, nil, fmt.Errorf("info is not downloaded")
152 | }
153 |
154 | addr, body, err := contract.PrepareWithdrawalRequest(torrentHash, t.Info.RootHash, t.Info.FileSize, t.Info.PieceSize, owner)
155 | if err != nil {
156 | return nil, nil, fmt.Errorf("failed to prepare contract data: %w", err)
157 | }
158 |
159 | return addr, body.ToBOC(), nil
160 | }
161 |
162 | func (c *Client) FetchProviderRates(ctx context.Context, torrentHash, providerKey []byte) (*ProviderRates, error) {
163 | t := c.storage.GetTorrent(torrentHash)
164 | if t == nil {
165 | return nil, fmt.Errorf("torrent is not found")
166 | }
167 | if t.Info == nil {
168 | return nil, fmt.Errorf("info is not downloaded")
169 | }
170 |
171 | rates, err := c.provider.GetStorageRates(ctx, providerKey, t.Info.FileSize)
172 | if err != nil {
173 | switch {
174 | case strings.Contains(err.Error(), "value is not found"):
175 | return nil, errors.New("provider is not found")
176 | case strings.Contains(err.Error(), "context deadline exceeded"):
177 | return nil, errors.New("provider is not respond in a given time")
178 | }
179 | return nil, fmt.Errorf("failed to get rates: %w", err)
180 | }
181 |
182 | return &ProviderRates{
183 | Available: rates.Available,
184 | RatePerMBDay: tlb.FromNanoTON(new(big.Int).SetBytes(rates.RatePerMBDay)),
185 | MinBounty: tlb.FromNanoTON(new(big.Int).SetBytes(rates.MinBounty)),
186 | SpaceAvailableMB: rates.SpaceAvailableMB,
187 | MinSpan: rates.MinSpan,
188 | MaxSpan: rates.MaxSpan,
189 | Size: t.Info.FileSize,
190 | }, nil
191 | }
192 |
193 | func (c *Client) RequestProviderStorageInfo(ctx context.Context, torrentHash, providerKey []byte, owner *address.Address) (*ProviderStorageInfo, error) {
194 | t := c.storage.GetTorrent(torrentHash)
195 | if t == nil {
196 | return nil, fmt.Errorf("torrent is not found")
197 | }
198 | if t.Info == nil {
199 | return nil, fmt.Errorf("info is not downloaded")
200 | }
201 |
202 | addr, _, _, err := contract.PrepareV1DeployData(torrentHash, t.Info.RootHash, t.Info.FileSize, t.Info.PieceSize, owner, nil)
203 | if err != nil {
204 | return nil, fmt.Errorf("failed to calc contract addr: %w", err)
205 | }
206 |
207 | c.mx.Lock()
208 | defer c.mx.Unlock()
209 |
210 | mKey := addr.String() + "_" + hex.EncodeToString(providerKey)
211 |
212 | var tm time.Time
213 | v := c.infoCache[mKey]
214 | if v != nil {
215 | tm = v.FetchedAt
216 | } else {
217 | v = &ProviderStorageInfo{
218 | Status: "connecting...",
219 | }
220 | c.infoCache[mKey] = v
221 | }
222 |
223 | // run job if result is older than 10 sec and no another active job
224 | if time.Since(tm) > 5*time.Second && (v.Context == nil || v.Context.Err() != nil) {
225 | var end func()
226 | v.Context, end = context.WithTimeout(context.Background(), 60*time.Second)
227 |
228 | go func() {
229 | defer end()
230 |
231 | sn := time.Now()
232 | var storageADNL string
233 | proofByte := uint64(rand.Int63()) % t.Info.FileSize
234 | info, err := c.provider.RequestStorageInfo(v.Context, providerKey, addr, proofByte)
235 | if err != nil {
236 | Logger("failed to get storage info:", err, "took", time.Since(sn).String())
237 |
238 | c.mx.Lock()
239 | c.infoCache[mKey] = &ProviderStorageInfo{
240 | Status: "inactive",
241 | Reason: err.Error(),
242 | FetchedAt: time.Now(),
243 | }
244 | c.mx.Unlock()
245 | return
246 | }
247 |
248 | progress, _ := new(big.Float).Quo(new(big.Float).SetUint64(info.Downloaded), new(big.Float).SetUint64(t.Info.FileSize)).Float64()
249 |
250 | if info.Status == "active" {
251 | proved := false
252 | // verify proof
253 | proof, err := cell.FromBOC(info.Proof)
254 | if err == nil {
255 | if proofData, err := cell.UnwrapProof(proof, t.Info.RootHash); err == nil {
256 | piece := uint32(proofByte / uint64(t.Info.PieceSize))
257 | pieces := uint32(t.Info.FileSize / uint64(t.Info.PieceSize))
258 |
259 | if err = checkProofBranch(proofData, piece, pieces); err == nil {
260 | info.Reason = fmt.Sprintf("Storage proof received just now, but not connected with peer")
261 | proved = true
262 | }
263 | } else {
264 | Logger("failed to unwrap proof:", err)
265 | }
266 | }
267 |
268 | if !proved {
269 | info.Status = "untrusted"
270 | info.Reason = "Incorrect proof received"
271 | }
272 | } else if info.Status == "downloading" {
273 | info.Reason = fmt.Sprintf("Progress: %.2f", progress*100) + "%"
274 | } else if info.Status == "resolving" {
275 | info.Reason = fmt.Sprintf("Provider is trying to find source to download bag")
276 | } else if info.Status == "warning-balance" {
277 | info.Reason = fmt.Sprintf("Not enough balance to store bag, please topup or it will be deleted soon")
278 | }
279 |
280 | c.mx.RLock()
281 | adnlAddr := c.adnlInfo[mKey]
282 | c.mx.RUnlock()
283 |
284 | if adnlAddr == nil {
285 | xCtx, cancel := context.WithTimeout(v.Context, 10*time.Second)
286 | adnlAddr, err = c.provider.VerifyStorageADNLProof(xCtx, providerKey, addr)
287 | cancel()
288 | if err == nil {
289 | c.mx.Lock()
290 | c.adnlInfo[mKey] = adnlAddr
291 | c.mx.Unlock()
292 | }
293 | }
294 |
295 | if adnlAddr != nil {
296 | storageADNL = strings.ToUpper(hex.EncodeToString(adnlAddr))
297 | }
298 |
299 | c.mx.Lock()
300 | c.infoCache[mKey] = &ProviderStorageInfo{
301 | StorageADNL: storageADNL,
302 | Status: info.Status,
303 | Reason: info.Reason,
304 | Progress: progress * 100,
305 | FetchedAt: time.Now(),
306 | }
307 | c.mx.Unlock()
308 | }()
309 | }
310 |
311 | return v, nil
312 | }
313 |
314 | func checkProofBranch(proof *cell.Cell, piece, piecesNum uint32) error {
315 | if piece >= piecesNum {
316 | return fmt.Errorf("piece is out of range %d/%d", piece, piecesNum)
317 | }
318 |
319 | tree := proof.BeginParse()
320 |
321 | // calc tree depth
322 | depth := int(math.Log2(float64(piecesNum)))
323 | if piecesNum > uint32(math.Pow(2, float64(depth))) {
324 | // add 1 if pieces num is not exact log2
325 | depth++
326 | }
327 |
328 | // check bits from left to right and load branches
329 | for i := depth - 1; i >= 0; i-- {
330 | isLeft := piece&(1< math.MaxInt64/3 {
143 | bytesPerSec = math.MaxInt64 / 3
144 | }
145 |
146 | atomic.StoreUint64(&s.bytesPerSec, bytesPerSec)
147 |
148 | b := leakybucket.NewLeakyBucket(float64(bytesPerSec), int64(bytesPerSec*3))
149 | atomic.StorePointer(&s.bucket, unsafe.Pointer(b))
150 | }
151 |
152 | func (s *speedLimit) GetLimit() uint64 {
153 | return atomic.LoadUint64(&s.bytesPerSec)
154 | }
155 |
156 | func (s *speedLimit) Throttle(_ context.Context, sz uint64) error {
157 | b := (*leakybucket.LeakyBucket)(atomic.LoadPointer(&s.bucket))
158 | if b != nil {
159 | full := uint64(b.Capacity())
160 | if sz < full {
161 | full = sz
162 | }
163 |
164 | if b.Remaining() < int64(full) || b.Add(int64(sz)) == 0 {
165 | return fmt.Errorf("limited")
166 | }
167 | }
168 | return nil
169 | }
170 |
171 | func (c *Connector) GetUploadLimit() uint64 {
172 | return c.uploadLimit.GetLimit()
173 | }
174 |
175 | func (c *Connector) GetDownloadLimit() uint64 {
176 | return c.downloadLimit.GetLimit()
177 | }
178 |
179 | func (c *Connector) SetDownloadLimit(bytesPerSec uint64) {
180 | c.downloadLimit.SetLimit(bytesPerSec)
181 | }
182 |
183 | func (c *Connector) SetUploadLimit(bytesPerSec uint64) {
184 | c.uploadLimit.SetLimit(bytesPerSec)
185 | }
186 |
187 | func (c *Connector) ThrottleDownload(ctx context.Context, sz uint64) error {
188 | return c.downloadLimit.Throttle(ctx, sz)
189 | }
190 |
191 | func (c *Connector) ThrottleUpload(ctx context.Context, sz uint64) error {
192 | return c.uploadLimit.Throttle(ctx, sz)
193 | }
194 |
195 | func (c *Connector) GetADNLPrivateKey() ed25519.PrivateKey {
196 | return c.TorrentServer.GetADNLPrivateKey()
197 | }
198 |
// CreateDownloader creates a downloader bound to the given torrent and, when
// the torrent header is not known yet, fetches and validates the header
// pieces from peers. It blocks (polling) until torrent.Info is resolved by
// peer discovery or ctx is cancelled. On any error the downloader's internal
// context is cancelled so nothing leaks.
func (c *Connector) CreateDownloader(ctx context.Context, t *Torrent) (_ TorrentDownloader, err error) {
	if len(t.BagID) != 32 {
		return nil, fmt.Errorf("invalid torrent bag id")
	}

	globalCtx, downloadCancel := context.WithCancel(ctx)
	var dow = &torrentDownloader{
		torrent:        t,
		globalCtx:      globalCtx,
		downloadCancel: downloadCancel,
	}
	// cancel the downloader's context if we bail out with an error below
	defer func() {
		if err != nil {
			downloadCancel()
		}
	}()

	// connect to first node and resolve torrent info
	// (Info is filled in asynchronously by peers; poll until it appears)
	for dow.torrent.Info == nil {
		select {
		case <-ctx.Done():
			err = fmt.Errorf("failed to find storage nodes for this bag, err: %w", ctx.Err())
			return nil, err
		case <-time.After(10 * time.Millisecond):
		}
	}

	if dow.torrent.Header == nil {
		// the header occupies the first pieces of the bag; round up to cover the tail
		hdrPieces := dow.torrent.Info.HeaderSize / uint64(dow.torrent.Info.PieceSize)
		if dow.torrent.Info.HeaderSize%uint64(dow.torrent.Info.PieceSize) > 0 {
			// add not full piece
			hdrPieces++
		}

		data := make([]byte, 0, hdrPieces*uint64(dow.torrent.Info.PieceSize))
		proofs := make([][]byte, 0, hdrPieces)
		for i := uint32(0); i < uint32(hdrPieces); i++ {
			piece, proof, _, _, pieceErr := dow.DownloadPieceDetailed(globalCtx, i)
			if pieceErr != nil {
				err = fmt.Errorf("failed to get header piece %d, err: %w", i, pieceErr)
				return nil, err
			}
			data = append(data, piece...)
			proofs = append(proofs, proof)
		}

		var header TorrentHeader
		data, err = tl.Parse(&header, data, true)
		if err != nil {
			err = fmt.Errorf("failed to load header from cell, err: %w", err)
			return nil, err
		}

		// sanity checks against malicious bags
		if len(header.DirName) > 256 {
			return nil, fmt.Errorf("too big dir name > 256")
		}

		// reject dir names with path traversal and similar tricks
		if err := validateFileName(string(header.DirName), false); err != nil {
			return nil, fmt.Errorf("malicious bag: %w", err)
		}

		if header.FilesCount > 1_000_000 {
			return nil, fmt.Errorf("bag has > 1_000_000 files, looks dangerous")
		}
		if uint32(len(header.NameIndex)) != header.FilesCount ||
			uint32(len(header.DataIndex)) != header.FilesCount {
			err = fmt.Errorf("corrupted header, lack of files info")
			return nil, err
		}

		dow.torrent.Header = &header

		// persist proofs of the already-verified header pieces
		for i, proof := range proofs {
			err = dow.torrent.setPiece(uint32(i), &PieceInfo{
				StartFileIndex: 0,
				Proof:          proof,
			})
			if err != nil {
				return nil, err
			}
		}
	}

	return dow, nil
}
284 |
// Close tears down the peer exactly once: stops its background activity,
// detaches it from the shared connection and removes it from the torrent's
// peer list. Safe to call multiple times thanks to closeOnce.
func (p *storagePeer) Close() {
	p.closeOnce.Do(func() {
		Logger("[STORAGE] CLOSING CONNECTION OF", hex.EncodeToString(p.nodeId), p.nodeAddr, "BAG", hex.EncodeToString(p.torrent.BagID))
		p.stop()
		p.conn.CloseFor(p)
		p.torrent.RemovePeer(p.nodeId)
	})
}
293 |
// initializeSession runs the handshake for a new peer session: an optional
// ping, torrent info exchange, and the initial pieces-mask update. Returns
// true on success. On failure the peer is closed — unless the session id has
// already been replaced by a newer one, in which case the stale failure is
// ignored.
func (p *storagePeer) initializeSession(ctx context.Context, id int64, doPing bool) bool {
	var err error
	defer func() {
		if err == nil {
			// mark the session usable for piece downloads
			atomic.StoreInt32(&p.sessionInitialized, 1)

			Logger("[STORAGE] SESSION INITIALIZED FOR", hex.EncodeToString(p.nodeId), "BAG", hex.EncodeToString(p.torrent.BagID), "SESSION", atomic.LoadInt64(&p.sessionId))
			return
		}

		// a newer session took over; do not close the peer for an old failure
		if atomic.LoadInt64(&p.sessionId) != id {
			return
		}

		Logger("[STORAGE] SESSION INITIALIZATION FAILED FOR", hex.EncodeToString(p.nodeId), "BAG", hex.EncodeToString(p.torrent.BagID), "SESSION", atomic.LoadInt64(&p.sessionId), "ERR", err.Error())
		p.Close()
	}()

	if doPing {
		qCtx, cancel := context.WithTimeout(ctx, 7*time.Second)
		err = p.ping(qCtx)
		cancel()
		if err != nil {
			err = fmt.Errorf("failed to ping: %w", err)
			return false
		}
	}

	if err = p.prepareTorrentInfo(); err != nil {
		err = fmt.Errorf("failed to prepare torrent info, err: %w", err)
		return false
	}

	if err = p.updateInitPieces(ctx); err != nil {
		err = fmt.Errorf("failed to send init pieces, err: %w", err)
		return false
	}

	return true
}
334 |
// touch refreshes this peer's entry in the torrent's peer table.
func (p *storagePeer) touch() {
	p.torrent.TouchPeer(p)
}
338 |
339 | func (p *storagePeer) findNeighbours(ctx context.Context) (*overlay.NodesList, error) {
340 | var al overlay.NodesList
341 | err := p.conn.adnl.Query(ctx, overlay.WrapQuery(p.overlay, &overlay.GetRandomPeers{}), &al)
342 | if err != nil {
343 | return nil, err
344 | }
345 | return &al, nil
346 | }
347 |
348 | func (p *storagePeer) ping(ctx context.Context) error {
349 | ses := atomic.LoadInt64(&p.sessionId)
350 | if ses == 0 {
351 | return fmt.Errorf("no session id")
352 | }
353 |
354 | tm := time.Now()
355 | var pong Pong
356 | err := p.conn.adnl.Query(ctx, overlay.WrapQuery(p.overlay, &Ping{SessionID: ses}), &pong)
357 | if err != nil {
358 | return err
359 | }
360 | atomic.StoreInt64(&p.currentPing, int64(time.Since(tm)/time.Millisecond))
361 |
362 | return nil
363 | }
364 |
// downloadPiece requests one piece from this peer over RLDP and verifies it
// end to end: BoC parse, merkle proof against the torrent root hash, and the
// piece-level proof branch. On success the peer's download stats are updated
// and its fail counters reset. On repeated failures (3 fails spaced more
// than a second apart) the connection is closed so it can be re-established.
func (p *storagePeer) downloadPiece(ctx context.Context, id uint32) (*Piece, error) {
	var piece Piece
	err := func() error {
		tm := time.Now()
		reqCtx, cancel := context.WithTimeout(ctx, p.torrent.transmitTimeout())
		// response budget: piece itself plus proof overhead
		err := p.conn.rldp.DoQuery(reqCtx, 4096+uint64(p.torrent.Info.PieceSize)*2, overlay.WrapQuery(p.overlay, &GetPiece{int32(id)}), &piece)
		cancel()
		if err != nil {
			return fmt.Errorf("failed to query piece %d. err: %w", id, err)
		}
		Logger("[STORAGE] LOAD PIECE", id, "FROM", p.nodeAddr, "DOWNLOAD TOOK:", time.Since(tm).String())
		// tm = time.Now()

		proof, err := cell.FromBOC(piece.Proof)
		if err != nil {
			return fmt.Errorf("failed to parse BoC of piece %d, err: %w", id, err)
		}

		err = cell.CheckProof(proof, p.torrent.Info.RootHash)
		if err != nil {
			return fmt.Errorf("proof check of piece %d failed: %w", id, err)
		}

		err = p.torrent.checkProofBranch(proof, piece.Data, id)
		if err != nil {
			return fmt.Errorf("proof branch check of piece %d failed: %w", id, err)
		}

		p.torrent.UpdateDownloadedPeer(p, uint64(len(piece.Data)))
		// Logger("[STORAGE] LOAD PIECE", id, "FROM", p.nodeAddr, "VERIFICATION TOOK:", time.Since(tm).String())

		return nil
	}()
	if err != nil {
		// cancellation is not a peer failure; propagate silently
		if errors.Is(err, context.Canceled) {
			return nil, err
		}

		Logger("[STORAGE] LOAD PIECE FROM", p.nodeAddr, "ERR:", err.Error())

		// count at most one failure per second (CAS guards concurrent pieces)
		now := time.Now().Unix()
		if old := atomic.LoadInt64(&p.failAt); old < time.Now().Unix()-1 {
			if atomic.CompareAndSwapInt64(&p.failAt, old, now) {
				atomic.AddInt32(&p.fails, 1)
			}

			// in case 3 fails with 2s delay in a row, disconnect
			if atomic.LoadInt32(&p.fails) >= 3 {
				Logger("[STORAGE] TOO MANY FAILS FROM", p.nodeAddr, "CLOSING CONNECTION, ERR:", err.Error())
				// something wrong, close connection, we should reconnect after it
				p.Close()
			}
		}
		return nil, err
	}
	atomic.StoreInt32(&p.fails, 0)
	atomic.StoreInt64(&p.failAt, 0)

	return &piece, nil
}
425 |
// DownloadMaxInflightScore caps the adaptive per-peer inflight budget used by
// DownloadPieceDetailed when picking the best peer.
var DownloadMaxInflightScore = int32(400)
427 |
// DownloadPieceDetailed - same as DownloadPiece, but also returns proof data,
// plus the id and address of the peer the piece was fetched from.
//
// Peer selection: only peers with an initialized session that advertise the
// piece are considered, preferring the lowest current inflight count and
// skipping peers over their adaptive inflight budget. On a peer failure the
// peer is temporarily skipped and its budget shrinks; on success the budget
// grows. Blocks until the piece is downloaded or ctx is cancelled.
func (t *torrentDownloader) DownloadPieceDetailed(ctx context.Context, pieceIndex uint32) (piece []byte, proof []byte, peer []byte, peerAddr string, err error) {
	skip := map[string]*storagePeer{}
	for {
		peers := t.torrent.GetPeers()

		var bestNode *storagePeer

		t.mx.Lock()
		{
			for _, node := range peers {
				if skip[string(node.peer.nodeId)] != nil {
					continue
				}

				if atomic.LoadInt32(&node.peer.sessionInitialized) == 0 {
					continue
				}

				// respect the peer's adaptive inflight budget
				inf := atomic.LoadInt32(&node.peer.inflight)
				if inf > atomic.LoadInt32(&node.peer.maxInflightScore) {
					continue
				}

				// keep the currently-best candidate if it is less loaded
				if bestNode != nil && atomic.LoadInt32(&bestNode.inflight) < inf {
					continue
				}

				node.peer.piecesMx.RLock()
				hasPiece := node.peer.hasPieces[pieceIndex]
				node.peer.piecesMx.RUnlock()

				if hasPiece {
					bestNode = node.peer
				}
			}

			if bestNode != nil {
				atomic.AddInt32(&bestNode.inflight, 1)
			}
		}
		t.mx.Unlock()

		if bestNode == nil {
			select {
			case <-ctx.Done():
				return nil, nil, nil, "", ctx.Err()
			case <-time.After(5 * time.Millisecond):
				// reset the skip list so previously-failed peers get retried
				skip = map[string]*storagePeer{}
				// no nodes, wait
			}
			continue
		}

		// tm := time.Now()
		pc, err := bestNode.downloadPiece(ctx, pieceIndex)
		// log.Println("DW", pieceIndex, bestNode.nodeAddr, time.Since(tm).String(), err)
		atomic.AddInt32(&bestNode.inflight, -1)
		if err != nil {
			// shrink the budget on failure (never below 5)
			if x := atomic.LoadInt32(&bestNode.maxInflightScore); x > 5 {
				atomic.CompareAndSwapInt32(&bestNode.maxInflightScore, x, x-1)
			}

			skip[string(bestNode.nodeId)] = bestNode
			continue
		} else {
			if x := atomic.LoadInt32(&bestNode.maxInflightScore); x < 60 {
				atomic.CompareAndSwapInt32(&bestNode.maxInflightScore, x, x+1)
			}
		}

		// NOTE(review): on success the budget is bumped here a second time
		// (fast ramp-up below 60, slow up to the global cap) — confirm this
		// double increment is intentional
		if x := atomic.LoadInt32(&bestNode.maxInflightScore); x < DownloadMaxInflightScore {
			atomic.CompareAndSwapInt32(&bestNode.maxInflightScore, x, x+1)
		}

		return pc.Data, pc.Proof, bestNode.nodeId, bestNode.nodeAddr, nil
	}
}
506 |
507 | func (t *Torrent) checkProofBranch(proof *cell.Cell, data []byte, piece uint32) error {
508 | piecesNum := t.Info.PiecesNum()
509 | if piece >= piecesNum {
510 | return fmt.Errorf("piece is out of range %d/%d", piece, piecesNum)
511 | }
512 |
513 | tree, err := proof.PeekRef(0)
514 | if err != nil {
515 | return err
516 | }
517 |
518 | // calc tree depth
519 | depth := int(math.Log2(float64(piecesNum)))
520 | if piecesNum > uint32(math.Pow(2, float64(depth))) {
521 | // add 1 if pieces num is not exact log2
522 | depth++
523 | }
524 |
525 | // check bits from left to right and load branches
526 | for i := depth - 1; i >= 0; i-- {
527 | refId := 1
528 | if piece&(1< 256*1024*1024 {
109 | type jobRes struct {
110 | piece uint32
111 | err error
112 | }
113 | type job struct {
114 | piece uint32
115 | res chan jobRes
116 | }
117 |
118 | jobs := make(chan job, 100)
119 | results := make(chan jobRes, 100)
120 | ctxWorker, cancel := context.WithCancel(t.globalCtx)
121 |
122 | workers := (runtime.NumCPU() / 3) * 2
123 | if workers == 0 {
124 | workers = 1
125 | }
126 |
127 | for i := 0; i < workers; i++ {
128 | go func() {
129 | for {
130 | select {
131 | case <-ctxWorker.Done():
132 | return
133 | case j := <-jobs:
134 | _, err := t.getPieceInternal(j.piece, true)
135 | select {
136 | case <-ctxWorker.Done():
137 | return
138 | case j.res <- jobRes{piece: j.piece, err: err}:
139 | }
140 | }
141 | }
142 | }()
143 | }
144 |
145 | go func() {
146 | for i := info.FromPiece; i <= info.ToPiece; i++ {
147 | if checked[i] {
148 | continue
149 | }
150 |
151 | jobs <- job{piece: i, res: results}
152 | }
153 | }()
154 |
155 | for i := info.FromPiece; i <= info.ToPiece; i++ {
156 | if checked[i] {
157 | continue
158 | }
159 |
160 | select {
161 | case <-ctxWorker.Done():
162 | break
163 | case res := <-results:
164 | if res.err != nil {
165 | if !strings.Contains(res.err.Error(), "is not downloaded") {
166 | Logger("[VERIFICATION] FAILED FOR PIECE:", res.piece, "BAG:", hex.EncodeToString(t.BagID), res.err.Error())
167 | isDelete = true
168 | }
169 | }
170 |
171 | // mark only for existing pieces to remove not only 1 file in not exist
172 | checked[res.piece] = res.err == nil
173 | }
174 |
175 | if isDelete {
176 | break
177 | }
178 | }
179 | cancel()
180 | } else {
181 | for i := info.FromPiece; i <= info.ToPiece; i++ {
182 | if checked[i] {
183 | continue
184 | }
185 |
186 | _, err := t.getPieceInternal(i, true)
187 | if err != nil {
188 | if strings.Contains(err.Error(), "is not downloaded") {
189 | continue
190 | }
191 |
192 | isDelete = true
193 | break
194 | }
195 |
196 | // mark only for existing pieces to remove not only 1 file in not exist
197 | checked[i] = true
198 | }
199 | }
200 | }
201 |
202 | if isDelete {
203 | // we delete whole file because size can be > than expected
204 | // and just replace of piece will be not enough
205 | for i := info.FromPiece; i <= info.ToPiece; i++ {
206 | // file was deleted, delete pieces records also
207 | if err = t.removePiece(i); err != nil {
208 | Logger("[VERIFICATION] REMOVE PIECE ERR:", hex.EncodeToString(t.BagID), i, err.Error())
209 |
210 | return err
211 | }
212 | }
213 |
214 | if !t.CreatedLocally {
215 | needDownload = true
216 | Logger("[VERIFICATION] NEED DOWNLOAD:", rootPath+"/"+info.Name, hex.EncodeToString(t.BagID))
217 |
218 | if err = t.db.GetFS().Delete(rootPath + "/" + info.Name); err != nil && !errors.Is(err, fs.ErrNotExist) {
219 | Logger("[VERIFICATION] FAILED TO REMOVE FILE:", rootPath+"/"+info.Name, hex.EncodeToString(t.BagID), err.Error())
220 | }
221 | } else {
222 | Logger("[VERIFICATION] CORRUPTED, BUT CREATED LOCALLY AND WE WILL NOT TOUCH FILES :", hex.EncodeToString(t.BagID))
223 | }
224 | }
225 | }
226 |
227 | if needDownload {
228 | // restart download of missing pieces
229 | currFlag := t.currentDownloadFlag
230 | currPause := t.pause
231 | _ = t.startDownload(func(event Event) {
232 | if event.Name == EventErr && currFlag == t.currentDownloadFlag {
233 | currPause()
234 | }
235 | })
236 | }
237 |
238 | Logger("[VERIFICATION] COMPLETED:", hex.EncodeToString(t.BagID), "TOOK", time.Since(tm).String())
239 |
240 | return nil
241 | }
242 |
// startDownload (re)starts the background download of the bag's active files.
// Any previous download is cancelled and replaced. The report callback
// receives progress events (EventBagResolved, EventPieceDownloaded,
// EventFileDownloaded, EventDone, EventErr). The method returns immediately
// after spawning the worker goroutine; errors during the download itself are
// delivered via report.
func (t *Torrent) startDownload(report func(Event)) error {
	if t.BagID == nil {
		return fmt.Errorf("bag is not set")
	}

	// we use flag pointer to know is download was replaced
	var flag = false
	t.currentDownloadFlag = &flag

	stop := t.stopDownload
	if stop != nil {
		// stop current download
		stop()
	}
	var ctx context.Context
	ctx, stop = context.WithCancel(t.globalCtx)
	t.stopDownload = stop

	go func() {
		// cancel our context on exit unless it was deliberately kept alive
		// (stop is set to nil below for the headers-only case)
		defer func() {
			if stop != nil {
				stop()
			}
		}()

		piecesMap := map[uint32]bool{}
		var list []fileInfo

		// resolve info/header from peers if we don't have them yet
		if t.Header == nil || t.Info == nil {
			if err := t.prepareDownloader(ctx); err != nil {
				Logger("failed to prepare downloader for", hex.EncodeToString(t.BagID), "err: ", err.Error())
				return
			}

			// update torrent in db
			if err := t.db.SetTorrent(t); err != nil {
				Logger("failed to set torrent in db", hex.EncodeToString(t.BagID), "err: ", err.Error())
				return
			}
		}

		var downloaded uint64
		rootPath := t.Path + "/" + string(t.Header.DirName)

		// decide which file ids to download
		var files []uint32
		if t.downloadAll {
			for i := uint32(0); i < t.Header.FilesCount; i++ {
				files = append(files, i)
			}
		} else {
			files = t.GetActiveFilesIDs()
			if len(files) == 0 {
				// do not stop download because we just loaded headers
				// TODO: make it better
				t.stopDownload = nil
			}
		}

		// figure out which pieces are missing per file
		list = make([]fileInfo, 0, len(files))
		for _, f := range files {
			info, err := t.GetFileOffsetsByID(f)
			if err != nil {
				continue
			}

			needFile := false

			if !t.db.GetFS().Exists(rootPath + "/" + info.Name) {
				needFile = true
				for i := info.FromPiece; i <= info.ToPiece; i++ {
					piecesMap[i] = true
					// file was deleted, delete pieces records also
					_ = t.removePiece(i)
				}
			} else {
				for i := info.FromPiece; i <= info.ToPiece; i++ {
					// TODO: read file parts and compare with hashes
					if _, err = t.getPiece(i); err != nil {
						needFile = true
						piecesMap[i] = true
						continue
					}
					downloaded++
				}
			}

			if needFile {
				list = append(list, fileInfo{info: info, path: info.Name})
			}
		}

		pieces := make([]uint32, 0, len(piecesMap))
		for p := range piecesMap {
			pieces = append(pieces, p)
		}

		// download pieces in ascending order; files by their end position
		sort.Slice(pieces, func(i, j int) bool {
			return pieces[i] < pieces[j]
		})
		sort.Slice(list, func(i, j int) bool {
			return uint64(list[i].info.ToPiece)<<32+uint64(list[i].info.ToPieceOffset) <
				uint64(list[j].info.ToPiece)<<32+uint64(list[j].info.ToPieceOffset)
		})

		report(Event{Name: EventBagResolved, Value: PiecesInfo{OverallPieces: int(t.Info.PiecesNum()), PiecesToDownload: len(pieces)}})
		if len(pieces) > 0 {
			if err := t.prepareDownloader(ctx); err != nil {
				Logger("failed to prepare downloader for", hex.EncodeToString(t.BagID), "err: ", err.Error())
				return
			}

			if t.downloadOrdered {
				// sequential mode: write files strictly in piece order
				fetch := NewPreFetcher(ctx, t, t.downloader, report, DownloadPrefetch, pieces)
				defer fetch.Stop()

				if err := writeOrdered(ctx, t, list, piecesMap, rootPath, report, fetch); err != nil {
					report(Event{Name: EventErr, Value: err})
					return
				}
			} else {
				// unordered mode: write each piece as soon as it arrives
				filesMap := map[uint32]bool{}
				for _, file := range files {
					filesMap[file] = true
				}

				left := len(pieces)
				ready := make(chan uint32, DownloadPrefetch)
				fetch := NewPreFetcher(ctx, t, t.downloader, func(event Event) {
					if event.Name == EventPieceDownloaded {
						ready <- event.Value.(uint32)
					}
					report(event)
				}, DownloadPrefetch, pieces)
				defer fetch.Stop()

				// keep the last-touched file open between pieces to avoid
				// reopening it for every piece
				var currentFile FSFile
				var currentFileId uint32

				defer func() {
					if currentFile != nil {
						currentFile.Close()
					}
				}()

				for i := 0; i < left; i++ {
					select {
					case e := <-ready:
						err := func(piece uint32) error {
							currentPiece, currentProof, err := fetch.Get(ctx, piece)
							if err != nil {
								return fmt.Errorf("failed to download piece %d: %w", piece, err)
							}
							defer fetch.Free(piece)

							pieceFiles, err := t.GetFilesInPiece(piece)
							if err != nil {
								return fmt.Errorf("failed to get files of piece %d: %w", piece, err)
							}

							for _, file := range pieceFiles {
								if !filesMap[file.Index] {
									continue
								}

								if err := validateFileName(file.Name, true); err != nil {
									Logger(fmt.Sprintf("Malicious file '%s' was skipped: %v", file.Name, err))
									continue
								}

								err = func() error {
									if currentFile == nil || currentFileId != file.Index {
										if currentFile != nil {
											currentFile.Close()
										}

										for x := 1; x <= 5; x++ {
											// we retry because on Windows close file behaves
											// like async, and it may throw that file still opened
											currentFile, err = t.db.GetFS().Open(rootPath+"/"+file.Name, OpenModeWrite)
											if err != nil {
												Logger(fmt.Errorf("failed to create or open file %s: %w", file.Name, err).Error())
												time.Sleep(time.Duration(x*50) * time.Millisecond)
												continue
											}
											currentFileId = file.Index
											break
										}
										if err != nil {
											return fmt.Errorf("failed to create or open file %s: %w", file.Name, err)
										}
									}

									notEmptyFile := file.FromPiece != file.ToPiece || file.FromPieceOffset != file.ToPieceOffset
									if notEmptyFile {
										// offset of this piece inside the file
										fileOff := int64(0)
										if file.FromPiece != piece {
											fileOff = int64(piece-file.FromPiece)*int64(t.Info.PieceSize) - int64(file.FromPieceOffset)
										}

										// trim the piece to the part that belongs to this file
										data := currentPiece
										if file.ToPiece == piece {
											data = data[:file.ToPieceOffset]
										}
										if file.FromPiece == piece {
											data = data[file.FromPieceOffset:]
										}

										_, err = currentFile.WriteAt(data, fileOff)
										if err != nil {
											return fmt.Errorf("failed to write file %s: %w", file.Name, err)
										}
									}

									return currentFile.Sync()
								}()
								if err != nil {
									return err
								}
							}

							err = t.setPiece(piece, &PieceInfo{
								StartFileIndex: pieceFiles[0].Index,
								Proof:          currentProof,
							})
							if err != nil {
								return fmt.Errorf("failed to save piece %d to db: %w", piece, err)
							}

							return nil
						}(e)
						if err != nil {
							report(Event{Name: EventErr, Value: err})
							return
						}
					case <-ctx.Done():
						report(Event{Name: EventErr, Value: ctx.Err()})
						return
					}
				}
			}
		}

		report(Event{Name: EventDone, Value: DownloadResult{
			Path:        rootPath,
			Dir:         string(t.Header.DirName),
			Description: t.Info.Description.Value,
		}})

		// reset per-peer download counters now that the bag is complete
		for id := range t.GetPeers() {
			peerId, _ := hex.DecodeString(id)
			t.ResetDownloadPeer(peerId)
		}

		if len(files) == 0 {
			// stop if downloaded failed, on header we leave it for reuse
			stop = nil
		}
	}()

	return nil
}
504 |
// writeOrdered writes the files of the given list sequentially, pulling the
// needed pieces from the prefetcher in order. A piece shared between two
// adjacent files is kept in currentPiece across iterations so it is fetched
// only once; its db record is saved when the next piece replaces it (and a
// final flush happens after the loop). File names are validated against path
// traversal before anything is written.
func writeOrdered(ctx context.Context, t *Torrent, list []fileInfo, piecesMap map[uint32]bool, rootPath string, report func(Event), fetch *PreFetcher) error {
	var currentPieceId uint32
	var pieceStartFileIndex uint32
	var currentPiece, currentProof []byte
	for _, off := range list {
		err := func() error {
			if strings.Contains(off.path, "..") {
				Logger("Malicious file with path traversal was skipped: " + off.path)
				return fmt.Errorf("malicious file")
			}
			if err := validateFileName(off.path, true); err != nil {
				Logger(fmt.Sprintf("Malicious file '%s' was skipped: %v", off.path, err))
				return fmt.Errorf("malicious file %q", off.path)
			}

			f, err := t.db.GetFS().Open(rootPath+"/"+off.path, OpenModeWrite)
			if err != nil {
				return fmt.Errorf("failed to create file %s: %w", off.path, err)
			}
			defer f.Close()

			notEmptyFile := off.info.FromPiece != off.info.ToPiece || off.info.FromPieceOffset != off.info.ToPieceOffset
			if notEmptyFile {
				for piece := off.info.FromPiece; piece <= off.info.ToPiece; piece++ {
					if !piecesMap[piece] {
						continue
					}

					// fetch the next piece; flush the previous one to db first
					if piece != currentPieceId || currentPiece == nil {
						if currentPiece != nil {
							fetch.Free(currentPieceId)

							err = t.setPiece(currentPieceId, &PieceInfo{
								StartFileIndex: pieceStartFileIndex,
								Proof:          currentProof,
							})
							if err != nil {
								return fmt.Errorf("failed to save piece %d to db: %w", currentPieceId, err)
							}
						}

						pieceStartFileIndex = off.info.Index
						currentPiece, currentProof, err = fetch.Get(ctx, piece)
						if err != nil {
							return fmt.Errorf("failed to download piece %d: %w", piece, err)
						}

						currentPieceId = piece
					}
					// trim the piece to the part belonging to this file and
					// compute its offset within the file
					part := currentPiece
					offset := int64(piece-off.info.FromPiece) * int64(t.Info.PieceSize)
					if piece == off.info.ToPiece {
						part = part[:off.info.ToPieceOffset]
					}
					if piece == off.info.FromPiece {
						part = part[off.info.FromPieceOffset:]
					}
					if piece > off.info.FromPiece {
						offset -= int64(off.info.FromPieceOffset)
					}

					if piece < off.info.FromPiece || piece > off.info.ToPiece {
						// assert, should never happen
						panic("piece is not related to file")
					}

					_, err = f.WriteAt(part, offset)
					if err != nil {
						return fmt.Errorf("failed to write piece %d for file %s: %w", piece, off.path, err)
					}
				}
			}

			report(Event{Name: EventFileDownloaded, Value: off.path})
			return nil
		}()
		if err != nil {
			return err
		}
	}

	// flush the record of the last held piece
	if currentPiece != nil {
		fetch.Free(currentPieceId)

		err := t.setPiece(currentPieceId, &PieceInfo{
			StartFileIndex: pieceStartFileIndex,
			Proof:          currentProof,
		})
		if err != nil {
			return fmt.Errorf("failed to save piece %d to db: %w", currentPieceId, err)
		}
	}
	return nil
}
599 |
--------------------------------------------------------------------------------
/storage/fetch.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "context"
5 | "encoding/hex"
6 | "fmt"
7 | "github.com/pterm/pterm"
8 | "sync"
9 | "sync/atomic"
10 | "time"
11 | )
12 |
// piecePack is a downloaded piece together with its merkle proof.
type piecePack struct {
	data  []byte
	proof []byte
}
17 |
// PreFetcher downloads a fixed list of pieces ahead of the consumer using an
// adaptive pool of workers, keeping at most `prefetch` pieces in flight.
// Consumers take results via Get and release them via Free.
type PreFetcher struct {
	downloader TorrentDownloader
	torrent    *Torrent
	offset     int                   // index into piecesList of the last piece queued
	pieces     map[uint32]*piecePack // nil value = expected but not yet downloaded
	tasks      chan uint32           // queue of piece ids for workers
	piecesList []uint32              // all pieces to fetch, in download order

	downloaded uint64 // total pieces fetched (atomic), used by the scaler
	report     func(Event)

	mx    sync.RWMutex // guards pieces
	ctx   context.Context
	close func()
}
33 |
// Progress is a human-readable snapshot of download progress.
type Progress struct {
	Downloaded string
	Speed      string
}
38 |
39 | func NewPreFetcher(ctx context.Context, torrent *Torrent, downloader TorrentDownloader, report func(Event), prefetch int, pieces []uint32) *PreFetcher {
40 | if prefetch > len(pieces) {
41 | prefetch = len(pieces)
42 | }
43 |
44 | ff := &PreFetcher{
45 | downloader: downloader,
46 | torrent: torrent,
47 | report: report,
48 | piecesList: pieces,
49 | offset: prefetch - 1,
50 | pieces: map[uint32]*piecePack{},
51 | tasks: make(chan uint32, prefetch),
52 | }
53 | ff.ctx, ff.close = context.WithCancel(ctx)
54 |
55 | for _, piece := range pieces {
56 | // mark pieces as existing
57 | ff.pieces[piece] = nil
58 | }
59 |
60 | // pre-download pieces
61 | for i := 0; i < prefetch; i++ {
62 | ff.tasks <- ff.piecesList[i]
63 | }
64 |
65 | go ff.scaling()
66 |
67 | return ff
68 | }
69 |
// Stop cancels the prefetcher's context, terminating all workers and the scaler.
func (f *PreFetcher) Stop() {
	f.close()
}
73 |
// Get blocks until the given piece is downloaded and returns its data and
// proof. The piece must be one of those registered at construction (a request
// for any other piece is a programming error and panics). Polls every 5ms;
// returns the ctx error on cancellation.
func (f *PreFetcher) Get(ctx context.Context, piece uint32) ([]byte, []byte, error) {
	f.mx.RLock()
	if _, ok := f.pieces[piece]; !ok {
		panic("unexpected piece requested")
	}
	f.mx.RUnlock()

	for {
		f.mx.RLock()
		if p := f.pieces[piece]; p != nil {
			f.mx.RUnlock()
			return p.data, p.proof, nil
		}
		f.mx.RUnlock()

		// wait for piece to be ready
		select {
		case <-ctx.Done():
			return nil, nil, ctx.Err()
		case <-time.After(5 * time.Millisecond):
		}
	}
}
97 |
98 | func (f *PreFetcher) Free(piece uint32) {
99 | f.mx.Lock()
100 | defer f.mx.Unlock()
101 |
102 | if _, ok := f.pieces[piece]; !ok {
103 | panic("unexpected piece requested")
104 | }
105 | delete(f.pieces, piece)
106 |
107 | if f.offset+1 < len(f.piecesList) {
108 | f.offset++
109 | f.tasks <- f.piecesList[f.offset]
110 | }
111 | }
112 |
// scaling adaptively grows and shrinks the worker pool based on observed
// throughput: every 100ms it samples pieces downloaded, keeps a moving window
// of 35 samples, and adds workers while the window total keeps exceeding the
// best total seen so far; when throughput drops more than 25% below that
// maximum, workers are removed. Bounds: 8..120 workers.
func (f *PreFetcher) scaling() {
	const (
		minWorkers      = 8
		perScaleWorkers = 2
		maxWorkers      = 120
		windowSize      = 35
		interval        = time.Millisecond * 100
	)

	cancels := make([]context.CancelFunc, 0, maxWorkers)

	// start with the minimum pool
	for i := 0; i < 8; i++ {
		ctx, cancel := context.WithCancel(f.ctx)
		cancels = append(cancels, cancel)
		go f.worker(ctx)
	}

	piecesPerFrame := make([]uint64, 0, windowSize)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	var prevDownloaded uint64
	var maxInPeriod uint64
	for {
		select {
		case <-f.ctx.Done():
			return
		case <-ticker.C:
			workers := len(cancels)
			if workers == 0 {
				continue
			}

			// pieces completed since the previous tick
			dn := atomic.LoadUint64(&f.downloaded)
			downloaded := dn - prevDownloaded
			prevDownloaded = dn

			if len(piecesPerFrame) >= windowSize {
				piecesPerFrame = piecesPerFrame[1:]
			}
			piecesPerFrame = append(piecesPerFrame, downloaded)

			if len(piecesPerFrame) == windowSize {
				// moving avg
				var totalInPeriod uint64
				for _, v := range piecesPerFrame {
					totalInPeriod += v
				}

				if totalInPeriod > 0 && totalInPeriod > maxInPeriod && workers < maxWorkers {
					// throughput still improving: scale up
					maxInPeriod = totalInPeriod
					for i := 0; i < perScaleWorkers; i++ {
						ctx, cancel := context.WithCancel(f.ctx)
						cancels = append(cancels, cancel)
						go f.worker(ctx)
					}
					// drop part of the window so the next decision reflects the new pool
					piecesPerFrame = piecesPerFrame[len(piecesPerFrame)-len(piecesPerFrame)/8:]
					Logger("[STORAGE_SCALER] ADDED WORKER, TOTAL:", len(cancels), "BAG", hex.EncodeToString(f.torrent.BagID), "MAX", maxInPeriod)
				} else if totalInPeriod < maxInPeriod-maxInPeriod/4 && workers > minWorkers {
					// throughput dropped noticeably: scale down
					maxInPeriod = totalInPeriod
					for i := 0; i < perScaleWorkers; i++ {
						last := len(cancels) - 1
						cancels[last]()
						cancels = cancels[:last]
					}
					piecesPerFrame = piecesPerFrame[:0]
					Logger("[STORAGE_SCALER] CLOSED WORKER, TOTAL:", len(cancels), "BAG", hex.EncodeToString(f.torrent.BagID), "MAX", maxInPeriod)
				}
			}
		}
	}
}
185 |
// worker consumes piece ids from the shared task queue, waits for the global
// download rate limiter, then retries the download every 300ms until the
// piece is verified and stored for Get. Exits when either the prefetcher is
// stopped (f.ctx) or this worker is scaled down (downscaleCtx).
func (f *PreFetcher) worker(downscaleCtx context.Context) {
	for {
		var task uint32
		select {
		case <-downscaleCtx.Done():
			return
		case <-f.ctx.Done():
			return
		case task = <-f.tasks:
			// respect the download rate limit before fetching
			for {
				err := f.torrent.connector.ThrottleDownload(f.ctx, uint64(f.torrent.Info.PieceSize))
				if err != nil {
					select {
					case <-f.ctx.Done():
						return
					case <-time.After(5 * time.Millisecond):
						continue
					}
				}
				break
			}
		}

		// retry the piece until success or shutdown
		for {
			data, proof, _, _, err := f.downloader.DownloadPieceDetailed(f.ctx, task)
			if err == nil {
				f.mx.Lock()
				f.pieces[task] = &piecePack{
					data:  data,
					proof: proof,
				}
				f.mx.Unlock()

				atomic.AddUint64(&f.downloaded, 1)
				f.report(Event{Name: EventPieceDownloaded, Value: task})

				break
			}

			// when error we retry
			select {
			case <-f.ctx.Done():
				return
			case <-time.After(300 * time.Millisecond):
				pterm.Warning.Println("Piece", task, "download error (", err.Error(), "), will retry in 300ms")
			}
		}
	}
}
235 |
// ToSz formats a byte count as a human-readable size string
// (Bytes, KB, MB or GB with two decimals).
func ToSz(sz uint64) string {
	const (
		kb = uint64(1) << 10
		mb = uint64(1) << 20
		gb = uint64(1) << 30
	)
	if sz < kb {
		return fmt.Sprintf("%d Bytes", sz)
	}
	if sz < mb {
		return fmt.Sprintf("%.2f KB", float64(sz)/float64(kb))
	}
	if sz < gb {
		return fmt.Sprintf("%.2f MB", float64(sz)/float64(mb))
	}
	return fmt.Sprintf("%.2f GB", float64(sz)/float64(gb))
}
248 |
// ToSpeed formats a bytes-per-second rate as a human-readable string
// (Bytes/s, KB/s, MB/s or GB/s with two decimals).
func ToSpeed(speed uint64) string {
	const (
		kb = uint64(1) << 10
		mb = uint64(1) << 20
		gb = uint64(1) << 30
	)
	if speed < kb {
		return fmt.Sprintf("%d Bytes/s", speed)
	}
	if speed < mb {
		return fmt.Sprintf("%.2f KB/s", float64(speed)/float64(kb))
	}
	if speed < gb {
		return fmt.Sprintf("%.2f MB/s", float64(speed)/float64(mb))
	}
	return fmt.Sprintf("%.2f GB/s", float64(speed)/float64(gb))
}
261 |
--------------------------------------------------------------------------------
/storage/peer.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "encoding/hex"
5 | "sync/atomic"
6 | "time"
7 | )
8 |
// PeerInfo holds per-peer transfer statistics for a torrent.
type PeerInfo struct {
	Addr       string // peer's network address
	Uploaded   uint64 // total bytes sent to this peer
	Downloaded uint64 // total bytes received from this peer

	peer          *storagePeer
	uploadSpeed   *speedInfo
	downloadSpeed *speedInfo
}
18 |
19 | func (t *Torrent) GetPeers() map[string]PeerInfo {
20 | t.peersMx.RLock()
21 | defer t.peersMx.RUnlock()
22 |
23 | peers := make(map[string]PeerInfo, len(t.peers))
24 | for s, info := range t.peers {
25 | peers[s] = *info
26 | }
27 | return peers
28 | }
29 |
// TouchPeer creates or refreshes the peer's entry in the peer table and
// returns it. Takes the peers lock.
func (t *Torrent) TouchPeer(peer *storagePeer) *PeerInfo {
	t.peersMx.Lock()
	defer t.peersMx.Unlock()

	return t.touchPeer(peer)
}
36 |
// UpdateDownloadedPeer adds the given byte count to the peer's downloaded
// total, creating the peer entry if needed.
func (t *Torrent) UpdateDownloadedPeer(peer *storagePeer, bytes uint64) {
	t.peersMx.Lock()
	defer t.peersMx.Unlock()

	p := t.touchPeer(peer)
	p.Downloaded += bytes
}
44 |
45 | func (t *Torrent) RemovePeer(id []byte) {
46 | strId := hex.EncodeToString(id)
47 |
48 | t.peersMx.Lock()
49 | defer t.peersMx.Unlock()
50 |
51 | delete(t.peers, strId)
52 | delete(t.knownNodes, strId)
53 | }
54 |
55 | func (t *Torrent) GetPeer(id []byte) *PeerInfo {
56 | t.peersMx.Lock()
57 | defer t.peersMx.Unlock()
58 |
59 | return t.peers[hex.EncodeToString(id)]
60 | }
61 |
// ResetDownloadPeer zeroes the download counter and speed estimator of the
// given peer (used after a bag finishes downloading). No-op for unknown peers.
func (t *Torrent) ResetDownloadPeer(id []byte) {
	t.peersMx.Lock()
	defer t.peersMx.Unlock()

	strId := hex.EncodeToString(id)
	p := t.peers[strId]
	if p != nil {
		p.Downloaded = 0
		p.downloadSpeed = &speedInfo{}
	}
}
73 |
// UpdateUploadedPeer adds the given byte count to the peer's uploaded total
// and persists the torrent-wide upload counter to the db (best-effort, the
// db error is deliberately ignored).
func (t *Torrent) UpdateUploadedPeer(peer *storagePeer, bytes uint64) {
	_ = t.db.UpdateUploadStats(t.BagID, atomic.AddUint64(&t.stats.Uploaded, bytes))

	t.peersMx.Lock()
	defer t.peersMx.Unlock()

	p := t.touchPeer(peer)
	p.Uploaded += bytes
}
83 |
// touchPeer returns the peer's stats entry, creating it on first sight, and
// refreshes its backing peer pointer and address. Caller must hold peersMx.
func (t *Torrent) touchPeer(peer *storagePeer) *PeerInfo {
	strId := hex.EncodeToString(peer.nodeId)
	p := t.peers[strId]
	if p == nil {
		p = &PeerInfo{
			uploadSpeed:   &speedInfo{},
			downloadSpeed: &speedInfo{},
		}
		t.peers[strId] = p
	}
	p.peer = peer
	p.Addr = peer.nodeAddr
	return p
}
98 |
// GetDownloadSpeed returns the smoothed download rate in bytes/sec.
// NOTE(review): dispSpeed is read without synchronization while the
// estimator goroutine updates it; assumes a slightly stale/torn read is
// acceptable for display purposes — confirm.
func (p *PeerInfo) GetDownloadSpeed() uint64 {
	return uint64(p.downloadSpeed.dispSpeed)
}

// GetUploadSpeed returns the smoothed upload rate in bytes/sec.
func (p *PeerInfo) GetUploadSpeed() uint64 {
	return uint64(p.uploadSpeed.dispSpeed)
}
106 |
// speedInfo keeps an exponentially smoothed transfer rate estimate fed by
// samples of a monotonically increasing byte counter.
type speedInfo struct {
	wantReset bool

	prevBytes uint64    // counter value at the previous sample
	lastTime  time.Time // timestamp of the previous sample

	speed float64 // EMA of the instantaneous rate, bytes/sec
	init  bool    // true once the first sample seeded the baseline

	dispSpeed float64 // extra-smoothed value intended for display
}

// calculate consumes the current counter value and returns the smoothed
// rate in bytes/sec.
//
// Fix: the previous version computed `nowBytes - s.prevBytes` in uint64
// unconditionally; if the counter ever moved backwards (e.g. an external
// reset), the subtraction wrapped and the huge delta spiked the EMA.
// Such samples now re-seed the baseline instead.
func (s *speedInfo) calculate(nowBytes uint64) float64 {
	now := time.Now()

	// First sample, or counter went backwards: re-seed the baseline.
	if !s.init || nowBytes < s.prevBytes {
		s.prevBytes = nowBytes
		s.lastTime = now
		s.init = true
		s.speed = 0
		s.dispSpeed = 0
		return s.dispSpeed
	}

	dt := now.Sub(s.lastTime).Seconds()
	if dt > 0 {
		const alpha = 0.05 // EMA weight of the newest instantaneous sample
		delta := float64(nowBytes - s.prevBytes)
		instant := delta / dt
		s.speed = alpha*instant + (1-alpha)*s.speed
	}

	s.prevBytes = nowBytes
	s.lastTime = now

	// smooth animation
	const beta = 0.1
	s.dispSpeed += (s.speed - s.dispSpeed) * beta

	return s.dispSpeed
}
147 |
--------------------------------------------------------------------------------
/storage/storage.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "errors"
7 | "fmt"
8 | "github.com/xssnick/tonutils-go/tl"
9 | )
10 |
// init registers every TL schema used by the storage ADNL protocol
// (queries, responses and session updates) and the torrent header
// serialization schema, so tl can parse/serialize these types.
func init() {
	tl.Register(TorrentInfoContainer{}, "storage.torrentInfo data:bytes = storage.TorrentInfo")
	tl.Register(GetTorrentInfo{}, "storage.getTorrentInfo = storage.TorrentInfo")
	tl.Register(Piece{}, "storage.piece proof:bytes data:bytes = storage.Piece")
	tl.Register(GetPiece{}, "storage.getPiece piece_id:int = storage.Piece")
	tl.Register(Ping{}, "storage.ping session_id:long = storage.Pong")
	tl.Register(Pong{}, "storage.pong = storage.Pong")
	tl.Register(AddUpdate{}, "storage.addUpdate session_id:long seqno:int update:storage.Update = Ok")
	tl.Register(State{}, "storage.state will_upload:Bool want_download:Bool = storage.State")
	tl.Register(UpdateInit{}, "storage.updateInit have_pieces:bytes have_pieces_offset:int state:storage.State = storage.Update")
	tl.Register(UpdateHavePieces{}, "storage.updateHavePieces piece_id:(vector int) = storage.Update")
	tl.Register(UpdateState{}, "storage.updateState state:storage.State = storage.Update")
	tl.Register(Ok{}, "storage.ok = Ok")

	tl.Register(FECInfoNone{}, "fec_info_none#c82a1964 = FecInfo")
	// TorrentHeader uses size fields from earlier in the structure to size
	// its arrays, which is why Parse/Serialize below are hand-written.
	tl.Register(TorrentHeader{}, "torrent_header#9128aab7 files_count:uint32 "+
		"tot_name_size:uint64 tot_data_size:uint64 fec:FecInfo "+
		"dir_name_size:uint32 dir_name:(dir_name_size * [uint8]) "+
		"name_index:(files_count * [uint64]) data_index:(files_count * [uint64]) "+
		"names:(file_names_size * [uint8]) data:(tot_data_size * [uint8]) "+
		"= TorrentHeader")
}
33 |
// AddUpdate is the session update envelope exchanged between peers.
type AddUpdate struct {
	SessionID int64 `tl:"long"`
	Seqno     int64 `tl:"int"`
	Update    any   `tl:"struct boxed [storage.updateInit,storage.updateHavePieces,storage.updateState]"`
}

// TorrentInfoContainer wraps serialized torrent info bytes.
type TorrentInfoContainer struct {
	Data []byte `tl:"bytes"`
}

// GetTorrentInfo requests the torrent info from a peer.
type GetTorrentInfo struct{}

// Piece is a downloaded piece: its merkle proof plus raw data.
type Piece struct {
	Proof []byte `tl:"bytes"`
	Data  []byte `tl:"bytes"`
}

// GetPiece requests a single piece by id.
type GetPiece struct {
	PieceID int32 `tl:"int"`
}

// Ping is a session keep-alive request.
type Ping struct {
	SessionID int64 `tl:"long"`
}

// Pong is the keep-alive response.
type Pong struct{}

// State announces whether a peer will upload and wants to download.
type State struct {
	WillUpload   bool `tl:"bool"`
	WantDownload bool `tl:"bool"`
}

// UpdateInit is the first update in a session: piece availability bitmask
// (starting at HavePiecesOffset) plus the peer's state.
type UpdateInit struct {
	HavePieces       []byte `tl:"bytes"`
	HavePiecesOffset int32  `tl:"int"`
	State            State  `tl:"struct boxed"`
}

// UpdateHavePieces announces newly available piece ids.
type UpdateHavePieces struct {
	PieceIDs []int32 `tl:"vector int"`
}

// UpdateState announces a change of the peer's upload/download intent.
type UpdateState struct {
	State State `tl:"struct boxed"`
}

// Ok is the generic positive response.
type Ok struct{}

// FECInfoNone marks a bag that uses no forward error correction.
type FECInfoNone struct{}

// TorrentHeader describes the bag contents: directory name, per-file
// cumulative name/data offsets and the concatenated names/data blobs.
// Parsed/serialized manually (see Parse/Serialize).
type TorrentHeader struct {
	FilesCount    uint32
	TotalNameSize uint64
	TotalDataSize uint64
	FEC           FECInfoNone
	DirNameSize   uint32
	DirName       []byte
	NameIndex     []uint64 // cumulative end offset of each file name in Names
	DataIndex     []uint64 // cumulative end offset of each file's data
	Names         []byte
	Data          []byte
}
96 |
97 | func (t *TorrentHeader) Parse(data []byte) (_ []byte, err error) {
98 | // Manual parse because of not standard array definition
99 | if len(data) < 28 {
100 | return nil, fmt.Errorf("too short sizes data to parse")
101 | }
102 | t.FilesCount = binary.LittleEndian.Uint32(data)
103 | data = data[4:]
104 | t.TotalNameSize = binary.LittleEndian.Uint64(data)
105 | data = data[8:]
106 | t.TotalDataSize = binary.LittleEndian.Uint64(data)
107 | data = data[8:]
108 | data, err = tl.Parse(&t.FEC, data, true)
109 | if err != nil {
110 | return nil, fmt.Errorf("failed to parse fec: %w", err)
111 | }
112 | t.DirNameSize = binary.LittleEndian.Uint32(data)
113 | data = data[4:]
114 |
115 | if uint64(len(data)) < uint64(t.DirNameSize)+uint64(t.FilesCount*8*2)+t.TotalNameSize+t.TotalDataSize {
116 | return nil, fmt.Errorf("too short arrays data to parse")
117 | }
118 |
119 | t.DirName = data[:t.DirNameSize]
120 | data = data[t.DirNameSize:]
121 |
122 | for i := uint32(0); i < t.FilesCount; i++ {
123 | t.NameIndex = append(t.NameIndex, binary.LittleEndian.Uint64(data[i*8:]))
124 | t.DataIndex = append(t.DataIndex, binary.LittleEndian.Uint64(data[t.FilesCount*8+i*8:]))
125 | }
126 | data = data[t.FilesCount*8*2:]
127 |
128 | t.Names = data[:t.TotalNameSize]
129 | data = data[t.TotalNameSize:]
130 | t.Data = data[:t.TotalDataSize]
131 | data = data[t.TotalDataSize:]
132 | return data, nil
133 | }
134 |
135 | func (t *TorrentHeader) Serialize(buffer *bytes.Buffer) error {
136 | tmp := make([]byte, 20)
137 | binary.LittleEndian.PutUint32(tmp[0:], t.FilesCount)
138 | binary.LittleEndian.PutUint64(tmp[4:], t.TotalNameSize)
139 | binary.LittleEndian.PutUint64(tmp[12:], t.TotalDataSize)
140 | buffer.Write(tmp)
141 |
142 | fecData, err := tl.Serialize(t.FEC, true)
143 | if err != nil {
144 | return err
145 | }
146 | buffer.Write(fecData)
147 |
148 | if t.DirNameSize != uint32(len(t.DirName)) {
149 | return fmt.Errorf("incorrect dir name size")
150 | }
151 |
152 | dataDirNameSz := make([]byte, 4)
153 | binary.LittleEndian.PutUint32(dataDirNameSz, t.DirNameSize)
154 | buffer.Write(dataDirNameSz)
155 | buffer.Write(t.DirName)
156 |
157 | for _, ni := range t.NameIndex {
158 | iData := make([]byte, 8)
159 | binary.LittleEndian.PutUint64(iData, ni)
160 | buffer.Write(iData)
161 | }
162 |
163 | for _, ni := range t.DataIndex {
164 | iData := make([]byte, 8)
165 | binary.LittleEndian.PutUint64(iData, ni)
166 | buffer.Write(iData)
167 | }
168 | buffer.Write(t.Names)
169 | buffer.Write(t.Data)
170 |
171 | return nil
172 | }
173 |
174 | func (t *Torrent) calcFileIndexes() error {
175 | t.mx.Lock()
176 | defer t.mx.Unlock()
177 |
178 | // already calculated
179 | if t.filesIndex != nil {
180 | return nil
181 | }
182 |
183 | t.filesIndex = map[string]uint32{}
184 | for i := uint32(0); i < t.Header.FilesCount; i++ {
185 | if uint64(len(t.Header.Names)) < t.Header.NameIndex[i] {
186 | return fmt.Errorf("corrupted header, too short names data")
187 | }
188 | if t.Info.FileSize < t.Header.DataIndex[i]+t.Info.HeaderSize {
189 | return fmt.Errorf("corrupted header, data out of range")
190 | }
191 |
192 | nameFrom := uint64(0)
193 | if i > 0 {
194 | nameFrom = t.Header.NameIndex[i-1]
195 | }
196 | name := t.Header.Names[nameFrom:t.Header.NameIndex[i]]
197 | t.filesIndex[string(name)] = i
198 | }
199 | return nil
200 | }
201 |
202 | var ErrFileNotExist = errors.New("file is not exists in torrent")
203 |
204 | func (t *Torrent) GetFileOffsets(name string) (*FileInfo, error) {
205 | if err := t.calcFileIndexes(); err != nil {
206 | return nil, err
207 | }
208 |
209 | i, ok := t.filesIndex[name]
210 | if !ok {
211 | return nil, ErrFileNotExist
212 | }
213 | return t.GetFileOffsetsByID(i)
214 | }
215 |
// GetFilesInPiece returns the FileInfo of every file whose data overlaps
// the given piece. File boundaries come from the cumulative DataIndex
// offsets, shifted by the header size that precedes all file data.
func (t *Torrent) GetFilesInPiece(piece uint32) ([]*FileInfo, error) {
	// piece covers the half-open byte range [start, end) of the bag
	start := uint64(piece) * uint64(t.Info.PieceSize)
	end := uint64(piece+1) * uint64(t.Info.PieceSize)

	var files []*FileInfo
	for i := range t.Header.DataIndex {
		// file i occupies [fileStart, fileEnd) before the header shift
		fileStart, fileEnd := uint64(0), t.Header.DataIndex[i]
		if i > 0 {
			fileStart = t.Header.DataIndex[i-1]
		}
		fileStart += t.Info.HeaderSize
		fileEnd += t.Info.HeaderSize

		// DataIndex is cumulative, so no later file can start earlier
		if fileStart >= end {
			break
		}
		// NOTE(review): `<` (not `<=`) also keeps a file whose fileEnd equals
		// the piece start, including zero-length files on the boundary —
		// confirm this inclusiveness is intended.
		if fileEnd < start {
			continue
		}

		file, err := t.GetFileOffsetsByID(uint32(i))
		if err != nil {
			return nil, fmt.Errorf("failed to get offsets for %d: %w", i, err)
		}
		files = append(files, file)
	}

	return files, nil
}
245 |
// GetFileOffsetsByID computes where file i lives inside the bag in piece
// terms: the first/last pieces that touch the file and the byte offsets
// inside those pieces. Offsets are derived from the cumulative DataIndex,
// shifted by the header that precedes all file data.
func (t *Torrent) GetFileOffsetsByID(i uint32) (*FileInfo, error) {
	if int(i) >= len(t.Header.DataIndex) {
		return nil, ErrFileNotExist
	}
	info := &FileInfo{
		Index: i,
	}

	// file i occupies bytes [start, end) of the concatenated data section
	var end = t.Header.DataIndex[i]
	var start uint64 = 0
	if i > 0 {
		start = t.Header.DataIndex[i-1]
	}
	info.FromPiece = uint32((t.Info.HeaderSize + start) / uint64(t.Info.PieceSize))
	info.ToPiece = uint32((t.Info.HeaderSize + end) / uint64(t.Info.PieceSize))
	// remainders of the absolute offsets within their pieces
	info.FromPieceOffset = uint32((t.Info.HeaderSize + start) - uint64(info.FromPiece)*uint64(t.Info.PieceSize))
	info.ToPieceOffset = uint32((t.Info.HeaderSize + end) - uint64(info.ToPiece)*uint64(t.Info.PieceSize))
	// re-derived from piece coordinates; algebraically equals end-start
	info.Size = (uint64(info.ToPiece-info.FromPiece)*uint64(t.Info.PieceSize) + uint64(info.ToPieceOffset)) - uint64(info.FromPieceOffset)

	// file name comes from the cumulative NameIndex over Names
	var nameFrom uint64 = 0
	if i > 0 {
		nameFrom = t.Header.NameIndex[i-1]
	}
	info.Name = string(t.Header.Names[nameFrom:t.Header.NameIndex[i]])

	return info, nil
}
273 |
274 | func (t *Torrent) ListFiles() ([]string, error) {
275 | if err := t.calcFileIndexes(); err != nil {
276 | return nil, err
277 | }
278 |
279 | files := make([]string, len(t.filesIndex), len(t.filesIndex))
280 | for s, idx := range t.filesIndex {
281 | files[idx] = s
282 | }
283 | return files, nil
284 | }
285 |
--------------------------------------------------------------------------------
/storage/torrent.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "context"
5 | "crypto/ed25519"
6 | "encoding/hex"
7 | "fmt"
8 | "github.com/xssnick/tonutils-go/adnl/address"
9 | "github.com/xssnick/tonutils-go/tl"
10 | "io"
11 | "math/bits"
12 | "sync"
13 | "sync/atomic"
14 | "time"
15 |
16 | "github.com/xssnick/tonutils-go/adnl/overlay"
17 | "github.com/xssnick/tonutils-go/tlb"
18 | "github.com/xssnick/tonutils-go/tvm/cell"
19 | )
20 |
// FileIndex describes one file's placement inside a bag in block terms.
type FileIndex struct {
	BlockFrom       uint32
	BlockTo         uint32
	BlockFromOffset uint32
	BlockToOffset   uint32
	Name            string

	mx sync.Mutex
}

// OpenMode selects how a file is opened through the FS abstraction.
type OpenMode int

const (
	OpenModeRead OpenMode = iota
	OpenModeWrite
)

// FSController provides path-based file access (shared reads, removal)
// without the caller holding per-file handles.
type FSController interface {
	AcquireRead(path string, p []byte, off int64) (n int, err error)
	RemoveFile(path string) error
}

// FSFile is an open file handle supporting random-access reads/writes.
type FSFile interface {
	io.ReaderAt
	io.WriterAt
	io.Closer
	Sync() error
}

// FS abstracts the filesystem so backends (e.g. caches) can be swapped.
type FS interface {
	Open(name string, mode OpenMode) (FSFile, error)
	Delete(name string) error
	Exists(name string) bool
	GetController() FSController
}

// PieceInfo is the per-piece metadata persisted in the db.
type PieceInfo struct {
	StartFileIndex uint32 // index of the first file whose data appears in this piece
	Proof          []byte // merkle proof BOC for the piece
}

// Storage is the persistence layer for torrents, pieces and stats.
type Storage interface {
	GetFS() FS
	GetAll() []*Torrent
	GetTorrentByOverlay(overlay []byte) *Torrent
	SetTorrent(torrent *Torrent) error
	SetActiveFiles(bagId []byte, ids []uint32) error
	GetActiveFiles(bagId []byte) ([]uint32, error)
	GetPiece(bagId []byte, id uint32) (*PieceInfo, error)
	RemovePiece(bagId []byte, id uint32) error
	SetPiece(bagId []byte, id uint32, p *PieceInfo) error
	PiecesMask(bagId []byte, num uint32) []byte
	UpdateUploadStats(bagId []byte, val uint64) error
	VerifyOnStartup() bool
	GetForcedPieceSize() uint32
}

// NetConnector is the networking layer: identity, rate limits, peer
// connections and downloader creation.
type NetConnector interface {
	GetID() []byte
	GetADNLPrivateKey() ed25519.PrivateKey
	SetDownloadLimit(bytesPerSec uint64)
	SetUploadLimit(bytesPerSec uint64)
	GetUploadLimit() uint64
	GetDownloadLimit() uint64
	ThrottleDownload(ctx context.Context, sz uint64) error
	ThrottleUpload(ctx context.Context, sz uint64) error
	CreateDownloader(ctx context.Context, t *Torrent) (_ TorrentDownloader, err error)
	ConnectToNode(ctx context.Context, t *Torrent, node *overlay.Node, addrList *address.List) error
	TorrentServer
}

// TorrentStats holds counters persisted across restarts.
type TorrentStats struct {
	Uploaded uint64 // total bytes uploaded for this bag; accessed atomically
}
95 |
// Torrent is the runtime state of a single bag: immutable metadata
// (Info/Header), the piece bitmask, the peer table and the
// download/verification machinery. Persistent parts are mirrored in db.
type Torrent struct {
	BagID          []byte
	Path           string
	Info           *TorrentInfo
	Header         *TorrentHeader
	CreatedAt      time.Time
	CreatedLocally bool

	activeFiles     []uint32 // file ids selected for download (when not downloadAll)
	activeUpload    bool
	downloadAll     bool
	downloadOrdered bool // sequential piece order requested
	stats           TorrentStats

	connector  NetConnector
	downloader TorrentDownloader

	knownNodes map[string]*overlay.Node // discovered overlay nodes, keyed by hex id
	peers      map[string]*PeerInfo     // active peers, keyed by hex id
	peersMx    sync.RWMutex             // guards peers and knownNodes

	db Storage

	// completedCtx context.Context
	globalCtx context.Context // alive while the torrent is active; cancelled by pause
	pause     func()
	// complete func()

	filesIndex map[string]uint32 // lazily built name -> file id map (see calcFileIndexes)

	pieceMask                []byte // one bit per piece, 1 = downloaded; guarded by maskMx
	lastVerified             time.Time
	isVerificationInProgress bool

	lastDHTStore            time.Time
	lastDHTStoreCompletedAt int64
	lastDHTStoreFailed      int32

	searchesWithZeroPeersNum uint32

	signalNewPieces chan struct{} // buffered(1): setPiece notifies peersManager

	mx     sync.RWMutex
	maskMx sync.RWMutex

	currentDownloadFlag *bool
	stopDownload        func()
}
144 |
145 | func (t *Torrent) InitMask() {
146 | t.maskMx.Lock()
147 | if len(t.pieceMask) == 0 {
148 | t.pieceMask = t.db.PiecesMask(t.BagID, t.Info.PiecesNum())
149 | }
150 | t.maskMx.Unlock()
151 | }
152 |
// GetConnector exposes the network connector this torrent was created with.
func (t *Torrent) GetConnector() NetConnector {
	return t.connector
}
156 |
157 | func NewTorrent(path string, db Storage, connector NetConnector) *Torrent {
158 | t := &Torrent{
159 | Path: path,
160 | CreatedAt: time.Now(),
161 | peers: map[string]*PeerInfo{},
162 | knownNodes: map[string]*overlay.Node{},
163 | db: db,
164 | connector: connector,
165 | signalNewPieces: make(chan struct{}, 1),
166 | }
167 |
168 | // create as stopped
169 | t.globalCtx, t.pause = context.WithCancel(context.Background())
170 | // t.completedCtx, t.complete = context.WithCancel(context.Background())
171 | t.pause()
172 |
173 | return t
174 | }
175 |
// IsDownloadAll reports whether the whole bag (vs selected files only) is downloaded.
func (t *Torrent) IsDownloadAll() bool {
	return t.downloadAll
}

// IsDownloadOrdered reports whether pieces are fetched sequentially.
func (t *Torrent) IsDownloadOrdered() bool {
	return t.downloadOrdered
}

// GetUploadStats atomically reads the total-bytes-uploaded counter.
func (t *Torrent) GetUploadStats() uint64 {
	return atomic.LoadUint64(&t.stats.Uploaded)
}

// SetUploadStats atomically overwrites the total-bytes-uploaded counter.
func (t *Torrent) SetUploadStats(val uint64) {
	atomic.StoreUint64(&t.stats.Uploaded, val)
}
191 |
// IsActive reports download/upload activity: download is active while
// globalCtx is alive; upload additionally requires the header to be
// known already.
func (t *Torrent) IsActive() (activeDownload, activeUpload bool) {
	select {
	case <-t.globalCtx.Done():
		return false, false
	default:
		return true, t.activeUpload && t.Header != nil
	}
}

// GetLastVerifiedAt returns whether a verification pass is running now
// and when the last pass finished.
func (t *Torrent) GetLastVerifiedAt() (bool, time.Time) {
	return t.isVerificationInProgress, t.lastVerified
}

// IsActiveRaw is IsActive without the header-known requirement on upload.
func (t *Torrent) IsActiveRaw() (activeDownload, activeUpload bool) {
	select {
	case <-t.globalCtx.Done():
		return false, false
	default:
		return true, t.activeUpload
	}
}

// Stop disables upload and cancels globalCtx, pausing all activity.
func (t *Torrent) Stop() {
	t.activeUpload = false
	t.pause()
}
218 |
219 | func (t *Torrent) Start(withUpload, downloadAll, downloadOrdered bool) (err error) {
220 | t.activeUpload = withUpload
221 |
222 | t.mx.Lock()
223 | defer t.mx.Unlock()
224 |
225 | if d, _ := t.IsActive(); d && t.downloadAll == downloadAll && t.downloadOrdered == downloadOrdered {
226 | return nil
227 | }
228 |
229 | if !t.isVerificationInProgress && t.lastVerified.Before(time.Now().Add(-30*time.Second)) {
230 | t.isVerificationInProgress = true
231 | go func() {
232 | // it will remove corrupted pieces
233 | if err = t.verify(t.db.VerifyOnStartup()); err != nil {
234 | Logger("Verification of", hex.EncodeToString(t.BagID), "failed:", err.Error())
235 | }
236 |
237 | t.mx.Lock()
238 | defer t.mx.Unlock()
239 |
240 | t.lastVerified = time.Now()
241 | t.isVerificationInProgress = false
242 | }()
243 | }
244 |
245 | t.downloadAll = downloadAll
246 | t.downloadOrdered = downloadOrdered
247 |
248 | if t.pause != nil {
249 | t.pause()
250 | }
251 |
252 | t.globalCtx, t.pause = context.WithCancel(context.Background())
253 |
254 | go t.peersManager(t.globalCtx)
255 |
256 | if t.IsCompleted() {
257 | // t.complete()
258 | return nil
259 | }
260 |
261 | currFlag := t.currentDownloadFlag
262 | currPause := t.pause
263 | return t.startDownload(func(event Event) {
264 | if event.Name == EventErr && currFlag == t.currentDownloadFlag {
265 | currPause()
266 | }
267 | })
268 | }
269 |
270 | func (t *Torrent) peersManager(workerCtx context.Context) {
271 | defer Logger("[STORAGE_PEERS] PEER MANAGER STOPPED", "BAG", hex.EncodeToString(t.BagID))
272 |
273 | ticker := time.NewTicker(5 * time.Millisecond)
274 | defer ticker.Stop()
275 |
276 | for {
277 | var updatePieces bool
278 | select {
279 | case <-workerCtx.Done():
280 | t.peersMx.RLock()
281 | peers := make([]*storagePeer, 0, len(t.peers))
282 | for _, peer := range t.peers {
283 | peers = append(peers, peer.peer)
284 | }
285 | t.peersMx.RUnlock()
286 |
287 | for _, peer := range peers {
288 | peer.Close()
289 | }
290 |
291 | return
292 | case <-t.signalNewPieces:
293 | updatePieces = true
294 | case <-ticker.C:
295 | }
296 | ticker.Reset(time.Second)
297 |
298 | t.peersMx.RLock()
299 | peers := make([]*storagePeer, 0, len(t.peers))
300 | for _, peer := range t.peers {
301 | peers = append(peers, peer.peer)
302 | }
303 | t.peersMx.RUnlock()
304 |
305 | wg := sync.WaitGroup{}
306 | for _, peer := range peers {
307 | if atomic.LoadInt32(&peer.sessionInitialized) == 0 {
308 | continue
309 | }
310 |
311 | if atomic.LoadInt32(&peer.updateInitReceived) == 0 && time.Now().Unix()-atomic.LoadInt64(&peer.sessionInitAt) > 45 {
312 | Logger("[STORAGE_PEERS] PEER", hex.EncodeToString(peer.nodeId), "HAS NOT SENT UPDATE INIT, SOMETHING WRONG, CLOSING CONNECTION", "BAG", hex.EncodeToString(t.BagID))
313 | peer.Close()
314 | continue
315 | }
316 |
317 | if updatePieces {
318 | wg.Add(1)
319 | go func() {
320 | defer wg.Done()
321 |
322 | Logger("[STORAGE_PEERS] DOING UPDATE HAVE PIECES FOR PEER", hex.EncodeToString(peer.nodeId), "BAG", hex.EncodeToString(t.BagID))
323 |
324 | if err := peer.updateHavePieces(workerCtx); err != nil && atomic.AddInt32(&peer.fails, 1) > 3 {
325 | Logger("[STORAGE_PEERS] UPDATE HAVE PIECES FAILED FOR PEER", hex.EncodeToString(peer.nodeId), "AND TOO MANY FAILS, CLOSING CONNECTION", "BAG", hex.EncodeToString(t.BagID))
326 | peer.Close()
327 | return
328 | }
329 | atomic.StoreInt32(&peer.fails, 0)
330 | }()
331 | }
332 |
333 | if time.Since(peer.lastPingAt) > 15*time.Second {
334 | peer.lastPingAt = time.Now()
335 | wg.Add(1)
336 | go func() {
337 | defer wg.Done()
338 |
339 | qCtx, cancel := context.WithTimeout(workerCtx, 5*time.Second)
340 | defer cancel()
341 |
342 | if err := peer.ping(qCtx); err != nil && atomic.AddInt32(&peer.fails, 1) > 3 {
343 | Logger("[STORAGE_PEERS] PING FAILED FOR PEER", hex.EncodeToString(peer.nodeId), "AND TOO MANY FAILS, CLOSING CONNECTION", "BAG", hex.EncodeToString(t.BagID))
344 | peer.Close()
345 | return
346 | }
347 | atomic.StoreInt32(&peer.fails, 0)
348 | }()
349 | } else if time.Since(peer.lastNeighboursAt) > 30*time.Second {
350 | peer.lastNeighboursAt = time.Now()
351 | wg.Add(1)
352 | go func() {
353 | defer wg.Done()
354 |
355 | qCtx, cancel := context.WithTimeout(workerCtx, 5*time.Second)
356 | defer cancel()
357 |
358 | nodes, err := peer.findNeighbours(qCtx)
359 | if err != nil && atomic.AddInt32(&peer.fails, 1) > 3 {
360 | Logger("[STORAGE_PEERS] FIND NEIGHBOURS FAILED FOR PEER", hex.EncodeToString(peer.nodeId), "AND TOO MANY FAILS, CLOSING CONNECTION", "BAG", hex.EncodeToString(t.BagID))
361 | peer.Close()
362 | } else if err == nil {
363 | atomic.StoreInt32(&peer.fails, 0)
364 | for _, node := range nodes.List {
365 | t.addNode(node)
366 | }
367 | }
368 | }()
369 | }
370 | }
371 | wg.Wait()
372 | }
373 | }
374 |
375 | func (t *TorrentInfo) PiecesNum() uint32 {
376 | piecesNum := t.FileSize / uint64(t.PieceSize)
377 | if t.FileSize%uint64(t.PieceSize) != 0 {
378 | piecesNum++
379 | }
380 | return uint32(piecesNum)
381 | }
382 |
// getPiece loads the stored piece metadata (proof + start file index) from the db.
func (t *Torrent) getPiece(id uint32) (*PieceInfo, error) {
	return t.db.GetPiece(t.BagID, id)
}
386 |
387 | func (t *Torrent) removePiece(id uint32) error {
388 | i := id / 8
389 | y := id % 8
390 |
391 | t.maskMx.Lock()
392 | t.pieceMask[i] &= ^(1 << y)
393 | t.maskMx.Unlock()
394 |
395 | return t.db.RemovePiece(t.BagID, id)
396 | }
397 |
398 | func (t *Torrent) setPiece(id uint32, p *PieceInfo) error {
399 | i := id / 8
400 | y := id % 8
401 |
402 | t.maskMx.Lock()
403 | t.pieceMask[i] |= 1 << y
404 | t.maskMx.Unlock()
405 |
406 | if err := t.db.SetPiece(t.BagID, id, p); err != nil {
407 | return err
408 | }
409 |
410 | // notify peers about our new pieces
411 | select {
412 | case t.signalNewPieces <- struct{}{}:
413 | default:
414 | }
415 |
416 | return nil
417 | }
418 |
419 | func (t *Torrent) PiecesMask() []byte {
420 | t.maskMx.RLock()
421 | defer t.maskMx.RUnlock()
422 |
423 | return append([]byte{}, t.pieceMask...)
424 | }
425 |
// IsCompleted reports whether every piece of the bag is present locally.
// It checks per-byte popcounts of the mask: every byte must contain 8
// ones except the last, which must contain num%8 (or 8 when the piece
// count is byte-aligned). An empty/unloaded mask counts as not complete.
// NOTE(review): only bit counts are checked, not positions; assumes the
// mask is only produced by setPiece/PiecesMask so stray bits cannot
// appear — confirm.
func (t *Torrent) IsCompleted() bool {
	mask := t.PiecesMask()
	if len(mask) == 0 {
		return false
	}

	num := t.Info.PiecesNum()
	for i, b := range mask {
		ones := 8
		if i == len(mask)-1 {
			if ones = int(num % 8); ones == 0 {
				ones = 8
			}
		}

		if bits.OnesCount8(b) != ones {
			return false
		}
	}
	return true
}
447 |
448 | func (t *Torrent) DownloadedPiecesNum() int {
449 | mask := t.PiecesMask()
450 |
451 | pieces := 0
452 | for _, b := range mask {
453 | pieces += bits.OnesCount8(b)
454 | }
455 | return pieces
456 | }
457 |
// LoadActiveFilesIDs restores the set of selected file ids from the db.
func (t *Torrent) LoadActiveFilesIDs() error {
	files, err := t.db.GetActiveFiles(t.BagID)
	if err != nil {
		return fmt.Errorf("failed to load active files from db: %w", err)
	}
	t.activeFiles = files
	return nil
}

// GetActiveFilesIDs returns the currently selected file ids.
// NOTE(review): the slice is returned without locking or copying;
// assumes callers treat it as read-only — confirm.
func (t *Torrent) GetActiveFilesIDs() []uint32 {
	return t.activeFiles
}
470 |
// SetActiveFilesIDs persists the selection of files to download and
// restarts the download with that selection. Choosing specific files
// implicitly turns off download-all mode.
func (t *Torrent) SetActiveFilesIDs(ids []uint32) error {
	t.mx.Lock()
	defer t.mx.Unlock()

	if err := t.db.SetActiveFiles(t.BagID, ids); err != nil {
		return fmt.Errorf("failed to store active files in db: %w", err)
	}

	t.downloadAll = false
	t.activeFiles = ids
	// capture current flag+pause so a stale error callback (from a download
	// that was since replaced) cannot pause the new download
	currFlag := t.currentDownloadFlag
	currPause := t.pause
	return t.startDownload(func(event Event) {
		if event.Name == EventErr && currFlag == t.currentDownloadFlag {
			currPause()
		}
	})
}
489 |
490 | func (t *Torrent) SetActiveFiles(names []string) error {
491 | if err := t.calcFileIndexes(); err != nil {
492 | return err
493 | }
494 |
495 | ids := make([]uint32, 0, len(names))
496 | for _, name := range names {
497 | val, ok := t.filesIndex[name]
498 | if !ok {
499 | return fmt.Errorf("file %s is not exist in torrent", name)
500 | }
501 | ids = append(ids, val)
502 | }
503 | return t.SetActiveFilesIDs(ids)
504 | }
505 |
506 | func (t *Torrent) GetPiece(id uint32) (*Piece, error) {
507 | select {
508 | case <-t.globalCtx.Done():
509 | return nil, fmt.Errorf("torrent paused")
510 | default:
511 | }
512 | return t.getPieceInternal(id, false)
513 | }
514 |
// getPieceInternal reconstructs the raw bytes of piece id by copying from
// the serialized header and/or reading the on-disk files that the piece
// spans, optionally verifying the result against the stored merkle proof.
func (t *Torrent) getPieceInternal(id uint32, verify bool) (*Piece, error) {
	if id >= t.Info.PiecesNum() {
		return nil, fmt.Errorf("piece %d not found, pieces count: %d", id, t.Info.PiecesNum())
	}

	piece, err := t.getPiece(id)
	if err != nil {
		return nil, fmt.Errorf("piece %d is not downloaded (%w)", id, err)
	}

	offset := 0
	block := make([]byte, t.Info.PieceSize)

	var headerData []byte
	fileFrom := piece.StartFileIndex
	for {
		// bag layout: serialized header first, then concatenated file data
		isHdr := t.Info.HeaderSize > uint64(id)*uint64(t.Info.PieceSize)+uint64(offset)

		// header
		if isHdr {
			if headerData == nil {
				// serialize lazily; only header pieces need it
				headerData, err = tl.Serialize(t.Header, true)
				if err != nil {
					return nil, fmt.Errorf("failed to serialize header: %w", err)
				}
			}
			// NOTE(review): id*t.Info.PieceSize is uint32 arithmetic; assumes
			// the header region stays within uint32 range — confirm.
			offset += copy(block[offset:], headerData[id*t.Info.PieceSize:])
		} else {
			f, err := t.GetFileOffsetsByID(fileFrom)
			if err != nil {
				return nil, fmt.Errorf("offsets for %d %d are not exists (%w)", id, fileFrom, err)
			}

			path := t.Path + "/" + string(t.Header.DirName) + "/" + f.Name
			read := func(path string, from int64) error {
				// io.EOF is fine: a short file simply contributes fewer bytes
				n, err := t.db.GetFS().GetController().AcquireRead(path, block[offset:], from)
				if err != nil && err != io.EOF {
					return err
				}

				offset += n
				return nil
			}

			// offset of this piece inside the current file; the piece that
			// starts the file begins reading at 0
			var fileOff int64 = 0
			if f.FromPiece != id {
				fileOff = int64(id-f.FromPiece)*int64(t.Info.PieceSize) - int64(f.FromPieceOffset)
			}

			if err = read(path, fileOff); err != nil {
				return nil, err
			}
			fileFrom++

			if fileFrom >= uint32(len(t.Header.DataIndex)) {
				// end reached
				break
			}
		}

		if offset == int(t.Info.PieceSize) {
			break
		}
	}

	// the last piece of the bag may be shorter than PieceSize
	if offset > 0 {
		block = block[:offset]
	}

	if verify {
		proof, err := cell.FromBOC(piece.Proof)
		if err != nil {
			return nil, fmt.Errorf("failed to parse proof cell: %w", err)
		}

		// proof must be rooted at the bag's merkle root hash
		if err = cell.CheckProof(proof, t.Info.RootHash); err != nil {
			return nil, fmt.Errorf("proof check of piece %d failed: %w, %s", id, err, proof.Dump())
		}

		// and the reconstructed bytes must match the proof branch
		if err = t.checkProofBranch(proof, block, id); err != nil {
			return nil, fmt.Errorf("piece verification failed: %w", err)
		}
	}

	return &Piece{
		Proof: piece.Proof,
		Data:  block,
	}, nil
}
604 |
605 | func (t *Torrent) GetPieceProof(id uint32) ([]byte, error) {
606 | if id >= t.Info.PiecesNum() {
607 | return nil, fmt.Errorf("piece %d not found, pieces count: %d", id, t.Info.PiecesNum())
608 | }
609 |
610 | piece, err := t.getPieceInternal(id, true)
611 | if err != nil {
612 | return nil, fmt.Errorf("piece %d error: %w", id, err)
613 | }
614 |
615 | return piece.Proof, nil
616 | }
617 |
// SetInfoStats fills t.Info from freshly created bag parameters; the
// header hash is computed here from the serialized header bytes.
func (t *Torrent) SetInfoStats(pieceSize uint32, headerData, rootHash []byte, fileSize, headerSize uint64, description string) {
	t.Info = &TorrentInfo{
		PieceSize:  pieceSize,
		FileSize:   fileSize,
		RootHash:   rootHash,
		HeaderSize: headerSize,
		HeaderHash: calcHash(headerData),
		Description: tlb.Text{
			MaxFirstChunkSize: tlb.MaxTextChunkSize - 84, // 84 = size of prev data in bytes
			Value:             description,
		},
	}
}
631 |
632 | func (t *Torrent) transmitTimeout() time.Duration {
633 | timeout := time.Duration(t.Info.PieceSize/(256<<10)) * time.Second
634 | if timeout < 7*time.Second {
635 | return 7 * time.Second
636 | } else if timeout > 60*time.Second {
637 | return 60 * time.Second
638 | }
639 | return timeout
640 | }
641 |
--------------------------------------------------------------------------------
/storage/torrent_test.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | "testing"
5 | )
6 |
// TestTorrent_IsCompleted exercises completion detection around the
// last-byte popcount: the mask holds 18 set bits (0xff 0xff 0xC0), so
// only a bag of exactly 18 one-byte pieces must report complete; any
// larger piece count must not.
func TestTorrent_IsCompleted(t *testing.T) {
	tr := Torrent{
		pieceMask: []byte{0xff, 0xff, 0xC0}, // 18
	}

	for i := 18; i < 24; i++ {
		tr.Info = &TorrentInfo{
			FileSize:  uint64(i),
			PieceSize: 1,
		}

		if i == 18 {
			if !tr.IsCompleted() {
				t.Fatal("should be completed", i)
			}
		} else {
			if tr.IsCompleted() {
				t.Fatal("should be not completed", i)
			}
		}
	}
}
29 |
--------------------------------------------------------------------------------