├── .gitattributes ├── .github └── workflows │ ├── attachBuilds.yml │ └── test.yml ├── .gitignore ├── LICENSE ├── README.md ├── bencode ├── check.go ├── decode.go ├── decode_test.go ├── encode.go ├── encode_test.go └── infohash.go ├── client ├── Deluge.go ├── Deluge_test.go ├── Transmission.go ├── interface.go ├── qBittorrent.go └── uTorrent.go ├── cmd └── main.go ├── config.example.yml ├── feed ├── atom.go ├── feed.go └── rss.go ├── filter ├── contentSize.go ├── interface.go └── regexp.go ├── go.mod ├── go.sum ├── history.go ├── logLevel.go ├── receiver ├── client.go ├── download.go └── interface.go ├── setting ├── editTorrent.go ├── types.go └── types_test.go ├── t-rss.go ├── t-rss_test.go ├── task.go ├── ticker ├── rss.go └── types.go ├── unit ├── duration.go ├── duration_test.go ├── shave.go ├── size.go ├── size_test.go └── time.go └── version.go /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/workflows/attachBuilds.yml: -------------------------------------------------------------------------------- 1 | on: 2 | release: 3 | types: 4 | - created 5 | 6 | jobs: 7 | build: 8 | name: Attach Builds to Release 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v2 12 | 13 | - name: Set up Go LATEST 14 | run: | 15 | curl -fsSL https://raw.githubusercontent.com/capric98/myenv/master/Go/Linux/install.sh | bash 16 | 17 | - name: Build 18 | env: 19 | CGO_ENABLED: 0 20 | run: | 21 | /usr/local/go/bin/go version 22 | export GOOS=linux 23 | export GOARCH=arm 24 | /usr/local/go/bin/go build -o t-rss cmd/main.go 25 | zip t-rss_${GOOS}_${GOARCH}.zip t-rss && rm -rf t-rss 26 | export GOOS=linux 27 | export GOARCH=arm64 28 | /usr/local/go/bin/go build -o t-rss cmd/main.go 29 | zip t-rss_${GOOS}_${GOARCH}.zip t-rss && rm -rf t-rss 30 | export GOOS=linux 31 | export GOARCH=amd64 32 | /usr/local/go/bin/go build -o t-rss cmd/main.go 33 | zip t-rss_${GOOS}_${GOARCH}.zip t-rss && rm -rf t-rss 34 | export GOOS=darwin 35 | export GOARCH=amd64 36 | /usr/local/go/bin/go build -o t-rss cmd/main.go 37 | zip t-rss_${GOOS}_${GOARCH}.zip t-rss && rm -rf t-rss 38 | export GOOS=darwin 39 | export GOARCH=arm64 40 | /usr/local/go/bin/go build -o t-rss cmd/main.go 41 | zip t-rss_${GOOS}_${GOARCH}.zip t-rss && rm -rf t-rss 42 | export GOOS=windows 43 | export GOARCH=amd64 44 | /usr/local/go/bin/go build -o t-rss.exe cmd/main.go 45 | zip t-rss_${GOOS}_${GOARCH}.zip t-rss.exe && rm -rf t-rss.exe 46 | 47 | - uses: shogo82148/actions-upload-release-asset@v1 48 | with: 49 | upload_url: ${{ github.event.release.upload_url }} 50 | asset_path: t-rss*.zip 51 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Code Test 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | 7 | jobs: 8 | 9 | build: 10 | name: Code Test 11 | runs-on: ubuntu-latest 12 | steps: 13 | 14 | - name: Set up Go LATEST 15 | run: | 16 | curl -fsSL https://raw.githubusercontent.com/capric98/myenv/master/Go/Linux/install.sh | bash 17 | 18 | - name: Check out code into the Go module directory 19 | uses: actions/checkout@v2 20 | 21 | - name: Test 22 | run: /usr/local/go/bin/go test ./... 
23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # macOS 9 | # General 10 | .DS_Store 11 | .AppleDouble 12 | .LSOverride 13 | 14 | # Icon must end with two \r 15 | Icon 16 | 17 | # Thumbnails 18 | ._* 19 | 20 | # Files that might appear in the root of a volume 21 | .DocumentRevisions-V100 22 | .fseventsd 23 | .Spotlight-V100 24 | .TemporaryItems 25 | .Trashes 26 | .VolumeIcon.icns 27 | .com.apple.timemachine.donotpresent 28 | 29 | # Directories potentially created on remote AFP share 30 | .AppleDB 31 | .AppleDesktop 32 | Network Trash Folder 33 | Temporary Items 34 | .apdisk 35 | 36 | # Test binary, build with `go test -c` 37 | *.test 38 | 39 | *.out 40 | *.torrent 41 | *.a 42 | *.yml 43 | !config.example.yml 44 | *.conf 45 | 46 | .t-rss_History/* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | 1. Definitions 5 | -------------- 6 | 7 | 1.1. "Contributor" 8 | means each individual or legal entity that creates, contributes to 9 | the creation of, or owns Covered Software. 10 | 11 | 1.2. "Contributor Version" 12 | means the combination of the Contributions of others (if any) used 13 | by a Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | means Covered Software of a particular Contributor. 17 | 18 | 1.4. "Covered Software" 19 | means Source Code Form to which the initial Contributor has attached 20 | the notice in Exhibit A, the Executable Form of such Source Code 21 | Form, and Modifications of such Source Code Form, in each case 22 | including portions thereof. 23 | 24 | 1.5. "Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. "Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. "Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. 
"Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. "Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. "Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. "You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. Limitations on Grant Scope 111 | 112 | The licenses granted in this Section 2 are the only rights granted under 113 | this License. No additional rights or licenses will be implied from the 114 | distribution or licensing of Covered Software under this License. 115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 116 | Contributor: 117 | 118 | (a) for any code that a Contributor has removed from Covered Software; 119 | or 120 | 121 | (b) for infringements caused by: (i) Your and any other third party's 122 | modifications of Covered Software, or (ii) the combination of its 123 | Contributions with other software (except as part of its Contributor 124 | Version); or 125 | 126 | (c) under Patent Claims infringed by Covered Software in the absence of 127 | its Contributions. 128 | 129 | This License does not grant any rights in the trademarks, service marks, 130 | or logos of any Contributor (except as may be necessary to comply with 131 | the notice requirements in Section 3.4). 132 | 133 | 2.4. 
Subsequent Licenses 134 | 135 | No Contributor makes additional grants as a result of Your choice to 136 | distribute the Covered Software under a subsequent version of this 137 | License (see Section 10.2) or under the terms of a Secondary License (if 138 | permitted under the terms of Section 3.3). 139 | 140 | 2.5. Representation 141 | 142 | Each Contributor represents that the Contributor believes its 143 | Contributions are its original creation(s) or it has sufficient rights 144 | to grant the rights to its Contributions conveyed by this License. 145 | 146 | 2.6. Fair Use 147 | 148 | This License is not intended to limit any rights You have under 149 | applicable copyright doctrines of fair use, fair dealing, or other 150 | equivalents. 151 | 152 | 2.7. Conditions 153 | 154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 155 | in Section 2.1. 156 | 157 | 3. Responsibilities 158 | ------------------- 159 | 160 | 3.1. Distribution of Source Form 161 | 162 | All distribution of Covered Software in Source Code Form, including any 163 | Modifications that You create or to which You contribute, must be under 164 | the terms of this License. You must inform recipients that the Source 165 | Code Form of the Covered Software is governed by the terms of this 166 | License, and how they can obtain a copy of this License. You may not 167 | attempt to alter or restrict the recipients' rights in the Source Code 168 | Form. 169 | 170 | 3.2. Distribution of Executable Form 171 | 172 | If You distribute Covered Software in Executable Form then: 173 | 174 | (a) such Covered Software must also be made available in Source Code 175 | Form, as described in Section 3.1, and You must inform recipients of 176 | the Executable Form how they can obtain a copy of such Source Code 177 | Form by reasonable means in a timely manner, at a charge no more 178 | than the cost of distribution to the recipient; and 179 | 180 | (b) You may distribute such Executable Form under the terms of this 181 | License, or sublicense it under different terms, provided that the 182 | license for the Executable Form does not attempt to limit or alter 183 | the recipients' rights in the Source Code Form under this License. 184 | 185 | 3.3. Distribution of a Larger Work 186 | 187 | You may create and distribute a Larger Work under terms of Your choice, 188 | provided that You also comply with the requirements of this License for 189 | the Covered Software. If the Larger Work is a combination of Covered 190 | Software with a work governed by one or more Secondary Licenses, and the 191 | Covered Software is not Incompatible With Secondary Licenses, this 192 | License permits You to additionally distribute such Covered Software 193 | under the terms of such Secondary License(s), so that the recipient of 194 | the Larger Work may, at their option, further distribute the Covered 195 | Software under the terms of either this License or such Secondary 196 | License(s). 197 | 198 | 3.4. Notices 199 | 200 | You may not remove or alter the substance of any license notices 201 | (including copyright notices, patent notices, disclaimers of warranty, 202 | or limitations of liability) contained within the Source Code Form of 203 | the Covered Software, except that You may alter any license notices to 204 | the extent required to remedy known factual inaccuracies. 205 | 206 | 3.5. 
Application of Additional Terms 207 | 208 | You may choose to offer, and to charge a fee for, warranty, support, 209 | indemnity or liability obligations to one or more recipients of Covered 210 | Software. However, You may do so only on Your own behalf, and not on 211 | behalf of any Contributor. You must make it absolutely clear that any 212 | such warranty, support, indemnity, or liability obligation is offered by 213 | You alone, and You hereby agree to indemnify every Contributor for any 214 | liability incurred by such Contributor as a result of warranty, support, 215 | indemnity or liability terms You offer. You may include additional 216 | disclaimers of warranty and limitations of liability specific to any 217 | jurisdiction. 218 | 219 | 4. Inability to Comply Due to Statute or Regulation 220 | --------------------------------------------------- 221 | 222 | If it is impossible for You to comply with any of the terms of this 223 | License with respect to some or all of the Covered Software due to 224 | statute, judicial order, or regulation then You must: (a) comply with 225 | the terms of this License to the maximum extent possible; and (b) 226 | describe the limitations and the code they affect. Such description must 227 | be placed in a text file included with all distributions of the Covered 228 | Software under this License. Except to the extent prohibited by statute 229 | or regulation, such description must be sufficiently detailed for a 230 | recipient of ordinary skill to be able to understand it. 231 | 232 | 5. Termination 233 | -------------- 234 | 235 | 5.1. The rights granted under this License will terminate automatically 236 | if You fail to comply with any of its terms. However, if You become 237 | compliant, then the rights granted under this License from a particular 238 | Contributor are reinstated (a) provisionally, unless and until such 239 | Contributor explicitly and finally terminates Your grants, and (b) on an 240 | ongoing basis, if such Contributor fails to notify You of the 241 | non-compliance by some reasonable means prior to 60 days after You have 242 | come back into compliance. Moreover, Your grants from a particular 243 | Contributor are reinstated on an ongoing basis if such Contributor 244 | notifies You of the non-compliance by some reasonable means, this is the 245 | first time You have received notice of non-compliance with this License 246 | from such Contributor, and You become compliant prior to 30 days after 247 | Your receipt of the notice. 248 | 249 | 5.2. If You initiate litigation against any entity by asserting a patent 250 | infringement claim (excluding declaratory judgment actions, 251 | counter-claims, and cross-claims) alleging that a Contributor Version 252 | directly or indirectly infringes any patent, then the rights granted to 253 | You by any and all Contributors for the Covered Software under Section 254 | 2.1 of this License shall terminate. 255 | 256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 257 | end user license agreements (excluding distributors and resellers) which 258 | have been validly granted by You or Your distributors under this License 259 | prior to termination shall survive termination. 260 | 261 | ************************************************************************ 262 | * * 263 | * 6. 
Disclaimer of Warranty * 264 | * ------------------------- * 265 | * * 266 | * Covered Software is provided under this License on an "as is" * 267 | * basis, without warranty of any kind, either expressed, implied, or * 268 | * statutory, including, without limitation, warranties that the * 269 | * Covered Software is free of defects, merchantable, fit for a * 270 | * particular purpose or non-infringing. The entire risk as to the * 271 | * quality and performance of the Covered Software is with You. * 272 | * Should any Covered Software prove defective in any respect, You * 273 | * (not any Contributor) assume the cost of any necessary servicing, * 274 | * repair, or correction. This disclaimer of warranty constitutes an * 275 | * essential part of this License. No use of any Covered Software is * 276 | * authorized under this License except under this disclaimer. * 277 | * * 278 | ************************************************************************ 279 | 280 | ************************************************************************ 281 | * * 282 | * 7. Limitation of Liability * 283 | * -------------------------- * 284 | * * 285 | * Under no circumstances and under no legal theory, whether tort * 286 | * (including negligence), contract, or otherwise, shall any * 287 | * Contributor, or anyone who distributes Covered Software as * 288 | * permitted above, be liable to You for any direct, indirect, * 289 | * special, incidental, or consequential damages of any character * 290 | * including, without limitation, damages for lost profits, loss of * 291 | * goodwill, work stoppage, computer failure or malfunction, or any * 292 | * and all other commercial damages or losses, even if such party * 293 | * shall have been informed of the possibility of such damages. This * 294 | * limitation of liability shall not apply to liability for death or * 295 | * personal injury resulting from such party's negligence to the * 296 | * extent applicable law prohibits such limitation. Some * 297 | * jurisdictions do not allow the exclusion or limitation of * 298 | * incidental or consequential damages, so this exclusion and * 299 | * limitation may not apply to You. * 300 | * * 301 | ************************************************************************ 302 | 303 | 8. Litigation 304 | ------------- 305 | 306 | Any litigation relating to this License may be brought only in the 307 | courts of a jurisdiction where the defendant maintains its principal 308 | place of business and such litigation shall be governed by laws of that 309 | jurisdiction, without reference to its conflict-of-law provisions. 310 | Nothing in this Section shall prevent a party's ability to bring 311 | cross-claims or counter-claims. 312 | 313 | 9. Miscellaneous 314 | ---------------- 315 | 316 | This License represents the complete agreement concerning the subject 317 | matter hereof. If any provision of this License is held to be 318 | unenforceable, such provision shall be reformed only to the extent 319 | necessary to make it enforceable. Any law or regulation which provides 320 | that the language of a contract shall be construed against the drafter 321 | shall not be used to construe this License against a Contributor. 322 | 323 | 10. Versions of the License 324 | --------------------------- 325 | 326 | 10.1. New Versions 327 | 328 | Mozilla Foundation is the license steward. Except as provided in Section 329 | 10.3, no one other than the license steward has the right to modify or 330 | publish new versions of this License. 
Each version will be given a
331 | distinguishing version number.
332 | 
333 | 10.2. Effect of New Versions
334 | 
335 | You may distribute the Covered Software under the terms of the version
336 | of the License under which You originally received the Covered Software,
337 | or under the terms of any subsequent version published by the license
338 | steward.
339 | 
340 | 10.3. Modified Versions
341 | 
342 | If you create software not governed by this License, and you want to
343 | create a new license for such software, you may create and use a
344 | modified version of this License if you rename the license and remove
345 | any references to the name of the license steward (except to note that
346 | such modified license differs from this License).
347 | 
348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary
349 | Licenses
350 | 
351 | If You choose to distribute Source Code Form that is Incompatible With
352 | Secondary Licenses under the terms of this version of the License, the
353 | notice described in Exhibit B of this License must be attached.
354 | 
355 | Exhibit A - Source Code Form License Notice
356 | -------------------------------------------
357 | 
358 | This Source Code Form is subject to the terms of the Mozilla Public
359 | License, v. 2.0. If a copy of the MPL was not distributed with this
360 | file, You can obtain one at http://mozilla.org/MPL/2.0/.
361 | 
362 | If it is not possible or desirable to put the notice in a particular
363 | file, then You may include the notice in a location (such as a LICENSE
364 | file in a relevant directory) where a recipient would be likely to look
365 | for such a notice.
366 | 
367 | You may add additional accurate notices of copyright ownership.
368 | 
369 | Exhibit B - "Incompatible With Secondary Licenses" Notice
370 | ---------------------------------------------------------
371 | 
372 | This Source Code Form is "Incompatible With Secondary Licenses", as
373 | defined by the Mozilla Public License, v. 2.0.
374 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## t-rss
2 | t-rss is a program that automatically grabs .torrent files from RSS feeds. Compared with flexget's rich feature set, t-rss is intentionally minimal, with a smaller binary and lower resource usage. It can automatically add the torrents it picks up from RSS to a BitTorrent client (currently qBittorrent and Deluge are supported; the Deluge client has not been fully tested). Fetching release information from IRC may be added in the future (though I no longer feel like writing the IRC part).
3 | 
4 | Starting with v0.6.x the configuration file format has changed and is no longer compatible with earlier versions. Since the stability of v0.6.x has not yet stood the test of time, regular users are advised to use v0.5.4beta and write their configuration against the old README:
5 | 
6 | https://github.com/capric98/t-rss/blob/d8b0c0be0acf251b7e24c183039dc61b39cce80c/README-zh_CN.md
7 | 
8 | ## Installation
9 | First, [download](https://github.com/capric98/t-rss/releases) the archive matching your OS/architecture from the latest pre-release or release.
10 | 
11 | Unpacking it yields a single binary; if it lacks the executable (x) permission, add it yourself.
12 | 
13 | Write a configuration file and simply run the binary. Command-line flags are documented by `-help` and omitted here.
14 | 
15 | ## Configuration
16 | Fields marked with * are optional, but at least one `receiver` must be configured, otherwise the program will fetch the RSS feeds and then do nothing with them.
17 | 
18 | 
19 | config.yml (format under revision)
20 | 
21 | ```yaml
22 | GLOBAL:
23 |   log_file: # delete this to output log to stderr
24 |   history:
25 |     max_num: 300 # max history record nums
26 |     save_to: # default: ./.t-rss_History
27 |   timeout: 1m # {int}s/m/h/d
28 | 
29 | TASKS:
30 |   Name_of_task0:
31 |     rss:
32 |       url: https://example.com
33 |       method: GET #*delete this unless you know what it means
34 |       headers: #*if needed
35 |         Cookie: something
36 |         Key: Value
37 |       interval: 10s # {int}s/m/h/d
38 |     filter:
39 |       content_size:
40 |         min: 10MB340KB
41 |         max: 120G10MB
42 |       regexp:
43 |         accept:
44 |           - A
45 |         reject:
46 |           - B
47 |       quota:
48 |         num: 65535
49 |         size: 100G
50 |     edit:
51 |       tracker:
52 |         delete:
53 |           - share
54 |         add:
55 |           - http(s)://example.com/
56 |     receiver:
57 |       delay: 12s
58 |       save_path: /home/WatchDir/
59 |       client:
60 |         Name_of_client0:
61 |           type: qBittorrent
62 |           url: http://127.0.0.1
63 |           username: admin
64 |           password: adminadmin
65 |           dlLimit:
66 |           upLimit:
67 |           paused: true
68 |           savepath: /home/Downloads
69 |         Name_of_client1:
70 |           type: Deluge
71 |           host: 127.0.0.1:1234
72 |           username:
73 |           password:
74 | 
75 |   Name_of_task1:
76 |     rss:
77 |       url: https://example.com
78 |     receiver:
79 |       save_path: /home/WatchDir/
80 |   Name_of_task2:
81 |     rss:
82 |       url: https://example.com
83 |     receiver:
84 |       save_path: /home/WatchDir/
85 | 
86 | ```
87 | 
88 | 
89 | 
90 | ### Running
91 | Just run the binary in your RSS working directory. By default it uses config.yml in the same directory as its configuration file, and history is kept in the `.t-rss_History` directory alongside it. You can also run it under nohup, register it as a service, and so on.
92 | 
93 | ### TODO
94 | * Rewrite the client part
95 | * Rewrite/redesign the bencode part
96 | * Increase test coverage
97 | 
98 | [go-yaml](https://github.com/go-yaml/yaml)
99 | 
100 | [go-rencode](https://github.com/gdm85/go-rencode)
101 | 
102 | [logrus](https://github.com/sirupsen/logrus)
103 | 
104 | [go-colorable](https://github.com/mattn/go-colorable)
105 | 
--------------------------------------------------------------------------------
/bencode/check.go:
--------------------------------------------------------------------------------
1 | package bencode
2 | 
3 | // Dictionary:
4 | // 1. All keys must be byte strings.
5 | // 2. All keys must appear in lexicographical order.
6 | 
7 | func (body *Body) Check() (f bool) {
8 | 	f = true
9 | 
10 | 	if body.btype == DictType {
11 | 		if len(body.dict) == 0 {
12 | 			return false
13 | 		}
14 | 		f = f && body.dict[0].value.Check()
15 | 		for i := 1; i < len(body.dict); i++ {
16 | 			if string(body.dict[i].key) < string(body.dict[i-1].key) {
17 | 				return false
18 | 			}
19 | 			if body.dict[i].value.btype == DictType {
20 | 				f = f && body.dict[i].value.Check()
21 | 			}
22 | 		}
23 | 	}
24 | 	return
25 | }
26 | 
--------------------------------------------------------------------------------
/bencode/decode.go:
--------------------------------------------------------------------------------
1 | /*
2 | bep_0012: announce-list
3 | bep_0030: Merkle hash torrent extension.
4 | */
5 | package bencode
6 | 
7 | import (
8 | 	"errors"
9 | )
10 | 
11 | const (
12 | 	Unknown    = -100
13 | 	DorLEnd    = -1 // Dictionary or List end mark.
14 | 	DictType   = 0
15 | 	ListType   = 1
16 | 	IntValue   = 2
17 | 	ByteString = 3 // Actually byte string.
18 | 	MaxDepth   = 10
19 | )
20 | 
21 | var (
22 | 	ErrEncodeDepthTooGreat = errors.New("bencode: Bencode depth greater than 10, which is abnormal!")
23 | 	ErrInvalidDictKey      = errors.New("bencode: Dictionary's key must be a byte string!")
24 | 	ErrTypeFrom            = errors.New("bencode: Cannot add a body to an int or byte string.")
25 | 	ErrTooManyEnd          = errors.New("bencode: Too many end marks.")
26 | )
27 | 
28 | type kvBody struct {
29 | 	value *Body
30 | 	key   []byte
31 | }
32 | 
33 | type Body struct {
34 | 	btype   int
35 | 	value   int64
36 | 	byteStr []byte
37 | 	dict    []kvBody // nil key List -> Dict
38 | }
39 | 
40 | func decodepart(data []byte) (typemark int, offset int, value int64, e error) {
41 | 	defer func() {
42 | 		if p := recover(); p != nil {
43 | 			e = p.(error)
44 | 		}
45 | 	}()
46 | 
47 | 	offset = 0
48 | 	value = 0
49 | 
50 | 	switch data[offset] {
51 | 	case 'd':
52 | 		// Dictionary start.
53 | 		typemark = DictType
54 | 	case 'l':
55 | 		// List start.
56 | 		typemark = ListType
57 | 	case 'e':
58 | 		// Dictionary or List end.
59 | 		typemark = DorLEnd
60 | 	case 'i':
61 | 		// Integer.
62 | 		offset++
63 | 		sgn := int64(1)
64 | 		if data[offset] == '-' {
65 | 			offset++
66 | 			sgn = -1
67 | 		}
68 | 		for data[offset] != 'e' {
69 | 			value = value*10 + int64(data[offset]-'0')
70 | 			offset++
71 | 		}
72 | 		value = sgn * value
73 | 		typemark = IntValue
74 | 	default:
75 | 		// Byte string.
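		// Encoded as <decimal length>:<raw bytes>, e.g. "4:spam".
		// The loop below only accumulates the decimal length into value;
		// Decode() later uses that length to slice the bytes out of data.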
76 | for data[offset] != ':' { 77 | value = value*10 + int64(data[offset]-'0') 78 | offset++ 79 | } 80 | typemark = ByteString 81 | } 82 | 83 | return 84 | } 85 | 86 | func Decode(data []byte) (result []*Body, e error) { 87 | defer func() { 88 | if p := recover(); p != nil { 89 | e = p.(error) 90 | } 91 | }() 92 | 93 | var tmp *Body 94 | var offset, pos int 95 | stack := make([]*Body, MaxDepth+1) 96 | length := len(data) 97 | 98 | stack[0] = &Body{ 99 | btype: ListType, 100 | dict: make([]kvBody, 0, 1), 101 | } 102 | var lastString []byte 103 | 104 | for offset < length { 105 | mark, shift, value, err := decodepart(data[offset:]) 106 | if err != nil { 107 | return nil, err 108 | } 109 | 110 | switch mark { 111 | case DorLEnd: 112 | pos-- 113 | lastString = nil 114 | case DictType: 115 | tmp = &Body{ 116 | btype: DictType, 117 | dict: make([]kvBody, 0, 2), 118 | } 119 | case ListType: 120 | tmp = &Body{ 121 | btype: ListType, 122 | dict: make([]kvBody, 0, 4), 123 | } 124 | case IntValue: 125 | tmp = &Body{ 126 | btype: IntValue, 127 | value: value, 128 | } 129 | case ByteString: 130 | offset += shift 131 | shift = int(value) 132 | tmp = &Body{ 133 | btype: ByteString, 134 | byteStr: data[offset+1 : offset+int(value)+1], 135 | } 136 | } 137 | 138 | if mark != DorLEnd { 139 | switch stack[pos].btype { 140 | case DictType: 141 | if lastString == nil { 142 | if tmp.btype == ByteString { 143 | lastString = tmp.byteStr 144 | } else { 145 | e = ErrInvalidDictKey 146 | } 147 | 148 | } else { 149 | stack[pos].dict = append(stack[pos].dict, kvBody{ 150 | key: lastString, 151 | value: tmp, 152 | }) 153 | lastString = nil 154 | } 155 | case ListType: 156 | stack[pos].dict = append(stack[pos].dict, kvBody{value: tmp}) 157 | default: 158 | e = ErrTypeFrom 159 | } 160 | 161 | if tmp.btype < IntValue { 162 | (pos)++ 163 | stack[pos] = tmp 164 | } 165 | } 166 | 167 | offset += shift + 1 168 | if pos > MaxDepth { 169 | e = ErrEncodeDepthTooGreat 170 | return 171 | } 172 | if pos < 0 { 173 | e = ErrTooManyEnd 174 | return nil, ErrTooManyEnd 175 | } 176 | if e != nil { 177 | return 178 | } 179 | } 180 | 181 | result = make([]*Body, len(stack[0].dict)) 182 | for i := 0; i < len(stack[0].dict); i++ { 183 | result[i] = stack[0].dict[i].value 184 | } 185 | return 186 | } 187 | 188 | func (body *Body) Type() int { 189 | return body.btype 190 | } 191 | 192 | func (body *Body) Value() int64 { 193 | if body.btype == IntValue { 194 | return body.value 195 | } 196 | return Unknown 197 | } 198 | 199 | func (body *Body) BStr() []byte { 200 | if body.btype == ByteString { 201 | return body.byteStr 202 | } 203 | return nil 204 | } 205 | 206 | func (body *Body) Dict(key string) *Body { 207 | if body.btype != DictType { 208 | return nil 209 | } 210 | pos := body.findpos(key) 211 | if pos == -1 { 212 | return nil 213 | } 214 | return body.dict[pos].value 215 | } 216 | 217 | func (body *Body) findpos(k string) int { 218 | dict := body.dict 219 | dlen := len(dict) - 1 220 | l := 0 221 | r := dlen 222 | var m int 223 | var key string 224 | for { 225 | m = (l + r) / 2 226 | if m < 0 || m > dlen || l > r { 227 | return -1 228 | } 229 | key = string(dict[m].key) 230 | if key == k { 231 | return m 232 | } 233 | if key > k { 234 | r = m - 1 235 | } else { 236 | l = m + 1 237 | } 238 | } 239 | } 240 | 241 | func (body *Body) DictN(n int) (k string, b *Body) { 242 | if body.btype != DictType || len(body.dict) <= n { 243 | return 244 | } 245 | return string(body.dict[n].key), body.dict[n].value 246 | } 247 | 248 | func (body *Body) 
List(n int) *Body { 249 | if body.btype != ListType || len(body.dict) <= n { 250 | return nil 251 | } 252 | return body.dict[n].value 253 | } 254 | 255 | func (body *Body) Len() int { 256 | if body == nil { 257 | return 0 258 | } 259 | if body.btype == ListType || body.btype == DictType { 260 | return len(body.dict) 261 | } 262 | return 0 263 | } 264 | -------------------------------------------------------------------------------- /bencode/decode_test.go: -------------------------------------------------------------------------------- 1 | package bencode 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func printSpace(n int) { 9 | for i := 0; i < n; i++ { 10 | fmt.Printf(" ") 11 | } 12 | } 13 | 14 | func (b *Body) print(level int) { 15 | if b == nil { 16 | return 17 | } 18 | 19 | printSpace(level) 20 | switch b.Type() { 21 | case IntValue: 22 | fmt.Println(b.Value()) 23 | case ByteString: 24 | if len(b.BStr()) < 250 { 25 | fmt.Println(string(b.BStr())) 26 | } else { 27 | fmt.Println("[...Too long]") 28 | } 29 | case DictType: 30 | fmt.Println("[Dictionary]") 31 | for i := 0; i < b.Len(); i++ { 32 | printSpace(level + 1) 33 | k, _ := b.DictN(i) 34 | fmt.Println(k + ":") 35 | v := b.Dict(k) 36 | v.print(level + 2) 37 | } 38 | case ListType: 39 | fmt.Println("[List]") 40 | for i := 0; i < b.Len(); i++ { 41 | b.List(i).print(level + 1) 42 | } 43 | } 44 | } 45 | 46 | func (b *Body) idle() { 47 | } 48 | 49 | func TestDecodeByteSlice(t *testing.T) { 50 | // startT := time.Now() 51 | // f, _ := ioutil.ReadFile("vcb.torrent") 52 | // result, err := Decode(f) 53 | // if err != nil { 54 | // fmt.Println(err) 55 | // t.Fail() 56 | // } 57 | // fmt.Printf("%s\n", time.Since(startT)) 58 | // result[0].print(0) 59 | 60 | // info := result[0].Dict("info") 61 | // pl := (info.Dict("piece length")).Value() 62 | // ps := int64(len((info.Dict("pieces")).BStr())) / 20 63 | // //ps := int64(1) 64 | // fmt.Println(float64(pl*ps) / 1024 / 1024 / 1024) 65 | // fmt.Println("Checked:", result[0].Check()) 66 | 67 | // hash, err := result[0].Infohash() 68 | // if err != nil { 69 | // fmt.Println(err) 70 | // } else { 71 | // fmt.Println(hex.EncodeToString(hash)) 72 | // } 73 | 74 | //enc, _ := result[0].Encode() 75 | //_ = ioutil.WriteFile("out", enc, 0644) 76 | //t.Fail() 77 | } 78 | 79 | func BenchmarkDecode(b *testing.B) { 80 | // f, _ := ioutil.ReadFile("TLMC.torrent") 81 | // b.ReportAllocs() 82 | // for i := 0; i < b.N; i++ { 83 | // result, err := Decode(f) 84 | // if err != nil { 85 | // fmt.Println(err) 86 | // b.Fail() 87 | // } 88 | // result[0].idle() 89 | // } 90 | } 91 | -------------------------------------------------------------------------------- /bencode/encode.go: -------------------------------------------------------------------------------- 1 | package bencode 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | ) 8 | 9 | var ( 10 | ErrInvalidBody = errors.New("bencode: Body did not pass Check().") 11 | ErrDictWithoutKey = errors.New("bencode: Dict need a key to add value.") 12 | ErrUnknownType = errors.New("bencode: Unknown type to add.") 13 | t = []string{"Dict", "List", "Int", "ByteString"} 14 | ) 15 | 16 | type BEncoder struct { 17 | stack []*Body 18 | pos int 19 | } 20 | 21 | func NewEncoder() *BEncoder { 22 | encoder := &BEncoder{ 23 | stack: make([]*Body, MaxDepth), 24 | pos: 0, 25 | } 26 | encoder.stack[0] = &Body{ 27 | btype: ListType, 28 | dict: make([]kvBody, 0, 2), 29 | } 30 | return encoder 31 | } 32 | 33 | func (e *BEncoder) Add(k string, val interface{}) error { 34 | var 
v *Body 35 | switch val := val.(type) { 36 | case byte, int, int8, int16, int32, int64: 37 | v = &Body{ 38 | btype: IntValue, 39 | value: vtoint64(val), 40 | } 41 | case string: 42 | v = &Body{ 43 | btype: ByteString, 44 | byteStr: []byte(val), 45 | } 46 | case []byte: 47 | v = &Body{ 48 | btype: ByteString, 49 | byteStr: val, 50 | } 51 | default: 52 | return ErrUnknownType 53 | } 54 | 55 | if k == "" && e.stack[e.pos].btype == DictType { 56 | return ErrDictWithoutKey 57 | } 58 | switch e.stack[e.pos].btype { 59 | case DictType: 60 | newkv := kvBody{ 61 | key: []byte(k), 62 | value: v, 63 | } 64 | if len(e.stack[e.pos].dict) == 0 { 65 | e.stack[e.pos].dict = append(e.stack[e.pos].dict, newkv) 66 | } else { 67 | i := e.stack[e.pos].inspos(k) 68 | e.stack[e.pos].dict = append(e.stack[e.pos].dict[:i], append([]kvBody{newkv}, e.stack[e.pos].dict[i:]...)...) 69 | } 70 | case ListType: 71 | e.stack[e.pos].dict = append(e.stack[e.pos].dict, kvBody{value: v}) 72 | default: 73 | return errors.New("bencode: Cannot add k-v struct to " + t[e.stack[e.pos].btype]) 74 | } 75 | return nil 76 | } 77 | 78 | func (e *BEncoder) NewDict(k string) error { 79 | return e.newpart(DictType, k) 80 | } 81 | 82 | func (e *BEncoder) NewList(k string) error { 83 | return e.newpart(ListType, k) 84 | } 85 | 86 | func (e *BEncoder) EndPart() error { 87 | if e.pos == 0 { 88 | return ErrTooManyEnd 89 | } 90 | switch e.stack[e.pos].btype { 91 | case ListType, DictType: 92 | e.pos-- 93 | default: 94 | return errors.New("bencode: Cannot end at " + t[e.stack[e.pos].btype]) 95 | } 96 | return nil 97 | } 98 | 99 | func (e *BEncoder) End() []*Body { 100 | result := make([]*Body, 0, 1) 101 | for i := 0; i < len(e.stack[0].dict); i++ { 102 | result = append(result, e.stack[0].dict[i].value) 103 | } 104 | return result 105 | } 106 | 107 | func (e *BEncoder) newpart(Type int, k string) error { 108 | if e.pos+1 == MaxDepth { 109 | return ErrEncodeDepthTooGreat 110 | } 111 | switch e.stack[e.pos].btype { 112 | case ListType: 113 | e.pos++ 114 | e.stack[e.pos] = &Body{ 115 | btype: Type, 116 | dict: make([]kvBody, 0, 2), 117 | } 118 | e.stack[e.pos-1].dict = append(e.stack[e.pos-1].dict, kvBody{value: e.stack[e.pos]}) 119 | case DictType: 120 | if k == "" { 121 | return ErrDictWithoutKey 122 | } 123 | e.pos++ 124 | e.stack[e.pos] = &Body{ 125 | btype: Type, 126 | dict: make([]kvBody, 0, 2), 127 | } 128 | i := e.stack[e.pos-1].inspos(k) 129 | e.stack[e.pos-1].dict = append(e.stack[e.pos-1].dict[:i], append([]kvBody{kvBody{ 130 | key: []byte(k), 131 | value: e.stack[e.pos], 132 | }}, e.stack[e.pos-1].dict[i:]...)...) 
133 | default: 134 | return errors.New("bencode: Cannot add Dict to " + t[e.stack[e.pos].btype]) 135 | } 136 | return nil 137 | } 138 | 139 | func (body *Body) inspos(k string) int { 140 | l := -1 141 | r := len(body.dict) 142 | for { 143 | if l+1 >= r { 144 | return l + 1 145 | } 146 | m := (l + r) / 2 147 | if k < string(body.dict[m].key) { 148 | r = m 149 | } 150 | if k > string(body.dict[m].key) { 151 | l = m 152 | } 153 | } 154 | } 155 | 156 | func (body *Body) Encode() ([]byte, error) { 157 | if !body.Check() { 158 | return nil, ErrInvalidBody 159 | } 160 | return encode(body), nil 161 | } 162 | 163 | func encode(b *Body) []byte { 164 | var buf bytes.Buffer 165 | 166 | switch b.btype { 167 | case IntValue: 168 | _ = (&buf).WriteByte('i') 169 | i := b.value 170 | if i < 0 { 171 | i = -i 172 | _ = (&buf).WriteByte('-') 173 | } 174 | _, _ = (&buf).WriteString(fmt.Sprintf("%d", i)) 175 | _ = (&buf).WriteByte('e') 176 | case ByteString: 177 | _, _ = (&buf).WriteString(fmt.Sprintf("%d", len(b.byteStr))) 178 | _ = (&buf).WriteByte(':') 179 | _, _ = (&buf).Write(b.byteStr) 180 | default: 181 | if b.btype == ListType { 182 | _ = (&buf).WriteByte('l') 183 | } else { 184 | _ = (&buf).WriteByte('d') 185 | } 186 | for _, v := range b.dict { 187 | if v.key != nil { 188 | _, _ = (&buf).WriteString(fmt.Sprintf("%d", len(v.key))) 189 | _ = (&buf).WriteByte(':') 190 | _, _ = (&buf).Write(v.key) 191 | } 192 | _, _ = (&buf).Write(encode(v.value)) 193 | } 194 | _ = (&buf).WriteByte('e') 195 | } 196 | return buf.Bytes() 197 | } 198 | 199 | func (body *Body) Delete(k string) { 200 | var pos int 201 | if pos = body.findpos(k); pos == -1 { 202 | return 203 | } 204 | body.dict = append(body.dict[:pos], body.dict[pos+1:]...) 205 | //body.dict[len(body.dict)-1] = kvBody{} 206 | //body.dict = body.dict[:len(body.dict)-1] 207 | } 208 | func (body *Body) DeleteN(n int) { 209 | if len(body.dict) <= n { 210 | return 211 | } 212 | body.dict = append(body.dict[:n], body.dict[n+1:]...) 213 | //body.dict[len(body.dict)-1] = kvBody{} 214 | //body.dict = body.dict[:len(body.dict)-1] 215 | } 216 | func (body *Body) Edit(v interface{}) { 217 | if body.btype != ByteString && body.btype != IntValue { 218 | return 219 | } 220 | switch v := v.(type) { 221 | case byte, int, int8, int16, int32, int64: 222 | body.btype = IntValue 223 | body.value = vtoint64(v) 224 | case string: 225 | body.btype = ByteString 226 | body.byteStr = []byte(v) 227 | case []byte: 228 | body.btype = ByteString 229 | body.byteStr = v 230 | } 231 | } 232 | func (body *Body) AddPart(k string, v *Body) error { 233 | if body.btype != ListType && body.btype != DictType { 234 | return ErrTypeFrom 235 | } 236 | if body.btype == DictType && k == "" { 237 | return ErrDictWithoutKey 238 | } 239 | i := body.inspos(k) 240 | if body.btype == DictType { 241 | body.dict = append(body.dict[:i], append([]kvBody{kvBody{ 242 | key: []byte(k), 243 | value: v, 244 | }}, body.dict[i:]...)...) 245 | } else { 246 | body.dict = append(body.dict[:i], append([]kvBody{kvBody{ 247 | value: v, 248 | }}, body.dict[i:]...)...) 
249 | } 250 | return nil 251 | } 252 | 253 | func NewBStr(s string) *Body { 254 | return &Body{ 255 | btype: ByteString, 256 | byteStr: []byte(s), 257 | } 258 | } 259 | 260 | func NewEmptyList() *Body { 261 | return &Body{ 262 | btype: ListType, 263 | dict: make([]kvBody, 0, 2), 264 | } 265 | } 266 | 267 | func (b *Body) AnnounceList(s []string) { 268 | if b.btype != ListType { 269 | return 270 | } 271 | for _, v := range s { 272 | tmp := kvBody{ 273 | value: &Body{ 274 | btype: ListType, 275 | dict: []kvBody{kvBody{value: &Body{ 276 | btype: ByteString, 277 | byteStr: []byte(v), 278 | }}}, 279 | }, 280 | } 281 | b.dict = append(b.dict, tmp) 282 | } 283 | } 284 | 285 | func vtoint64(v interface{}) int64 { 286 | switch v := v.(type) { 287 | case byte: 288 | return int64(v) 289 | case int8: 290 | return int64(v) 291 | case int16: 292 | return int64(v) 293 | case int32: 294 | return int64(v) 295 | case int64: 296 | return v 297 | case int: 298 | return int64(v) 299 | } 300 | return 0 301 | } 302 | -------------------------------------------------------------------------------- /bencode/encode_test.go: -------------------------------------------------------------------------------- 1 | package bencode 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func check(e error) { 9 | if e != nil { 10 | fmt.Println(e) 11 | } 12 | } 13 | func TestEncode(t *testing.T) { 14 | e := NewEncoder() 15 | _ = e.NewDict("") 16 | _ = e.Add("name", "Tadokoro Kouji") 17 | _ = e.Add("age", 24) 18 | _ = e.NewList("List Test") 19 | check(e.Add("", "Line0")) 20 | check(e.Add("", "Line1")) 21 | check(e.NewDict("")) 22 | check(e.Add("Ooops", "bilibili")) 23 | check(e.EndPart()) 24 | check(e.EndPart()) 25 | check(e.Add("A", "Add to head.")) 26 | check(e.Add("X", 114514)) 27 | result := e.End() 28 | result[0].print(0) 29 | result[0].Delete("A") 30 | result[0].Dict("X").Edit(1919810) 31 | check(result[0].AddPart("Copy", result[0].Dict("name"))) 32 | result[0].Dict("name").Edit("Yajuu Senpai") 33 | result[0].print(0) 34 | //t.Fail() 35 | } 36 | -------------------------------------------------------------------------------- /bencode/infohash.go: -------------------------------------------------------------------------------- 1 | package bencode 2 | 3 | import ( 4 | "crypto/sha1" 5 | "errors" 6 | ) 7 | 8 | var ( 9 | ErrInfoNotFound = errors.New("bencode: Did not find Info to calc infohash.") 10 | ) 11 | 12 | func (body *Body) Infohash() (r []byte, e error) { 13 | info := body.Dict("info") 14 | if info == nil { 15 | e = ErrInfoNotFound 16 | return 17 | } 18 | 19 | hasher := sha1.New() 20 | data, e := info.Encode() 21 | if e != nil { 22 | return 23 | } 24 | _, e = hasher.Write(data) 25 | if e != nil { 26 | return 27 | } 28 | 29 | r = hasher.Sum(nil) 30 | return 31 | } 32 | -------------------------------------------------------------------------------- /client/Deluge.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "compress/zlib" 6 | "crypto/tls" 7 | "encoding/base64" 8 | "encoding/binary" 9 | "errors" 10 | "fmt" 11 | "io" 12 | "log" 13 | "net" 14 | "sync" 15 | "time" 16 | 17 | "github.com/gdm85/go-rencode" 18 | ) 19 | 20 | const ( 21 | none = 0 22 | wTimeout = 10 23 | timeoutA = 1000 //(ms) 24 | rpcResp = 1 25 | rpcError = 2 26 | rpcEvent = 3 27 | ) 28 | 29 | // DeType :) 30 | type DeType struct { 31 | settings map[string]interface{} 32 | host string 33 | name, label string 34 | user, pass string 35 | version int 36 | protoVer int 37 | 
rttx4 time.Duration 38 | } 39 | 40 | type reqIDType struct { 41 | count int 42 | mu sync.Mutex 43 | } 44 | 45 | var ( 46 | reqID = &reqIDType{} 47 | paraList = []string{"add_paused", "auto_managed", 48 | "download_location", "max_connections", "max_download_speed", 49 | "max_upload_speed", "move_completed", "move_completed_path", 50 | "pre_allocated_storage", "prioritize_first_last_pieces", 51 | "remove_at_ratio", "seed_mode", "sequential_download", 52 | "shared", "stop_at_ratio", "stop_ratio", "super_seeding"} 53 | //https://github.com/deluge-torrent/deluge/blob/4b29436cd5eabf9af271f3fa6250cd7c91cdbc9d/deluge/core/torrent.py#L133 54 | 55 | // ErrExpectDHeader :) 56 | ErrExpectDHeader = errors.New("expected D as first byte in reply") 57 | // ErrExpectPVHeader :) 58 | ErrExpectPVHeader = errors.New("expected protocal version as first byte in reply") 59 | // ErrRespIncomplete :) 60 | ErrRespIncomplete = errors.New("expected a longer response than actually got") 61 | // ErrUnknownResp :) 62 | ErrUnknownResp = errors.New("unknown RPC response") 63 | // ErrRPCEvent :) 64 | ErrRPCEvent = errors.New("unexpected RPC Event message") 65 | // ErrAddFail :) 66 | ErrAddFail = errors.New("failed to add torrent file after 3 tries") 67 | ) 68 | 69 | // Add :) 70 | func (c *DeType) Add(data []byte, name string) (e error) { 71 | defer func() { 72 | if p := recover(); p != nil { 73 | e = p.(error) 74 | } 75 | }() 76 | 77 | b64 := base64.StdEncoding.EncodeToString(data) 78 | 79 | for try := 0; try < 3; try++ { 80 | if nil == func() (e error) { 81 | conn, err := c.newConn() 82 | if err == nil { 83 | defer conn.Close() 84 | if e = c.login(conn); e != nil { 85 | return 86 | } 87 | if e = c.call("core.add_torrent_file", makeList(name, b64, makeDict(c.settings)), makeDict(nil), conn); e != nil { 88 | // c.call("core.add_torrent_file", makeList(name, b64), makeDict(c.settings), conn) 89 | // └-> THIS WOULD NOT WORK!!!!!!! 90 | // Thank you Deluge! 
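				// (Most likely because core.add_torrent_file takes the options
				// dict as its third positional argument, so the settings have to
				// travel inside the args list rather than as RPC kwargs.)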
91 | return 92 | } 93 | return c.recvResp(conn) 94 | } 95 | return err 96 | }() { 97 | return nil 98 | } 99 | 100 | } 101 | 102 | return ErrAddFail 103 | } 104 | 105 | // Name :) 106 | func (c *DeType) Name() string { 107 | return c.label 108 | } 109 | 110 | // NewDeClient :) 111 | func NewDeClient(key string, m map[string]interface{}) *DeType { 112 | defer func() { 113 | if p := recover(); p != nil { 114 | log.Fatal("new deluge client:", p) 115 | } 116 | }() 117 | 118 | var nc = &DeType{ 119 | name: "Deluge", 120 | label: key, 121 | settings: make(map[string]interface{}), 122 | user: m["username"].(string), 123 | pass: m["password"].(string), 124 | } 125 | 126 | for _, para := range paraList { 127 | if m[para] != nil { 128 | nc.settings[para] = m[para] 129 | } 130 | } 131 | if nc.settings["max_download_speed"] != nil { 132 | nc.settings["max_download_speed"] = parseSpeed(nc.settings["max_download_speed"]) 133 | } 134 | if nc.settings["max_upload_speed"] != nil { 135 | nc.settings["max_upload_speed"] = parseSpeed(nc.settings["max_upload_speed"]) 136 | } 137 | 138 | if m["host"] == nil { 139 | log.Panicln("Deluge: miss host.") 140 | } 141 | nc.host = m["host"].(string) 142 | 143 | var failcount int 144 | var err error 145 | var conn *tls.Conn 146 | for { 147 | if conn, err = nc.init(); err == nil { 148 | _ = conn.Close() 149 | break 150 | } 151 | failcount++ 152 | if failcount == 3 { 153 | log.Fatal("Init deluge client:", err) 154 | } 155 | } 156 | return nc 157 | } 158 | 159 | func checkConn(c *tls.Conn, e error) *tls.Conn { 160 | if e != nil { 161 | log.Panic(e) 162 | } 163 | return c 164 | } 165 | 166 | func (c *DeType) init() (conn *tls.Conn, e error) { 167 | defer func() { 168 | if p := recover(); p != nil { 169 | e = fmt.Errorf("%v", p) 170 | } 171 | }() 172 | 173 | conn = checkConn(c.newConn()) 174 | conn = checkConn(c.detectVersion(conn)) 175 | //log.Println("Deluge client init with error", e) 176 | //log.Println("Deluge version:", c.version) 177 | //log.Println("Protocal version:", c.protoVer) 178 | return conn, c.login(conn) 179 | } 180 | 181 | func (c *DeType) login(conn *tls.Conn) (e error) { 182 | m := make(map[string]interface{}) 183 | m["client_version"] = "deluge-client" 184 | dict := makeDict(m) 185 | list := makeList(c.user, c.pass) 186 | 187 | switch c.version { 188 | case 1: 189 | e = c.call("daemon.login", list, makeDict(nil), conn) 190 | case 2: 191 | e = c.call("daemon.login", list, dict, conn) 192 | } 193 | 194 | if e != nil { 195 | return 196 | } 197 | 198 | return c.recvResp(conn) 199 | } 200 | 201 | func (c *DeType) call(method string, args rencode.List, kargs rencode.Dictionary, conn *tls.Conn) (e error) { 202 | defer func() { 203 | if p := recover(); p != nil { 204 | e = p.(error) 205 | } 206 | }() 207 | return c.sendCall(c.version, c.protoVer, method, args, kargs, conn) 208 | } 209 | 210 | func (c *DeType) sendCall(version int, protoVer int, method string, args rencode.List, kargs rencode.Dictionary, conn *tls.Conn) error { 211 | rID := reqID.next() 212 | var b, z, req bytes.Buffer 213 | 214 | e := rencode.NewEncoder(&b) 215 | if err := e.Encode(makeList(makeList(rID, method, args, kargs))); err != nil { 216 | return err 217 | } 218 | 219 | wzlib := zlib.NewWriter(&z) 220 | _, _ = wzlib.Write(b.Bytes()) 221 | wzlib.Close() 222 | 223 | if version == 2 { 224 | // need to send a header to client 225 | switch protoVer { 226 | case none: 227 | req.WriteRune('D') 228 | _ = binary.Write(&req, binary.BigEndian, int32(z.Len())) 229 | case 1: 230 | _ = binary.Write(&req, 
binary.BigEndian, uint8(protoVer)) 231 | _ = binary.Write(&req, binary.BigEndian, uint32(z.Len())) 232 | } 233 | } 234 | 235 | _, _ = req.Write(z.Bytes()) 236 | 237 | _ = conn.SetDeadline(time.Now().Add(wTimeout * time.Second)) 238 | 239 | if _, err := conn.Write(req.Bytes()); err != nil { 240 | return err 241 | } 242 | 243 | return nil 244 | } 245 | 246 | func (c *DeType) detectVersion(conn *tls.Conn) (*tls.Conn, error) { 247 | sign := make([]byte, 1) 248 | 249 | now := time.Now() 250 | _ = c.sendCall(1, none, "daemon.info", makeList(), makeDict(nil), conn) 251 | _ = c.sendCall(2, none, "daemon.info", makeList(), makeDict(nil), conn) 252 | _ = c.sendCall(2, 1, "daemon.info", makeList(), makeDict(nil), conn) 253 | 254 | _ = conn.SetDeadline(time.Now().Add(1 * time.Second)) 255 | _, err := conn.Read(sign) 256 | 257 | c.rttx4 = time.Since(now) + (timeoutA * time.Millisecond) 258 | 259 | if err != nil { 260 | return nil, err 261 | } 262 | 263 | if sign[0] == byte('D') { 264 | c.version = 2 265 | c.protoVer = none 266 | } else if sign[0] == 1 { 267 | c.version = 2 268 | c.protoVer = 1 269 | } else { 270 | c.version = 1 271 | c.protoVer = none 272 | //Deluge 1 doesn't recover well from the bad request. Re-connect! 273 | conn.Close() 274 | return c.newConn() 275 | } 276 | 277 | return conn, nil 278 | } 279 | 280 | func (c *DeType) recvResp(conn *tls.Conn) (e error) { 281 | defer func() { 282 | if p := recover(); p != nil { 283 | e = p.(error) 284 | } 285 | }() 286 | 287 | var buf bytes.Buffer 288 | var zr io.Reader 289 | 290 | switch c.version { 291 | case 1: 292 | for { 293 | _ = conn.SetDeadline(time.Now().Add(c.rttx4)) 294 | if n, _ := io.Copy(&buf, conn); n == 0 { 295 | break 296 | } 297 | } 298 | case 2: 299 | var sign bytes.Buffer 300 | var expectLen int 301 | 302 | _ = conn.SetDeadline(time.Now().Add(c.rttx4)) 303 | if _, err := io.CopyN(&sign, conn, 5); err != nil { 304 | return err 305 | } 306 | 307 | switch c.protoVer { 308 | case none: 309 | if (sign.Bytes())[0] != byte('D') { 310 | return ErrExpectDHeader 311 | } 312 | if err := binary.Read(bytes.NewReader((sign.Bytes())[1:5]), binary.BigEndian, &expectLen); err != nil { 313 | return err 314 | } 315 | case 1: 316 | if (sign.Bytes())[0] != 1 { 317 | return ErrExpectPVHeader 318 | } 319 | expectLen = int(binary.BigEndian.Uint32((sign.Bytes())[1:5])) 320 | } 321 | _ = conn.SetDeadline(time.Now().Add(2 * c.rttx4)) 322 | if _, err := io.CopyN(&buf, conn, int64(expectLen)); err != nil { 323 | return ErrRespIncomplete 324 | } 325 | } 326 | 327 | resp := buf.Bytes() 328 | zr, e = zlib.NewReader(bytes.NewReader(resp)) 329 | if e != nil { 330 | return 331 | } 332 | 333 | r := rencode.NewDecoder(zr) 334 | rli, err := r.DecodeNext() 335 | if err != nil { 336 | return err 337 | } 338 | rlist := rli.(rencode.List) 339 | rValue := rlist.Values() 340 | msgType := convertInt(rValue[0]) 341 | switch msgType { 342 | case rpcResp: 343 | case rpcError: 344 | errorlist := rValue[2].(rencode.List) 345 | errs := errorlist.Values() 346 | msg := string(errs[0].([]uint8)) + "\n" + string(errs[1].([]uint8)) + "\n" + string(errs[2].([]uint8)) 347 | e = errors.New("rpcError with message:\n" + msg) 348 | case rpcEvent: 349 | e = ErrRPCEvent 350 | default: 351 | e = ErrUnknownResp 352 | } 353 | return 354 | } 355 | 356 | func (c *DeType) newConn() (conn *tls.Conn, e error) { 357 | d := net.Dialer{Timeout: 10 * time.Second} 358 | conn, e = tls.DialWithDialer(&d, "tcp", c.host, &tls.Config{ 359 | InsecureSkipVerify: true, 360 | }) 361 | return 362 | } 363 | 364 | 
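// next hands out a process-wide, monotonically increasing request ID for the
// RPC calls built in sendCall(); the mutex keeps it safe for concurrent use.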
func (r *reqIDType) next() (rid int) { 365 | r.mu.Lock() 366 | r.count++ 367 | rid = r.count 368 | r.mu.Unlock() 369 | return 370 | } 371 | 372 | func makeList(args ...interface{}) rencode.List { 373 | list := rencode.NewList() 374 | for _, v := range args { 375 | list.Add(v) 376 | } 377 | return list 378 | } 379 | 380 | func makeDict(args map[string]interface{}) rencode.Dictionary { 381 | var dict rencode.Dictionary 382 | for k, v := range args { 383 | dict.Add(k, v) 384 | } 385 | return dict 386 | } 387 | 388 | func convertInt(i interface{}) int { 389 | switch i := i.(type) { 390 | case int8: 391 | return int(i) 392 | case int16: 393 | return int(i) 394 | case int32: 395 | return int(i) 396 | case int64: 397 | return int(i) 398 | case int: 399 | return i 400 | default: 401 | return -1 402 | } 403 | } 404 | 405 | func parseSpeed(v interface{}) float32 { 406 | // I don't know why I use float32 here but so be it. 407 | if v == nil { 408 | return -1 409 | } 410 | switch v := v.(type) { 411 | case int: 412 | return float32(v) 413 | case string: 414 | return float32(UConvert(v)) 415 | default: 416 | return -1 417 | } 418 | } 419 | -------------------------------------------------------------------------------- /client/Deluge_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestDeluge(t *testing.T) { 8 | // config := make(map[string]interface{}) 9 | // config["host"] = "127.0.0.1:58846" 10 | // config["username"] = "localclient" 11 | // config["password"] = "d96a384f6b9a314405575554acd8b40a6f2f343d" 12 | // config["add_paused"] = true 13 | // c := NewDeClient("Test", config) 14 | 15 | // file, _ := ioutil.ReadFile("SAXZ-5.torrent") 16 | // e := c.Add(file, "SAXZ-5") 17 | // if e != nil { 18 | // log.Println("Test:", e) 19 | // // t.Fail() 20 | // } 21 | // log.Println("Success!") 22 | // // t.Fail() 23 | } 24 | -------------------------------------------------------------------------------- /client/Transmission.go: -------------------------------------------------------------------------------- 1 | package client 2 | -------------------------------------------------------------------------------- /client/interface.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "strconv" 5 | "strings" 6 | "unicode" 7 | ) 8 | 9 | // Client :) 10 | type Client interface { 11 | Add(b []byte, name string) error 12 | Name() string 13 | } 14 | 15 | // UConvert: convert a string which may contain unit to a float64 with bytes unit. 16 | func UConvert(s string) float64 { 17 | if s == "" { 18 | return 0 19 | } 20 | 21 | spNum := float64(0) 22 | number := make([]rune, 0) 23 | runit := make([]rune, 0) 24 | 25 | for _, r := range s { 26 | if unicode.IsDigit(r) || r == '.' 
|| r == '-' { 27 | number = append(number, r) 28 | } else { 29 | runit = append(runit, r) 30 | } 31 | } 32 | 33 | sunit := strings.TrimSpace(string(runit)) 34 | spNum, _ = strconv.ParseFloat(strings.TrimSpace(string(number)), 64) 35 | 36 | switch { 37 | case sunit == "K" || sunit == "k" || sunit == "KB" || sunit == "kB" || sunit == "KiB" || sunit == "kiB": 38 | spNum = spNum * 1024 39 | case sunit == "M" || sunit == "m" || sunit == "MB" || sunit == "mB" || sunit == "MiB" || sunit == "miB": 40 | spNum = spNum * 1024 * 1024 41 | case sunit == "G" || sunit == "g" || sunit == "GB" || sunit == "gB" || sunit == "GiB" || sunit == "giB": 42 | spNum = spNum * 1024 * 1024 * 1024 43 | case sunit == "T" || sunit == "t" || sunit == "TB" || sunit == "tB" || sunit == "TiB" || sunit == "tiB": 44 | spNum = spNum * 1024 * 1024 * 1024 * 1024 45 | default: 46 | spNum = spNum * 1024 * 1024 47 | } 48 | 49 | return spNum 50 | } 51 | -------------------------------------------------------------------------------- /client/qBittorrent.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "io/ioutil" 7 | "log" 8 | "mime/multipart" 9 | "net" 10 | "net/http" 11 | "net/http/cookiejar" 12 | "net/textproto" 13 | "net/url" 14 | "sync" 15 | "time" 16 | ) 17 | 18 | // QBType :) 19 | type QBType struct { 20 | client *http.Client 21 | settings map[string]string 22 | name string 23 | label string 24 | mu sync.RWMutex 25 | } 26 | 27 | var ( 28 | qBparalist = []string{"dlLimit", "upLimit", "savepath", "paused", "category", "skip_checking", "root_folder", "rename", "autoTMM", "sequentialDownload", "firstLastPiecePrio"} 29 | privateIPBlocks []*net.IPNet 30 | ) 31 | 32 | // NewqBclient :) 33 | func NewqBclient(key string, m map[string]interface{}) *QBType { 34 | nc := &QBType{ 35 | client: nil, 36 | settings: make(map[string]string), 37 | name: "qBittorrent", 38 | label: key, 39 | mu: sync.RWMutex{}, 40 | } 41 | 42 | for k, v := range m { 43 | switch v := v.(type) { 44 | case string: 45 | nc.settings[k] = v 46 | case bool: 47 | if v { 48 | nc.settings[k] = "true" 49 | } else { 50 | nc.settings[k] = "false" 51 | } 52 | case int: 53 | nc.settings[k] = fmt.Sprintf("%d", v) 54 | } 55 | } // Copy settings. 
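	// Normalize the WebUI URL (drop a trailing slash) and convert the
	// human-readable dlLimit/upLimit settings (e.g. "1M") to plain byte
	// counts with UConvert before they are sent to the WebUI API.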
56 | 57 | if length := len(nc.settings["url"]); nc.settings["url"][length-1] == '/' { 58 | nc.settings["url"] = nc.settings["url"][:length-1] 59 | } 60 | nc.settings["dlLimit"] = fmt.Sprintf("%.0f", UConvert(nc.settings["dlLimit"])) 61 | nc.settings["upLimit"] = fmt.Sprintf("%.0f", UConvert(nc.settings["upLimit"])) 62 | 63 | fcount := 1 64 | initPrivateIP() 65 | err := nc.init() 66 | for err != nil { 67 | fcount++ 68 | if fcount == 3 { 69 | log.Fatal(err) 70 | } 71 | err = nc.init() 72 | } 73 | return nc 74 | } 75 | 76 | func (c *QBType) init() error { 77 | c.mu.Lock() 78 | defer c.mu.Unlock() 79 | 80 | cookieJar, _ := cookiejar.New(nil) 81 | c.client = &http.Client{ 82 | Timeout: 30 * time.Second, 83 | Jar: cookieJar, 84 | } 85 | 86 | if c.settings["password"] == "" && isPrivateURL(c.settings["url"]) { 87 | log.Println(c.label + " qBittorrent client: no username or password is set.") 88 | log.Println("Please make sure the client is running on the local network and that authentication is bypassed for local clients.") 89 | return nil 90 | } 91 | 92 | resp, err := c.client.PostForm(c.settings["url"]+"/api/v2/auth/login", url.Values{ 93 | "username": {c.settings["username"]}, 94 | "password": {c.settings["password"]}, 95 | }) 96 | if err != nil { 97 | log.Printf("Failed to initialize %s client: %v\n", c.label, err) 98 | return err 99 | } 100 | resp.Body.Close() 101 | return nil 102 | } 103 | 104 | // Name :) 105 | func (c *QBType) Name() string { 106 | return c.label 107 | } 108 | 109 | // Add :) 110 | func (c *QBType) Add(data []byte, filename string) (e error) { 111 | defer func() { 112 | if p := recover(); p != nil { 113 | e = p.(error) 114 | } 115 | }() // Recover in case init failed and left the client unusable. 116 | 117 | var try int 118 | for { 119 | e = c.call(data, filename) 120 | if e == nil { 121 | return 122 | } 123 | try++ 124 | if try == 3 { 125 | return 126 | } 127 | _ = c.init() 128 | } 129 | } 130 | 131 | func (c *QBType) call(data []byte, filename string) error { 132 | var b bytes.Buffer 133 | w := multipart.NewWriter(&b) 134 | 135 | // Write config. 136 | for _, v := range qBparalist { 137 | if c.settings[v] != "" { 138 | if w.WriteField(v, c.settings[v]) != nil { 139 | return fmt.Errorf("failed to write field %s", v) 140 | } 141 | } 142 | } 143 | // Write torrent body. 144 | h := make(textproto.MIMEHeader) 145 | h.Set("Content-Disposition", 146 | fmt.Sprintf(`form-data; name="torrents"; filename="%s"`, filename)) 147 | p, _ := w.CreatePart(h) 148 | if _, perr := p.Write(data); perr != nil { 149 | return perr 150 | } 151 | w.Close() 152 | 153 | req, err := http.NewRequest("POST", c.settings["url"]+"/api/v2/torrents/add", &b) 154 | if err != nil { 155 | return err 156 | } 157 | // Set the content type explicitly; it carries the multipart boundary. 158 | req.Header.Set("Content-Type", w.FormDataContentType()) 159 | 160 | c.mu.RLock() 161 | resp, err := c.client.Do(req) 162 | c.mu.RUnlock() 163 | if err != nil { 164 | fmt.Println(err) 165 | return err 166 | } 167 | defer resp.Body.Close() 168 | if resp.StatusCode != 200 { 169 | return fmt.Errorf("HTTP code: %d", resp.StatusCode) 170 | } 171 | 172 | body, _ := ioutil.ReadAll(resp.Body) 173 | if string(body) != "Ok."
{ 174 | return fmt.Errorf("%s's webui returned %q rather than \"Ok.\"", c.label, string(body)) 175 | } 176 | return nil 177 | } 178 | 179 | func initPrivateIP() { 180 | for _, cidr := range []string{ 181 | "127.0.0.0/8", // IPv4 loopback 182 | "10.0.0.0/8", // RFC1918 183 | "172.16.0.0/12", // RFC1918 184 | "192.168.0.0/16", // RFC1918 185 | "::1/128", // IPv6 loopback 186 | "fe80::/10", // IPv6 link-local 187 | "fc00::/7", // IPv6 unique local addr 188 | } { 189 | _, block, err := net.ParseCIDR(cidr) 190 | if err != nil { 191 | panic(fmt.Errorf("parse error on %q: %v", cidr, err)) 192 | } 193 | privateIPBlocks = append(privateIPBlocks, block) 194 | } 195 | } 196 | 197 | func isPrivateURL(webuiurl string) bool { 198 | u, err := url.Parse(webuiurl) 199 | if err != nil { 200 | log.Panicln("qBittorrent client: Cannot parse webui url.") 201 | } 202 | ip, err := net.LookupIP(u.Hostname()) 203 | if err != nil { 204 | log.Printf("qBittorrent client: Cannot resolve %s, assuming it is on the local network.\n", webuiurl) 205 | return true 206 | } 207 | 208 | for _, block := range privateIPBlocks { 209 | if block.Contains(ip[0]) { 210 | return true 211 | } 212 | } 213 | return false 214 | } 215 | -------------------------------------------------------------------------------- /client/uTorrent.go: -------------------------------------------------------------------------------- 1 | package client 2 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "os" 7 | "path" 8 | 9 | trss "github.com/capric98/t-rss" 10 | ) 11 | 12 | var ( 13 | config = flag.String("conf", "config.yml", "config file") 14 | loglevel = flag.String("log", "info", "log level: warn/info/debug/trace") 15 | learn = flag.Bool("learn", false, "learn mode: record current feed items as seen without pushing them") 16 | 17 | userConfigDir, _ = os.UserConfigDir() 18 | ) 19 | 20 | func init() { 21 | flag.Parse() 22 | if _, e := os.Stat(*config); os.IsNotExist(e) { 23 | fmt.Println("could not open "+*config+", using", path.Join(userConfigDir, "/t-rss/config.yml"), "instead.") 24 | *config = path.Join(userConfigDir, "/t-rss/config.yml") 25 | } 26 | } 27 | 28 | func main() { 29 | trss.WithConfigFile(*config, *loglevel, *learn) 30 | } 31 | -------------------------------------------------------------------------------- /config.example.yml: -------------------------------------------------------------------------------- 1 | GLOBAL: 2 | log_file: # delete this to output log to stderr 3 | history: 4 | max_num: 300 # maximum number of history records 5 | save_to: # default: ./.t-rss_History 6 | timeout: 1m # {int}s/m/h/d 7 | 8 | TASKS: 9 | Name_of_task0: 10 | rss: 11 | url: https://example.com 12 | method: GET #*delete this unless you know what it means 13 | headers: #*if needed 14 | Cookie: something 15 | Key: Value 16 | interval: 10s # {int}s/m/h/d 17 | filter: 18 | content_size: 19 | min: 10MB340KB 20 | max: 120G10MB 21 | regexp: 22 | accept: 23 | - A 24 | reject: 25 | - B 26 | quota: 27 | num: 65535 28 | size: 100G 29 | edit: 30 | tracker: 31 | delete: 32 | - share 33 | add: 34 | - http(s)://example.com/ 35 | receiver: 36 | delay: 12s 37 | save_path: /home/WatchDir/ 38 | client: 39 | Name_of_client0: 40 | type: qBittorrent 41 | url: http://127.0.0.1 42 | username: admin 43 | password: adminadmin 44 | dlLimit: 45 | upLimit: 46 | paused: true 47 | savepath: /home/Downloads 48 | Name_of_client1: 49 | type: Deluge 50 | host:
127.0.0.1:1234 51 | username: 52 | password: 53 | 54 | Name_of_task1: 55 | rss: 56 | url: https://example.com 57 | receiver: 58 | save_path: /home/WatchDir/ 59 | Name_of_task2: 60 | rss: 61 | url: https://example.com 62 | receiver: 63 | save_path: /home/WatchDir/ 64 | -------------------------------------------------------------------------------- /feed/atom.go: -------------------------------------------------------------------------------- 1 | package feed 2 | 3 | import ( 4 | "bytes" 5 | "encoding/xml" 6 | 7 | "github.com/capric98/t-rss/unit" 8 | "golang.org/x/net/html/charset" 9 | ) 10 | 11 | // AtomFeed :) 12 | type AtomFeed struct { 13 | // Required 14 | Title string `xml:"title"` 15 | Link string `xml:"href,attr"` 16 | Description string `xml:"description"` 17 | 18 | // Optional 19 | PubDate string `xml:"updated"` 20 | Generator string `xml:"generator"` 21 | 22 | Items []AtomItem `xml:"entry"` 23 | } 24 | 25 | // AtomItem :) 26 | type AtomItem struct { 27 | Title string `xml:"title"` 28 | Link string `xml:"-"` 29 | Description string `xml:"subtitle"` 30 | Author string `xml:"author"` 31 | Category struct { 32 | Domain string `xml:"term,attr"` 33 | Name string `xml:"label,attr"` 34 | } `xml:"category"` 35 | Comments string `xml:"-"` 36 | Enclosure struct { 37 | URL string `xml:"rel,attr"` 38 | Len int64 `xml:"-"` 39 | Type string `xml:"href,attr"` 40 | } `xml:"link"` 41 | GUID struct { 42 | IsPermaLink bool `xml:"-"` 43 | Value string `xml:",chardata"` 44 | } `xml:"id"` 45 | SpubDate string `xml:"updated"` 46 | Source string `xml:"-"` 47 | } 48 | 49 | func parseAtom(body []byte) (f []Item, e error) { 50 | var feed []AtomFeed 51 | 52 | decoder := xml.NewDecoder(bytes.NewReader(body)) 53 | decoder.CharsetReader = charset.NewReaderLabel 54 | e = decoder.Decode(&feed) 55 | 56 | if e != nil { 57 | return 58 | } 59 | 60 | for k := range feed { 61 | for _, v := range feed[k].Items { 62 | f = append(f, Item{ 63 | Title: v.Title, 64 | Link: v.Link, 65 | Description: v.Description, 66 | Author: v.Author, 67 | URL: v.Enclosure.URL, 68 | Len: v.Enclosure.Len, 69 | Type: v.Enclosure.Type, 70 | GUID: v.GUID.Value, 71 | Date: unit.ParseTime(v.SpubDate), 72 | Source: v.Source, 73 | }) 74 | } 75 | } 76 | 77 | return 78 | } 79 | -------------------------------------------------------------------------------- /feed/feed.go: -------------------------------------------------------------------------------- 1 | package feed 2 | 3 | import ( 4 | "errors" 5 | "strings" 6 | "time" 7 | 8 | "golang.org/x/net/html" 9 | ) 10 | 11 | // Item :) 12 | type Item struct { 13 | Title string 14 | Link string 15 | Description string 16 | Author string 17 | Category string 18 | Comments string 19 | 20 | // Enclosure 21 | URL string 22 | Len int64 23 | Type string 24 | 25 | GUID string 26 | Date time.Time 27 | Source string 28 | } 29 | 30 | // Parse :) 31 | func Parse(body []byte) (i []Item, e error) { 32 | var estr string 33 | i, e = parseRSS(body) 34 | if e != nil { 35 | estr = e.Error() 36 | i, e = parseAtom(body) 37 | } 38 | if e != nil { 39 | e = errors.New(estr + " | " + e.Error()) 40 | } 41 | 42 | for k := range i { 43 | i[k].Title = html.UnescapeString(i[k].Title) 44 | i[k].Link = html.UnescapeString(i[k].Link) 45 | i[k].Description = html.UnescapeString(i[k].Description) 46 | i[k].Author = html.UnescapeString(i[k].Author) 47 | i[k].Category = html.UnescapeString(i[k].Category) 48 | i[k].Comments = html.UnescapeString(i[k].Comments) 49 | i[k].URL = html.UnescapeString(i[k].URL) 50 | i[k].Type = 
html.UnescapeString(i[k].Type) 51 | i[k].GUID = html.UnescapeString(i[k].GUID) 52 | i[k].Source = html.UnescapeString(i[k].Source) 53 | 54 | if i[k].GUID == "" { 55 | i[k].GUID = i[k].Title 56 | } 57 | if i[k].URL == "" { 58 | i[k].URL = i[k].Link 59 | } 60 | i[k].GUID = regularizeFilename(i[k].GUID) 61 | } 62 | 63 | return 64 | } 65 | 66 | func regularizeFilename(name string) string { 67 | name = strings.ReplaceAll(name, ":", "_") 68 | name = strings.ReplaceAll(name, "\\", "_") 69 | name = strings.ReplaceAll(name, "/", "_") 70 | name = strings.ReplaceAll(name, "*", "_") 71 | name = strings.ReplaceAll(name, "?", "_") 72 | name = strings.ReplaceAll(name, "\"", "_") 73 | name = strings.ReplaceAll(name, "<", "_") 74 | name = strings.ReplaceAll(name, ">", "_") 75 | name = strings.ReplaceAll(name, "|", "_") 76 | name = strings.ReplaceAll(name, "\n", "_") 77 | name = strings.ReplaceAll(name, "\r", "_") 78 | name = strings.ReplaceAll(name, " ", "_") 79 | if len(name) > 255 { 80 | name = name[:255] 81 | } 82 | return name 83 | } 84 | -------------------------------------------------------------------------------- /feed/rss.go: -------------------------------------------------------------------------------- 1 | package feed 2 | 3 | import ( 4 | "bytes" 5 | "encoding/xml" 6 | 7 | "github.com/capric98/t-rss/unit" 8 | "golang.org/x/net/html/charset" 9 | ) 10 | 11 | // RSSFeed :) 12 | type RSSFeed struct { 13 | Version string `xml:"version,attr"` 14 | Channel []Channel `xml:"channel"` 15 | } 16 | 17 | // Channel :) 18 | type Channel struct { 19 | // Required 20 | Title string `xml:"title"` 21 | Link string `xml:"link"` 22 | Description string `xml:"description"` 23 | 24 | // Optional 25 | Language string `xml:"language"` 26 | Copyright string `xml:"copyright"` 27 | //managingEditor 28 | //webMaster 29 | PubDate string `xml:"pubDate"` 30 | //lastBuildDate 31 | //category 32 | Generator string `xml:"generator"` 33 | //docs 34 | //cloud 35 | //ttl 36 | //image 37 | //textInput 38 | //skipHours 39 | //skipDays 40 | 41 | Items []RSSItem `xml:"item"` 42 | } 43 | 44 | // RSSItem :) 45 | type RSSItem struct { 46 | Title string `xml:"title"` 47 | Link string `xml:"link"` 48 | Description string `xml:"description"` 49 | Author string `xml:"author"` 50 | Category struct { 51 | Domain string `xml:"domain,attr"` 52 | Name string `xml:",chardata"` 53 | } `xml:"category"` 54 | Comments string `xml:"comments"` 55 | Enclosure struct { 56 | URL string `xml:"url,attr"` 57 | Len int64 `xml:"length,attr"` 58 | Type string `xml:"type,attr"` 59 | } `xml:"enclosure"` 60 | GUID struct { 61 | IsPermaLink bool `xml:"type,attr"` 62 | Value string `xml:",chardata"` 63 | } `xml:"guid"` 64 | SpubDate string `xml:"pubDate"` 65 | Source string `xml:"source"` 66 | } 67 | 68 | func parseRSS(body []byte) (f []Item, e error) { 69 | var feed RSSFeed 70 | 71 | decoder := xml.NewDecoder(bytes.NewReader(body)) 72 | decoder.CharsetReader = charset.NewReaderLabel 73 | e = decoder.Decode(&feed) 74 | 75 | if e != nil { 76 | return 77 | } 78 | 79 | for _, c := range feed.Channel { 80 | for _, v := range c.Items { 81 | i := Item{ 82 | Title: v.Title, 83 | Link: v.Link, 84 | Description: v.Description, 85 | Author: v.Author, 86 | URL: v.Enclosure.URL, 87 | Len: v.Enclosure.Len, 88 | Type: v.Enclosure.Type, 89 | GUID: v.GUID.Value, 90 | Date: unit.ParseTime(v.SpubDate), 91 | Source: v.Source, 92 | } 93 | f = append(f, i) 94 | } 95 | } 96 | 97 | return 98 | } 99 | -------------------------------------------------------------------------------- 
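A minimal usage sketch of the feed package (illustrative only, not part of the repository): Parse tries RSS first and falls back to Atom, then normalizes every Item by HTML-unescaping its fields, defaulting GUID to Title and URL to Link, and sanitizing GUID for use as a file name. Assuming a feed body has already been fetched, the items can be consumed like this (the feed URL below is hypothetical):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/capric98/t-rss/feed"
	"github.com/capric98/t-rss/unit"
)

func main() {
	// Hypothetical feed URL, used only for illustration.
	resp, err := http.Get("https://example.com/rss")
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()

	// Parse falls back to Atom when RSS decoding fails and returns normalized items.
	items, err := feed.Parse(body)
	if err != nil {
		panic(err)
	}
	for _, it := range items {
		// GUID is already sanitized and falls back to Title when the feed omits it.
		fmt.Println(it.GUID, it.Title, unit.FormatSize(it.Len), it.Date)
	}
}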
/filter/contentSize.go: -------------------------------------------------------------------------------- 1 | package filter 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/capric98/t-rss/feed" 7 | "github.com/capric98/t-rss/unit" 8 | ) 9 | 10 | type contentSizeFilter struct { 11 | min, max int64 12 | } 13 | 14 | // NewContentSizeFilter :) 15 | func NewContentSizeFilter(min, max int64) Filter { 16 | return &contentSizeFilter{ 17 | min: min, 18 | max: max, 19 | } 20 | } 21 | 22 | // Check meets Filter.Check() interface. 23 | func (f *contentSizeFilter) Check(v *feed.Item) error { 24 | if v.Len == 0 { 25 | return nil 26 | } // check it later 27 | if v.Len < f.min { 28 | return fmt.Errorf("content_size: %v < minSize:%v", unit.FormatSize(v.Len), unit.FormatSize(f.min)) 29 | } 30 | if v.Len > f.max { 31 | return fmt.Errorf("content_size: %v > maxSize:%v", unit.FormatSize(v.Len), unit.FormatSize(f.max)) 32 | } 33 | return nil 34 | } 35 | -------------------------------------------------------------------------------- /filter/interface.go: -------------------------------------------------------------------------------- 1 | package filter 2 | 3 | import "github.com/capric98/t-rss/feed" 4 | 5 | // Filter interface. 6 | type Filter interface { 7 | Check(*feed.Item) error 8 | } 9 | -------------------------------------------------------------------------------- /filter/regexp.go: -------------------------------------------------------------------------------- 1 | package filter 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/capric98/t-rss/feed" 7 | "github.com/capric98/t-rss/setting" 8 | ) 9 | 10 | type regexpFilter struct { 11 | accept, reject []setting.Reg 12 | } 13 | 14 | // NewRegexpFilter :) 15 | func NewRegexpFilter(accept, reject []setting.Reg) Filter { 16 | regf := ®expFilter{ 17 | accept: accept, 18 | reject: reject, 19 | } 20 | if len(regf.accept) == 0 { 21 | regf.accept = nil 22 | } 23 | return regf 24 | } 25 | 26 | // Check meets Filter.Check() interface. 
27 | func (f *regexpFilter) Check(v *feed.Item) error { 28 | for _, r := range f.reject { 29 | // if r.R.MatchString(v.Description) { 30 | // return true, r.C 31 | // } 32 | if r.R.MatchString(v.Title) || r.R.MatchString(v.Author) { 33 | return fmt.Errorf("regexp: matched - %v", r.C) 34 | } 35 | } 36 | if f.accept != nil { 37 | for _, r := range f.accept { 38 | if r.R.MatchString(v.Title) || r.R.MatchString(v.Author) { 39 | return nil 40 | } 41 | } 42 | return fmt.Errorf("regexp: no match of Accept") 43 | } 44 | return nil 45 | } 46 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/capric98/t-rss 2 | 3 | go 1.14 4 | 5 | require ( 6 | github.com/gdm85/go-rencode v0.1.4 7 | github.com/mattn/go-colorable v0.1.6 8 | github.com/sirupsen/logrus v1.5.0 9 | golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e 10 | gopkg.in/yaml.v2 v2.2.8 11 | ) 12 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/gdm85/go-rencode v0.1.4 h1:UAEXC7XLg39Bxzd4bNf/h+6mZC1PN9EYAPr8F2eDh+g= 4 | github.com/gdm85/go-rencode v0.1.4/go.mod h1:0dr3BuaKzeseY1of6o1KRTGB/Oo7eio+YEyz8KDp5+s= 5 | github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= 6 | github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 7 | github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= 8 | github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 9 | github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= 10 | github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 11 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 12 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 13 | github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= 14 | github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= 15 | github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= 16 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 17 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 18 | golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= 19 | golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 20 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 21 | golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 22 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 23 | golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 24 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd 
h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= 25 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 26 | golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= 27 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 28 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 29 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 30 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 31 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 32 | -------------------------------------------------------------------------------- /history.go: -------------------------------------------------------------------------------- 1 | package trss 2 | 3 | import ( 4 | "math/rand" 5 | "os" 6 | "path/filepath" 7 | "time" 8 | 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | type file struct { 13 | path string 14 | info os.FileInfo 15 | } 16 | 17 | func checkAndWatchHistory(path string, maxNum int, log *logrus.Logger) { 18 | if _, e := os.Stat(path); os.IsNotExist(e) { 19 | log.WithFields(logrus.Fields{ 20 | "@func": "checkAndWatchHistory", 21 | "path": path, 22 | }).Info("path does not exist, create it") 23 | e = os.MkdirAll(path, 0740) 24 | if e != nil { 25 | log.Fatal(e) 26 | } 27 | } 28 | go watchHistroy(path, maxNum, log.WithField("@func", "watchHistory")) 29 | } 30 | 31 | func watchHistroy(path string, maxNum int, log *logrus.Entry) { 32 | log.Debug("start to watch history dir") 33 | for { 34 | var subdir []string 35 | e := filepath.Walk(path, func(p string, i os.FileInfo, err error) error { 36 | if err != nil { 37 | return err 38 | } 39 | if i.IsDir() { 40 | if p != path { 41 | subdir = append(subdir, p) 42 | return filepath.SkipDir 43 | } 44 | } 45 | return nil 46 | }) 47 | if e != nil { 48 | log.Warn("walk: ", e) 49 | continue 50 | } 51 | for k := range subdir { 52 | log.Debug("clean subdir: ", subdir[k]) 53 | cleanDir(subdir[k], maxNum, log) 54 | } 55 | time.Sleep(12 * time.Hour) 56 | } 57 | } 58 | 59 | func cleanDir(path string, maxNum int, log *logrus.Entry) { 60 | var f []file 61 | e := filepath.Walk(path, func(p string, i os.FileInfo, err error) error { 62 | if err != nil { 63 | return err 64 | } 65 | if !i.IsDir() { 66 | f = append(f, file{ 67 | info: i, 68 | path: p, 69 | }) 70 | } else { 71 | if p != path { 72 | return filepath.SkipDir 73 | } 74 | } 75 | return nil 76 | }) 77 | if e != nil { 78 | log.Warn("walk: ", e) 79 | return 80 | } 81 | log.Debug("find ", len(f), " files in ", path) 82 | if len(f) > maxNum { 83 | sort(f) 84 | f = f[maxNum:] 85 | for k := range f { 86 | log.Debug("delete old history: ", f[k].info.Name()) 87 | if e := os.Remove(f[k].path); e != nil { 88 | log.Warn("delete old history: ", f[k].info.Name(), " - ", e) 89 | } 90 | } 91 | } 92 | } 93 | 94 | func sort(a []file) { 95 | ll := len(a) 96 | l, r := 0, ll-1 97 | if l >= r { 98 | return 99 | } 100 | key := a[l+rand.Intn(r-l)].info.ModTime() 101 | for l <= r { 102 | for ; key.Before(a[l].info.ModTime()); l++ { 103 | } 104 | for ; a[r].info.ModTime().Before(key); r-- { 105 | } 106 | if l <= r { 107 | tmp := a[l] 108 | a[l] = a[r] 109 | a[r] = tmp 110 | l++ 111 | r-- 112 | } 113 | } 114 | 115 | if l < ll { 116 | sort(a[l:]) 117 | } 118 | if 0 < r { 119 | sort(a[:r]) 120 | } 121 | } 122 | 
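For clarity: the recursive sort above is a quicksort keyed on ModTime in descending order (newest first), so cleanDir keeps the max_num most recent history records and deletes everything in f[maxNum:]. A standard-library equivalent, shown only as an illustrative sketch (sortNewestFirst is a hypothetical name, not part of the repository):

package main

import (
	"os"
	"sort"
)

type file struct {
	path string
	info os.FileInfo
}

// sortNewestFirst orders files by modification time, newest first, which is
// the same ordering the custom quicksort in history.go produces; callers can
// then drop everything past the first maxNum entries.
func sortNewestFirst(f []file) {
	sort.Slice(f, func(i, j int) bool {
		return f[j].info.ModTime().Before(f[i].info.ModTime())
	})
}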
-------------------------------------------------------------------------------- /logLevel.go: -------------------------------------------------------------------------------- 1 | package trss 2 | 3 | import "github.com/sirupsen/logrus" 4 | 5 | func toLogLevel(s string) (l logrus.Level) { 6 | switch s { 7 | case "trace": 8 | l = logrus.TraceLevel 9 | case "debug": 10 | l = logrus.DebugLevel 11 | case "info": 12 | l = logrus.InfoLevel 13 | case "warn": 14 | l = logrus.WarnLevel 15 | default: 16 | l = logrus.InfoLevel 17 | } 18 | return 19 | } 20 | -------------------------------------------------------------------------------- /receiver/client.go: -------------------------------------------------------------------------------- 1 | package receiver 2 | 3 | import ( 4 | "github.com/capric98/t-rss/client" 5 | "github.com/capric98/t-rss/feed" 6 | ) 7 | 8 | // Client :) 9 | type Client struct { 10 | client.Client 11 | } 12 | 13 | // NewClient :) 14 | func NewClient(tYPE interface{}, conf map[string]interface{}, name string) Receiver { 15 | stype := tYPE.(string) // let it crash if got non string value 16 | 17 | var cc client.Client 18 | switch stype { 19 | case "qBittorrent": 20 | cc = client.NewqBclient(name, conf) 21 | case "Deluge": 22 | cc = client.NewDeClient(name, conf) 23 | } 24 | rc := &Client{} 25 | rc.Client = cc 26 | return rc 27 | } 28 | 29 | // Push :) 30 | func (c *Client) Push(i *feed.Item, b []byte) error { 31 | return c.Client.Add(b, i.Title) 32 | } 33 | -------------------------------------------------------------------------------- /receiver/download.go: -------------------------------------------------------------------------------- 1 | package receiver 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path" 7 | "strings" 8 | 9 | "github.com/capric98/t-rss/feed" 10 | ) 11 | 12 | type dReceiver struct { 13 | path string 14 | } 15 | 16 | // NewDownload news a download receiver 17 | func NewDownload(path string) Receiver { 18 | if _, e := os.Stat(path); os.IsNotExist(e) { 19 | _ = os.MkdirAll(path, 0740) 20 | } 21 | 22 | return &dReceiver{path: path} 23 | } 24 | 25 | // Push implements Receiver interface. 26 | func (r *dReceiver) Push(i *feed.Item, b []byte) (e error) { 27 | fn := i.Title 28 | fn = regularizeFilename(fn) 29 | e = ioutil.WriteFile(path.Join(r.path, fn+".torrent"), b, 0664) 30 | return 31 | } 32 | 33 | // Name implements Receiver interface. 
34 | func (r *dReceiver) Name() string { 35 | return "download" 36 | } 37 | 38 | func regularizeFilename(name string) string { 39 | name = strings.ReplaceAll(name, ":", "_") 40 | name = strings.ReplaceAll(name, "\\", "_") 41 | name = strings.ReplaceAll(name, "/", "_") 42 | name = strings.ReplaceAll(name, "*", "_") 43 | name = strings.ReplaceAll(name, "?", "_") 44 | name = strings.ReplaceAll(name, "\"", "_") 45 | name = strings.ReplaceAll(name, "<", "_") 46 | name = strings.ReplaceAll(name, ">", "_") 47 | name = strings.ReplaceAll(name, "|", "_") 48 | name = strings.ReplaceAll(name, "\n", "_") 49 | name = strings.ReplaceAll(name, "\r", "_") 50 | name = strings.ReplaceAll(name, " ", "_") 51 | 52 | nameRune := []rune(name) 53 | for len(string(nameRune)) > 200 { 54 | nameRune = nameRune[:len(nameRune)-1] 55 | } 56 | return string(nameRune) 57 | } 58 | -------------------------------------------------------------------------------- /receiver/interface.go: -------------------------------------------------------------------------------- 1 | package receiver 2 | 3 | import "github.com/capric98/t-rss/feed" 4 | 5 | // Receiver interface 6 | type Receiver interface { 7 | Push(*feed.Item, []byte) error 8 | Name() string 9 | } 10 | -------------------------------------------------------------------------------- /setting/editTorrent.go: -------------------------------------------------------------------------------- 1 | package setting 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/capric98/t-rss/bencode" 7 | ) 8 | 9 | // EditTorrent tmp 10 | func (edt *Edit) EditTorrent(data []byte) (en []byte, err error) { 11 | defer func() { 12 | if p := recover(); p != nil { 13 | err = p.(error) 14 | } 15 | }() 16 | 17 | results, err := bencode.Decode(data) 18 | if err != nil || len(results) != 1 { 19 | err = fmt.Errorf("decode: %v", err) 20 | return 21 | } 22 | torrent := results[0] 23 | 24 | for _, reg := range edt.Tracker.Delete { 25 | announce := torrent.Dict("announce") 26 | if announce != nil { 27 | if reg.R.Match(announce.BStr()) { 28 | // log.Debug(fmt.Sprintf(" + edit tracker: \"%s\" matches \"%s\", delete announce.", announce.BStr(), reg.C), 0) 29 | torrent.Delete("announce") 30 | continue 31 | } 32 | } 33 | announceList := torrent.Dict("announce-list") 34 | if announceList != nil { 35 | for i := announceList.Len(); i > 0; i-- { 36 | subList := announceList.List(i - 1) 37 | for s := subList.Len(); s > 0; s-- { 38 | if reg.R.Match(subList.List(s - 1).BStr()) { 39 | // log.Debug(fmt.Sprintf(" + edit tracker: \"%s\" matches \"%s\", delete part of announce-list.", subList.List(s-1).BStr(), reg.C), 0) 40 | subList.DeleteN(s - 1) 41 | break 42 | } 43 | } 44 | if subList.Len() == 0 { 45 | announceList.DeleteN(i - 1) 46 | } 47 | } 48 | } 49 | if announceList.Len() == 0 { 50 | torrent.Delete("announce-list") 51 | } 52 | } 53 | 54 | var waitList []string 55 | for _, add := range edt.Tracker.Add { 56 | if torrent.Dict("announce") == nil { 57 | // log.Debug("announce add "+"\""+add+"\"", 0) 58 | _ = torrent.AddPart("announce", bencode.NewBStr(add)) 59 | continue 60 | } 61 | waitList = append(waitList, add) 62 | } 63 | if torrent.Dict("announce-list") == nil { 64 | _ = torrent.AddPart("announce-list", bencode.NewEmptyList()) 65 | } 66 | list := torrent.Dict("announce-list") 67 | list.AnnounceList(waitList) 68 | // log.Debug("check ", torrent.Check()) 69 | 70 | en, _ = torrent.Encode() 71 | 72 | return 73 | } 74 | -------------------------------------------------------------------------------- /setting/types.go: 
-------------------------------------------------------------------------------- 1 | package setting 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "regexp" 7 | "time" 8 | 9 | "github.com/capric98/t-rss/unit" 10 | "gopkg.in/yaml.v2" 11 | ) 12 | 13 | // Int64 is int64 14 | type Int64 struct { 15 | I int64 16 | } 17 | 18 | // Duration wraps time.Duration 19 | type Duration struct { 20 | T time.Duration 21 | } 22 | 23 | // C :) 24 | type C struct { 25 | Global Global `yaml:"GLOBAL"` 26 | Tasks map[string]*Task `yaml:"TASKS"` 27 | } 28 | 29 | // Global is global configs. 30 | type Global struct { 31 | LogFile string `yaml:"log_file"` 32 | History struct { 33 | MaxNum int `yaml:"max_num"` 34 | Save string `yaml:"save_to"` 35 | } `yaml:"history"` 36 | Timeout Duration `yaml:"timeout"` 37 | } 38 | 39 | // Task is task part. 40 | type Task struct { 41 | Rss *Rss `yaml:"rss"` 42 | Filter Filter `yaml:"filter"` 43 | Quota Quota `yaml:"quota"` 44 | Edit *Edit `yaml:"edit"` 45 | Receiver Receiver `yaml:"receiver"` 46 | } 47 | 48 | // Rss :) 49 | type Rss struct { 50 | URL string `yaml:"url"` 51 | Method string `yaml:"method"` 52 | Headers Header `yaml:"headers"` 53 | Interval Duration `yaml:"interval"` 54 | } 55 | 56 | // Filter :) 57 | type Filter struct { 58 | ContentSize ContentSize `yaml:"content_size"` 59 | Regexp RegexpConfig `yaml:"regexp"` 60 | } 61 | 62 | // Quota :) 63 | type Quota struct { 64 | Num int `yaml:"num"` 65 | Size Int64 `yaml:"size"` 66 | } 67 | 68 | // Edit :) 69 | type Edit struct { 70 | Tracker Tracker `yaml:"tracker"` 71 | } 72 | 73 | // Receiver defines tasks' receiver(s). 74 | type Receiver struct { 75 | Delay Duration `yaml:"delay"` 76 | Save *string `yaml:"save_path"` 77 | Client map[string]map[string]interface{} `yaml:"client"` 78 | } 79 | 80 | // ContentSize :) 81 | type ContentSize struct { 82 | Min Int64 `yaml:"min"` 83 | Max Int64 `yaml:"max"` 84 | } 85 | 86 | // RegexpConfig :) 87 | type RegexpConfig struct { 88 | Accept []Reg `yaml:"accept"` 89 | Reject []Reg `yaml:"reject"` 90 | } 91 | 92 | // Reg :) 93 | type Reg struct { 94 | R *regexp.Regexp 95 | C string 96 | } 97 | 98 | // Tracker :) 99 | type Tracker struct { 100 | Delete []Reg `yaml:"delete"` 101 | Add []string `yaml:"add"` 102 | } 103 | 104 | // Header :) 105 | type Header struct { 106 | H map[string][]string 107 | } 108 | 109 | // Parse :) 110 | func Parse(r io.Reader) (config *C, e error) { 111 | config = new(C) 112 | e = yaml.NewDecoder(r).Decode(config) 113 | if e != nil { 114 | return 115 | } 116 | config.standardize() 117 | return 118 | } 119 | 120 | func (c *C) standardize() { 121 | if c.Global.Timeout.T == 0 { 122 | c.Global.Timeout.T = 30 * time.Second 123 | } 124 | if c.Global.History.MaxNum == 0 { 125 | c.Global.History.MaxNum = 500 126 | } 127 | if c.Global.History.Save == "" { 128 | c.Global.History.Save = ".t-rss_History/" 129 | } 130 | if l := len(c.Global.History.Save); l > 0 && c.Global.History.Save[l-1] != '/' { 131 | c.Global.History.Save = c.Global.History.Save + "/" 132 | } 133 | for _, v := range c.Tasks { 134 | if v.Rss.Method == "" { 135 | v.Rss.Method = "GET" 136 | } 137 | if v.Rss.Interval.T == 0 { 138 | v.Rss.Interval.T = 30 * time.Second 139 | } 140 | if v.Filter.ContentSize.Max.I == 0 { 141 | v.Filter.ContentSize.Max.I = 1 << 62 142 | } 143 | if v.Quota.Num == 0 { 144 | v.Quota.Num = 1 << 30 145 | } 146 | if v.Quota.Size.I == 0 { 147 | v.Quota.Size.I = 1 << 62 148 | } 149 | } 150 | } 151 | 152 | // UnmarshalYAML :) 153 | func (t *Duration) UnmarshalYAML(uf func(interface{}) error) (e 
error) { 154 | var s string 155 | e = uf(&s) 156 | if e != nil { 157 | return 158 | } 159 | t.T = unit.ParseDuration(s) 160 | return nil 161 | } 162 | 163 | // UnmarshalYAML :) 164 | func (r *Reg) UnmarshalYAML(uf func(interface{}) error) (e error) { 165 | var s string 166 | e = uf(&s) 167 | if e != nil { 168 | return 169 | } 170 | r.C = s 171 | r.R, e = regexp.Compile(s) 172 | return 173 | } 174 | 175 | // UnmarshalYAML :) 176 | func (n *Int64) UnmarshalYAML(uf func(interface{}) error) (e error) { 177 | var s string 178 | e = uf(&s) 179 | if e != nil { 180 | return 181 | } 182 | n.I = unit.ParseSize(s) 183 | e = nil 184 | return 185 | } 186 | 187 | // UnmarshalYAML :) 188 | func (h *Header) UnmarshalYAML(uf func(interface{}) error) (e error) { 189 | if h.H == nil { 190 | h.H = make(map[string][]string) 191 | } 192 | var header map[string]interface{} 193 | e = uf(&header) 194 | for k, v := range header { 195 | switch vi := v.(type) { 196 | case string: 197 | h.H[k] = append(h.H[k], vi) 198 | case []string: 199 | for i := range vi { 200 | h.H[k] = append(h.H[k], vi[i]) 201 | } 202 | default: 203 | h.H[k] = append(h.H[k], fmt.Sprintf("%v", vi)) 204 | } 205 | } 206 | return 207 | } 208 | -------------------------------------------------------------------------------- /setting/types_test.go: -------------------------------------------------------------------------------- 1 | package setting 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestParse(t *testing.T) { 8 | 9 | } 10 | -------------------------------------------------------------------------------- /t-rss.go: -------------------------------------------------------------------------------- 1 | package trss 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "os" 7 | "os/signal" 8 | "path" 9 | "sync" 10 | "syscall" 11 | 12 | "github.com/capric98/t-rss/setting" 13 | "github.com/mattn/go-colorable" 14 | "github.com/sirupsen/logrus" 15 | ) 16 | 17 | // WithConfigFile starts program using a config file. 
18 | func WithConfigFile(filename string, level string, learn bool) { 19 | backgroundLogger := logrus.New() 20 | formatter := &logrus.TextFormatter{ 21 | ForceColors: false, 22 | ForceQuote: true, 23 | FullTimestamp: true, 24 | PadLevelText: true, 25 | QuoteEmptyFields: true, 26 | TimestampFormat: "2006-01-02 15:04:05", 27 | FieldMap: logrus.FieldMap{ 28 | logrus.FieldKeyTime: "@time", 29 | logrus.FieldKeyLevel: "&", 30 | logrus.FieldKeyMsg: "@msg", 31 | }, 32 | } 33 | backgroundLogger.SetFormatter(formatter) 34 | backgroundLogger.SetLevel(toLogLevel(level)) 35 | 36 | fr, e := os.Open(filename) 37 | if e != nil { 38 | backgroundLogger.Fatal("open config file: ", e) 39 | } 40 | config, e := setting.Parse(fr) 41 | fr.Close() 42 | if e != nil { 43 | backgroundLogger.Fatal("parse config file: ", e) 44 | } 45 | 46 | backgroundLogger.Tracef("%#v\n", *config) 47 | 48 | if config.Global.LogFile == "" { 49 | formatter.ForceColors = true 50 | backgroundLogger.SetOutput(colorable.NewColorableStderr()) 51 | } else { 52 | fw, fe := os.OpenFile( 53 | config.Global.LogFile, 54 | os.O_APPEND|os.O_CREATE|os.O_WRONLY, 55 | 0740, 56 | ) 57 | if fe != nil { 58 | backgroundLogger.Fatal(fe) 59 | } 60 | backgroundLogger.SetOutput(fw) 61 | } 62 | 63 | checkAndWatchHistory( 64 | config.Global.History.Save, 65 | config.Global.History.MaxNum, 66 | backgroundLogger, 67 | ) 68 | 69 | client := &http.Client{Timeout: config.Global.Timeout.T} 70 | bgCtx, cancel := context.WithCancel(context.Background()) 71 | var wg sync.WaitGroup 72 | runNum := -1 73 | if learn { 74 | backgroundLogger.Info("Learning...") 75 | runNum = 1 76 | } 77 | for k, v := range config.Tasks { 78 | kk := k // make a copy 79 | wg.Add(1) 80 | 81 | nw := &worker{ 82 | client: client, 83 | header: v.Rss.Headers.H, 84 | tick: nil, // https://github.com/capric98/t-rss/blob/master/task.go#L40 85 | 86 | filters: nil, // https://github.com/capric98/t-rss/blob/master/task.go#L50 87 | recvers: nil, // https://github.com/capric98/t-rss/blob/master/task.go#L54 88 | quota: v.Quota, 89 | delay: v.Receiver.Delay.T, 90 | wpath: path.Join(config.Global.History.Save, kk), 91 | 92 | edit: v.Edit, 93 | 94 | logger: func() *logrus.Entry { 95 | return backgroundLogger.WithField("@task", kk) 96 | }, 97 | ctx: bgCtx, 98 | wg: &wg, 99 | } 100 | backgroundLogger.Tracef("%#v\n", nw.header) 101 | nw.prepare(v, runNum) 102 | go nw.loop() 103 | } 104 | 105 | c := make(chan os.Signal, 10) 106 | signal.Notify(c, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM) 107 | wgDone := make(chan struct{}, 1) 108 | go func() { 109 | wg.Wait() 110 | wgDone <- struct{}{} 111 | }() 112 | 113 | select { 114 | case sig := <-c: 115 | backgroundLogger.Info("receive signal: ", sig) 116 | case <-wgDone: 117 | backgroundLogger.Info("all tasks done") 118 | } 119 | 120 | cancel() 121 | backgroundLogger.Info("gracefully shutting down...") 122 | wg.Wait() 123 | backgroundLogger.Info("bye~") 124 | _ = backgroundLogger.Writer().Close() 125 | } 126 | -------------------------------------------------------------------------------- /t-rss_test.go: -------------------------------------------------------------------------------- 1 | package trss 2 | 3 | import "testing" 4 | 5 | func TestWithConfigFile(t *testing.T) { 6 | // WithConfigFile("config.test.yml", "trace", true) 7 | // t.Fail() 8 | } 9 | -------------------------------------------------------------------------------- /task.go: -------------------------------------------------------------------------------- 1 | package trss 2 | 3 | import ( 4 | "context" 
5 | "io/ioutil" 6 | "net/http" 7 | "os" 8 | "path" 9 | "sync" 10 | "time" 11 | 12 | "github.com/capric98/t-rss/bencode" 13 | "github.com/capric98/t-rss/feed" 14 | "github.com/capric98/t-rss/filter" 15 | "github.com/capric98/t-rss/receiver" 16 | "github.com/capric98/t-rss/setting" 17 | "github.com/capric98/t-rss/ticker" 18 | "github.com/capric98/t-rss/unit" 19 | "github.com/sirupsen/logrus" 20 | ) 21 | 22 | type worker struct { 23 | client *http.Client 24 | header map[string][]string 25 | tick *ticker.Ticker 26 | 27 | filters []filter.Filter 28 | recvers []receiver.Receiver 29 | quota setting.Quota 30 | delay time.Duration 31 | wpath string 32 | 33 | edit *setting.Edit 34 | 35 | logger func() *logrus.Entry 36 | ctx context.Context 37 | wg *sync.WaitGroup 38 | } 39 | 40 | func (w *worker) prepare(t *setting.Task, num int) { 41 | // check if log directory exists. 42 | if _, e := os.Stat(w.wpath); os.IsNotExist(e) { 43 | e = os.MkdirAll(w.wpath, 0740) 44 | if e != nil { 45 | w.logger().Fatal("create log dir: ", e) 46 | } 47 | } 48 | // make ticker 49 | if t.Rss != nil { 50 | req, err := http.NewRequest(t.Rss.Method, t.Rss.URL, nil) 51 | if err != nil { 52 | w.logger().Fatal(err) 53 | } 54 | req.Header = w.header 55 | w.tick = ticker.NewRssTicker(num, req, w.client, w.logger(), t.Rss.Interval.T) 56 | } 57 | 58 | // make filter 59 | w.filters = append(w.filters, filter.NewRegexpFilter(t.Filter.Regexp.Accept, t.Filter.Regexp.Reject)) 60 | w.filters = append(w.filters, filter.NewContentSizeFilter(t.Filter.ContentSize.Min.I, t.Filter.ContentSize.Max.I)) 61 | 62 | // make receiver only if learn==false (num==-1) 63 | if num == -1 { 64 | if t.Receiver.Save != nil { 65 | w.recvers = append(w.recvers, receiver.NewDownload(*t.Receiver.Save)) 66 | } 67 | for k, v := range t.Receiver.Client { 68 | w.recvers = append(w.recvers, receiver.NewClient(v["type"], v, k)) 69 | } 70 | } 71 | } 72 | 73 | func (w *worker) loop() { 74 | defer w.wg.Done() 75 | defer w.tick.Stop() 76 | 77 | for { 78 | select { 79 | case <-w.ctx.Done(): 80 | return 81 | case items, ok := <-w.tick.C(): 82 | if !ok { 83 | w.logger().Debug("ticker closed, return") 84 | return 85 | } // under "learn" mode or ticker crashed 86 | 87 | var passed []feed.Item 88 | var accept, reject int 89 | for k := range items { 90 | log := w.logger().WithFields(logrus.Fields{ 91 | "author": items[k].Author, 92 | "category": items[k].Category, 93 | "GUID": items[k].GUID, 94 | "size": unit.FormatSize(items[k].Len), 95 | }) 96 | 97 | // Check if have seen. 
98 | historyPath := path.Join(w.wpath, items[k].GUID) 99 | if _, err := os.Stat(historyPath); !os.IsNotExist(err) { 100 | log.Trace("(reject) have seen ", items[k].Title, " before.") 101 | reject++ 102 | continue 103 | } 104 | 105 | if w.recvers == nil { 106 | // learn -> create history file immediately 107 | hf, err := os.Create(historyPath) 108 | if err != nil { 109 | log.Warn("create history file: ", err) 110 | } else { 111 | hf.Close() 112 | } 113 | } // else create history file at push phase 114 | 115 | flag := true 116 | for _, f := range w.filters { 117 | if e := f.Check(&items[k]); e != nil { 118 | flag = false 119 | log.Info("(reject) ", e) 120 | break 121 | } 122 | } 123 | if flag { 124 | accept++ 125 | log.Info("(accept) ", items[k].Title) 126 | passed = append(passed, items[k]) 127 | } else { 128 | reject++ 129 | hf, err := os.Create(historyPath) 130 | if err != nil { 131 | log.Warn("create history file: ", err) 132 | } else { 133 | hf.Close() 134 | } 135 | } 136 | } 137 | w.logger().Info("accepted ", accept, " item(s), rejected ", reject, " item(s).") 138 | go w.push(passed) 139 | } 140 | } 141 | } 142 | 143 | func (w *worker) push(items []feed.Item) { 144 | quota := w.quota 145 | mu := sync.Mutex{} 146 | for k := range items { 147 | go func(item feed.Item) { 148 | time.Sleep(w.delay) 149 | 150 | // preperation 151 | start := time.Now() 152 | log := w.logger().WithFields(logrus.Fields{ 153 | "title": item.Title, 154 | }) 155 | 156 | // request torrent's body 157 | req, e := http.NewRequest("GET", item.URL, nil) 158 | if e != nil { 159 | log.Warn("new request: ", e) 160 | return 161 | } 162 | req.Header = w.header 163 | resp, e := w.client.Do(req) 164 | for retry := 0; e != nil && retry < 3; retry++ { 165 | resp, e = w.client.Do(req) 166 | } 167 | if e != nil { 168 | log.Warn("client.Do(): ", e) 169 | return 170 | } 171 | body, e := ioutil.ReadAll(resp.Body) 172 | resp.Body.Close() 173 | if e != nil { 174 | log.Warn("read response body: ", e) 175 | return 176 | } 177 | 178 | if tLen(body) == -1 { 179 | log.Info("got non-bencoded file, skip") 180 | log.Debug("content: ", string(body)) 181 | return 182 | } 183 | // write history 184 | historyPath := path.Join(w.wpath, item.GUID) 185 | if _, err := os.Stat(historyPath); os.IsNotExist(err) { 186 | hf, err := os.Create(historyPath) 187 | if err != nil { 188 | log.Warn("create history file: ", err) 189 | } else { 190 | hf.Close() 191 | } 192 | } 193 | 194 | // double check in case of Len=0 195 | if item.Len == 0 { 196 | item.Len = tLen(body) 197 | for _, f := range w.filters { 198 | if e := f.Check(&item); e != nil { 199 | log.Info("(reject in double check) ", e) 200 | return 201 | } 202 | } 203 | } 204 | 205 | // check quota 206 | mu.Lock() 207 | if quota.Num > 0 && quota.Size.I >= item.Len { 208 | quota.Num-- 209 | quota.Size.I -= item.Len 210 | } else { 211 | log.Info("(drop) quota exceeded (left Num=", quota.Num, " Size=", unit.FormatSize(quota.Size.I), ")") 212 | mu.Unlock() 213 | return 214 | } 215 | mu.Unlock() 216 | 217 | if w.edit != nil { 218 | log.Debug("edit torrent...") 219 | body, e = w.edit.EditTorrent(body) 220 | if e != nil { 221 | w.logger().WithFields(logrus.Fields{ 222 | "@func": "editTorrent", 223 | }).Warn(e) 224 | return 225 | } 226 | } 227 | 228 | // push to every receiver 229 | recvwg := sync.WaitGroup{} 230 | for i := range w.recvers { 231 | recvwg.Add(1) 232 | go func(recv receiver.Receiver) { 233 | err := recv.Push(&item, body) 234 | if err != nil { 235 | log.Warn("push to ", recv.Name(), " : ", err) 236 | 
} else { 237 | log.WithField("@cost", time.Since(start)).Info("push to ", recv.Name()) 238 | } 239 | recvwg.Done() 240 | }(w.recvers[i]) 241 | } 242 | // preserve (start) 243 | recvwg.Wait() 244 | }(items[k]) 245 | } 246 | } 247 | 248 | func tLen(data []byte) (l int64) { 249 | defer func() { _ = recover() }() 250 | 251 | result, err := bencode.Decode(data) 252 | if err != nil { 253 | return -1 254 | } 255 | info := result[0].Dict("info") 256 | pl := (info.Dict("piece length")).Value() 257 | ps := int64(len((info.Dict("pieces")).BStr())) / 20 258 | l = pl * ps 259 | 260 | return 261 | } 262 | -------------------------------------------------------------------------------- /ticker/rss.go: -------------------------------------------------------------------------------- 1 | package ticker 2 | 3 | import ( 4 | "io/ioutil" 5 | "net/http" 6 | "time" 7 | "unsafe" 8 | 9 | "github.com/capric98/t-rss/feed" 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | // NewRssTicker :) 14 | func NewRssTicker(n int, req *http.Request, client *http.Client, log *logrus.Entry, interval time.Duration) *Ticker { 15 | ch := make(chan []feed.Item, 10) 16 | go rssTicker(n, req, client, ch, log, interval) 17 | return &Ticker{c: ch} 18 | } 19 | 20 | func rssTicker(n int, req *http.Request, client *http.Client, ch chan []feed.Item, log *logrus.Entry, interval time.Duration) { 21 | log = log.WithField("@func", "rssTicker") 22 | 23 | times := byte(0) 24 | retry := 0 25 | for { 26 | if retry == 3 { 27 | log.Debug("reset ticker due to too many retry") 28 | retry = 0 29 | time.Sleep(interval) 30 | } 31 | retry++ 32 | 33 | resp, e := client.Do(req) 34 | if e != nil { 35 | log.Warn(e) 36 | continue 37 | } 38 | body, e := ioutil.ReadAll(resp.Body) 39 | resp.Body.Close() 40 | log.Trace("\n", *(*string)(unsafe.Pointer(&body))) 41 | 42 | items, e := feed.Parse(body) 43 | if e != nil { 44 | log.Warn("parse: ", e) 45 | continue 46 | } 47 | ch <- items 48 | 49 | if times++; int(times) == n { 50 | close(ch) 51 | return 52 | } 53 | time.Sleep(interval) 54 | retry = 0 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /ticker/types.go: -------------------------------------------------------------------------------- 1 | package ticker 2 | 3 | import "github.com/capric98/t-rss/feed" 4 | 5 | // Ticker struct 6 | type Ticker struct { 7 | c chan []feed.Item 8 | } 9 | 10 | // C :) 11 | func (t *Ticker) C() <-chan []feed.Item { 12 | return t.c 13 | } 14 | 15 | // Stop :) 16 | func (t *Ticker) Stop() { 17 | defer func() { _ = recover() }() 18 | close(t.c) 19 | } 20 | -------------------------------------------------------------------------------- /unit/duration.go: -------------------------------------------------------------------------------- 1 | package unit 2 | 3 | import ( 4 | "regexp" 5 | "strconv" 6 | "time" 7 | ) 8 | 9 | var ( 10 | secondReg = regexp.MustCompile(`[0-9]+s`) 11 | minuteReg = regexp.MustCompile(`[0-9]+m`) 12 | hourReg = regexp.MustCompile(`[0-9]+h`) 13 | dayReg = regexp.MustCompile(`[0-9]+d`) 14 | residue = regexp.MustCompile(`[0-9]+`) 15 | ) 16 | 17 | // ParseDuration parses a string to time.Duration. 18 | // If fails to parse a string, it will return 0. 
19 | func ParseDuration(s string) time.Duration { 20 | second, _ := strconv.ParseInt(shave(secondReg.FindString(s), 1), 10, 64) 21 | minute, _ := strconv.ParseInt(shave(minuteReg.FindString(s), 1), 10, 64) 22 | hour, _ := strconv.ParseInt(shave(hourReg.FindString(s), 1), 10, 64) 23 | day, _ := strconv.ParseInt(shave(dayReg.FindString(s), 1), 10, 64) 24 | t := time.Duration(day)*24*time.Hour + 25 | time.Duration(hour)*time.Hour + 26 | time.Duration(minute)*time.Minute + 27 | time.Duration(second)*time.Second 28 | if t == 0 { 29 | second, _ = strconv.ParseInt(residue.FindString(s), 10, 64) 30 | t = time.Duration(second) * time.Second 31 | } 32 | return t 33 | } 34 | -------------------------------------------------------------------------------- /unit/duration_test.go: -------------------------------------------------------------------------------- 1 | package unit 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestParseDuration(t *testing.T) { 8 | // fmt.Println(ParseDuration("11s")) 9 | // fmt.Println(ParseDuration("1m")) 10 | // fmt.Println(ParseDuration("1h")) 11 | // fmt.Println(ParseDuration("1d")) 12 | // fmt.Println(ParseDuration("1d6h")) 13 | // fmt.Println(ParseDuration("1d6h3m")) 14 | // fmt.Println(ParseDuration("31d15h6m34s")) 15 | } 16 | -------------------------------------------------------------------------------- /unit/shave.go: -------------------------------------------------------------------------------- 1 | package unit 2 | 3 | func shave(s string, n int) string { 4 | l := len(s) 5 | if l <= n { 6 | return "" 7 | } 8 | return s[:l-n] 9 | } 10 | -------------------------------------------------------------------------------- /unit/size.go: -------------------------------------------------------------------------------- 1 | package unit 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | "strconv" 7 | ) 8 | 9 | var ( 10 | bReg = regexp.MustCompile(`[0-9]+B`) 11 | // KiBReg - don't use leading k in Go names; var kBReg should be bReg :( 12 | kiBReg = regexp.MustCompile(`[0-9]+[kK][i]{0,1}B`) 13 | miBReg = regexp.MustCompile(`[0-9]+[mM][i]{0,1}B`) 14 | giBReg = regexp.MustCompile(`[0-9]+[gG][i]{0,1}B`) 15 | tiBReg = regexp.MustCompile(`[0-9]+[tT][i]{0,1}B`) 16 | 17 | toD = regexp.MustCompile(`[0-9]+`) 18 | 19 | u = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "NF"} 20 | ) 21 | 22 | // ParseSize parses a string to int64 size. 23 | // If fails to parse a string, it will return 0. 
24 | func ParseSize(s string) int64 { 25 | b, _ := strconv.ParseInt(toD.FindString(bReg.FindString(s)), 10, 64) 26 | kiB, _ := strconv.ParseInt(toD.FindString(kiBReg.FindString(s)), 10, 64) 27 | miB, _ := strconv.ParseInt(toD.FindString(miBReg.FindString(s)), 10, 64) 28 | giB, _ := strconv.ParseInt(toD.FindString(giBReg.FindString(s)), 10, 64) 29 | tiB, _ := strconv.ParseInt(toD.FindString(tiBReg.FindString(s)), 10, 64) 30 | size := tiB<<40 + giB<<30 + miB<<20 + kiB<<10 + b 31 | if size == 0 { 32 | b, _ = strconv.ParseInt(toD.FindString(s), 10, 64) 33 | size = b 34 | } 35 | return size 36 | } 37 | 38 | // FormatSize :) 39 | func FormatSize(n int64) string { 40 | f64n := float64(n) 41 | count := 0 42 | for f64n > 1024 { 43 | count++ 44 | f64n /= 1024.0 45 | } 46 | if count >= 6 { 47 | count = 6 48 | f64n = 0.1 49 | } 50 | return fmt.Sprintf("%.1f%s", f64n, u[count]) 51 | } 52 | -------------------------------------------------------------------------------- /unit/size_test.go: -------------------------------------------------------------------------------- 1 | package unit 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestParseSize(t *testing.T) { 9 | fmt.Println(ParseSize("11B")) 10 | fmt.Println(ParseSize("11KB")) 11 | fmt.Println(ParseSize("11kB")) 12 | fmt.Println(ParseSize("11KiB")) 13 | fmt.Println(ParseSize("10MB12KB")) 14 | fmt.Println(ParseSize("100MiB")) 15 | fmt.Println(ParseSize("10GB")) 16 | fmt.Println(ParseSize("10TB10GB10MB10KB10B")) 17 | } 18 | -------------------------------------------------------------------------------- /unit/time.go: -------------------------------------------------------------------------------- 1 | package unit 2 | 3 | import "time" 4 | 5 | var ( 6 | timeFormat = []string{time.ANSIC, time.UnixDate, time.RubyDate, 7 | time.RFC1123, time.RFC1123Z, time.RFC3339, time.RFC3339Nano, 8 | time.RFC822, time.RFC822Z, time.RFC850, time.Kitchen, 9 | time.Stamp, time.StampMicro, time.StampMilli, time.StampNano} 10 | ) 11 | 12 | // ParseTime parses a string to time.Time. 13 | // If fails to parse a string, it will return time.Now(). 14 | func ParseTime(s string) time.Time { 15 | for k := range timeFormat { 16 | t, e := time.Parse(timeFormat[k], s) 17 | if e == nil { 18 | return t 19 | } 20 | } 21 | return time.Now() 22 | } 23 | -------------------------------------------------------------------------------- /version.go: -------------------------------------------------------------------------------- 1 | package trss 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | ) 7 | 8 | var ( 9 | version = "v0.6.12" 10 | intro = fmt.Sprintf("t-rss %v %v/%v (%v build)\n", version, runtime.GOOS, runtime.GOARCH, runtime.Version()) 11 | ) 12 | 13 | func init() { 14 | fmt.Println(intro) 15 | } 16 | --------------------------------------------------------------------------------
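As a closing reference, a small sketch (not part of the repository) showing how the unit helpers fit together: ParseSize and ParseDuration accept the concatenated forms used in config.example.yml, and FormatSize renders a byte count back into a human-readable unit. The expected outputs in the comments follow from the parsing rules above:

package main

import (
	"fmt"

	"github.com/capric98/t-rss/unit"
)

func main() {
	// Size strings may concatenate several units, as in the example config.
	fmt.Println(unit.ParseSize("10MB340KB"))               // 10 MiB + 340 KiB, in bytes
	fmt.Println(unit.FormatSize(unit.ParseSize("100GiB"))) // 100.0GiB

	// Duration strings use the {int}s/m/h/d form; a bare number is read as seconds.
	fmt.Println(unit.ParseDuration("1d6h3m")) // 30h3m0s
	fmt.Println(unit.ParseDuration("90"))     // 1m30s
}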