├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml └── workflows │ ├── docker.yml │ └── go.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── Note.md ├── README.md ├── README_NEW.md ├── api └── router.go ├── app.json ├── assets ├── GeoLite2-City.mmdb ├── flags.json └── proxy.jpg ├── config ├── config.go ├── config.yaml ├── source.go └── source.yaml ├── docs ├── fast.png ├── genbindata.sh └── speedtest.png ├── go.mod ├── go.sum ├── internal ├── app │ ├── getter.go │ └── task.go ├── bindata │ ├── geoip │ │ └── geoip.go │ └── html │ │ └── html.go ├── cache │ ├── cache.go │ └── vars.go ├── cloudflare │ └── cache.go ├── cron │ └── cron.go └── database │ ├── db.go │ ├── db_test.go │ └── proxy.go ├── log ├── file.go ├── level.go └── log.go ├── main.go └── pkg ├── getter ├── base.go ├── clash.go ├── subscribe.go ├── tgchannel.go ├── web_fanqiangdang.go ├── web_free_ssr_xyz.go ├── web_fuzz.go └── web_fuzz_sub.go ├── healthcheck ├── delaycheck.go ├── speedcheck.go ├── speedserver.go ├── speeduser.go ├── statistic.go └── util.go ├── provider ├── base.go ├── clash.go ├── ssrsub.go ├── sssub.go ├── surge.go ├── trojansub.go └── vmesssub.go ├── proxy ├── base.go ├── convert.go ├── geoip.go ├── link_test.go ├── proxies.go ├── shadowsocks.go ├── shadowsocksr.go ├── trojan.go └── vmess.go └── tool ├── base64.go ├── cfdecode.go ├── check.go ├── colly.go ├── httpclient.go ├── option.go └── unicode.go /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] ['zu1k'] 4 | patreon: zu1k # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | 13 | - [ ] I'm using the newest release version, and bug isn't solved 14 | 我使用的是最新的release版本,Bug仍未解决 15 | - [ ] I've tested on the newest master, and bug isn't solved 16 | 我测试的是最新master分支,Bug仍未解决 17 | - [ ] I've searched the issues and there's not a similar one 18 | 我已经查看过其他issue,没有类似的情况 19 | 20 | Bug Version出现Bug的版本:v0.x.x 21 | Environment操作环境:heroku/ubuntu/mac 22 | 23 | ## Description 问题描述 24 | 25 | 26 | 27 | 28 | 29 | 30 | ## To Reproduce 复现步骤 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Propose a new feature to help us improve 4 | title: "" 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## 
Description 描述 11 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | - package-ecosystem: "docker" 8 | directory: "/" 9 | schedule: 10 | interval: "daily" 11 | - package-ecosystem: "gomod" 12 | directory: "/" 13 | schedule: 14 | interval: "daily" 15 | open-pull-requests-limit: 10 -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: docker 2 | 3 | on: 4 | push: 5 | tags-ignore: 6 | - v*-*-* 7 | pull_request: 8 | 9 | jobs: 10 | build: 11 | name: Build 12 | if: startsWith(github.ref, 'refs/tags/') 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Check out code into the Go module directory 16 | uses: actions/checkout@v2.3.4 17 | 18 | - name: Build and push Docker images 19 | uses: docker/build-push-action@v2.2.1 20 | with: 21 | username: sansui233 22 | password: ${{ secrets.GITHUB_TOKEN }} 23 | registry: docker.pkg.github.com 24 | repository: sansui233/proxypool/proxypool 25 | tag_with_ref: true 26 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | on: 3 | push: 4 | tags-ignore: 5 | - v*-*-* 6 | pull_request: 7 | jobs: 8 | 9 | build: 10 | name: Build 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Setup Go 14 | uses: actions/setup-go@v2 15 | with: 16 | go-version: 1.14.x 17 | 18 | - name: Check out code into the Go module directory 19 | uses: actions/checkout@v2.3.4 20 | 21 | - name: Cache go module 22 | uses: actions/cache@v2 23 | with: 24 | path: ~/go/pkg/mod 25 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 26 | restore-keys: | 27 | ${{ runner.os }}-go- 28 | 29 | - name: Get dependencies and run test 30 | run: | 31 | go test ./... 32 | 33 | - name: gen go-bindata 34 | if: startsWith(github.ref, 'refs/tags/') 35 | run: | 36 | go get -u github.com/go-bindata/go-bindata/... 
37 | go-bindata -o internal/bindata/geoip/geoip.go -pkg bingeoip assets/GeoLite2-City.mmdb assets/flags.json 38 | 39 | - name: Build 40 | if: startsWith(github.ref, 'refs/tags/') 41 | env: 42 | NAME: proxypool 43 | BINDIR: bin 44 | run: make -j releases 45 | 46 | - name: Upload Release 47 | uses: softprops/action-gh-release@v1 48 | if: startsWith(github.ref, 'refs/tags/') 49 | env: 50 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 51 | with: 52 | files: bin/* 53 | draft: true 54 | prerelease: true 55 | 56 | - uses: actions/upload-artifact@v2.2.1 57 | if: startsWith(github.ref, 'refs/tags/') 58 | with: 59 | name: build 60 | path: bin 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | bin/* 8 | tmp/ 9 | 10 | # Test binary, build with `go test -c` 11 | *.test 12 | # Manually add it if neccessary 13 | *_test.* 14 | 15 | # Output of the go coverage tool, specifically when used with LiteIDE 16 | *.out 17 | 18 | # dep 19 | vendor 20 | 21 | # GoLand 22 | .idea/* 23 | 24 | # macOS file 25 | .DS_Store 26 | 27 | # assets 28 | assets/html/* 29 | assets/css/* 30 | assets/static/* 31 | 32 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine as builder 2 | 3 | RUN apk add --no-cache make git 4 | WORKDIR /proxypool-src 5 | COPY . /proxypool-src 6 | RUN go mod download && \ 7 | make docker && \ 8 | mv ./bin/proxypool-docker /proxypool 9 | 10 | FROM alpine:latest 11 | 12 | RUN apk add --no-cache ca-certificates tzdata 13 | WORKDIR /proxypool-src 14 | COPY ./assets /proxypool-src/assets 15 | COPY --from=builder /proxypool /proxypool-src/ 16 | ENTRYPOINT ["/proxypool-src/proxypool", "-d"] 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 
28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 
91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 
150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 
216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. 
You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. 
Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. 
If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. 
If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 
633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 
675 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | NAME=proxypool 2 | BINDIR=bin 3 | VERSION=$(shell git describe --tags || echo "unknown version") 4 | GOBUILD=CGO_ENABLED=0 go build -trimpath -ldflags '-w -s' 5 | 6 | PLATFORM_LIST = \ 7 | darwin-amd64 \ 8 | linux-386 \ 9 | linux-amd64 \ 10 | linux-armv5 \ 11 | linux-armv6 \ 12 | linux-armv7 \ 13 | linux-armv8 \ 14 | linux-mips-softfloat \ 15 | linux-mips-hardfloat \ 16 | linux-mipsle-softfloat \ 17 | linux-mipsle-hardfloat \ 18 | linux-mips64 \ 19 | linux-mips64le \ 20 | freebsd-386 \ 21 | freebsd-amd64 22 | 23 | 24 | all: linux-amd64 darwin-amd64 25 | 26 | docker: 27 | $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 28 | 29 | darwin-amd64: 30 | GOARCH=amd64 GOOS=darwin $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 31 | 32 | linux-386: 33 | GOARCH=386 GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 34 | 35 | linux-amd64: 36 | GOARCH=amd64 GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 37 | 38 | linux-armv5: 39 | GOARCH=arm GOOS=linux GOARM=5 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 40 | 41 | linux-armv6: 42 | GOARCH=arm GOOS=linux GOARM=6 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 43 | 44 | linux-armv7: 45 | GOARCH=arm GOOS=linux GOARM=7 $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 46 | 47 | linux-armv8: 48 | GOARCH=arm64 GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 49 | 50 | linux-mips-softfloat: 51 | GOARCH=mips GOMIPS=softfloat GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 52 | 53 | linux-mips-hardfloat: 54 | GOARCH=mips GOMIPS=hardfloat GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 55 | 56 | linux-mipsle-softfloat: 57 | GOARCH=mipsle GOMIPS=softfloat GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 58 | 59 | linux-mipsle-hardfloat: 60 | GOARCH=mipsle GOMIPS=hardfloat GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 61 | 62 | linux-mips64: 63 | GOARCH=mips64 GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 64 | 65 | linux-mips64le: 66 | GOARCH=mips64le GOOS=linux $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 67 | 68 | freebsd-386: 69 | GOARCH=386 GOOS=freebsd $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 70 | 71 | freebsd-amd64: 72 | GOARCH=amd64 GOOS=freebsd $(GOBUILD) -o $(BINDIR)/$(NAME)-$@ 73 | 74 | gz_releases=$(addsuffix .gz, $(PLATFORM_LIST)) 75 | 76 | $(gz_releases): %.gz : % 77 | chmod +x $(BINDIR)/$(NAME)-$(basename $@) 78 | gzip -f -S -$(VERSION).gz $(BINDIR)/$(NAME)-$(basename $@) 79 | 80 | all-arch: $(PLATFORM_LIST) 81 | 82 | releases: $(gz_releases) 83 | clean: 84 | rm $(BINDIR)/* 85 | -------------------------------------------------------------------------------- /Note.md: -------------------------------------------------------------------------------- 1 | # Note 2 | 3 | 由于个人并未学过go,看代码需要做下笔记。仅为个人参考。 4 | 5 | ## 处理订阅源:getter类 6 | 有关订阅源的package位于pkg/getter。 7 | 8 | 订阅源的类型为接口Getter,实现Getter至少需要实现Get()和Get2chan()。 9 | - Get() 返回一个ProxyList 10 | - Get2chan() Send proxy to Channel用于并发抓取 11 | 12 | 已实现的Getter(以sourceType命名) 13 | - subscribe(该实现比接口Getter多了个url) 14 | - tgchannel 15 | - web_fanqiangdang 16 | - web_fuzz 17 | - web_fuzz_sub 18 | 19 | 接口Getter与err状态组成一个creator,方便错误处理。 20 | 为了方便外部程序辨认creator类型,在init()中初始化一个map,key为sourceType字符串,value为creator。 21 | 22 | 程序运行时,package app由配置文件读取到source.yaml,由sourceType map到对应的creator类型,同时使用sourceOption(通常是url)初始化一个creator。 23 | 24 | 所有Getter最后存于package app的Getters中。 25 | 26 | ## proxy类 27 | 节点的接口为Proxy,由struct Base实现其基类,Vmess等实现多态。 28 | 29 | 所有字段名依据clash的配置文件标准设计。比如 30 | ``` 31 | type ShadowsocksR struct { 32 | Base // 节点基本信息 33 | 
Password string `yaml:"password" json:"password"` 34 | Cipher string `yaml:"cipher" json:"cipher"` 35 | Protocol string `yaml:"protocol" json:"protocol"` 36 | ProtocolParam string `yaml:"protocol-param,omitempty" json:"protocol_param,omitempty"` 37 | Obfs string `yaml:"obfs" json:"obfs"` 38 | ObfsParam string `yaml:"obfs-param,omitempty" json:"obfs_param,omitempty"` 39 | Group string `yaml:"group,omitempty" json:"group,omitempty"` 40 | } 41 | ``` 42 | 43 | Proxylist是proxy数组加上一系列批量处理proxy的方法。 44 | 45 | 不知道是否是有意为之,基类的Base的方法的传入参数全部用的指针,因为Base变成了Proxy的指针实现。因此Vmess等对于接口Proxy而言也是Proxy的指针,type assertion应该写为 `proxy.(*Vmess)`。 46 | 47 | ## 抓取 48 | task.go的Crawl.go实现抓取。 49 | 50 | 1. 并发抓取订阅源,加载历史节点 51 | 2. 节点去重,去除Clash不支持的类型,重命名 52 | 3. 存储所有节点(包括不可用节点)到database和cache 53 | 4. 检测IP可用性 54 | 尽管已经对IP的有效性测试过,但并不保证节点在客户端上可用,特别是封IP和端口的时期,不可用的比例接近100%。可以使用proxypoolCheck进行本地检测。 55 | 5. 存储可用的节点到cache 56 | 57 | ## 存储 58 | 所有节点存储到cache中。 59 | 60 | cache中的key设计有: 61 | - allproxies: 所有节点(包括不可用节点) 62 | - proxies: 可用节点 63 | - clashproxies: clash支持的节点。第一次运行时是把proxies复制过来的。 64 | 65 | 问题是对于失效的节点也存储,运行时间久了无用的cache会非常多。可以考虑删除对失效节点的存放。 66 | 67 | ### 使用数据库 68 | 远程运行时添加add即可,heroku自己会添加DATABASE_URL环境变量到provision,无需其他配置。 69 | 70 | 本地运行时安装postgresql,建立相应user和database。 71 | 72 | ``` 73 | dsn := "user=proxypool password=proxypool dbname=proxypool port=5432 sslmode=disable TimeZone=Asia/Shanghai" 74 | ``` 75 | 76 | 程序运行时建立会proxies表。每次运行时读出节点,爬虫完成后再存储可用的节点进去。 77 | 78 | 更新时会Update所有数据库上次可用节点的usable为false(此时useable全是false),然后存储新节点,已有的条目则Update usable。最后再自动清除7天未更新且不可用的节点。 79 | 80 | 重点在于,失效的条目不能更新。 81 | 82 | ## Health Check 83 | 84 | 分为延迟测试与测速(带宽测试)。 85 | 86 | 延迟测试用于筛选掉无效的节点。 87 | 88 | 已经重写测速。单线程测速,失败请求3次。需要在配置文件中依据带宽调整connection与timeout。不同的网络环境下测速结果可能有着巨大的差别。 89 | 90 | 91 | ## Web界面 92 | 93 | 为了方便打包,原作者将静态的assets文件模板由zip压缩后存为字符串的形式,如 94 | 95 | ``` 96 | var _assetsHtmlSurgeHtml="[]byte("\x1f\x8b\x...")" 97 | ``` 98 | 99 | 以上字节解压后是一个go的HTML模板。解压时,由gzip的reader写入byte.Buffer,再转换为Bytes写入相应文件。 100 | 101 | 静态文件打包工具见:[这里](https://github.com/go-bindata/go-bindata) 或 [这里](https://github.com/shuLhan/go-bindata) 。请在修改后html文件后执行docs里的shell脚本 102 | 103 | 根据原作者要求,请勿修改原作者版权信息。 104 | 105 | Web数据是Get一次更新一次。但是貌似在cache改变前不会重复读取cache中的内容(还是说是时间间隔没到?没有看gin-cache的源码,有待验证)。 106 | 107 | 关于页面端口,有时候会遇到web端口时proxypool服务端口不一致的情况(如heroku和内网穿透)。web页面上的端口显示和config文件保持一致,实际的服务端口由配置文件或环境变量决定,环境变量优先级更高。 108 | 109 | 一般情况下,部署到自己的机器上时,需要确保没有PORT环境变量(除非你明白其中的原理且知道自己在做什么) 110 | 111 | ## 本地测试 112 | 113 | 需要注意: 114 | - 修改config的domain 115 | - 修改source,注释掉较慢的源 116 | 117 | 增加了对config-local文件的解析。url为/clash/localconfig 118 | 119 | ## Github Action Release和源码自行make版本的区别 120 | 121 | Release版本在make之前还打包了所有的静态文件,其中包括了一个60M的GeoIP数据库。 122 | 缺点是打包体积相对较大,因为geoip.go经过gobindata打包后的文件是120。且第一次部署运行时会占用较多内存。 123 | 优点是不需要额外自行下载数据库。打包的体积也比自己下载数据库的体积小太多。 124 | 125 | 自行源码make版本在不修改bindata/GeoIP的情况下,不包含数据库,打包程序较小。缺点是要自已下载数据库(源码都有了这不算是问题) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | # proxypool
3 | 
4 | 自动抓取tg频道、订阅地址、公开互联网上的ss、ssr、vmess、trojan节点信息,聚合去重测试可用性后提供节点列表
5 | 
6 | <!-- 原第 6-16 行为徽章(Github Actions 等)的 HTML/图片标签,内容在导出时丢失,此处仅保留占位说明 -->
17 | 
18 | 19 | ## 支持 20 | 21 | - 支持ss、ssr、vmess、trojan多种类型 22 | - Telegram频道抓取 23 | - 订阅地址抓取解析 24 | - 公开互联网页面模糊抓取 25 | - 定时抓取自动更新 26 | - 通过配置文件设置抓取源 27 | - 自动检测节点可用性 28 | - 提供clash、surge配置文件 29 | - 提供ss、ssr、vmess、sip002订阅 30 | 31 | ## 安装 32 | 33 | 以下四选一。 34 | 35 | ### 使用Heroku 36 | 37 | 点击按钮进入部署页面,填写基本信息然后运行 38 | 39 | 其中 `DOMAIN` 需要填写为你需要绑定的域名,`CONFIG_FILE` 需要填写你的配置文件路径。 40 | 41 | > heroku app域名为appname.herokuapp.com。项目内配置文件为./config/config.yaml 42 | 43 | 配置文件模板见 config/config.yaml 文件,可选项区域均可不填。完整配置选项请查看[配置文件说明](https://github.com/Sansui233/proxypool/wiki/%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6%E8%AF%B4%E6%98%8E)。 44 | 45 | [![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy) 46 | 47 | > 因为爬虫程序需要持续运行,所以至少选择 $7/月 的配置 48 | > 免费配置长时间无人访问会被heroku强制停止 49 | 50 | ### 从源码编译 51 | 52 | 需要安装Golang 53 | 54 | ```sh 55 | $ go get -u -v github.com/Sansui233/proxypool 56 | ``` 57 | 58 | 运行 59 | ```shell script 60 | $ go run main.go -c ./config/config.yaml 61 | ``` 62 | 63 | 编译 64 | ``` 65 | make 66 | ``` 67 | 68 | ### 下载预编译程序 69 | 70 | 从这里下载预编译好的程序 [release](https://github.com/Sansui233/proxypool/releases)。 71 | 72 | ### 使用docker 73 | 74 | ```sh 75 | docker pull docker.pkg.github.com/Sansui233/proxypool/proxypool:latest 76 | ``` 77 | 78 | ## 使用 79 | 80 | 运行该程序需要具有访问完整互联网的能力。 81 | 82 | ### 修改配置文件 83 | 84 | 首先修改 config.yaml 中的必要配置信息。带有默认值的字段均可不填写。完整的配置选项见[配置文件说明](https://github.com/Sansui233/proxypool/wiki/%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6%E8%AF%B4%E6%98%8E) 85 | 86 | ### 启动程序 87 | 88 | 使用 `-c` 参数指定配置文件路径,支持http链接 89 | 90 | ```shell 91 | proxypool -c ./config/config.yaml 92 | ``` 93 | 94 | 如果需要部署到VPS,更多细节请[查看wiki](https://github.com/Sansui233/proxypool/wiki/%E9%83%A8%E7%BD%B2%E5%88%B0VPS-Step-by-Step)。 95 | 96 | ## Clash配置文件 97 | 98 | 远程部署时Clash配置文件访问:https://domain/clash/config 99 | 100 | 本地运行时Clash配置文件访问:http://127.0.0.1:[端口]/clash/localconfig 101 | 102 | ## 本地检查节点可用性 103 | 104 | 此项非必须。为了提高实际可用性,可选择增加一个本地服务器,检测远程proxypool节点在本地的可用性并提供配置,见[proxypoolCheck](https://github.com/Sansui233/proxypoolCheck)。 105 | 106 | ## 截图 107 | 108 | ![Speedtest](docs/speedtest.png) 109 | 110 | ![Fast](docs/fast.png) 111 | 112 | ## 声明 113 | 114 | 本项目遵循 GNU General Public License v3.0 开源,在此基础上,所有使用本项目提供服务者都必须在网站首页保留指向本项目的链接 115 | -------------------------------------------------------------------------------- /README_NEW.md: -------------------------------------------------------------------------------- 1 | Clash客户端支持: 2 | - Clash for Windows(需要Clash Core1.3以上) 3 | - ClashX(需要Clash Core1.3以上) 4 | - 不支持ClashXR与ClashR等非原生Clash Core客户端。 5 | 6 | ## New 7 | 8 | 2020-12-09 9 | - 前端页面改版 10 | 页面还需优化,以及增加api支持前后端分离。gin的路由和重启与高消耗的程序绑定的设计不好。即便前者可以使用分组渲染解决,实测时间一久就不稳定,尤其是在静态文件的托管上。也可能和cache有关。 11 | 12 | 2020-12-06 13 | - 紧急修复一个非常影响使用的bug(v0.5.3) 14 | 15 | 2020-12-02 16 | - source增加Clash配置格式 17 | - 增加trojansub 18 | 19 | 2020-12-01 20 | - 修复节点阻塞bug,取消对vmess h2的检测 21 | 22 | 2020-11-28 23 | - 日志分级,输出到文件 24 | 25 | 2020-11-26 26 | - 修复了一个导致vmess h2解析错误的bug= = 27 | - 显著增强对cf的js重写的破解。目前看来还需要大量的实例完善此功能 28 | - 优化命名流程,结构体对齐调整,优化内存占用(时间与算力换空间)。 29 | 30 | 2020-11-25 31 | - 增加tg channel文件的抓取 32 | 33 | 2020-11-24 34 | - 修改一个getter,移除cf cdn的email protection 35 | - 破解cf的js重写url保护 36 | 37 | 2020-11-21 38 | - 重构healthcheck 39 | - 分离节点抓取与测速,增加自定义测速间隔,减缓节点过多时的服务器流量压力 40 | - 增加活跃节点增加速度检测频率策略 41 | - Config文件增加很多带默认值的自定义字段 42 | 43 | 2020-11-18 44 | - 修改前端逻辑。config文件中的port不再影响前端。前端订阅强制使用https+443端口。 45 | 46 | 2020-11-17 47 | - 修复编译版本首次运行geoIP未初始化的的bug 48 | 49 | 2020-11-14 50 | - 测速重写为单线程测速,增加自定义参数,测速有效性更加严格,带宽足够的情况下测速过程更快,结果可用性更高。 51 | - 
速度筛选支持区间 52 | 53 | 2020-11-12 54 | - 增加自定义端口 55 | - 增加速度筛选 56 | - 改进启动流程,启动时载入数据库数据,解决heroku冷启动问题 57 | 58 | 2020-11-11 59 | - 增加并发测速。 60 | 61 | 2020-11-06 62 | - 修改vmess struct至与Clash相同。 63 | 64 | 2020-11-05 65 | - vmess增加http与https解析的支持(仅理论支持,没有测试客户端对于缺少参数时的支持情况) 66 | 67 | 2020-10-30 68 | - 减少启动时的内存占用(使用release版本第一次运行时除外) 69 | 70 | 2020-10-26 71 | - 单独分离出healthcheck模块 72 | - 分离出用于本地检测proxypool可用性的部分,见[proxypoolCheck](https://github.com/Sansui233/proxypoolCheck)项目 73 | 74 | 2020-10-24 75 | - Vmess动态格式解析,对链接的字段类型进行强制转换(可以爬到更多节点) 76 | 77 | 2020-10-23 78 | - 修复数据库未连接时的err提示 79 | - 忽略vmess的Unmarshal时的ps类型错误 80 | 81 | 2020-10-21 82 | - 数据库更新改为保留数据库已有节点与当次有效节点,且清扫失效时间大于7天的节点 83 | - Manually sync to original source v0.3.10 84 | 85 | 2020-10-10 86 | - 修复:对空provider添加NULL节点,防止Clash报错 87 | - 数据库更新不再存储所有的节点,只保留当次有效节点 88 | 89 | 2020-10-09 90 | - 增加本地http运行用的配置文件 91 | 92 | > clash的本地配置文件位于127.0.0.1:8080/clash/localconfig -------------------------------------------------------------------------------- /api/router.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | binhtml "github.com/Sansui233/proxypool/internal/bindata/html" 5 | "github.com/Sansui233/proxypool/log" 6 | "html/template" 7 | "net/http" 8 | "os" 9 | "strconv" 10 | "strings" 11 | "time" 12 | 13 | "github.com/Sansui233/proxypool/config" 14 | appcache "github.com/Sansui233/proxypool/internal/cache" 15 | "github.com/Sansui233/proxypool/pkg/provider" 16 | "github.com/gin-contrib/cache" 17 | "github.com/gin-contrib/cache/persistence" 18 | "github.com/gin-gonic/gin" 19 | _ "github.com/heroku/x/hmetrics/onload" 20 | ) 21 | 22 | const version = "v0.5.3" 23 | 24 | var router *gin.Engine 25 | 26 | func setupRouter() { 27 | gin.SetMode(gin.ReleaseMode) 28 | router = gin.New() // 没有任何中间件的路由 29 | store := persistence.NewInMemoryStore(time.Minute) 30 | router.Use(gin.Recovery(), cache.SiteCache(store, time.Minute)) // 加上处理panic的中间件,防止遇到panic退出程序 31 | 32 | _ = binhtml.RestoreAssets("", "assets/html") // 恢复静态文件(不恢复问题也不大就是难修改) 33 | _ = binhtml.RestoreAssets("", "assets/static") 34 | 35 | temp, err := loadHTMLTemplate() // 加载html模板,模板源存放于html.go中的类似_assetsHtmlSurgeHtml的变量 36 | if err != nil { 37 | panic(err) 38 | } 39 | router.SetHTMLTemplate(temp) // 应用模板 40 | 41 | router.StaticFile("/static/index.js", "assets/static/index.js") 42 | 43 | router.GET("/", func(c *gin.Context) { 44 | c.HTML(http.StatusOK, "assets/html/index.html", gin.H{ 45 | "domain": config.Config.Domain, 46 | "getters_count": appcache.GettersCount, 47 | "all_proxies_count": appcache.AllProxiesCount, 48 | "ss_proxies_count": appcache.SSProxiesCount, 49 | "ssr_proxies_count": appcache.SSRProxiesCount, 50 | "vmess_proxies_count": appcache.VmessProxiesCount, 51 | "trojan_proxies_count": appcache.TrojanProxiesCount, 52 | "useful_proxies_count": appcache.UsefullProxiesCount, 53 | "last_crawl_time": appcache.LastCrawlTime, 54 | "is_speed_test": appcache.IsSpeedTest, 55 | "version": version, 56 | }) 57 | }) 58 | 59 | router.GET("/clash", func(c *gin.Context) { 60 | c.HTML(http.StatusOK, "assets/html/clash.html", gin.H{ 61 | "domain": config.Config.Domain, 62 | "port": config.Config.Port, 63 | }) 64 | }) 65 | 66 | router.GET("/surge", func(c *gin.Context) { 67 | c.HTML(http.StatusOK, "assets/html/surge.html", gin.H{ 68 | "domain": config.Config.Domain, 69 | }) 70 | }) 71 | 72 | router.GET("/shadowrocket", func(c *gin.Context) { 73 | c.HTML(http.StatusOK, "assets/html/shadowrocket.html", gin.H{ 74 | "domain": config.Config.Domain, 
75 | }) 76 | }) 77 | 78 | router.GET("/clash/config", func(c *gin.Context) { 79 | c.HTML(http.StatusOK, "assets/html/clash-config.yaml", gin.H{ 80 | "domain": config.Config.Domain, 81 | }) 82 | }) 83 | router.GET("/clash/localconfig", func(c *gin.Context) { 84 | c.HTML(http.StatusOK, "assets/html/clash-config-local.yaml", gin.H{ 85 | "port": config.Config.Port, 86 | }) 87 | }) 88 | 89 | router.GET("/surge/config", func(c *gin.Context) { 90 | c.HTML(http.StatusOK, "assets/html/surge.conf", gin.H{ 91 | "domain": config.Config.Domain, 92 | }) 93 | }) 94 | 95 | router.GET("/clash/proxies", func(c *gin.Context) { 96 | proxyTypes := c.DefaultQuery("type", "") 97 | proxyCountry := c.DefaultQuery("c", "") 98 | proxyNotCountry := c.DefaultQuery("nc", "") 99 | proxySpeed := c.DefaultQuery("speed", "") 100 | text := "" 101 | if proxyTypes == "" && proxyCountry == "" && proxyNotCountry == "" && proxySpeed == "" { 102 | text = appcache.GetString("clashproxies") // A string. To show speed in this if condition, this must be updated after speedtest 103 | if text == "" { 104 | proxies := appcache.GetProxies("proxies") 105 | clash := provider.Clash{ 106 | Base: provider.Base{ 107 | Proxies: &proxies, 108 | }, 109 | } 110 | text = clash.Provide() // 根据Query筛选节点 111 | appcache.SetString("clashproxies", text) 112 | } 113 | } else if proxyTypes == "all" { 114 | proxies := appcache.GetProxies("allproxies") 115 | clash := provider.Clash{ 116 | provider.Base{ 117 | Proxies: &proxies, 118 | Types: proxyTypes, 119 | Country: proxyCountry, 120 | NotCountry: proxyNotCountry, 121 | Speed: proxySpeed, 122 | }, 123 | } 124 | text = clash.Provide() // 根据Query筛选节点 125 | } else { 126 | proxies := appcache.GetProxies("proxies") 127 | clash := provider.Clash{ 128 | provider.Base{ 129 | Proxies: &proxies, 130 | Types: proxyTypes, 131 | Country: proxyCountry, 132 | NotCountry: proxyNotCountry, 133 | Speed: proxySpeed, 134 | }, 135 | } 136 | text = clash.Provide() // 根据Query筛选节点 137 | } 138 | c.String(200, text) 139 | }) 140 | router.GET("/surge/proxies", func(c *gin.Context) { 141 | proxyTypes := c.DefaultQuery("type", "") 142 | proxyCountry := c.DefaultQuery("c", "") 143 | proxyNotCountry := c.DefaultQuery("nc", "") 144 | proxySpeed := c.DefaultQuery("speed", "") 145 | text := "" 146 | if proxyTypes == "" && proxyCountry == "" && proxyNotCountry == "" && proxySpeed == "" { 147 | text = appcache.GetString("surgeproxies") // A string. 
To show speed in this if condition, this must be updated after speedtest 148 | if text == "" { 149 | proxies := appcache.GetProxies("proxies") 150 | surge := provider.Surge{ 151 | Base: provider.Base{ 152 | Proxies: &proxies, 153 | }, 154 | } 155 | text = surge.Provide() 156 | appcache.SetString("surgeproxies", text) 157 | } 158 | } else if proxyTypes == "all" { 159 | proxies := appcache.GetProxies("allproxies") 160 | surge := provider.Surge{ 161 | Base: provider.Base{ 162 | Proxies: &proxies, 163 | Types: proxyTypes, 164 | Country: proxyCountry, 165 | NotCountry: proxyNotCountry, 166 | Speed: proxySpeed, 167 | }, 168 | } 169 | text = surge.Provide() 170 | } else { 171 | proxies := appcache.GetProxies("proxies") 172 | surge := provider.Surge{ 173 | Base: provider.Base{ 174 | Proxies: &proxies, 175 | Types: proxyTypes, 176 | Country: proxyCountry, 177 | NotCountry: proxyNotCountry, 178 | }, 179 | } 180 | text = surge.Provide() 181 | } 182 | c.String(200, text) 183 | }) 184 | 185 | router.GET("/ss/sub", func(c *gin.Context) { 186 | proxies := appcache.GetProxies("proxies") 187 | ssSub := provider.SSSub{ 188 | Base: provider.Base{ 189 | Proxies: &proxies, 190 | Types: "ss", 191 | }, 192 | } 193 | c.String(200, ssSub.Provide()) 194 | }) 195 | router.GET("/ssr/sub", func(c *gin.Context) { 196 | proxies := appcache.GetProxies("proxies") 197 | ssrSub := provider.SSRSub{ 198 | Base: provider.Base{ 199 | Proxies: &proxies, 200 | Types: "ssr", 201 | }, 202 | } 203 | c.String(200, ssrSub.Provide()) 204 | }) 205 | router.GET("/vmess/sub", func(c *gin.Context) { 206 | proxies := appcache.GetProxies("proxies") 207 | vmessSub := provider.VmessSub{ 208 | Base: provider.Base{ 209 | Proxies: &proxies, 210 | Types: "vmess", 211 | }, 212 | } 213 | c.String(200, vmessSub.Provide()) 214 | }) 215 | router.GET("/sip002/sub", func(c *gin.Context) { 216 | proxies := appcache.GetProxies("proxies") 217 | sip002Sub := provider.SIP002Sub{ 218 | Base: provider.Base{ 219 | Proxies: &proxies, 220 | Types: "ss", 221 | }, 222 | } 223 | c.String(200, sip002Sub.Provide()) 224 | }) 225 | router.GET("/trojan/sub", func(c *gin.Context) { 226 | proxies := appcache.GetProxies("proxies") 227 | trojanSub := provider.TrojanSub{ 228 | Base: provider.Base{ 229 | Proxies: &proxies, 230 | Types: "trojan", 231 | }, 232 | } 233 | c.String(200, trojanSub.Provide()) 234 | }) 235 | router.GET("/link/:id", func(c *gin.Context) { 236 | idx := c.Param("id") 237 | proxies := appcache.GetProxies("allproxies") 238 | id, err := strconv.Atoi(idx) 239 | if err != nil { 240 | c.String(500, err.Error()) 241 | } 242 | if id >= proxies.Len() || id < 0 { 243 | c.String(500, "id out of range") 244 | } 245 | c.String(200, proxies[id].Link()) 246 | }) 247 | } 248 | 249 | func Run() { 250 | setupRouter() 251 | servePort := config.Config.Port 252 | envp := os.Getenv("PORT") // environment port for heroku app 253 | if envp != "" { 254 | servePort = envp 255 | } 256 | // Run on this server 257 | err := router.Run(":" + servePort) 258 | if err != nil { 259 | log.Errorln("router: Web server starting failed. Make sure your port %s has not been used. 
\n%s", servePort, err.Error()) 260 | } else { 261 | log.Infoln("Proxypool is serving on port: %s", servePort) 262 | } 263 | } 264 | 265 | // 返回页面templates 266 | func loadHTMLTemplate() (t *template.Template, err error) { 267 | t = template.New("") 268 | for _, fileName := range binhtml.AssetNames() { //fileName带有路径前缀 269 | if strings.Contains(fileName, "css") { 270 | continue 271 | } 272 | data := binhtml.MustAsset(fileName) //读取页面数据 273 | t, err = t.New(fileName).Parse(string(data)) //生成带路径名称的模板 274 | if err != nil { 275 | return nil, err 276 | } 277 | } 278 | return t, nil 279 | } 280 | -------------------------------------------------------------------------------- /app.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "proxypool", 3 | "description": "自动抓取tg频道、订阅地址、公开互联网上的ss、ssr、vmess节点信息,聚合去重测试可用性后提供节点列表", 4 | "website": "https://proxypoolss.herokuapp.com/", 5 | "repository": "https://github.com/Sansui233/proxypool", 6 | "success_url": "/", 7 | "logo": "https://raw.githubusercontent.com/Sansui233/proxypool/heroku/assets/proxy.jpg", 8 | "keywords": ["golang", "ss", "ssr", "vmess", "shadowsocks", "shadowsocksr", "trojan"], 9 | "env": { 10 | "CONFIG_FILE": { 11 | "description": "Path to config file, could be a url." 12 | }, 13 | "DOMAIN": { 14 | "description": "Domain to use." 15 | }, 16 | "CF_API_EMAIL": { 17 | "description": "Cloudflare Email.", 18 | "required": false 19 | }, 20 | "CF_API_KEY": { 21 | "description": "Cloudflare API key.", 22 | "required": false 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /assets/GeoLite2-City.mmdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alvin9999/proxypool/b0f9fa721810e6ff1bcd0fdb0ab37e79f44c3f6a/assets/GeoLite2-City.mmdb -------------------------------------------------------------------------------- /assets/proxy.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alvin9999/proxypool/b0f9fa721810e6ff1bcd0fdb0ab37e79f44c3f6a/assets/proxy.jpg -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | "io/ioutil" 6 | "os" 7 | "strings" 8 | 9 | "github.com/Sansui233/proxypool/pkg/tool" 10 | "github.com/ghodss/yaml" 11 | ) 12 | 13 | var configFilePath = "config.yaml" 14 | 15 | type ConfigOptions struct { 16 | Domain string `json:"domain" yaml:"domain"` 17 | Port string `json:"port" yaml:"port"` 18 | DatabaseUrl string `json:"database_url" yaml:"database_url"` 19 | CrawlInterval uint64 `json:"crawl-interval" yaml:"crawl-interval"` 20 | CFEmail string `json:"cf_email" yaml:"cf_email"` 21 | CFKey string `json:"cf_key" yaml:"cf_key"` 22 | SourceFiles []string `json:"source-files" yaml:"source-files"` 23 | SpeedTest bool `json:"speedtest" yaml:"speedtest"` 24 | SpeedTestInterval uint64 `json:"speedtest-interval" yaml:"speedtest-interval"` 25 | Connection int `json:"connection" yaml:"connection"` 26 | Timeout int `json:"timeout" yaml:"timeout"` 27 | ActiveFrequency uint16 `json:"active-frequency" yaml:"active-frequency" ` 28 | ActiveInterval uint64 `json:"active-interval" yaml:"active-interval"` 29 | ActiveMaxNumber uint16 `json:"active-max-number" yaml:"active-max-number"` 30 | } 31 | 32 | // Config 配置 33 | var Config ConfigOptions 
34 | 35 | // Parse 解析配置文件,支持本地文件系统和网络链接 36 | func Parse(path string) error { 37 | if path == "" { 38 | path = configFilePath 39 | } else { 40 | configFilePath = path 41 | } 42 | fileData, err := ReadFile(path) 43 | if err != nil { 44 | return err 45 | } 46 | Config = ConfigOptions{} 47 | err = yaml.Unmarshal(fileData, &Config) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | // set default 53 | if Config.Connection <= 0 { 54 | Config.Connection = 5 55 | } 56 | if Config.Port == "" { 57 | Config.Port = "12580" 58 | } 59 | if Config.CrawlInterval == 0 { 60 | Config.CrawlInterval = 60 61 | } 62 | if Config.SpeedTestInterval == 0 { 63 | Config.SpeedTestInterval = 720 64 | } 65 | if Config.ActiveInterval == 0 { 66 | Config.ActiveInterval = 60 67 | } 68 | if Config.ActiveFrequency == 0 { 69 | Config.ActiveFrequency = 100 70 | } 71 | if Config.ActiveMaxNumber == 0 { 72 | Config.ActiveMaxNumber = 100 73 | } 74 | 75 | // 部分配置环境变量优先 76 | if domain := os.Getenv("DOMAIN"); domain != "" { 77 | Config.Domain = domain 78 | } 79 | if cfEmail := os.Getenv("CF_API_EMAIL"); cfEmail != "" { 80 | Config.CFEmail = cfEmail 81 | } 82 | if cfKey := os.Getenv("CF_API_KEY"); cfKey != "" { 83 | Config.CFKey = cfKey 84 | } 85 | 86 | return nil 87 | } 88 | 89 | // 从本地文件或者http链接读取配置文件内容 90 | func ReadFile(path string) ([]byte, error) { 91 | if strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://") { 92 | resp, err := tool.GetHttpClient().Get(path) 93 | if err != nil { 94 | return nil, errors.New("config file http get fail") 95 | } 96 | defer resp.Body.Close() 97 | return ioutil.ReadAll(resp.Body) 98 | } else { 99 | if _, err := os.Stat(path); os.IsNotExist(err) { 100 | return nil, err 101 | } 102 | return ioutil.ReadFile(path) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /config/config.yaml: -------------------------------------------------------------------------------- 1 | # your domain 2 | domain: example.com 3 | port: # default 12580 4 | 5 | # source list file 6 | source-files: 7 | # use local file 8 | - ./config/source.yaml 9 | # use web file 10 | # - https://example.com/config/source.yaml 11 | 12 | # ======= 可选项,留空使用default值 ======= 13 | 14 | # postgresql database info 15 | database_url: "" 16 | 17 | # interval between each crawling 18 | crawl-interval: # v0.5.x default 60 (minutes) 19 | crontime: # v0.4.x default 60 (minutes). Deprecated in the newest version 20 | 21 | # speed test 22 | speedtest: false # default false. Warning: this will consume large network resources. 23 | speedtest-interval: # default 720 (min) 24 | connection: # default 5. The number of speed test connections simultaneously 25 | timeout: # default 10 (seconds). 26 | ## active proxy speed test 27 | active-interval: # default 60 (min) 28 | active-frequency: # default 100 (requests per interval) 29 | active-max-number: # default 100. 
If more than this number of active proxies, the extra will be deprecated by speed 30 | 31 | # cloudflare api 32 | cf_email: "" 33 | cf_key: "" 34 | -------------------------------------------------------------------------------- /config/source.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "github.com/Sansui233/proxypool/pkg/tool" 4 | 5 | type Source struct { 6 | Type string `json:"type" yaml:"type"` 7 | Options tool.Options `json:"options" yaml:"options"` 8 | } 9 | -------------------------------------------------------------------------------- /config/source.yaml: -------------------------------------------------------------------------------- 1 | # 模糊抓取订阅链接 2 | - type: webfuzzsub 3 | options: 4 | url: https://raw.githubusercontent.com/du5/free/master/sub.list 5 | #- type: webfuzzsub 6 | # options: 7 | # url: https://uploader.shimo.im/f/KO9YB4sh4a9OSwXw.txt?attname=1.txt 8 | - type: webfuzzsub 9 | options: 10 | url: https://t.me/s/ssrv2taytgshare?before=743 11 | #- type: webfuzzsub 12 | # options: 13 | # url: http://d.zaix.ru/ntiL.txt 14 | - type: webfuzzsub 15 | options: 16 | url: https://github.com/umelabs/node.umelabs.dev 17 | - type: webfuzzsub 18 | options: 19 | url: https://github.com/freefq/free 20 | - type: webfuzzsub 21 | options: 22 | url: https://github.com/oouxx/fqsub/blob/7f35b272888341f207acc10c92f1eb0be431e0e3/sub.list 23 | - type: webfuzzsub 24 | options: 25 | url: https://github.com/woytu/notes-vuepress/blob/757206d5c10d112d56a3c789d5a622d4bcfe6375/System/%E5%86%85%E7%BD%91%E7%A9%BF%E9%80%8F.md 26 | - type: webfuzzsub 27 | options: 28 | url: https://raw.githubusercontent.com/oouxx/fqsub/master/sub.list 29 | - type: webfuzzsub 30 | options: 31 | url: https://github.com/doudoubinga/ceshi/blob/master/trash/base64-sub 32 | - type: webfuzzsub 33 | options: 34 | url: https://github.com/JACKUSR2089/v2ray-subscribed 35 | - type: webfuzzsub 36 | options: 37 | url: https://github.com/pojiezhiyuanjun/freev2 38 | - type: webfuzzsub 39 | options: 40 | url: https://lisondawang.tk/jiedianfenxiang.html 41 | 42 | # 订阅链接 43 | - type: subscribe 44 | options: 45 | url: https://raw.githubusercontent.com/ssrsub/ssr/master/v2ray 46 | - type: subscribe 47 | options: 48 | url: https://bihai-v2ray.netlify.com 49 | - type: subscribe 50 | options: 51 | url: https://raw.githubusercontent.com/umelabs/node.umelabs.dev/master/Subscribe/SS.md 52 | - type: subscribe 53 | options: 54 | url: https://raw.githubusercontent.com/umelabs/node.umelabs.dev/master/Subscribe/SSR.md 55 | - type: subscribe 56 | options: 57 | url: https://raw.githubusercontent.com/umelabs/node.umelabs.dev/master/Subscribe/v2ray.md 58 | - type: subscribe 59 | options: 60 | url: https://rss.cnrss.xyz/link/RRbIrEn90XWdWQl5?mu=2 61 | - type: subscribe 62 | options: 63 | url: https://s.sublank.xyz/subscribe/42207/AFTzQlTeksG/ssr/ 64 | - type: subscribe 65 | options: 66 | url: https://rss-node.com/link/vv0Ue59uVLHp1nfX?mu=1 67 | - type: subscribe 68 | options: 69 | url: https://s.sublank.xyz/subscribe/43574/srCJLB2XwWl/ssr/ 70 | - type: subscribe 71 | options: 72 | url: https://rss-node.com/link/FYw5RxXkHrRZdUAb?mu=1 73 | - type: subscribe 74 | options: 75 | url: https://raw.githubusercontent.com/JACKUSR2089/v2ray-subscribed/master/2020-11-1 76 | - type: subscribe 77 | options: 78 | url: https://www.recear.xyz/link/XZHfqw7m7nyO17hT?sub=3 79 | - type: subscribe 80 | options: 81 | url: https://subhaha.xyz/link/A8OkgyOpxGeJOdFQ?sub=3&extend=1 82 | - type: subscribe 83 | 
options: 84 | url: https://www.recear.xyz/link/XZHfqw7m7nyO17hT?sub=1 85 | - type: subscribe 86 | options: 87 | url: https://subhaha.xyz/link/A8OkgyOpxGeJOdFQ?sub=1&extend=1 88 | - type: subscribe 89 | options: 90 | url: https://www.recear.xyz/link/XZHfqw7m7nyO17hT?sub=2 91 | - type: subscribe 92 | options: 93 | url: https://proxypoolss.tk/sip002/sub 94 | - type: subscribe 95 | options: 96 | url: https://proxypoolss.tk/ssr/sub 97 | - type: subscribe 98 | options: 99 | url: https://proxypoolss.tk/vmess/sub 100 | - type: subscribe 101 | options: 102 | url: https://proxypoolss.tk/trojan/sub 103 | - type: subscribe 104 | options: 105 | url: https://alexproxy001.herokuapp.com/sip002/sub 106 | - type: subscribe 107 | options: 108 | url: https://alexproxy001.herokuapp.com/ssr/sub 109 | - type: subscribe 110 | options: 111 | url: https://alexproxy001.herokuapp.com/vmess/sub 112 | - type: subscribe 113 | options: 114 | url: https://tg200.herokuapp.com/sip002/sub 115 | - type: subscribe 116 | options: 117 | url: https://tg200.herokuapp.com/ssr/sub 118 | - type: subscribe 119 | options: 120 | url: https://tg200.herokuapp.com/vmess/sub 121 | 122 | # 网页模糊抓取 123 | - type: webfuzz 124 | options: 125 | url: https://merlinblog.xyz/wiki/freess.html 126 | - type: webfuzz 127 | options: 128 | url: https://github.com/umelabs/node.umelabs.dev 129 | - type: webfuzz 130 | options: 131 | url: https://github.com/freefq/free 132 | - type: webfuzz 133 | options: 134 | url: https://github.com/iwxf/free-v2ray/blob/master/README.md 135 | - type: webfuzz 136 | options: 137 | url: https://github.com/hugetiny/awesome-vpn/blob/master/READMECN.md 138 | - type: webfuzz 139 | options: 140 | url: https://github.com/umelabs/node.umelabs.dev 141 | - type: webfuzz 142 | options: 143 | url: https://github.com/ruanfei/ShadowsocksRRShare/blob/master/vmess%E8%8A%82%E7%82%B9%EF%BC%88%E9%A1%B6%E9%83%A8%E6%9C%80%E6%96%B0).md 144 | - type: webfuzz 145 | options: 146 | url: https://bihai-v2ray.netlify.com 147 | - type: webfuzz 148 | options: 149 | url: https://ss.pythonic.life/ 150 | - type: webfuzz 151 | options: 152 | url: https://www.youneed.win/free-ss 153 | - type: webfuzz 154 | options: 155 | url: https://www.youneed.win/free-ssr 156 | - type: webfuzz 157 | options: 158 | url: https://raw.githubusercontent.com/freefq/free/master/README.md 159 | - type: webfuzz 160 | options: 161 | url: https://zfjvpn.gitbook.io/ 162 | - type: webfuzz 163 | options: 164 | url: https://fanqiang.network/free-v2ray 165 | - type: webfuzz 166 | options: 167 | url: https://www.freefq.com/d/file/free-ssr/20200811/1f3e9d0d0064f662457062712dcf1b66.txt 168 | - type: webfuzz 169 | options: 170 | url: https://merlinblog.xyz/wiki/freess.html 171 | - type: webfuzz 172 | options: 173 | url: https://zfjvpn.gitbook.io/123/ 174 | - type: webfuzz 175 | options: 176 | url: https://raw.githubusercontent.com/ruanfei/ShadowsocksRRShare/master/ss/ss.txt 177 | - type: webfuzz 178 | options: 179 | url: https://raw.githubusercontent.com/ruanfei/ShadowsocksRRShare/master/ssr/ssr.txt 180 | - type: webfuzz 181 | options: 182 | url: https://raw.githubusercontent.com/pojiezhiyuanjun/freev2/master/all.txt 183 | - type: webfuzz 184 | options: 185 | url: https://raw.githubusercontent.com/zu1k/ssrtool-crawler/master/data/ssr.txt 186 | - type: webfuzz 187 | options: 188 | url: https://raw.githubusercontent.com/freefq/free/master/README.md 189 | - type: webfuzz 190 | options: 191 | url: https://raw.githubusercontent.com/52bp/52bp.github.io/master/freesite.html 192 | - type: webfuzz 193 | 
options: 194 | url: https://telegra.ph/2020-10-3-10-02 195 | - type: webfuzz 196 | options: 197 | url: https://lisondawang.tk/jiedianfenxiang.html 198 | 199 | # tg频道抓取 200 | - type: tgchannel 201 | options: 202 | channel: ssrList 203 | num: 200 204 | - type: tgchannel 205 | options: 206 | channel: SSRSUB 207 | num: 200 208 | - type: tgchannel 209 | options: 210 | channel: FreeSSRNode 211 | num: 200 212 | - type: tgchannel 213 | options: 214 | channel: V2List 215 | num: 200 216 | - type: tgchannel 217 | options: 218 | channel: ssrtool 219 | num: 200 220 | - type: tgchannel 221 | options: 222 | channel: freeshadowsock 223 | num: 200 224 | - type: tgchannel 225 | options: 226 | channel: fanqiang666 227 | num: 200 228 | - type: tgchannel 229 | options: 230 | channel: ssrtool_crack 231 | num: 200 232 | - type: tgchannel 233 | options: 234 | channel: ssrshares 235 | num: 200 236 | - type: tgchannel 237 | options: 238 | channel: gongyijichangfenxiang 239 | num: 200 240 | - type: tgchannel 241 | options: 242 | channel: ssrv2taytgshare 243 | num: 200 244 | - type: tgchannel 245 | options: 246 | channel: TgProxies 247 | num: 200 248 | - type: tgchannel 249 | options: 250 | channel: TG_Mtproxy_1 251 | num: 200 252 | - type: tgchannel 253 | options: 254 | channel: baipiaojiedian 255 | num: 200 256 | - type: tgchannel 257 | options: 258 | channel: sharecentre 259 | num: 200 260 | - type: tgchannel 261 | options: 262 | channel: daolis_miao 263 | num: 200vmesssr 264 | - type: tgchannel 265 | options: 266 | channel: vmesssr 267 | num: 200 268 | - type: tgchannel 269 | options: 270 | channel: sphard 271 | num: 200 272 | - type: tgchannel 273 | options: 274 | channel: pjzyj0 275 | num: 200 276 | - type: tgchannel 277 | options: 278 | channel: jiedianfenxiang 279 | num: 200 280 | - type: tgchannel 281 | options: 282 | channel: flyingboat 283 | num: 200 284 | - type: tgchannel 285 | options: 286 | channel: NetfreexSrV 287 | num: 200 288 | - type: tgchannel 289 | options: 290 | channel: gyjclub 291 | num: 200 292 | - type: tgchannel 293 | options: 294 | channel: VPNFolder 295 | num: 200 296 | - type: tgchannel 297 | options: 298 | channel: youmtp 299 | num: 200 300 | - type: tgchannel 301 | options: 302 | channel: ssrkn 303 | num: 200 304 | - type: tgchannel 305 | options: 306 | channel: abchz 307 | num: 200 308 | - type: tgchannel 309 | options: 310 | channel: yg251153 311 | num: 200 312 | - type: tgchannel 313 | options: 314 | channel: woyaofq 315 | num: 200 316 | - type: tgchannel 317 | options: 318 | channel: wwx_com 319 | num: 200 320 | - type: tgchannel 321 | options: 322 | channel: freessr4k 323 | num: 200 324 | - type: tgchannel 325 | options: 326 | channel: ultrafreevpn 327 | num: 200 328 | - type: tgchannel 329 | options: 330 | channel: VSshit 331 | num: 200 332 | - type: tgchannel 333 | options: 334 | channel: InternetSSR 335 | num: 200 336 | - type: tgchannel 337 | options: 338 | channel: baipiaodadui 339 | num: 200 340 | - type: tgchannel 341 | options: 342 | channel: mtproxy666 343 | num: 200 344 | - type: tgchannel 345 | options: 346 | channel: v2ray666 347 | num: 200 348 | - type: tgchannel 349 | options: 350 | channel: freeVPNserv 351 | num: 200 352 | 353 | # 翻墙党论坛抓取 354 | - type: web-fanqiangdang 355 | options: 356 | url: https://fanqiangdang.com/forum-48-1.html 357 | 358 | # 某个网站抓取 359 | - type: web-freessrxyz 360 | options: 361 | -------------------------------------------------------------------------------- /docs/fast.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alvin9999/proxypool/b0f9fa721810e6ff1bcd0fdb0ab37e79f44c3f6a/docs/fast.png -------------------------------------------------------------------------------- /docs/genbindata.sh: -------------------------------------------------------------------------------- 1 | go-bindata -o internal/bindata/html/html.go -pkg binhtml assets/html/ assets/static 2 | go-bindata -o internal/bindata/geoip/geoip.go -pkg bingeoip assets/GeoLite2-City.mmdb assets/flags.json 3 | -------------------------------------------------------------------------------- /docs/speedtest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Alvin9999/proxypool/b0f9fa721810e6ff1bcd0fdb0ab37e79f44c3f6a/docs/speedtest.png -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | // +heroku goVersion go1.14 2 | 3 | module github.com/Sansui233/proxypool 4 | 5 | go 1.15 6 | 7 | require ( 8 | github.com/Dreamacro/clash v1.3.0 9 | github.com/PuerkitoBio/goquery v1.5.1 // indirect 10 | github.com/andybalholm/cascadia v1.2.0 // indirect 11 | github.com/antchfx/htmlquery v1.2.3 // indirect 12 | github.com/antchfx/xmlquery v1.2.4 // indirect 13 | github.com/antchfx/xpath v1.1.8 // indirect 14 | github.com/cloudflare/cloudflare-go v0.13.5 15 | github.com/ghodss/yaml v1.0.0 16 | github.com/gin-contrib/cache v1.1.0 17 | github.com/gin-gonic/gin v1.6.3 18 | github.com/go-playground/validator/v10 v10.3.0 // indirect 19 | github.com/gobwas/glob v0.2.3 // indirect 20 | github.com/gocolly/colly v1.2.0 21 | github.com/golang/protobuf v1.4.2 // indirect 22 | github.com/heroku/x v0.0.26 23 | github.com/ivpusic/grpool v1.0.0 24 | github.com/jasonlvhit/gocron v0.0.1 25 | github.com/json-iterator/go v1.1.10 // indirect 26 | github.com/kennygrant/sanitize v1.2.4 // indirect 27 | github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect 28 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 29 | github.com/modern-go/reflect2 v1.0.1 // indirect 30 | github.com/oschwald/geoip2-golang v1.4.0 31 | github.com/oschwald/maxminddb-golang v1.8.0 // indirect 32 | github.com/patrickmn/go-cache v2.1.0+incompatible 33 | github.com/robertkrimen/otto v0.0.0-20200922221731-ef014fd054ac 34 | github.com/saintfish/chardet v0.0.0-20120816061221-3af4cd4741ca // indirect 35 | github.com/sirupsen/logrus v1.7.0 36 | github.com/temoto/robotstxt v1.1.1 // indirect 37 | github.com/x-cray/logrus-prefixed-formatter v0.5.2 38 | golang.org/x/sys v0.0.0-20201126233918-771906719818 // indirect 39 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect 40 | google.golang.org/appengine v1.6.6 // indirect 41 | google.golang.org/protobuf v1.25.0 // indirect 42 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 43 | gopkg.in/sourcemap.v1 v1.0.5 // indirect 44 | gopkg.in/yaml.v2 v2.4.0 // indirect 45 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c 46 | gorm.io/driver/postgres v1.0.5 47 | gorm.io/gorm v1.20.7 48 | ) 49 | -------------------------------------------------------------------------------- /internal/app/getter.go: -------------------------------------------------------------------------------- 1 | package app 2 | 3 | import ( 4 | "errors" 5 | "github.com/Sansui233/proxypool/log" 6 | 7 | 
"github.com/Sansui233/proxypool/internal/cache" 8 | 9 | "github.com/ghodss/yaml" 10 | 11 | "github.com/Sansui233/proxypool/config" 12 | "github.com/Sansui233/proxypool/pkg/getter" 13 | ) 14 | 15 | var Getters = make([]getter.Getter, 0) 16 | 17 | func InitConfigAndGetters(path string) (err error) { 18 | err = config.Parse(path) 19 | if err != nil { 20 | return 21 | } 22 | if s := config.Config.SourceFiles; len(s) == 0 { 23 | return errors.New("no sources") 24 | } else { 25 | initGetters(s) 26 | } 27 | return 28 | } 29 | 30 | func initGetters(sourceFiles []string) { 31 | Getters = make([]getter.Getter, 0) 32 | for _, path := range sourceFiles { 33 | data, err := config.ReadFile(path) 34 | if err != nil { 35 | log.Errorln("Init SourceFile Error: %s\n", err.Error()) 36 | continue 37 | } 38 | sourceList := make([]config.Source, 0) 39 | err = yaml.Unmarshal(data, &sourceList) 40 | if err != nil { 41 | log.Errorln("Init SourceFile Error: %s\n", err.Error()) 42 | continue 43 | } 44 | for _, source := range sourceList { 45 | g, err := getter.NewGetter(source.Type, source.Options) 46 | if err == nil && g != nil { 47 | Getters = append(Getters, g) 48 | log.Debugln("init getter: %s %v", source.Type, source.Options) 49 | } 50 | } 51 | } 52 | log.Infoln("Getter count: %d", len(Getters)) 53 | cache.GettersCount = len(Getters) 54 | } 55 | -------------------------------------------------------------------------------- /internal/app/task.go: -------------------------------------------------------------------------------- 1 | package app 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/config" 5 | "github.com/Sansui233/proxypool/log" 6 | "github.com/Sansui233/proxypool/pkg/healthcheck" 7 | "sync" 8 | "time" 9 | 10 | "github.com/Sansui233/proxypool/internal/cache" 11 | "github.com/Sansui233/proxypool/internal/database" 12 | "github.com/Sansui233/proxypool/pkg/provider" 13 | "github.com/Sansui233/proxypool/pkg/proxy" 14 | ) 15 | 16 | var location, _ = time.LoadLocation("PRC") 17 | 18 | func CrawlGo() { 19 | wg := &sync.WaitGroup{} 20 | var pc = make(chan proxy.Proxy) 21 | for _, g := range Getters { 22 | wg.Add(1) 23 | go g.Get2ChanWG(pc, wg) 24 | } 25 | proxies := cache.GetProxies("allproxies") 26 | dbProxies := database.GetAllProxies() 27 | // Show last time result when launch 28 | if proxies == nil && dbProxies != nil { 29 | cache.SetProxies("proxies", dbProxies) 30 | cache.LastCrawlTime = "抓取中,已载入上次数据库数据" 31 | log.Infoln("Database: loaded") 32 | } 33 | if dbProxies != nil { 34 | proxies = dbProxies.UniqAppendProxyList(proxies) 35 | } 36 | if proxies == nil { 37 | proxies = make(proxy.ProxyList, 0) 38 | } 39 | 40 | go func() { 41 | wg.Wait() 42 | close(pc) 43 | }() // Note: 为何并发?可以一边抓取一边读取而非抓完再读 44 | // for 用于阻塞goroutine 45 | for p := range pc { // Note: pc关闭后不能发送数据可以读取剩余数据 46 | if p != nil { 47 | proxies = proxies.UniqAppendProxy(p) 48 | } 49 | } 50 | 51 | proxies = proxies.Derive() 52 | log.Infoln("CrawlGo unique proxy count: %d", len(proxies)) 53 | 54 | // Clean Clash unsupported proxy because health check depends on clash 55 | proxies = provider.Clash{ 56 | provider.Base{ 57 | Proxies: &proxies, 58 | }, 59 | }.CleanProxies() 60 | log.Infoln("CrawlGo clash supported proxy count: %d", len(proxies)) 61 | 62 | cache.SetProxies("allproxies", proxies) 63 | cache.AllProxiesCount = proxies.Len() 64 | log.Infoln("AllProxiesCount: %d", cache.AllProxiesCount) 65 | cache.SSProxiesCount = proxies.TypeLen("ss") 66 | log.Infoln("SSProxiesCount: %d", cache.SSProxiesCount) 67 | cache.SSRProxiesCount = 
proxies.TypeLen("ssr") 68 | log.Infoln("SSRProxiesCount: %d", cache.SSRProxiesCount) 69 | cache.VmessProxiesCount = proxies.TypeLen("vmess") 70 | log.Infoln("VmessProxiesCount: %d", cache.VmessProxiesCount) 71 | cache.TrojanProxiesCount = proxies.TypeLen("trojan") 72 | log.Infoln("TrojanProxiesCount: %d", cache.TrojanProxiesCount) 73 | cache.LastCrawlTime = time.Now().In(location).Format("2006-01-02 15:04:05") 74 | 75 | // 节点可用性检测,使用batchsize不能降低内存占用,只是为了看性能 76 | log.Infoln("Now proceed proxy health check...") 77 | b := 1000 78 | round := len(proxies) / b 79 | okproxies := make(proxy.ProxyList, 0) 80 | for i := 0; i < round; i++ { 81 | okproxies = append(okproxies, healthcheck.CleanBadProxiesWithGrpool(proxies[i*b:(i+1)*b])...) 82 | log.Infoln("\tChecking round: %d", i) 83 | } 84 | okproxies = append(okproxies, healthcheck.CleanBadProxiesWithGrpool(proxies[round*b:])...) 85 | proxies = okproxies 86 | 87 | log.Infoln("CrawlGo clash usable proxy count: %d", len(proxies)) 88 | 89 | // 重命名节点名称为类似US_01的格式,并按国家排序 90 | proxies.NameSetCounrty().Sort().NameAddIndex() 91 | log.Infoln("Proxy rename DONE!") 92 | 93 | // 可用节点存储 94 | cache.SetProxies("proxies", proxies) 95 | cache.UsefullProxiesCount = proxies.Len() 96 | database.SaveProxyList(proxies) 97 | database.ClearOldItems() 98 | 99 | log.Infoln("Usablility checking done. Open %s to check", config.Config.Domain+":"+config.Config.Port) 100 | 101 | // 测速 102 | speedTestNew(proxies) 103 | cache.SetString("clashproxies", provider.Clash{ 104 | provider.Base{ 105 | Proxies: &proxies, 106 | }, 107 | }.Provide()) // update static string provider 108 | cache.SetString("surgeproxies", provider.Surge{ 109 | provider.Base{ 110 | Proxies: &proxies, 111 | }, 112 | }.Provide()) 113 | } 114 | 115 | // Speed test for new proxies 116 | func speedTestNew(proxies proxy.ProxyList) { 117 | if config.Config.SpeedTest { 118 | cache.IsSpeedTest = "已开启" 119 | if config.Config.Timeout > 0 { 120 | healthcheck.SpeedTimeout = time.Second * time.Duration(config.Config.Timeout) 121 | } 122 | healthcheck.SpeedTestNew(proxies, config.Config.Connection) 123 | } else { 124 | cache.IsSpeedTest = "未开启" 125 | } 126 | } 127 | 128 | // Speed test for all proxies in proxy.ProxyList 129 | func SpeedTest(proxies proxy.ProxyList) { 130 | if config.Config.SpeedTest { 131 | cache.IsSpeedTest = "已开启" 132 | if config.Config.Timeout > 0 { 133 | healthcheck.SpeedTimeout = time.Second * time.Duration(config.Config.Timeout) 134 | } 135 | healthcheck.SpeedTestAll(proxies, config.Config.Connection) 136 | } else { 137 | cache.IsSpeedTest = "未开启" 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /internal/bindata/geoip/geoip.go: -------------------------------------------------------------------------------- 1 | // Code generated for package bingeoip by go-bindata DO NOT EDIT. (@generated) 2 | // sources: 3 | // assets/GeoLite2-City.mmdb 4 | // assets/flags.json 5 | package bingeoip 6 | 7 | import ( 8 | "fmt" 9 | "io/ioutil" 10 | "os" 11 | "path/filepath" 12 | "strings" 13 | ) 14 | 15 | // bindataRead reads the given file from disk. It returns an error on failure. 16 | func bindataRead(path, name string) ([]byte, error) { 17 | buf, err := ioutil.ReadFile(path) 18 | if err != nil { 19 | err = fmt.Errorf("Error reading asset %s at %s: %v", name, path, err) 20 | } 21 | return buf, err 22 | } 23 | 24 | type asset struct { 25 | bytes []byte 26 | info os.FileInfo 27 | } 28 | 29 | // assetsGeolite2CityMmdb reads file data from disk. 
It returns an error on failure. 30 | func assetsGeolite2CityMmdb() (*asset, error) { 31 | path := "assets/GeoLite2-City.mmdb" 32 | name := "assets/GeoLite2-City.mmdb" 33 | bytes, err := bindataRead(path, name) 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | fi, err := os.Stat(path) 39 | if err != nil { 40 | err = fmt.Errorf("Error reading asset info %s at %s: %v", name, path, err) 41 | } 42 | 43 | a := &asset{bytes: bytes, info: fi} 44 | return a, err 45 | } 46 | 47 | // assetsFlagsJson reads file data from disk. It returns an error on failure. 48 | func assetsFlagsJson() (*asset, error) { 49 | path := "assets/flags.json" 50 | name := "assets/flags.json" 51 | bytes, err := bindataRead(path, name) 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | fi, err := os.Stat(path) 57 | if err != nil { 58 | err = fmt.Errorf("Error reading asset info %s at %s: %v", name, path, err) 59 | } 60 | 61 | a := &asset{bytes: bytes, info: fi} 62 | return a, err 63 | } 64 | 65 | // Asset loads and returns the asset bytes for the given name. 66 | // It returns an error if the asset could not be found or 67 | // could not be loaded. 68 | func Asset(name string) ([]byte, error) { 69 | cannonicalName := strings.Replace(name, "\\", "/", -1) 70 | if f, ok := _bindata[cannonicalName]; ok { 71 | a, err := f() 72 | if err != nil { 73 | return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) 74 | } 75 | return a.bytes, nil 76 | } 77 | return nil, fmt.Errorf("Asset %s not found", name) 78 | } 79 | 80 | // MustAsset is like Asset but panics when Asset would return an error. 81 | // It simplifies safe initialization of global variables. 82 | func MustAsset(name string) []byte { 83 | a, err := Asset(name) 84 | if err != nil { 85 | panic("asset: Asset(" + name + "): " + err.Error()) 86 | } 87 | 88 | return a 89 | } 90 | 91 | // AssetInfo loads and returns the asset info for the given name. 92 | // It returns an error if the asset could not be found or 93 | // could not be loaded. 94 | func AssetInfo(name string) (os.FileInfo, error) { 95 | cannonicalName := strings.Replace(name, "\\", "/", -1) 96 | if f, ok := _bindata[cannonicalName]; ok { 97 | a, err := f() 98 | if err != nil { 99 | return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) 100 | } 101 | return a.info, nil 102 | } 103 | return nil, fmt.Errorf("AssetInfo %s not found", name) 104 | } 105 | 106 | // AssetNames returns the names of the assets. 107 | func AssetNames() []string { 108 | names := make([]string, 0, len(_bindata)) 109 | for name := range _bindata { 110 | names = append(names, name) 111 | } 112 | return names 113 | } 114 | 115 | // _bindata is a table, holding each asset generator, mapped to its name. 116 | var _bindata = map[string]func() (*asset, error){ 117 | "assets/GeoLite2-City.mmdb": assetsGeolite2CityMmdb, 118 | "assets/flags.json": assetsFlagsJson, 119 | } 120 | 121 | // AssetDir returns the file names below a certain 122 | // directory embedded in the file by go-bindata. 123 | // For example if you run go-bindata on data/... and data contains the 124 | // following hierarchy: 125 | // data/ 126 | // foo.txt 127 | // img/ 128 | // a.png 129 | // b.png 130 | // then AssetDir("data") would return []string{"foo.txt", "img"} 131 | // AssetDir("data/img") would return []string{"a.png", "b.png"} 132 | // AssetDir("foo.txt") and AssetDir("notexist") would return an error 133 | // AssetDir("") will return []string{"data"}. 
134 | func AssetDir(name string) ([]string, error) { 135 | node := _bintree 136 | if len(name) != 0 { 137 | cannonicalName := strings.Replace(name, "\\", "/", -1) 138 | pathList := strings.Split(cannonicalName, "/") 139 | for _, p := range pathList { 140 | node = node.Children[p] 141 | if node == nil { 142 | return nil, fmt.Errorf("Asset %s not found", name) 143 | } 144 | } 145 | } 146 | if node.Func != nil { 147 | return nil, fmt.Errorf("Asset %s not found", name) 148 | } 149 | rv := make([]string, 0, len(node.Children)) 150 | for childName := range node.Children { 151 | rv = append(rv, childName) 152 | } 153 | return rv, nil 154 | } 155 | 156 | type bintree struct { 157 | Func func() (*asset, error) 158 | Children map[string]*bintree 159 | } 160 | 161 | var _bintree = &bintree{nil, map[string]*bintree{ 162 | "assets": &bintree{nil, map[string]*bintree{ 163 | "GeoLite2-City.mmdb": &bintree{assetsGeolite2CityMmdb, map[string]*bintree{}}, 164 | "flags.json": &bintree{assetsFlagsJson, map[string]*bintree{}}, 165 | }}, 166 | }} 167 | 168 | // RestoreAsset restores an asset under the given directory 169 | func RestoreAsset(dir, name string) error { 170 | data, err := Asset(name) 171 | if err != nil { 172 | return err 173 | } 174 | info, err := AssetInfo(name) 175 | if err != nil { 176 | return err 177 | } 178 | err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) 179 | if err != nil { 180 | return err 181 | } 182 | err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) 183 | if err != nil { 184 | return err 185 | } 186 | err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) 187 | if err != nil { 188 | return err 189 | } 190 | return nil 191 | } 192 | 193 | // RestoreAssets restores an asset under the given directory recursively 194 | func RestoreAssets(dir, name string) error { 195 | children, err := AssetDir(name) 196 | // File 197 | if err != nil { 198 | return RestoreAsset(dir, name) 199 | } 200 | // Dir 201 | for _, child := range children { 202 | err = RestoreAssets(dir, filepath.Join(name, child)) 203 | if err != nil { 204 | return err 205 | } 206 | } 207 | return nil 208 | } 209 | 210 | func _filePath(dir, name string) string { 211 | cannonicalName := strings.Replace(name, "\\", "/", -1) 212 | return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
213 | } 214 | -------------------------------------------------------------------------------- /internal/cache/cache.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/Sansui233/proxypool/pkg/proxy" 7 | "github.com/patrickmn/go-cache" 8 | ) 9 | 10 | var c = cache.New(cache.NoExpiration, 10*time.Minute) 11 | 12 | func GetProxies(key string) proxy.ProxyList { 13 | result, found := c.Get(key) 14 | if found { 15 | return result.(proxy.ProxyList) //Get返回的是interface 16 | } 17 | return nil 18 | } 19 | 20 | func SetProxies(key string, proxies proxy.ProxyList) { 21 | c.Set(key, proxies, cache.NoExpiration) 22 | } 23 | 24 | func SetString(key, value string) { 25 | c.Set(key, value, cache.NoExpiration) 26 | } 27 | 28 | func GetString(key string) string { 29 | result, found := c.Get(key) 30 | if found { 31 | return result.(string) 32 | } 33 | return "" 34 | } 35 | -------------------------------------------------------------------------------- /internal/cache/vars.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | var ( 4 | GettersCount = 0 5 | 6 | AllProxiesCount = 0 7 | SSRProxiesCount = 0 8 | SSProxiesCount = 0 9 | VmessProxiesCount = 0 10 | TrojanProxiesCount = 0 11 | UsefullProxiesCount = 0 12 | LastCrawlTime = "程序正在启动,请于3分钟后刷新页面" 13 | IsSpeedTest = "未开启" 14 | ) 15 | -------------------------------------------------------------------------------- /internal/cloudflare/cache.go: -------------------------------------------------------------------------------- 1 | package cloudflare 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | 7 | "github.com/Sansui233/proxypool/config" 8 | "github.com/cloudflare/cloudflare-go" 9 | ) 10 | 11 | func test() { 12 | api, err := cloudflare.New(config.Config.CFKey, config.Config.CFKey) 13 | if err != nil { 14 | log.Fatal(err) 15 | } 16 | 17 | // Fetch the zone ID 18 | id, err := api.ZoneIDByName(config.Config.Domain) 19 | if err != nil { 20 | log.Fatal(err) 21 | } 22 | 23 | // Fetch zone details 24 | zone, err := api.ZoneDetails(id) 25 | if err != nil { 26 | log.Fatal(err) 27 | } 28 | // Print zone details 29 | fmt.Println(zone) 30 | } 31 | -------------------------------------------------------------------------------- /internal/cron/cron.go: -------------------------------------------------------------------------------- 1 | package cron 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/config" 5 | "github.com/Sansui233/proxypool/internal/cache" 6 | "github.com/Sansui233/proxypool/log" 7 | "github.com/Sansui233/proxypool/pkg/healthcheck" 8 | "github.com/Sansui233/proxypool/pkg/provider" 9 | "runtime" 10 | 11 | "github.com/Sansui233/proxypool/internal/app" 12 | "github.com/jasonlvhit/gocron" 13 | ) 14 | 15 | func Cron() { 16 | _ = gocron.Every(config.Config.CrawlInterval).Minutes().Do(crawlTask) 17 | _ = gocron.Every(config.Config.SpeedTestInterval).Minutes().Do(speedTestTask) 18 | _ = gocron.Every(config.Config.ActiveInterval).Minutes().Do(frequentSpeedTestTask) 19 | <-gocron.Start() 20 | } 21 | 22 | func crawlTask() { 23 | err := app.InitConfigAndGetters("") 24 | if err != nil { 25 | log.Errorln("[cron.go] config parse error: %s", err) 26 | } 27 | app.CrawlGo() 28 | app.Getters = nil 29 | runtime.GC() 30 | } 31 | 32 | func speedTestTask() { 33 | log.Infoln("Doing speed test task...") 34 | err := config.Parse("") 35 | if err != nil { 36 | log.Errorln("[cron.go] config parse error: %s", err) 37 | } 38 | pl := 
cache.GetProxies("proxies") 39 | 40 | app.SpeedTest(pl) 41 | cache.SetString("clashproxies", provider.Clash{ 42 | provider.Base{ 43 | Proxies: &pl, 44 | }, 45 | }.Provide()) // update static string provider 46 | cache.SetString("surgeproxies", provider.Surge{ 47 | provider.Base{ 48 | Proxies: &pl, 49 | }, 50 | }.Provide()) 51 | runtime.GC() 52 | } 53 | 54 | func frequentSpeedTestTask() { 55 | log.Infoln("Doing speed test task for active proxies...") 56 | err := config.Parse("") 57 | if err != nil { 58 | log.Errorln("[cron.go] config parse error: %s", err) 59 | } 60 | pl_all := cache.GetProxies("proxies") 61 | pl := healthcheck.ProxyStats.ReqCountThan(config.Config.ActiveFrequency, pl_all, true) 62 | if len(pl) > int(config.Config.ActiveMaxNumber) { 63 | pl = healthcheck.ProxyStats.SortProxiesBySpeed(pl)[:config.Config.ActiveMaxNumber] 64 | } 65 | log.Infoln("Active proxies count: %d", len(pl)) 66 | 67 | app.SpeedTest(pl) 68 | cache.SetString("clashproxies", provider.Clash{ 69 | provider.Base{ 70 | Proxies: &pl_all, 71 | }, 72 | }.Provide()) // update static string provider 73 | cache.SetString("surgeproxies", provider.Surge{ 74 | provider.Base{ 75 | Proxies: &pl_all, 76 | }, 77 | }.Provide()) 78 | runtime.GC() 79 | } 80 | -------------------------------------------------------------------------------- /internal/database/db.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/log" 5 | "os" 6 | 7 | "github.com/Sansui233/proxypool/config" 8 | 9 | "gorm.io/driver/postgres" 10 | "gorm.io/gorm" 11 | "gorm.io/gorm/logger" 12 | ) 13 | 14 | var DB *gorm.DB 15 | 16 | func connect() (err error) { 17 | // localhost url 18 | dsn := "user=proxypool password=proxypool dbname=proxypool port=5432 sslmode=disable TimeZone=Asia/Shanghai" 19 | if url := config.Config.DatabaseUrl; url != "" { 20 | dsn = url 21 | } 22 | if url := os.Getenv("DATABASE_URL"); url != "" { 23 | dsn = url 24 | } 25 | DB, err = gorm.Open(postgres.Open(dsn), &gorm.Config{ 26 | Logger: logger.Default.LogMode(logger.Silent), 27 | }) 28 | if err == nil { 29 | log.Infoln("database: successfully connected to: %s", DB.Name()) 30 | } else { 31 | DB = nil 32 | log.Warnln("database connection info: %s \n\t\tUse cache to store proxies", err.Error()) 33 | } 34 | return 35 | } 36 | -------------------------------------------------------------------------------- /internal/database/db_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestConnect(t *testing.T) { 8 | //t.SkipNow() 9 | connect() 10 | //InitTables() 11 | //proxies := GetAllProxies() 12 | //fmt.Println(proxies.Len()) 13 | } 14 | -------------------------------------------------------------------------------- /internal/database/proxy.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/log" 5 | "github.com/Sansui233/proxypool/pkg/proxy" 6 | "gorm.io/gorm" 7 | "time" 8 | ) 9 | 10 | // 设置数据库字段,表名为默认为type名的复数。相比于原作者,不使用软删除特性 11 | type Proxy struct { 12 | ID uint `gorm:"primarykey"` 13 | CreatedAt time.Time 14 | UpdatedAt time.Time 15 | proxy.Base 16 | Link string 17 | Identifier string `gorm:"unique"` 18 | } 19 | 20 | func InitTables() { 21 | if DB == nil { 22 | err := connect() 23 | if err != nil { 24 | return 25 | } 26 | } 27 | // Warnln: 
自动迁移仅仅会创建表,缺少列和索引,并且不会改变现有列的类型或删除未使用的列以保护数据。 28 | // 如更改表的Column请于数据库中操作 29 | err := DB.AutoMigrate(&Proxy{}) 30 | if err != nil { 31 | log.Errorln("\n\t\t[db/proxy.go] database migration failed") 32 | panic(err) 33 | } 34 | } 35 | 36 | func SaveProxyList(pl proxy.ProxyList) { 37 | if DB == nil { 38 | return 39 | } 40 | 41 | DB.Transaction(func(tx *gorm.DB) error { 42 | // Set All Usable to false 43 | if err := DB.Model(&Proxy{}).Where("useable = ?", true).Update("useable", "false").Error; err != nil { 44 | log.Warnln("database: Reset useable to false failed: %s", err.Error()) 45 | } 46 | // Create or Update proxies 47 | for i := 0; i < pl.Len(); i++ { 48 | p := Proxy{ 49 | Base: *pl[i].BaseInfo(), 50 | Link: pl[i].Link(), 51 | Identifier: pl[i].Identifier(), 52 | } 53 | p.Useable = true 54 | if err := DB.Create(&p).Error; err != nil { 55 | // Update with Identifier 56 | if uperr := DB.Model(&Proxy{}).Where("identifier = ?", p.Identifier).Updates(&Proxy{ 57 | Base: proxy.Base{Useable: true, Name: p.Name}, 58 | }).Error; uperr != nil { 59 | log.Warnln("\n\t\tdatabase: Update failed:"+ 60 | "\n\t\tdatabase: When Created item: %s"+ 61 | "\n\t\tdatabase: When Updated item: %s", err.Error(), uperr.Error()) 62 | } 63 | } 64 | } 65 | log.Infoln("database: Updated") 66 | return nil 67 | }) 68 | } 69 | 70 | // Get a proxy list consists of all proxies in database 71 | func GetAllProxies() (proxies proxy.ProxyList) { 72 | proxies = make(proxy.ProxyList, 0) 73 | if DB == nil { 74 | return nil 75 | } 76 | 77 | proxiesDB := make([]Proxy, 0) 78 | DB.Select("link").Find(&proxiesDB) 79 | 80 | for _, proxyDB := range proxiesDB { 81 | if proxiesDB != nil { 82 | p, err := proxy.ParseProxyFromLink(proxyDB.Link) 83 | if err == nil && p != nil { 84 | p.SetUseable(false) 85 | proxies = append(proxies, p) 86 | } 87 | } 88 | } 89 | return 90 | } 91 | 92 | // Clear proxies unusable more than 1 week 93 | func ClearOldItems() { 94 | if DB == nil { 95 | return 96 | } 97 | lastWeek := time.Now().Add(-time.Hour * 24 * 7) 98 | if err := DB.Where("updated_at < ? AND useable = ?", lastWeek, false).Delete(&Proxy{}); err != nil { 99 | var count int64 100 | DB.Model(&Proxy{}).Where("updated_at < ? AND useable = ?", lastWeek, false).Count(&count) 101 | if count == 0 { 102 | log.Infoln("database: Nothing old to sweep") // TODO always this line? 
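			// Answering the TODO above: Where(...).Delete(&Proxy{}) returns a *gorm.DB handle
			// rather than a plain error, so the enclosing `if err := ...; err != nil` branch is
			// taken on every sweep. The real failure signal is the handle's Error field (read
			// below as err.Error); when the old rows were deleted successfully, or there were
			// none to begin with, count stays 0 and only this informational line is logged.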
103 | } else { 104 | log.Warnln("database: Delete old item failed: %s", err.Error.Error()) 105 | } 106 | } else { 107 | log.Infoln("database: Swept old and unusable proxies") 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /log/file.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | ) 7 | 8 | var ( 9 | logDir = "tmp" 10 | logFilePath = filepath.Join(logDir, "run.log") 11 | allLogFilePath = filepath.Join(logDir, "all.log") 12 | ) 13 | 14 | var logFile *os.File 15 | var allLogFile *os.File 16 | 17 | func init() { 18 | ok := initDir(logDir) 19 | if ok { 20 | logFile = initFile(logFilePath) 21 | allLogFile = initFile(allLogFilePath) 22 | } 23 | } 24 | 25 | func initDir(path string) bool { 26 | if _, err := os.Stat(path); os.IsNotExist(err) { 27 | if err := os.Mkdir(path, 0755); err != nil { 28 | Errorln("init log dir error: %s", err.Error()) 29 | } 30 | } 31 | return true 32 | } 33 | 34 | func initFile(path string) *os.File { 35 | f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0755) 36 | if err != nil { 37 | Errorln("get log file error: %s", err.Error()) 38 | } 39 | return f 40 | } 41 | -------------------------------------------------------------------------------- /log/level.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | ) 6 | 7 | type LogLevel int 8 | 9 | const ( 10 | TRACE LogLevel = iota 11 | DEBUG 12 | INFO 13 | WARNING 14 | ERROR 15 | ) 16 | 17 | var ( 18 | levelMapping = map[LogLevel]log.Level{ 19 | TRACE: log.TraceLevel, 20 | DEBUG: log.DebugLevel, 21 | INFO: log.InfoLevel, 22 | WARNING: log.WarnLevel, 23 | ERROR: log.ErrorLevel, 24 | } 25 | ) 26 | -------------------------------------------------------------------------------- /log/log.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "fmt" 5 | log "github.com/sirupsen/logrus" 6 | "github.com/x-cray/logrus-prefixed-formatter" 7 | "os" 8 | "sync" 9 | ) 10 | 11 | var ( 12 | level = INFO 13 | fileLogger = log.New() 14 | fileMux = sync.Mutex{} 15 | ) 16 | 17 | func init() { 18 | log.SetFormatter(&prefixed.TextFormatter{ 19 | ForceFormatting: true, 20 | }) 21 | log.SetOutput(os.Stdout) 22 | log.SetLevel(log.InfoLevel) 23 | fileLogger.SetFormatter(&prefixed.TextFormatter{ 24 | FullTimestamp: true, 25 | TimestampFormat: "2006-01-02 15:04:05", 26 | DisableColors: true, 27 | ForceFormatting: true, 28 | }) 29 | fileLogger.SetLevel(levelMapping[TRACE]) 30 | } 31 | 32 | func SetLevel(l LogLevel) { 33 | level = l 34 | log.SetLevel(levelMapping[level]) 35 | } 36 | 37 | func Traceln(format string, v ...interface{}) { 38 | log.Traceln(fmt.Sprintf(format, v...)) 39 | logToFile(TRACE, fmt.Sprintf(format, v...)) 40 | } 41 | 42 | func Debugln(format string, v ...interface{}) { 43 | log.Debugln(fmt.Sprintf(format, v...)) 44 | logToFile(DEBUG, fmt.Sprintf(format, v...)) 45 | } 46 | 47 | func Infoln(format string, v ...interface{}) { 48 | log.Infoln(fmt.Sprintf(format, v...)) 49 | logToFile(INFO, fmt.Sprintf(format, v...)) 50 | } 51 | 52 | func Warnln(format string, v ...interface{}) { 53 | log.Warnln(fmt.Sprintf(format, v...)) 54 | logToFile(WARNING, fmt.Sprintf(format, v...)) 55 | } 56 | 57 | func Errorln(format string, v ...interface{}) { 58 | log.Errorln(fmt.Sprintf(format, v...)) 59 | logToFile(ERROR, 
fmt.Sprintf(format, v...)) 60 | } 61 | 62 | func logToFile(l LogLevel, data string) { 63 | if l >= level { 64 | if logFile != nil { 65 | fileMux.Lock() 66 | fileLogger.SetOutput(logFile) 67 | fileLogger.Logln(levelMapping[l], data) 68 | fileMux.Unlock() 69 | } 70 | } 71 | if allLogFile != nil { 72 | fileMux.Lock() 73 | fileLogger.SetOutput(allLogFile) 74 | fileLogger.Logln(levelMapping[l], data) 75 | fileMux.Unlock() 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | _ "net/http/pprof" 6 | "os" 7 | 8 | "github.com/Sansui233/proxypool/api" 9 | "github.com/Sansui233/proxypool/internal/app" 10 | "github.com/Sansui233/proxypool/internal/cron" 11 | "github.com/Sansui233/proxypool/internal/database" 12 | "github.com/Sansui233/proxypool/log" 13 | "github.com/Sansui233/proxypool/pkg/proxy" 14 | ) 15 | 16 | var configFilePath = "" 17 | var debugMode = false 18 | 19 | func main() { 20 | //go func() { 21 | // http.ListenAndServe("0.0.0.0:6060", nil) 22 | //}() 23 | 24 | flag.StringVar(&configFilePath, "c", "", "path to config file: config.yaml") 25 | flag.BoolVar(&debugMode, "d", false, "debug output") 26 | flag.Parse() 27 | 28 | log.SetLevel(log.INFO) 29 | if debugMode { 30 | log.SetLevel(log.DEBUG) 31 | } 32 | if configFilePath == "" { 33 | configFilePath = os.Getenv("CONFIG_FILE") 34 | } 35 | if configFilePath == "" { 36 | configFilePath = "config.yaml" 37 | } 38 | err := app.InitConfigAndGetters(configFilePath) 39 | if err != nil { 40 | log.Errorln("Configuration init error: %s", err.Error()) 41 | panic(err) 42 | } 43 | 44 | database.InitTables() 45 | // init GeoIp db reader and map between emoji's and countries 46 | // return: struct geoIp (dbreader, emojimap) 47 | err = proxy.InitGeoIpDB() 48 | if err != nil { 49 | os.Exit(1) 50 | } 51 | log.Infoln("Do the first crawl...") 52 | go app.CrawlGo() // 抓取主程序 53 | go cron.Cron() // 定时运行 54 | api.Run() // Web Serve 55 | } 56 | -------------------------------------------------------------------------------- /pkg/getter/base.go: -------------------------------------------------------------------------------- 1 | package getter 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | 7 | "github.com/Sansui233/proxypool/pkg/proxy" 8 | "github.com/Sansui233/proxypool/pkg/tool" 9 | ) 10 | 11 | // functions for getters 12 | type Getter interface { 13 | Get() proxy.ProxyList 14 | Get2Chan(pc chan proxy.Proxy) 15 | Get2ChanWG(pc chan proxy.Proxy, wg *sync.WaitGroup) 16 | } 17 | 18 | // function type that creates getters 19 | type creator func(options tool.Options) (getter Getter, err error) 20 | 21 | // map str sourceType -> func creating getters, 22 | // registered in package init() 23 | var creatorMap = make(map[string]creator) 24 | 25 | func Register(sourceType string, c creator) { 26 | creatorMap[sourceType] = c 27 | } 28 | 29 | func NewGetter(sourceType string, options tool.Options) (getter Getter, err error) { 30 | c, ok := creatorMap[sourceType] 31 | if ok { 32 | return c(options) 33 | } 34 | return nil, ErrorCreaterNotSupported 35 | } 36 | 37 | func StringArray2ProxyArray(origin []string) proxy.ProxyList { 38 | results := make(proxy.ProxyList, 0) 39 | for _, link := range origin { 40 | p, err := proxy.ParseProxyFromLink(link) 41 | if err == nil && p != nil { 42 | results = append(results, p) 43 | } 44 | } 45 | return results 46 | } 47 | 48 | func ClashProxy2ProxyArray(origin 
[]map[string]interface{}) proxy.ProxyList { 49 | results := make(proxy.ProxyList, 0, len(origin)) 50 | for _, pjson := range origin { 51 | p, err := proxy.ParseProxyFromClashProxy(pjson) 52 | if err == nil && p != nil { 53 | results = append(results, p) 54 | } 55 | } 56 | return results 57 | } 58 | 59 | func GrepLinksFromString(text string) []string { 60 | results := proxy.GrepSSRLinkFromString(text) 61 | results = append(results, proxy.GrepVmessLinkFromString(text)...) 62 | results = append(results, proxy.GrepSSLinkFromString(text)...) 63 | results = append(results, proxy.GrepTrojanLinkFromString(text)...) 64 | return results 65 | } 66 | 67 | func FuzzParseProxyFromString(text string) proxy.ProxyList { 68 | return StringArray2ProxyArray(GrepLinksFromString(text)) 69 | } 70 | 71 | var ( 72 | ErrorUrlNotFound = errors.New("url should be specified") 73 | ErrorCreaterNotSupported = errors.New("type not supported") 74 | ) 75 | 76 | func AssertTypeStringNotNull(i interface{}) (str string, err error) { 77 | switch i.(type) { 78 | case string: 79 | str = i.(string) 80 | if str == "" { 81 | return "", errors.New("string is null") 82 | } 83 | return str, nil 84 | default: 85 | return "", errors.New("type is not string") 86 | } 87 | return "", nil 88 | } 89 | -------------------------------------------------------------------------------- /pkg/getter/clash.go: -------------------------------------------------------------------------------- 1 | package getter 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/log" 5 | "github.com/Sansui233/proxypool/pkg/proxy" 6 | "github.com/Sansui233/proxypool/pkg/tool" 7 | "gopkg.in/yaml.v3" 8 | "io/ioutil" 9 | "sync" 10 | ) 11 | 12 | func init() { 13 | Register("clash", NewClashGetter) 14 | } 15 | 16 | type Clash struct { 17 | Url string 18 | } 19 | 20 | type config struct { 21 | Proxy []map[string]interface{} `json:"proxies" yaml:"proxies"` 22 | } 23 | 24 | func (c *Clash) Get() proxy.ProxyList { 25 | resp, err := tool.GetHttpClient().Get(c.Url) 26 | if err != nil { 27 | return nil 28 | } 29 | defer resp.Body.Close() 30 | body, err := ioutil.ReadAll(resp.Body) 31 | if err != nil { 32 | return nil 33 | } 34 | 35 | conf := config{} 36 | err = yaml.Unmarshal(body, &conf) 37 | if err != nil { 38 | return nil 39 | } 40 | 41 | return ClashProxy2ProxyArray(conf.Proxy) 42 | 43 | } 44 | 45 | func (c *Clash) Get2Chan(pc chan proxy.Proxy) { 46 | nodes := c.Get() 47 | log.Infoln("STATISTIC: Clash\tcount=%d\turl=%s\n", len(nodes), c.Url) 48 | for _, node := range nodes { 49 | pc <- node 50 | } 51 | } 52 | 53 | func (c *Clash) Get2ChanWG(pc chan proxy.Proxy, wg *sync.WaitGroup) { 54 | defer wg.Done() 55 | nodes := c.Get() 56 | log.Infoln("STATISTIC: Clash\tcount=%d\turl=%s\n", len(nodes), c.Url) 57 | for _, node := range nodes { 58 | pc <- node 59 | } 60 | } 61 | 62 | func NewClashGetter(options tool.Options) (getter Getter, err error) { 63 | urlInterface, found := options["url"] 64 | if found { 65 | url, err := AssertTypeStringNotNull(urlInterface) 66 | if err != nil { 67 | return nil, err 68 | } 69 | return &Clash{ 70 | Url: url, 71 | }, nil 72 | } 73 | return nil, ErrorUrlNotFound 74 | } 75 | -------------------------------------------------------------------------------- /pkg/getter/subscribe.go: -------------------------------------------------------------------------------- 1 | package getter 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/log" 5 | "io/ioutil" 6 | "strings" 7 | "sync" 8 | 9 | "github.com/Sansui233/proxypool/pkg/proxy" 10 | 
"github.com/Sansui233/proxypool/pkg/tool" 11 | ) 12 | 13 | // Add key value pair to creatorMap(string → creator) in base.go 14 | func init() { 15 | Register("subscribe", NewSubscribe) 16 | } 17 | 18 | // Subscribe is A Getter with an additional property 19 | type Subscribe struct { 20 | Url string 21 | } 22 | 23 | // Get() of Subscribe is to implement Getter interface 24 | func (s *Subscribe) Get() proxy.ProxyList { 25 | resp, err := tool.GetHttpClient().Get(s.Url) 26 | if err != nil { 27 | return nil 28 | } 29 | defer resp.Body.Close() 30 | body, err := ioutil.ReadAll(resp.Body) 31 | if err != nil { 32 | return nil 33 | } 34 | 35 | nodesString, err := tool.Base64DecodeString(string(body)) 36 | if err != nil { 37 | return nil 38 | } 39 | nodesString = strings.ReplaceAll(nodesString, "\t", "") 40 | 41 | nodes := strings.Split(nodesString, "\n") 42 | return StringArray2ProxyArray(nodes) 43 | } 44 | 45 | // Get2Chan() of Subscribe is to implement Getter interface. It gets proxies and send proxy to channel one by one 46 | func (s *Subscribe) Get2ChanWG(pc chan proxy.Proxy, wg *sync.WaitGroup) { 47 | defer wg.Done() 48 | nodes := s.Get() 49 | log.Infoln("STATISTIC: Subscribe\tcount=%d\turl=%s\n", len(nodes), s.Url) 50 | for _, node := range nodes { 51 | pc <- node 52 | } 53 | } 54 | 55 | func (s *Subscribe) Get2Chan(pc chan proxy.Proxy) { 56 | nodes := s.Get() 57 | log.Infoln("STATISTIC: Subscribe\tcount=%d\turl=%s\n", len(nodes), s.Url) 58 | for _, node := range nodes { 59 | pc <- node 60 | } 61 | } 62 | 63 | func NewSubscribe(options tool.Options) (getter Getter, err error) { 64 | urlInterface, found := options["url"] 65 | if found { 66 | url, err := AssertTypeStringNotNull(urlInterface) 67 | if err != nil { 68 | return nil, err 69 | } 70 | return &Subscribe{ 71 | Url: url, 72 | }, nil 73 | } 74 | return nil, ErrorUrlNotFound 75 | } 76 | -------------------------------------------------------------------------------- /pkg/getter/tgchannel.go: -------------------------------------------------------------------------------- 1 | package getter 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Sansui233/proxypool/log" 6 | "io/ioutil" 7 | "strings" 8 | "sync" 9 | 10 | "github.com/Sansui233/proxypool/pkg/proxy" 11 | "github.com/Sansui233/proxypool/pkg/tool" 12 | "github.com/gocolly/colly" 13 | ) 14 | 15 | func init() { 16 | Register("tgchannel", NewTGChannelGetter) 17 | } 18 | 19 | type TGChannelGetter struct { 20 | c *colly.Collector 21 | NumNeeded int 22 | results []string 23 | Url string 24 | apiUrl string 25 | } 26 | 27 | func NewTGChannelGetter(options tool.Options) (getter Getter, err error) { 28 | num, found := options["num"] 29 | t := 200 30 | switch num.(type) { 31 | case int: 32 | t = num.(int) 33 | case float64: 34 | t = int(num.(float64)) 35 | } 36 | 37 | if !found || t <= 0 { 38 | t = 200 39 | } 40 | urlInterface, found := options["channel"] 41 | if found { 42 | url, err := AssertTypeStringNotNull(urlInterface) 43 | if err != nil { 44 | return nil, err 45 | } 46 | return &TGChannelGetter{ 47 | c: tool.GetColly(), 48 | NumNeeded: t, 49 | Url: "https://t.me/s/" + url, 50 | apiUrl: "https://tg.i-c-a.su/rss/" + url, 51 | }, nil 52 | } 53 | return nil, ErrorUrlNotFound 54 | } 55 | 56 | func (g *TGChannelGetter) Get() proxy.ProxyList { 57 | result := make(proxy.ProxyList, 0) 58 | g.results = make([]string, 0) 59 | // 找到所有的文字消息 60 | g.c.OnHTML("div.tgme_widget_message_text", func(e *colly.HTMLElement) { 61 | g.results = append(g.results, GrepLinksFromString(e.Text)...) 
62 | // 抓取到http链接,有可能是订阅链接或其他链接,无论如何试一下 63 | subUrls := urlRe.FindAllString(e.Text, -1) 64 | for _, url := range subUrls { 65 | result = append(result, (&Subscribe{Url: url}).Get()...) 66 | } 67 | }) 68 | 69 | // 找到之前消息页面的链接,加入访问队列 70 | g.c.OnHTML("link[rel=prev]", func(e *colly.HTMLElement) { 71 | if len(g.results) < g.NumNeeded { 72 | _ = e.Request.Visit(e.Attr("href")) 73 | } 74 | }) 75 | 76 | g.results = make([]string, 0) 77 | err := g.c.Visit(g.Url) 78 | if err != nil { 79 | _ = fmt.Errorf("%s", err.Error()) 80 | } 81 | result = append(result, StringArray2ProxyArray(g.results)...) 82 | 83 | // 获取文件(api需要维护) 84 | resp, err := tool.GetHttpClient().Get(g.apiUrl) 85 | if err != nil { 86 | return result 87 | } 88 | defer resp.Body.Close() 89 | body, err := ioutil.ReadAll(resp.Body) 90 | items := strings.Split(string(body), "\n") 91 | for _, s := range items { 92 | if strings.Contains(s, "enclosure url") { // get to xml node 93 | elements := strings.Split(s, "\"") 94 | for _, e := range elements { 95 | if strings.Contains(e, "https://") { 96 | // Webfuzz的可能性比较大,也有可能是订阅链接,为了不拖慢运行速度不写了 97 | result = append(result, (&WebFuzz{Url: e}).Get()...) 98 | } 99 | } 100 | } 101 | } 102 | return result 103 | } 104 | 105 | func (g *TGChannelGetter) Get2ChanWG(pc chan proxy.Proxy, wg *sync.WaitGroup) { 106 | defer wg.Done() 107 | nodes := g.Get() 108 | log.Infoln("STATISTIC: TGChannel\tcount=%d\turl=%s\n", len(nodes), g.Url) 109 | for _, node := range nodes { 110 | pc <- node 111 | } 112 | } 113 | func (g *TGChannelGetter) Get2Chan(pc chan proxy.Proxy) { 114 | nodes := g.Get() 115 | log.Infoln("STATISTIC: TGChannel\tcount=%d\turl=%s\n", len(nodes), g.Url) 116 | for _, node := range nodes { 117 | pc <- node 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /pkg/getter/web_fanqiangdang.go: -------------------------------------------------------------------------------- 1 | package getter 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Sansui233/proxypool/log" 6 | "strings" 7 | "sync" 8 | 9 | "github.com/Sansui233/proxypool/pkg/proxy" 10 | "github.com/Sansui233/proxypool/pkg/tool" 11 | "github.com/gocolly/colly" 12 | ) 13 | 14 | func init() { 15 | Register("web-fanqiangdang", NewWebFanqiangdangGetter) 16 | } 17 | 18 | type WebFanqiangdang struct { 19 | c *colly.Collector 20 | Url string 21 | results proxy.ProxyList 22 | } 23 | 24 | func NewWebFanqiangdangGetter(options tool.Options) (getter Getter, err error) { 25 | urlInterface, found := options["url"] 26 | if found { 27 | url, err := AssertTypeStringNotNull(urlInterface) 28 | if err != nil { 29 | return nil, err 30 | } 31 | return &WebFanqiangdang{ 32 | c: colly.NewCollector(), 33 | Url: url, 34 | }, nil 35 | } 36 | return nil, ErrorUrlNotFound 37 | } 38 | 39 | func (w *WebFanqiangdang) Get() proxy.ProxyList { 40 | w.results = make(proxy.ProxyList, 0) 41 | w.c.OnHTML("td.t_f", func(e *colly.HTMLElement) { 42 | innerHTML, err := e.DOM.Html() 43 | if err != nil { 44 | return 45 | } 46 | if strings.Contains(innerHTML, "data-cfemail") { 47 | decoded, err := tool.CFEmailDecode(tool.GetCFEmailPayload(innerHTML)) 48 | if err == nil { 49 | e.Text = strings.ReplaceAll(e.Text, "[email protected]", decoded) 50 | } 51 | } 52 | w.results = append(w.results, FuzzParseProxyFromString(e.Text)...) 53 | subUrls := urlRe.FindAllString(e.Text, -1) 54 | for _, url := range subUrls { 55 | w.results = append(w.results, (&Subscribe{Url: url}).Get()...) 
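// Every URL found in the post body is tried as a subscription on the spot; a failed
// fetch returns an empty list and contributes nothing to w.results.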
56 | } 57 | }) 58 | 59 | w.c.OnHTML("th.new>a[href]", func(e *colly.HTMLElement) { 60 | url := e.Attr("href") 61 | if url == "javascript:;" { 62 | return 63 | } 64 | url, err := tool.CFScriptRedirect(url) 65 | if err == nil && url[0] == '/' { 66 | url = "https://fanqiangdang.com" + url 67 | } 68 | if strings.HasPrefix(url, "https://fanqiangdang.com/thread") { 69 | _ = e.Request.Visit(url) 70 | } 71 | }) 72 | 73 | w.results = make(proxy.ProxyList, 0) 74 | err := w.c.Visit(w.Url) 75 | if err != nil { 76 | _ = fmt.Errorf("%s", err.Error()) 77 | } 78 | 79 | return w.results 80 | } 81 | 82 | func (w *WebFanqiangdang) Get2ChanWG(pc chan proxy.Proxy, wg *sync.WaitGroup) { 83 | defer wg.Done() 84 | nodes := w.Get() 85 | log.Infoln("STATISTIC: Fanqiangdang\tcount=%d\turl=%s\n", len(nodes), w.Url) 86 | for _, node := range nodes { 87 | pc <- node 88 | } 89 | } 90 | 91 | func (w *WebFanqiangdang) Get2Chan(pc chan proxy.Proxy) { 92 | nodes := w.Get() 93 | log.Infoln("STATISTIC: Fanqiangdang\tcount=%d\turl=%s\n", len(nodes), w.Url) 94 | for _, node := range nodes { 95 | pc <- node 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /pkg/getter/web_free_ssr_xyz.go: -------------------------------------------------------------------------------- 1 | package getter 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/Sansui233/proxypool/log" 6 | "io/ioutil" 7 | "sync" 8 | 9 | "github.com/Sansui233/proxypool/pkg/proxy" 10 | "github.com/Sansui233/proxypool/pkg/tool" 11 | ) 12 | 13 | func init() { 14 | Register("web-freessrxyz", NewWebFreessrxyzGetter) 15 | } 16 | 17 | const ( 18 | freessrxyzSsrLink = "https://api.free-ssr.xyz/ssr" 19 | freessrxyzV2rayLink = "https://api.free-ssr.xyz/v2ray" 20 | ) 21 | 22 | type WebFreessrXyz struct { 23 | } 24 | 25 | func NewWebFreessrxyzGetter(options tool.Options) (getter Getter, err error) { 26 | return &WebFreessrXyz{}, nil 27 | } 28 | 29 | func (w *WebFreessrXyz) Get() proxy.ProxyList { 30 | results := freessrxyzFetch(freessrxyzSsrLink) 31 | results = append(results, freessrxyzFetch(freessrxyzV2rayLink)...) 
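// Both endpoints answer with a JSON array of {"url": "..."} items; the two result sets
// are merged into a single ProxyList before returning.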
32 | return results 33 | } 34 | 35 | func (w *WebFreessrXyz) Get2ChanWG(pc chan proxy.Proxy, wg *sync.WaitGroup) { 36 | defer wg.Done() 37 | nodes := w.Get() 38 | log.Infoln("STATISTIC: FreeSSRxyz\tcount=%d\turl=%s\n", len(nodes), "api.free-ssr.xyz") 39 | for _, node := range nodes { 40 | pc <- node 41 | } 42 | } 43 | 44 | func (w *WebFreessrXyz) Get2Chan(pc chan proxy.Proxy) { 45 | nodes := w.Get() 46 | log.Infoln("STATISTIC: FreeSSRxyz\tcount=%d\turl=%s\n", len(nodes), "api.free-ssr.xyz") 47 | for _, node := range nodes { 48 | pc <- node 49 | } 50 | } 51 | 52 | func freessrxyzFetch(link string) proxy.ProxyList { 53 | resp, err := tool.GetHttpClient().Get(link) 54 | if err != nil { 55 | return nil 56 | } 57 | defer resp.Body.Close() 58 | body, err := ioutil.ReadAll(resp.Body) 59 | if err != nil { 60 | return nil 61 | } 62 | 63 | type node struct { 64 | Url string `json:"url"` 65 | } 66 | ssrs := make([]node, 0) 67 | err = json.Unmarshal(body, &ssrs) 68 | if err != nil { 69 | return nil 70 | } 71 | 72 | result := make([]string, 0) 73 | for _, node := range ssrs { 74 | u := node.Url[0:15] + node.Url[16:] 75 | result = append(result, u) 76 | } 77 | 78 | return StringArray2ProxyArray(result) 79 | } 80 | -------------------------------------------------------------------------------- /pkg/getter/web_fuzz.go: -------------------------------------------------------------------------------- 1 | package getter 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/log" 5 | "io/ioutil" 6 | "sync" 7 | 8 | "github.com/Sansui233/proxypool/pkg/proxy" 9 | "github.com/Sansui233/proxypool/pkg/tool" 10 | ) 11 | 12 | // Add key value pair to creatorMap(string → creator) in base.go 13 | func init() { 14 | // register to creator map 15 | Register("webfuzz", NewWebFuzzGetter) 16 | } 17 | 18 | /* A Getter with an additional property */ 19 | type WebFuzz struct { 20 | Url string 21 | } 22 | 23 | // Implement Getter interface 24 | func (w *WebFuzz) Get() proxy.ProxyList { 25 | resp, err := tool.GetHttpClient().Get(w.Url) 26 | if err != nil { 27 | return nil 28 | } 29 | defer resp.Body.Close() 30 | body, err := ioutil.ReadAll(resp.Body) 31 | if err != nil { 32 | return nil 33 | } 34 | return FuzzParseProxyFromString(string(body)) 35 | } 36 | 37 | func (w *WebFuzz) Get2ChanWG(pc chan proxy.Proxy, wg *sync.WaitGroup) { 38 | defer wg.Done() 39 | nodes := w.Get() 40 | log.Infoln("STATISTIC: WebFuzz\tcount=%d\turl=%s\n", len(nodes), w.Url) 41 | for _, node := range nodes { 42 | pc <- node 43 | } 44 | } 45 | 46 | func (w *WebFuzz) Get2Chan(pc chan proxy.Proxy) { 47 | nodes := w.Get() 48 | log.Infoln("STATISTIC: WebFuzz\tcount=%d\turl=%s\n", len(nodes), w.Url) 49 | for _, node := range nodes { 50 | pc <- node 51 | } 52 | } 53 | 54 | func NewWebFuzzGetter(options tool.Options) (getter Getter, err error) { 55 | urlInterface, found := options["url"] 56 | if found { 57 | url, err := AssertTypeStringNotNull(urlInterface) 58 | if err != nil { 59 | return nil, err 60 | } 61 | return &WebFuzz{Url: url}, nil 62 | } 63 | return nil, ErrorUrlNotFound 64 | } 65 | -------------------------------------------------------------------------------- /pkg/getter/web_fuzz_sub.go: -------------------------------------------------------------------------------- 1 | package getter 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/log" 5 | "io/ioutil" 6 | "regexp" 7 | "sync" 8 | 9 | "github.com/Sansui233/proxypool/pkg/proxy" 10 | "github.com/Sansui233/proxypool/pkg/tool" 11 | ) 12 | 13 | func init() { 14 | Register("webfuzzsub", 
NewWebFuzzSubGetter) 15 | } 16 | 17 | type WebFuzzSub struct { 18 | Url string 19 | } 20 | 21 | func (w *WebFuzzSub) Get() proxy.ProxyList { 22 | resp, err := tool.GetHttpClient().Get(w.Url) 23 | if err != nil { 24 | return nil 25 | } 26 | defer resp.Body.Close() 27 | body, err := ioutil.ReadAll(resp.Body) 28 | if err != nil { 29 | return nil 30 | } 31 | text := string(body) 32 | subUrls := urlRe.FindAllString(text, -1) 33 | result := make(proxy.ProxyList, 0) 34 | for _, url := range subUrls { 35 | newResult := (&Subscribe{Url: url}).Get() 36 | if len(newResult) == 0 { 37 | newResult = (&Clash{Url: url}).Get() 38 | } 39 | result = result.UniqAppendProxyList(newResult) 40 | } 41 | return result 42 | } 43 | 44 | func (w *WebFuzzSub) Get2ChanWG(pc chan proxy.Proxy, wg *sync.WaitGroup) { 45 | defer wg.Done() 46 | nodes := w.Get() 47 | log.Infoln("STATISTIC: WebFuzzSub\tcount=%d\turl=%s\n", len(nodes), w.Url) 48 | for _, node := range nodes { 49 | pc <- node 50 | } 51 | } 52 | 53 | func (w *WebFuzzSub) Get2Chan(pc chan proxy.Proxy) { 54 | nodes := w.Get() 55 | log.Infoln("STATISTIC: WebFuzzSub\tcount=%d\turl=%s\n", len(nodes), w.Url) 56 | for _, node := range nodes { 57 | pc <- node 58 | } 59 | } 60 | 61 | func NewWebFuzzSubGetter(options tool.Options) (getter Getter, err error) { 62 | urlInterface, found := options["url"] 63 | if found { 64 | url, err := AssertTypeStringNotNull(urlInterface) 65 | if err != nil { 66 | return nil, err 67 | } 68 | return &WebFuzzSub{Url: url}, nil 69 | } 70 | return nil, ErrorUrlNotFound 71 | } 72 | 73 | var urlRe = regexp.MustCompile(urlPattern) 74 | 75 | const ( 76 | // 匹配 IP4 77 | ip4Pattern = `((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)` 78 | 79 | // 匹配 IP6,参考以下网页内容: 80 | // http://blog.csdn.net/jiangfeng08/article/details/7642018 81 | ip6Pattern = `(([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|` + 82 | `(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|` + 83 | `(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|` + 84 | `(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|` + 85 | `(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|` + 86 | `(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|` + 87 | `(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|` + 88 | `(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))` 89 | 90 | // 同时匹配 IP4 和 IP6 91 | ipPattern = "(" + ip4Pattern + ")|(" + ip6Pattern + ")" 92 | 93 | // 匹配域名 94 | domainPattern = `[a-zA-Z0-9][a-zA-Z0-9_-]{0,62}(\.[a-zA-Z0-9][a-zA-Z0-9_-]{0,62})*(\.[a-zA-Z][a-zA-Z0-9]{0,10}){1}` 95 | 96 | // 匹配 URL 97 | urlPattern = `((https|http)?://)?` + // 协议 98 | `(([0-9a-zA-Z]+:)?[0-9a-zA-Z_-]+@)?` + // pwd:user@ 99 | "(" + ipPattern + "|(" + domainPattern + "))" + // IP 或域名 100 | `(:\d{1,5})?` + // 端口 101 | `(/+[a-zA-Z0-9][a-zA-Z0-9_.-]*)*/*` + // path 102 | `(\?([a-zA-Z0-9_-]+(=.*&?)*)*)*` // query 103 | ) 104 | 
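// exampleFuzzSub is an illustrative sketch added for documentation (not part of the
// original source): it shows how the urlPattern regex above and the Subscribe/Clash
// getters compose. The input text is hypothetical.
func exampleFuzzSub(text string) proxy.ProxyList {
	result := make(proxy.ProxyList, 0)
	for _, u := range urlRe.FindAllString(text, -1) { // pull every URL-looking token out of the text
		pl := (&Subscribe{Url: u}).Get() // first try it as a base64-encoded subscription
		if len(pl) == 0 {
			pl = (&Clash{Url: u}).Get() // otherwise try it as a clash yaml config
		}
		result = result.UniqAppendProxyList(pl) // merge while dropping duplicates
	}
	return result
}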
-------------------------------------------------------------------------------- /pkg/healthcheck/delaycheck.go: -------------------------------------------------------------------------------- 1 | package healthcheck 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "github.com/Sansui233/proxypool/pkg/proxy" 7 | "sync" 8 | "time" 9 | 10 | "github.com/ivpusic/grpool" 11 | 12 | "github.com/Dreamacro/clash/adapters/outbound" 13 | ) 14 | 15 | const defaultURLTestTimeout = time.Second * 5 16 | 17 | func CleanBadProxiesWithGrpool(proxies []proxy.Proxy) (cproxies []proxy.Proxy) { 18 | // Note: Grpool实现对go并发管理的封装,主要是在数据量大时减少内存占用,不会提高效率。 19 | pool := grpool.NewPool(500, 200) 20 | 21 | c := make(chan *Stat) 22 | defer close(c) 23 | m := sync.Mutex{} 24 | 25 | pool.WaitCount(len(proxies)) 26 | // 线程:延迟测试,测试过程通过grpool的job并发 27 | go func() { 28 | for _, p := range proxies { 29 | pp := p // 捕获,否则job执行时是按当前的p测试的 30 | pool.JobQueue <- func() { 31 | defer pool.JobDone() 32 | delay, err := testDelay(pp) 33 | if err == nil { 34 | m.Lock() 35 | if ps, ok := ProxyStats.Find(pp); ok { 36 | ps.UpdatePSDelay(delay) 37 | c <- ps 38 | } else { 39 | ps = &Stat{ 40 | Id: pp.Identifier(), 41 | Delay: delay, 42 | } 43 | ProxyStats = append(ProxyStats, *ps) 44 | c <- ps 45 | } 46 | m.Unlock() 47 | } 48 | } 49 | } 50 | }() 51 | done := make(chan struct{}) // 用于多线程的运行结束标识 52 | defer close(done) 53 | 54 | go func() { 55 | pool.WaitAll() 56 | pool.Release() 57 | done <- struct{}{} 58 | }() 59 | 60 | okMap := make(map[string]struct{}) 61 | for { // Note: 无限循环,直到能读取到done 62 | select { 63 | case ps := <-c: 64 | if ps.Delay > 0 { 65 | okMap[ps.Id] = struct{}{} 66 | } 67 | case <-done: 68 | cproxies = make(proxy.ProxyList, 0, 500) // 定义返回的proxylist 69 | // check usable proxy 70 | for i, _ := range proxies { 71 | if _, ok := okMap[proxies[i].Identifier()]; ok { 72 | //cproxies = append(cproxies, p.Clone()) 73 | cproxies = append(cproxies, proxies[i]) // 返回对GC不友好的指针看会怎么样 74 | } 75 | } 76 | return 77 | } 78 | } 79 | } 80 | 81 | func testDelay(p proxy.Proxy) (delay uint16, err error) { 82 | pmap := make(map[string]interface{}) 83 | err = json.Unmarshal([]byte(p.String()), &pmap) 84 | if err != nil { 85 | return 86 | } 87 | 88 | pmap["port"] = int(pmap["port"].(float64)) 89 | if p.TypeName() == "vmess" { 90 | pmap["alterId"] = int(pmap["alterId"].(float64)) 91 | if network, ok := pmap["network"]; ok && network.(string) == "h2" { 92 | return 0, nil // todo 暂无方法测试h2的延迟,clash对于h2的connection会阻塞 93 | } 94 | } 95 | 96 | clashProxy, err := outbound.ParseProxy(pmap) 97 | if err != nil { 98 | fmt.Println(err.Error()) 99 | return 100 | } 101 | 102 | sTime := time.Now() 103 | err = HTTPHeadViaProxy(clashProxy, "http://www.gstatic.com/generate_204") 104 | if err != nil { 105 | return 106 | } 107 | fTime := time.Now() 108 | delay = uint16(fTime.Sub(sTime) / time.Millisecond) 109 | 110 | return delay, err 111 | } 112 | -------------------------------------------------------------------------------- /pkg/healthcheck/speedcheck.go: -------------------------------------------------------------------------------- 1 | package healthcheck 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "github.com/Dreamacro/clash/adapters/outbound" 9 | C "github.com/Dreamacro/clash/constant" 10 | "github.com/Sansui233/proxypool/log" 11 | "github.com/Sansui233/proxypool/pkg/proxy" 12 | "github.com/ivpusic/grpool" 13 | "sort" 14 | "strconv" 15 | "strings" 16 | "sync" 17 | "time" 18 | ) 19 | 20 | var SpeedTimeout = time.Second * 10 21 | 22 | 
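// Usage sketch (illustrative only; one plausible sequence, not necessarily how the app
// wires these calls together):
//
//	alive := CleanBadProxiesWithGrpool(proxies) // delaycheck.go: keep nodes that answer a HEAD probe
//	SpeedTestNew(alive, 5)                      // measure download speed for nodes without a record
//	sorted := ProxyStats.SortProxiesBySpeed(alive)
//	_ = sorted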
// SpeedTestAll tests speed of a group of proxies. Results are stored in ProxyStats 23 | func SpeedTestAll(proxies []proxy.Proxy, conns int) { 24 | if ok := checkErrorProxies(proxies); !ok { 25 | return 26 | } 27 | numWorker := conns 28 | if numWorker <= 0 { 29 | numWorker = 5 30 | } 31 | numJob := 1 32 | if numWorker > 4 { 33 | numJob = (numWorker + 2) / 4 34 | } 35 | resultCount := 0 36 | m := sync.Mutex{} 37 | 38 | log.Infoln("Speed Test ON") 39 | doneCount := 0 40 | // use grpool 41 | pool := grpool.NewPool(numWorker, numJob) 42 | pool.WaitCount(len(proxies)) 43 | for _, p := range proxies { 44 | pp := p 45 | pool.JobQueue <- func() { 46 | defer pool.JobDone() 47 | speed, err := ProxySpeedTest(pp) 48 | if err == nil || speed > 0 { 49 | m.Lock() 50 | if proxyStat, ok := ProxyStats.Find(pp); ok { 51 | proxyStat.UpdatePSSpeed(speed) 52 | } else { 53 | ProxyStats = append(ProxyStats, Stat{ 54 | Id: pp.Identifier(), 55 | Speed: speed, 56 | }) 57 | } 58 | resultCount++ 59 | m.Unlock() 60 | } 61 | doneCount++ 62 | progress := float64(doneCount) * 100 / float64(len(proxies)) 63 | fmt.Printf("\r\t[%5.1f%% DONE]", progress) 64 | } 65 | } 66 | pool.WaitAll() 67 | pool.Release() 68 | fmt.Println() 69 | log.Infoln("Speed Test Done. Count all speed results: %d", resultCount) 70 | } 71 | 72 | // SpeedTestNew tests speed of new proxies which is not in ProxyStats. Then appended to ProxyStats 73 | func SpeedTestNew(proxies []proxy.Proxy, conns int) { 74 | if ok := checkErrorProxies(proxies); !ok { 75 | return 76 | } 77 | numWorker := conns 78 | if numWorker <= 0 { 79 | numWorker = 5 80 | } 81 | numJob := 1 82 | if numWorker > 4 { 83 | numJob = (numWorker + 2) / 4 84 | } 85 | resultCount := 0 86 | m := sync.Mutex{} 87 | 88 | log.Infoln("Speed Test ON") 89 | doneCount := 0 90 | // use grpool 91 | pool := grpool.NewPool(numWorker, numJob) 92 | pool.WaitCount(len(proxies)) 93 | for _, p := range proxies { 94 | pp := p 95 | pool.JobQueue <- func() { 96 | defer pool.JobDone() 97 | m.Lock() 98 | if proxyStat, ok := ProxyStats.Find(pp); !ok { 99 | // when proxy's Stat not exits 100 | speed, err := ProxySpeedTest(pp) 101 | if err == nil || speed > 0 { 102 | ProxyStats = append(ProxyStats, Stat{ 103 | Id: pp.Identifier(), 104 | Speed: speed, 105 | }) 106 | resultCount++ 107 | } 108 | } else if proxyStat.Speed == 0 { 109 | speed, err := ProxySpeedTest(pp) 110 | if err == nil || speed > 0 { 111 | proxyStat.UpdatePSSpeed(speed) 112 | resultCount++ 113 | } 114 | } 115 | m.Unlock() 116 | doneCount++ 117 | progress := float64(doneCount) * 100 / float64(len(proxies)) 118 | fmt.Printf("\r\t[%5.1f%% DONE]", progress) 119 | } 120 | } 121 | pool.WaitAll() 122 | pool.Release() 123 | fmt.Println() 124 | log.Infoln("Speed Test Done. New speed results count: %d", resultCount) 125 | } 126 | 127 | // ProxySpeedTest returns a speed result of a proxy. The speed result is like 20Mbit/s. -1 for error. 
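// The flow below: convert the proxy into a clash outbound, fetch the speedtest.net client
// info and server list through that outbound, sort servers by distance, take the three
// nearest, then run ping and download tests until one server yields a usable result.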
128 | func ProxySpeedTest(p proxy.Proxy) (speedResult float64, err error) { 129 | // convert to clash proxy struct 130 | pmap := make(map[string]interface{}) 131 | err = json.Unmarshal([]byte(p.String()), &pmap) 132 | if err != nil { 133 | return -1, err 134 | } 135 | pmap["port"] = int(pmap["port"].(float64)) 136 | if p.TypeName() == "vmess" { 137 | pmap["alterId"] = int(pmap["alterId"].(float64)) 138 | if network, ok := pmap["network"]; ok && network.(string) == "h2" { 139 | return 0, nil // todo 暂无方法测试h2的速度,clash对于h2的connection会阻塞 140 | } 141 | } 142 | 143 | clashProxy, err := outbound.ParseProxy(pmap) 144 | if err != nil { 145 | return -1, err 146 | } 147 | 148 | // start speedtest using speedtest.net 149 | var user *User 150 | wg := sync.WaitGroup{} 151 | wg.Add(1) 152 | go func() { 153 | defer wg.Done() 154 | user, _ = fetchUserInfo(clashProxy) 155 | }() 156 | serverList, err := fetchServerList(clashProxy) 157 | if err != nil { 158 | return -1, err 159 | } 160 | 161 | // deal fetchUserInfo routine 162 | wg.Wait() 163 | 164 | // some logically unexpected error handling 165 | if user == nil { 166 | return -1, errors.New("fetch User Infoln failed in go routine") // 我真的不会用channel抛出err,go routine的不明原因阻塞我服了。下面的两个BUG现在都不知道原因,逻辑上不该出现的 167 | } 168 | if &serverList == nil { 169 | return -1, errors.New("unexpected error when fetching serverlist: addr of var serverlist nil") 170 | } 171 | if len(serverList.Servers) == 0 { 172 | return -1, errors.New("unexpected error when fetching serverlist: unexpected 0 server") 173 | } 174 | 175 | // Calculate distance 176 | for i := range serverList.Servers { 177 | server := serverList.Servers[i] 178 | sLat, _ := strconv.ParseFloat(server.Lat, 64) 179 | sLon, _ := strconv.ParseFloat(server.Lon, 64) 180 | uLat, _ := strconv.ParseFloat(user.Lat, 64) 181 | uLon, _ := strconv.ParseFloat(user.Lon, 64) 182 | server.Distance = distance(sLat, sLon, uLat, uLon) 183 | } 184 | // Sort by distance 185 | sort.Sort(ByDistance{serverList.Servers}) 186 | 187 | var targets Servers 188 | targets = append(serverList.Servers[:3]) 189 | 190 | // Test 191 | targets.StartTest(clashProxy) 192 | speedResult = targets.GetResult() 193 | 194 | return speedResult, nil 195 | 196 | } 197 | 198 | /* Test with SpeedTest.net */ 199 | // Download Size(MB) 0.245 0.5 1.125 2 5 8 12.5 18 24.5 32 200 | var dlSizes = [...]int{350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000} 201 | 202 | //var ulSizes = [...]int{100, 300, 500, 800, 1000, 1500, 2500, 3000, 3500, 4000} //kB 203 | 204 | func pingTest(clashProxy C.Proxy, sURL string) time.Duration { 205 | pingURL := strings.Split(sURL, "/upload")[0] + "/latency.txt" 206 | 207 | l := time.Second * 10 208 | for i := 0; i < 2; i++ { 209 | sTime := time.Now() 210 | err := HTTPGetViaProxy(clashProxy, pingURL) 211 | fTime := time.Now() 212 | if err != nil { 213 | continue 214 | } 215 | if fTime.Sub(sTime) < l { 216 | l = fTime.Sub(sTime) 217 | } 218 | } 219 | return l / 2.0 220 | } 221 | 222 | // return a speed(Mbps) 223 | func downloadTest(clashProxy C.Proxy, sURL string, latency time.Duration) float64 { 224 | dlURL := strings.Split(sURL, "/upload")[0] 225 | 226 | // Warming up 227 | sTime := time.Now() 228 | err := dlWarmUp(clashProxy, dlURL) 229 | fTime := time.Now() 230 | if err != nil { 231 | return 0 232 | } 233 | // 1.125MB for each request (750 * 750 * 2) 234 | wuSpeed := 1.125 * 8 * 2 / fTime.Sub(sTime.Add(latency)).Seconds() 235 | 236 | // Decide workload by warm up speed. Weight is the level of size. 
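// weight indexes into dlSizes: each request downloads an image of roughly
// dlSizes[w]*dlSizes[w]*2 bytes, so 5 -> 2000px (~8 MB), 4 -> 1500px (~4.5 MB),
// 3 -> 1000px (~2 MB). At or below 2.5 Mbps the warm-up speed is returned as-is.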
237 | weight := 0 238 | if 10.0 < wuSpeed { 239 | weight = 5 240 | } else if 5 < wuSpeed { 241 | weight = 4 242 | } else if 2.5 < wuSpeed { 243 | weight = 3 244 | } else { // if too slow, skip main test to save time 245 | return wuSpeed 246 | } 247 | 248 | // Main speedtest 249 | dlSpeed := wuSpeed 250 | sTime = time.Now() 251 | err = downloadRequest(clashProxy, dlURL, weight) 252 | fTime = time.Now() 253 | if err != nil && errors.Is(err, context.DeadlineExceeded) { 254 | return wuSpeed // todo Incorrect Result 255 | } 256 | reqMB := dlSizes[weight] * dlSizes[weight] * 2 / 1000 / 1000 257 | dlSpeed = float64(reqMB) * 8 / fTime.Sub(sTime).Seconds() 258 | return dlSpeed 259 | } 260 | 261 | func dlWarmUp(clashProxy C.Proxy, dlURL string) error { 262 | size := dlSizes[2] 263 | url := dlURL + "/random" + strconv.Itoa(size) + "x" + strconv.Itoa(size) + ".jpg" 264 | err := HTTPGetBodyForSpeedTest(clashProxy, url, SpeedTimeout) 265 | if err != nil { 266 | return err 267 | } 268 | return nil 269 | } 270 | 271 | func downloadRequest(clashProxy C.Proxy, dlURL string, w int) error { 272 | size := dlSizes[w] 273 | url := dlURL + "/random" + strconv.Itoa(size) + "x" + strconv.Itoa(size) + ".jpg" 274 | err := HTTPGetBodyForSpeedTest(clashProxy, url, SpeedTimeout) 275 | if err != nil { 276 | return err 277 | } 278 | return nil 279 | } 280 | -------------------------------------------------------------------------------- /pkg/healthcheck/speedserver.go: -------------------------------------------------------------------------------- 1 | package healthcheck 2 | 3 | import ( 4 | "bytes" 5 | "encoding/xml" 6 | "errors" 7 | C "github.com/Dreamacro/clash/constant" 8 | ) 9 | 10 | // speedtest.net config 11 | type User struct { 12 | IP string `xml:"ip,attr"` 13 | Lat string `xml:"lat,attr"` 14 | Lon string `xml:"lon,attr"` 15 | Isp string `xml:"isp,attr"` 16 | } 17 | 18 | // Users : for decode speedtest.net xml 19 | type Users struct { 20 | Users []User `xml:"client"` 21 | } 22 | 23 | // fetchUserInfo with proxy connection 24 | func fetchUserInfo(clashProxy C.Proxy) (user *User, err error) { 25 | url := "https://www.speedtest.net/speedtest-config.php" 26 | body, err := HTTPGetBodyViaProxy(clashProxy, url) 27 | decoder := xml.NewDecoder(bytes.NewReader(body)) 28 | users := Users{} 29 | for { 30 | t, _ := decoder.Token() 31 | if t == nil { 32 | break 33 | } 34 | switch se := t.(type) { 35 | case xml.StartElement: 36 | decoder.DecodeElement(&users, &se) 37 | } 38 | } 39 | if users.Users == nil { 40 | //log.Println("Warning: Cannot fetch user information. http://www.speedtest.net/speedtest-config.php is temporarily unavailable.") 41 | return nil, errors.New("No user to speedtest.net. 
") 42 | } 43 | return &users.Users[0], nil 44 | } 45 | -------------------------------------------------------------------------------- /pkg/healthcheck/speeduser.go: -------------------------------------------------------------------------------- 1 | package healthcheck 2 | 3 | import ( 4 | "bytes" 5 | "encoding/xml" 6 | "errors" 7 | C "github.com/Dreamacro/clash/constant" 8 | "math" 9 | "time" 10 | ) 11 | 12 | // Server information 13 | type Server struct { 14 | URL string `xml:"url,attr"` 15 | Lat string `xml:"lat,attr"` 16 | Lon string `xml:"lon,attr"` 17 | Name string `xml:"name,attr"` 18 | Country string `xml:"country,attr"` 19 | Sponsor string `xml:"sponsor,attr"` 20 | ID string `xml:"id,attr"` 21 | URL2 string `xml:"url2,attr"` 22 | Host string `xml:"host,attr"` 23 | Distance float64 24 | DLSpeed float64 25 | } 26 | 27 | // ServerList : List of Server. for xml decoding 28 | type ServerList struct { 29 | Servers []Server `xml:"servers>server"` 30 | } 31 | 32 | // Servers : For sorting servers. 33 | type Servers []Server 34 | 35 | // ByDistance : For sorting servers. 36 | type ByDistance struct { 37 | Servers 38 | } 39 | 40 | // Len : length of servers. For sorting servers. 41 | func (s Servers) Len() int { 42 | return len(s) 43 | } 44 | 45 | // Swap : swap i-th and j-th. For sorting servers. 46 | func (s Servers) Swap(i, j int) { 47 | s[i], s[j] = s[j], s[i] 48 | } 49 | 50 | // Less : compare the distance. For sorting servers. 51 | func (b ByDistance) Less(i, j int) bool { 52 | return b.Servers[i].Distance < b.Servers[j].Distance 53 | } 54 | 55 | func fetchServerList(clashProxy C.Proxy) (ServerList, error) { 56 | url := "http://www.speedtest.net/speedtest-servers-static.php" 57 | body, err := HTTPGetBodyViaProxy(clashProxy, url) 58 | if err != nil { 59 | return ServerList{}, err 60 | } 61 | 62 | if len(body) == 0 { 63 | url = "http://c.speedtest.net/speedtest-servers-static.php" 64 | body, err = HTTPGetBodyViaProxy(clashProxy, url) 65 | if err != nil { 66 | return ServerList{}, err 67 | } 68 | } 69 | 70 | // Decode xml 71 | decoder := xml.NewDecoder(bytes.NewReader(body)) 72 | var serverList ServerList 73 | for { 74 | t, _ := decoder.Token() 75 | if t == nil { 76 | break 77 | } 78 | switch se := t.(type) { 79 | case xml.StartElement: 80 | _ = decoder.DecodeElement(&serverList, &se) 81 | } 82 | } 83 | if len(serverList.Servers) == 0 { 84 | return ServerList{}, errors.New("No speedtest server") 85 | } 86 | return serverList, nil 87 | } 88 | 89 | func distance(lat1 float64, lon1 float64, lat2 float64, lon2 float64) float64 { 90 | radius := 6378.137 91 | 92 | a1 := lat1 * math.Pi / 180.0 93 | b1 := lon1 * math.Pi / 180.0 94 | a2 := lat2 * math.Pi / 180.0 95 | b2 := lon2 * math.Pi / 180.0 96 | 97 | x := math.Sin(a1)*math.Sin(a2) + math.Cos(a1)*math.Cos(a2)*math.Cos(b2-b1) 98 | return radius * math.Acos(x) 99 | } 100 | 101 | // StartTest : start testing to the servers. 102 | func (svrs Servers) StartTest(clashProxy C.Proxy) { 103 | for i, _ := range svrs { 104 | latency := pingTest(clashProxy, svrs[i].URL) 105 | if latency == time.Second*5 { // fail to get latency, skip 106 | continue 107 | } else { 108 | dlSpeed := downloadTest(clashProxy, svrs[i].URL, latency) 109 | if dlSpeed > 0 { 110 | svrs[i].DLSpeed = dlSpeed 111 | break // once effective, end the test 112 | } 113 | } 114 | } 115 | } 116 | 117 | // GetResult : return testing result. 
-1 for no effective result 118 | func (svrs Servers) GetResult() float64 { 119 | if len(svrs) == 1 { 120 | return svrs[0].DLSpeed 121 | } else { 122 | avgDL := 0.0 123 | count := 0 124 | for _, s := range svrs { 125 | if s.DLSpeed > 0 { 126 | avgDL = avgDL + s.DLSpeed 127 | count++ 128 | } 129 | } 130 | if count == 0 { 131 | return -1 132 | } 133 | //fmt.Printf("Download Avg: %5.2f Mbit/s\n", avgDL/float64(len(svrs))) 134 | return avgDL / float64(count) 135 | } 136 | 137 | } 138 | -------------------------------------------------------------------------------- /pkg/healthcheck/statistic.go: -------------------------------------------------------------------------------- 1 | package healthcheck 2 | 3 | import "github.com/Sansui233/proxypool/pkg/proxy" 4 | 5 | // Statistic for a proxy 6 | type Stat struct { 7 | Speed float64 8 | Delay uint16 9 | ReqCount uint16 10 | Id string 11 | } 12 | 13 | // Statistic array for proxies 14 | type StatList []Stat 15 | 16 | // ProxyStats stores proxies' statistics 17 | var ProxyStats StatList 18 | 19 | func init() { 20 | ProxyStats = make(StatList, 0) 21 | } 22 | 23 | // Update speed for a Stat 24 | func (ps *Stat) UpdatePSSpeed(speed float64) { 25 | if ps.Speed < 60 && ps.Speed != 0 { 26 | ps.Speed = 0.3*ps.Speed + 0.7*speed 27 | } else { 28 | ps.Speed = speed 29 | } 30 | } 31 | 32 | // Update delay for a Stat 33 | func (ps *Stat) UpdatePSDelay(delay uint16) { 34 | ps.Delay = delay 35 | } 36 | 37 | // Count + 1 for a Stat 38 | func (ps *Stat) UpdatePSCount() { 39 | ps.ReqCount++ 40 | } 41 | 42 | // Find a proxy's Stat in StatList 43 | func (psList StatList) Find(p proxy.Proxy) (*Stat, bool) { 44 | s := p.Identifier() 45 | for i, _ := range psList { 46 | if psList[i].Id == s { 47 | return &psList[i], true 48 | } 49 | } 50 | return nil, false 51 | } 52 | 53 | // Return proxies that request count more than a given nubmer 54 | func (psList StatList) ReqCountThan(n uint16, pl []proxy.Proxy, reset bool) []proxy.Proxy { 55 | proxies := make([]proxy.Proxy, 0) 56 | for _, p := range pl { 57 | for j, _ := range psList { 58 | if psList[j].ReqCount > n && p.Identifier() == psList[j].Id { 59 | proxies = append(proxies, p) 60 | } 61 | } 62 | } 63 | // reset request count 64 | if reset { 65 | for i, _ := range psList { 66 | psList[i].ReqCount = 0 67 | } 68 | } 69 | return proxies 70 | } 71 | 72 | // Sort proxies by speed. Notice that this returns the same pointer. 73 | func (psList StatList) SortProxiesBySpeed(proxies []proxy.Proxy) []proxy.Proxy { 74 | if ok := checkErrorProxies(proxies); !ok { 75 | return proxies 76 | } 77 | l := len(proxies) 78 | if l == 1 { 79 | return proxies 80 | } 81 | // Classic bubble Sort. Biggest the first 82 | for i := 0; i < l-1; i++ { // i defines unsorted list bound 83 | flag := false 84 | for j := 0; j < l-1-i; j++ { 85 | ps1, ok1 := psList.Find(proxies[j]) 86 | ps2, ok2 := psList.Find(proxies[j+1]) 87 | // validate records, put no record proxy behind 88 | if !ok2 { 89 | continue 90 | } else if !ok1 && ok2 { 91 | t := proxies[j] 92 | proxies[j] = proxies[j+1] 93 | proxies[j+1] = t 94 | flag = true 95 | continue 96 | } 97 | // else: validate speed value, put zero speed proxy behind 98 | if ps2.Speed == 0 { 99 | continue 100 | } else if ps1.Speed == 0 { // when ps2.speed != 0, validate ps1 101 | t := proxies[j] 102 | proxies[j] = proxies[j+1] 103 | proxies[j+1] = t 104 | flag = true 105 | continue 106 | } else { 107 | // Reach the real speed sort. Too much code on validation. 
I'm so tired 108 | if ps1.Speed < ps2.Speed { 109 | t := proxies[j] 110 | proxies[j] = proxies[j+1] 111 | proxies[j+1] = t 112 | flag = true 113 | } 114 | } 115 | } 116 | if flag == false { 117 | break 118 | } 119 | } 120 | return proxies 121 | } 122 | -------------------------------------------------------------------------------- /pkg/healthcheck/util.go: -------------------------------------------------------------------------------- 1 | package healthcheck 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | C "github.com/Dreamacro/clash/constant" 7 | "github.com/Sansui233/proxypool/pkg/proxy" 8 | "io/ioutil" 9 | "net" 10 | "net/http" 11 | "net/url" 12 | "time" 13 | ) 14 | 15 | // DO NOT EDIT. Copied from clash because it's an unexported function 16 | func urlToMetadata(rawURL string) (addr C.Metadata, err error) { 17 | u, err := url.Parse(rawURL) 18 | if err != nil { 19 | return 20 | } 21 | 22 | port := u.Port() 23 | if port == "" { 24 | switch u.Scheme { 25 | case "https": 26 | port = "443" 27 | case "http": 28 | port = "80" 29 | default: 30 | err = fmt.Errorf("%s scheme not Support", rawURL) 31 | return 32 | } 33 | } 34 | 35 | addr = C.Metadata{ 36 | AddrType: C.AtypDomainName, 37 | Host: u.Hostname(), 38 | DstIP: nil, 39 | DstPort: port, 40 | } 41 | return 42 | } 43 | 44 | func HTTPGetViaProxy(clashProxy C.Proxy, url string) error { 45 | ctx, cancel := context.WithTimeout(context.Background(), defaultURLTestTimeout) 46 | defer cancel() 47 | 48 | addr, err := urlToMetadata(url) 49 | if err != nil { 50 | return err 51 | } 52 | conn, err := clashProxy.DialContext(ctx, &addr) // 建立到proxy server的connection,对Proxy的类别做了自适应相当于泛型 53 | if err != nil { 54 | return err 55 | } 56 | defer conn.Close() 57 | 58 | req, err := http.NewRequest(http.MethodGet, url, nil) 59 | if err != nil { 60 | return err 61 | } 62 | req = req.WithContext(ctx) 63 | 64 | transport := &http.Transport{ 65 | // Note: Dial specifies the dial function for creating unencrypted TCP connections. 66 | // When httpClient sets this transport, it will use the tcp/udp connection returned from 67 | // function Dial instead of default tcp/udp connection. It's the key to set custom proxy for http transport 68 | Dial: func(string, string) (net.Conn, error) { 69 | return conn, nil 70 | }, 71 | // from http.DefaultTransport 72 | MaxIdleConns: 100, 73 | IdleConnTimeout: 90 * time.Second, 74 | TLSHandshakeTimeout: 10 * time.Second, 75 | ExpectContinueTimeout: 1 * time.Second, 76 | } 77 | 78 | client := http.Client{ 79 | Transport: transport, 80 | CheckRedirect: func(req *http.Request, via []*http.Request) error { 81 | return http.ErrUseLastResponse 82 | }, 83 | } 84 | resp, err := client.Do(req) 85 | if err != nil { 86 | return err 87 | } 88 | resp.Body.Close() 89 | return nil 90 | } 91 | 92 | func HTTPHeadViaProxy(clashProxy C.Proxy, url string) error { 93 | ctx, cancel := context.WithTimeout(context.Background(), defaultURLTestTimeout) 94 | defer cancel() 95 | 96 | addr, err := urlToMetadata(url) 97 | if err != nil { 98 | return err 99 | } 100 | conn, err := clashProxy.DialContext(ctx, &addr) // 建立到proxy server的connection,对Proxy的类别做了自适应相当于泛型 101 | if err != nil { 102 | return err 103 | } 104 | defer conn.Close() 105 | 106 | req, err := http.NewRequest(http.MethodHead, url, nil) 107 | if err != nil { 108 | return err 109 | } 110 | req = req.WithContext(ctx) 111 | 112 | transport := &http.Transport{ 113 | // Note: Dial specifies the dial function for creating unencrypted TCP connections. 
114 | // When httpClient sets this transport, it will use the tcp/udp connection returned from 115 | // function Dial instead of default tcp/udp connection. It's the key to set custom proxy for http transport 116 | Dial: func(string, string) (net.Conn, error) { 117 | return conn, nil 118 | }, 119 | // from http.DefaultTransport 120 | MaxIdleConns: 100, 121 | IdleConnTimeout: 90 * time.Second, 122 | TLSHandshakeTimeout: 10 * time.Second, 123 | ExpectContinueTimeout: 1 * time.Second, 124 | } 125 | 126 | client := http.Client{ 127 | Transport: transport, 128 | CheckRedirect: func(req *http.Request, via []*http.Request) error { 129 | return http.ErrUseLastResponse 130 | }, 131 | } 132 | resp, err := client.Do(req) 133 | if err != nil { 134 | return err 135 | } 136 | resp.Body.Close() 137 | return nil 138 | } 139 | 140 | func HTTPGetBodyViaProxy(clashProxy C.Proxy, url string) ([]byte, error) { 141 | ctx, cancel := context.WithTimeout(context.Background(), defaultURLTestTimeout) 142 | defer cancel() 143 | 144 | addr, err := urlToMetadata(url) 145 | if err != nil { 146 | return nil, err 147 | } 148 | conn, err := clashProxy.DialContext(ctx, &addr) // 建立到proxy server的connection,对Proxy的类别做了自适应相当于泛型 149 | if err != nil { 150 | return nil, err 151 | } 152 | defer conn.Close() 153 | 154 | req, err := http.NewRequest(http.MethodGet, url, nil) 155 | if err != nil { 156 | return nil, err 157 | } 158 | req = req.WithContext(ctx) 159 | 160 | transport := &http.Transport{ 161 | // Note: Dial specifies the dial function for creating unencrypted TCP connections. 162 | // When httpClient sets this transport, it will use the tcp/udp connection returned from 163 | // function Dial instead of default tcp/udp connection. It's the key to set custom proxy for http transport 164 | Dial: func(string, string) (net.Conn, error) { 165 | return conn, nil 166 | }, 167 | // from http.DefaultTransport 168 | MaxIdleConns: 100, 169 | IdleConnTimeout: 90 * time.Second, 170 | TLSHandshakeTimeout: 10 * time.Second, 171 | ExpectContinueTimeout: 1 * time.Second, 172 | } 173 | 174 | client := http.Client{ 175 | Transport: transport, 176 | CheckRedirect: func(req *http.Request, via []*http.Request) error { 177 | return http.ErrUseLastResponse 178 | }, 179 | } 180 | resp, err := client.Do(req) 181 | if err != nil { 182 | return nil, err 183 | } 184 | defer resp.Body.Close() 185 | 186 | // read speedtest config file 187 | body, err := ioutil.ReadAll(resp.Body) 188 | if err != nil { 189 | return nil, err 190 | } 191 | return body, nil 192 | } 193 | 194 | func HTTPGetBodyForSpeedTest(clashProxy C.Proxy, url string, t time.Duration) error { 195 | ctx, cancel := context.WithTimeout(context.Background(), t) 196 | defer cancel() 197 | 198 | addr, err := urlToMetadata(url) 199 | if err != nil { 200 | return err 201 | } 202 | conn, err := clashProxy.DialContext(ctx, &addr) // 建立到proxy server的connection,对Proxy的类别做了自适应相当于泛型 203 | if err != nil { 204 | return err 205 | } 206 | defer conn.Close() 207 | 208 | req, err := http.NewRequest(http.MethodGet, url, nil) 209 | if err != nil { 210 | return err 211 | } 212 | req = req.WithContext(ctx) 213 | 214 | transport := &http.Transport{ 215 | // Note: Dial specifies the dial function for creating unencrypted TCP connections. 216 | // When httpClient sets this transport, it will use the tcp/udp connection returned from 217 | // function Dial instead of default tcp/udp connection. 
It's the key to set custom proxy for http transport 218 | Dial: func(string, string) (net.Conn, error) { 219 | return conn, nil 220 | }, 221 | // from http.DefaultTransport 222 | MaxIdleConns: 100, 223 | IdleConnTimeout: 90 * time.Second, 224 | TLSHandshakeTimeout: 10 * time.Second, 225 | ExpectContinueTimeout: 1 * time.Second, 226 | } 227 | 228 | client := http.Client{ 229 | Transport: transport, 230 | CheckRedirect: func(req *http.Request, via []*http.Request) error { 231 | return http.ErrUseLastResponse 232 | }, 233 | } 234 | resp, err := client.Do(req) 235 | if err != nil { 236 | return err 237 | } 238 | defer resp.Body.Close() 239 | 240 | // read speedtest config file 241 | _, err = ioutil.ReadAll(resp.Body) 242 | if err != nil { 243 | return err 244 | } 245 | return nil 246 | } 247 | 248 | func checkErrorProxies(proxies []proxy.Proxy) bool { 249 | if proxies == nil { 250 | return false 251 | } 252 | if len(proxies) == 0 { 253 | return false 254 | } 255 | if proxies[0] == nil { 256 | return false 257 | } 258 | return true 259 | } 260 | -------------------------------------------------------------------------------- /pkg/provider/base.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Sansui233/proxypool/log" 6 | "github.com/Sansui233/proxypool/pkg/healthcheck" 7 | "math" 8 | "strconv" 9 | "strings" 10 | 11 | "github.com/Sansui233/proxypool/pkg/proxy" 12 | ) 13 | 14 | type Provider interface { 15 | Provide() string 16 | } 17 | 18 | type Base struct { 19 | Proxies *proxy.ProxyList `yaml:"proxies"` 20 | Types string `yaml:"type"` 21 | Country string `yaml:"country"` 22 | NotCountry string `yaml:"not_country"` 23 | Speed string `yaml:"speed"` 24 | } 25 | 26 | // 根据子类的的Provide()传入的信息筛选节点,结果会改变传入的proxylist。 27 | func (b *Base) preFilter() { 28 | proxies := make(proxy.ProxyList, 0) 29 | 30 | if ok := checkErrorProxies(*b.Proxies); !ok { 31 | log.Warnln("provider: nothing to provide") 32 | b.Proxies = &proxies 33 | return 34 | } 35 | 36 | needFilterType := true 37 | needFilterCountry := true 38 | needFilterNotCountry := true 39 | needFilterSpeed := true 40 | if b.Types == "" || b.Types == "all" { 41 | needFilterType = false 42 | } 43 | if b.Country == "" || b.Country == "all" { 44 | needFilterCountry = false 45 | } 46 | if b.NotCountry == "" { 47 | needFilterNotCountry = false 48 | } 49 | if b.Speed == "" { 50 | needFilterSpeed = true 51 | } 52 | types := strings.Split(b.Types, ",") 53 | countries := strings.Split(b.Country, ",") 54 | notCountries := strings.Split(b.NotCountry, ",") 55 | speedMin, speedMax := checkSpeed(strings.Split(b.Speed, ",")) 56 | 57 | if speedMin == -1 { 58 | needFilterSpeed = false 59 | } 60 | 61 | bProxies := *b.Proxies 62 | for _, p := range bProxies { 63 | if needFilterType { 64 | typeOk := false 65 | for _, t := range types { 66 | if p.TypeName() == t { 67 | typeOk = true 68 | break 69 | } 70 | } 71 | if !typeOk { 72 | goto exclude 73 | } 74 | } 75 | 76 | if needFilterNotCountry { 77 | for _, c := range notCountries { 78 | if strings.Contains(p.BaseInfo().Name, c) { 79 | goto exclude 80 | } 81 | } 82 | } 83 | 84 | if needFilterCountry { 85 | countryOk := false 86 | for _, c := range countries { 87 | if strings.Contains(p.BaseInfo().Name, c) { 88 | countryOk = true 89 | break 90 | } 91 | } 92 | if !countryOk { 93 | goto exclude 94 | } 95 | } 96 | 97 | if needFilterSpeed && len(healthcheck.ProxyStats) != 0 { 98 | if ps, ok := healthcheck.ProxyStats.Find(p); ok { 99 | if 
ps.Speed != 0 { 100 | // clear history speed tag 101 | names := strings.Split(p.BaseInfo().Name, " |") 102 | if len(names) > 1 { 103 | p.BaseInfo().Name = names[0] 104 | } 105 | // check speed 106 | if ps.Speed > speedMin && ps.Speed < speedMax { 107 | p.AddToName(fmt.Sprintf(" |%5.2fMb", ps.Speed)) 108 | } else { 109 | goto exclude 110 | } 111 | } else { 112 | if speedMin != 0 { // still show 0 speed proxy when speed Min is 0 113 | goto exclude 114 | } 115 | } 116 | } else { 117 | if speedMin != 0 { // still show no speed result proxy when speed Min is 0 118 | goto exclude 119 | } 120 | } 121 | } else { // When no filter needed: clear speed tag. But I don't know why speed is stored in name while provider get proxies from cache everytime. It's name should be refreshed without speed tag. Because of gin-cache? 122 | names := strings.Split(p.BaseInfo().Name, " |") 123 | if len(names) > 1 { 124 | p.BaseInfo().Name = names[0] 125 | } 126 | } 127 | 128 | proxies = append(proxies, p) 129 | // update statistic 130 | if ps, ok := healthcheck.ProxyStats.Find(p); ok { 131 | ps.UpdatePSCount() 132 | } else { 133 | healthcheck.ProxyStats = append(healthcheck.ProxyStats, healthcheck.Stat{ 134 | Id: p.Identifier(), 135 | ReqCount: 1, 136 | }) 137 | } 138 | exclude: 139 | } 140 | 141 | b.Proxies = &proxies 142 | } 143 | 144 | func checkErrorProxies(proxies []proxy.Proxy) bool { 145 | if proxies == nil { 146 | return false 147 | } 148 | if len(proxies) == 0 { 149 | return false 150 | } 151 | if proxies[0] == nil { 152 | return false 153 | } 154 | return true 155 | } 156 | 157 | func checkSpeed(speed []string) (speedMin float64, speedMax float64) { 158 | speedMin, speedMax = 0, 1000 159 | var err1, err2 error 160 | switch len(speed) { 161 | case 1: 162 | if speed[0] != "" { 163 | speedMin, err1 = strconv.ParseFloat(speed[0], 64) 164 | } 165 | case 2: 166 | speedMin, err1 = strconv.ParseFloat(speed[0], 64) 167 | speedMax, err2 = strconv.ParseFloat(speed[1], 64) 168 | } 169 | if math.IsNaN(speedMin) || err1 != nil { 170 | speedMin = 0.00 171 | } 172 | if math.IsNaN(speedMax) || err2 != nil { 173 | speedMax = 1000.00 174 | } 175 | return speedMin, speedMax 176 | } 177 | -------------------------------------------------------------------------------- /pkg/provider/clash.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/pkg/tool" 5 | "strings" 6 | 7 | "github.com/Sansui233/proxypool/pkg/proxy" 8 | ) 9 | 10 | // Clash provides functions that make proxies support clash client 11 | type Clash struct { 12 | Base 13 | } 14 | 15 | // CleanProxies cleans unsupported proxy type of clash 16 | func (c Clash) CleanProxies() (proxies proxy.ProxyList) { 17 | proxies = make(proxy.ProxyList, 0) 18 | for _, p := range *c.Proxies { 19 | if checkClashSupport(p) { 20 | proxies = append(proxies, p) 21 | } 22 | } 23 | return 24 | } 25 | 26 | // Provide of clash generates providers for clash configuration 27 | func (c Clash) Provide() string { 28 | c.preFilter() 29 | 30 | var resultBuilder strings.Builder 31 | resultBuilder.WriteString("proxies:\n") 32 | for _, p := range *c.Proxies { 33 | if checkClashSupport(p) { 34 | resultBuilder.WriteString(p.ToClash() + "\n") 35 | } 36 | } 37 | if resultBuilder.Len() == 9 { //如果没有proxy,添加无效的NULL节点,防止Clash对空节点的Provider报错 38 | resultBuilder.WriteString("- 
{\"name\":\"NULL\",\"server\":\"NULL\",\"port\":11708,\"type\":\"ssr\",\"country\":\"NULL\",\"password\":\"sEscPBiAD9K$\\u0026@79\",\"cipher\":\"aes-256-cfb\",\"protocol\":\"origin\",\"protocol_param\":\"NULL\",\"obfs\":\"http_simple\"}") 39 | } 40 | return resultBuilder.String() 41 | } 42 | 43 | // 检查单个节点的加密方式、协议类型与混淆是否是Clash所支持的 44 | func checkClashSupport(p proxy.Proxy) bool { 45 | switch p.TypeName() { 46 | case "ssr": 47 | ssr := p.(*proxy.ShadowsocksR) 48 | if tool.CheckInList(proxy.SSRCipherList, ssr.Cipher) && tool.CheckInList(ssrProtocolList, ssr.Protocol) && tool.CheckInList(ssrObfsList, ssr.Obfs) { 49 | return true 50 | } 51 | case "vmess": 52 | vmess := p.(*proxy.Vmess) 53 | if tool.CheckInList(vmessCipherList, vmess.Cipher) { 54 | return true 55 | } 56 | case "ss": 57 | ss := p.(*proxy.Shadowsocks) 58 | if tool.CheckInList(proxy.SSCipherList, ss.Cipher) { 59 | return true 60 | } 61 | case "trojan": 62 | return true 63 | default: 64 | return false 65 | } 66 | return false 67 | } 68 | 69 | var ssrObfsList = []string{ 70 | "plain", 71 | "http_simple", 72 | "http_post", 73 | "random_head", 74 | "tls1.2_ticket_auth", 75 | "tls1.2_ticket_fastauth", 76 | } 77 | 78 | var ssrProtocolList = []string{ 79 | "origin", 80 | "verify_deflate", 81 | "verify_sha1", 82 | "auth_sha1", 83 | "auth_sha1_v2", 84 | "auth_sha1_v4", 85 | "auth_aes128_md5", 86 | "auth_aes128_sha1", 87 | "auth_chain_a", 88 | "auth_chain_b", 89 | } 90 | 91 | var vmessCipherList = []string{ 92 | "auto", 93 | "aes-128-gcm", 94 | "chacha20-poly1305", 95 | "none", 96 | } 97 | -------------------------------------------------------------------------------- /pkg/provider/ssrsub.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/Sansui233/proxypool/pkg/tool" 7 | ) 8 | 9 | type SSRSub struct { 10 | Base 11 | } 12 | 13 | func (sub SSRSub) Provide() string { 14 | sub.Types = "ssr" 15 | sub.preFilter() 16 | var resultBuilder strings.Builder 17 | for _, p := range *sub.Proxies { 18 | resultBuilder.WriteString(p.Link() + "\n") 19 | } 20 | return tool.Base64EncodeString(resultBuilder.String(), false) 21 | } 22 | -------------------------------------------------------------------------------- /pkg/provider/sssub.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "encoding/json" 5 | "github.com/Sansui233/proxypool/pkg/tool" 6 | "strconv" 7 | "strings" 8 | 9 | "github.com/Sansui233/proxypool/pkg/proxy" 10 | ) 11 | 12 | type SSSub struct { 13 | Base 14 | } 15 | 16 | type ssJson struct { 17 | Remarks string `json:"remarks"` 18 | Server string `json:"server"` 19 | ServerPort string `json:"server_port"` 20 | Method string `json:"method"` 21 | Password string `json:"password"` 22 | Plugin string `json:"plugin"` 23 | PluginOpts map[string]interface{} `json:"plugin_opts"` 24 | } 25 | 26 | func (sub SSSub) Provide() string { 27 | sub.Types = "ss" 28 | sub.preFilter() 29 | proxies := make([]ssJson, 0, sub.Proxies.Len()) 30 | for _, p := range *sub.Proxies { 31 | pp := p.(*proxy.Shadowsocks) 32 | 33 | proxies = append(proxies, ssJson{ 34 | Remarks: pp.Name, 35 | Server: pp.Server, 36 | ServerPort: strconv.Itoa(pp.Port), 37 | Method: pp.Cipher, 38 | Password: pp.Password, 39 | Plugin: pp.Plugin, 40 | PluginOpts: pp.PluginOpts, 41 | }) 42 | } 43 | text, err := json.Marshal(proxies) 44 | if err != nil { 45 | return "" 46 | } 47 | return string(text) 48 | } 49 | 50 | type 
SIP002Sub struct { 51 | Base 52 | } 53 | 54 | func (sub SIP002Sub) Provide() string { 55 | sub.Types = "ss" 56 | sub.preFilter() 57 | var resultBuilder strings.Builder 58 | for _, p := range *sub.Proxies { 59 | resultBuilder.WriteString(p.Link() + "\n") 60 | } 61 | return tool.Base64EncodeString(resultBuilder.String(), false) 62 | } 63 | -------------------------------------------------------------------------------- /pkg/provider/surge.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/Sansui233/proxypool/pkg/tool" 7 | 8 | "github.com/Sansui233/proxypool/pkg/proxy" 9 | ) 10 | 11 | // Surge provides functions that make proxies support clash client 12 | type Surge struct { 13 | Base 14 | } 15 | 16 | // Provide of Surge generates proxy list supported by surge 17 | func (s Surge) Provide() string { 18 | s.preFilter() 19 | 20 | var resultBuilder strings.Builder 21 | for _, p := range *s.Proxies { 22 | if checkSurgeSupport(p) { 23 | resultBuilder.WriteString(p.ToSurge() + "\n") 24 | } 25 | } 26 | return resultBuilder.String() 27 | } 28 | 29 | func checkSurgeSupport(p proxy.Proxy) bool { 30 | switch p.(type) { 31 | case *proxy.ShadowsocksR: 32 | return false 33 | case *proxy.Vmess: 34 | return true 35 | case *proxy.Shadowsocks: 36 | ss := p.(*proxy.Shadowsocks) 37 | if tool.CheckInList(proxy.SSCipherList, ss.Cipher) { 38 | return true 39 | } 40 | default: 41 | return false 42 | } 43 | return false 44 | } 45 | -------------------------------------------------------------------------------- /pkg/provider/trojansub.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "github.com/Sansui233/proxypool/pkg/tool" 5 | "strings" 6 | ) 7 | 8 | type TrojanSub struct { 9 | Base 10 | } 11 | 12 | func (sub TrojanSub) Provide() string { 13 | sub.Types = "trojan" 14 | sub.preFilter() 15 | var resultBuilder strings.Builder 16 | for _, p := range *sub.Proxies { 17 | resultBuilder.WriteString(p.Link() + "\n") 18 | } 19 | return tool.Base64EncodeString(resultBuilder.String(), false) 20 | } 21 | -------------------------------------------------------------------------------- /pkg/provider/vmesssub.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/Sansui233/proxypool/pkg/tool" 7 | ) 8 | 9 | type VmessSub struct { 10 | Base 11 | } 12 | 13 | func (sub VmessSub) Provide() string { 14 | sub.Types = "vmess" 15 | sub.preFilter() 16 | var resultBuilder strings.Builder 17 | for _, p := range *sub.Proxies { 18 | resultBuilder.WriteString(p.Link() + "\n") 19 | } 20 | return tool.Base64EncodeString(resultBuilder.String(), false) 21 | } 22 | -------------------------------------------------------------------------------- /pkg/proxy/base.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "strings" 7 | ) 8 | 9 | /* Base implements interface Proxy. It's the basic proxy struct. 
Vmess etc extends Base*/ 10 | type Base struct { 11 | Name string `yaml:"name" json:"name" gorm:"index"` 12 | Server string `yaml:"server" json:"server" gorm:"index"` 13 | Type string `yaml:"type" json:"type" gorm:"index"` 14 | Country string `yaml:"country,omitempty" json:"country,omitempty" gorm:"index"` 15 | Port int `yaml:"port" json:"port" gorm:"index"` 16 | UDP bool `yaml:"udp,omitempty" json:"udp,omitempty"` 17 | Useable bool `yaml:"useable,omitempty" json:"useable,omitempty" gorm:"index"` 18 | } 19 | 20 | // TypeName() Get specific proxy type 21 | func (b *Base) TypeName() string { 22 | if b.Type == "" { 23 | return "unknown" 24 | } 25 | return b.Type 26 | } 27 | 28 | // SetName() to a proxy 29 | func (b *Base) SetName(name string) { 30 | b.Name = name 31 | } 32 | 33 | func (b *Base) AddToName(name string) { 34 | b.Name = b.Name + name 35 | } 36 | 37 | // SetIP() to a proxy 38 | func (b *Base) SetIP(ip string) { 39 | b.Server = ip 40 | } 41 | 42 | // BaseInfo() get basic info struct of a proxy 43 | func (b *Base) BaseInfo() *Base { 44 | return b 45 | } 46 | 47 | // Clone() returns a new basic proxy 48 | func (b *Base) Clone() Base { 49 | c := *b 50 | return c 51 | } 52 | 53 | // SetUseable() set Base info "Useable" (true or false) 54 | func (b *Base) SetUseable(useable bool) { 55 | b.Useable = useable 56 | } 57 | 58 | // SetUseable() set Base info "Country" (string) 59 | func (b *Base) SetCountry(country string) { 60 | b.Country = country 61 | } 62 | 63 | type Proxy interface { 64 | String() string 65 | ToClash() string 66 | ToSurge() string 67 | Link() string 68 | Identifier() string 69 | SetName(name string) 70 | AddToName(name string) 71 | SetIP(ip string) 72 | TypeName() string //ss ssr vmess trojan 73 | BaseInfo() *Base 74 | Clone() Proxy 75 | SetUseable(useable bool) 76 | SetCountry(country string) 77 | } 78 | 79 | func ParseProxyFromLink(link string) (p Proxy, err error) { 80 | if strings.HasPrefix(link, "ssr://") { 81 | p, err = ParseSSRLink(link) 82 | } else if strings.HasPrefix(link, "vmess://") { 83 | p, err = ParseVmessLink(link) 84 | } else if strings.HasPrefix(link, "ss://") { 85 | p, err = ParseSSLink(link) 86 | } else if strings.HasPrefix(link, "trojan://") { 87 | p, err = ParseTrojanLink(link) 88 | } 89 | if err != nil || p == nil { 90 | return nil, errors.New("link parse failed") 91 | } 92 | _, country, err := geoIp.Find(p.BaseInfo().Server) // IP库不准 93 | if err != nil { 94 | country = "🏁 ZZ" 95 | } 96 | p.SetCountry(country) 97 | // trojan依赖域名?<-这是啥?不管什么情况感觉都不应该替换域名为IP(主要是IP库的质量和节点质量不该挂钩) 98 | //if p.TypeName() != "trojan" { 99 | // p.SetIP(ip) 100 | //} 101 | return 102 | } 103 | 104 | func ParseProxyFromClashProxy(p map[string]interface{}) (proxy Proxy, err error) { 105 | p["name"] = "" 106 | pjson, err := json.Marshal(p) 107 | if err != nil { 108 | return nil, err 109 | } 110 | switch p["type"].(string) { 111 | case "ss": 112 | var proxy Shadowsocks 113 | err := json.Unmarshal(pjson, &proxy) 114 | if err != nil { 115 | return nil, err 116 | } 117 | return &proxy, nil 118 | case "ssr": 119 | var proxy ShadowsocksR 120 | err := json.Unmarshal(pjson, &proxy) 121 | if err != nil { 122 | return nil, err 123 | } 124 | return &proxy, nil 125 | case "vmess": 126 | var proxy Vmess 127 | err := json.Unmarshal(pjson, &proxy) 128 | if err != nil { 129 | return nil, err 130 | } 131 | return &proxy, nil 132 | case "trojan": 133 | var proxy Trojan 134 | err := json.Unmarshal(pjson, &proxy) 135 | if err != nil { 136 | return nil, err 137 | } 138 | return &proxy, nil 139 | } 
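// fall through: the "type" field did not match any supported proxy kind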
140 | return nil, errors.New("clash json parse failed") 141 | } 142 | -------------------------------------------------------------------------------- /pkg/proxy/convert.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/Sansui233/proxypool/pkg/tool" 7 | ) 8 | 9 | var ErrorTypeCanNotConvert = errors.New("type not supported") 10 | 11 | // Convert2SSR converts an ss proxy to ShadowsocksR if possible 12 | func Convert2SSR(p Proxy) (ssr *ShadowsocksR, err error) { 13 | if p.TypeName() == "ss" { 14 | ss, ok := p.(*Shadowsocks) 15 | if !ok { 16 | return nil, errors.New("not a shadowsocks proxy") 17 | } 18 | if !tool.CheckInList(SSRCipherList, ss.Cipher) { 19 | return nil, errors.New("cipher not supported") 20 | } 21 | base := ss.Base 22 | base.Type = "ssr" 23 | return &ShadowsocksR{ 24 | Base: base, 25 | Password: ss.Password, 26 | Cipher: ss.Cipher, 27 | Protocol: "origin", 28 | Obfs: "plain", 29 | Group: "", 30 | }, nil 31 | } 32 | return nil, ErrorTypeCanNotConvert 33 | } 34 | 35 | // Convert2SS converts an ssr proxy to Shadowsocks if possible 36 | func Convert2SS(p Proxy) (ss *Shadowsocks, err error) { 37 | if p.TypeName() == "ssr" { 38 | ssr, ok := p.(*ShadowsocksR) 39 | if !ok { 40 | return nil, errors.New("not a shadowsocksr proxy") 41 | } 42 | if !tool.CheckInList(SSCipherList, ssr.Cipher) { 43 | return nil, errors.New("cipher not supported") 44 | } 45 | if ssr.Protocol != "origin" || ssr.Obfs != "plain" { 46 | return nil, errors.New("protocol or obfs not allowed") 47 | } 48 | base := ssr.Base 49 | base.Type = "ss" 50 | return &Shadowsocks{ 51 | Base: base, 52 | Password: ssr.Password, 53 | Cipher: ssr.Cipher, 54 | Plugin: "", 55 | PluginOpts: nil, 56 | }, nil 57 | } 58 | return nil, ErrorTypeCanNotConvert 59 | } 60 | 61 | var SSRCipherList = []string{ 62 | "aes-128-cfb", 63 | "aes-192-cfb", 64 | "aes-256-cfb", 65 | "aes-128-ctr", 66 | "aes-192-ctr", 67 | "aes-256-ctr", 68 | "aes-128-ofb", 69 | "aes-192-ofb", 70 | "aes-256-ofb", 71 | "des-cfb", 72 | "bf-cfb", 73 | "cast5-cfb", 74 | "rc4-md5", 75 | "chacha20-ietf", 76 | "salsa20", 77 | "camellia-128-cfb", 78 | "camellia-192-cfb", 79 | "camellia-256-cfb", 80 | "idea-cfb", 81 | "rc2-cfb", 82 | "seed-cfb", 83 | } 84 | 85 | var SSCipherList = []string{ 86 | "aes-128-gcm", 87 | "aes-192-gcm", 88 | "aes-256-gcm", 89 | "aes-128-cfb", 90 | "aes-192-cfb", 91 | "aes-256-cfb", 92 | "aes-128-ctr", 93 | "aes-192-ctr", 94 | "aes-256-ctr", 95 | "rc4-md5", 96 | "chacha20-ietf", 97 | "xchacha20", 98 | "chacha20-ietf-poly1305", 99 | "xchacha20-ietf-poly1305", 100 | } 101 | -------------------------------------------------------------------------------- /pkg/proxy/geoip.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "log" 8 | "net" 9 | "os" 10 | 11 | bingeoip "github.com/Sansui233/proxypool/internal/bindata/geoip" 12 | "github.com/oschwald/geoip2-golang" 13 | ) 14 | 15 | var geoIp GeoIP 16 | 17 | func InitGeoIpDB() error { 18 | geodb := "assets/GeoLite2-City.mmdb" 19 | // check whether the GeoIP database file already exists 20 | _, err := os.Stat(geodb) 21 | if err != nil && os.IsNotExist(err) { 22 | err = bingeoip.RestoreAsset("", "assets/flags.json") 23 | if err != nil { 24 | panic(err) 25 | return err 26 | } 27 | err = bingeoip.RestoreAsset("", "assets/GeoLite2-City.mmdb") 28 | if err != nil { 29 | log.Println("GeoLite2-City database not found, please download it manually and save it as", geodb) 30 | panic(err) 31 | return err 32 | } 33 | geoIp = 
NewGeoIP("assets/GeoLite2-City.mmdb", "assets/flags.json") 34 | } 35 | geoIp = NewGeoIP("assets/GeoLite2-City.mmdb", "assets/flags.json") 36 | return nil 37 | } 38 | 39 | // GeoIP2 40 | type GeoIP struct { 41 | db *geoip2.Reader 42 | emojiMap map[string]string 43 | } 44 | 45 | type CountryEmoji struct { 46 | Code string `json:"code"` 47 | Emoji string `json:"emoji"` 48 | } 49 | 50 | // new geoip from db file 51 | func NewGeoIP(geodb, flags string) (geoip GeoIP) { 52 | // 运行到这里时geodb只能为存在 53 | db, err := geoip2.Open(geodb) 54 | if err != nil { 55 | log.Fatal(err) 56 | } 57 | geoip.db = db 58 | 59 | _, err = os.Stat(flags) 60 | if err != nil && os.IsNotExist(err) { 61 | log.Println("flags 文件不存在,请自行下载 flags.json,并保存在", flags) 62 | os.Exit(1) 63 | } else { 64 | data, err := ioutil.ReadFile(flags) 65 | if err != nil { 66 | log.Fatal(err) 67 | return 68 | } 69 | var countryEmojiList = make([]CountryEmoji, 0) 70 | err = json.Unmarshal(data, &countryEmojiList) 71 | if err != nil { 72 | log.Fatalln(err.Error()) 73 | return 74 | } 75 | 76 | emojiMap := make(map[string]string) 77 | for _, i := range countryEmojiList { 78 | emojiMap[i.Code] = i.Emoji 79 | } 80 | geoip.emojiMap = emojiMap 81 | } 82 | return 83 | } 84 | 85 | // find ip info 86 | func (g GeoIP) Find(ipORdomain string) (ip, country string, err error) { 87 | ips, err := net.LookupIP(ipORdomain) 88 | if err != nil { 89 | return "", "", err 90 | } 91 | ip = ips[0].String() 92 | 93 | var record *geoip2.City 94 | record, err = g.db.City(ips[0]) 95 | if err != nil { 96 | return 97 | } 98 | countryIsoCode := record.Country.IsoCode 99 | if countryIsoCode == "" { 100 | country = fmt.Sprintf("🏁 ZZ") 101 | } 102 | emoji, found := g.emojiMap[countryIsoCode] 103 | if found { 104 | country = fmt.Sprintf("%v %v", emoji, countryIsoCode) 105 | } else { 106 | country = fmt.Sprintf("🏁 ZZ") 107 | } 108 | return 109 | } 110 | -------------------------------------------------------------------------------- /pkg/proxy/link_test.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "fmt" 5 | "github.com/Sansui233/proxypool/pkg/tool" 6 | "testing" 7 | ) 8 | 9 | func TestSSLink(t *testing.T) { 10 | ss, err := ParseSSLink("ss://YWVzLTI1Ni1jZmI6ZUlXMERuazY5NDU0ZTZuU3d1c3B2OURtUzIwMXRRMERAMTcyLjEwNC4xNjEuNTQ6ODA5OQ==#翻墙党223.13新加坡") 11 | if err != nil { 12 | t.Error(err) 13 | } 14 | fmt.Println(ss) 15 | fmt.Println(ss.Link()) 16 | ss, err = ParseSSLink(ss.Link()) 17 | if err != nil { 18 | t.Error(err) 19 | } 20 | fmt.Println(ss) 21 | } 22 | 23 | func TestSSRLink(t *testing.T) { 24 | ssr, err := ParseSSRLink("ssr://MTcyLjEwNC4xNjEuNTQ6ODA5OTpvcmlnaW46YWVzLTI1Ni1jZmI6cGxhaW46WlVsWE1FUnVhelk1TkRVMFpUWnVVM2QxYzNCMk9VUnRVekl3TVhSUk1FUT0vP29iZnNwYXJhbT0mcHJvdG9wYXJhbT0mcmVtYXJrcz01Ny03NWFLWjVZV2FNakl6TGpFejVwYXc1WXFnNVoyaCZncm91cD01cGF3NVlxZzVaMmg=") 25 | if err != nil { 26 | t.Error(err) 27 | } 28 | fmt.Println(ssr) 29 | fmt.Println(ssr.Link()) 30 | ssr, err = ParseSSRLink(ssr.Link()) 31 | if err != nil { 32 | t.Error(err) 33 | } 34 | fmt.Println(ssr) 35 | fmt.Println(ssr.ToClash()) 36 | } 37 | 38 | func TestTrojanLink(t *testing.T) { 39 | trojan, err := ParseTrojanLink("trojan://65474277@sqcu.hostmsu.ru:55551?allowinsecure=0&peer=mza.hkfq.xyz&mux=1&ws=0&wspath=&wshost=&ss=0&ssmethod=aes-128-gcm&sspasswd=&group=#%E9%A6%99%E6%B8%AFCN2-MZA%E8%8A%82%E7%82%B9-%E5%AE%BF%E8%BF%81%E8%81%94%E9%80%9A%E4%B8%AD%E8%BD%AC") 40 | if err != nil { 41 | t.Error(err) 42 | } 43 | fmt.Println(trojan) 44 | 
fmt.Println(trojan.Link()) 45 | trojan, err = ParseTrojanLink(trojan.Link()) 46 | if err != nil { 47 | t.Error(err) 48 | } 49 | fmt.Println(trojan) 50 | } 51 | 52 | func TestVmessLink(t *testing.T) { 53 | //v, err := ParseVmessLink("vmess://ew0KICAidiI6ICIyIiwNCiAgInBzIjogIuW+ruS/oeWFrOS8l+WPtyDlpJrlvannmoTlpKfljYPkuJbnlYwiLA0KICAiYWRkIjogInMyNzEuc25vZGUueHl6IiwNCiAgInBvcnQiOiAiNDQzIiwNCiAgImlkIjogIjZhOTAwZDYzLWNiOTItMzVhMC1hZWYwLTNhMGMxMWFhODUyMyIsDQogICJhaWQiOiAiMSIsDQogICJuZXQiOiAid3MiLA0KICAidHlwZSI6ICJub25lIiwNCiAgImhvc3QiOiAiczI3MS5zbm9kZS54eXoiLA0KICAicGF0aCI6ICIvcGFuZWwiLA0KICAidGxzIjogInRscyINCn0=") 54 | //v, err := ParseVmessLink("vmess://YXV0bzphMjA1ZjRiNi0xMzg2LTQ3NjUtYjQ0YS02YjFiYmE0N2Q1MzdAMTQyLjQuMTA0LjIyNjo0NDM?remarks=%F0%9F%87%BA%F0%9F%87%B8%20US_616%20caicai&obfsParam=www.036452916.xyz&path=/footers&obfs=websocket&tls=1&allowInsecure=1&alterId=64") 55 | v, err := ParseVmessLink("vmess://YXV0bzo1YjQ1ZjQ2Yi1iNTVmLTRkNWQtOGJjOS1jZjY1MzZlZjkyMzhAMTM3LjE3NS4zNS4xMzo0NDM?remarks=%F0%9F%87%BA%F0%9F%87%B8%20US_480%20caicai&obfsParam=www.4336705.xyz&path=/footers&obfs=websocket&tls=1&allowInsecure=1&alterId=64") 56 | if err != nil { 57 | t.Error(err) 58 | } 59 | fmt.Println(v) 60 | fmt.Println(v.Link()) 61 | v, err = ParseVmessLink(v.Link()) 62 | if err != nil { 63 | t.Error(err) 64 | } 65 | fmt.Println(v) 66 | } 67 | 68 | func TestNewVmessParser(t *testing.T) { 69 | linkPayload := "ew0KICAidiI6ICIyIiwNCiAgInBzIjogIuW+ruS/oeWFrOS8l+WPtyDlpJrlvannmoTlpKfljYPkuJbnlYwiLA0KICAiYWRkIjogInMyNzEuc25vZGUueHl6IiwNCiAgInBvcnQiOiAiNDQzIiwNCiAgImlkIjogIjZhOTAwZDYzLWNiOTItMzVhMC1hZWYwLTNhMGMxMWFhODUyMyIsDQogICJhaWQiOiAiMSIsDQogICJuZXQiOiAid3MiLA0KICAidHlwZSI6ICJub25lIiwNCiAgImhvc3QiOiAiczI3MS5zbm9kZS54eXoiLA0KICAicGF0aCI6ICIvcGFuZWwiLA0KICAidGxzIjogInRscyINCn0=" 70 | payload, err := tool.Base64DecodeString(linkPayload) 71 | if err != nil { 72 | fmt.Println("vmess link payload parse failed") 73 | return 74 | } 75 | jsonMap, err := str2jsonDynaUnmarshal(payload) 76 | if err != nil { 77 | fmt.Println("err: ", err) 78 | return 79 | } 80 | vmessJson, err := mapStrInter2VmessLinkJson(jsonMap) 81 | fmt.Println(vmessJson) 82 | } 83 | -------------------------------------------------------------------------------- /pkg/proxy/proxies.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | ) 8 | 9 | type ProxyList []Proxy 10 | 11 | // sort排序使用 12 | func (ps ProxyList) Len() int { 13 | return len(ps) 14 | } 15 | 16 | func (ps ProxyList) TypeLen(t string) int { 17 | l := 0 18 | for _, p := range ps { 19 | if p.TypeName() == t { 20 | l++ 21 | } 22 | } 23 | return l 24 | } 25 | 26 | var sortType = make(map[string]int) 27 | 28 | func init() { 29 | sortType["ss"] = 1 30 | sortType["ssr"] = 2 31 | sortType["vmess"] = 3 32 | sortType["trojan"] = 4 33 | } 34 | 35 | // sort排序使用 36 | func (ps ProxyList) Less(i, j int) bool { 37 | if ps[i].BaseInfo().Country == ps[j].BaseInfo().Country { 38 | return sortType[ps[i].BaseInfo().Type] < sortType[ps[j].BaseInfo().Type] 39 | } else { 40 | return ps[i].BaseInfo().Country < ps[j].BaseInfo().Country 41 | } 42 | } 43 | 44 | // sort排序使用 45 | func (ps ProxyList) Swap(i, j int) { 46 | ps[i], ps[j] = ps[j], ps[i] 47 | } 48 | 49 | // Deduplication by proxy identifier 50 | func (ps ProxyList) Deduplication() ProxyList { 51 | result := make(ProxyList, 0, len(ps)) 52 | temp := map[string]struct{}{} 53 | for _, item := range ps { 54 | if item != nil { 55 | if _, ok := 
temp[item.Identifier()]; !ok { 56 | temp[item.Identifier()] = struct{}{} 57 | result = append(result, item) 58 | } 59 | } 60 | } 61 | return result 62 | } 63 | 64 | func (ps ProxyList) Sort() ProxyList { 65 | sort.Sort(ps) 66 | return ps 67 | } 68 | 69 | func (ps ProxyList) NameSetCounrty() ProxyList { 70 | num := len(ps) 71 | for i := 0; i < num; i++ { 72 | ps[i].SetName(ps[i].BaseInfo().Country) 73 | } 74 | return ps 75 | } 76 | 77 | func (ps ProxyList) NameAddIndex() ProxyList { 78 | num := len(ps) 79 | for i := 0; i < num; i++ { 80 | ps[i].SetName(fmt.Sprintf("%s_%+02v", ps[i].BaseInfo().Name, i+1)) 81 | } 82 | return ps 83 | } 84 | 85 | func (ps ProxyList) NameReIndex() ProxyList { 86 | num := len(ps) 87 | for i := 0; i < num; i++ { 88 | originName := ps[i].BaseInfo().Name 89 | country := strings.SplitN(originName, "_", 2)[0] 90 | ps[i].SetName(fmt.Sprintf("%s_%+02v", country, i+1)) 91 | } 92 | return ps 93 | } 94 | 95 | func (ps ProxyList) NameAddTG() ProxyList { 96 | num := len(ps) 97 | for i := 0; i < num; i++ { 98 | ps[i].SetName(fmt.Sprintf("%s %s", ps[i].BaseInfo().Name, "TG@peekfun")) 99 | } 100 | return ps 101 | } 102 | 103 | func (ps ProxyList) Clone() ProxyList { 104 | result := make(ProxyList, 0, len(ps)) 105 | for _, pp := range ps { 106 | if pp != nil { 107 | result = append(result, pp.Clone()) 108 | } 109 | } 110 | return result 111 | } 112 | 113 | // Derive 将原有节点中的ss和ssr互相转换进行衍生 114 | func (ps ProxyList) Derive() ProxyList { 115 | proxies := ps 116 | for _, p := range ps { 117 | if p == nil { 118 | continue 119 | } 120 | if p.TypeName() == "ss" { 121 | ssr, err := Convert2SSR(p) 122 | if err == nil { 123 | proxies = append(proxies, ssr) 124 | } 125 | } else if p.TypeName() == "ssr" { 126 | ss, err := Convert2SS(p) 127 | if err == nil { 128 | proxies = append(proxies, ss) 129 | } 130 | } 131 | } 132 | return proxies.Deduplication() 133 | } 134 | 135 | // Append unique new proxies to original ProxyList 136 | func (ps *ProxyList) UniqAppendProxyList(new ProxyList) ProxyList { 137 | if len(new) == 0 { 138 | return *ps 139 | } 140 | if len(*ps) == 0 { 141 | return new 142 | } 143 | for _, p := range new { 144 | isExist := false 145 | for i, _ := range *ps { 146 | if (*ps)[i].Identifier() == p.Identifier() { 147 | isExist = true 148 | break 149 | } 150 | } 151 | if !isExist { 152 | *ps = append(*ps, p) 153 | } 154 | } 155 | return *ps 156 | } 157 | 158 | // Append an unique new proxy to original ProxyList 159 | func (ps *ProxyList) UniqAppendProxy(new Proxy) ProxyList { 160 | if len(*ps) == 0 { 161 | *ps = append(*ps, new) 162 | return *ps 163 | } 164 | for i, _ := range *ps { 165 | if (*ps)[i].Identifier() == new.Identifier() { 166 | return *ps 167 | } 168 | } 169 | *ps = append(*ps, new) 170 | return *ps 171 | } 172 | -------------------------------------------------------------------------------- /pkg/proxy/shadowsocks.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "net" 8 | "net/url" 9 | "regexp" 10 | "strconv" 11 | "strings" 12 | 13 | "github.com/Sansui233/proxypool/pkg/tool" 14 | ) 15 | 16 | var ( 17 | // ErrorNotSSLink is an error type 18 | ErrorNotSSLink = errors.New("not a correct ss link") 19 | ) 20 | 21 | // Shadowsocks is a type of proxy 22 | type Shadowsocks struct { 23 | Base 24 | Password string `yaml:"password" json:"password"` 25 | Cipher string `yaml:"cipher" json:"cipher"` 26 | Plugin string `yaml:"plugin,omitempty" 
json:"plugin,omitempty"` 27 | PluginOpts map[string]interface{} `yaml:"plugin-opts,omitempty" json:"plugin-opts,omitempty"` 28 | } 29 | 30 | // Identifier generates an unique identifier of one proxy 31 | func (ss Shadowsocks) Identifier() string { 32 | return net.JoinHostPort(ss.Server, strconv.Itoa(ss.Port)) + ss.Password 33 | } 34 | 35 | func (ss Shadowsocks) String() string { 36 | data, err := json.Marshal(ss) 37 | if err != nil { 38 | return "" 39 | } 40 | return string(data) 41 | } 42 | 43 | // ToClash converts proxy to clash proxy string 44 | func (ss Shadowsocks) ToClash() string { 45 | data, err := json.Marshal(ss) 46 | if err != nil { 47 | return "" 48 | } 49 | return "- " + string(data) 50 | } 51 | 52 | // ToSurge converts proxy to surge proxy string 53 | func (ss Shadowsocks) ToSurge() string { 54 | // node1 = ss, server, port, encrypt-method=, password=, obfs=, obfs-host=, udp-relay=false 55 | if ss.Plugin == "obfs" { 56 | text := fmt.Sprintf("%s = ss, %s, %d, encrypt-method=%s, password=%s, obfs=%s, udp-relay=false", 57 | ss.Name, ss.Server, ss.Port, ss.Cipher, ss.Password, ss.PluginOpts["mode"]) 58 | if ss.PluginOpts["host"].(string) != "" { 59 | text += ", obfs-host=" + ss.PluginOpts["host"].(string) 60 | } 61 | return text 62 | } else { 63 | return fmt.Sprintf("%s = ss, %s, %d, encrypt-method=%s, password=%s, udp-relay=false", 64 | ss.Name, ss.Server, ss.Port, ss.Cipher, ss.Password) 65 | } 66 | } 67 | 68 | func (ss Shadowsocks) Clone() Proxy { 69 | return &ss 70 | } 71 | 72 | // https://shadowsocks.org/en/config/quick-guide.html 73 | // Link converts a ss proxy to string 74 | func (ss Shadowsocks) Link() (link string) { 75 | payload := fmt.Sprintf("%s:%s@%s:%d", ss.Cipher, ss.Password, ss.Server, ss.Port) 76 | payload = tool.Base64EncodeString(payload, false) 77 | return fmt.Sprintf("ss://%s#%s", payload, ss.Name) 78 | } 79 | 80 | // ParseSSLink() parses an ss link to ss proxy 81 | func ParseSSLink(link string) (*Shadowsocks, error) { 82 | if !strings.HasPrefix(link, "ss://") { 83 | return nil, ErrorNotSSRLink 84 | } 85 | 86 | uri, err := url.Parse(link) 87 | if err != nil { 88 | return nil, ErrorNotSSLink 89 | } 90 | 91 | cipher := "" 92 | password := "" 93 | if uri.User.String() == "" { 94 | // base64的情况 95 | infos, err := tool.Base64DecodeString(uri.Hostname()) 96 | if err != nil { 97 | return nil, err 98 | } 99 | uri, err = url.Parse("ss://" + infos) 100 | if err != nil { 101 | return nil, err 102 | } 103 | cipher = uri.User.Username() 104 | password, _ = uri.User.Password() 105 | } else { 106 | cipherInfoString, err := tool.Base64DecodeString(uri.User.Username()) 107 | if err != nil { 108 | return nil, ErrorPasswordParseFail 109 | } 110 | cipherInfo := strings.SplitN(cipherInfoString, ":", 2) 111 | if len(cipherInfo) < 2 { 112 | return nil, ErrorPasswordParseFail 113 | } 114 | cipher = strings.ToLower(cipherInfo[0]) 115 | password = cipherInfo[1] 116 | } 117 | server := uri.Hostname() 118 | port, _ := strconv.Atoi(uri.Port()) 119 | 120 | moreInfos := uri.Query() 121 | pluginString := moreInfos.Get("plugin") 122 | plugin := "" 123 | pluginOpts := make(map[string]interface{}) 124 | if strings.Contains(pluginString, ";") { 125 | pluginInfos, err := url.ParseQuery(pluginString) 126 | if err == nil { 127 | if strings.Contains(pluginString, "obfs") { 128 | plugin = "obfs" 129 | pluginOpts["mode"] = pluginInfos.Get("obfs") 130 | pluginOpts["host"] = pluginInfos.Get("obfs-host") 131 | } else if strings.Contains(pluginString, "v2ray") { 132 | plugin = "v2ray-plugin" 133 | 
pluginOpts["mode"] = pluginInfos.Get("mode") 134 | pluginOpts["host"] = pluginInfos.Get("host") 135 | pluginOpts["tls"] = strings.Contains(pluginString, "tls") 136 | } 137 | } 138 | } 139 | if port == 0 || cipher == "" { 140 | return nil, ErrorNotSSLink 141 | } 142 | 143 | return &Shadowsocks{ 144 | Base: Base{ 145 | Name: "", 146 | Server: server, 147 | Port: port, 148 | Type: "ss", 149 | }, 150 | Password: password, 151 | Cipher: cipher, 152 | Plugin: plugin, 153 | PluginOpts: pluginOpts, 154 | }, nil 155 | } 156 | 157 | var ( 158 | ssPlainRe = regexp.MustCompile("ss://([A-Za-z0-9+/_&?=@:%.-])+") 159 | ) 160 | 161 | // GrepSSLinkFromString() remove web fuzz characters before a ss link 162 | func GrepSSLinkFromString(text string) []string { 163 | results := make([]string, 0) 164 | texts := strings.Split(text, "ss://") 165 | for _, text := range texts { 166 | results = append(results, ssPlainRe.FindAllString("ss://"+text, -1)...) 167 | } 168 | return results 169 | } 170 | -------------------------------------------------------------------------------- /pkg/proxy/shadowsocksr.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "net" 8 | "net/url" 9 | "regexp" 10 | "strconv" 11 | "strings" 12 | 13 | "github.com/Sansui233/proxypool/pkg/tool" 14 | ) 15 | 16 | var ( 17 | ErrorNotSSRLink = errors.New("not a correct ssr link") 18 | ErrorPasswordParseFail = errors.New("password parse failed") 19 | ErrorPathNotComplete = errors.New("path not complete") 20 | ErrorMissingQuery = errors.New("link missing query") 21 | ErrorProtocolParamParseFail = errors.New("protocol param parse failed") 22 | ErrorObfsParamParseFail = errors.New("obfs param parse failed") 23 | ) 24 | 25 | // 字段依据clash的配置设计 26 | type ShadowsocksR struct { 27 | Base 28 | Password string `yaml:"password" json:"password"` 29 | Cipher string `yaml:"cipher" json:"cipher"` 30 | Protocol string `yaml:"protocol" json:"protocol"` 31 | ProtocolParam string `yaml:"protocol-param,omitempty" json:"protocol_param,omitempty"` 32 | Obfs string `yaml:"obfs" json:"obfs"` 33 | ObfsParam string `yaml:"obfs-param,omitempty" json:"obfs_param,omitempty"` 34 | Group string `yaml:"group,omitempty" json:"group,omitempty"` 35 | } 36 | 37 | func (ssr ShadowsocksR) Identifier() string { 38 | return net.JoinHostPort(ssr.Server, strconv.Itoa(ssr.Port)) + ssr.Password + ssr.ProtocolParam 39 | } 40 | 41 | func (ssr ShadowsocksR) String() string { 42 | data, err := json.Marshal(ssr) 43 | if err != nil { 44 | return "" 45 | } 46 | return string(data) 47 | } 48 | 49 | func (ssr ShadowsocksR) ToClash() string { 50 | data, err := json.Marshal(ssr) 51 | if err != nil { 52 | return "" 53 | } 54 | return "- " + string(data) 55 | } 56 | 57 | func (ssr ShadowsocksR) ToSurge() string { 58 | return "" 59 | } 60 | 61 | func (ssr ShadowsocksR) Clone() Proxy { 62 | return &ssr 63 | } 64 | 65 | // https://github.com/HMBSbige/ShadowsocksR-Windows/wiki/SSR-QRcode-scheme 66 | func (ssr ShadowsocksR) Link() (link string) { 67 | payload := fmt.Sprintf("%s:%d:%s:%s:%s:%s", 68 | ssr.Server, ssr.Port, ssr.Protocol, ssr.Cipher, ssr.Obfs, tool.Base64EncodeString(ssr.Password, true)) 69 | query := url.Values{} 70 | query.Add("obfsparam", tool.Base64EncodeString(ssr.ObfsParam, true)) 71 | query.Add("protoparam", tool.Base64EncodeString(ssr.ProtocolParam, true)) 72 | //query.Add("remarks", tool.Base64EncodeString(ssr.Name, true)) 73 | query.Add("group", 
tool.Base64EncodeString("proxypoolss.herokuapp.com", true)) 74 | payload = tool.Base64EncodeString(fmt.Sprintf("%s/?%s", payload, query.Encode()), true) 75 | return fmt.Sprintf("ssr://%s", payload) 76 | } 77 | 78 | func ParseSSRLink(link string) (*ShadowsocksR, error) { 79 | if !strings.HasPrefix(link, "ssr") { 80 | return nil, ErrorNotSSRLink 81 | } 82 | 83 | ssrmix := strings.SplitN(link, "://", 2) 84 | if len(ssrmix) < 2 { 85 | return nil, ErrorNotSSRLink 86 | } 87 | linkPayloadBase64 := ssrmix[1] 88 | payload, err := tool.Base64DecodeString(linkPayloadBase64) 89 | if err != nil { 90 | return nil, ErrorMissingQuery 91 | } 92 | 93 | infoPayload := strings.SplitN(payload, "/?", 2) 94 | if len(infoPayload) < 2 { 95 | return nil, ErrorNotSSRLink 96 | } 97 | ssrpath := strings.Split(infoPayload[0], ":") 98 | if len(ssrpath) < 6 { 99 | return nil, ErrorPathNotComplete 100 | } 101 | // base info 102 | server := strings.ToLower(ssrpath[0]) 103 | port, _ := strconv.Atoi(ssrpath[1]) 104 | protocol := strings.ToLower(ssrpath[2]) 105 | cipher := strings.ToLower(ssrpath[3]) 106 | obfs := strings.ToLower(ssrpath[4]) 107 | password, err := tool.Base64DecodeString(ssrpath[5]) 108 | if err != nil { 109 | return nil, ErrorPasswordParseFail 110 | } 111 | 112 | moreInfo, _ := url.ParseQuery(infoPayload[1]) 113 | 114 | // remarks 115 | //remarks := moreInfo.Get("remarks") 116 | //remarks, err = tool.Base64DecodeString(remarks) 117 | //if err != nil { 118 | // remarks = "" 119 | // err = nil 120 | //} 121 | //if strings.ContainsAny(remarks, "\t\r\n ") { 122 | // remarks = strings.ReplaceAll(remarks, "\t", "") 123 | // remarks = strings.ReplaceAll(remarks, "\r", "") 124 | // remarks = strings.ReplaceAll(remarks, "\n", "") 125 | // remarks = strings.ReplaceAll(remarks, " ", "") 126 | //} 127 | 128 | // protocol param 129 | protocolParam, err := tool.Base64DecodeString(moreInfo.Get("protoparam")) 130 | if err != nil { 131 | return nil, ErrorProtocolParamParseFail 132 | } 133 | if tool.ContainChineseChar(protocolParam) { 134 | protocolParam = "" 135 | } 136 | if strings.HasSuffix(protocol, "_compatible") { 137 | protocol = strings.ReplaceAll(protocol, "_compatible", "") 138 | } 139 | 140 | // obfs param 141 | obfsParam, err := tool.Base64DecodeString(moreInfo.Get("obfsparam")) 142 | if err != nil { 143 | return nil, ErrorObfsParamParseFail 144 | } 145 | if tool.ContainChineseChar(obfsParam) { 146 | obfsParam = "" 147 | } 148 | if strings.HasSuffix(obfs, "_compatible") { 149 | obfs = strings.ReplaceAll(obfs, "_compatible", "") 150 | } 151 | 152 | return &ShadowsocksR{ 153 | Base: Base{ 154 | Name: "", 155 | Server: server, 156 | Port: port, 157 | Type: "ssr", 158 | }, 159 | Password: password, 160 | Cipher: cipher, 161 | Protocol: protocol, 162 | ProtocolParam: protocolParam, 163 | Obfs: obfs, 164 | ObfsParam: obfsParam, 165 | Group: "", 166 | }, nil 167 | } 168 | 169 | var ( 170 | ssrPlainRe = regexp.MustCompile("ssr://([A-Za-z0-9+/_-])+") 171 | ) 172 | 173 | func GrepSSRLinkFromString(text string) []string { 174 | results := make([]string, 0) 175 | texts := strings.Split(text, "ssr://") 176 | for _, text := range texts { 177 | results = append(results, ssrPlainRe.FindAllString("ssr://"+text, -1)...) 
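// Splitting on the scheme and re-adding the "ssr://" prefix lets ssrPlainRe match links that are embedded in surrounding page text.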
178 | } 179 | return results 180 | } 181 | -------------------------------------------------------------------------------- /pkg/proxy/trojan.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "net" 7 | "net/url" 8 | "regexp" 9 | "strconv" 10 | "strings" 11 | ) 12 | 13 | var ( 14 | ErrorNotTrojanink = errors.New("not a correct trojan link") 15 | ) 16 | 17 | type Trojan struct { 18 | Base 19 | Password string `yaml:"password" json:"password"` 20 | ALPN []string `yaml:"alpn,omitempty" json:"alpn,omitempty"` 21 | SNI string `yaml:"sni,omitempty" json:"sni,omitempty"` 22 | SkipCertVerify bool `yaml:"skip-cert-verify,omitempty" json:"skip-cert-verify,omitempty"` 23 | UDP bool `yaml:"udp,omitempty" json:"udp,omitempty"` 24 | } 25 | 26 | /** 27 | - name: "trojan" 28 | type: trojan 29 | server: server 30 | port: 443 31 | password: yourpsk 32 | # udp: true 33 | # sni: example.com # aka server name 34 | # alpn: 35 | # - h2 36 | # - http/1.1 37 | # skip-cert-verify: true 38 | */ 39 | 40 | func (t Trojan) Identifier() string { 41 | return net.JoinHostPort(t.Server, strconv.Itoa(t.Port)) + t.Password 42 | } 43 | 44 | func (t Trojan) String() string { 45 | data, err := json.Marshal(t) 46 | if err != nil { 47 | return "" 48 | } 49 | return string(data) 50 | } 51 | 52 | func (t Trojan) ToClash() string { 53 | data, err := json.Marshal(t) 54 | if err != nil { 55 | return "" 56 | } 57 | return "- " + string(data) 58 | } 59 | 60 | func (t Trojan) ToSurge() string { 61 | return "" 62 | } 63 | 64 | func (t Trojan) Clone() Proxy { 65 | return &t 66 | } 67 | 68 | // https://p4gefau1t.github.io/trojan-go/developer/url/ 69 | func (t Trojan) Link() (link string) { 70 | query := url.Values{} 71 | if t.SNI != "" { 72 | query.Set("sni", url.QueryEscape(t.SNI)) 73 | } 74 | 75 | uri := url.URL{ 76 | Scheme: "trojan", 77 | User: url.User(url.QueryEscape(t.Password)), 78 | Host: net.JoinHostPort(t.Server, strconv.Itoa(t.Port)), 79 | RawQuery: query.Encode(), 80 | Fragment: t.Name, 81 | } 82 | 83 | return uri.String() 84 | } 85 | 86 | func ParseTrojanLink(link string) (*Trojan, error) { 87 | if !strings.HasPrefix(link, "trojan://") && !strings.HasPrefix(link, "trojan-go://") { 88 | return nil, ErrorNotTrojanink 89 | } 90 | 91 | /** 92 | trojan-go:// 93 | $(trojan-password) 94 | @ 95 | trojan-host 96 | : 97 | port 98 | /? 99 | sni=$(tls-sni.com)& 100 | type=$(original|ws|h2|h2+ws)& 101 | host=$(websocket-host.com)& 102 | path=$(/websocket/path)& 103 | encryption=$(ss;aes-256-gcm;ss-password)& 104 | plugin=$(...) 
105 | #$(descriptive-text) 106 | */ 107 | 108 | uri, err := url.Parse(link) 109 | if err != nil { 110 | return nil, ErrorNotSSLink 111 | } 112 | 113 | password := uri.User.Username() 114 | password, _ = url.QueryUnescape(password) 115 | 116 | server := uri.Hostname() 117 | port, _ := strconv.Atoi(uri.Port()) 118 | 119 | moreInfos := uri.Query() 120 | sni := moreInfos.Get("sni") 121 | sni, _ = url.QueryUnescape(sni) 122 | transformType := moreInfos.Get("type") 123 | transformType, _ = url.QueryUnescape(transformType) 124 | host := moreInfos.Get("host") 125 | host, _ = url.QueryUnescape(host) 126 | path := moreInfos.Get("path") 127 | path, _ = url.QueryUnescape(path) 128 | 129 | alpn := make([]string, 0) 130 | if transformType == "h2" { 131 | alpn = append(alpn, "h2") 132 | } 133 | 134 | if port == 0 { 135 | return nil, ErrorNotTrojanink 136 | } 137 | 138 | return &Trojan{ 139 | Base: Base{ 140 | Name: "", 141 | Server: server, 142 | Port: port, 143 | Type: "trojan", 144 | }, 145 | Password: password, 146 | ALPN: alpn, 147 | SNI: host, 148 | UDP: true, 149 | SkipCertVerify: true, 150 | }, nil 151 | } 152 | 153 | var ( 154 | trojanPlainRe = regexp.MustCompile("trojan(-go)?://([A-Za-z0-9+/_&?=@:%.-])+") 155 | ) 156 | 157 | func GrepTrojanLinkFromString(text string) []string { 158 | results := make([]string, 0) 159 | texts := strings.Split(text, "trojan://") 160 | for _, text := range texts { 161 | results = append(results, trojanPlainRe.FindAllString("trojan://"+text, -1)...) 162 | } 163 | return results 164 | } 165 | -------------------------------------------------------------------------------- /pkg/proxy/vmess.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "fmt" 7 | "math/rand" 8 | "net" 9 | "net/url" 10 | "reflect" 11 | "regexp" 12 | "strconv" 13 | "strings" 14 | 15 | "github.com/Sansui233/proxypool/pkg/tool" 16 | ) 17 | 18 | var ( 19 | ErrorNotVmessLink = errors.New("not a correct vmess link") 20 | ErrorVmessPayloadParseFail = errors.New("vmess link payload parse failed") 21 | ) 22 | 23 | type Vmess struct { 24 | Base 25 | UUID string `yaml:"uuid" json:"uuid"` 26 | AlterID int `yaml:"alterId" json:"alterId"` 27 | Cipher string `yaml:"cipher" json:"cipher"` 28 | Network string `yaml:"network,omitempty" json:"network,omitempty"` 29 | WSPath string `yaml:"ws-path,omitempty" json:"ws-path,omitempty"` 30 | ServerName string `yaml:"servername,omitempty" json:"servername,omitempty"` 31 | WSHeaders map[string]string `yaml:"ws-headers,omitempty" json:"ws-headers,omitempty"` 32 | HTTPOpts HTTPOptions `yaml:"http-opts,omitempty" json:"http-opts,omitempty"` 33 | HTTP2Opts HTTP2Options `yaml:"h2-opts,omitempty" json:"h2-opts,omitempty"` 34 | TLS bool `yaml:"tls,omitempty" json:"tls,omitempty"` 35 | SkipCertVerify bool `yaml:"skip-cert-verify,omitempty" json:"skip-cert-verify,omitempty"` 36 | } 37 | 38 | type HTTPOptions struct { 39 | Method string `yaml:"method,omitempty" json:"method,omitempty"` 40 | Path []string `yaml:"path,omitempty" json:"path,omitempty"` 41 | Headers map[string][]string `yaml:"headers,omitempty" json:"headers,omitempty"` 42 | } 43 | 44 | type HTTP2Options struct { 45 | Host []string `yaml:"host,omitempty" json:"host,omitempty"` 46 | Path string `yaml:"path,omitempty" json:"path,omitempty"` // 暂只处理一个Path 47 | } 48 | 49 | func (v Vmess) Identifier() string { 50 | return net.JoinHostPort(v.Server, strconv.Itoa(v.Port)) + v.Cipher + v.UUID 51 | } 52 | 53 | func (v Vmess) 
String() string { 54 | data, err := json.Marshal(v) 55 | if err != nil { 56 | return "" 57 | } 58 | return string(data) 59 | } 60 | 61 | func (v Vmess) ToClash() string { 62 | data, err := json.Marshal(v) 63 | if err != nil { 64 | return "" 65 | } 66 | return "- " + string(data) 67 | } 68 | 69 | func (v Vmess) ToSurge() string { 70 | // node2 = vmess, server, port, username=, ws=true, ws-path=, ws-headers= 71 | if v.Network == "ws" { 72 | wsHeasers := "" 73 | for k, v := range v.WSHeaders { 74 | if wsHeasers == "" { 75 | wsHeasers = k + ":" + v 76 | } else { 77 | wsHeasers += "|" + k + ":" + v 78 | } 79 | } 80 | text := fmt.Sprintf("%s = vmess, %s, %d, username=%s, ws=true, tls=%t, ws-path=%s", 81 | v.Name, v.Server, v.Port, v.UUID, v.TLS, v.WSPath) 82 | if wsHeasers != "" { 83 | text += ", ws-headers=" + wsHeasers 84 | } 85 | return text 86 | } else { 87 | return fmt.Sprintf("%s = vmess, %s, %d, username=%s, tls=%t", 88 | v.Name, v.Server, v.Port, v.UUID, v.TLS) 89 | } 90 | } 91 | 92 | func (v Vmess) Clone() Proxy { 93 | return &v 94 | } 95 | 96 | func (v Vmess) Link() (link string) { 97 | vjv, err := json.Marshal(v.toLinkJson()) 98 | if err != nil { 99 | return 100 | } 101 | return fmt.Sprintf("vmess://%s", tool.Base64EncodeBytes(vjv)) 102 | } 103 | 104 | type vmessLinkJson struct { 105 | Add string `json:"add"` 106 | V string `json:"v"` 107 | Ps string `json:"ps"` 108 | Port int `json:"port"` 109 | Id string `json:"id"` 110 | Aid string `json:"aid"` 111 | Net string `json:"net"` 112 | Type string `json:"type"` 113 | Host string `json:"host"` 114 | Path string `json:"path"` 115 | Tls string `json:"tls"` 116 | } 117 | 118 | func (v Vmess) toLinkJson() vmessLinkJson { 119 | vj := vmessLinkJson{ 120 | Add: v.Server, 121 | Ps: v.Name, 122 | Port: v.Port, 123 | Id: v.UUID, 124 | Aid: strconv.Itoa(v.AlterID), 125 | Net: v.Network, 126 | Path: v.WSPath, 127 | Host: v.ServerName, 128 | V: "2", 129 | } 130 | if v.TLS { 131 | vj.Tls = "tls" 132 | } 133 | if host, ok := v.WSHeaders["HOST"]; ok && host != "" { 134 | vj.Host = host 135 | } 136 | return vj 137 | } 138 | 139 | func ParseVmessLink(link string) (*Vmess, error) { 140 | if !strings.HasPrefix(link, "vmess") { 141 | return nil, ErrorNotVmessLink 142 | } 143 | 144 | vmessmix := strings.SplitN(link, "://", 2) 145 | if len(vmessmix) < 2 { 146 | return nil, ErrorNotVmessLink 147 | } 148 | linkPayload := vmessmix[1] 149 | if strings.Contains(linkPayload, "?") { 150 | // 使用第二种解析方法 目测是Shadowrocket格式 151 | var infoPayloads []string 152 | if strings.Contains(linkPayload, "/?") { 153 | infoPayloads = strings.SplitN(linkPayload, "/?", 2) 154 | } else { 155 | infoPayloads = strings.SplitN(linkPayload, "?", 2) 156 | } 157 | if len(infoPayloads) < 2 { 158 | return nil, ErrorNotVmessLink 159 | } 160 | 161 | baseInfo, err := tool.Base64DecodeString(infoPayloads[0]) 162 | if err != nil { 163 | return nil, ErrorVmessPayloadParseFail 164 | } 165 | baseInfoPath := strings.Split(baseInfo, ":") 166 | if len(baseInfoPath) < 3 { 167 | return nil, ErrorPathNotComplete 168 | } 169 | // base info 170 | cipher := baseInfoPath[0] 171 | mixInfo := strings.SplitN(baseInfoPath[1], "@", 2) 172 | if len(mixInfo) < 2 { 173 | return nil, ErrorVmessPayloadParseFail 174 | } 175 | uuid := mixInfo[0] 176 | server := mixInfo[1] 177 | portStr := baseInfoPath[2] 178 | port, err := strconv.Atoi(portStr) 179 | if err != nil { 180 | return nil, ErrorVmessPayloadParseFail 181 | } 182 | 183 | moreInfo, _ := url.ParseQuery(infoPayloads[1]) 184 | remarks := moreInfo.Get("remarks") 185 | 186 
| // Transmission protocol 187 | wsHeaders := make(map[string]string) 188 | h2Opt := HTTP2Options{ 189 | Host: make([]string, 0), 190 | } 191 | httpOpt := HTTPOptions{} 192 | 193 | // Network <- obfs=websocket 194 | obfs := moreInfo.Get("obfs") 195 | network := "tcp" 196 | if obfs == "http" { 197 | httpOpt.Method = "GET" // 不知道Headers为空时会不会报错 198 | } 199 | if obfs == "websocket" { 200 | network = "ws" 201 | } else { // when http h2 202 | network = obfs 203 | } 204 | // HTTP Object: Host <- obfsParam=www.036452916.xyz 205 | host := moreInfo.Get("obfsParam") 206 | if host != "" { 207 | switch obfs { 208 | case "websocket": 209 | wsHeaders["Host"] = host 210 | case "h2": 211 | h2Opt.Host = append(h2Opt.Host, host) 212 | } 213 | } 214 | // HTTP Object: Path 215 | path := moreInfo.Get("path") 216 | if path == "" { 217 | path = "/" 218 | } 219 | switch obfs { 220 | case "h2": 221 | h2Opt.Path = path 222 | path = "" 223 | case "http": 224 | httpOpt.Path = append(httpOpt.Path, path) 225 | path = "" 226 | } 227 | 228 | tls := moreInfo.Get("tls") == "1" 229 | if obfs == "h2" { 230 | tls = true 231 | } 232 | // allowInsecure=1 Clash config unsuported 233 | // alterId=64 234 | aid := 0 235 | aidStr := moreInfo.Get("alterId") 236 | if aidStr != "" { 237 | aid, _ = strconv.Atoi(aidStr) 238 | } 239 | 240 | return &Vmess{ 241 | Base: Base{ 242 | Name: remarks + "_" + strconv.Itoa(rand.Int()), 243 | Server: server, 244 | Port: port, 245 | Type: "vmess", 246 | UDP: false, 247 | }, 248 | UUID: uuid, 249 | AlterID: aid, 250 | Cipher: cipher, 251 | TLS: tls, 252 | Network: network, 253 | HTTPOpts: httpOpt, 254 | HTTP2Opts: h2Opt, 255 | WSPath: path, 256 | WSHeaders: wsHeaders, 257 | SkipCertVerify: true, 258 | ServerName: server, 259 | }, nil 260 | } else { 261 | // V2rayN ref: https://github.com/2dust/v2rayN/wiki/%E5%88%86%E4%BA%AB%E9%93%BE%E6%8E%A5%E6%A0%BC%E5%BC%8F%E8%AF%B4%E6%98%8E(ver-2) 262 | payload, err := tool.Base64DecodeString(linkPayload) 263 | if err != nil { 264 | return nil, ErrorVmessPayloadParseFail 265 | } 266 | vmessJson := vmessLinkJson{} 267 | jsonMap, err := str2jsonDynaUnmarshal(payload) 268 | if err != nil { 269 | return nil, err 270 | } 271 | vmessJson, err = mapStrInter2VmessLinkJson(jsonMap) 272 | if err != nil { 273 | return nil, err 274 | } 275 | 276 | alterId, err := strconv.Atoi(vmessJson.Aid) 277 | if err != nil { 278 | alterId = 0 279 | } 280 | tls := vmessJson.Tls == "tls" 281 | 282 | if vmessJson.Net == "h2" { 283 | tls = true 284 | } 285 | 286 | wsHeaders := make(map[string]string) 287 | h2Opt := HTTP2Options{} 288 | httpOpt := HTTPOptions{} 289 | 290 | if vmessJson.Net == "http" { 291 | httpOpt.Method = "GET" // 不知道Headers为空时会不会报错 292 | } 293 | 294 | if vmessJson.Host != "" { 295 | switch vmessJson.Net { 296 | case "h2": 297 | h2Opt.Host = append(h2Opt.Host, vmessJson.Host) // 不知道为空时会不会报错 298 | case "ws": 299 | wsHeaders["HOST"] = vmessJson.Host 300 | } 301 | } 302 | 303 | if vmessJson.Path == "" { 304 | vmessJson.Path = "/" 305 | } 306 | switch vmessJson.Net { 307 | case "h2": 308 | h2Opt.Path = vmessJson.Path 309 | vmessJson.Path = "" 310 | case "http": 311 | httpOpt.Path = append(httpOpt.Path, vmessJson.Path) 312 | vmessJson.Path = "" 313 | } 314 | 315 | return &Vmess{ 316 | Base: Base{ 317 | Name: "", 318 | Server: vmessJson.Add, 319 | Port: vmessJson.Port, 320 | Type: "vmess", 321 | UDP: false, 322 | }, 323 | UUID: vmessJson.Id, 324 | AlterID: alterId, 325 | Cipher: "auto", 326 | Network: vmessJson.Net, 327 | HTTPOpts: httpOpt, 328 | HTTP2Opts: h2Opt, 329 | WSPath: 
vmessJson.Path, 330 | WSHeaders: wsHeaders, 331 | ServerName: vmessJson.Host, 332 | TLS: tls, 333 | SkipCertVerify: true, 334 | }, nil 335 | } 336 | } 337 | 338 | var ( 339 | vmessPlainRe = regexp.MustCompile("vmess://([A-Za-z0-9+/_?&=-])+") 340 | ) 341 | 342 | func GrepVmessLinkFromString(text string) []string { 343 | results := make([]string, 0) 344 | texts := strings.Split(text, "vmess://") 345 | for _, text := range texts { 346 | results = append(results, vmessPlainRe.FindAllString("vmess://"+text, -1)...) 347 | } 348 | return results 349 | } 350 | 351 | func str2jsonDynaUnmarshal(s string) (jsn map[string]interface{}, err error) { 352 | var f interface{} 353 | err = json.Unmarshal([]byte(s), &f) 354 | if err != nil { 355 | return nil, err 356 | } 357 | jsn = f.(interface{}).(map[string]interface{}) // f is pointer point to map struct 358 | if jsn == nil { 359 | return nil, ErrorVmessPayloadParseFail 360 | } 361 | return jsn, err 362 | } 363 | 364 | func mapStrInter2VmessLinkJson(jsn map[string]interface{}) (vmessLinkJson, error) { 365 | vmess := vmessLinkJson{} 366 | var err error 367 | 368 | vmessVal := reflect.ValueOf(&vmess).Elem() 369 | for i := 0; i < vmessVal.NumField(); i++ { 370 | tags := vmessVal.Type().Field(i).Tag.Get("json") 371 | tag := strings.Split(tags, ",") 372 | if jsnVal, ok := jsn[strings.ToLower(tag[0])]; ok { 373 | if strings.ToLower(tag[0]) == "port" { // set int in port 374 | switch jsnVal.(type) { 375 | case float64: 376 | vmessVal.Field(i).SetInt(int64(jsnVal.(float64))) 377 | break 378 | case string: // Force Convert 379 | valInt, err := strconv.Atoi(jsnVal.(string)) 380 | if err != nil { 381 | valInt = 443 382 | } 383 | vmessVal.Field(i).SetInt(int64(valInt)) 384 | break 385 | default: 386 | vmessVal.Field(i).SetInt(443) 387 | } 388 | } else if strings.ToLower(tag[0]) == "ps" { 389 | continue 390 | } else { // set string in other fields 391 | switch jsnVal.(type) { 392 | case string: 393 | vmessVal.Field(i).SetString(jsnVal.(string)) 394 | break 395 | default: // Force Convert 396 | vmessVal.Field(i).SetString(fmt.Sprintf("%v", jsnVal)) 397 | } 398 | } 399 | } 400 | } 401 | return vmess, err 402 | } 403 | -------------------------------------------------------------------------------- /pkg/tool/base64.go: -------------------------------------------------------------------------------- 1 | package tool 2 | 3 | import ( 4 | "encoding/base64" 5 | ) 6 | 7 | // Base64DecodeString decodes base64 string to readable string 8 | func Base64DecodeString(src string) (dst string, err error) { 9 | if src == "" { 10 | return "", nil 11 | } 12 | var dstbytes []byte 13 | dstbytes, err = base64.RawURLEncoding.DecodeString(src) 14 | 15 | if err != nil { 16 | dstbytes, err = base64.RawStdEncoding.DecodeString(src) 17 | } 18 | if err != nil { 19 | dstbytes, err = base64.StdEncoding.DecodeString(src) 20 | } 21 | if err != nil { 22 | dstbytes, err = base64.URLEncoding.DecodeString(src) 23 | } 24 | if err != nil { 25 | return "", err 26 | } 27 | dst = string(dstbytes) 28 | return 29 | } 30 | 31 | func Base64EncodeString(origin string, urlsafe bool) (result string) { 32 | if urlsafe { 33 | return base64.URLEncoding.EncodeToString([]byte(origin)) 34 | } 35 | return base64.StdEncoding.EncodeToString([]byte(origin)) 36 | } 37 | 38 | func Base64EncodeBytes(origin []byte) (result string) { 39 | return base64.StdEncoding.EncodeToString([]byte(origin)) 40 | } 41 | -------------------------------------------------------------------------------- /pkg/tool/cfdecode.go: 
-------------------------------------------------------------------------------- 1 | package tool 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "github.com/robertkrimen/otto" 7 | "io/ioutil" 8 | "regexp" 9 | "strconv" 10 | "strings" 11 | ) 12 | 13 | // Find email payload 14 | func GetCFEmailPayload(str string) string { 15 | s := strings.Split(str, "data-cfemail=") 16 | if len(s) > 1 { 17 | s = strings.Split(s[1], "\"") 18 | str = s[1] 19 | return str 20 | } 21 | return "" 22 | } 23 | 24 | // Remove cloudflare email protection 25 | func CFEmailDecode(a string) (s string, err error) { 26 | if a == "" { 27 | return "", errors.New("CFEmailDecodeError: empty payload to decode") 28 | } 29 | var e bytes.Buffer 30 | r, _ := strconv.ParseInt(a[0:2], 16, 0) 31 | for n := 4; n < len(a)+2; n += 2 { 32 | i, _ := strconv.ParseInt(a[n-2:n], 16, 0) 33 | //e.WriteString(string(i ^ r)) 34 | e.WriteString(string(rune(i ^ r))) 35 | } 36 | return e.String(), nil 37 | } 38 | 39 | // Return the full accessible url from a script-protected url. If not a script url, return the input 40 | func CFScriptRedirect(url string) (string, error) { 41 | resp, err := GetHttpClient().Get(url) 42 | if err != nil { 43 | return url, err 44 | } 45 | defer resp.Body.Close() 46 | body, err := ioutil.ReadAll(resp.Body) 47 | if err != nil { 48 | return url, err 49 | } 50 | strbody := string(body) 51 | if len(strbody) < 7 { 52 | return url, nil 53 | } 54 | if strbody[:7] == "<script" { 55 | js := strings.Split(strbody, "<script>")[1] 56 | js = strings.Split(js, "</script>")[0] 57 | js = ScriptReplace(js, "strdecode") 58 | reUrl, err := ScriptGet(js, "strdecode") 59 | if err != nil { 60 | return url, err 61 | } 62 | if reUrl != "" { 63 | return reUrl, nil 64 | } else { 65 | return url, errors.New("RedirectionError: result from javascript") 66 | } 67 | } 68 | return url, nil 69 | } 70 | 71 | // Get result var of a js script 72 | func ScriptGet(js string, varname string) (string, error) { 73 | vm := otto.New() 74 | _, err := vm.Run(js) 75 | if err != nil { 76 | return "", err 77 | } 78 | if value, err := vm.Get(varname); err == nil { 79 | if v, err := value.ToString(); err == nil { 80 | return v, nil 81 | } 82 | } 83 | return "", err 84 | } 85 | 86 | // Replace location with varname and remove window 87 | func ScriptReplace(js string, varname string) string { 88 | strs := strings.Split(js, ";") 89 | varWindow := "" 90 | varLocation := "" 91 | bound := len(strs) 92 | 93 | if len(js) < 2 { 94 | return js 95 | } 96 | for i, _ := range strs { 97 | //replace location 98 | if varLocation != "" && strings.Contains(strs[i], varLocation) { 99 | re3, err := regexp.Compile(varLocation + ".*?[]]") // _LoKlO[_jzvXT] 100 | if err == nil { 101 | strs[i] = re3.ReplaceAllLiteralString(strs[i], varname) 102 | } 103 | } 104 | if strings.Contains(strs[i], "location") { 105 | strarr := strings.Split(strs[i], " = ") 106 | if len(strarr) >= 2 { // get varname, _jzvXT = location or return '/t' } _qf14P = location 107 | if strarr[len(strarr)-1] == "location" { 108 | index := strings.LastIndex(strs[i], "}") 109 | if index == -1 { 110 | varLocation = strarr[0] 111 | strs[i] = "" 112 | } else { 113 | strs[i] = strs[i][:index+1] 114 | varLocation = strings.Split(strs[i][index+1:], " = ")[0] 115 | varLocation = strings.TrimSpace(varLocation) 116 | } 117 | } 118 | } else { // set varname 119 | re, err := regexp.Compile("location.*?[]]=") // location[_jzvXT]= 120 | if err == nil { 121 | strs[i] = re.ReplaceAllLiteralString(strs[i], varname+"=") 122 | } 123 | re, err = regexp.Compile("location.*?[]]") // location[_jzvXT] 124 | if err == 
nil { 125 | strs[i] = re.ReplaceAllLiteralString(strs[i], varname+"=") 126 | } 127 | strs[i] = strings.ReplaceAll(strs[i], "location.replace = ", varname+"=") 128 | strs[i] = strings.ReplaceAll(strs[i], "location.replace=", varname+"=") 129 | strs[i] = strings.ReplaceAll(strs[i], "location.replace", varname+"=") 130 | strs[i] = strings.ReplaceAll(strs[i], "location.assign = ", varname+"=") 131 | strs[i] = strings.ReplaceAll(strs[i], "location.assign=", varname+"=") 132 | strs[i] = strings.ReplaceAll(strs[i], "location.assign", varname+"=") 133 | strs[i] = strings.ReplaceAll(strs[i], "location.href =", varname+"=") 134 | strs[i] = strings.ReplaceAll(strs[i], "location.href=", varname+"=") 135 | strs[i] = strings.ReplaceAll(strs[i], "location.href", varname+"=") 136 | strs[i] = strings.ReplaceAll(strs[i], "location=", varname+"=") 137 | strs[i] = strings.ReplaceAll(strs[i], "==", varname+"=") 138 | } 139 | } 140 | // remove window 141 | if strings.Contains(strs[i], "window") { 142 | index := strings.LastIndex(strs[i], "}") 143 | if index == -1 { 144 | varWindow = strings.Split(strs[i], " = window")[0] 145 | strs[i] = "" 146 | } else { 147 | varWindow = strings.Split(strs[i][index+1:], " = ")[0] 148 | varWindow = strings.TrimSpace(varWindow) 149 | strs[i] = strs[i][:index+1] 150 | } 151 | } 152 | } 153 | 154 | if varWindow != "" { 155 | for i, _ := range strs { 156 | if strings.Contains(strs[i], varWindow) { 157 | bound = i 158 | break 159 | } 160 | } 161 | } 162 | js = strings.Join(strs[:bound], ";") 163 | return js 164 | } 165 | -------------------------------------------------------------------------------- /pkg/tool/check.go: -------------------------------------------------------------------------------- 1 | package tool 2 | 3 | func CheckInList(list []string, item string) bool { 4 | for _, i := range list { 5 | if item == i { 6 | return true 7 | } 8 | } 9 | return false 10 | } 11 | -------------------------------------------------------------------------------- /pkg/tool/colly.go: -------------------------------------------------------------------------------- 1 | package tool 2 | 3 | import ( 4 | "net" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/gocolly/colly" 9 | ) 10 | 11 | func GetColly() *colly.Collector { 12 | c := colly.NewCollector( 13 | colly.UserAgent(UserAgent), 14 | colly.MaxDepth(6), 15 | ) 16 | c.WithTransport(&http.Transport{ 17 | Proxy: http.ProxyFromEnvironment, 18 | DialContext: (&net.Dialer{ 19 | Timeout: 10 * time.Second, // 超时时间 20 | KeepAlive: 10 * time.Second, // keepAlive 超时时间 21 | }).DialContext, 22 | MaxIdleConns: 100, // 最大空闲连接数 23 | IdleConnTimeout: 20 * time.Second, // 空闲连接超时 24 | TLSHandshakeTimeout: 10 * time.Second, // TLS 握手超时 25 | ExpectContinueTimeout: 10 * time.Second, 26 | }) 27 | return c 28 | } 29 | -------------------------------------------------------------------------------- /pkg/tool/httpclient.go: -------------------------------------------------------------------------------- 1 | package tool 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "time" 7 | ) 8 | 9 | const UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36" 10 | 11 | type HttpClient struct { 12 | *http.Client 13 | } 14 | 15 | var httpClient *HttpClient 16 | 17 | func init() { 18 | httpClient = &HttpClient{http.DefaultClient} 19 | httpClient.Timeout = time.Second * 10 20 | } 21 | 22 | func GetHttpClient() *HttpClient { 23 | c := *httpClient 24 | return &c 25 | } 26 | 27 | func (c *HttpClient) Get(url 
string) (resp *http.Response, err error) { 28 | req, err := http.NewRequest(http.MethodGet, url, nil) 29 | if err != nil { 30 | return nil, err 31 | } 32 | req.Header.Set("Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8") 33 | req.Header.Set("User-Agent", UserAgent) 34 | return c.Do(req) 35 | } 36 | 37 | func (c *HttpClient) Post(url string, body io.Reader) (resp *http.Response, err error) { 38 | req, err := http.NewRequest(http.MethodPost, url, body) 39 | if err != nil { 40 | return nil, err 41 | } 42 | req.Header.Set("Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8") 43 | req.Header.Set("User-Agent", UserAgent) 44 | return c.Do(req) 45 | } 46 | -------------------------------------------------------------------------------- /pkg/tool/option.go: -------------------------------------------------------------------------------- 1 | package tool 2 | 3 | // Options of getters in sources 4 | type Options map[string]interface{} 5 | -------------------------------------------------------------------------------- /pkg/tool/unicode.go: -------------------------------------------------------------------------------- 1 | package tool 2 | 3 | import ( 4 | "regexp" 5 | "unicode" 6 | ) 7 | 8 | var hanRe = regexp.MustCompile("[\u3002\uff1b\uff0c\uff1a\u201c\u201d\uff08\uff09\u3001\uff1f\u300a\u300b]") 9 | 10 | func ContainChineseChar(str string) bool { 11 | for _, r := range str { 12 | if unicode.Is(unicode.Scripts["Han"], r) || (hanRe.MatchString(string(r))) { 13 | return true 14 | } 15 | } 16 | return false 17 | } 18 | --------------------------------------------------------------------------------
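A minimal usage sketch for the pkg/proxy types shown above (not part of the repository; the node values below are made up purely for illustration):

package main

import (
	"fmt"

	"github.com/Sansui233/proxypool/pkg/proxy"
)

func main() {
	// A made-up Shadowsocks node, for illustration only.
	ss := &proxy.Shadowsocks{
		Base:     proxy.Base{Name: "demo", Server: "127.0.0.1", Port: 8388, Type: "ss"},
		Cipher:   "aes-256-cfb",
		Password: "password",
	}

	ps := make(proxy.ProxyList, 0)
	ps = ps.UniqAppendProxy(ss)
	ps = ps.UniqAppendProxy(ss) // same Identifier(), so the list is unchanged

	fmt.Println(ps.Len(), ps.TypeLen("ss")) // 1 1
	fmt.Println(ss.Link())                  // ss://<base64 of cipher:password@server:port>#demo
	fmt.Println(ss.ToClash())               // "- {...}" JSON entry usable in a clash proxy list
}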