If you see this page, the Caddy container works.
--------------------------------------------------------------------------------
/caddy/Caddyfile.template.example:
--------------------------------------------------------------------------------
1 | {{/* THIS FILE IS DEPRECATED! Use Caddyfile.template.p2 instead */}}
2 | # This Caddyfile is generated by sjtug/lug's bundled script
3 | # DO NOT EDIT manually!
4 | # Instead, change Caddyfile.template and regenerate Caddyfile
5 |
6 | {{/* input data source should be named as "cfg": -d cfg=config.yaml */}}
7 | {{ $cfg := (ds "cfg") }}
8 |
9 | {{/* address of lug backend */}}
10 | {{ $lug_addr := "lug:7001" }}
11 |
12 | {{ $base := "mirrors.sjtug.sjtu.edu.cn" }}
13 |
14 | {{/* configure methods to protect your admin API */}}
15 | {{ define "login_config" }}
16 | {{/* by default this uses Github OAuth, change it to your needs! */}}
17 | {{/* the sample OAuth application only allows redirection to 127.0.0.1:2015, so register your own OAuth App! */}}
18 | github client_id=enter_your_id,client_secret=enter_your_secret
19 | jwt_expiry 24h
20 | cookie_expiry 2400h
21 | {{ end }}
22 |
23 | {{ define "jwt_config" }}
24 | {{/* only allow username=htfy96 */}}
25 | allow sub htfy96
26 | {{ end }}
27 |
28 | {{ define "tls_certkey" }} {{ end }}
29 |
30 | {{define "serve_local_common_config"}}
31 | log stdout
32 | {{ end }}
33 |
34 | {{ define "reverse_proxy_common_config" }}
35 | log stdout
36 | ratelimit * / 200 400 minute
37 | {{ end }}
38 |
39 | {{ define "reverse_proxy_common_proxy_config" }}
40 | max_conns 100
41 | header_upstream X-Real-IP {remote}
42 | header_upstream X-Forwarded-For {remote}
43 | header_upstream X-Forwarded-Proto {scheme}
44 | {{ end }}
45 |
46 | # Prometheus
47 | # Exposed at :9180
48 | {{$base}}/ {
49 | prometheus {
50 | address 0.0.0.0:9180
51 | }
52 |
53 | cors
54 |
55 | root /mirror-frontend
56 | {{ template "serve_local_common_config" }}
57 |
58 | # TLS
59 | # tls {{ template "tls_certkey" }}
60 |
61 | ratelimit * /lug 4 8 second
62 | # API
63 | proxy /lug/ {{$lug_addr}} {
64 | {{ template "reverse_proxy_common_proxy_config" }}
65 | }
66 |
67 | jwt {
68 | path /lug/v1/admin
69 | {{ template "jwt_config" }}
70 | }
71 |
72 | login {
73 | {{ template "login_config" }}
74 | }
75 | gzip
76 |
77 | proxy /git http://git-http-backend {
78 | }
79 | }
80 |
81 | {{ range $name, $worker := $cfg.repos }}
82 | # worker {{$name}}
83 | {{ if and (and (index $worker "name") (index $worker "path")) (not (index $worker "no_direct_serving")) }} {{/* specify name and path */}}
84 | {{$base}}/{{$worker.name}} {
85 | root {{$worker.path}}
86 | browse
87 | {{ template "serve_local_common_config" }}
88 | }
89 |
90 | {{ if index $worker "subdomain" }}
91 | {{$worker.subdomain}}.{{$base}}/ {
92 | root {{$worker.path}}
93 | browse
94 | {{ template "serve_local_common_config" }}
95 | }
96 | {{ end }} {{/* if index $worker subdomain */}}
97 | {{ end }}
98 |
99 | {{ if and (index $worker "name") (index $worker "proxy_to") }} {{/* reverse_proxy */}}
100 | {{$base}}/{{$worker.name}} {
101 | # tls {{ template "tls_certkey" }}
102 | proxy / {{$worker.proxy_to}} {
103 | without /{{$worker.name}}
104 | {{ template "reverse_proxy_common_proxy_config" }}
105 | }
106 |
107 | {{ if or (index $worker "filter_path") (index $worker "filter_content_type") }}
108 | filter rule {
109 | {{ if index $worker "filter_path" }} path {{$worker.filter_path}} {{ end }}
110 | {{ if index $worker "filter_content_type" }} content_type {{$worker.filter_content_type}} {{ end }}
111 | search_pattern {{$worker.filter_search_pattern}}
112 | replacement "{{$worker.filter_replacement}}"
113 | }
114 | {{ end }} {{/* if filters */}}
115 |
116 | {{ template "reverse_proxy_common_config" }}
117 | }
118 |
119 | {{ if index $worker "subdomain" }}
120 | {{$worker.subdomain}}.{{$base}}/ {
121 | # tls {{ template "tls_certkey" }}
122 | proxy / {{$worker.proxy_to}} {
123 | without /{{$worker.name}}
124 | {{ template "reverse_proxy_common_proxy_config" }}
125 | }
126 | {{ if or (index $worker "filter_path") (index $worker "filter_content_type") }}
127 | filter rule {
128 | {{ if index $worker "filter_path" }} path {{$worker.filter_path}} {{ end }}
129 | {{ if index $worker "filter_content_type" }} content_type {{$worker.filter_content_type}} {{ end }}
130 | search_pattern {{$worker.filter_search_pattern}}
131 | replacement "{{$worker.filter_replacement}}"
132 | }
133 | {{ end }} {{/* if filters */}}
134 | {{ template "reverse_proxy_common_config" }}
135 | }
136 | {{ end }} {{/* subdomain */}}
137 | {{ end }} {{/* if $worker */}}
138 | {{ end }} {{/* range */}}
139 |
140 |
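The deprecated template above was rendered with the bundled gomplate binary rather than p2. A sketch of the historical invocation, assuming gomplate's standard -d/-f/-o flags and a copy of the template saved as Caddyfile.template (the data source must be named cfg, per the comment in the template):

```
./gomplate -d cfg=../lug/config.yaml -f Caddyfile.template -o Caddyfile
```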
--------------------------------------------------------------------------------
/caddy/Caddyfile.template.p2.example:
--------------------------------------------------------------------------------
1 | {% autoescape off %}
2 | {% with lug_addr="lug:7001" base="mirrors.sjtug.sjtu.edu.cn" %}
3 |
4 | {% macro login_config() %}
5 | {% comment %}
6 | by default this uses Github OAuth, change it to your needs!
7 | the sample OAuth application only allows redirection to 127.0.0.1:2015, so register your own OAuth App!
8 | {% endcomment %}
9 | github client_id=enter_your_id,client_secret=enter_your_secret
10 | jwt_expiry 24h
11 | cookie_expiry 2400h
12 | {% endmacro %}
13 |
14 | {% macro jwt_config() %}
15 | {% comment %}
16 | only allow username=htfy96
17 | {% endcomment %}
18 | allow sub htfy96
19 | {% endmacro %}
20 |
21 |
22 | {% macro tls_config() %}
23 | {% endmacro %}
24 |
25 | {% macro common_config() %}
26 | log stdout
27 | {{ tls_config() }}
28 | {% endmacro %}
29 |
30 | {% macro filter_rules(worker) %}
31 | {% if worker.filter_path or worker.filter_content_type %}
32 | filter rule {
33 | {% if worker.filter_path %} path {{ worker.filter_path }} {% endif %}
34 | {% if worker.filter_content_type %} content_type {{ worker.filter_content_type }} {% endif %}
35 | search_pattern {{ worker.filter_search_pattern }}
36 | replacement "{{worker.filter_replacement | addslashes }}"
37 | }
38 | {% endif %}
39 | {% endmacro %}
40 |
41 | {% macro reverse_proxy_common_rules() %}
42 | max_conns 100
43 | header_upstream X-Real-IP {remote}
44 | header_upstream X-Forwarded-For {remote}
45 | header_upstream X-Forwarded-Proto {scheme}
46 | {% endmacro %}
47 |
48 | {% macro reverse_proxy_config(worker) %}
49 | {{ common_config() }}
50 | proxy / {{ worker.proxy_to }} {
51 | without /{{ worker.name }}
52 | {{ reverse_proxy_common_rules() }}
53 | }
54 | ratelimit * / 200 400 minute
55 | {{ filter_rules(worker) }}
56 | {% endmacro %}
57 |
58 | {% macro local_config(worker) %}
59 | {{ common_config() }}
60 | root {{ worker.path }}
61 | browse
62 | {{ filter_rules(worker) }}
63 | {% endmacro %}
64 |
65 | {% macro gen_sites(worker) %}
66 | {% if worker.name and worker.path and not worker.no_direct_serving %}
67 | {{ base }}/{{ worker.name }} {
68 | {{ local_config(worker) }}
69 | }
70 | {% if worker.subdomain %}
71 | {{ worker.subdomain }}.{{ base }}/ {
72 | {{ local_config(worker) }}
73 | }
74 | {% endif %}
75 | {% endif %}
76 |
77 | {% if worker.name and worker.proxy_to %}
78 | {{ base }}/{{ worker.name }} {
79 | {{ reverse_proxy_config(worker) }}
80 | }
81 | {% if worker.subdomain %}
82 | {{ worker.subdomain }}.{{ base }}/ {
83 | {{ reverse_proxy_config(worker) }}
84 | }
85 | {% endif %}
86 | {% endif %}
87 | {% endmacro %}
88 |
89 | {{ base }}/ {
90 | prometheus {
91 | address 0.0.0.0:9180
92 | }
93 | cors
94 | root /mirror-frontend
95 | {{ common_config() }}
96 | ratelimit * /lug 40 80 second
97 |
98 | proxy /lug {{ lug_addr }} {
99 | {{ reverse_proxy_common_rules() }}
100 | }
101 |
102 | jwt {
103 | path /lug/v1/admin
104 | {{ jwt_config() }}
105 | }
106 |
107 | login {
108 | {{ login_config() }}
109 | }
110 |
111 | gzip
112 |
113 | proxy /git http://git-http-backend {
114 | {{ reverse_proxy_common_rules() }}
115 | }
116 | }
117 |
118 | {% for worker in repos %}
119 | {{ gen_sites(worker) }}
120 | {% endfor %}
121 |
122 | {% endwith %}
123 | {% endautoescape %}
124 |
--------------------------------------------------------------------------------
/caddy/gen_caddyfile.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | BASEDIR=$(dirname "$0")
6 |
7 | if [ "$#" -ne 2 ]; then
8 | echo 'Usage:'
9 | echo ' ./gen_caddyfile.sh [[config.yaml]] [[outputCaddyfile]]'
10 |   echo ' Output file will be written to [[outputCaddyfile]]'
11 | echo 'Example:'
12 | echo ' ./gen_caddyfile.sh config.example.yaml Caddyfile'
13 | exit 1
14 | fi
15 |
16 | echo "Input: $1"
17 | echo "Output: $2"
18 |
19 | "${BASEDIR}/p2" -t "${BASEDIR}/Caddyfile.template.p2" -i "$1" -o "$2"
20 |
--------------------------------------------------------------------------------
/caddy/gomplate:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjtug/mirror-docker/e5a9df42d9d2daf92c5acdefb37641c16ff0879a/caddy/gomplate
--------------------------------------------------------------------------------
/caddy/p2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjtug/mirror-docker/e5a9df42d9d2daf92c5acdefb37641c16ff0879a/caddy/p2
--------------------------------------------------------------------------------
/caddy/reload-caddy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | docker kill -s USR1 caddy # USR1 makes Caddy v1 gracefully reload its Caddyfile
4 |
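Together with gen_caddyfile.sh, a typical configuration rollout looks like the sketch below (run from the caddy/ directory; it assumes the Caddyfile is bind-mounted into the container as in docker-compose.yml.example):

```
./gen_caddyfile.sh ../lug/config.yaml Caddyfile   # re-render from lug's config
./reload-caddy.sh                                 # signal caddy to pick up the new Caddyfile
```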
--------------------------------------------------------------------------------
/doc-assets/arch.dot:
--------------------------------------------------------------------------------
1 | digraph {
2 | compound=true;
3 | concentrate=true;
4 | subgraph clusterProxyServer {
5 | label="Proxy Server";
6 | "v2ray_server";
7 | graph[style=solid];
8 | }
9 |
10 | subgraph "cluster TencentCloud CDN for sjtug.org" {
11 | label="TencentCloud CDN for sjtug.org";
12 | mirror_news [label="/tags/mirror-news/index.xml" fontname="Courier New"];
13 | mirror_help [label="/tags/mirror-help/index.xml" fontname="Courier New"];
14 | }
15 |
16 | subgraph clusterReverseProxyUpstream {
17 | label="Reverse Proxy Upstream";
18 | style=dotted;
19 | reverse_proxy_upstream;
20 | }
21 |
22 | subgraph clusterRsyncUpstream {
23 | label="Rsync Upstream";
24 | style=dotted;
25 | rsync_upstream;
26 | }
27 |
28 | subgraph clusterMirrorDocker {
29 | label="mirror-docker";
30 | lug; caddy; v2ray; "git-http-backend";
31 | disk_storage [shape=house];
32 | v2ray -> "v2ray_server";
33 | lug -> v2ray [label="proxy from"];
34 | caddy -> v2ray [label="proxy from"];
35 | lug -> disk_storage [label="stores packages at"];
36 | lug -> rsync_upstream [label="pull packages from"];
37 | caddy -> disk_storage [label="serves packages & frontend at"];
38 | caddy -> reverse_proxy_upstream [label="reverse proxies"];
39 | caddy -> lug [label="proxies /lug/v1/api"];
40 | caddy -> "git-http-backend" [label="proxies /git"];
41 | "git-http-backend" -> disk_storage [label="serves git repo at"];
42 | graph[style=solid];
43 | }
44 |
45 | subgraph clusterMirrorUser {
46 | label="User of mirrors";
47 | style=dotted;
48 | browser[label="browser visiting https://mirrors.sjtug.org"];
49 | browser -> caddy [label="get frontend & packages"];
50 | browser -> mirror_news [label="get mirror news"];
51 | browser -> mirror_help [label="get repo helps"];
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/doc-assets/config-dependency.dot:
--------------------------------------------------------------------------------
1 | digraph {
2 | rankdir=LR
3 | node [shape=box];
4 |     lug; caddy; v2ray; "git-http-backend";
5 | node [shape=oval, fontname="Courier New"];
6 | lug -> "lug/config.yaml";
7 | lug -> "lug/worker-script";
8 | lug -> "/certs" [style="dotted" label="containing certificates, mounted at"];
9 | lug -> "/mnt" [style="dotted", label="underlying storage, mounted at"];
10 | caddy -> "caddy/Caddyfile";
11 | caddy -> "/mnt" [style="dotted", label="mounted at"];
12 | "caddy/Caddyfile" -> "lug/config.yaml" [label="gen_caddyfile.sh ../lug/config.yaml Caddyfile", fontname="Courier New"];
13 | v2ray -> "v2ray/config.json";
14 |     "git-http-backend" -> "/git" [style="dotted", label="mounted at"];
15 | }
16 |
--------------------------------------------------------------------------------
/doc-assets/gen.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
4 |
5 | for filepath in "$DIR"/*.dot; do
6 |     filename="$(basename "$filepath")"
7 |     echo "Generating $filename"
8 |     dot -T png -o "$DIR/images/${filename%.*}.png" "$filepath"
9 | done
10 |
--------------------------------------------------------------------------------
/doc-assets/images/arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjtug/mirror-docker/e5a9df42d9d2daf92c5acdefb37641c16ff0879a/doc-assets/images/arch.png
--------------------------------------------------------------------------------
/doc-assets/images/config-dependency.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjtug/mirror-docker/e5a9df42d9d2daf92c5acdefb37641c16ff0879a/doc-assets/images/config-dependency.png
--------------------------------------------------------------------------------
/docker-compose.yml.example:
--------------------------------------------------------------------------------
1 | version: "2.3"
2 |
3 | services:
4 | v2ray:
5 | image: v2ray/official
6 | volumes:
7 | - ./v2ray/config.json:/etc/v2ray/config.json
8 | networks:
9 | - proxy_net
10 | - git_net
11 | container_name: v2ray
12 | logging:
13 | options:
14 | max-size: "2M"
15 | max-file: "10"
16 | mem_limit: 2G
17 | memswap_limit: 2G
18 | cpu_count: 2
19 | restart: always
20 | environment:
21 | V2RAY_RAY_BUFFER_SIZE: 5
22 |
23 | git-http-backend:
24 | build:
25 | context: ./git-http-backend
26 | volumes:
27 | - /mnt/data12T/git:/git
28 | container_name: git-http-backend
29 | logging:
30 | options:
31 | max-size: "2M"
32 | max-file: "10"
33 | mem_limit: 1G
34 | memswap_limit: 1G
35 | cpu_count: 4
36 | cpu_shares: 500
37 | restart: always
38 | networks:
39 | - git_net
40 |
41 | lug:
42 | build:
43 | context: ./lug
44 | image: lug:built
45 | environment:
46 | http_proxy: http://v2ray:8080
47 | https_proxy: http://v2ray:8080
48 | RSYNC_PROXY: v2ray:8080
49 | container_name: lug
50 | volumes:
51 | - ./lug/config.yaml:/config.yaml
52 | - ./lug/worker-script:/worker-script
53 | - /mnt/data12T:/mnt
54 | command: -c /config.yaml
55 | ports:
56 | - "8081:8081"
57 | ulimits:
58 | nofile:
59 | soft: 20000
60 | hard: 40000
61 | nproc:
62 | hard: 1024
63 | soft: 1024
64 | networks:
65 | - proxy_net
66 | - lug_net
67 | logging:
68 | options:
69 | max-size: "2M"
70 | max-file: "10"
71 | mem_limit: 10G
72 | memswap_limit: 10G
73 | cpu_count: 4
74 | restart: always
75 |
76 | caddy:
77 | build:
78 | context: ./caddy-builder/
79 | image: caddy:built
80 | environment:
81 | http_proxy: http://v2ray:8080
82 | https_proxy: http://v2ray:8080
83 | RSYNC_PROXY: v2ray:8080
84 | container_name: caddy
85 | command: -conf /etc/Caddyfile -catimeout 30s -email vicluo96@gmail.com -agree --log stdout -port 443
86 | volumes:
87 | - ./caddy/Caddyfile:/etc/Caddyfile
88 | - /mnt/data12T:/mnt
89 | - /root/.acme.sh/mirrors.sjtug.org:/certs
90 | - ./frontend/dist:/mirror-frontend
91 | ports:
92 | - "80:80"
93 | - "443:443"
94 | - "9180:9180"
95 | logging:
96 | options:
97 | max-size: "2M"
98 | max-file: "10"
99 | ulimits:
100 | nofile:
101 | soft: 20000
102 | hard: 40000
103 | networks:
104 | - proxy_net
105 | - lug_net
106 | - git_net
107 | mem_limit: 5G
108 | memswap_limit: 5G
109 | cpu_count: 8
110 | restart: always
111 |
112 | networks:
113 | proxy_net:
114 | lug_net:
115 | git_net:
116 |
117 |
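A sketch of bringing the stack up from this example; the host paths (/mnt/data12T, /root/.acme.sh/...) are site-specific and must exist or be adjusted first:

```
cp docker-compose.yml.example docker-compose.yml
cp lug/config.example.yaml lug/config.yaml
cp caddy/Caddyfile.template.p2.example caddy/Caddyfile.template.p2
./caddy/gen_caddyfile.sh lug/config.yaml caddy/Caddyfile
docker-compose up -d --build
```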
--------------------------------------------------------------------------------
/frontend/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjtug/mirror-docker/e5a9df42d9d2daf92c5acdefb37641c16ff0879a/frontend/.gitkeep
--------------------------------------------------------------------------------
/frontend/download_latest.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | LATEST_URL=$(curl -s https://api.github.com/repos/sjtug/sjtug-mirror-frontend/releases/latest | jq '.assets[0].browser_download_url' -r)
6 | wget -O /tmp/dists.zip "$LATEST_URL"
7 | unzip -o -d . /tmp/dists.zip
8 |
--------------------------------------------------------------------------------
/git-http-backend/Dockerfile:
--------------------------------------------------------------------------------
1 | # small is beautiful
2 | FROM debian:9
3 |
4 | MAINTAINER Anthony Hogg anthony@hogg.fr
5 |
6 | # The container listens on port 80, map as needed
7 | EXPOSE 80
8 |
9 | # This is where the repositories will be stored, and
10 | # should be mounted from the host (or a volume container)
11 | VOLUME ["/git"]
12 |
13 | # We need the following:
14 | # - git, because that gets us the git-http-backend CGI script
15 | # - fcgiwrap, because that is how nginx does CGI
16 | # - spawn-fcgi, to launch fcgiwrap and to create the unix socket
17 | # - nginx, because it is our frontend
18 | RUN apt-get update && \
19 | apt-get install -yy nginx git fcgiwrap spawn-fcgi
20 |
21 | COPY nginx.conf /etc/nginx/nginx.conf
22 |
23 | # tune git defaults for serving large repositories
24 | RUN git config --global pack.threads 4 && \
25 |     git config --global pack.windowMemory 512m && \
26 |     git config --global core.compression 1 && \
27 |     git config --global uploadpack.allowReachableSHA1InWant true && \
28 |     git config --global core.bare true
29 |
30 | # launch fcgiwrap via spawn-fcgi; launch nginx in the foreground
31 | # so the container doesn't die on us; supposedly we should be
32 | # using supervisord or something like that instead, but this
33 | # will do
34 | CMD spawn-fcgi -s /run/fcgi.sock /usr/sbin/fcgiwrap && \
35 | nginx -g "daemon off;"
36 |
--------------------------------------------------------------------------------
/git-http-backend/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Anthony Hogg
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/git-http-backend/README.md:
--------------------------------------------------------------------------------
1 | A dead simple git smart-http server using nginx as a frontend. No authentication, no SSL, push is free-for-all.
2 |
3 | > _caveat emptor_ this is not intended for production use
4 |
5 | Usage:
6 |
7 | ```
8 | docker run -d -p 4080:80 -v /path/to/host/gitdir:/git ynohat/git-http-backend
9 | ```
10 |
11 | Unauthenticated push will not work unless you enable it in repositories:
12 |
13 | ```
14 | cd /path/to/host/gitdir
15 | git init --bare test.git
16 | cd test.git
17 | git config http.receivepack true
18 | ```
19 |
--------------------------------------------------------------------------------
/git-http-backend/nginx.conf:
--------------------------------------------------------------------------------
1 | worker_processes 4;
2 |
3 | error_log stderr;
4 | pid /run/nginx.pid;
5 | user root;
6 |
7 | events {
8 | worker_connections 1024;
9 | }
10 |
11 | http {
12 | server {
13 | listen *:80;
14 |
15 | root /www/empty/;
16 | index index.html;
17 |
18 | server_name _;
19 | access_log /dev/stdout;
20 |
21 | #error_page 404 /404.html;
22 |
23 | #auth_basic "Restricted";
24 | #auth_basic_user_file /www/htpasswd;
25 |
26 |         location ~ ^.*\.git/objects/([0-9a-f]+/[0-9a-f]+|pack/pack-[0-9a-f]+\.(pack|idx))$ {
27 | root /git;
28 | }
29 | location ~ /git(/.*) {
30 | # Set chunks to unlimited, as the bodies can be huge
31 | client_max_body_size 0;
32 |
33 | fastcgi_param SCRIPT_FILENAME /usr/lib/git-core/git-http-backend;
34 | include fastcgi_params;
35 | fastcgi_param GIT_HTTP_EXPORT_ALL "";
36 | fastcgi_param GIT_PROJECT_ROOT /git;
37 | fastcgi_param PATH_INFO $1;
38 | fastcgi_param GIT_HTTP_MAX_REQUEST_BUFFER "100M";
39 |
40 | # Forward REMOTE_USER as we want to know when we are authenticated
41 | fastcgi_param REMOTE_USER $remote_user;
42 | fastcgi_pass unix:/run/fcgi.sock;
43 | }
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/lug/.dockerignore:
--------------------------------------------------------------------------------
1 | **
2 | !Dockerfile
3 |
--------------------------------------------------------------------------------
/lug/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM htfy96/lug:release-0.12.2
2 |
3 | RUN apt-get update && apt-get install -y rsync awscli python3-pip python wget git jq
4 | RUN pip3 install bandersnatch
5 | RUN wget -O gcloud.tar.gz https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-183.0.0-linux-x86_64.tar.gz?hl=zh-cn && tar xavf gcloud.tar.gz && rm -rf gcloud.tar.gz && ./google-cloud-sdk/install.sh
6 |
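docker-compose builds this image as lug:built; to build and smoke-test it by hand (a sketch, assuming the lug binary is the image entrypoint, as the compose `command:` field implies):

```
docker build -t lug:built ./lug
docker run --rm -v "$PWD/lug/config.yaml:/config.yaml" lug:built -c /config.yaml
```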
--------------------------------------------------------------------------------
/lug/config.example.yaml:
--------------------------------------------------------------------------------
1 | interval: 10 # Interval between polls, in seconds
2 | loglevel: 4 # 1-5
3 | concurrent_limit: 5
4 | # Prometheus metrics are exposed at http://exporter_address/metrics
5 | exporter_address: :8081
6 |
7 | logstash:
8 | address: listener.logz.io:5050 # logstash sink. Lug will send all logs to this address
9 | additional_fields:
10 | token: "" # Additional fields sent to logstash server
11 |
12 | # Address where JSON API will be served
13 | json_api:
14 | address: :7001
15 |
16 | repos:
17 | - type: shell_script
18 | script: /worker-script/rsync.sh
19 | source: rsync://rsync.chiark.greenend.org.uk/ftp/users/sgtatham/putty-website-mirror/
20 | interval: 3600
21 | path: /mnt/putty
22 | name: putty
23 | - type: shell_script
24 | name: archlinux-cn
25 | interval: 6000
26 | path: /mnt/archlinux-cn
27 | script: /worker-script/archlinux-cn.sh
28 | username: abc
29 | password: todo
30 | - type: shell_script
31 | script: /worker-script/rsync.sh
32 | source: rsync://rsync11.us.gentoo.org/gentoo-portage/
33 | interval: 3600
34 | path: /mnt/gentoo
35 | name: gentoo
36 | - type: shell_script
37 | script: /worker-script/rsync.sh
38 | source: rsync://ftp.ca.vim.org/vim
39 | interval: 3600
40 | path: /mnt/vim
41 | name: vim
42 | - type: shell_script
43 | script: /worker-script/rsync.sh
44 | source: rsync://ftp.tsukuba.wide.ad.jp/manjaro
45 | interval: 3600
46 | path: /mnt/manjaro
47 | name: manjaro
48 | exclude_hidden: true
49 | - type: shell_script
50 | script: /worker-script/rsync.sh
51 | source: rsync://rsync.alpinelinux.org/alpine/
52 | interval: 3600
53 | path: /mnt/alpine
54 | name: alpine
55 | - type: shell_script
56 | script: /worker-script/rsync.sh
57 | source: rsync://rsync.dante.ctan.org/CTAN
58 | interval: 86400
59 | path: /mnt/ctan
60 | name: ctan
61 | exclude_hidden: true
62 | - type: shell_script
63 | script: /worker-script/rsync.sh
64 | source: cpan-rsync.perl.org::CPAN
65 | interval: 3600
66 | path: /mnt/cpan
67 | name: cpan
68 | - type: shell_script
69 | script: /worker-script/rsync.sh
70 | source: rsync://cran.csie.ntu.edu.tw/CRAN/CRAN/
71 | interval: 3600
72 | path: /mnt/cran
73 | name: cran
74 | ignore_vanish: true
75 | - type: shell_script
76 | script: /worker-script/rsync.sh
77 | source: rsync://mirrors.tuna.tsinghua.edu.cn/ctex/
78 | interval: 3600
79 | path: /mnt/ctex
80 | name: ctex
81 | - type: shell_script
82 | script: /worker-script/rsync.sh
83 | source: rsync://mirrors.ocf.berkeley.edu/gnu/
84 | interval: 43200
85 | path: /mnt/gnu
86 | name: gnu
87 | - type: shell_script
88 | script: /worker-script/rsync.sh
89 | source: rsync://mirror.steadfast.net/cygwin/
90 | interval: 3600
91 | path: /mnt/cygwin
92 | name: cygwin
93 | - type: shell_script
94 | script: /worker-script/rsync.sh
95 | source: rsync.osuosl.org::ros-main
96 | interval: 3600
97 | path: /mnt/ros
98 | mirror_path: /mnt/ros/ubuntu
99 | name: ros
100 | - type: shell_script
101 | script: /worker-script/rsync.sh
102 | source: rsync://ftp.nluug.nl/kali/
103 | interval: 10800
104 | path: /mnt/kali
105 | name: kali
106 | - type: shell_script
107 | script: /worker-script/rsync.sh
108 | source: rsync://ftp.nluug.nl/kali-images/
109 | interval: 10800
110 | path: /mnt/kali-images
111 | name: kali-images
112 | - type: shell_script
113 | script: /worker-script/bandersnatch.sh
114 | master: https://pypi.python.org/
115 | path: /mnt/pypi
116 | name: pypi
117 | interval: 3600
118 | retry_on_ssl_fail: true
119 | - type: shell_script
120 | script: /worker-script/anaconda.py
121 | path: /mnt/anaconda/
122 | name: anaconda
123 | interval: 3600
124 | thread_num: 80
125 | pkgs/main/linux-64: https://repo.continuum.io/pkgs/main/linux-64/
126 | pkgs/main/linux-32: https://repo.continuum.io/pkgs/main/linux-32/
127 | pkgs/main/linux-ppc64le: https://repo.continuum.io/pkgs/main/linux-ppc64le/
128 | pkgs/main/osx-64: https://repo.continuum.io/pkgs/main/osx-64/
129 | pkgs/main/win-64: https://repo.continuum.io/pkgs/main/win-64/
130 | pkgs/main/win-32: https://repo.continuum.io/pkgs/main/win-32/
131 | pkgs/main/noarch: https://repo.continuum.io/pkgs/main/noarch/
132 | pkgs/free/linux-64: https://repo.continuum.io/pkgs/free/linux-64/
133 | pkgs/free/linux-32: https://repo.continuum.io/pkgs/free/linux-32/
134 | pkgs/free/linux-armv6l: https://repo.continuum.io/pkgs/free/linux-armv6l/
135 | pkgs/free/linux-armv7l: https://repo.continuum.io/pkgs/free/linux-armv7l/
136 | pkgs/free/linux-ppc64le: https://repo.continuum.io/pkgs/free/linux-ppc64le/
137 | pkgs/free/osx-64: https://repo.continuum.io/pkgs/free/osx-64/
138 | pkgs/free/osx-32: https://repo.continuum.io/pkgs/free/osx-32/
139 | pkgs/free/win-64: https://repo.continuum.io/pkgs/free/win-64/
140 | pkgs/free/win-32: https://repo.continuum.io/pkgs/free/win-32/
141 | pkgs/free/noarch: https://repo.continuum.io/pkgs/free/noarch/
142 | pkgs/r/linux-64: https://repo.continuum.io/pkgs/r/linux-64/
143 | pkgs/r/linux-32: https://repo.continuum.io/pkgs/r/linux-32/
144 | pkgs/r/linux-armv6l: https://repo.continuum.io/pkgs/r/linux-armv6l/
145 | pkgs/r/linux-armv7l: https://repo.continuum.io/pkgs/r/linux-armv7l/
146 | pkgs/r/linux-ppc64le: https://repo.continuum.io/pkgs/r/linux-ppc64le/
147 | pkgs/r/osx-64: https://repo.continuum.io/pkgs/r/osx-64/
148 | pkgs/r/osx-32: https://repo.continuum.io/pkgs/r/osx-32/
149 | pkgs/r/win-64: https://repo.continuum.io/pkgs/r/win-64/
150 | pkgs/r/win-32: https://repo.continuum.io/pkgs/r/win-32/
151 | pkgs/r/noarch: https://repo.continuum.io/pkgs/r/noarch/
152 | pkgs/msys2/win-64: https://repo.continuum.io/pkgs/msys2/win-64/
153 | pkgs/msys2/win-32: https://repo.continuum.io/pkgs/msys2/win-32/
154 | cloud/conda-forge/noarch: https://conda.anaconda.org/conda-forge/noarch/
155 | cloud/conda-forge/linux-64: https://conda.anaconda.org/conda-forge/linux-64/
156 | cloud/conda-forge/osx-64: https://conda.anaconda.org/conda-forge/osx-64/
157 | cloud/conda-forge/win-64: https://conda.anaconda.org/conda-forge/win-64/
158 | cloud/bioconda/noarch: https://conda.anaconda.org/bioconda/noarch/
159 | cloud/bioconda/linux-64: https://conda.anaconda.org/bioconda/linux-64/
160 | cloud/bioconda/osx-64: https://conda.anaconda.org/bioconda/osx-64/
161 | cloud/pytorch/linux-64: https://conda.anaconda.org/pytorch/linux-64/
162 | cloud/pytorch/osx-64: https://conda.anaconda.org/pytorch/osx-64/
163 | cloud/pytorch/noarch: https://conda.anaconda.org/pytorch/noarch/
164 | cloud/pytorch/win-64: https://conda.anaconda.org/pytorch/win-64/
165 | cloud/ngsolve/noarch: https://conda.anaconda.org/ngsolve/noarch/
166 | cloud/ngsolve/linux-64: https://conda.anaconda.org/ngsolve/linux-64/
167 | cloud/ngsolve/osx-64: https://conda.anaconda.org/ngsolve/osx-64/
168 | cloud/ngsolve/win-64: https://conda.anaconda.org/ngsolve/win-64/
169 | - type: external
170 | name: rust-static
171 | proxy_to: https://static.rust-lang.org/
172 | - type: external
173 | name: debian-security
174 | proxy_to: http://ftp.sjtu.edu.cn/debian-security/
175 | - type: external
176 | name: centos
177 | proxy_to: http://ftp.sjtu.edu.cn/centos/
178 | - type: external
179 | name: ubuntu
180 | proxy_to: http://ftp.sjtu.edu.cn/ubuntu/
181 | - type: external
182 | name: archlinux
183 | proxy_to: http://ftp.sjtu.edu.cn/archlinux/
184 | - type: external
185 | name: fedora
186 | proxy_to: http://ftp.sjtu.edu.cn/fedora/
187 | - type: external
188 | name: debian
189 | proxy_to: http://ftp.sjtu.edu.cn/debian/
190 | - type: external
191 | name: ubuntu-cd
192 | proxy_to: http://ftp.sjtu.edu.cn/ubuntu-cd/
193 | - type: external
194 | name: debian-cd
195 | proxy_to: http://ftp.sjtu.edu.cn/debian-cd/
196 | - type: external
197 | name: linuxmint
198 | proxy_to: http://ftp.sjtu.edu.cn/linuxmint/
199 | - type: external
200 | name: deepin
201 | proxy_to: http://ftp.sjtu.edu.cn/deepin/
202 | - type: external
203 | name: opensuse
204 | proxy_to: http://ftp.sjtu.edu.cn/opensuse
205 | - type: external
206 | name: docker-registry
207 | proxy_to: https://registry-1.docker.io
208 | subdomain: docker
209 | - type: external
210 | name: npm-registry
211 | proxy_to: https://registry.npmjs.org
212 | - type: external
213 | name: flutter_infra
214 | proxy_to: https://storage.googleapis.com/flutter_infra/
215 | - type: external
216 | name: dart_packages
217 | proxy_to: https://pub.dartlang.org/
218 | subdomain: dart-pub
219 | filter_content_type: text/html.*
220 | filter_search_pattern:
221 | filter_replacement: "dart-pub.mirrors.sjtug.sjtu.edu.cn is a reverse proxy of https://pub.dartlang.org/ provided by SJTUG mirror service."
222 | - type: external
223 | name: maven-central
224 | proxy_to: http://repo.maven.apache.org/maven2/
225 | - type: shell_script
226 | script: /worker-script/git.sh
227 | name: git/homebrew-core.git
228 | origin: https://github.com/Homebrew/homebrew-core.git
229 | interval: 3801
230 | path: /mnt/git/homebrew-core.git
231 | no_direct_serving: true
232 | - type: shell_script
233 | script: /worker-script/git.sh
234 | name: git/homebrew-cask.git
235 | interval: 3801
236 | path: /mnt/git/homebrew-cask.git
237 | origin: https://github.com/Homebrew/homebrew-cask.git
238 | no_direct_serving: true
239 | - type: external
240 | name: homebrew-bottles
241 | proxy_to: https://homebrew.bintray.com/
242 | subdomain: homebrew-bottles
243 | - type: shell_script
244 | name: raspbian
245 | script: /worker-script/rsync.sh
246 | path: /mnt/raspbian
247 | interval: 10801
248 | source: rsync://archive.raspbian.org/archive/
249 | - type: shell_script
250 | name: raspberrypi
251 | script: /worker-script/rsync.sh
252 | path: /mnt/raspberrypi
253 | interval: 10801
254 | source: apt-repo.raspberrypi.org::archive
255 | - type: shell_script
256 | script: /worker-script/macports.sh
257 | source: rsync://rsync-origin.macports.org/macports/
258 | interval: 3600
259 | path: /mnt/macports
260 | name: macports
261 |
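Each shell_script repo above is run with its YAML keys exported as LUG_-prefixed environment variables; compare username/password here with $LUG_username/$LUG_password in worker-script/archlinux-cn.sh. A minimal hypothetical worker illustrating that contract:

```
#!/bin/bash
# Hypothetical worker: every key of the repo's YAML entry arrives as
# LUG_<key>, e.g. path -> $LUG_path, source -> $LUG_source.
set -e
mkdir -p "$LUG_path"
rsync -rtl --delete "$LUG_source" "$LUG_path"
```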
--------------------------------------------------------------------------------
/lug/worker-script/anaconda.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import concurrent.futures as futures
4 | import urllib.request as request
5 | from urllib.parse import urljoin
6 | from urllib.error import HTTPError
7 | from http.client import HTTPException
8 | import shutil
9 | import os
10 | import sys
11 | import json
12 | import hashlib
13 | import traceback
14 | from json import JSONDecodeError
15 |
16 | import time
17 | from functools import wraps
18 |
19 | def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
20 | """Retry calling the decorated function using an exponential backoff.
21 |
22 | http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
23 | original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
24 |
25 | :param ExceptionToCheck: the exception to check. may be a tuple of
26 | exceptions to check
27 | :type ExceptionToCheck: Exception or tuple
28 | :param tries: number of times to try (not retry) before giving up
29 | :type tries: int
30 | :param delay: initial delay between retries in seconds
31 | :type delay: int
32 | :param backoff: backoff multiplier e.g. value of 2 will double the delay
33 | each retry
34 | :type backoff: int
35 | :param logger: logger to use. If None, print
36 | :type logger: logging.Logger instance
37 | """
38 | def deco_retry(f):
39 |
40 | @wraps(f)
41 | def f_retry(*args, **kwargs):
42 | mtries, mdelay = tries, delay
43 | while mtries > 1:
44 | try:
45 | return f(*args, **kwargs)
46 | except ExceptionToCheck as e:
47 | msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
48 | if logger:
49 | logger.warning(msg)
50 | else:
51 | sys.stderr.write(msg + '\n')
52 | time.sleep(mdelay)
53 | mtries -= 1
54 | mdelay *= backoff
55 | try:
56 | return f(*args, **kwargs)
57 | except ExceptionToCheck as e:
58 | msg = "Failed last attempt %s, %s %s" % (str(e), str(args), str(kwargs))
59 | if logger:
60 | logger.warning(msg)
61 | else:
62 | sys.stderr.write(msg + "\n")
63 | raise
64 |
65 | return f_retry # true decorator
66 |
67 | return deco_retry
68 |
69 | @retry((OSError, HTTPException), tries=4, delay=3, backoff=2)
70 | def urlopen_failsafe(*args, **kwargs):
71 | return request.urlopen(*args, **kwargs)
72 |
73 | @retry((OSError, JSONDecodeError, HTTPException), tries=4, delay=3, backoff=2)
74 | def jsonurlopen_failsafe(*args, **kwargs):
75 | result = request.urlopen(*args, **kwargs)
76 | text = result.read().decode('utf-8')
77 |     json.loads(text)  # parse only to validate; the caller gets the raw text
78 | return text
79 |
80 | def verify_file(filepath: str, md5: str, size: int, skip_md5: bool = False, output_fail_reason: bool = False) -> bool:
81 | verify_ok = False
82 | if os.path.exists(filepath) and os.stat(filepath).st_size == size:
83 | if skip_md5:
84 | verify_ok = True
85 | else:
86 | with open(filepath, 'rb') as f:
87 | actual_md5 = hashlib.md5(f.read()).hexdigest()
88 | if actual_md5 == md5:
89 | verify_ok = True
90 | else:
91 | if output_fail_reason:
92 |                         sys.stderr.write('Verify failed: MD5 mismatch of {}: Expected {}, Got {}\n'.format(filepath, md5, actual_md5))
93 | else:
94 | if output_fail_reason:
95 | if not os.path.exists(filepath):
96 |                 sys.stderr.write('Verify failed: {} does not exist\n'.format(filepath))
97 | else:
98 | sys.stderr.write('Verify failed: {} size mismatch: Expected {} Actual {}\n'.format(filepath, size, os.stat(filepath).st_size))
99 | return verify_ok
100 |
101 | class VerifyError(RuntimeError):
102 | def __init__(self, msg):
103 | self.msg = msg
104 | def __str__(self):
105 | return 'Failed to verify {}'.format(self.msg)
106 |
107 | @retry((VerifyError, ValueError, ConnectionError, EnvironmentError, HTTPException), tries=3, delay=3, backoff=2)
108 | def download_file(url_root: str, target_dir: str, name: str, md5: str, size: int):
109 | filepath = os.path.join(target_dir, name)
110 | tmp_filepath = os.path.join(target_dir, '.' + name)
111 | verify_ok = verify_file(filepath, md5, size, skip_md5 = True)
112 | if verify_ok:
113 |         print('{} already exists! skipped'.format(filepath))
114 |         return
115 |     else:
116 |         print('{} does not exist or contents mismatch. Trying to download...'.format(filepath))
117 | result = urlopen_failsafe(urljoin(url_root, name))
118 | with result, open(tmp_filepath, 'wb') as f:
119 | shutil.copyfileobj(result, f)
120 | post_verify_ok = verify_file(tmp_filepath, md5, size, skip_md5 = False, output_fail_reason = True)
121 | if not post_verify_ok:
122 | os.remove(tmp_filepath)
123 | raise VerifyError(filepath)
124 | os.rename(tmp_filepath, filepath)
125 |     print('Succeeded to download {}'.format(filepath))
126 |
127 | DOWNLOAD_FAILED_THRESHOLD = 50 # <=50 errors are acceptable
128 | def download_repo(executor, url_root: str, target_dir: str):
129 | if not os.path.exists(target_dir):
130 | os.makedirs(target_dir)
131 | result = jsonurlopen_failsafe(urljoin(url_root, 'repodata.json'))
132 | print('Downloading metafiles at {}'.format(urljoin(url_root, 'repodata.json')))
133 | tmp_result_path = os.path.join(target_dir, '.repodata.json')
134 | with open(tmp_result_path, 'w', encoding="utf-8") as f:
135 | f.write(result)
136 | result_bz2 = urlopen_failsafe(urljoin(url_root, 'repodata.json.bz2'))
137 | tmp_result_bz2_path = os.path.join(target_dir, '.repodata.json.bz2')
138 | with result_bz2, open(tmp_result_bz2_path, 'wb') as f:
139 | shutil.copyfileobj(result_bz2, f)
140 | current_repodata_enabled = False
141 | try:
142 | result_current = jsonurlopen_failsafe(urljoin(url_root, 'current_repodata.json'))
143 | tmp_result_current_path = os.path.join(target_dir, '.current_repodata.json')
144 | with open(tmp_result_current_path, 'w', encoding="utf-8") as f:
145 |             f.write(result_current)
146 | current_repodata_enabled = True
147 | except HTTPError as e:
148 | print('Failed to acquire current_repodata of {}, {}'.format(url_root, e))
149 | print('Succeeded to acquire repodata of {}'.format(url_root))
150 | download_failed_cnt = 0
151 | with open(tmp_result_path, 'r') as f:
152 | j = json.load(f)
153 | if 'packages' not in j:
154 | print('No package in repo {}'.format(url_root))
155 | packages = {}
156 | else:
157 | packages = j["packages"]
158 | if "packages.conda" in j:
159 | packages_conda = j["packages.conda"]
160 | packages.update(packages_conda)
161 | download_futures = {}
162 | for name, value in packages.items():
163 | print('Submitted to download {}/{}'.format(url_root, name))
164 | download_futures[executor.submit(download_file, url_root, target_dir, name, value['md5'], value['size'])] = name
165 | for future in futures.as_completed(download_futures):
166 | name = download_futures[future]
167 | try:
168 | future.result()
169 | except Exception as exc:
170 | sys.stderr.write('Failed to download {}/{}: {} | {} \n'.format(url_root, name, exc, traceback.format_exc()))
171 | download_failed_cnt += 1
172 | if download_failed_cnt > DOWNLOAD_FAILED_THRESHOLD:
173 | raise RuntimeError('Failed to sync repos at {}'.format(url_root))
174 | os.rename(tmp_result_path, os.path.join(target_dir, 'repodata.json'))
175 | os.rename(tmp_result_bz2_path, os.path.join(target_dir, 'repodata.json.bz2'))
176 | if current_repodata_enabled:
177 | os.rename(tmp_result_current_path, os.path.join(target_dir, 'current_repodata.json'))
178 |     print('Removing unused files...')
179 | for filename in os.listdir(target_dir):
180 | if filename.startswith('.') or (filename.endswith('.bz2') and filename != 'repodata.json.bz2' and filename not in packages):
181 | delta_since_last_modify = time.time() - os.path.getmtime(os.path.join(target_dir, filename))
182 | if delta_since_last_modify <= 86400:
183 |                 print('Skipped {}: it was last modified only {} seconds ago.'.format(filename, delta_since_last_modify))
184 | continue
185 | print('Deleting {}'.format(filename))
186 | os.remove(os.path.join(target_dir, filename))
187 | print('Succeeded to sync {}'.format(url_root))
188 |
189 | REPOS = {}
190 | PATH_BASE = '/tmp'
191 | THREAD_NUM = 16
192 |
193 | def get_config_from_env():
194 | global THREAD_NUM, PATH_BASE, REPOS
195 | j = json.loads(os.getenv('LUG_config_json'))
196 | if 'thread_num' in j:
197 | THREAD_NUM = j['thread_num']
198 | if 'path' in j:
199 | PATH_BASE = j['path']
200 | for k, v in j.items():
201 | if isinstance(v, str) and v.startswith('http'):
202 | REPOS[k] = v
203 |
204 | if __name__ == '__main__':
205 | get_config_from_env()
206 | with futures.ThreadPoolExecutor(THREAD_NUM) as executor:
207 | all_ok = True
208 | repo_futures = {}
209 | for name, url in REPOS.items():
210 | repo_futures[executor.submit(download_repo, executor, url, os.path.join(PATH_BASE, name))] = name
211 |         for future in futures.as_completed(repo_futures):
212 |             name = repo_futures[future]
213 |             try:
214 |                 future.result()
215 |             except Exception as exc:
216 |                 sys.stderr.write('Failed to download {}: {} | {}\n'.format(name, exc, traceback.format_exc()))
217 | all_ok = False
218 | sys.exit(0 if all_ok else 1)
219 |
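The script reads all of its settings from the LUG_config_json environment variable (see get_config_from_env above), which lug presumably fills with the repo's YAML entry. A hypothetical standalone invocation for testing:

```
LUG_config_json='{"path": "/tmp/anaconda", "thread_num": 4, "pkgs/main/noarch": "https://repo.continuum.io/pkgs/main/noarch/"}' python3 anaconda.py
```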
--------------------------------------------------------------------------------
/lug/worker-script/archlinux-cn.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | RSYNC_PASSWORD=$LUG_password rsync -rtlivH --delete-after --delay-updates --safe-links --max-delete=1000 --contimeout=60 "$LUG_username@sync.repo.archlinuxcn.org::repo" "$LUG_path"
4 |
--------------------------------------------------------------------------------
/lug/worker-script/archlinux.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | ########
4 | #
5 | # Copyright © 2014-2019 Florian Pritz
6 | #
7 | # This program is free software; you can redistribute it and/or modify
8 | # it under the terms of the GNU General Public License as published by
9 | # the Free Software Foundation; either version 2 of the License, or
10 | # (at your option) any later version.
11 | #
12 | # This program is distributed in the hope that it will be useful,
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 | # GNU General Public License for more details.
16 | #
17 | # You should have received a copy of the GNU General Public License
18 | # along with this program; if not, see <http://www.gnu.org/licenses/>.
19 | #
20 | ########
21 | #
22 | # This is a simple mirroring script. To save bandwidth it first checks a
23 | # timestamp via HTTP and only runs rsync when the timestamp differs from the
24 | # local copy. As of 2016, a single rsync run without changes transfers roughly
25 | # 6MiB of data which adds up to roughly 250GiB of traffic per month when rsync
26 | # is run every minute. Performing a simple check via HTTP first can thus save a
27 | # lot of traffic.
28 |
29 | # Directory where the repo is stored locally. Example: /srv/repo
30 | target=$LUG_path
31 |
32 | # Directory where files are downloaded to before being moved in place.
33 | # This should be on the same filesystem as $target, but not a subdirectory of $target.
34 | # Example: /srv/tmp
35 | # tmp=""
36 |
37 | # Lockfile path
38 | # lock="/var/lock/syncrepo.lck"
39 | lock="/var/lock/sync_arch_repo.lck"
40 |
41 | # If you want to limit the bandwidth used by rsync set this.
42 | # Use 0 to disable the limit.
43 | # The default unit is KiB (see man rsync /--bwlimit for more)
44 | bwlimit=0
45 |
46 | # The source URL of the mirror you want to sync from.
47 | # If you are a tier 1 mirror use rsync.archlinux.org, for example like this:
48 | # rsync://rsync.archlinux.org/ftp_tier1
49 | # Otherwise chose a tier 1 mirror from this list and use its rsync URL:
50 | # https://www.archlinux.org/mirrors/
51 | # source_url=''
52 | source_url=$LUG_source
53 |
54 | # An HTTP(S) URL pointing to the 'lastupdate' file on your chosen mirror.
55 | # If you are a tier 1 mirror use: http://rsync.archlinux.org/lastupdate
56 | # Otherwise use the HTTP(S) URL from your chosen mirror.
57 | # lastupdate_url=''
58 | lastupdate_url=$LUG_lastupdate
59 |
60 | #### END CONFIG
61 |
62 | [ ! -d "${target}" ] && mkdir -p "${target}"
63 |
64 | exec 9>"${lock}"
65 | flock -n 9 || exit
66 |
67 | rsync_cmd() {
68 | local -a cmd=(rsync -rtlH --safe-links --delete-after ${VERBOSE} "--timeout=600" "--contimeout=60" -p \
69 | --delay-updates --no-motd "--partial-dir=.rsync-partial")
70 |
71 | if stty &>/dev/null; then
72 | cmd+=(-h -v --progress)
73 | else
74 | cmd+=(--quiet)
75 | fi
76 |
77 | if ((bwlimit>0)); then
78 | cmd+=("--bwlimit=$bwlimit")
79 | fi
80 |
81 | "${cmd[@]}" "$@"
82 | }
83 |
84 |
85 | # if we are called without a tty (cronjob) only run when there are changes
86 | if ! tty -s && [[ -f "$target/lastupdate" ]] && diff -b <(curl -Ls "$lastupdate_url") "$target/lastupdate" >/dev/null; then
87 | # keep lastsync file in sync for statistics generated by the Arch Linux website
88 | rsync_cmd "$source_url/lastsync" "$target/lastsync"
89 | exit 0
90 | fi
91 |
92 | rsync_cmd \
93 | --exclude='*.links.tar.gz*' \
94 | --exclude='/other' \
95 | --exclude='/sources' \
96 | "${source_url}" \
97 | "${target}"
98 |
--------------------------------------------------------------------------------
/lug/worker-script/awss3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | aws s3 sync --no-sign-request "$LUG_source" "$LUG_path" $ADDITIONAL_FLAGS
6 |
--------------------------------------------------------------------------------
/lug/worker-script/bandersnatch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | cd "$(dirname "$0")"
4 | tmp_stderr=$(mktemp "/tmp/bandersnatch-$LUG_name.XXX")
5 | perl -p -i -e 's/\$\{([^}]+)\}/defined $ENV{$1} ? $ENV{$1} : $&/eg' < bandersnatch/bandersnatch.conf > /tmp/bandersnatch.conf # substitute ${VAR} placeholders in the template with environment values (e.g. LUG_path)
6 | bandersnatch -c /tmp/bandersnatch.conf mirror 2> "$tmp_stderr"
7 | retcode="$?"
8 | cat "$tmp_stderr" >&2
9 | if [ "$LUG_retry_on_ssl_fail" ]; then
10 | if [ "$retcode" -ne 0 ]; then
11 | if grep 'requests.packages.urllib3.exceptions.MaxRetryError' "$tmp_stderr"; then
12 |             bandersnatch -c /tmp/bandersnatch.conf mirror
13 |             retcode="$?"
14 | fi
15 | fi
16 | fi
17 | rm -f "$tmp_stderr"
18 | exit "$retcode"
19 |
--------------------------------------------------------------------------------
/lug/worker-script/bandersnatch/bandersnatch.conf:
--------------------------------------------------------------------------------
1 | [mirror]
2 | ; The directory where the mirror data will be stored.
3 | directory = ${LUG_path}
4 | ; Save JSON metadata into the web tree:
5 | ; URL/pypi/PKG_NAME/json (Symlink) -> URL/json/PKG_NAME
6 | json = false
7 |
8 | ; The PyPI server which will be mirrored.
9 | ; master = https://testpypi.python.org
10 | ; scheme for PyPI server MUST be https
11 | master = ${LUG_master}
12 |
13 | ; The network socket timeout to use for all connections. This is set to a
14 | ; somewhat aggressively low value: rather fail quickly temporarily and re-run
15 | ; the client soon instead of having a process hang infinitely and have TCP not
16 | ; catching up for ages.
17 | timeout = 10
18 |
19 | ; Number of worker threads to use for parallel downloads.
20 | ; Recommendations for worker thread setting:
21 | ; - leave the default of 3 to avoid overloading the pypi master
22 | ; - official servers located in data centers could run 10 workers
23 | ; - anything beyond 10 is probably unreasonable and avoided by bandersnatch
24 | workers = 3
25 |
26 | ; Whether to hash package indexes
27 | ; Note that package index directory hashing is incompatible with pip, and so
28 | ; this should only be used in an environment where it is behind an application
29 | ; that can translate URIs to filesystem locations. For example, with the
30 | ; following Apache RewriteRule:
31 | ; RewriteRule ^([^/])([^/]*)/$ /mirror/pypi/web/simple/$1/$1$2/
32 | ; RewriteRule ^([^/])([^/]*)/([^/]+)$/ /mirror/pypi/web/simple/$1/$1$2/$3
33 | ; OR
34 | ; following nginx rewrite rules:
35 | ; rewrite ^/simple/([^/])([^/]*)/$ /simple/$1/$1$2/ last;
36 | ; rewrite ^/simple/([^/])([^/]*)/([^/]+)$/ /simple/$1/$1$2/$3 last;
37 | ; Setting this to true would put the package 'abc' index in simple/a/abc.
38 | ; Recommended setting: the default of false for full pip/pypi compatibility.
39 | hash-index = false
40 |
41 | ; Whether to stop a sync quickly after an error is found or whether to continue
42 | ; syncing but not marking the sync as successful. Value should be "true" or
43 | ; "false".
44 | stop-on-error = false
45 |
46 | ; Whether or not files that have been deleted on the master should be deleted
47 | ; on the mirror, too.
48 | ; IMPORTANT: if you are running an official mirror then you *need* to leave
49 | ; this on.
50 | delete-packages = true
51 |
52 | ; Advanced logging configuration. Uncomment and set to the location of a
53 | ; python logging format logging config file.
54 | ; log-config = /etc/bandersnatch-log.conf
55 |
56 | ; blacklist
57 | ; List of PyPI packages not to sync - Useful if malicious packages are mirrored
58 | [blacklist]
59 | packages =
60 | example1
61 | example2
62 |
63 | ; vim: set ft=cfg:
64 |
--------------------------------------------------------------------------------
/lug/worker-script/crates-io.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -xe
3 | /worker-script/git.sh
4 | pushd "$LUG_path"
5 | jq 'setpath(["dl"]; "https://mirrors.sjtug.sjtu.edu.cn/static.crates.io/crates/{crate}/{crate}-{version}.crate")' config.json > config.json.temp
6 | mv config.json.temp config.json
7 | git config user.name 'SJTUG mirrors'
8 | git config user.email 'mirrors@sjtug.org'
9 | git add .
10 | git commit -m "update config.json" || true
11 | popd
12 |
--------------------------------------------------------------------------------
/lug/worker-script/debmirror.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #### Start script to automate building of Ubuntu mirror #####
4 | ## The shebang above must be the first line; the remaining lines starting with # are comments and can be deleted.
5 |
6 | ## Setting variables with explanations.
7 |
8 | #
9 | # Don't touch the user's keyring, have our own instead
10 | #
11 | # Arch= -a # Architecture. For Ubuntu can be i386, powerpc or amd64.
12 | # sparc, only starts in dapper, it is only the later models of sparc.
13 | #
14 | arch="${LUG_arch:-i386,amd64}"
15 |
16 | # Minimum Ubuntu system requires main, restricted
17 | # Section= -s # Section (One of the following - main/restricted/universe/multiverse).
18 | # You can add extra file with $Section/debian-installer. ex: main/debian-installer,universe/debian-installer,multiverse/debian-installer,restricted/debian-installer
19 | #
20 | section="${LUG_section:-main,restricted,universe,multiverse}"
21 |
22 | # Release= -d # Release of the system (...Hardy, Intrepid... Lucid, Precise, Quantal, Saucy, Trusty ), and the -updates and -security ( -backports can be added if desired)
23 | # List of updated releases in: https://wiki.ubuntu.com/Releases
24 | #
25 |
26 | release="${LUG_release}"
27 |
28 | # Server= -h # Server name, minus the protocol and the path at the end
29 | # CHANGE "*" to equal the mirror you want to create your mirror from. au. in Australia ca. in Canada.
30 | # This can be found in your own /etc/apt/sources.list file, assuming you have Ubuntu installed.
31 | #
32 | server="${LUG_server}"
33 |
34 | # Dir= -r # Path from the main server, so http://my.web.server/$dir, Server dependant
35 | #
36 | inPath="${LUG_inpath}"
37 |
38 | # Proto= --method= # Protocol to use for transfer (http, ftp, hftp, rsync)
39 | # Choose one - http is most usual the service, and the service must be available on the server you point at.
40 | #
41 | proto="${LUG_proto:-http}"
42 |
43 | # Outpath= # Directory to store the mirror in
44 | # Make this a full path to where you want to mirror the material.
45 | #
46 | outPath="${LUG_path}"
47 |
48 | # The --nosource option only downloads debs and not deb-src's
49 | # The --progress option shows files as they are downloaded
50 | # --source \ in the place of --no-source \ if you want sources also.
51 | # --nocleanup Do not clean up the local mirror after mirroring is complete. Use this option to keep older repository
52 | # Start script
53 | #
54 | debmirror -a $arch \
55 | --no-source \
56 | -s $section \
57 | -h $server \
58 | -d $release \
59 | -r $inPath \
60 | --progress \
61 | --method=$proto \
62 | --ignore-missing-release \
63 | -v \
64 | --diff=none \
65 | --no-check-gpg \
66 | --rsync-options="-aIL --partial --partial-dir .partial" \
67 | --rsync-extra=none \
68 | --allow-dist-rename \
69 | $outPath
70 |
71 |
72 | #### End script to automate building of Ubuntu mirror ####
73 |
--------------------------------------------------------------------------------
/lug/worker-script/emacs-elpa.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -xe
4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
5 |
6 | $DIR/rsync.sh
7 |
8 | for value in emacswiki gnu marmalade melpa melpa-stable org SC sunrise-commander user42
9 | do
10 | touch "$LUG_path/$value/index.html"
11 | done
12 |
--------------------------------------------------------------------------------
/lug/worker-script/flatpak.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -ex
4 |
5 | ostree pull --repo="$LUG_path" --mirror flathub --depth=1
6 | flatpak build-update-repo "$LUG_path"
7 |
--------------------------------------------------------------------------------
/lug/worker-script/git.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -xe
4 |
5 | if [ ! -d "${LUG_path}/.git" ]; then
6 | git clone "$LUG_origin" "$LUG_path"
7 | fi
8 |
9 | cd "$LUG_path"
10 | git pull --all --rebase
11 | git update-server-info
12 | git gc --auto
13 | git repack -a -b -d
14 |
--------------------------------------------------------------------------------
/lug/worker-script/googlestorage.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # path: sync destination
4 | # access_key_id: https://cloud.google.com/storage/docs/migrating#keys
5 | # access_secret_key: Same as above
6 | # project_id: The project name where you create a new key
7 | # source: source googlestorage path. Example: gs://flutter_intra
8 |
9 | mkdir -p "$LUG_path"
10 | env "GS_ACCESS_KEY_ID=$LUG_access_key_id" "GS_ACCESS_SECRET_KEY=$LUG_access_secret_key" ./google-cloud-sdk/bin/gsutil -o "GSUtil:default_project_id=$LUG_project_id" -m rsync -d -r "$LUG_source" "$LUG_path"
11 |
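A hypothetical config.yaml entry wiring these keys up, following the shape of /lug/config.example.yaml (name, paths, and credentials are placeholders):

```
- type: shell_script
  script: /worker-script/googlestorage.sh
  name: flutter_intra
  interval: 3600
  path: /mnt/flutter_intra
  source: gs://flutter_intra
  project_id: your-project-id
  access_key_id: your-access-key
  access_secret_key: your-access-secret
```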
--------------------------------------------------------------------------------
/lug/worker-script/julia-releases.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
5 |
6 | ADDITIONAL_FLAGS="--exclude *DO_NOT_UPLOAD_HERE --exclude bin/mac/extras/cctools_bundle.tar.gz --delete" $DIR/awss3.sh
7 |
--------------------------------------------------------------------------------
/lug/worker-script/julia.jl:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env julia
2 | # This script builds/pulls all static contents needed by storage server.
3 | #
4 | # Usage:
5 | # 1. make sure you've added StorageMirrorServer.jl
6 | #   2. generate/pull all tarballs: `julia julia.jl` (this script)
7 | # 3. set a cron job to run step 2 regularly
8 | #
9 | # Note:
10 | # * Initialization would typically take days, depending on the network bandwidth and CPU
11 | # * set `JULIA_NUM_THREADS` to use multiple threads
12 | #
13 | # Disk space requirements for a complete storage (increases over time):
14 | # * `STATIC_DIR`: at least 500GB, would be better to have more than 3TB free space
15 |
16 | using StorageMirrorServer
17 | using Pkg
18 |
19 | # This holds all the data you need to set up a storage server
20 | # For example, my nginx service serves all files in `/mnt/mirrors` as static contents using autoindex
21 | output_dir = ENV["LUG_path"]
22 |
23 | # check https://status.julialang.org/ for available public storage servers
24 | upstreams = [
25 | "https://kr.storage.juliahub.com",
26 | "https://us-east.storage.juliahub.com"
27 | ]
28 |
29 | registries = [
30 | # (name, uuid, original_git_url)
31 | ("General", "23338594-aafe-5451-b93e-139f81909106", "https://github.com/JuliaRegistries/General")
32 | ]
33 |
34 | # These are default parameter settings for StorageMirrorServer
35 | # you can modify them accordingly to fit your settings
36 | parameters = Dict(
37 | # if needed, you can pass custom http parameters
38 | :http_parameters => Dict{Symbol, Any}(
39 | :retry => true,
40 | :retries => 5,
41 | :readtimeout => 600
42 | # download data using proxy
43 | # it also respects `http_proxy`, `https_proxy` and `no_proxy` environment variables
44 | # :proxy => "http://localhost:1080"
45 | ),
46 |
47 | # whether to show the progress bar
48 | :show_progress => true,
49 |
50 | # how long (in hours) to skip resources listed in `/failed_resources.txt` before retrying them
51 | :skip_duration => 12,
52 | )
53 |
54 | for reg in registries
55 | mirror_tarball(reg, upstreams, output_dir; parameters...)
56 | end
57 |
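Step 3 of the usage notes could be a cron entry along these lines (schedule, thread count, and output path are hypothetical; JULIA_NUM_THREADS enables the multi-threading mentioned above):

    0 3 * * * JULIA_NUM_THREADS=8 LUG_path=/srv/mirror/julia julia /lug/worker-script/julia.jl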
--------------------------------------------------------------------------------
/lug/worker-script/macports.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | rsync --compress --delete-delay --hard-links --links --no-motd --perms --recursive --stats --timeout=600 --times "$LUG_source" "$LUG_path"
4 |
--------------------------------------------------------------------------------
/lug/worker-script/mongodb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
5 |
6 | ADDITIONAL_FLAGS='--exclude rsync-sentinal --exclude "*.html" --delete' "$DIR/awss3.sh"
7 |
--------------------------------------------------------------------------------
/lug/worker-script/mxlinux.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | RSYNC_PASSWORD="$LUG_password" rsync -rtv --delete --delete-after --delay-updates --safe-links --max-delete=1000 --exclude '.~tmp~/' --partial-dir=.rsync-partial --timeout=600 --contimeout=600 "$LUG_source" "$LUG_path"
4 |
--------------------------------------------------------------------------------
/lug/worker-script/opam.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -xe
3 |
4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
5 |
6 | "$DIR/git.sh"
7 | pushd "$LUG_path"
8 | yq w --style=double repo archive-mirrors "https://mirrors.sjtug.sjtu.edu.cn/opam-cache" > repo.tmp
9 | mv repo.tmp repo
10 | git config user.name 'SJTUG mirrors'
11 | git config user.email 'mirrors@sjtug.org'
12 | git add .
13 | git commit -m "update repo" || true
14 | popd
15 |
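The yq invocation rewrites the repository's top-level `repo` metadata file so that opam clients prefer the local archive cache; after the edit and commit, the file should contain a line like:

    archive-mirrors: "https://mirrors.sjtug.sjtu.edu.cn/opam-cache"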
--------------------------------------------------------------------------------
/lug/worker-script/packagist.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export HTTP_PROXY="${HTTP_PROXY/http:/tcp:}"
4 | export HTTPS_PROXY="${HTTPS_PROXY/http:/tcp:}"
5 | /app/packagist-mirror/pull.sh
6 |
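The scheme rewrite only changes the URL prefix, presumably because the downstream mirror script expects tcp:// proxy URLs; for example (hostname hypothetical):

    $ HTTP_PROXY=http://proxy.example.com:8080
    $ echo "${HTTP_PROXY/http:/tcp:}"
    tcp://proxy.example.com:8080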
--------------------------------------------------------------------------------
/lug/worker-script/qt.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
5 |
6 | LUG_source="master.qt.io::qt-online" LUG_path="$LUG_path/online" "$DIR/rsync.sh"
7 | SYNC_SYMLINK=1 LUG_source="master.qt.io::qt-official" LUG_path="$LUG_path/official_releases" "$DIR/rsync.sh"
8 | LUG_source="master.qt.io::qt-development" LUG_path="$LUG_path/development_releases" "$DIR/rsync.sh"
9 |
--------------------------------------------------------------------------------
/lug/worker-script/rsync.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$LUG_exclude_hidden" ]; then
4 | exclude_hidden_flags="--exclude=.*"
5 | fi
6 |
7 | if [ "$LUG_ignore_vanish" ]; then
8 | IGNOREEXIT=24
9 | IGNOREOUT='^(file has vanished: |rsync warning: some files vanished before they could be transferred)'
10 | fi
11 |
12 | if [ "$LUG_mirror_path" ]; then
13 | LUG_path="$LUG_mirror_path"
14 | fi
15 |
16 | if [[ -z "$RSYNC_SSH" ]]; then
17 | conntimeout=--contimeout=600
18 | fi
19 |
20 | if [[ ! -z "$SYNC_SYMLINK" ]]; then
21 | rsync_symlink=-L
22 | fi
23 |
24 | tmp_stderr=$(mktemp "/tmp/rsync-$LUG_name.XXXXXX")
25 |
26 | rsync -aHvh $rsync_symlink --no-o --no-g --stats --delete --delete-delay --safe-links --exclude '.~tmp~' --partial-dir=.rsync-partial --timeout=600 $conntimeout $exclude_hidden_flags "$LUG_source" "$LUG_path" 2> "$tmp_stderr"
27 | retcode="$?"
28 |
29 | cat "$tmp_stderr" >&2
30 |
31 | if [ "$LUG_ignore_vanish" ]; then
32 | if [ "$retcode" -eq "$IGNOREEXIT" ]; then
33 | if egrep "$IGNOREOUT" "$tmp_stderr"; then
34 | retcode=0
35 | fi
36 | fi
37 | fi
38 |
39 | rm -f "$tmp_stderr"
40 |
41 | exit "$retcode"
42 |
43 |
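All of the toggles above are plain environment variables, so a direct invocation might look like this (values hypothetical):

    LUG_name=demo LUG_source=rsync://upstream.example.org/pub/repo/ \
    LUG_path=/srv/mirror/repo LUG_exclude_hidden=1 LUG_ignore_vanish=1 ./rsync.sh

With LUG_ignore_vanish set, an exit code of 24 (files vanished upstream during transfer) is downgraded to success, but only when stderr actually matches the expected vanished-file messages.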
--------------------------------------------------------------------------------
/lug/worker-script/rsync_ssh.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | export RSYNC_SSH=1
5 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
6 | "$DIR/rsync.sh"
7 |
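Since rsync.sh adds --contimeout only for daemon connections, exporting RSYNC_SSH makes this wrapper suitable for ssh-style sources, e.g. (values hypothetical):

    LUG_name=demo LUG_source=user@upstream.example.org:/srv/repo/ LUG_path=/srv/mirror/repo ./rsync_ssh.sh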
--------------------------------------------------------------------------------
/v2ray/config.json.example:
--------------------------------------------------------------------------------
1 | {
2 | "log": {
3 | "loglevel": "debug"
4 | },
5 | "inbound": {
6 | "port": 8080,
7 | "listen": "0.0.0.0",
8 | "protocol": "http",
9 | "settings": {
10 | "timeout": 30,
11 | "allowTransparent": true
12 | }
13 | },
14 | "outbound": {
15 | "protocol": "vmess",
16 | "settings": {
17 | "vnext": [
18 | {
19 | "address": "PLACE_YOUR_SERVER_HERE",
20 | "port": 1234,
21 | "users": [
22 | {
23 | "id": "PLACE_YOUR_ID_HERE",
24 | "level": 0,
25 | "alterId": 64,
26 | "security": "aes-128-gcm"
27 | }
28 | ]
29 | }
30 | ]
31 | },
32 | "mux": {
33 | "enabled": true,
34 | "concurrency": 4
35 | },
36 | "streamSettings": {
37 | "network": "tcp",
38 | "security": "tls",
39 | "tlsSettings": {
40 | "serverName": "PLACE_YOUR_SERVER_HERE",
41 | "allowInsecure": false
42 | },
43 | "kcpSettings": {
44 | "congestion": true,
45 | "header": {
46 | "type": "wechat-video"
47 | }
48 | }
49 | },
50 | "tag": "vmess"
51 | },
52 | "outboundDetour": [
53 | {
54 | "protocol": "freedom",
55 | "settings": {},
56 | "tag": "direct"
57 | }
58 | ],
59 | "policy": {
60 | "levels": {
61 | "0": {
62 | "uplinkOnly": 0,
63 | "downlinkOnly": 0,
64 | "connIdle": 60
65 | }
66 | }
67 | },
68 | "dns" :{
69 | "hosts": {
70 | "baidu.com": "127.0.0.1"
71 | },
72 | "servers": [
73 | "8.8.8.8",
74 | "8.8.4.4",
75 | "localhost"
76 | ]
77 | },
78 | "routing": {
79 | "strategy": "rules",
80 | "settings": {
81 | "rules": [
82 | {
83 | "type": "field",
84 | "ip": ["117.71.17.145", "163.177.72.198", "74.125.204.108", "64.233.189.108"],
85 | "outboundTag": "vmess"
86 | },
87 | {
88 | "type": "field",
89 | "domain": ["github.com", "qt.io", "beicoins.com", "v2ex.com", "uxengine.net"],
90 | "outboundTag": "vmess"
91 | },
92 | {
93 | "type": "chinaip",
94 | "outboundTag": "direct"
95 | },
96 | {
97 | "type": "chinasites",
98 | "outboundTag": "direct"
99 | },
100 | {
101 | "type": "field",
102 | "domain": [
103 | "gc.kis.scr.kaspersky-labs.com",
104 | "dl.acm.org",
105 | "ieeexplore.ieee.org",
106 | "acgvideo.com",
107 | "weiyun.com",
108 | "qq.com",
109 | "ceping.com",
110 | "xiami.com",
111 | "xiami.net",
112 | "kmf.com",
113 | "bilibili.com",
114 | "1point3acres.com",
115 | "163.com",
116 | "126.net",
117 | "taobao.org",
118 | "hdslb.com",
119 | "lug",
120 | "caddy",
121 | "git-http-backend"
122 | ],
123 | "outboundTag": "direct"
124 | },
125 | {
126 | "type": "field",
127 | "ip": [
128 | "0.0.0.0/8",
129 | "10.0.0.0/8",
130 | "100.64.0.0/10",
131 | "127.0.0.0/8",
132 | "169.254.0.0/16",
133 | "172.16.0.0/12",
134 | "192.0.0.0/24",
135 | "192.0.2.0/24",
136 | "192.168.0.0/16",
137 | "198.18.0.0/15",
138 | "198.51.100.0/24",
139 | "203.0.113.0/24",
140 | "::1/128",
141 | "fc00::/7",
142 | "fe80::/10"
143 | ],
144 | "outboundTag": "direct"
145 | }
146 | ]
147 | }
148 | }
149 | }
150 |
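After replacing the PLACE_YOUR_* placeholders, the file can be sanity-checked with v2ray's built-in config test before deployment (path hypothetical):

    v2ray -test -config /v2ray/config.json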
--------------------------------------------------------------------------------