├── apollo ├── apollo_demo │ └── apollo │ │ └── __init__.py └── .env ├── awx ├── SECRET_KEY ├── redis.conf ├── environment.sh ├── README.md └── credentials.py ├── README.md ├── grafana ├── grafana-allstack │ ├── mimir │ │ └── rules │ │ │ └── anonymous │ │ │ ├── rules_alloy.yaml │ │ │ ├── rules_grafana.yaml │ │ │ ├── alerts_grafana.yaml │ │ │ └── rules_tempo.yaml │ ├── grafana │ │ └── etc │ │ │ └── provisioning │ │ │ ├── dashboards │ │ │ └── dashboard.yaml │ │ │ └── plugins │ │ │ └── oncall.yaml │ ├── pyroscope │ │ └── config.yaml │ ├── k6 │ │ └── scripts │ │ │ └── grafana-loadtest.js │ ├── alloy │ │ └── endpoints.json │ └── loki │ │ └── local-config.yaml ├── docker-compose.yaml └── docker-compose-loki.yaml ├── rabbitmq ├── cluster │ └── conf │ │ └── enabled_plugins ├── single │ └── conf │ │ ├── enabled_plugins │ │ └── rabbitmq-definitions.json ├── grafana │ ├── dashboards.yml │ └── datasources.yml ├── 1.init-cluster.sh ├── docker-compose-singe.yml ├── docker-compose-cluster.yml ├── prometheus │ └── etc │ │ └── prometheus.yml ├── docker-compose-metrics.yml └── 2.create-queue.sh ├── zabbix ├── Dockerfile └── wqy-microhei.ttc ├── logio ├── logio_data │ ├── log_server.conf │ ├── harvester.conf │ └── web_server.conf └── docker-compose.yml ├── mysql ├── scripts │ ├── mysql │ ├── mysqldump │ └── backup_mysql.sh ├── docker-compose-5.7.yml └── docker-compose-redis.yml ├── nacos ├── env │ ├── mysql.env │ ├── nacos-ip.env │ ├── nacos-standlone-mysql.env │ ├── nacos-hostname.env │ └── nacos-embedded.env ├── standalone-mysql-8.yaml ├── init.d │ └── custom.properties ├── standalone-derby.yaml ├── standalone-mysql-5.7.yaml ├── prometheus │ ├── prometheus-standalone.yaml │ └── prometheus-cluster.yaml ├── cluster-embedded.yaml ├── cluster-hostname.yaml └── cluster-ip.yaml ├── loki ├── mysql-to-loki │ ├── logstash │ │ ├── config │ │ │ └── logstash.yml │ │ ├── data │ │ │ └── mysql-connector-j-9.1.0.jar │ │ └── pipeline │ │ │ └── mysql-to-loki.conf │ └── loki │ │ └── local-config.yaml ├── s3-config │ ├── promtail-gateway.yaml │ ├── loki-s3.yaml │ └── nginx-loki-gateway.conf ├── ha-memberlist-config │ ├── promtail-gateway.yaml │ └── nginx-loki-gateway.conf └── docker-compose.yml ├── rocket.chat ├── bash │ ├── handlers │ │ ├── helloworld │ │ ├── date │ │ └── update │ └── handler └── scripts │ └── hello.coffee ├── VictoriaMetrics ├── cluster │ └── etc │ │ ├── alertmanager │ │ └── alertmanager.yml │ │ ├── grafana │ │ └── provisioning │ │ │ ├── dashboards │ │ │ └── dashboard.yml │ │ │ └── datasources │ │ │ └── datasource.yml │ │ └── prometheus │ │ └── prometheus.yml └── sigle │ └── etc │ ├── alertmanager │ └── alertmanager.yml │ ├── grafana │ └── provisioning │ │ ├── dashboards │ │ └── dashboard.yml │ │ └── datasources │ │ └── datasource.yml │ └── prometheus │ └── prometheus.yml ├── elasticsearch ├── stack │ ├── logstash.yml │ ├── kibana.yml │ ├── logstash-pipeline.conf │ ├── apmserver.docker.yml │ ├── packetbeat.docker.yml │ ├── journalbeat.docker.yml │ ├── auditbeat.docker.yml │ ├── stack.env │ ├── heartbeat.docker.yml │ └── filebeat.docker.yml ├── xpack │ └── instances.yml ├── docker-compose-single.yml ├── docker-compose-xpack-certs.yml ├── docker-compose-auth.yml └── Makefile ├── gogs ├── backup_gogs.sh └── docker-compose.yml ├── wordpress ├── .gitignore ├── https-http.md ├── .env_example ├── LICENSE ├── stop-and-remove.sh ├── letsencrypt │ └── letsencrypt-renew.sh └── docker-compose.yml ├── prometheus ├── cortex │ ├── consul │ │ └── config.json │ ├── grafana │ │ ├── dashboards │ │ │ └── cortex.yml │ │ └── 
datasources │ │ │ └── Prometheus.yml │ └── prometheus │ │ └── prometheus.yml ├── prometheus │ └── etc │ │ ├── targets │ │ ├── works │ │ │ └── work.json │ │ └── nodes │ │ │ └── node.json │ │ ├── alerts │ │ ├── prometheus.rules │ │ ├── consul-rules.rules │ │ └── blackbox-exporter.rules │ │ ├── prometheus-main.yml │ │ ├── prometheus-influxdb.yml │ │ ├── prometheus-m3db.yml │ │ ├── prometheus-work1.yml │ │ └── prometheus-work2.yml ├── grafana │ └── datasources │ │ └── Prometheus.json ├── thanos │ ├── thanos │ │ ├── bucket.yml │ │ └── ruler.rules.yaml │ ├── grafana │ │ ├── dashboards │ │ │ └── thanos.yml │ │ └── datasources │ │ │ └── Prometheus.yml │ ├── prometheus │ │ ├── prometheus-remote_write.yml │ │ ├── prometheus0.yml │ │ ├── prometheus1.yml │ │ └── prometheus2.yml │ └── alertmanager │ │ └── config.yml ├── consul-template │ └── consul-rules.ctmpl ├── pushclient │ └── process-top.sh ├── alertmanager │ └── etc │ │ └── config.yml ├── docker-compose-m3db.yaml ├── docker-compose-alertmanager.yml ├── docker-compose-telegraf.yml └── docker-compose-influxdb.yaml ├── memcached └── docker-compose.yml ├── vault ├── reset.sh ├── vault │ ├── entrypoint.sh │ └── config │ │ ├── vault.hcl │ │ └── init.sh ├── consul │ └── config │ │ └── consul.hcl └── docker-compose.yml ├── redis ├── sentinel │ ├── master.conf │ ├── slave1.conf │ ├── slave2.conf │ ├── sentinel1.conf │ ├── sentinel2.conf │ └── sentinel3.conf ├── scripts │ └── init.sh ├── cluster │ └── cluster.conf ├── docker-compose-single.yml └── docker-compose-replication.yml ├── yapi ├── Dockerfile ├── docker-entrypoint-initdb.d │ └── mongo-init.js └── docker-compose.yml ├── gost ├── docker-compose.yml └── gost.yaml ├── dnsmasq ├── dnsmasq │ └── dnsmasq.conf └── docker-compose.yml ├── transfer └── docker-compose.yml ├── tengine └── docker-compose.yaml ├── tinyproxy ├── tinyproxy.conf └── docker-compose.yml ├── maven └── docker-compose.yml ├── nats ├── docker-compose.yml └── docker-compose-cluster.yml ├── autoheal ├── docker-compose.yaml └── Dockerfile.txt ├── gitea └── docker-compose.yaml ├── nomad ├── consul │ └── config │ │ ├── client.json │ │ └── server.json └── nomad │ ├── jobs │ ├── fabio.nomad │ ├── webservice.nomad │ └── http-echo.nomad │ └── config │ ├── client │ └── client.hcl │ └── server │ └── server.hcl ├── swagger-ui └── docker-compose.yml ├── portainer └── docker-compose.yaml ├── sqlpad └── docker-compose.yml ├── registry ├── docker-compose.yaml └── config.yml ├── gophish └── docker-compose.yml ├── PostgreSQL └── docker-compose.yml ├── pihole ├── adlists.list ├── adblock.sh ├── regex.list └── docker-compose.yml ├── rancher └── docker-compose.yaml ├── consul ├── cluster │ └── config │ │ ├── consul_c1.json │ │ ├── consul_c2.json │ │ ├── consul_s1.json │ │ ├── consul_s2.json │ │ └── consul_s3.json ├── docker-compose_sigle.yml └── docker-compose.yml ├── coredns ├── coredns │ └── conf │ │ └── Corefile └── docker-compose.yml ├── openresty ├── conf │ └── conf.d │ │ └── default.conf ├── docker-compose.yml └── html │ ├── 50x.html │ └── index.html ├── mongodb └── docker-compose-3.4.yml ├── php-fpm └── docker-compose.yml ├── goproxy └── docker-compose.yml ├── m3db └── docker-compose.yml ├── uptime-kuma ├── docker-compose.yaml └── README.md ├── activemq └── docker-compose.yml ├── aliyun-ddns └── docker-compose.yaml ├── kafka-ui ├── docker-compose.yml └── docker-compose-odic.yml ├── sentry ├── .env └── docker-compose.yml ├── zentao └── docker-compose.yml ├── onedev ├── docker-compose.yml └── docker-compose-extDB.yaml ├── bitwardenrs └── 
docker-compose.yml ├── guacamole ├── reset.sh ├── prepare.sh └── nginx │ └── templates │ └── guacamole.conf.template ├── hfish └── docker-compose.yml ├── traefik └── docker-compose.yml ├── nginx └── docker-compose.yml ├── netdata └── docker-compose.yaml ├── timescaledb └── docker-compose.yml ├── drone ├── docker-compose-agent.yaml └── docker-compose.yaml ├── verdaccio └── docker-compose.yaml ├── clickhouse ├── cluster_1S_2R_ch_proxy │ ├── ch-proxy │ │ └── config │ │ │ └── config.yml │ ├── clickhouse-01 │ │ └── users.d │ │ │ └── users.xml │ ├── clickhouse-02 │ │ └── users.d │ │ │ └── users.xml │ ├── clickhouse-keeper-01 │ │ └── config │ │ │ └── keeper_config.xml │ ├── clickhouse-keeper-02 │ │ └── config │ │ │ └── keeper_config.xml │ └── clickhouse-keeper-03 │ │ └── config │ │ └── keeper_config.xml ├── single │ ├── config.d │ │ └── config.xml │ ├── docker-compose.yaml │ └── users.d │ │ └── users.xml ├── log_nginx │ ├── clickhouse │ │ ├── config.d │ │ │ └── config.xml │ │ ├── initdb.d │ │ │ └── nginx_access_log.sql │ │ └── users.d │ │ │ └── users.xml │ ├── vector │ │ └── vector.yaml │ ├── clickvisual │ │ └── clickvisual │ │ │ └── config │ │ │ └── rbac.conf │ └── nginx │ │ └── conf │ │ └── nginx.conf └── README.md ├── healthchecks ├── docker-compose.yaml ├── docker-compose-pgsql.yaml └── env.example ├── focalboard ├── config.json └── docker-compose.yml ├── envoy └── docker-compose.yaml ├── nexus └── docker-compose.yml ├── ewomail └── docker-compose.yml ├── netbox └── Dockerfile ├── LogicalDOC └── docker-compose.yml ├── openldap └── docker-compose.yml ├── minio └── docker-compose.yml ├── nginx-proxy-manager └── docker-compose.yml ├── watchtower └── docker-compose.yaml ├── adguard-home └── docker-compose.yml ├── jenkins └── docker-compose.yml ├── zookeeper └── docker-compose.yml ├── joplin └── docker-compose.yml ├── sonarqube └── docker-compose.yml ├── mindoc └── docker-compose.yml ├── kafka └── docker-compose-sigle.yml ├── ansibles-emaphore └── docker-compose.yml ├── metabase └── docker-compose.yaml ├── flarum └── docker-compose.yml ├── seafile └── docker-compose.yaml └── gitlab └── docker-compose-extDB.yaml
/apollo/apollo_demo/apollo/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/awx/SECRET_KEY:
--------------------------------------------------------------------------------
1 | nir4yYx6Bb4uN/ow98f2tD9Wktm8A/cwgqTgzHLg
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Docker-compose-file
2 | A record of the Docker-compose files I have used
3 |
--------------------------------------------------------------------------------
/grafana/grafana-allstack/mimir/rules/anonymous/rules_alloy.yaml:
--------------------------------------------------------------------------------
1 | {}
2 |
--------------------------------------------------------------------------------
/rabbitmq/cluster/conf/enabled_plugins:
--------------------------------------------------------------------------------
1 | [rabbitmq_management,rabbitmq_prometheus].
2 | -------------------------------------------------------------------------------- /zabbix/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM zabbix/zabbix-server-mysql:centos-5.4.6 2 | 3 | RUN yum install -y python2 -------------------------------------------------------------------------------- /logio/logio_data/log_server.conf: -------------------------------------------------------------------------------- 1 | exports.config = { 2 | host: '0.0.0.0', 3 | port: 28777 4 | } 5 | -------------------------------------------------------------------------------- /mysql/scripts/mysql: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lework/Docker-compose-file/HEAD/mysql/scripts/mysql -------------------------------------------------------------------------------- /awx/redis.conf: -------------------------------------------------------------------------------- 1 | unixsocket /var/run/redis/redis.sock 2 | unixsocketperm 777 3 | port 0 4 | bind 127.0.0.1 5 | -------------------------------------------------------------------------------- /mysql/scripts/mysqldump: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lework/Docker-compose-file/HEAD/mysql/scripts/mysqldump -------------------------------------------------------------------------------- /zabbix/wqy-microhei.ttc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lework/Docker-compose-file/HEAD/zabbix/wqy-microhei.ttc -------------------------------------------------------------------------------- /nacos/env/mysql.env: -------------------------------------------------------------------------------- 1 | MYSQL_ROOT_PASSWORD=root 2 | MYSQL_DATABASE=nacos_devtest 3 | MYSQL_USER=nacos 4 | MYSQL_PASSWORD=nacos -------------------------------------------------------------------------------- /loki/mysql-to-loki/logstash/config/logstash.yml: -------------------------------------------------------------------------------- 1 | http.host: "0.0.0.0" 2 | node.name: logstash 3 | xpack.monitoring.enabled: false -------------------------------------------------------------------------------- /rocket.chat/bash/handlers/helloworld: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Hello.." 3 | sleep 1s; 4 | echo "Sleepy World!" 
5 | 6 | exit 0 7 | -------------------------------------------------------------------------------- /rocket.chat/bash/handlers/date: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | time=$(date "+%Y-%m-%d %H:%M:%S") 4 | 5 | echo "${time}" 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /VictoriaMetrics/cluster/etc/alertmanager/alertmanager.yml: -------------------------------------------------------------------------------- 1 | route: 2 | receiver: blackhole 3 | 4 | receivers: 5 | - name: blackhole 6 | -------------------------------------------------------------------------------- /VictoriaMetrics/sigle/etc/alertmanager/alertmanager.yml: -------------------------------------------------------------------------------- 1 | route: 2 | receiver: blackhole 3 | 4 | receivers: 5 | - name: blackhole 6 | -------------------------------------------------------------------------------- /elasticsearch/stack/logstash.yml: -------------------------------------------------------------------------------- 1 | http.enabled: true 2 | http.host: 0.0.0.0 3 | http.port: 9600-9700 4 | xpack.monitoring.enabled: false 5 | -------------------------------------------------------------------------------- /gogs/backup_gogs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | docker exec gogs bash -c "export USER=git && /app/gogs/gogs backup --target /data/backup/" 5 | -------------------------------------------------------------------------------- /rabbitmq/single/conf/enabled_plugins: -------------------------------------------------------------------------------- 1 | [rabbitmq_federation_management,rabbitmq_management,rabbitmq_mqtt,rabbitmq_stomp,rabbitmq_prometheus]. 
2 | -------------------------------------------------------------------------------- /wordpress/.gitignore: -------------------------------------------------------------------------------- 1 | .env* 2 | .idea/ 3 | certs/ 4 | certs-data/ 5 | logs/ 6 | nginx/ 7 | mysql/ 8 | wordpress/ 9 | !.env_example 10 | !nginx/default* -------------------------------------------------------------------------------- /prometheus/cortex/consul/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "telemetry": { 3 | "prometheus_retention_time": "24h", 4 | "disable_hostname": true 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /prometheus/prometheus/etc/targets/works/work.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "targets": [ 4 | "prometheus-work1:9090", 5 | "prometheus-work2:9090" 6 | ] 7 | } 8 | ] 9 | -------------------------------------------------------------------------------- /memcached/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | memcached: 5 | image: memcached:1.6.10-alpine 6 | ports: 7 | - '11211:11211' 8 | 9 | -------------------------------------------------------------------------------- /loki/mysql-to-loki/logstash/data/mysql-connector-j-9.1.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lework/Docker-compose-file/HEAD/loki/mysql-to-loki/logstash/data/mysql-connector-j-9.1.0.jar -------------------------------------------------------------------------------- /prometheus/prometheus/etc/targets/nodes/node.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "targets": [ 4 | "node1:9100", 5 | "node2:9100", 6 | "node3:9100" 7 | ] 8 | } 9 | ] 10 | -------------------------------------------------------------------------------- /vault/reset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker-compose down 4 | find ./ -name *.log -exec rm -fv {} \; 5 | rm -rf consul/data 6 | 7 | chmod +x {consul,vault}/config/*.sh 8 | docker-compose up -d -------------------------------------------------------------------------------- /awx/environment.sh: -------------------------------------------------------------------------------- 1 | DATABASE_USER=awx 2 | DATABASE_NAME=awx 3 | DATABASE_HOST=postgres 4 | DATABASE_PORT=5432 5 | DATABASE_PASSWORD=awxpass 6 | AWX_ADMIN_USER=admin 7 | AWX_ADMIN_PASSWORD=password 8 | -------------------------------------------------------------------------------- /prometheus/grafana/datasources/Prometheus.json: -------------------------------------------------------------------------------- 1 | { 2 | "name":"prometheus", 3 | "type":"prometheus", 4 | "url":"http://prometheus:9090", 5 | "access":"proxy", 6 | "basicAuth":false 7 | } 8 | -------------------------------------------------------------------------------- /prometheus/thanos/thanos/bucket.yml: -------------------------------------------------------------------------------- 1 | type: S3 2 | config: 3 | bucket: thanos 4 | endpoint: minio:9000 5 | insecure: true 6 | signature_version2: true 7 | access_key: EXAMPLEKEY 8 | secret_key: EXAMPLESECRET 9 | -------------------------------------------------------------------------------- /rabbitmq/grafana/dashboards.yml: 
-------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'rabbitmq' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: true 9 | options: 10 | path: /dashboards 11 | -------------------------------------------------------------------------------- /redis/sentinel/master.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | requirepass 123456 3 | maxclients 1000 4 | maxmemory 256mb 5 | maxmemory-policy volatile-ttl 6 | appendonly yes 7 | aof-use-rdb-preamble yes 8 | 9 | 10 | masterauth 123456 -------------------------------------------------------------------------------- /rocket.chat/bash/handlers/update: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | SELF_PATH="$( dirname "$(readlink -f "$0")" )" 3 | cd $SELF_PATH/../.. 4 | git pull origin master 2>&1 5 | echo "to reload all scripts run: die" 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /yapi/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:12-alpine 2 | 3 | RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories \ 4 | && npm install -g yapi-pro-cli --registry https://registry.npm.taobao.org 5 | 6 | EXPOSE 3000 9090 7 | -------------------------------------------------------------------------------- /VictoriaMetrics/sigle/etc/grafana/provisioning/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: Prometheus 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | options: 9 | path: /var/lib/grafana/dashboards 10 | -------------------------------------------------------------------------------- /VictoriaMetrics/cluster/etc/grafana/provisioning/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: Prometheus 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | options: 9 | path: /var/lib/grafana/dashboards 10 | -------------------------------------------------------------------------------- /redis/sentinel/slave1.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | requirepass 123456 3 | maxclients 1000 4 | maxmemory 256mb 5 | maxmemory-policy volatile-ttl 6 | appendonly yes 7 | aof-use-rdb-preamble yes 8 | 9 | replicaof 127.0.0.1 6379 10 | masterauth 123456 -------------------------------------------------------------------------------- /redis/sentinel/slave2.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | requirepass 123456 3 | maxclients 1000 4 | maxmemory 256mb 5 | maxmemory-policy volatile-ttl 6 | appendonly yes 7 | aof-use-rdb-preamble yes 8 | 9 | replicaof 127.0.0.1 6379 10 | masterauth 123456 -------------------------------------------------------------------------------- /gost/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | gost: 5 | container_name: gost 6 | image: gogost/gost 7 | restart: always 8 | volumes: 9 | - ./gost.yaml:/etc/gost/gost.yaml 10 | ports: 11 | - '443:443' -------------------------------------------------------------------------------- 
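The redis master/replica configs above share the password 123456 and point each replica at 127.0.0.1 as a placeholder master address (the repo's redis/scripts/init.sh, shown further below, rewrites that placeholder to the host IP before the stack starts). A quick way to confirm the replication link once the containers are up — a minimal sketch, assuming the master and replicas are published on the host at hypothetical ports 6379, 6380 and 6381:

```bash
# Master should report role:master and connected_slaves:2 (password from master.conf).
redis-cli -h 127.0.0.1 -p 6379 -a 123456 INFO replication | grep -E 'role|connected_slaves'

# A replica should report role:slave and the master_host written by init.sh.
redis-cli -h 127.0.0.1 -p 6380 -a 123456 INFO replication | grep -E 'role|master_host'
```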
/VictoriaMetrics/cluster/etc/grafana/provisioning/datasources/datasource.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: VictoriaMetrics 5 | type: prometheus 6 | access: proxy 7 | url: http://victoriametrics:8428 8 | isDefault: true 9 | -------------------------------------------------------------------------------- /VictoriaMetrics/sigle/etc/grafana/provisioning/datasources/datasource.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: VictoriaMetrics 5 | type: prometheus 6 | access: proxy 7 | url: http://victoriametrics:8428 8 | isDefault: true 9 | -------------------------------------------------------------------------------- /prometheus/consul-template/consul-rules.ctmpl: -------------------------------------------------------------------------------- 1 | groups: 2 | {{ range $key, $pairs := tree "prometheus/nn1/alerts" | byKey }} 3 | - name: {{ $key }} 4 | rules: 5 | {{ range $pair := $pairs }} - alert: {{ .Key }} 6 | {{ .Value | indent 4 }} 7 | {{ end }}{{ end }} 8 | -------------------------------------------------------------------------------- /elasticsearch/xpack/instances.yml: -------------------------------------------------------------------------------- 1 | instances: 2 | - name: es01 3 | dns: 4 | - es01 5 | - localhost 6 | ip: 7 | - 127.0.0.1 8 | 9 | - name: es02 10 | dns: 11 | - es02 12 | - localhost 13 | ip: 14 | - 127.0.0.1 -------------------------------------------------------------------------------- /nacos/env/nacos-ip.env: -------------------------------------------------------------------------------- 1 | #nacos dev env 2 | NACOS_SERVERS=172.16.238.10:8848 172.16.238.11:8848 172.16.238.12:8848 3 | MYSQL_SERVICE_HOST=mysql 4 | MYSQL_SERVICE_DB_NAME=nacos_devtest 5 | MYSQL_SERVICE_PORT=3306 6 | MYSQL_SERVICE_USER=nacos 7 | MYSQL_SERVICE_PASSWORD=nacos -------------------------------------------------------------------------------- /prometheus/cortex/grafana/dashboards/cortex.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'cortex' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: true 9 | editable: true 10 | options: 11 | path: /etc/grafana/dashboards 12 | -------------------------------------------------------------------------------- /prometheus/thanos/grafana/dashboards/thanos.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'thanos' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: true 9 | editable: true 10 | options: 11 | path: /etc/grafana/dashboards 12 | -------------------------------------------------------------------------------- /nacos/env/nacos-standlone-mysql.env: -------------------------------------------------------------------------------- 1 | PREFER_HOST_MODE=hostname 2 | MODE=standalone 3 | SPRING_DATASOURCE_PLATFORM=mysql 4 | MYSQL_SERVICE_HOST=mysql 5 | MYSQL_SERVICE_DB_NAME=nacos_devtest 6 | MYSQL_SERVICE_PORT=3306 7 | MYSQL_SERVICE_USER=nacos 8 | MYSQL_SERVICE_PASSWORD=nacos -------------------------------------------------------------------------------- /redis/sentinel/sentinel1.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | sentinel monitor lerep 127.0.0.1 6379 2 3 | sentinel down-after-milliseconds lerep 3000 4 
| sentinel failover-timeout lerep 6000 5 | sentinel parallel-syncs lerep 1 6 | sentinel auth-pass lerep 123456 7 | requirepass 123456 8 | -------------------------------------------------------------------------------- /redis/sentinel/sentinel2.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | sentinel monitor lerep 127.0.0.1 6379 2 3 | sentinel down-after-milliseconds lerep 3000 4 | sentinel failover-timeout lerep 6000 5 | sentinel parallel-syncs lerep 1 6 | sentinel auth-pass lerep 123456 7 | requirepass 123456 8 | -------------------------------------------------------------------------------- /redis/sentinel/sentinel3.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | sentinel monitor lerep 127.0.0.1 6379 2 3 | sentinel down-after-milliseconds lerep 3000 4 | sentinel failover-timeout lerep 6000 5 | sentinel parallel-syncs lerep 1 6 | sentinel auth-pass lerep 123456 7 | requirepass 123456 8 | -------------------------------------------------------------------------------- /vault/vault/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/dumb-init /bin/sh 2 | 3 | set -e 4 | 5 | while ping -c1 consul_init &>/dev/null; do sleep 1;echo "Wait Container"; done 6 | 7 | echo "Container consul_init finished!" 8 | 9 | exec /usr/local/bin/docker-entrypoint.sh "$@" 10 | -------------------------------------------------------------------------------- /gogs/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | gogs: 4 | container_name: gogs 5 | image: gogs/gogs 6 | volumes: 7 | - ./gogs_data:/data 8 | - "/etc/localtime:/etc/localtime" 9 | ports: 10 | - "3000:3000" 11 | restart: always 12 | -------------------------------------------------------------------------------- /nacos/env/nacos-hostname.env: -------------------------------------------------------------------------------- 1 | #nacos dev env 2 | PREFER_HOST_MODE=hostname 3 | NACOS_SERVERS=nacos1:8848 nacos2:8848 nacos3:8848 4 | MYSQL_SERVICE_HOST=mysql 5 | MYSQL_SERVICE_DB_NAME=nacos_devtest 6 | MYSQL_SERVICE_PORT=3306 7 | MYSQL_SERVICE_USER=nacos 8 | MYSQL_SERVICE_PASSWORD=nacos -------------------------------------------------------------------------------- /dnsmasq/dnsmasq/dnsmasq.conf: -------------------------------------------------------------------------------- 1 | #listen on container interface 2 | listen-address=0.0.0.0 3 | interface=eth0 4 | user=root 5 | 6 | #only use these namesservers 7 | no-resolv 8 | server=223.5.5.5 9 | server=223.6.6.6 10 | 11 | #static entries 12 | address=/lework.com/192.168.77.1 13 | -------------------------------------------------------------------------------- /nacos/env/nacos-embedded.env: -------------------------------------------------------------------------------- 1 | #nacos dev env 2 | PREFER_HOST_MODE=hostname 3 | EMBEDDED_STORAGE=embedded 4 | NACOS_SERVERS=nacos1:8848 nacos2:8848 nacos3:8848 5 | MYSQL_SERVICE_DB_NAME=nacos_devtest 6 | MYSQL_SERVICE_PORT=3306 7 | MYSQL_SERVICE_USER=nacos 8 | MYSQL_SERVICE_PASSWORD=nacos 9 | -------------------------------------------------------------------------------- /transfer/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | transfer: 4 | container_name: transfer 5 | image: dutchcoders/transfer.sh:latest 6 | command: 
--provider local --basedir /data/ --temp-path /data/ 7 | restart: always 8 | volumes: 9 | - ./transfer-data:/data -------------------------------------------------------------------------------- /redis/scripts/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | SHELL_FOLDER=$(dirname $(readlink -f "$0")) 5 | ip=$(ip -4 route get 8.8.8.8 2>/dev/null | head -1 | awk '{print $7}') 6 | 7 | 8 | 9 | find $SHELL_FOLDER/.. -type f -name *.conf -o -name *.yml -exec sed -i "s#127.0.0.1#$ip#g" {} \; 10 | -------------------------------------------------------------------------------- /tengine/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | 3 | services: 4 | tengine: 5 | container_name: tengine 6 | image: lework/tengine:2.3.3 7 | restart: always 8 | ports: 9 | - "80:80" 10 | - "443:443" 11 | healthcheck: 12 | test: wget -q -O - localhost -------------------------------------------------------------------------------- /tinyproxy/tinyproxy.conf: -------------------------------------------------------------------------------- 1 | Port 8888 2 | Listen 0.0.0.0 3 | 4 | Timeout 600 5 | 6 | Allow 127.0.0.1 7 | Allow 10.0.0.0/8 8 | Allow 172.16.0.0/16 9 | 10 | MaxClients 100 11 | MinSpareServers 2 12 | MaxSpareServers 5 13 | StartServers 2 14 | MaxRequestsPerChild 0 15 | 16 | DisableViaHeader Yes -------------------------------------------------------------------------------- /prometheus/cortex/grafana/datasources/Prometheus.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: prometheus 5 | type: prometheus 6 | access: proxy 7 | orgId: 1 8 | url: http://prometheus:9090 9 | database: timeseries 10 | isDefault: true 11 | editable: false 12 | version: 1 13 | -------------------------------------------------------------------------------- /prometheus/thanos/grafana/datasources/Prometheus.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: prometheus 5 | type: prometheus 6 | access: proxy 7 | orgId: 1 8 | url: http://query0:10902 9 | database: timeseries 10 | isDefault: true 11 | editable: false 12 | version: 1 13 | -------------------------------------------------------------------------------- /maven/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | maven: 5 | container_name: maven 6 | image: maven:3.6.1-jdk-8-slim 7 | volumes: 8 | - /etc/localtime:/etc/localtime 9 | - ./settings.xml:/usr/share/maven/conf/settings.xml 10 | - "./m2-repository:/tmp/.m2/repository" 11 | -------------------------------------------------------------------------------- /nats/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | networks: 4 | nats: 5 | name: nats 6 | 7 | services: 8 | nats: 9 | image: nats:2.2.6-alpine3.13 10 | ports: 11 | - "4222:4222" 12 | - "8222:8222" 13 | - "6222:6222" 14 | restart: unless-stopped 15 | networks: ["nats"] -------------------------------------------------------------------------------- /vault/vault/config/vault.hcl: -------------------------------------------------------------------------------- 1 | storage "consul" { 2 | address = "consul:8500" 3 | path = "vault" 4 | token = "48cdeff7-8624-c0ae-cd0e-bc39bd93e857" 5 | } 6 | 7 | 
listener "tcp" { 8 | address = "0.0.0.0:8200" 9 | tls_disable = 1 10 | } 11 | 12 | ui = true 13 | log_level = "info" 14 | -------------------------------------------------------------------------------- /autoheal/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | 3 | services: 4 | autoheal: 5 | restart: always 6 | image: willfarrell/autoheal 7 | container_name: autoheal 8 | environment: 9 | - AUTOHEAL_CONTAINER_LABEL=all 10 | volumes: 11 | - /var/run/docker.sock:/var/run/docker.sock 12 | -------------------------------------------------------------------------------- /gitea/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | gitea: 4 | container_name: gitea 5 | image: gitea/gitea:1.8.3 6 | volumes: 7 | - ./gitea_data:/data 8 | - /etc/localtime:/etc/localtime 9 | ports: 10 | - "13000:3000" 11 | - "10022:22" 12 | restart: always 13 | -------------------------------------------------------------------------------- /grafana/grafana-allstack/grafana/etc/provisioning/dashboards/dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'dashboards' 5 | orgId: 1 6 | type: file 7 | disableDeletion: false 8 | editable: false 9 | updateIntervalSeconds: 10 10 | options: 11 | path: /var/lib/grafana/dashboards/ 12 | -------------------------------------------------------------------------------- /nomad/consul/config/client.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": false, 3 | "datacenter": "dc1", 4 | "data_dir": "/consul/data", 5 | "bind_addr": "0.0.0.0", 6 | "client_addr": "0.0.0.0", 7 | "bootstrap_expect": 0, 8 | "retry_join": ["consul_s1", "consul_s2", "consul_s3"], 9 | "log_level": "INFO" 10 | } 11 | 12 | -------------------------------------------------------------------------------- /swagger-ui/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | swagger-ui: 5 | container_name: swagger-ui 6 | image: swaggerapi/swagger-ui:v4.14.0 7 | environment: 8 | - TZ=Asia/Shanghai 9 | volumes: 10 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 11 | restart: always -------------------------------------------------------------------------------- /apollo/.env: -------------------------------------------------------------------------------- 1 | 2 | VERSION=1.8.1 3 | 4 | 5 | LOG_BASE_DIR=./apollo_log 6 | DATA_BASE_DIR=./apollo_data 7 | 8 | IP_ADDRESS=192.168.77.136 9 | 10 | CONFIG_PORT_DEV=8080 11 | CONFIG_PORT_PRO=8081 12 | 13 | ADMIN_PORT_DEV=8090 14 | ADMIN_PORT_PRO=8091 15 | 16 | DATASOURCE_USERNAME=root 17 | DATASOURCE_PASSWORD=root 18 | -------------------------------------------------------------------------------- /portainer/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | portainer: 4 | container_name: portainer 5 | image: /portainer:1.24.2-alpine 6 | volumes: 7 | - ./portainer_data:/data 8 | - /etc/localtime:/etc/localtime 9 | - /var/run/docker.sock:/var/run/docker.sock 10 | restart: always 11 | -------------------------------------------------------------------------------- /tinyproxy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | tinyproxy: 3 | 
container_name: tinyproxy
4 | image: ajoergensen/tinyproxy
5 | restart: always
6 | volumes:
7 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
8 | - ./tinyproxy.conf:/etc/tinyproxy/tinyproxy.conf:ro
9 | ports:
10 | - '8888:8888'
--------------------------------------------------------------------------------
/logio/logio_data/harvester.conf:
--------------------------------------------------------------------------------
1 | exports.config = {
2 | nodeName: "docker1",
3 | logStreams: {
4 | nginx: [
5 | "/var/log/openresty/access.log",
6 | "/var/log/openresty/error.log"
7 | ]
8 | },
9 | server: {
10 | host: 'logio_server',
11 | port: 28777
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/nomad/consul/config/server.json:
--------------------------------------------------------------------------------
1 | {
2 | "server": true,
3 | "datacenter": "dc1",
4 | "data_dir": "/consul/data",
5 | "bind_addr": "0.0.0.0",
6 | "client_addr": "0.0.0.0",
7 | "bootstrap_expect": 3,
8 | "retry_join": ["consul_s1", "consul_s2", "consul_s3"],
9 | "ui": true,
10 | "log_level": "INFO"
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/sqlpad/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | sqlpad:
4 | container_name: sqlpad
5 | image: sqlpad/sqlpad:latest
6 | environment:
7 | - SQLPAD_ADMIN=admin
8 | - SQLPAD_ADMIN_PASSWORD=admin
9 | ports:
10 | - "3001:3000"
11 | restart: always
12 | volumes:
13 | - ./sqlpad-data:/var/lib/sqlpad
--------------------------------------------------------------------------------
/grafana/grafana-allstack/grafana/etc/provisioning/plugins/oncall.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | apps:
4 | - type: grafana-oncall-app
5 | name: grafana-oncall-app
6 | enabled: true
7 | jsonData:
8 | stackId: 5
9 | orgId: 100
10 | onCallApiUrl: http://host.docker.internal:8080/
11 | grafanaUrl: http://grafana:3000/
12 |
--------------------------------------------------------------------------------
/registry/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | registry:
4 | image: registry:2.7.1
5 | container_name: registry
6 | ports:
7 | - 5000:5000
8 | volumes:
9 | - ./registry_data:/var/lib/registry
10 | - ./config.yml:/etc/docker/registry/config.yml
11 | - /etc/localtime:/etc/localtime
12 | restart: always
13 |
--------------------------------------------------------------------------------
/awx/README.md:
--------------------------------------------------------------------------------
1 |
2 | Start
3 | ```bash
4 | mkdir redis_socket memcached_socket
5 | chmod 777 redis_socket memcached_socket
6 | chmod 0666 redis.conf
7 | chmod 0600 SECRET_KEY
8 | docker-compose up -d
9 | ```
10 |
11 |
12 | After startup, update the CA trust store
13 |
14 | ```bash
15 | docker exec awx_web '/usr/bin/update-ca-trust'
16 | docker exec awx_task '/usr/bin/update-ca-trust'
17 | ```
--------------------------------------------------------------------------------
/gophish/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | gophish:
4 | container_name: gophish
5 | image: gophish/gophish:latest
6 | volumes:
7 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
8 | ports:
9 | - "3333:3333"
10 | - "8003:80"
11 | - "8004:8080"
12 |
restart: always 13 | 14 | # https://yourip:3333 15 | -------------------------------------------------------------------------------- /grafana/grafana-allstack/mimir/rules/anonymous/rules_grafana.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: grafana_rules 3 | rules: 4 | - expr: | 5 | sum by (namespace, job, handler, status_code) (rate(grafana_http_request_duration_seconds_count[5m])) 6 | record: namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m 7 | -------------------------------------------------------------------------------- /PostgreSQL/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | postgres: 4 | image: postgres:15.4 5 | container_name: postgres 6 | restart: always 7 | environment: 8 | - POSTGRES_USER=postgres 9 | - POSTGRES_PASSWORD=12345678 10 | ports: 11 | - '5432:5432' 12 | volumes: 13 | - ./postgres_data:/var/lib/postgresql/data -------------------------------------------------------------------------------- /pihole/adlists.list: -------------------------------------------------------------------------------- 1 | https://raw.githubusercontent.com/mkb2091/blockconvert/master/output/hosts.txt 2 | https://raw.githubusercontent.com/mkb2091/blockconvert/master/output/domains.txt 3 | https://raw.githubusercontent.com/chadmayfield/pihole-blocklists/master/lists/pi_blocklist_porn_all.list 4 | https://raw.githubusercontent.com/mhhakim/pihole-blocklist/master/list.txt 5 | -------------------------------------------------------------------------------- /dnsmasq/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | dnsmasq: 5 | image: andyshinn/dnsmasq 6 | container_name: dnsmasq 7 | cap_add: 8 | - NET_ADMIN 9 | command: --log-facility=- 10 | volumes: 11 | - ./dnsmasq/dnsmasq.conf:/etc/dnsmasq.conf 12 | - ./dnsmasq/dnsmasq.d:/etc/dnsmasq.d 13 | - /etc/localtime:/etc/localtime:ro -------------------------------------------------------------------------------- /rancher/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | rancher: 5 | container_name: rancher 6 | image: rancher/rancher:v2.6-head 7 | restart: unless-stopped 8 | privileged: true 9 | volumes: 10 | - ./rancher_data:/var/lib/rancher 11 | environment: 12 | - TZ=Asia/Shanghai 13 | ports: 14 | - "80:80" 15 | - "443:443" -------------------------------------------------------------------------------- /awx/credentials.py: -------------------------------------------------------------------------------- 1 | DATABASES = { 2 | 'default': { 3 | 'ATOMIC_REQUESTS': True, 4 | 'ENGINE': 'django.db.backends.postgresql', 5 | 'NAME': "awx", 6 | 'USER': "awx", 7 | 'PASSWORD': "awxpass", 8 | 'HOST': "postgres", 9 | 'PORT': "5432", 10 | } 11 | } 12 | 13 | BROADCAST_WEBSOCKET_SECRET = "M2h6aEdPbXZSQ2I1YWpsUVV3c0Y=" 14 | -------------------------------------------------------------------------------- /consul/cluster/config/consul_c1.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": false, 3 | "datacenter": "dc1", 4 | "node_name": "consul_c1", 5 | "data_dir": "/consul/c1_data", 6 | "bind_addr": "0.0.0.0", 7 | "client_addr": "0.0.0.0", 8 | "bootstrap_expect": 0, 9 | "retry_join": ["consul_s1", "consul_s2", "consul_s3"], 10 | "log_level": "DEBUG", 
11 | "acl_enforce_version_8": false 12 | } 13 | -------------------------------------------------------------------------------- /consul/cluster/config/consul_c2.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": false, 3 | "datacenter": "dc1", 4 | "node_name": "consul_c2", 5 | "data_dir": "/consul/c2_data", 6 | "bind_addr": "0.0.0.0", 7 | "client_addr": "0.0.0.0", 8 | "bootstrap_expect": 0, 9 | "retry_join": ["consul_s1", "consul_s2", "consul_s3"], 10 | "log_level": "DEBUG", 11 | "acl_enforce_version_8": false 12 | } 13 | -------------------------------------------------------------------------------- /coredns/coredns/conf/Corefile: -------------------------------------------------------------------------------- 1 | . { 2 | etcd lianmi.local { 3 | stubzones 4 | path /lianmi 5 | endpoint http://coredns-etcd:2379 6 | upstream 223.5.5.5:53 223.6.6.6:53 7 | } 8 | prometheus 0.0.0.0:19153 9 | health 0.0.0.0:18080 10 | cache 160 lianmi.local 11 | log 12 | errors 13 | proxy . 223.5.5.5:53 223.6.6.6:53 14 | } 15 | -------------------------------------------------------------------------------- /mysql/docker-compose-5.7.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | mysql: 4 | container_name: mysql 5 | image: mysql:8.0.32-debian 6 | volumes: 7 | - "./mysql_data:/var/lib/mysql" 8 | - "/usr/share/zoneinfo/Asia/Shanghai:/etc/localtime" 9 | ports: 10 | - "3306:3306" 11 | restart: always 12 | environment: 13 | MYSQL_ROOT_PASSWORD: irH2YmjsoJPzpNgC -------------------------------------------------------------------------------- /openresty/conf/conf.d/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | 6 | location / { 7 | root /usr/local/openresty/nginx/html; 8 | index index.html index.htm; 9 | } 10 | 11 | error_page 500 502 503 504 /50x.html; 12 | location = /50x.html { 13 | root /usr/local/openresty/nginx/html; 14 | } 15 | } -------------------------------------------------------------------------------- /consul/docker-compose_sigle.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | consul: 4 | image: consul:latest 5 | container_name: consul 6 | hostname: consul 7 | volumes: 8 | - "./consul_data:/consul/data" 9 | - "/etc/localtime:/etc/localtime:ro" 10 | ports: 11 | - "8300:8300" 12 | - "8400:8400" 13 | - "8500:8500" 14 | - "53:53" 15 | restart: always -------------------------------------------------------------------------------- /mongodb/docker-compose-3.4.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | mongo: 4 | container_name: mongo 5 | image: mongo:3.4 6 | volumes: 7 | - "./mongo_data:/data" 8 | - "/etc/localtime:/etc/localtime" 9 | ports: 10 | - "27017:27017" 11 | restart: always 12 | environment: 13 | MONGO_INITDB_ROOT_USERNAME: root 14 | MONGO_INITDB_ROOT_PASSWORD: 123456 -------------------------------------------------------------------------------- /prometheus/prometheus/etc/alerts/prometheus.rules: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: prometheus 3 | rules: 4 | - alert: ExporterDown 5 | expr: up == 0 6 | for: 5m 7 | labels: 8 | severity: warning 9 | annotations: 10 | summary: "Exporter down (instance {{ $labels.instance }})" 11 | description: 
"Prometheus exporter down\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 12 | -------------------------------------------------------------------------------- /VictoriaMetrics/cluster/etc/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 10s 3 | 4 | scrape_configs: 5 | - job_name: 'vmagent' 6 | static_configs: 7 | - targets: ['vmagent:8429'] 8 | - job_name: 'vmalert' 9 | static_configs: 10 | - targets: ['vmalert:8880'] 11 | - job_name: 'victoriametrics' 12 | static_configs: 13 | - targets: ['victoriametrics:8428'] 14 | -------------------------------------------------------------------------------- /VictoriaMetrics/sigle/etc/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 10s 3 | 4 | scrape_configs: 5 | - job_name: 'vmagent' 6 | static_configs: 7 | - targets: ['vmagent:8429'] 8 | - job_name: 'vmalert' 9 | static_configs: 10 | - targets: ['vmalert:8880'] 11 | - job_name: 'victoriametrics' 12 | static_configs: 13 | - targets: ['victoriametrics:8428'] 14 | -------------------------------------------------------------------------------- /consul/cluster/config/consul_s1.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": true, 3 | "node_name": "consul_s1", 4 | "datacenter": "dc1", 5 | "data_dir": "/consul/s1_data", 6 | "bind_addr": "0.0.0.0", 7 | "client_addr": "0.0.0.0", 8 | "bootstrap_expect": 3, 9 | "retry_join": ["consul_s1", "consul_s2", "consul_s3"], 10 | "ui": true, 11 | "log_level": "INFO", 12 | "acl_enforce_version_8": false 13 | } 14 | -------------------------------------------------------------------------------- /consul/cluster/config/consul_s2.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": true, 3 | "node_name": "consul_s2", 4 | "datacenter": "dc1", 5 | "data_dir": "/consul/s2_data", 6 | "bind_addr": "0.0.0.0", 7 | "client_addr": "0.0.0.0", 8 | "bootstrap_expect": 3, 9 | "retry_join": ["consul_s1", "consul_s2", "consul_s3"], 10 | "ui": true, 11 | "log_level": "INFO", 12 | "acl_enforce_version_8": false 13 | } 14 | -------------------------------------------------------------------------------- /consul/cluster/config/consul_s3.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": true, 3 | "node_name": "consul_s3", 4 | "datacenter": "dc1", 5 | "data_dir": "/var/consul/data", 6 | "bind_addr": "0.0.0.0", 7 | "client_addr": "0.0.0.0", 8 | "bootstrap_expect": 3, 9 | "retry_join": ["consul_s1", "consul_s2", "consul_s3"], 10 | "ui": true, 11 | "log_level": "INFO", 12 | "acl_enforce_version_8": false 13 | } 14 | -------------------------------------------------------------------------------- /php-fpm/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | php_fpm: 4 | container_name: php_fpm 5 | hostname: php_fpm 6 | image: lework/php-fpm:7.1.24-fpm-alpine3.8 7 | network_mode: docker 8 | privileged: true 9 | volumes: 10 | - "/Microservices:/Microservices" 11 | ports: 12 | - "9000:9000" 13 | dns: 14 | - 172.16.134.93 15 | restart: always 16 | -------------------------------------------------------------------------------- /grafana/grafana-allstack/pyroscope/config.yaml: -------------------------------------------------------------------------------- 1 | target: 
"all" 2 | 3 | server: 4 | http_listen_port: 4040 5 | log_level: info 6 | log_format: json 7 | 8 | limits: 9 | max_query_length: 3d 10 | 11 | pyroscopedb: 12 | data_path: "/tmp/pyroscope" 13 | max_block_duration: 1h 14 | min_free_disk_gb: 100 15 | min_disk_available_percentage: 50 16 | 17 | analytics: 18 | reporting_enabled: false 19 | -------------------------------------------------------------------------------- /pihole/adblock.sh: -------------------------------------------------------------------------------- 1 | curl -s -L https://easylist-downloads.adblockplus.org/easylistchina+easylist.txt https://easylist-downloads.adblockplus.org/malwaredomains_full.txt https://easylist-downloads.adblockplus.org/fanboy-social.txt > adblock.unsorted 2 | sort -u adblock.unsorted | grep ^\|\|.*\^$ | grep -v \/ > adblock.sorted 3 | sed 's/[\|^]//g' < adblock.sorted > adblock.hosts 4 | rm adblock.unsorted adblock.sorted 5 | -------------------------------------------------------------------------------- /vault/consul/config/consul.hcl: -------------------------------------------------------------------------------- 1 | 2 | datacenter = "dc1" 3 | data_dir = "/consul/data" 4 | encrypt = "pW6hefWywYZp+6o0b3IzToYAR/EdX0p0pSF/VRsXoAw=" 5 | 6 | ui = true 7 | server = true 8 | bootstrap_expect = 1 9 | client_addr = "0.0.0.0" 10 | bind_Addr = "0.0.0.0" 11 | 12 | node_name = "node1" 13 | 14 | acl { 15 | enabled = true 16 | default_policy = "deny" 17 | enable_token_persistence = true 18 | } 19 | -------------------------------------------------------------------------------- /goproxy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | goproxy: 4 | container_name: goproxy 5 | image: goproxy/goproxy:latest 6 | command: "-listen=0.0.0.0:80 -cacheDir=/ext -proxy https://goproxy.cn -exclude 'gitlab.test.com'" 7 | ports: 8 | - "80:8081" 9 | restart: always 10 | volumes: 11 | - ./goproxy-data:/ext 12 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime -------------------------------------------------------------------------------- /m3db/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | m3db: 5 | image: quay.io/m3db/m3dbnode:latest 6 | container_name: m3db 7 | hostname: m3db 8 | volumes: 9 | - /etc/localtime:/etc/localtime:ro 10 | - ./m3db_data:/var/lib/m3db 11 | cap_add: 12 | - SYS_RESOURCE 13 | ports: 14 | - 7201:7201 15 | - 7203:7203 16 | - 9003:9003 17 | restart: always -------------------------------------------------------------------------------- /elasticsearch/stack/kibana.yml: -------------------------------------------------------------------------------- 1 | server.name: kibana 2 | server.host: "0.0.0.0" 3 | 4 | # Elasticsearch Connection 5 | elasticsearch.hosts: ${ELASTICSEARCH_HOSTS} 6 | 7 | #X-Pack security credentials 8 | elasticsearch.username: "${ELASTICSEARCH_USERNAME}" 9 | elasticsearch.password: "${ELASTICSEARCH_PASSWORD}" 10 | 11 | ## Misc 12 | elasticsearch.requestTimeout: 90000 13 | monitoring.kibana.collection.enabled: false 14 | -------------------------------------------------------------------------------- /uptime-kuma/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | uptime-kuma: 5 | container_name: uptime-kuma 6 | image: louislam/uptime-kuma:1.23.13-alpine 7 | restart: always 8 | volumes: 9 | - ./uptime_data:/app/data 10 | - 
"/usr/share/zoneinfo/Asia/Shanghai:/etc/localtime" 11 | environment: 12 | - TZ=Asia/Shanghai 13 | - LANG=zh_CN.UTF-8 14 | ports: 15 | - "3001:3001" -------------------------------------------------------------------------------- /activemq/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | activemq: 4 | container_name: activemq 5 | hostname: activemq 6 | image: lework/activemq:5.15.4 7 | network_mode: docker 8 | volumes: 9 | - "./activemq.xml:/usr/local/activemq/conf/activemq.xml" 10 | - "./activemq_data:/usr/local/activemq/data" 11 | ports: 12 | - "61616:61616" 13 | - "8161:8161" 14 | restart: always 15 | -------------------------------------------------------------------------------- /aliyun-ddns/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | aliyun-ddns: 5 | image: sanjusss/aliyun-ddns:0.2.7.3-linux-amd64 6 | restart: always 7 | environment: 8 | - AKID=[ALIYUN's AccessKey-ID] 9 | - AKSCT=[ALIYUN's AccessKey-Secret] 10 | - DOMAIN=xx.test.cn 11 | - REDO=600 12 | - TTL=600 13 | - TIMEZONE=8.0 14 | - TYPE=A 15 | 16 | 17 | # https://github.com/sanjusss/aliyun-ddns -------------------------------------------------------------------------------- /wordpress/https-http.md: -------------------------------------------------------------------------------- 1 | WordPress 官方默认 Docker 是基于 Apache 来做的,但为了自动加上 SSL,我用了一个 Nginx 容器来做反向代理。于是问题出现了:用 HTTPS 访问 Nginx,生成出来的网页里面所有生成的 URL 都是 HTTP,而不是 HTTPS。 2 | 3 | 4 | 5 | 解决办法:在 wp-config.php 里面加上这样几句话: 6 | 7 | ``` 8 | if((!empty( $_SERVER['HTTP_X_FORWARDED_HOST'])) || (!empty( $_SERVER['HTTP_X_FORWARDED_FOR'])) ) { 9 | $_SERVER['HTTP_HOST'] = $_SERVER['HTTP_X_FORWARDED_HOST']; 10 | $_SERVER['HTTPS'] = 'on'; 11 | } 12 | ``` -------------------------------------------------------------------------------- /kafka-ui/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | kafka-ui: 4 | image: provectuslabs/kafka-ui:v0.4.0 5 | container_name: kafka-ui 6 | ports: 7 | - "8080:8080" 8 | environment: 9 | - KAFKA_CLUSTERS_0_NAME=test 10 | - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=127.0.0.1:9092 11 | - KAFKA_CLUSTERS_0_READONLY=true 12 | volumes: 13 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 14 | restart: always -------------------------------------------------------------------------------- /sentry/.env: -------------------------------------------------------------------------------- 1 | SENTRY_POSTGRES_HOST=db 2 | SENTRY_POSTGRES_PORT=5432 3 | SENTRY_DB_USER=sentry 4 | SENTRY_DB_PASSWORD=secret 5 | SENTRY_REDIS_HOST=redis 6 | SENTRY_REDIS_PORT=6379 7 | SENTRY_SECRET_KEY="504^xe3r+t6&!r3uvz5#pn2)__3q2kg1j9k2@=%-k_j66v&o&q" 8 | SENTRY_EMAIL_HOST=smtp.exmail.qq.com 9 | SENTRY_EMAIL_PORT=587 10 | SENTRY_EMAIL_USER=sentry@test.com 11 | SENTRY_EMAIL_PASSWORD=123456 12 | SENTRY_EMAIL_USE_TLS=true 13 | SENTRY_SERVER_EMAIL=sentry@test.com 14 | -------------------------------------------------------------------------------- /zentao/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zentao: 4 | container_name: zentao 5 | image: easysoft/zentao:12.5.3 6 | volumes: 7 | - "./zentaopms:/www/zentaopms" 8 | - "./mysql_data:/var/lib/mysql" 9 | - "/etc/localtime:/etc/localtime" 10 | ports: 11 | - "8080:80" 12 | restart: on-failure 13 | environment: 14 
| MYSQL_ROOT_PASSWORD: 123456 15 | 16 | ## LDAP https://github.com/ShoJinto/zentao-ldap -------------------------------------------------------------------------------- /registry/config.yml: -------------------------------------------------------------------------------- 1 | version: 0.1 2 | log: 3 | level: debug 4 | fields: 5 | service: registry 6 | storage: 7 | cache: 8 | blobdescriptor: inmemory 9 | filesystem: 10 | rootdirectory: /var/lib/registry 11 | http: 12 | addr: :5000 13 | headers: 14 | X-Content-Type-Options: [nosniff] 15 | proxy: 16 | remoteurl: http://hub-mirror.c.163.com 17 | health: 18 | storagedriver: 19 | enabled: true 20 | interval: 10s 21 | threshold: 3 22 | -------------------------------------------------------------------------------- /loki/s3-config/promtail-gateway.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | server: 3 | http_listen_port: 9080 4 | grpc_listen_port: 0 5 | 6 | positions: 7 | filename: /tmp/positions.yaml 8 | 9 | clients: 10 | - url: http://loki-gateway:3100/loki/api/v1/push 11 | tenant_id: tenant1 12 | 13 | scrape_configs: 14 | - job_name: system 15 | static_configs: 16 | - targets: 17 | - localhost 18 | labels: 19 | job: varlogs 20 | __path__: /var/log/*log -------------------------------------------------------------------------------- /autoheal/Dockerfile.txt: -------------------------------------------------------------------------------- 1 | FROM alpine:3.13.5 2 | 3 | RUN apk add --no-cache curl jq 4 | 5 | COPY docker-entrypoint / 6 | ENTRYPOINT ["/docker-entrypoint"] 7 | 8 | ENV AUTOHEAL_CONTAINER_LABEL=autoheal \ 9 | AUTOHEAL_START_PERIOD=0 \ 10 | AUTOHEAL_INTERVAL=5 \ 11 | AUTOHEAL_DEFAULT_STOP_TIMEOUT=10 \ 12 | DOCKER_SOCK=/var/run/docker.sock \ 13 | CURL_TIMEOUT=30 14 | 15 | HEALTHCHECK --interval=5s CMD pgrep -f autoheal || exit 1 16 | 17 | CMD ["autoheal"] 18 | -------------------------------------------------------------------------------- /loki/ha-memberlist-config/promtail-gateway.yaml: -------------------------------------------------------------------------------- 1 | server: 2 | http_listen_port: 9080 3 | grpc_listen_port: 0 4 | log_level: "debug" 5 | 6 | positions: 7 | filename: /tmp/positions.yaml 8 | 9 | clients: 10 | - url: http://loki-gateway:80/loki/api/v1/push 11 | 12 | scrape_configs: 13 | - job_name: system 14 | static_configs: 15 | - targets: 16 | - localhost 17 | labels: 18 | job: varlogs 19 | __path__: /var/log/*log -------------------------------------------------------------------------------- /redis/cluster/cluster.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | maxclients 10000 3 | maxmemory 512MB 4 | maxmemory-policy volatile-ttl 5 | requirepass 123456 6 | masterauth 123456 7 | appendonly yes 8 | appendfilename "appendonly.aof" 9 | appendfsync everysec 10 | auto-aof-rewrite-percentage 100 11 | auto-aof-rewrite-min-size 64mb 12 | aof-load-truncated yes 13 | aof-use-rdb-preamble yes 14 | cluster-enabled yes 15 | cluster-config-file nodes.conf 16 | cluster-node-timeout 5000 17 | appendonly yes -------------------------------------------------------------------------------- /prometheus/thanos/prometheus/prometheus-remote_write.yml: -------------------------------------------------------------------------------- 1 | # When the Thanos remote-write-receive component is started, 2 | # this is an example configuration of a Prometheus server that 3 | # would scrape a local node-exporter and replicate its data to 4 
| # the remote write endpoint. 5 | scrape_configs: 6 | - job_name: node 7 | scrape_interval: 1s 8 | static_configs: 9 | - targets: ['node-exporter:9100'] 10 | remote_write: 11 | - url: http://receive:10291/api/v1/receive 12 | -------------------------------------------------------------------------------- /onedev/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | 4 | services: 5 | onedev: 6 | container_name: onedev 7 | image: 1dev/server 8 | environment: 9 | TZ: Asia/Shanghai 10 | ports: 11 | - '6610:6610' 12 | - '6611:6611' 13 | volumes: 14 | - ./onedev_data:/opt/onedev 15 | - /var/run/docker.sock:/var/run/docker.sock 16 | - /etc/localtime:/etc/localtime:ro 17 | restart: always 18 | 19 | 20 | 21 | # curl http://:6610/ -------------------------------------------------------------------------------- /pihole/regex.list: -------------------------------------------------------------------------------- 1 | ^(.+[_.-])?ad[sxv]?[0-9]*[_.-] 2 | ^(.+[_.-])?adse?rv(er?|ice)?s?[0-9]*[_.-] 3 | ^(.+[_.-])?telemetry[_.-] 4 | ^(www[0-9]*\.)?xn-- 5 | ^adim(age|g)s?[0-9]*[_.-] 6 | ^adtrack(er|ing)?[0-9]*[_.-] 7 | ^advert(s|is(ing|ements?))?[0-9]*[_.-] 8 | ^aff(iliat(es?|ion))?[_.-] 9 | ^analytics?[_.-] 10 | ^banners?[_.-] 11 | ^beacons?[0-9]*[_.-] 12 | ^count(ers?)?[0-9]*[_.-] 13 | ^mads\. 14 | ^pixels?[-.] 15 | ^stat(s|istics)?[0-9]*[_.-] 16 | ^track(ers?|ing)?[0-9]*[_.-] 17 | .*1i8yk.cn.* 18 | -------------------------------------------------------------------------------- /bitwardenrs/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | bitwarden: 4 | image: bitwardenrs/server:1.21.0 5 | container_name: bitwarden 6 | restart: always 7 | volumes: 8 | - ./data:/data # 資料保存位置,修改./data 成自己想要存放的地方 9 | environment: 10 | - SIGNUPS_ALLOWED=true 11 | - DOMAIN=http(s)://example.com 12 | - DATABASE_URL= ~/example/bitwardendb/bitwarden.db 13 | - ROCKET_WORKERS=10 14 | - WEB_VAULT_ENABLED=true 15 | ports: 16 | - "3333:80" # 映射端口3333 -------------------------------------------------------------------------------- /prometheus/thanos/thanos/ruler.rules.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: metamonitoring 3 | rules: 4 | - alert: PrometheusReplicaDown 5 | annotations: 6 | message: 'Prometheus replica in cluster {{$labels.cluster}} 7 | has disappeared from Prometheus target discovery.' 
8 | expr: | 9 | absent(sum(up{cluster="prometheus-ha", instance=~".*:9090", job="prometheus"}) by (job,cluster) == 3) 10 | for: 15s # for demo purposes 11 | labels: 12 | severity: critical 13 | -------------------------------------------------------------------------------- /elasticsearch/stack/logstash-pipeline.conf: -------------------------------------------------------------------------------- 1 | input { 2 | beats { 3 | port => 5044 4 | } 5 | } 6 | 7 | filter { 8 | grok { 9 | match => { "message" => "%{COMBINEDAPACHELOG}" } 10 | } 11 | date { 12 | match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ] 13 | } 14 | 15 | } 16 | 17 | output { 18 | elasticsearch { 19 | hosts => ["${ELASTICSEARCH_HOSTS}"] 20 | user => "${ELASTICSEARCH_USERNAME}" 21 | password => "${ELASTICSEARCH_PASSWORD}" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /guacamole/reset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "This will delete your existing database (./data/)" 3 | echo " delete your recordings (./record/)" 4 | echo " delete your drive files (./drive/)" 5 | echo " delete your certs files (./nginx/ssl/)" 6 | echo "" 7 | read -p "Are you sure? " -n 1 -r 8 | echo "" # (optional) move to a new line 9 | if [[ $REPLY =~ ^[Yy]$ ]]; then # do dangerous stuff 10 | chmod -R +x -- ./init 11 | sudo rm -r -f ./data/ ./drive/ ./record/ ./nginx/ssl/ 12 | fi 13 | 14 | -------------------------------------------------------------------------------- /hfish/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | hfish: 4 | container_name: hfish 5 | image: threatbook/hfish-server:latest 6 | environment: 7 | - USERNAME=admin # 后台管理系统登录账号 8 | - PASSWORD=2JnT6Oc33cD # 后台管理系统登录密码 9 | volumes: 10 | - "./hfish_data:/usr/share/hfish" 11 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 12 | network_mode: host 13 | privileged: true 14 | restart: always 15 | 16 | 17 | 18 | # https://yourip:4433/web admin HFish2021 -------------------------------------------------------------------------------- /elasticsearch/stack/apmserver.docker.yml: -------------------------------------------------------------------------------- 1 | apm-server: 2 | host: "0.0.0.0:8200" 3 | 4 | 5 | output.elasticsearch: 6 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 7 | username: '${ELASTICSEARCH_USERNAME:}' 8 | password: '${ELASTICSEARCH_PASSWORD:}' 9 | 10 | 11 | setup.template.settings: 12 | index: 13 | number_of_shards: 1 14 | number_of_replicas: 0 15 | codec: best_compression 16 | 17 | setup.kibana.host: '${KIBANA_HOST:kibana:5601}' 18 | 19 | http.enabled: true 20 | http.host: 0.0.0.0 21 | http.port: 5066 22 | -------------------------------------------------------------------------------- /traefik/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | reverse-proxy: 5 | container_name: traefik 6 | image: traefik # The official Traefik docker image 7 | command: --api --accessLog --debug --etcd --etcd.endpoint=etcd:2379 --etcd.useapiv3=true 8 | ports: 9 | - "80:80" # The HTTP port 10 | - "8080:8080" # The Web UI (enabled by --api) 11 | network_mode: docker 12 | restart: always 13 | volumes: 14 | - /var/run/docker.sock:/var/run/docker.sock 15 | - "/etc/localtime:/etc/localtime" -------------------------------------------------------------------------------- 
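The Traefik compose file above enables the dashboard/API with --api on port 8080 and points at etcd as its backend. A minimal smoke test, assuming the stack is running on the local host via `docker compose up -d` and that the flag syntax corresponds to Traefik 1.x (whose API exposes /health); hostnames and ports are illustrative:

```bash
# Smoke-test the Traefik stack defined above.

# Entrypoint on :80 should answer; a 404 is expected before any frontends
# have been registered in etcd.
curl -sS -o /dev/null -w "entrypoint HTTP %{http_code}\n" http://localhost/

# The API enabled by --api listens on :8080; on Traefik 1.x /health returns
# uptime and request statistics as JSON.
curl -sS http://localhost:8080/health
```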
/nginx/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | 4 | services: 5 | test-static-nginx: 6 | container_name: test-static-nginx 7 | image: nginx:1.21.3-alpine 8 | restart: always 9 | volumes: 10 | - ./nginx_html:/usr/share/nginx/html 11 | - /etc/localtime:/etc/localtime 12 | environment: 13 | TZ: Asia/Shanghai 14 | LANG: zh_CN.UTF-8 15 | healthcheck: 16 | test: ["CMD", "curl", "-fs", "http://localhost"] 17 | interval: 1m 18 | timeout: 6s 19 | retries: 3 20 | start_period: 5s 21 | -------------------------------------------------------------------------------- /netdata/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | netdata: 4 | container_name: netdata 5 | image: netdata/netdata 6 | ports: 7 | - 19999:19999 8 | volumes: 9 | - /etc/passwd:/host/etc/passwd:ro 10 | - /etc/group:/host/etc/group:ro 11 | - /proc:/host/proc:ro 12 | - /sys:/host/sys:ro 13 | - /etc/os-release:/host/etc/os-release:ro 14 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 15 | restart: always 16 | cap_add: 17 | - SYS_PTRACE 18 | security_opt: 19 | - apparmor=unconfined 20 | -------------------------------------------------------------------------------- /timescaledb/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | timescaledb: 5 | container_name: timescaledb 6 | image: timescale/timescaledb:2.5.2-pg13 7 | volumes: 8 | - "./timescaledb_data:/var/lib/postgresql/data" 9 | - "/usr/share/zoneinfo/Asia/Shanghai:/etc/localtime" 10 | command: ["-c", "max_connections=1000"] 11 | environment: 12 | POSTGRES_DB: admin 13 | POSTGRES_USER: admin 14 | POSTGRES_PASSWORD: 123456 15 | network_mode: host 16 | #ports: 17 | # - "5432:5432" 18 | restart: always -------------------------------------------------------------------------------- /gost/gost.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | - name: https 3 | addr: :443 4 | handler: 5 | type: tcp 6 | metadata: 7 | sniffing: true 8 | listener: 9 | type: tcp 10 | forwarder: 11 | nodes: 12 | - name: google 13 | addr: www.google.com:443 14 | host: www.google.com 15 | - name: github 16 | addr: github.com:443 17 | host: "*.github.com" 18 | - name: chatgpt 19 | addr: chat.openai.com:443 20 | host: chat.openai.com 21 | - name: chatgpt-api 22 | addr: api.openai.com:443 23 | host: api.openai.com -------------------------------------------------------------------------------- /redis/docker-compose-single.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | redis: 4 | container_name: redis-single 5 | image: redis:5.0.7 6 | volumes: 7 | - "./redis_single_data:/data" 8 | - "/etc/localtime:/etc/localtime" 9 | ports: 10 | - "6379:6379" 11 | restart: always 12 | command: [ 13 | '--port 6379', 14 | '--requirepass 123456', 15 | '--maxclients 1000', 16 | '--maxmemory 256mb', 17 | '--maxmemory-policy volatile-ttl', 18 | '--appendonly yes', 19 | '--aof-use-rdb-preamble yes' 20 | ] -------------------------------------------------------------------------------- /openresty/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | openresty: 5 | container_name: openresty 6 | hostname: openresty 7 | image: openresty/openresty:centos 8 | ports: 9 | 
- '80:80' 10 | - '443:443' 11 | volumes: 12 | - "./logs:/usr/local/openresty/nginx/logs" 13 | - "./html:/usr/local/openresty/nginx/html" 14 | - "./conf/nginx.conf:/usr/local/openresty/nginx/conf/nginx.conf" 15 | - "./conf/conf.d:/usr/local/openresty/nginx/conf/conf.d" 16 | - /etc/localtime:/etc/localtime:ro 17 | restart: always -------------------------------------------------------------------------------- /drone/docker-compose-agent.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | drone-agent: 4 | container_name: drone-agent 5 | image: drone/agent:1.2.3 6 | volumes: 7 | - /var/run/docker.sock:/var/run/docker.sock 8 | - /etc/localtime:/etc/localtime 9 | environment: 10 | - DRONE_RPC_HOST=192.168.77.132:80 11 | - DRONE_RPC_PROTO=http 12 | - DRONE_RPC_SECRET=757e584744898089331ec13bbe2b19f4 13 | - DRONE_RUNNER_NAME=drone-agent001 14 | - DRONE_RUNNER_CAPACITY=2 15 | - DRONE_LOGS_DEBUG=true 16 | - DRONE_RPC_DEBUG=true 17 | restart: always -------------------------------------------------------------------------------- /verdaccio/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.1' 2 | 3 | services: 4 | verdaccio: 5 | image: verdaccio/verdaccio:5.10.0 6 | container_name: "verdaccio" 7 | environment: 8 | - TZ=Asia/Shanghai 9 | - VERDACCIO_PORT=4873 10 | ports: 11 | - "4873:4873" 12 | volumes: 13 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 14 | - "./storage:/verdaccio/storage" 15 | - "./conf:/verdaccio/conf" 16 | - "./plugins:/verdaccio/plugins" 17 | restart: always 18 | 19 | 20 | # chown 10001 -R plugins storage conf 21 | -------------------------------------------------------------------------------- /yapi/docker-entrypoint-initdb.d/mongo-init.js: -------------------------------------------------------------------------------- 1 | print('Init Start #################################################################'); 2 | 3 | db.auth("root", "root123456"); 4 | 5 | db = db.getSiblingDB('yapi'); 6 | 7 | db.createUser({ 8 | user: 'yapi', 9 | pwd: 'yapi123456', 10 | roles: [ 11 | { 12 | role: "dbAdmin", 13 | db: "yapi" 14 | }, 15 | { 16 | role: "readWrite", 17 | db: "yapi" 18 | } 19 | ] 20 | }); 21 | 22 | print('Init END ###################################################################'); -------------------------------------------------------------------------------- /clickhouse/cluster_1S_2R_ch_proxy/ch-proxy/config/config.yml: -------------------------------------------------------------------------------- 1 | server: 2 | http: 3 | listen_addr: ':80' 4 | allowed_networks: ['127.0.0.0/24', '192.168.5.0/24', "10.0.0.0/8"] 5 | users: 6 | - name: 'default' 7 | to_cluster: 'cluster_1S_2R' 8 | to_user: 'default' 9 | max_concurrent_queries: 100 10 | max_execution_time: 30s 11 | requests_per_minute: 10 12 | # Allow `CORS` requests for `tabix`. 
13 | allow_cors: true 14 | clusters: 15 | - name: 'cluster_1S_2R' 16 | nodes: ['clickhouse-01:8123', 'clickhouse-02:8123'] 17 | users: 18 | - name: 'default' 19 | -------------------------------------------------------------------------------- /nomad/nomad/jobs/fabio.nomad: -------------------------------------------------------------------------------- 1 | job "fabio" { 2 | datacenters = ["dc1"] 3 | type = "system" 4 | 5 | group "fabio" { 6 | task "fabio" { 7 | driver = "docker" 8 | config { 9 | image = "fabiolb/fabio" 10 | network_mode = "host" 11 | } 12 | resources { 13 | cpu = 200 14 | memory = 128 15 | network { 16 | mbits = 20 17 | port "lb" { 18 | static = 9999 19 | } 20 | port "ui" { 21 | static = 9998 22 | } 23 | } 24 | } 25 | } 26 | } 27 | } 28 | 29 | -------------------------------------------------------------------------------- /grafana/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | grafana: 5 | image: grafana/grafana:8.3.1 6 | restart: always 7 | user: root 8 | privileged: true 9 | volumes: 10 | - ./grafana_data:/var/lib/grafana 11 | environment: 12 | - TZ=Asia/Shanghai 13 | - LANG=zh_CN.UTF-8 14 | - GF_EXPLORE_ENABLED=true 15 | - GF_SECURITY_ADMIN_USER=admin 16 | - GF_SECURITY_ADMIN_PASSWORD=123456 17 | - GF_USERS_ALLOW_SIGN_UP=false 18 | - GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource 19 | ports: 20 | - "3000:3000" 21 | -------------------------------------------------------------------------------- /healthchecks/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | grafana: 5 | image: grafana/grafana:8.3.1 6 | restart: always 7 | user: root 8 | privileged: true 9 | volumes: 10 | - ./grafana_data:/var/lib/grafana 11 | environment: 12 | - TZ=Asia/Shanghai 13 | - LANG=zh_CN.UTF-8 14 | - GF_EXPLORE_ENABLED=true 15 | - GF_SECURITY_ADMIN_USER=admin 16 | - GF_SECURITY_ADMIN_PASSWORD=123456 17 | - GF_USERS_ALLOW_SIGN_UP=false 18 | - GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource 19 | ports: 20 | - "3000:3000" 21 | -------------------------------------------------------------------------------- /nomad/nomad/config/client/client.hcl: -------------------------------------------------------------------------------- 1 | bind_addr = "{{GetInterfaceIP \"ens33\"}}" 2 | 3 | # Increase log verbosity 4 | log_level = "DEBUG" 5 | 6 | # Setup data dir 7 | data_dir = "/nomad/data" 8 | 9 | # Enable the client 10 | client { 11 | enabled = true 12 | 13 | # For demo assume we are talking to server1. For production, 14 | # this should be like "nomad.service.consul:4647" and a system 15 | # like Consul used for service discovery. 16 | servers = ["nomad_s1:4647", "nomad_s2:4647", "nomad_s3:4647"] 17 | } 18 | 19 | ports { 20 | http = 5656 21 | } 22 | 23 | consul { 24 | address = "127.0.0.1:8500" 25 | } 26 | -------------------------------------------------------------------------------- /openresty/html/50x.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Error 5 | 12 | 13 | 14 |

An error occurred. 15 | Sorry, the page you are looking for is currently unavailable. 16 | Please try again later. 17 | If you are the system administrator of this resource then you should check 18 | the error log for details. 19 | Faithfully yours, OpenResty.
20 | 21 | -------------------------------------------------------------------------------- /wordpress/.env_example: -------------------------------------------------------------------------------- 1 | # wordpress - wordpress:php7.3-fpm 2 | WORDPRESS_VERSION=php7.3-fpm 3 | WORDPRESS_DB_NAME=wordpress 4 | WORDPRESS_TABLE_PREFIX=wp_ 5 | WORDPRESS_DB_HOST=mysql 6 | WORDPRESS_DB_USER=root 7 | WORDPRESS_DB_PASSWORD=password 8 | 9 | # mariadb - mariadb:latest 10 | MARIADB_VERSION=latest 11 | MYSQL_ROOT_PASSWORD=password 12 | MYSQL_USER=root 13 | MYSQL_PASSWORD=password 14 | MYSQL_DATABASE=wordpress 15 | 16 | # nginx - nginx:latest 17 | NGINX_VERSION=latest 18 | 19 | # volumes on host 20 | NGINX_CONF_DIR=./nginx 21 | NGINX_LOG_DIR=./logs/nginx 22 | WORDPRESS_DATA_DIR=./wordpress 23 | SSL_CERTS_DIR=./certs 24 | SSL_CERTS_DATA_DIR=./certs-data 25 | -------------------------------------------------------------------------------- /focalboard/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "serverRoot": "http://localhost:8000", 3 | "port": 8000, 4 | "dbtype": "postgres", 5 | "dbconfig": "postgres://boardsuser:boardsuser-password@focalboard-db/boards?sslmode=disable&connect_timeout=10", 6 | "postgres_dbconfig": "dbname=boards sslmode=disable", 7 | "useSSL": false, 8 | "webpath": "./pack", 9 | "filespath": "./files", 10 | "telemetry": true, 11 | "prometheus_address": ":9092", 12 | "session_expire_time": 2592000, 13 | "session_refresh_time": 18000, 14 | "localOnly": false, 15 | "enableLocalMode": true, 16 | "localModeSocketLocation": "/var/tmp/focalboard_local.socket" 17 | } 18 | -------------------------------------------------------------------------------- /nomad/nomad/config/server/server.hcl: -------------------------------------------------------------------------------- 1 | bind_addr = "{{GetInterfaceIP \"eth0\"}}" 2 | 3 | # Increase log verbosity 4 | log_level = "INFO" 5 | 6 | # Setup data dir 7 | data_dir = "/nomad/data" 8 | 9 | # Enable the server 10 | server { 11 | enabled = true 12 | 13 | # Self-elect, should be 3 or 5 for production 14 | bootstrap_expect = 3 15 | 16 | server_join { 17 | retry_join = ["nomad_s1", "nomad_s2", "nomad_s3"] 18 | retry_max = 3 19 | retry_interval = "15s" 20 | } 21 | 22 | 23 | # Encrypt gossip communication 24 | encrypt = "cg8StVXbQJ0gPvMd9o7yrg==" 25 | } 26 | 27 | consul { 28 | address = "consul_c:8500" 29 | } 30 | -------------------------------------------------------------------------------- /prometheus/pushclient/process-top.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | [ ! -f /usr/bin/curl ] && apk add --no-cache curl 3 | function push() 4 | { 5 | z=$(ps aux) 6 | a=$z 7 | while read -r z 8 | do 9 | var=$var$(awk '{print "cpu_usage{process=\""$11"\", pid=\""$2"\"}", $3z}'); 10 | done <<< "$z" 11 | 12 | while read -r a 13 | do 14 | var2=$var2$(awk '{print "memory_usage{process=\""$11"\", pid=\""$2"\"}", $4z}'); 15 | done <<< "$a" 16 | 17 | curl -X POST -H "Content-Type: text/plain" --data "$var 18 | $var2 19 | " http://localhost:9091/metrics/job/top/instance/`hostname` 20 | [ $? 
-eq 0 ] && echo "`date` push ok" 21 | unset var var2 22 | } 23 | 24 | while sleep 2; do push; done; 25 | -------------------------------------------------------------------------------- /elasticsearch/docker-compose-single.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | 4 | volumes: 5 | es_data_single: 6 | driver: local 7 | 8 | services: 9 | es: 10 | image: docker.elastic.co/elasticsearch/elasticsearch:7.8.1 11 | container_name: es01 12 | environment: 13 | - node.name=es01 14 | - cluster.name=es-docker-cluster 15 | - discovery.type=single-node 16 | - bootstrap.memory_lock=true 17 | - "ES_JAVA_OPTS=-Xms512m -Xmx512m" 18 | ulimits: 19 | memlock: 20 | soft: -1 21 | hard: -1 22 | volumes: 23 | - es_data_single:/usr/share/elasticsearch/data 24 | ports: 25 | - 9200:9200 26 | - 9300:9300 27 | -------------------------------------------------------------------------------- /logio/logio_data/web_server.conf: -------------------------------------------------------------------------------- 1 | exports.config = { 2 | host: '0.0.0.0', 3 | port: 28778, 4 | 5 | // Enable HTTP Basic Authentication 6 | auth: { 7 | user: "admin", 8 | pass: "@logio" 9 | }, 10 | 11 | /* 12 | // Enable HTTPS/SSL 13 | ssl: { 14 | key: '/path/to/privatekey.pem', 15 | cert: '/path/to/certificate.pem' 16 | }, 17 | */ 18 | 19 | /* 20 | // Restrict access to websocket (socket.io) 21 | // Uses socket.io 'origins' syntax 22 | restrictSocket: '*:*', 23 | */ 24 | 25 | /* 26 | // Restrict access to http server (express) 27 | restrictHTTP: [ 28 | "192.168.29.39", 29 | "10.0.*" 30 | ] 31 | */ 32 | 33 | } 34 | -------------------------------------------------------------------------------- /uptime-kuma/README.md: -------------------------------------------------------------------------------- 1 | ### webhook 配置 2 | 3 | 自定义内容 4 | 5 | ```json 6 | { 7 | "event_title": "UptimeKuma 监控", 8 | "event_name": "{{ monitorJSON['name'] }} {% if heartbeatJSON['status'] == 1 %}✅ Up{% else %}🔴 Down{% endif %}", 9 | "event_type": "{% if heartbeatJSON['status'] == 1 %}恢复{% else %}告警{% endif %}", 10 | "event_level": "{% if heartbeatJSON['status'] == 1 %}通知{% else %}警告{% endif %}", 11 | "event_time": "{{ heartbeatJSON['time'] }}", 12 | "event_content": "{{ heartbeatJSON['msg'] }}\n > **监控url:** {{ monitorJSON['url'] }}" 13 | } 14 | ``` 15 | 16 | 额外header 17 | 18 | ```json 19 | { 20 | "Content-Type": "application/json" 21 | } 22 | ``` 23 | 24 | -------------------------------------------------------------------------------- /envoy/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | envoy: 3 | container_name: envoy 4 | image: envoyproxy/envoy:v1.32-latest 5 | restart: always 6 | volumes: 7 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 8 | - ./envoy.yaml:/etc/envoy/envoy.yaml 9 | networks: 10 | envoy-network: 11 | ports: 12 | - '8080:8080' 13 | - '9901:9901' 14 | 15 | whoami: 16 | container_name: whoami 17 | image: traefik/whoami:v1.10 18 | restart: always 19 | volumes: 20 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 21 | networks: 22 | envoy-network: 23 | depends_on: 24 | - envoy 25 | 26 | networks: 27 | envoy-network: -------------------------------------------------------------------------------- /prometheus/prometheus/etc/alerts/consul-rules.rules: -------------------------------------------------------------------------------- 1 | groups: 2 | 3 | - name: test 4 | rules: 5 | - alert: test01 6 | expr: up == 0 7 | 
for: 5m 8 | labels: 9 | severity: warning 10 | annotations: 11 | summary: "Exporter down (instance {{ $labels.instance }})" 12 | description: "Prometheus exporter down\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 13 | - alert: test02 14 | expr: up == 0 15 | for: 5m 16 | labels: 17 | severity: warning 18 | annotations: 19 | summary: "Exporter down (instance {{ $labels.instance }})" 20 | description: "Prometheus exporter down\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 21 | 22 | -------------------------------------------------------------------------------- /rabbitmq/1.init-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | exec="docker-compose -f docker-compose-cluster.yml exec" 5 | 6 | 7 | # rmq1,内存节点 8 | 9 | $exec rmq1 rabbitmqctl stop_app # 停止rabbitmq服务 10 | $exec rmq1 rabbitmqctl reset # 清空节点状态 11 | $exec rmq1 rabbitmqctl join_cluster --ram rabbit@rmq0 # rmq1和rmq0构成集群,rmq1必须能通过rmq0的主机名ping通 12 | $exec rmq1 rabbitmqctl start_app # 开启rabbitmq服务 13 | 14 | # rmq2,内存节点 15 | 16 | $exec rmq2 rabbitmqctl stop_app # 停止rabbitmq服务 17 | $exec rmq2 rabbitmqctl reset # 清空节点状态 18 | $exec rmq2 rabbitmqctl join_cluster --ram rabbit@rmq0 # rmq2和rmq0构成集群,rmq2必须能通过rmq0的主机名ping通 19 | $exec rmq2 rabbitmqctl start_app # 开启rabbitmq服务 20 | 21 | 22 | $exec rmq0 rabbitmqctl cluster_status 23 | -------------------------------------------------------------------------------- /rocket.chat/scripts/hello.coffee: -------------------------------------------------------------------------------- 1 | module.exports = (robot) -> 2 | robot.hear /hello/i, (res) -> 3 | res.send "你好" 4 | 5 | robot.respond /你好/i, (res) -> 6 | res.reply "hello" 7 | 8 | robot.hear /I like pie/i, (res) -> 9 | res.emote "makes a freshly baked pie" 10 | 11 | robot.respond /exec\s(.*)/i, (msg) -> 12 | data = '```bash \n' 13 | exec = msg.match[1].trim() 14 | spawn = require('child_process').spawn 15 | proc = spawn 'bash', ['-c', exec] 16 | proc.stdout.on 'data', (chunk) -> 17 | data += chunk.toString() 18 | proc.stderr.on 'data', (chunk) -> 19 | msg.reply chunk.toString() 20 | proc.stdout.on 'end', () -> 21 | msg.reply data.toString() + '```' 22 | -------------------------------------------------------------------------------- /nats/docker-compose-cluster.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | networks: 4 | nats: 5 | name: nats 6 | 7 | services: 8 | nats: 9 | image: nats:2.2.6-alpine3.13 10 | ports: 11 | - "8222:8222" 12 | restart: unless-stopped 13 | networks: ["nats"] 14 | nats-1: 15 | image: nats:2.2.6-alpine3.13 16 | command: "--cluster nats://0.0.0.0:6222 --routes=nats://ruser:T0pS3cr3t@nats:6222" 17 | networks: ["nats"] 18 | restart: unless-stopped 19 | depends_on: ["nats"] 20 | nats-2: 21 | image: nats:2.2.6-alpine3.13 22 | command: "--cluster nats://0.0.0.0:6222 --routes=nats://ruser:T0pS3cr3t@nats:6222" 23 | networks: ["nats"] 24 | restart: unless-stopped 25 | depends_on: ["nats"] -------------------------------------------------------------------------------- /nexus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | nexus: 5 | container_name: nexus 6 | image: sonatype/nexus3:3.38.1 7 | environment: 8 | - TZ=Asia/Shanghai 9 | volumes: 10 | - nexus-data:/nexus-data 11 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 12 | ports: 13 | - "8081:8081" 14 | restart: always 15 | 16 | nginx: 17 | container_name: 
nexus-nginx 18 | image: nginx:1.17.0-alpine 19 | environment: 20 | - TZ=Asia/Shanghai 21 | volumes: 22 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 23 | - ./nginx.conf:/etc/nginx/nginx.conf 24 | ports: 25 | - "8082:80" 26 | restart: always 27 | 28 | volumes: 29 | nexus-data: {} 30 | -------------------------------------------------------------------------------- /elasticsearch/docker-compose-xpack-certs.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | create_certs: 5 | container_name: create_certs 6 | image: docker.elastic.co/elasticsearch/elasticsearch:7.8.1 7 | command: > 8 | bash -c ' 9 | if [[ ! -f /certs/bundle.zip ]]; then 10 | bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yml -out /certs/bundle.zip; 11 | unzip /certs/bundle.zip -d /certs; 12 | fi; 13 | chown -R 1000:0 /certs 14 | ' 15 | user: "0" 16 | working_dir: /usr/share/elasticsearch 17 | volumes: 18 | - 'certs:/certs' 19 | - './xpack:/usr/share/elasticsearch/config/certificates' 20 | 21 | volumes: {"certs"} 22 | -------------------------------------------------------------------------------- /elasticsearch/stack/packetbeat.docker.yml: -------------------------------------------------------------------------------- 1 | setup.template.settings: 2 | index: 3 | number_of_shards: 1 4 | number_of_replicas: 0 5 | codec: best_compression 6 | 7 | packetbeat.interfaces.device: any 8 | 9 | packetbeat.flows: 10 | timeout: 30s 11 | period: 10s 12 | 13 | packetbeat.protocols.http: 14 | ports: [5066] 15 | 16 | processors: 17 | - add_docker_metadata: ~ 18 | 19 | output.elasticsearch: 20 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 21 | username: '${ELASTICSEARCH_USERNAME:}' 22 | password: '${ELASTICSEARCH_PASSWORD:}' 23 | 24 | setup.dashboards.enabled: true 25 | setup.kibana.host: '${KIBANA_HOST:kibana:5601}' 26 | 27 | http.enabled: true 28 | http.host: 0.0.0.0 29 | http.port: 5066 30 | -------------------------------------------------------------------------------- /ewomail/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | ewomail: 4 | container_name: ewomail 5 | image: bestwu/ewomail:latest 6 | hostname: mail.test.com 7 | volumes: 8 | - ./mysql:/ewomail/mysql/data 9 | - ./vmail:/ewomail/mail 10 | - ./rainloop:/ewomail/www/rainloop/data 11 | - ./ssl/certs/:/etc/ssl/certs/ 12 | - ./ssl/private/:/etc/ssl/private/ 13 | - ./ssl/dkim/:/ewomail/dkim/ 14 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 15 | ports: 16 | - "25:25" 17 | - "143:143" 18 | - "587:587" 19 | - "993:993" 20 | - "109:109" 21 | - "110:110" 22 | - "465:465" 23 | - "995:995" 24 | - "127.0.0.1:8002:80" 25 | - "8001:8080" 26 | restart: always -------------------------------------------------------------------------------- /nacos/standalone-mysql-8.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | nacos: 5 | image: nacos/nacos-server:${NACOS_VERSION} 6 | container_name: nacos-standalone-mysql 7 | env_file: 8 | - ./env/nacos-standlone-mysql.env 9 | volumes: 10 | - ./standalone-logs/:/home/nacos/logs 11 | - ./init.d/custom.properties:/home/nacos/init.d/custom.properties 12 | ports: 13 | - "8848:8848" 14 | - "9555:9555" 15 | depends_on: 16 | - mysql 17 | restart: always 18 | 19 | mysql: 20 | container_name: mysql 21 | image: nacos/nacos-mysql:8.0.16 22 | env_file: 23 | - ./env/mysql.env 24 | 
volumes: 25 | - ./mysql:/var/lib/mysql 26 | ports: 27 | - "3306:3306" 28 | -------------------------------------------------------------------------------- /netbox/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/netboxcommunity/netbox:v4.1.6 2 | 3 | RUN /opt/netbox/venv/bin/pip config set global.index-url https://mirrors.aliyun.com/pypi/simple \ 4 | && /opt/netbox/venv/bin/pip install dulwich \ 5 | && /opt/netbox/venv/bin/pip install netbox-qrcode \ 6 | && /opt/netbox/venv/bin/pip install netbox-acls \ 7 | && /opt/netbox/venv/bin/pip install netbox-plugin-dns \ 8 | && /opt/netbox/venv/bin/pip install netbox-ipcalculator \ 9 | && echo 'PLUGINS = ["netbox_qrcode","netbox_acls","netbox_dns", "netbox_ipcalculator"]' >> /etc/netbox/config/plugins.py \ 10 | && SECRET_KEY="dummydummydummydummydummydummydummydummydummydummy" /opt/netbox/venv/bin/python3 /opt/netbox/netbox/manage.py collectstatic --no-input -------------------------------------------------------------------------------- /elasticsearch/docker-compose-auth.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | 4 | volumes: 5 | es_data_single: 6 | driver: local 7 | 8 | services: 9 | es: 10 | image: docker.elastic.co/elasticsearch/elasticsearch:7.8.1 11 | container_name: es01 12 | environment: 13 | - node.name=es01 14 | - cluster.name=es-docker-cluster 15 | - discovery.type=single-node 16 | - bootstrap.memory_lock=true 17 | - "ES_JAVA_OPTS=-Xms512m -Xmx512m" 18 | - ELASTIC_PASSWORD=123456 19 | - xpack.security.enabled=true 20 | ulimits: 21 | memlock: 22 | soft: -1 23 | hard: -1 24 | volumes: 25 | - es_data_single:/usr/share/elasticsearch/data 26 | ports: 27 | - 9200:9200 28 | - 9300:9300 29 | -------------------------------------------------------------------------------- /logio/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | logio_server: 4 | container_name: logio_server 5 | hostname: logio_server 6 | image: lework/logio:0.3.4 7 | network_mode: docker 8 | volumes: 9 | - "./logio_data:/root/.log.io" 10 | - "./logio_log:/var/log" 11 | ports: 12 | - "28778:28778" 13 | restart: always 14 | 15 | logio_harvester: 16 | container_name: logio_harvester 17 | hostname: logio_harvester 18 | image: lework/logio:0.3.4 19 | network_mode: docker 20 | volumes: 21 | - "./logio_data:/root/.log.io" 22 | - "./logio_log:/var/log" 23 | depends_on: 24 | - logio_server 25 | environment: 26 | - LOGIO_SERVICE=harvester 27 | restart: always 28 | -------------------------------------------------------------------------------- /loki/s3-config/loki-s3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | server: 3 | http_listen_port: 3100 4 | memberlist: 5 | join_members: 6 | - loki:7946 7 | schema_config: 8 | configs: 9 | - from: 2021-08-01 10 | store: boltdb-shipper 11 | object_store: s3 12 | schema: v11 13 | index: 14 | prefix: index_ 15 | period: 24h 16 | common: 17 | path_prefix: /loki 18 | replication_factor: 1 19 | storage: 20 | s3: 21 | endpoint: loki-minio:9000 22 | insecure: true 23 | bucketnames: loki-data 24 | access_key_id: loki 25 | secret_access_key: supersecret 26 | s3forcepathstyle: true 27 | ring: 28 | kvstore: 29 | store: memberlist 30 | ruler: 31 | storage: 32 | s3: 33 | bucketnames: loki-ruler -------------------------------------------------------------------------------- 
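To see the promtail-gateway.yaml and loki-s3.yaml pair above working end to end, it can help to push one log line by hand and read it back. A sketch assuming the loki-gateway:3100 address and the tenant1 tenant ID used in promtail-gateway.yaml; substitute localhost and the published port when running from the Docker host:

```bash
# Push a single test line to Loki through the gateway, then query it back.
LOKI=http://loki-gateway:3100          # gateway address from promtail-gateway.yaml
TENANT=tenant1                         # must match tenant_id in promtail-gateway.yaml
NOW=$(date +%s%N)                      # Loki expects nanosecond timestamps as strings

curl -sS -H "Content-Type: application/json" -H "X-Scope-OrgID: $TENANT" \
  -X POST "$LOKI/loki/api/v1/push" \
  -d "{\"streams\":[{\"stream\":{\"job\":\"smoke\"},\"values\":[[\"$NOW\",\"hello from curl\"]]}]}"

# Range query over the default window (last hour) for the same stream.
curl -sS -G -H "X-Scope-OrgID: $TENANT" "$LOKI/loki/api/v1/query_range" \
  --data-urlencode 'query={job="smoke"}'
```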
/grafana/grafana-allstack/k6/scripts/grafana-loadtest.js: -------------------------------------------------------------------------------- 1 | import { group, check, sleep } from "k6"; 2 | import http from "k6/http"; 3 | 4 | // User think time in between page loads etc. (change this to 0 when debugging) 5 | const thinkTime = 30; 6 | 7 | export let options = { 8 | vus: 1, 9 | tags: { 10 | testid: 'loadtest', 11 | }, 12 | thresholds: { 13 | 'http_req_duration{kind:grafana}': ["avg<=10"], 14 | 'http_reqs': ["rate>100"], 15 | } 16 | }; 17 | 18 | export default function() { 19 | group("front page", function() { 20 | check(http.get("http://grafana:3000/", { 21 | tags: {'kind': 'grafana' }, 22 | }), { 23 | "status is 200": (res) => res.status === 200, 24 | }); 25 | }); 26 | sleep(thinkTime); 27 | } 28 | -------------------------------------------------------------------------------- /openresty/html/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Welcome to OpenResty! 5 | 12 | 13 | 14 |

Welcome to OpenResty! 15 | If you see this page, the OpenResty web platform is successfully installed and 16 | working. Further configuration is required. 17 | 18 | For online documentation and support please refer to 19 | openresty.org. 20 | Commercial support is available at 21 | openresty.com. 22 | 23 | Thank you for flying OpenResty.
24 | 25 | -------------------------------------------------------------------------------- /LogicalDOC/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.1" 2 | 3 | services: 4 | 5 | logicaldoc: 6 | depends_on: 7 | - mysql-ld 8 | command: ["./wait-for-it.sh", "mysql-ld:3306", "-t", "30", "--", "/opt/logicaldoc/start-logicaldoc.sh", "run"] 9 | image: logicaldoc/logicaldoc-ce 10 | ports: 11 | - 8080:8080 12 | environment: 13 | - LDOC_MEMORY=2000 14 | 15 | mysql-ld: 16 | image: mysql:8.0 17 | command: --default-authentication-pluve_password 18 | environment: 19 | - MYSQL_ROOT_PASSWORD=example 20 | - MYSQL_DATABASE=logicaldoc 21 | - MYSQL_USER=ldoc 22 | - MYSQL_PASSWORD=changeme 23 | 24 | 25 | 26 | # docker run --name openkm-ce -p 8080:8080 openkm/openkm-ce 27 | # https://github.com/logicaldoc/community -------------------------------------------------------------------------------- /clickhouse/single/config.d/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | debug 4 | /var/log/clickhouse-server/clickhouse-server.log 5 | /var/log/clickhouse-server/clickhouse-server.err.log 6 | 1000M 7 | 3 8 | 9 | ch 10 | 0.0.0.0 11 | 8123 12 | 9000 13 | 14 | 15 | users.xml 16 | 17 | 18 | /var/lib/clickhouse/access/ 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /elasticsearch/stack/journalbeat.docker.yml: -------------------------------------------------------------------------------- 1 | setup.template.settings: 2 | index: 3 | number_of_shards: 1 4 | number_of_replicas: 0 5 | codec: best_compression 6 | 7 | journalbeat.inputs: 8 | - paths: ["/var/log/journal"] 9 | seek: cursor 10 | 11 | processors: 12 | - add_docker_metadata: ~ 13 | 14 | output.elasticsearch: 15 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 16 | username: '${ELASTICSEARCH_USERNAME:}' 17 | password: '${ELASTICSEARCH_PASSWORD:}' 18 | 19 | #setup.dashboards.enabled: true 20 | setup.kibana.host: '${KIBANA_HOST:kibana:5601}' 21 | 22 | http.enabled: true 23 | http.host: 0.0.0.0 24 | http.port: 5066 25 | -------------------------------------------------------------------------------- /clickhouse/log_nginx/clickhouse/config.d/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | debug 4 | /var/log/clickhouse-server/clickhouse-server.log 5 | /var/log/clickhouse-server/clickhouse-server.err.log 6 | 1000M 7 | 3 8 | 9 | ch 10 | 0.0.0.0 11 | 8123 12 | 9000 13 | 14 | 15 | users.xml 16 | 17 | 18 | /var/lib/clickhouse/access/ 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /prometheus/alertmanager/etc/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | # The smarthost and SMTP sender used for mail notifications. 3 | smtp_smarthost: 'smtp.test.com:25' 4 | smtp_from: 'lework@test.com' 5 | smtp_auth_username: 'lework@test.com' 6 | smtp_auth_password: '123456' 7 | 8 | # The directory from which notification templates are read. 9 | templates: 10 | - '/etc/alertmanager/template/*.tmpl' 11 | 12 | # The root route on which each incoming alert enters. 13 | route: 14 | # its own grouping. Example: group_by: [...] 
15 | group_by: ['alertname'] 16 | group_wait: 5s 17 | group_interval: 5s 18 | repeat_interval: 1h 19 | 20 | receiver: default-receiver 21 | 22 | 23 | receivers: 24 | - name: 'default-receiver' 25 | email_configs: 26 | - to: 'lework@test.com' 27 | send_resolved: true 28 | -------------------------------------------------------------------------------- /prometheus/thanos/alertmanager/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | # The smarthost and SMTP sender used for mail notifications. 3 | smtp_smarthost: 'smtp.test.com:25' 4 | smtp_from: 'lework@test.com' 5 | smtp_auth_username: 'lework@test.com' 6 | smtp_auth_password: '123456' 7 | 8 | # The directory from which notification templates are read. 9 | templates: 10 | - '/etc/alertmanager/template/*.tmpl' 11 | 12 | # The root route on which each incoming alert enters. 13 | route: 14 | # its own grouping. Example: group_by: [...] 15 | group_by: ['alertname'] 16 | group_wait: 5s 17 | group_interval: 5s 18 | repeat_interval: 1h 19 | 20 | receiver: default-receiver 21 | 22 | 23 | receivers: 24 | - name: 'default-receiver' 25 | email_configs: 26 | - to: 'lework@test.com' 27 | send_resolved: true 28 | -------------------------------------------------------------------------------- /nacos/init.d/custom.properties: -------------------------------------------------------------------------------- 1 | #spring.security.enabled=false 2 | #management.security=false 3 | #security.basic.enabled=false 4 | #nacos.security.ignore.urls=/** 5 | #management.metrics.export.elastic.host=http://localhost:9200 6 | # metrics for prometheus 7 | management.endpoints.web.exposure.include=* 8 | 9 | # metrics for elastic search 10 | #management.metrics.export.elastic.enabled=false 11 | #management.metrics.export.elastic.host=http://localhost:9200 12 | 13 | # metrics for influx 14 | #management.metrics.export.influx.enabled=false 15 | #management.metrics.export.influx.db=springboot 16 | #management.metrics.export.influx.uri=http://localhost:8086 17 | #management.metrics.export.influx.auto-create-db=true 18 | #management.metrics.export.influx.consistency=one 19 | #management.metrics.export.influx.compressed=true -------------------------------------------------------------------------------- /mysql/scripts/backup_mysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/env bash 2 | ### backup mysql db 3 | ### crontab 4 | ### 1 0 * * * /bin/bash /data/scripts/backup_mysql.sh 5 | 6 | HOME="/home/data/backup" 7 | MYSQL_HOST="127.0.0.1" 8 | MYSQL_USER="root" 9 | MYSQL_PASS="123456" 10 | MYSQL_PORT="3306" 11 | KEEP_NUM="10" 12 | 13 | datetime_str=$(date +'%Y%m%d%H%M') 14 | 15 | # INIT 16 | [ ! -d ${HOME} ] && mkdir -p ${HOME:-/tmp} 17 | 18 | # DUMP 19 | echo "[backup] start." 20 | mysqldump -h${MYSQL_HOST} -P${MYSQL_PORT} -u${MYSQL_USER} -p${MYSQL_PASS} -A -B --triggers --events --single-transaction --max_allowed_packet=512M --default-character-set=utf8 | gzip > ${HOME}/pro-mysql_${datetime_str}.gz 21 | 22 | # CLEAN 23 | echo "[backup] clean." 24 | ls -t ${HOME:-/tmp}/* | tail -n +$((${KEEP_NUM}+1)) | xargs /bin/rm -rfv 25 | 26 | echo "[backup] done." 
27 | -------------------------------------------------------------------------------- /openldap/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | openldap: 4 | domainname: "example.org" # important: same as hostname 5 | hostname: "example.org" 6 | image: osixia/openldap:1.2.4 7 | container_name: openldap 8 | environment: 9 | LDAP_ORGANISATION: "Example Inc." 10 | LDAP_DOMAIN: "example.org" 11 | LDAP_ADMIN_PASSWORD: "admin" 12 | LDAP_CONFIG_PASSWORD: "config" 13 | volumes: 14 | - ./ldap:/var/lib/ldap 15 | - ./slapd.d:/etc/ldap/slapd.d 16 | ports: 17 | - "389:389" 18 | phpldapadmin: 19 | image: osixia/phpldapadmin:latest 20 | container_name: phpldapadmin 21 | environment: 22 | PHPLDAPADMIN_LDAP_HOSTS: "openldap" 23 | PHPLDAPADMIN_HTTPS: "false" 24 | ports: 25 | - "8080:80" 26 | depends_on: 27 | - openldap -------------------------------------------------------------------------------- /clickhouse/single/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | services: 3 | clickhouse: 4 | container_name: clickhouse 5 | image: clickhouse/clickhouse-server:24.4.3.25-alpine 6 | volumes: 7 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 8 | - ./config.d/config.xml:/etc/clickhouse-server/config.d/config.xml 9 | - ./users.d/users.xml:/etc/clickhouse-server/users.d/users.xml 10 | - ./data:/var/lib/clickhouse 11 | - ./logs:/var/log/clickhouse-server 12 | ports: 13 | - "8123:8123" 14 | - "9000:9000" 15 | ulimits: 16 | nproc: 65535 17 | nofile: 18 | soft: 262144 19 | hard: 262144 20 | healthcheck: 21 | test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"] 22 | interval: 30s 23 | timeout: 5s 24 | retries: 3 25 | -------------------------------------------------------------------------------- /grafana/grafana-allstack/alloy/endpoints.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "url": "http://mimir:9009/api/v1/push", 4 | "basicAuth": { 5 | "username": "", 6 | "password": "" 7 | } 8 | }, 9 | "logs": { 10 | "url": "http://loki:3100/loki/api/v1/push", 11 | "basicAuth": { 12 | "username": "", 13 | "password": "" 14 | } 15 | }, 16 | "traces": { 17 | "url": "http://tempo:4317", 18 | "basicAuthToken": "", 19 | "tls": { 20 | "insecure": true, 21 | "insecureSkipVerify": true 22 | } 23 | }, 24 | "profiles": { 25 | "url": "http://pyroscope:4040", 26 | "basicAuth": { 27 | "username": "", 28 | "password": "" 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /minio/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | minio: 5 | container_name: minio 6 | image: minio/minio:RELEASE.2022-02-07T08-17-33Z 7 | ports: 8 | - 9000:9000 # api 端口 9 | - 9001:9001 # 控制台端口 10 | environment: 11 | MINIO_ROOT_USER: admin #管理后台用户名 12 | MINIO_ROOT_PASSWORD: admin123 #管理后台密码,最小8个字符 13 | volumes: 14 | - ./minio_data:/data #映射当前目录下的data目录至容器内/data目录 15 | - ./minio_config:/root/.minio/ #映射配置目录 16 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 17 | command: server --console-address ':9001' /data #指定容器中的目录 /data 18 | privileged: true 19 | restart: always 20 | healthcheck: 21 | test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] 22 | interval: 30s 23 | timeout: 20s 24 | retries: 3 
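The MinIO console on :9001 covers manual administration, but scripted setups usually pre-create the buckets that clients (for example the S3-backed Loki config elsewhere in this repository) expect. A minimal sketch with the MinIO client mc, assuming it is installed on the host; the alias name and the loki-data bucket are only examples, and the credentials come from the compose file above:

```bash
# Create and verify a bucket on the MinIO instance defined above.
mc alias set local http://localhost:9000 admin admin123   # root user/password from the compose file
mc mb local/loki-data                                      # example bucket name
mc ls local                                                # confirm the bucket exists
```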
-------------------------------------------------------------------------------- /mysql/docker-compose-redis.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | mysql: 5 | container_name: mysql 6 | image: mysql:5.7 7 | volumes: 8 | - "./mysql_data:/var/lib/mysql" 9 | - "/etc/localtime:/etc/localtime" 10 | network_mode: "host" 11 | restart: always 12 | environment: 13 | MYSQL_ROOT_PASSWORD: cPfQon8xv 14 | 15 | redis: 16 | container_name: redis 17 | image: redis:5.0.7 18 | volumes: 19 | - "./redis_data:/data" 20 | - "/etc/localtime:/etc/localtime" 21 | network_mode: "host" 22 | restart: always 23 | command: [ 24 | '--port 6379', 25 | '--requirepass 65a3l2E', 26 | '--maxclients 1000', 27 | '--maxmemory 1gb', 28 | '--maxmemory-policy volatile-ttl', 29 | '--appendonly yes', 30 | '--aof-use-rdb-preamble yes' 31 | ] -------------------------------------------------------------------------------- /nacos/standalone-derby.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | nacos: 5 | image: nacos/nacos-server:${NACOS_VERSION} 6 | container_name: nacos-standalone 7 | environment: 8 | - PREFER_HOST_MODE=hostname 9 | - MODE=standalone 10 | volumes: 11 | - ./standalone-logs/:/home/nacos/logs 12 | - ./init.d/custom.properties:/home/nacos/init.d/custom.properties 13 | ports: 14 | - "8848:8848" 15 | 16 | prometheus: 17 | container_name: prometheus 18 | image: prom/prometheus 19 | volumes: 20 | - ./prometheus/prometheus-standalone.yaml:/etc/prometheus/prometheus.yml 21 | ports: 22 | - "9090:9090" 23 | depends_on: 24 | - nacos 25 | restart: on-failure 26 | 27 | grafana: 28 | container_name: grafana 29 | image: grafana/grafana 30 | ports: 31 | - 3000:3000 32 | restart: on-failure 33 | -------------------------------------------------------------------------------- /guacamole/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # check if docker is running 4 | if ! (docker ps >/dev/null 2>&1) 5 | then 6 | echo "docker daemon not running, will exit here!" 
7 | exit 8 | fi 9 | echo "Preparing folder init and creating ./init/initdb.sql" 10 | mkdir ./init >/dev/null 2>&1 11 | mkdir -p ./nginx/ssl >/dev/null 2>&1 12 | chmod -R +x ./init 13 | docker run --rm guacamole/guacamole /opt/guacamole/bin/initdb.sh --postgresql > ./init/initdb.sql 14 | echo "done" 15 | echo "Creating SSL certificates" 16 | openssl req -nodes -newkey rsa:2048 -new -x509 -keyout nginx/ssl/self-ssl.key -out nginx/ssl/self.cert -subj '/C=DE/ST=BY/L=Hintertupfing/O=Dorfwirt/OU=Theke/CN=www.createyourown.domain/emailAddress=docker@createyourown.domain' 17 | echo "You can use your own certificates by placing the private key in nginx/ssl/self-ssl.key and the cert in nginx/ssl/self.cert" 18 | echo "done" 19 | -------------------------------------------------------------------------------- /nginx-proxy-manager/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | nginx-proxy-manager: 4 | container_name: nginx-proxy-manager 5 | image: 'jc21/nginx-proxy-manager:2.12.1' 6 | restart: always 7 | environment: 8 | DISABLE_IPV6: 'true' 9 | ports: 10 | - '80:80' 11 | - '81:81' 12 | - '443:443' 13 | # network_mode: host 14 | entrypoint: bash 15 | command: 16 | - "-c" 17 | - "pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && exec /init" 18 | volumes: 19 | - ./data:/data 20 | - ./letsencrypt:/etc/letsencrypt 21 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 22 | healthcheck: 23 | test: ["CMD", "/bin/check-health"] 24 | interval: 10s 25 | timeout: 3s 26 | 27 | 28 | 29 | # Email: admin@example.com 30 | # Password: changeme -------------------------------------------------------------------------------- /grafana/grafana-allstack/mimir/rules/anonymous/alerts_grafana.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: GrafanaAlerts 3 | rules: 4 | - alert: GrafanaRequestsFailing 5 | annotations: 6 | message: '{{ $labels.namespace }}/{{ $labels.job }}/{{ $labels.handler }} is experiencing {{ $value | humanize }}% errors' 7 | expr: | 8 | 100 * sum without (status_code) (namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query", status_code=~"5.."}) 9 | / 10 | sum without (status_code) (namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query"}) 11 | > 50 12 | for: 5m 13 | labels: 14 | severity: warning 15 | -------------------------------------------------------------------------------- /watchtower/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | app-with-scope: 4 | image: myapps/monitored-by-watchtower 5 | labels: [ "com.centurylinklabs.watchtower.scope=myscope" ] 6 | scoped-watchtower: 7 | image: containrrr/watchtower:latest 8 | container_name: scoped-watchtower 9 | command: --interval 30 --scope myscope 10 | labels: [ "com.centurylinklabs.watchtower.scope=myscope" ] 11 | volumes: 12 | - /var/run/docker.sock:/var/run/docker.sock 13 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 14 | watchtower: 15 | image: containrrr/watchtower:latest 16 | container_name: watchtower 17 | command: prometheus grafana 18 | volumes: 19 | - /var/run/docker.sock:/var/run/docker.sock 20 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 21 | environment: 22 | 
WATCHTOWER_SCHEDULE: "0 0 4 * * *" 23 | TZ: Asia/Shanghai -------------------------------------------------------------------------------- /focalboard/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | app: 5 | image: mattermost/focalboard:0.8.0 6 | container_name: focalboard 7 | depends_on: 8 | - focalboard-db 9 | ports: 10 | - 8000:8000 11 | environment: 12 | - VIRTUAL_HOST=localhost 13 | - VIRTUAL_PORT=8000 14 | - VIRTUAL_PROTO=http 15 | volumes: 16 | - "./config.json:/opt/focalboard/config.json" 17 | restart: always 18 | networks: 19 | - focalboard 20 | 21 | focalboard-db: 22 | image: postgres:12.7 23 | restart: always 24 | container_name: focalboard-postgres 25 | restart: always 26 | volumes: 27 | - "./data:/var/lib/postgresql/data" 28 | environment: 29 | POSTGRES_DB: boards 30 | POSTGRES_USER: boardsuser 31 | POSTGRES_PASSWORD: boardsuser-password 32 | networks: 33 | - focalboard 34 | 35 | networks: 36 | focalboard: 37 | -------------------------------------------------------------------------------- /adguard-home/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | adguardhome: 5 | container_name: adguardhome 6 | image: adguard/adguardhome:v0.107.0-b.13 7 | volumes: 8 | - "/usr/share/zoneinfo/Asia/Shanghai:/etc/localtime" 9 | ports: 10 | # plain DNS. 11 | - 53:53/tcp 12 | - 53:53/udp 13 | # DHCP server. network_mode: host 14 | - 67:67/udp 15 | - 68:68/tcp 16 | - 68:68/udp 17 | # HTTPS/DNS-over-HTTPS. 18 | - 80:80/tcp 19 | - 443:443/tcp 20 | # AdGuard Home. 21 | - 3000:3000/tcp 22 | # DNS-over-TLS. 23 | - 853:853/tcp 24 | # DNS-over-QUIC. 25 | - 784:784/udp 26 | - 853:853/udp 27 | - 8853:8853/udp 28 | # DNSCrypt. 
29 | - 5443:5443/tcp 30 | - 5443:5443/udp 31 | volumes: 32 | - ./adguardhome_data/workdir:/opt/adguardhome/work 33 | - ./adguardhome_data/confdir:/opt/adguardhome/conf 34 | restart: unless-stopped -------------------------------------------------------------------------------- /pihole/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | # More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/ 4 | services: 5 | pihole: 6 | container_name: pihole 7 | image: pihole/pihole:latest 8 | ports: 9 | - "53:53/tcp" 10 | - "53:53/udp" 11 | - "67:67/udp" 12 | - "80:80/tcp" 13 | environment: 14 | TZ: 'Aisa/Shanghai' 15 | WEBPASSWORD: '123456' 16 | DNSMASQ_LISTENING: 'all' 17 | WEB_PORT: '80' 18 | ServerIP: '192.168.77.130' 19 | # Volumes store your data between container upgrades 20 | volumes: 21 | - './etc-pihole/:/etc/pihole/' 22 | - './etc-dnsmasq.d/:/etc/dnsmasq.d/' 23 | dns: 24 | - 127.0.0.1 25 | - 223.5.5.5 26 | # Recommended but not required (DHCP needs NET_ADMIN) 27 | # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities 28 | cap_add: 29 | - NET_ADMIN 30 | restart: unless-stopped 31 | -------------------------------------------------------------------------------- /jenkins/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | jenkins: 4 | image: jenkins/jenkins:2.440.3-lts 5 | container_name: jenkins 6 | user: root 7 | ports: 8 | - 8080:8080 9 | - 50000:50000 10 | volumes: 11 | - ./jenkins_data:/var/jenkins_home 12 | - /var/run/docker.sock:/var/run/docker.sock 13 | - /usr/bin/docker:/usr/bin/docker 14 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 15 | environment: 16 | - TZ=Asia/Shanghai 17 | - LANG=en_US.UTF-8 18 | - "JAVA_OPTS=-Dsun.jnu.encoding=UTF-8 -Dfile.encoding=UTF-8 -Dhudson.model.DownloadService.noSignatureCheck=true -Dhudson.model.UpdateCenter.updateCenterUrl=https://cdn.jsdelivr.net/gh/lework/jenkins-update-center/updates/tencent/" 19 | - JENKINS_UC=https://cdn.jsdelivr.net/gh/lework/jenkins-update-center/updates/tencent 20 | - JENKINS_UC_DOWNLOAD=https://mirrors.tuna.tsinghua.edu.cn/jenkins/ 21 | restart: always 22 | -------------------------------------------------------------------------------- /zookeeper/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | zookeeper1: 5 | image: zookeeper 6 | restart: always 7 | hostname: zoo1 8 | ports: 9 | - 2181:2181 10 | environment: 11 | ZOO_MY_ID: 1 12 | ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zookeeper2:2888:3888;2181 server.3=zookeeper3:2888:3888;2181 13 | 14 | zookeeper2: 15 | image: zookeeper 16 | restart: always 17 | hostname: zoo2 18 | ports: 19 | - 2182:2181 20 | environment: 21 | ZOO_MY_ID: 2 22 | ZOO_SERVERS: server.1=zookeeper1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zookeeper3:2888:3888;2181 23 | 24 | zookeeper3: 25 | image: zookeeper 26 | restart: always 27 | hostname: zoo3 28 | ports: 29 | - 2183:2181 30 | environment: 31 | ZOO_MY_ID: 3 32 | ZOO_SERVERS: server.1=zookeeper1:2888:3888;2181 server.2=zookeeper2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181 -------------------------------------------------------------------------------- /joplin/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | joplin_db: 5 | restart: 
unless-stopped 6 | image: postgres:13.1 7 | ports: 8 | - "5432:5432" 9 | volumes: 10 | - ./joplin-data:/var/lib/postgresql/data 11 | environment: 12 | - POSTGRES_PASSWORD=joplin 13 | - POSTGRES_USER=joplin 14 | - POSTGRES_DB=joplin 15 | joplin_server: 16 | environment: 17 | - APP_BASE_URL=https://joplin.test.com 18 | - APP_PORT=22300 19 | - POSTGRES_PASSWORD=joplin 20 | - POSTGRES_DATABASE=joplin 21 | - POSTGRES_USER=joplin 22 | - POSTGRES_PORT=5432 23 | - POSTGRES_HOST=joplin_db 24 | - DB_CLIENT=pg 25 | restart: unless-stopped 26 | image: florider89/joplin-server:latest 27 | ports: 28 | - "22300:22300" 29 | depends_on: 30 | - joplin_db -------------------------------------------------------------------------------- /sonarqube/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | sonarqube: 5 | container_name: sonarqube 6 | image: sonarqube:9.6.1-community 7 | volumes: 8 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 9 | - ./sonarqube_data:/opt/sonarqube/data 10 | - ./sonarqube_extensions:/opt/sonarqube/extensions 11 | - ./sonarqube_logs:/opt/sonarqube/logs 12 | depends_on: 13 | - sonarqube-db 14 | environment: 15 | SONAR_JDBC_URL: jdbc:postgresql://sonarqube-db:5432/sonar 16 | SONAR_JDBC_USERNAME: sonar 17 | SONAR_JDBC_PASSWORD: sonar 18 | restart: always 19 | sonarqube-db: 20 | image: postgres:12 21 | container_name: sonarqube-db 22 | environment: 23 | POSTGRES_USER: sonar 24 | POSTGRES_PASSWORD: sonar 25 | POSTGRES_DB: sonar 26 | volumes: 27 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 28 | - ./postgresql:/var/lib/postgresql 29 | - ./postgresql_data:/var/lib/postgresql/data -------------------------------------------------------------------------------- /mindoc/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | mindoc: 4 | container_name: mindoc 5 | image: registry.cn-hangzhou.aliyuncs.com/mindoc/mindoc:v2.0 6 | privileged: false 7 | restart: always 8 | ports: 9 | - 8181:8181 10 | volumes: 11 | - ./mindoc_database:/mindoc/database 12 | - ./mindoc_uploads:/mindoc/uploads 13 | - /etc/localtime:/etc/localtime 14 | environment: 15 | - MINDOC_RUN_MODE=prod 16 | - MINDOC_DB_ADAPTER=sqlite3 17 | - MINDOC_DB_DATABASE=./database/mindoc.db 18 | - MINDOC_CACHE=true 19 | - MINDOC_CACHE_PROVIDER=file 20 | - MINDOC_ENABLE_EXPORT=false 21 | - MINDOC_ENABLE_MAIL=true 22 | - MINDOC_SMTP_USER_NAME=mindoc@test.com 23 | - MINDOC_SMTP_HOST=smtp.exmail.qq.com 24 | - MINDOC_SMTP_PASSWORD=123456 25 | - MINDOC_SMTP_PORT=465 26 | - MINDOC_FORM_USERNAME=mindoc@test.com 27 | - MINDOC_MAIL_SECURE=SSL 28 | dns: 29 | - 223.5.5.5 30 | - 223.6.6.6 -------------------------------------------------------------------------------- /rabbitmq/single/conf/rabbitmq-definitions.json: -------------------------------------------------------------------------------- 1 | { 2 | "global_parameters": [ 3 | {"name": "cluster_name", "value": "rabbitmq-sigle"} 4 | ], 5 | "permissions": [ 6 | { 7 | "configure": ".*", 8 | "read": ".*", 9 | "user": "guest", 10 | "vhost": "/", 11 | "write": ".*" 12 | }, 13 | { 14 | "configure": ".*", 15 | "read": ".*", 16 | "user": "admin", 17 | "vhost": "/", 18 | "write": ".*" 19 | } 20 | ], 21 | "users": [ 22 | { 23 | "hashing_algorithm": "rabbit_password_hashing_sha256", 24 | "name": "guest", 25 | "password_hash": "hENva+fxJ7gnmaBK/WhwNHOYbvB53/QjNcqhtF4KqF7p21+x", 26 | "tags": "administrator" 27 | }, 28 | { 29 | "hashing_algorithm":
"rabbit_password_hashing_sha256", 30 | "name": "admin", 31 | "password_hash": "S4dp3Z60PxZM18Y9Ak5IVLn5TfRqDkSkuNQCkdWmRXXQ/ihU", 32 | "tags": "administrator" 33 | } 34 | ], 35 | "vhosts": [{"name": "/"}] 36 | } 37 | -------------------------------------------------------------------------------- /nomad/nomad/jobs/webservice.nomad: -------------------------------------------------------------------------------- 1 | job "webserver" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | 5 | group "webserver" { 6 | count = 2 7 | restart { 8 | attempts = 2 9 | interval = "30m" 10 | delay = "15s" 11 | mode = "fail" 12 | } 13 | ephemeral_disk { 14 | size = 300 15 | } 16 | 17 | task "apache" { 18 | driver = "docker" 19 | config { 20 | image = "httpd:latest" 21 | port_map { 22 | http = 80 23 | } 24 | } 25 | 26 | resources { 27 | network { 28 | mbits = 10 29 | port "http" {} 30 | } 31 | } 32 | 33 | service { 34 | name = "apache-webserver" 35 | tags = ["sigle", "urlprefix-/"] 36 | port = "http" 37 | check { 38 | name = "alive" 39 | type = "http" 40 | path = "/" 41 | interval = "10s" 42 | timeout = "2s" 43 | } 44 | } 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /grafana/grafana-allstack/loki/local-config.yaml: -------------------------------------------------------------------------------- 1 | auth_enabled: false 2 | 3 | server: 4 | http_listen_port: 3100 5 | log_level: info 6 | log_format: json 7 | 8 | common: 9 | instance_addr: 127.0.0.1 10 | path_prefix: /loki 11 | storage: 12 | filesystem: 13 | chunks_directory: /loki/chunks 14 | rules_directory: /loki/rules 15 | replication_factor: 1 16 | ring: 17 | kvstore: 18 | store: inmemory 19 | 20 | schema_config: 21 | configs: 22 | - from: 2020-10-24 23 | store: tsdb 24 | object_store: filesystem 25 | schema: v13 26 | index: 27 | prefix: index_ 28 | period: 24h 29 | 30 | ingester: 31 | max_chunk_age: 48h 32 | 33 | limits_config: 34 | max_line_size: 5MB 35 | max_label_names_per_series: 20 36 | query_timeout: 3m 37 | ingestion_rate_mb: 10 38 | retention_period: 5w 39 | reject_old_samples: true 40 | reject_old_samples_max_age: 5w 41 | 42 | ruler: 43 | alertmanager_url: http://localhost:9093 44 | 45 | analytics: 46 | reporting_enabled: false 47 | -------------------------------------------------------------------------------- /healthchecks/docker-compose-pgsql.yaml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | 3 | services: 4 | healthchecks: 5 | image: healthchecks/healthchecks 6 | container_name: hc_postgres 7 | restart: unless-stopped 8 | environment: 9 | DB: postgres 10 | DB_HOST: postgres 11 | DB_PORT: 5432 12 | DB_USER: healthchecks_user 13 | DB_NAME: healthchecks_db 14 | DB_PASSWORD: healthchecks_pass 15 | volumes: 16 | - /etc/localtime:/etc/localtime:ro 17 | - /etc/timezone:/etc/timezone:ro 18 | ports: 19 | - 9000:8000 20 | depends_on: 21 | - postgres 22 | 23 | postgres: 24 | image: postgres 25 | container_name: postgres 26 | restart: unless-stopped 27 | environment: 28 | POSTGRES_DB: healthchecks_db 29 | POSTGRES_USER: healthchecks_user 30 | POSTGRES_PASSWORD: healthchecks_pass 31 | volumes: 32 | - /etc/localtime:/etc/localtime:ro 33 | - /etc/timezone:/etc/timezone:ro 34 | - healthchecks_postgres:/var/lib/postgresql/data/ 35 | 36 | volumes: 37 | healthchecks_postgres: -------------------------------------------------------------------------------- /clickhouse/log_nginx/vector/vector.yaml: 
-------------------------------------------------------------------------------- 1 | api: 2 | enabled: true 3 | address: 0.0.0.0:8686 4 | 5 | 6 | sources: 7 | nginx_logs: 8 | type: file 9 | include: 10 | - /var/log/nginx/access.log 11 | # .timestamp = parse_timestamp(.timestamp, "%Y-%m-%d %H:%M:%S %z") ?? now() 12 | transforms: 13 | nginx_parser: 14 | type: remap 15 | inputs: 16 | - nginx_logs 17 | source: |- 18 | . = parse_json!(.message) 19 | .timestamp, err = replace(.timestamp, "+08:00", "") 20 | 21 | sinks: 22 | print: 23 | type: console 24 | inputs: 25 | - nginx_parser 26 | encoding: 27 | codec: json 28 | clickhouse: 29 | type: clickhouse 30 | inputs: 31 | - nginx_parser 32 | endpoint: http://clickhouse:8123 33 | database: logs 34 | table: nginx_access_log 35 | auth: 36 | strategy: basic 37 | user: default 38 | password: "123456" 39 | skip_unknown_fields: true 40 | healthcheck: true 41 | compression: gzip 42 | format: json_each_row 43 | encoding: 44 | timestamp_format: unix 45 | -------------------------------------------------------------------------------- /grafana/docker-compose-loki.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | networks: 4 | loki: 5 | 6 | services: 7 | loki: 8 | image: grafana/loki:2.2.1 9 | ports: 10 | - "3100:3100" 11 | command: -config.file=/etc/loki/local-config.yaml 12 | restart: always 13 | volumes: 14 | - ./loki_data/chunks:/loki/chunks/ 15 | environment: 16 | - TZ=Asia/Shanghai 17 | - LANG=zh_CN.UTF-8 18 | networks: 19 | - loki 20 | 21 | promtail: 22 | image: grafana/promtail:2.2.1 23 | volumes: 24 | - /var/log:/var/log 25 | command: -config.file=/etc/promtail/docker-config.yaml 26 | restart: always 27 | environment: 28 | - TZ=Asia/Shanghai 29 | - LANG=zh_CN.UTF-8 30 | networks: 31 | - loki 32 | 33 | grafana: 34 | image: grafana/grafana:8.3.1 35 | restart: always 36 | volumes: 37 | - ./grafana_data:/var/lib/grafana 38 | environment: 39 | - TZ=Asia/Shanghai 40 | - LANG=zh_CN.UTF-8 41 | - GF_EXPLORE_ENABLED=true 42 | ports: 43 | - "3000:3000" 44 | networks: 45 | - loki 46 | -------------------------------------------------------------------------------- /kafka-ui/docker-compose-odic.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | kafka-ui: 4 | image: provectuslabs/kafka-ui:v0.7.2 5 | container_name: kafka-ui 6 | restart: always 7 | environment: 8 | - AUTH_TYPE=OAUTH2 9 | - AUTH_OAUTH2_CLIENT_AUTH0_CLIENTID=xxxx 10 | - AUTH_OAUTH2_CLIENT_AUTH0_CLIENTSECRET=xxxx 11 | - AUTH_OAUTH2_CLIENT_AUTH0_SCOPE=openid,email,name,offline_access,profile 12 | - AUTH_OAUTH2_CLIENT_AUTH0_ISSUER_URI=https://kafka-ui.sso.xxxx.com/oidc 13 | - AUTH_OAUTH2_CLIENT_AUTH0_PROVIDER=authing 14 | - AUTH_OAUTH2_CLIENT_AUTH0_USERNAMEATTRIBUTE=email 15 | - SERVER_FORWARDHEADERSSTRATEGY=native 16 | 17 | - KAFKA_CLUSTERS_0_NAME=dev 18 | - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=127.0.0.1:9091,127.0.0.1:9092,127.0.0.1:9093 19 | - KAFKA_CLUSTERS_0_READONLY=false 20 | 21 | - KAFKA_CLUSTERS_1_NAME=test 22 | - KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS=127.0.0.1:9091,127.0.0.1:9092,127.0.0.1:9093 23 | - KAFKA_CLUSTERS_1_READONLY=true 24 | volumes: 25 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime -------------------------------------------------------------------------------- /rabbitmq/docker-compose-singe.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | networks: 4 | rabbitmq: 5 | 6 | services: 7 | rabbitmq: 8
| image: rabbitmq:3.8.2-management 9 | hostname: rabbitmq 10 | container_name: rabbitmq 11 | environment: 12 | RABBITMQ_ERLANG_COOKIE: rabbitmq-cookie 13 | #RABBITMQ_DEFAULT_USER: "admin" 14 | #RABBITMQ_DEFAULT_PASS: "admin" 15 | #RABBITMQ_DEFAULT_VHOST: "/" 16 | volumes: 17 | - ./single/conf/enabled_plugins:/etc/rabbitmq/enabled_plugins:ro 18 | - ./single/conf/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf:ro 19 | - ./single/conf/rabbitmq-definitions.json:/etc/rabbitmq/rabbitmq-definitions.json:ro 20 | - ./single/data:/var/lib/rabbitmq 21 | ports: 22 | - "4369:4369" 23 | - "5671:5671" 24 | - "5672:5672" 25 | - "15671:15671" 26 | - "15672:15672" 27 | - "25672:25672" 28 | - "15692:15692" 29 | networks: 30 | - rabbitmq 31 | cap_add: 32 | - ALL 33 | ulimits: 34 | nofile: 35 | soft: "2000" 36 | hard: "2000" 37 | restart: always 38 | -------------------------------------------------------------------------------- /coredns/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | services: 3 | coredns: 4 | container_name: coredns 5 | hostname: coredns 6 | image: coredns/coredns:1.2.6 7 | command: -conf /Corefile 8 | network_mode: docker 9 | volumes: 10 | - './coredns/conf/Corefile:/Corefile' 11 | ports: 12 | - "53:53/udp" 13 | - "53:53/tcp" 14 | - "18080:18080" 15 | - "19153:19153" 16 | depends_on: 17 | - coredns-etcd 18 | restart: always 19 | 20 | coredns-etcd: 21 | container_name: coredns-etcd 22 | hostname: coredns-etcd 23 | image: bitnami/etcd:3.3.10 24 | ports: 25 | - 2379:2379 26 | - 2380:2380 27 | network_mode: docker 28 | volumes: 29 | - "./etcd/data:/opt/bitnami/etcd/data" 30 | environment: 31 | - ALLOW_NONE_AUTHENTICATION=yes 32 | - ETCDCTL_API=3 33 | restart: always 34 | 35 | coredns-etcdui: 36 | container_name: coredns-etcdui 37 | image: deltaprojects/etcdkeeper:latest 38 | ports: 39 | - "18081:8080" 40 | depends_on: 41 | - coredns-etcd 42 | restart: always 43 | -------------------------------------------------------------------------------- /elasticsearch/stack/auditbeat.docker.yml: -------------------------------------------------------------------------------- 1 | 2 | setup.template.settings: 3 | index: 4 | number_of_shards: 1 5 | number_of_replicas: 0 6 | codec: best_compression 7 | 8 | auditbeat.modules: 9 | 10 | - module: auditd 11 | audit_rules: | 12 | -w /etc/passwd -p wa -k identity 13 | -a always,exit -F arch=b32 -S open,creat,truncate,ftruncate,openat,open_by_handle_at -F exit=-EPERM -k access 14 | - module: file_integrity 15 | paths: 16 | - /bin 17 | - /usr/bin 18 | - /sbin 19 | - /usr/sbin 20 | - /etc 21 | 22 | processors: 23 | - add_docker_metadata: ~ 24 | 25 | output.elasticsearch: 26 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 27 | username: '${ELASTICSEARCH_USERNAME:}' 28 | password: '${ELASTICSEARCH_PASSWORD:}' 29 | 30 | setup.dashboards.enabled: true 31 | setup.kibana.host: '${KIBANA_HOST:kibana:5601}' 32 | 33 | http.enabled: true 34 | http.host: 0.0.0.0 35 | http.port: 5066 36 | 37 | #logging.level: debug 38 | -------------------------------------------------------------------------------- /elasticsearch/stack/stack.env: -------------------------------------------------------------------------------- 1 | 2 | ELK_VERSION=7.8.1 3 | 4 | #----------- Resources --------------------------# 5 | ELASTICSEARCH_HEAP=512m 6 | LOGSTASH_HEAP=512m 7 | 8 | #----------- Hosts and Ports --------------------# 9 | # To be able to further "de-compose" the compose files, get hostnames from environment variables 
instead. 10 | 11 | ELASTICSEARCH_HOST=elasticsearch 12 | ELASTICSEARCH_PORT=9200 13 | 14 | KIBANA_HOST=kibana 15 | KIBANA_PORT=5601 16 | 17 | LOGSTASH_HOST=logstash 18 | LOGSTASH_PORT=8080 19 | 20 | #----------- Credentials ------------------------# 21 | # Username & Password for Admin Elasticsearch cluster. 22 | # This is used to set the password at setup, and used by others to connect to Elasticsearch at runtime. 23 | ELASTIC_USERNAME=elastic 24 | ELASTIC_PASSWORD=a123456 25 | 26 | #----------- Cluster ----------------------------# 27 | ELASTIC_CLUSTER_NAME=elastic-stack 28 | ELASTIC_INIT_MASTER_NODE=elasticsearch 29 | ELASTIC_NODE_NAME=elasticsearch 30 | 31 | # Hostnames of master eligible elasticsearch instances. (matches compose generated host name) 32 | ELASTIC_DISCOVERY_SEEDS=elasticsearch 33 | -------------------------------------------------------------------------------- /rocket.chat/bash/handler: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | SELF_PATH="$(dirname "$(readlink -f "$0")")"; cd $SELF_PATH; 3 | TMPFILE="/tmp/.handler.$(whoami)" 4 | #DEBUG=1 5 | 6 | _log(){ 7 | which logger &>/dev/null && logger -t hubot -p local3.notice "$*"; return 0 8 | } 9 | 10 | _usage(){ 11 | str="available commands:\n" 12 | str="$str""$(ls "$SELF_PATH/handlers" | awk '{print " "$1}' )" 13 | echo -e "$str" 14 | } 15 | 16 | is_function() { 17 | ls $SELF_PATH/handlers/$1* &>/dev/null && return 0 || return 1 18 | } 19 | 20 | check_error(){ 21 | [[ ! X"$(wc -c "$1" | cut -d" " -f1)" = X"0" ]] && { 22 | echo "please check the logs, the command was unsuccessful :/" 23 | cat "$1" | while read line; do _log "$line"; done 24 | } 25 | } 26 | 27 | # only process if not included by other script 28 | me="$(basename "$0")"; me="${me/\.\/}" 29 | if [[ "$me" == "handler" ]]; then 30 | _log "$*" 31 | handler1="handlers/$1.$2.$3" 32 | handler2="handlers/$1" 33 | [[ -f "$handler1" ]] && $handler1 "$2" "$3" "$4" 2>&1 ; 34 | [[ -f "$handler2" ]] && shift && exec $handler2 "$@" ; 35 | [[ ! -n $1 ]] && _usage 36 | fi 37 | exit 0 38 | -------------------------------------------------------------------------------- /drone/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | drone-server: 4 | container_name: drone-server 5 | image: drone/drone:1.2.3 6 | ports: 7 | - 80:80 8 | - 443 9 | volumes: 10 | - ./drone_data:/data 11 | - /var/run/docker.sock:/var/run/docker.sock 12 | - /etc/localtime:/etc/localtime 13 | environment: 14 | - DRONE_SERVER_HOST=192.168.77.134 15 | - DRONE_SERVER_PROTO=http 16 | - DRONE_TLS_AUTOCERT=false 17 | - DRONE_LOGS_DEBUG=true 18 | - DRONE_GIT_ALWAYS_AUTH=false 19 | - DRONE_GOGS_SERVER=http://gogs:3000 20 | - DRONE_RUNNER_CAPACITY=2 21 | - DRONE_USER_CREATE=username:root,admin:true 22 | restart: always 23 | 24 | gogs: 25 | container_name: gogs 26 | image: gogs/gogs:0.11.91 27 | volumes: 28 | - ./gogs_data:/data 29 | - /etc/localtime:/etc/localtime 30 | ports: 31 | - 3000:3000 32 | restart: always 33 | 34 | registry: 35 | image: registry:2.7.1 36 | container_name: registry 37 | ports: 38 | - 5000:5000 39 | volumes: 40 | - ./registry_data:/var/lib/registry 41 | - /etc/localtime:/etc/localtime 42 | restart: always 43 | -------------------------------------------------------------------------------- /wordpress/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Michael J.
Stealey 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /consul/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | networks: 4 | cluster: 5 | 6 | services: 7 | 8 | consul_c1: &consul-agent 9 | image: consul:latest 10 | container_name: consul_c1 11 | volumes: 12 | - ./cluster/config:/consul/config:rw 13 | networks: 14 | - cluster 15 | command: "agent -config-file=/consul/config/consul_c1.json" 16 | 17 | consul_c2: 18 | <<: *consul-agent 19 | container_name: consul_c2 20 | command: "agent -config-file=/consul/config/consul_c2.json" 21 | 22 | consul_s1: 23 | <<: *consul-agent 24 | container_name: consul_s1 25 | command: "agent -config-file=/consul/config/consul_s1.json" 26 | 27 | consul_s2: 28 | <<: *consul-agent 29 | container_name: consul_s2 30 | command: "agent -config-file=/consul/config/consul_s2.json" 31 | 32 | consul_s3: 33 | <<: *consul-agent 34 | container_name: consul_s3 35 | command: "agent -config-file=/consul/config/consul_s3.json" 36 | volumes: 37 | - ./cluster/config:/consul/config:rw 38 | - ./cluster/data:/consul/data:rw 39 | ports: 40 | - "8400:8400" 41 | - "8500:8500" 42 | - "8600:8600" 43 | -------------------------------------------------------------------------------- /clickhouse/log_nginx/clickvisual/clickvisual/config/rbac.conf: -------------------------------------------------------------------------------- 1 | [request_definition] 2 | r = sub, obj, act, dom 3 | # sub: user__UID or role__ROLE_NAME; # obj: app__AID, etc. # act: 'edit' or 'view'. # dom: like 'ent__EntId' 4 | # note, please using "__" as separation flag in 'sub', 'obj' or 'dom' to separate sub-parts. 5 | # Do not use "::" , "@" or "|" as separation flag. 6 | [policy_definition] 7 | # Policy with domain (e.g. p, role__admin__app__111, app__111, edit, ent__1 ) 8 | p = sub, obj, act, dom 9 | 10 | [role_definition] 11 | # User_role_with_domain (e.g. g, user__181, role__admin__app__111, ent__1) 12 | g = _, _, _ 13 | # Resource role (e.g. g2, app__111, Obj__GroupRole) 14 | g2 = _, _ 15 | # User_role_without_domain (e.g. 
g3, user__181, role__root) 16 | g3 = _, _ 17 | 18 | [policy_effect] 19 | e = some(where (p.eft == allow)) 20 | 21 | [matchers] 22 | m = (g(r.sub, p.sub, r.dom) || g3(r.sub, p.sub)) \ 23 | && (g2(r.obj, p.obj) || keyMatch(r.obj, p.obj) || keyMatch2(r.obj, p.obj)) \ 24 | && keyMatch(r.dom, p.dom) \ 25 | && (p.act == 'edit' && r.act == 'view' || keyMatch(r.act, p.act) || keyMatch2(r.act, p.act)) \ 26 | || (g3(r.sub, p.sub) && p.sub == "role__root") 27 | -------------------------------------------------------------------------------- /nacos/standalone-mysql-5.7.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | nacos: 5 | image: nacos/nacos-server:${NACOS_VERSION} 6 | container_name: nacos-standalone-mysql 7 | env_file: 8 | - ./env/nacos-standlone-mysql.env 9 | volumes: 10 | - ./standalone-logs/:/home/nacos/logs 11 | - ./init.d/custom.properties:/home/nacos/init.d/custom.properties 12 | ports: 13 | - "8848:8848" 14 | - "9555:9555" 15 | depends_on: 16 | - mysql 17 | restart: on-failure 18 | 19 | mysql: 20 | container_name: mysql 21 | image: nacos/nacos-mysql:5.7 22 | env_file: 23 | - ./env/mysql.env 24 | volumes: 25 | - ./mysql:/var/lib/mysql 26 | ports: 27 | - "3306:3306" 28 | 29 | prometheus: 30 | container_name: prometheus 31 | image: prom/prometheus 32 | volumes: 33 | - ./prometheus/prometheus-standalone.yaml:/etc/prometheus/prometheus.yml 34 | ports: 35 | - "9090:9090" 36 | depends_on: 37 | - nacos 38 | restart: on-failure 39 | 40 | grafana: 41 | container_name: grafana 42 | image: grafana/grafana 43 | ports: 44 | - 3000:3000 45 | restart: on-failure 46 | -------------------------------------------------------------------------------- /nomad/nomad/jobs/http-echo.nomad: -------------------------------------------------------------------------------- 1 | job "http-echo" { 2 | datacenters = ["dc1"] 3 | type = "system" 4 | update { 5 | stagger = "5s" 6 | max_parallel = 1 7 | 8 | } 9 | 10 | group "http-echo" { 11 | task "http-echo" { 12 | driver = "exec" 13 | config { 14 | command = "http-echo" 15 | args = [ 16 | "-listen", 17 | ":${NOMAD_PORT_http}", 18 | "-text", 19 | "hello world", 20 | ] 21 | } 22 | 23 | artifact { 24 | source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.zip" 25 | } 26 | 27 | service { 28 | name = "http-echo" 29 | tags = ["urlprefix-http-echo.com/"] 30 | port = "http" 31 | check { 32 | name = "alive" 33 | type = "http" 34 | path = "/" 35 | interval = "10s" 36 | timeout = "2s" 37 | } 38 | } 39 | 40 | 41 | resources { 42 | cpu = 100 43 | memory = 64 44 | network { 45 | mbits = 10 46 | port "http" {} 47 | } 48 | } 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /prometheus/prometheus/etc/prometheus-main.yml: -------------------------------------------------------------------------------- 1 | # global config 2 | global: 3 | scrape_interval: 15s # Default interval for scraping targets; the default is 1m. 4 | scrape_timeout: 10s # Default timeout for scraping targets; the default is 10s. 5 | evaluation_interval: 15s # Interval for evaluating rules; the default is 1m. 6 | 7 | 8 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 9 | rule_files: 10 | - "alerts/*.rules" 11 | # - "second_rules.yml" 12 | 13 | # A scrape configuration containing exactly one endpoint to scrape: 14 | # Here it's Prometheus itself. 15 | scrape_configs: 16 | # The job name is added as a label `job=` to any timeseries scraped from this config.
17 | - job_name: 'prometheus' 18 | scrape_interval: 5s 19 | # metrics_path defaults to '/metrics' 20 | # scheme defaults to 'http'. 21 | static_configs: 22 | - targets: ['localhost:9090'] 23 | labels: 24 | group: 'prometheus' 25 | 26 | - job_name: 'node-works' 27 | file_sd_configs: 28 | - files: 29 | - targets/works/*.json 30 | refresh_interval: 5s 31 | honor_labels: true 32 | metrics_path: /federate 33 | params: 34 | 'match[]': 35 | - '{__name__=~".*"}' 36 | -------------------------------------------------------------------------------- /rabbitmq/docker-compose-cluster.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | networks: 4 | rabbitmq: 5 | 6 | services: 7 | rmq0: &rabbitmq 8 | image: rabbitmq:3.8.2-management 9 | hostname: rmq0 10 | container_name: rmq0 11 | environment: 12 | RABBITMQ_ERLANG_COOKIE: rabbitmq-cluster-cookie 13 | volumes: 14 | - ./cluster/conf/enabled_plugins:/etc/rabbitmq/enabled_plugins:ro 15 | - ./cluster/conf/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf:ro 16 | - ./cluster/conf/rabbitmq-definitions.json:/etc/rabbitmq/rabbitmq-definitions.json:ro 17 | ports: 18 | - "5673:5672" 19 | - "15673:15672" 20 | - "15693:15692" 21 | networks: 22 | - rabbitmq 23 | cap_add: 24 | - ALL 25 | ulimits: 26 | nofile: 27 | soft: "2000" 28 | hard: "2000" 29 | restart: always 30 | rmq1: 31 | << : *rabbitmq 32 | hostname: rmq1 33 | container_name: rmq1 34 | ports: 35 | - "5674:5672" 36 | - "15674:15672" 37 | - "15694:15692" 38 | rmq2: 39 | << : *rabbitmq 40 | hostname: rmq2 41 | container_name: rmq2 42 | ports: 43 | - "5675:5672" 44 | - "15675:15672" 45 | - "15695:15692" 46 | -------------------------------------------------------------------------------- /prometheus/thanos/prometheus/prometheus0.yml: -------------------------------------------------------------------------------- 1 | global: 2 | external_labels: 3 | prometheus: prom-0 4 | cluster: 'prometheus-ha' 5 | 6 | alerting: 7 | alertmanagers: 8 | - static_configs: 9 | - targets: 10 | - "alertmanager:9093" 11 | 12 | rule_files: 13 | - "alerts/*.yaml" 14 | 15 | scrape_configs: 16 | - job_name: prometheus 17 | scrape_interval: 5s 18 | static_configs: 19 | - targets: 20 | - "prometheus0:9090" 21 | - job_name: thanos-sidecar 22 | scrape_interval: 5s 23 | static_configs: 24 | - targets: 25 | - "sidecar0:10902" 26 | - job_name: thanos-store 27 | scrape_interval: 5s 28 | static_configs: 29 | - targets: 30 | - "store:10902" 31 | - job_name: thanos-receive 32 | scrape_interval: 5s 33 | static_configs: 34 | - targets: 35 | - "receive:10902" 36 | - job_name: thanos-compact 37 | scrape_interval: 5s 38 | static_configs: 39 | - targets: 40 | - "compactor:10902" 41 | - job_name: thanos-rule 42 | scrape_interval: 5s 43 | static_configs: 44 | - targets: 45 | - "ruler:10902" 46 | - job_name: thanos-query 47 | scrape_interval: 5s 48 | static_configs: 49 | - targets: 50 | - "query0:10902" 51 | - "query1:10902" 52 | -------------------------------------------------------------------------------- /prometheus/thanos/prometheus/prometheus1.yml: -------------------------------------------------------------------------------- 1 | global: 2 | external_labels: 3 | prometheus: prom-1 4 | cluster: 'prometheus-ha' 5 | 6 | alerting: 7 | alertmanagers: 8 | - static_configs: 9 | - targets: 10 | - "alertmanager:9093" 11 | 12 | rule_files: 13 | - "alerts/*.yaml" 14 | 15 | scrape_configs: 16 | - job_name: prometheus 17 | scrape_interval: 5s 18 | static_configs: 19 | - targets: 20 | - "prometheus1:9090" 21 | 
- job_name: thanos-sidecar 22 | scrape_interval: 5s 23 | static_configs: 24 | - targets: 25 | - "sidecar1:10902" 26 | - job_name: thanos-store 27 | scrape_interval: 5s 28 | static_configs: 29 | - targets: 30 | - "store:10902" 31 | - job_name: thanos-receive 32 | scrape_interval: 5s 33 | static_configs: 34 | - targets: 35 | - "receive:10902" 36 | - job_name: thanos-compact 37 | scrape_interval: 5s 38 | static_configs: 39 | - targets: 40 | - "compactor:10902" 41 | - job_name: thanos-rule 42 | scrape_interval: 5s 43 | static_configs: 44 | - targets: 45 | - "ruler:10902" 46 | - job_name: thanos-query 47 | scrape_interval: 5s 48 | static_configs: 49 | - targets: 50 | - "query0:10902" 51 | - "query1:10902" 52 | -------------------------------------------------------------------------------- /prometheus/thanos/prometheus/prometheus2.yml: -------------------------------------------------------------------------------- 1 | global: 2 | external_labels: 3 | prometheus: prom-2 4 | cluster: 'prometheus-ha' 5 | 6 | alerting: 7 | alertmanagers: 8 | - static_configs: 9 | - targets: 10 | - "alertmanager:9093" 11 | 12 | rule_files: 13 | - "alerts/*.yaml" 14 | 15 | scrape_configs: 16 | - job_name: prometheus 17 | scrape_interval: 5s 18 | static_configs: 19 | - targets: 20 | - "prometheus2:9090" 21 | - job_name: thanos-sidecar 22 | scrape_interval: 5s 23 | static_configs: 24 | - targets: 25 | - "sidecar2:10902" 26 | - job_name: thanos-store 27 | scrape_interval: 5s 28 | static_configs: 29 | - targets: 30 | - "store:10902" 31 | - job_name: thanos-receive 32 | scrape_interval: 5s 33 | static_configs: 34 | - targets: 35 | - "receive:10902" 36 | - job_name: thanos-compact 37 | scrape_interval: 5s 38 | static_configs: 39 | - targets: 40 | - "compactor:10902" 41 | - job_name: thanos-rule 42 | scrape_interval: 5s 43 | static_configs: 44 | - targets: 45 | - "ruler:10902" 46 | - job_name: thanos-query 47 | scrape_interval: 5s 48 | static_configs: 49 | - targets: 50 | - "query0:10902" 51 | - "query1:10902" 52 | -------------------------------------------------------------------------------- /wordpress/stop-and-remove.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Stop running containers and remove related directories 4 | read -p "Do you really want to stop and remove EVERYTHING (y/n)? 
" answer 5 | case ${answer:0:1} in 6 | y|Y ) 7 | echo "INFO: Stopping containers" 8 | docker-compose stop 9 | echo "INFO: Removing containers" 10 | docker-compose rm -f 11 | echo "INFO: Setting file permissions to that of the user" 12 | docker run --rm \ 13 | -v $(pwd):/clean \ 14 | -e UID=$(id -u) \ 15 | -e GID=$(id -g) \ 16 | nginx:latest /bin/bash -c 'chown -R $UID:$GID /clean' 17 | echo "INFO: Pruning unused docker volumes" 18 | docker volume prune -f 19 | echo "INFO: Pruning unused docker networks" 20 | docker network prune -f 21 | echo "INFO: Removing directories and contents (certs/ certs-data/ logs/nginx/ mysql/ wordpress/)" 22 | rm -rf certs/ certs-data/ logs/nginx/ mysql/ wordpress/ 23 | echo "INFO: Done" 24 | exit 0; 25 | ;; 26 | * ) 27 | echo "INFO: Exiting without stopping containers or removing files" 28 | exit 0; 29 | ;; 30 | esac 31 | 32 | exit 0; 33 | -------------------------------------------------------------------------------- /nacos/prometheus/prometheus-standalone.yaml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. 4 | evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Alertmanager configuration 8 | alerting: 9 | alertmanagers: 10 | - static_configs: 11 | - targets: 12 | # - alertmanager:9093 13 | 14 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 15 | rule_files: 16 | # - "first_rules.yml" 17 | # - "second_rules.yml" 18 | 19 | # A scrape configuration containing exactly one endpoint to scrape: 20 | # Here it's Prometheus itself. 21 | scrape_configs: 22 | # The job name is added as a label `job=` to any timeseries scraped from this config. 23 | - job_name: 'prometheus' 24 | 25 | # metrics_path defaults to '/metrics' 26 | # scheme defaults to 'http'. 
27 | 28 | static_configs: 29 | - targets: ['localhost:9090'] 30 | 31 | - job_name: 'nacos' 32 | metrics_path: '/nacos/actuator/prometheus' 33 | static_configs: 34 | - targets: ['nacos:8848'] -------------------------------------------------------------------------------- /prometheus/docker-compose-m3db.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | volumes: 4 | prometheus_data: {} 5 | 6 | services: 7 | prometheus: 8 | image: prom/prometheus:v2.12.0 9 | container_name: prometheus 10 | hostname: prometheus 11 | volumes: 12 | - /etc/localtime:/etc/localtime:ro 13 | - prometheus_data:/prometheus 14 | - ./prometheus/etc/prometheus-m3db.yml:/etc/prometheus/prometheus.yml 15 | command: 16 | - '--config.file=/etc/prometheus/prometheus.yml' 17 | - '--storage.tsdb.path=/prometheus' 18 | - '--storage.tsdb.retention.time=1d' 19 | - '--web.console.libraries=/usr/share/prometheus/console_libraries' 20 | - '--web.console.templates=/usr/share/prometheus/consoles' 21 | - '--web.enable-admin-api' 22 | - '--web.enable-lifecycle' 23 | ports: 24 | - 9090:9090 25 | restart: always 26 | m3db: 27 | image: quay.io/m3db/m3dbnode:latest 28 | container_name: m3db 29 | hostname: m3db 30 | volumes: 31 | - /etc/localtime:/etc/localtime:ro 32 | - ./m3db_data:/var/lib/m3db 33 | cap_add: 34 | - SYS_RESOURCE 35 | ports: 36 | - 7201:7201 37 | - 7203:7203 38 | - 9003:9003 39 | restart: always 40 | -------------------------------------------------------------------------------- /vault/vault/config/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | 4 | lock_file="/vault/config/init.log" 5 | 6 | 7 | function log() { 8 | printf "%s\n" "$@" >> "$lock_file" 9 | } 10 | 11 | 12 | function init() { 13 | vault_init_info=$(vault operator init) 14 | log "[Vault initialized]:" 15 | log "${vault_init_info}" 16 | 17 | # Parse unsealed keys 18 | vault operator unseal $(printf '%s' "$vault_init_info" | awk '/Unseal Key 1:.*/ {print $4}' ) 19 | vault operator unseal $(printf '%s' "$vault_init_info" | awk '/Unseal Key 2:.*/ {print $4}' ) 20 | vault operator unseal $(printf '%s' "$vault_init_info" | awk '/Unseal Key 3:.*/ {print $4}' ) 21 | 22 | # Get root token 23 | export VAULT_TOKEN=$(printf '%s' "$vault_init_info" | awk '/Initial Root Token:.*/ {print $4}' ) 24 | 25 | # Enable kv 26 | vault secrets enable kv 27 | 28 | # Add test value to hello 29 | vault kv put kv/hello value=world 30 | } 31 | 32 | 33 | if ! test -f "$lock_file"; then 34 | today=$(date +"%Y-%m-%d") 35 | 36 | log "[Date]: ${today}" 37 | 38 | until vault status | grep Initialized &>/dev/null; do 39 | >&2 echo "vault is unavailable - sleeping" 40 | sleep 1 41 | done 42 | 43 | init 44 | else 45 | echo "not exec init." 
46 | fi 47 | -------------------------------------------------------------------------------- /prometheus/prometheus/etc/alerts/blackbox-exporter.rules: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: blackbox-exporter 3 | rules: 4 | - alert: StatusCode 5 | expr: probe_http_status_code <= 199 OR probe_http_status_code >= 300 6 | for: 5m 7 | labels: 8 | severity: warning 9 | annotations: 10 | summary: "Status Code (instance {{ $labels.instance }})" 11 | description: "HTTP status code is not 200-299\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 12 | 13 | - alert: SslCertificateWillExpireSoon 14 | expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 30 15 | for: 5m 16 | labels: 17 | severity: warning 18 | annotations: 19 | summary: "SSL certificate will expire soon (instance {{ $labels.instance }})" 20 | description: "SSL certificate expires within 30 days\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 21 | 22 | - alert: SslCertificateHasExpired 23 | expr: probe_ssl_earliest_cert_expiry - time() <= 0 24 | for: 5m 25 | labels: 26 | severity: warning 27 | annotations: 28 | summary: "SSL certificate has expired (instance {{ $labels.instance }})" 29 | description: "SSL certificate has expired already\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" 30 | 31 | -------------------------------------------------------------------------------- /nacos/cluster-embedded.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | nacos1: 5 | hostname: nacos1 6 | container_name: nacos1 7 | image: nacos/nacos-server:${NACOS_VERSION} 8 | volumes: 9 | - ./cluster-logs/nacos1:/home/nacos/logs 10 | - ./init.d/custom.properties:/home/nacos/init.d/custom.properties 11 | ports: 12 | - "8848:8848" 13 | - "9555:9555" 14 | env_file: 15 | - ./env/nacos-embedded.env 16 | restart: always 17 | 18 | nacos2: 19 | hostname: nacos2 20 | image: nacos/nacos-server:${NACOS_VERSION} 21 | container_name: nacos2 22 | volumes: 23 | - ./cluster-logs/nacos2:/home/nacos/logs 24 | - ./init.d/custom.properties:/home/nacos/init.d/custom.properties 25 | ports: 26 | - "8849:8848" 27 | env_file: 28 | - ./env/nacos-embedded.env 29 | restart: always 30 | 31 | nacos3: 32 | hostname: nacos3 33 | image: nacos/nacos-server:${NACOS_VERSION} 34 | container_name: nacos3 35 | volumes: 36 | - ./cluster-logs/nacos3:/home/nacos/logs 37 | - ./init.d/custom.properties:/home/nacos/init.d/custom.properties 38 | ports: 39 | - "8850:8848" 40 | env_file: 41 | - ./env/nacos-embedded.env 42 | restart: always 43 | -------------------------------------------------------------------------------- /nacos/prometheus/prometheus-cluster.yaml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. 4 | evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Alertmanager configuration 8 | alerting: 9 | alertmanagers: 10 | - static_configs: 11 | - targets: 12 | # - alertmanager:9093 13 | 14 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 15 | rule_files: 16 | # - "first_rules.yml" 17 | # - "second_rules.yml" 18 | 19 | # A scrape configuration containing exactly one endpoint to scrape: 20 | # Here it's Prometheus itself.
21 | scrape_configs: 22 | # The job name is added as a label `job=` to any timeseries scraped from this config. 23 | - job_name: 'prometheus' 24 | 25 | # metrics_path defaults to '/metrics' 26 | # scheme defaults to 'http'. 27 | 28 | static_configs: 29 | - targets: ['localhost:9090'] 30 | 31 | - job_name: 'nacos' 32 | metrics_path: '/nacos/actuator/prometheus' 33 | static_configs: 34 | - targets: ["nacos1:8848","nacos2:8848","nacos3:8848"] -------------------------------------------------------------------------------- /elasticsearch/Makefile: -------------------------------------------------------------------------------- 1 | 2 | start: 3 | $(info Make: Starting single containers.) 4 | sysctl -w vm.max_map_count=262144 5 | docker-compose -f docker-compose-single.yml up -d 6 | stop: 7 | $(info Make: Stop single containers.) 8 | docker-compose -f docker-compose-single.yml down -v 9 | 10 | 11 | start-cluster: 12 | $(info Make: Starting cluster containers.) 13 | sysctl -w vm.max_map_count=262144 14 | docker-compose -f docker-compose-cluster.yml up -d 15 | stop-cluster: 16 | $(info Make: Stop cluster containers.) 17 | docker-compose -f docker-compose-cluster.yml down -v 18 | 19 | start-xpack: 20 | $(info Make: Starting xpack containers.) 21 | sysctl -w vm.max_map_count=262144 22 | docker-compose -f docker-compose-xpack-certs.yml run --rm create_certs 23 | docker-compose -f docker-compose-xpack.yml up -d 24 | stop-xpack: 25 | $(info Make: Stop xpack containers.) 26 | docker-compose -f docker-compose-xpack.yml down -v 27 | 28 | start-stack: 29 | $(info Make: Starting stack containers.) 30 | sysctl -w vm.max_map_count=262144 31 | docker-compose --env-file stack/stack.env -f docker-compose-stack.yml up -d 32 | stop-stack: 33 | $(info Make: Stop stack containers.) 
34 | docker-compose --env-file stack/stack.env -f docker-compose-stack.yml down -v 35 | -------------------------------------------------------------------------------- /prometheus/docker-compose-alertmanager.yml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | networks: 4 | monitor: 5 | 6 | services: 7 | alertmanager1: &alertmanager 8 | image: prom/alertmanager:v0.20.0 9 | container_name: alertmanager1 10 | hostname: alertmanager1 11 | volumes: 12 | - /etc/localtime:/etc/localtime:ro 13 | - ./alertmanager/etc:/etc/alertmanager 14 | command: 15 | - '--config.file=/etc/alertmanager/config.yml' 16 | - '--cluster.listen-address=0.0.0.0:9094' 17 | ports: 18 | - 9093:9093 19 | networks: 20 | - monitor 21 | restart: always 22 | alertmanager2: 23 | <<: *alertmanager 24 | container_name: alertmanager2 25 | hostname: alertmanager2 26 | command: 27 | - '--config.file=/etc/alertmanager/config.yml' 28 | - '--cluster.listen-address=0.0.0.0:9094' 29 | - '--cluster.peer=alertmanager1:9094' 30 | ports: 31 | - 9094:9093 32 | alertmanager3: 33 | <<: *alertmanager 34 | container_name: alertmanager3 35 | hostname: alertmanager3 36 | command: 37 | - '--config.file=/etc/alertmanager/config.yml' 38 | - '--cluster.listen-address=0.0.0.0:9094' 39 | - '--cluster.peer=alertmanager1:9094' 40 | ports: 41 | - 9095:9093 42 | -------------------------------------------------------------------------------- /loki/mysql-to-loki/loki/local-config.yaml: -------------------------------------------------------------------------------- 1 | auth_enabled: false 2 | 3 | server: 4 | http_listen_port: 3100 5 | log_level: info 6 | # log_format: json 7 | grpc_server_max_recv_msg_size: 16777216 8 | grpc_server_max_send_msg_size: 16777216 9 | 10 | common: 11 | instance_addr: 127.0.0.1 12 | path_prefix: /loki 13 | storage: 14 | filesystem: 15 | chunks_directory: /loki/chunks 16 | rules_directory: /loki/rules 17 | replication_factor: 1 18 | ring: 19 | kvstore: 20 | store: inmemory 21 | 22 | schema_config: 23 | configs: 24 | - from: 2014-01-01 25 | store: tsdb 26 | object_store: filesystem 27 | schema: v13 28 | index: 29 | prefix: index_ 30 | period: 24h 31 | 32 | ingester: 33 | max_chunk_age: 10m 34 | 35 | limits_config: 36 | max_streams_per_user: 0 37 | max_global_streams_per_user: 0 38 | max_line_size: 5MB 39 | max_label_names_per_series: 32 40 | query_timeout: 3m 41 | ingestion_rate_mb: 10 42 | reject_old_samples: false 43 | max_chunks_per_query: 20000000 44 | max_query_length: 0 45 | max_query_series: 5000 46 | max_query_parallelism: 100 47 | 48 | 49 | ruler: 50 | alertmanager_url: http://localhost:9093 51 | 52 | analytics: 53 | reporting_enabled: false -------------------------------------------------------------------------------- /kafka/docker-compose-sigle.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | zookeeper: 5 | container_name: zookeeper 6 | image: zookeeper:3.5.8 7 | restart: always 8 | ports: 9 | - 2181:2181 10 | environment: 11 | ZOO_MY_ID: 1 12 | ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 13 | 14 | kafka: 15 | container_name: kafka 16 | image: wurstmeister/kafka:2.13-2.6.0 17 | ports: 18 | - "19092:19092" 19 | environment: 20 | KAFKA_BROKER_ID: 0 21 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 22 | KAFKA_LISTENERS: INTERNAL://0.0.0.0:19092,EXTERNAL://0.0.0.0:9092 23 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:19092,EXTERNAL://<public-ip>:9092 24 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP:
INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 25 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 26 | KAFKA_CREATE_TOPICS: "test:1:1" 27 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 28 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100 29 | KAFKA_MESSAGE_MAX_BYTES: 256000 30 | KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.rmi.port=1099" 31 | JMX_PORT: 1099 32 | -------------------------------------------------------------------------------- /ansibles-emaphore/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | services: 3 | semaphore-mysql: 4 | image: mysql:5.7.33 5 | hostname: mysql 6 | container_name: semaphore-mysql 7 | volumes: 8 | - ./data/mysql:/var/lib/mysql 9 | ports: 10 | - "3306:3306" 11 | environment: 12 | MYSQL_DATABASE: semaphore 13 | MYSQL_USER: root 14 | MYSQL_PASSWORD: semaphore 15 | MYSQL_ROOT_PASSWORD: semaphore 16 | networks: 17 | semaphore: 18 | aliases: 19 | - mysql 20 | logging: 21 | driver: json-file 22 | options: 23 | max-file: '3' 24 | max-size: 100m 25 | 26 | semaphore: 27 | image: ansiblesemaphore/semaphore:v2.6.7 28 | hostname: semaphore 29 | container_name: semaphore 30 | volumes: 31 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime:ro 32 | - ./data/semaphore-conf/:/etc/semaphore/ # /etc/semaphore/config.json 33 | environment: 34 | SEMAPHORE_DB_HOST: mysql 35 | depends_on: 36 | - semaphore-mysql 37 | networks: 38 | semaphore: 39 | aliases: 40 | - semaphore 41 | logging: 42 | driver: json-file 43 | options: 44 | max-file: '3' 45 | max-size: 100m 46 | 47 | networks: 48 | semaphore: 49 | driver: bridge 50 | 51 | -------------------------------------------------------------------------------- /metabase/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | services: 3 | metabase: 4 | image: metabase/metabase:latest 5 | container_name: metabase 6 | hostname: metabase 7 | volumes: 8 | - /dev/urandom:/dev/random:ro 9 | - ./metabase-data:/metabase-data 10 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 11 | ports: 12 | - 3000:3000 13 | environment: 14 | JAVA_TIMEZONE: Asia/Shanghai 15 | MB_DB_FILE: /metabase-data/metabase.db 16 | MB_DB_TYPE: postgres 17 | MB_DB_DBNAME: metabaseappdb 18 | MB_DB_PORT: 5432 19 | MB_DB_USER: metabase 20 | MB_DB_PASS: metabase123 21 | MB_DB_HOST: postgres 22 | networks: 23 | - metanet1 24 | healthcheck: 25 | test: curl --fail -I http://localhost:3000/api/health || exit 1 26 | interval: 15s 27 | timeout: 5s 28 | retries: 5 29 | postgres: 30 | image: postgres:latest 31 | container_name: postgres 32 | hostname: postgres 33 | volumes: 34 | - ./postgres_data:/var/lib/postgresql/data 35 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 36 | environment: 37 | POSTGRES_USER: metabase 38 | POSTGRES_DB: metabaseappdb 39 | POSTGRES_PASSWORD: metabase123 40 | networks: 41 | - metanet1 42 | networks: 43 | metanet1: 44 | driver: bridge -------------------------------------------------------------------------------- /yapi/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.1' 2 | 3 | networks: 4 | yapi: 5 | 6 | services: 7 | mongo: 8 | container_name: yapi_mongodb 9 | image: mongo:4.4.6 10 | environment: 11 | MONGO_INITDB_ROOT_USERNAME: root 12 | MONGO_INITDB_ROOT_PASSWORD: root123456 13 | 
MONGO_INITDB_DATABASE: admin 14 | volumes: 15 | - ./docker-entrypoint-initdb.d/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro 16 | - ./mongo_data/etc:/etc/mongo 17 | - ./mongo_data/data/db:/data/db 18 | ports: 19 | - 27017:27017 20 | healthcheck: 21 | test: ["CMD-SHELL", "netstat -anp | grep 27017"] 22 | interval: 2m 23 | timeout: 10s 24 | retries: 3 25 | networks: ["yapi"] 26 | restart: always 27 | 28 | yapi: 29 | container_name: yapi 30 | image: yapi 31 | build: 32 | context: ./ 33 | dockerfile: Dockerfile 34 | # Command for the initial startup 35 | command: "yapi server" 36 | # Use this command after initialization 37 | # command: "node /my-yapi/vendors/server/app.js" 38 | volumes: 39 | - ./yapi_data:/my-yapi 40 | ports: 41 | - 9090:9090 42 | - 3000:3000 43 | depends_on: 44 | - mongo 45 | networks: ["yapi"] 46 | restart: always 47 | 48 | # admin@admin.com yapi.pro 49 | # 50 | # db.user.update( {_id: 17} ,{ $set: {role: 'admin'}}); -------------------------------------------------------------------------------- /prometheus/prometheus/etc/prometheus-influxdb.yml: -------------------------------------------------------------------------------- 1 | # global config 2 | global: 3 | scrape_interval: 15s # Default interval for scraping targets; the default is 1m. 4 | scrape_timeout: 10s # Default timeout for scraping targets; the default is 10s. 5 | evaluation_interval: 15s # Interval for evaluating rules; the default is 1m. 6 | 7 | external_labels: 8 | monitor: 'dev-monitor' 9 | 10 | # Alertmanager configuration 11 | alerting: 12 | 13 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 14 | rule_files: 15 | - "alerts/*.rules" 16 | # - "second_rules.yml" 17 | 18 | # A scrape configuration containing exactly one endpoint to scrape: 19 | # Here it's Prometheus itself. 20 | scrape_configs: 21 | # The job name is added as a label `job=` to any timeseries scraped from this config. 22 | - job_name: 'prometheus' 23 | scrape_interval: 5s 24 | # metrics_path defaults to '/metrics' 25 | # scheme defaults to 'http'. 26 | static_configs: 27 | - targets: ['localhost:9090'] 28 | labels: 29 | group: 'prometheus' 30 | - job_name: 'influxdb' 31 | static_configs: 32 | - targets: ['influxdb:8086'] 33 | 34 | remote_write: 35 | - url: "http://influxdb:8086/api/v1/prom/write?db=prometheus&u=prometheus&p=prompass" 36 | 37 | remote_read: 38 | - url: "http://influxdb:8086/api/v1/prom/read?db=prometheus&u=prometheus&p=prompass" 39 | -------------------------------------------------------------------------------- /prometheus/prometheus/etc/prometheus-m3db.yml: -------------------------------------------------------------------------------- 1 | # global config 2 | global: 3 | scrape_interval: 15s # Default interval for scraping targets; the default is 1m. 4 | scrape_timeout: 10s # Default timeout for scraping targets; the default is 10s. 5 | evaluation_interval: 15s # Interval for evaluating rules; the default is 1m. 6 | 7 | external_labels: 8 | monitor: 'dev-monitor' 9 | 10 | # Alertmanager configuration 11 | alerting: 12 | 13 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 14 | rule_files: 15 | - "alerts/*.rules" 16 | # - "second_rules.yml" 17 | 18 | # A scrape configuration containing exactly one endpoint to scrape: 19 | # Here it's Prometheus itself. 20 | scrape_configs: 21 | # The job name is added as a label `job=` to any timeseries scraped from this config. 22 | - job_name: 'prometheus' 23 | scrape_interval: 5s 24 | # metrics_path defaults to '/metrics' 25 | # scheme defaults to 'http'.
26 | static_configs: 27 | - targets: ['localhost:9090'] 28 | labels: 29 | group: 'prometheus' 30 | - job_name: 'm3db' 31 | static_configs: 32 | - targets: ['m3db:7203'] 33 | remote_read: 34 | - url: "http://m3db:7201/api/v1/prom/remote/read" 35 | # To test reading even when local Prometheus has the data 36 | read_recent: true 37 | remote_write: 38 | - url: "http://m3db:7201/api/v1/prom/remote/write" 39 | -------------------------------------------------------------------------------- /redis/docker-compose-replication.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | networks: 4 | redis: 5 | 6 | services: 7 | redis-master: 8 | container_name: redis-master 9 | image: redis:5.0.7 10 | volumes: 11 | - "./redis_rep_master_data:/data" 12 | - "/etc/localtime:/etc/localtime" 13 | ports: 14 | - "6379:6379" 15 | restart: always 16 | command: [ 17 | '--port 6379', 18 | '--requirepass 123456', 19 | '--maxclients 1000', 20 | '--maxmemory 256mb', 21 | '--maxmemory-policy volatile-ttl', 22 | '--appendonly yes', 23 | '--aof-use-rdb-preamble yes' 24 | ] 25 | networks: 26 | - redis 27 | 28 | redis-slave: 29 | container_name: redis-slave 30 | image: redis:5.0.7 31 | volumes: 32 | - "./redis_rep_slave_data:/data" 33 | - "/etc/localtime:/etc/localtime" 34 | ports: 35 | - "16379:16379" 36 | restart: always 37 | command: [ 38 | '--port 16379', 39 | '--requirepass 123456', 40 | '--masterauth 123456', 41 | '--replicaof redis-master 6379', 42 | '--maxclients 1000', 43 | '--maxmemory 256mb', 44 | '--maxmemory-policy volatile-ttl', 45 | '--appendonly yes', 46 | '--aof-use-rdb-preamble yes' 47 | ] 48 | depends_on: 49 | - redis-master 50 | networks: 51 | - redis -------------------------------------------------------------------------------- /clickhouse/log_nginx/clickhouse/initdb.d/nginx_access_log.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE logs.`nginx_access_log` 2 | ( 3 | `timestamp` DateTime, 4 | `msec` Decimal, 5 | `request_id` Nullable(String), 6 | `remote_addr` String, 7 | `remote_user` String, 8 | `host` String, 9 | `scheme` String, 10 | `uri` String, 11 | `request_method` String, 12 | `request_length` Int, 13 | `request_uri` String, 14 | `request_time` Decimal, 15 | `bytes_sent` Int, 16 | `body_bytes_sent` Int, 17 | `content_length` Int, 18 | `content_type` String, 19 | `http_referer` String, 20 | `http_origin` String, 21 | `http_user_agent` String, 22 | `http_x_forwarded_for` Nullable(String), 23 | `upstream_addr` Nullable(String), 24 | `upstream_response_time` Nullable(Decimal), 25 | `upstream_header_time` Nullable(Decimal), 26 | `upstream_connect_time` Nullable(Decimal), 27 | `upstream_bytes_received` Nullable(Int), 28 | `upstream_status` Nullable(Int), 29 | `status` Int, 30 | `server_name` String, 31 | `server_port` Int, 32 | `server_protocol` String, 33 | 34 | INDEX idx_host host TYPE set(0) GRANULARITY 1 35 | ) 36 | ENGINE = MergeTree 37 | PARTITION BY toYYYYMMDD(timestamp) 38 | ORDER BY timestamp 39 | TTL timestamp + toIntervalMonth(1) 40 | SETTINGS index_granularity = 8192; 41 | 42 | -------------------------------------------------------------------------------- /flarum/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | flarum: 5 | image: mondedie/flarum:1.2.0 6 | container_name: flarum 7 | environment: 8 | - DEBUG=false 9 | - FORUM_URL=https://test.com 10 | - DB_HOST=mysql 11 | - DB_NAME=flarum 
12 | - DB_USER=flarum 13 | - DB_PASS=123456 14 | - DB_PREF=flarum_ 15 | - DB_PORT=3306 16 | - FLARUM_ADMIN_USER=admin 17 | - FLARUM_ADMIN_PASS=123456 18 | - FLARUM_ADMIN_MAIL=admin@test.com 19 | - FLARUM_TITLE=Flarum 20 | volumes: 21 | - ./flarum/assets:/flarum/app/public/assets 22 | - ./flarum/extensions:/flarum/app/extensions 23 | - ./flarum/storage/logs:/flarum/app/storage/logs 24 | - ./flarum/nginx:/etc/nginx/flarum 25 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 26 | restart: always 27 | 28 | 29 | # https://discuss.flarum.org.cn/ 30 | # docker exec -ti flarum extension require flarum-lang/chinese-simplified 31 | # docker exec -ti flarum php flarum cache:clear 32 | # docker exec -ti flarum extension require clarkwinkelmann/flarum-ext-emojionearea 33 | # docker exec -ti flarum extension require fof/links 34 | # docker exec -ti flarum extension require fof/user-directory 35 | # docker exec -ti flarum extension require fof/upload 36 | # docker exec -ti flarum extension require squeevee/flarum-ext-fancybox -------------------------------------------------------------------------------- /prometheus/prometheus/etc/prometheus-work1.yml: -------------------------------------------------------------------------------- 1 | # global config 2 | global: 3 | scrape_interval: 15s # Default interval for scraping targets; the default is 1m. 4 | scrape_timeout: 10s # Default timeout for scraping targets; the default is 10s. 5 | evaluation_interval: 15s # Interval for evaluating rules; the default is 1m. 6 | 7 | external_labels: 8 | work: '1' 9 | 10 | 11 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 12 | rule_files: 13 | - "alerts/*.rules" 14 | # - "second_rules.yml" 15 | 16 | # A scrape configuration containing exactly one endpoint to scrape: 17 | # Here it's Prometheus itself. 18 | scrape_configs: 19 | # The job name is added as a label `job=` to any timeseries scraped from this config. 20 | - job_name: 'prometheus' 21 | scrape_interval: 5s 22 | # metrics_path defaults to '/metrics' 23 | # scheme defaults to 'http'. 24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | labels: 27 | group: 'prometheus' 28 | 29 | - job_name: 'node-exporter' 30 | file_sd_configs: 31 | - files: 32 | - targets/nodes/*.json 33 | refresh_interval: 5s 34 | relabel_configs: 35 | - source_labels: [__address__] 36 | modulus: 2 37 | target_label: __tmp_hash 38 | action: hashmod 39 | - source_labels: [__tmp_hash] 40 | regex: ^0$ 41 | action: keep 42 | -------------------------------------------------------------------------------- /prometheus/prometheus/etc/prometheus-work2.yml: -------------------------------------------------------------------------------- 1 | # global config 2 | global: 3 | scrape_interval: 15s # Default interval for scraping targets; the default is 1m. 4 | scrape_timeout: 10s # Default timeout for scraping targets; the default is 10s. 5 | evaluation_interval: 15s # Interval for evaluating rules; the default is 1m. 6 | 7 | external_labels: 8 | work: '2' 9 | 10 | 11 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 12 | rule_files: 13 | - "alerts/*.rules" 14 | # - "second_rules.yml" 15 | 16 | # A scrape configuration containing exactly one endpoint to scrape: 17 | # Here it's Prometheus itself. 18 | scrape_configs: 19 | # The job name is added as a label `job=` to any timeseries scraped from this config. 20 | - job_name: 'prometheus' 21 | scrape_interval: 5s 22 | # metrics_path defaults to '/metrics' 23 | # scheme defaults to 'http'.
24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | labels: 27 | group: 'prometheus' 28 | 29 | - job_name: 'node-exporter' 30 | file_sd_configs: 31 | - files: 32 | - targets/nodes/*.json 33 | refresh_interval: 5s 34 | relabel_configs: 35 | - source_labels: [__address__] 36 | modulus: 2 37 | target_label: __tmp_hash 38 | action: hashmod 39 | - source_labels: [__tmp_hash] 40 | regex: ^1$ 41 | action: keep 42 | -------------------------------------------------------------------------------- /guacamole/nginx/templates/guacamole.conf.template: -------------------------------------------------------------------------------- 1 | ### BBB 2 | server { 3 | listen 443 ssl; 4 | http2 on; 5 | server_name localhost; 6 | 7 | ssl_certificate /etc/nginx/ssl/self.cert; 8 | ssl_certificate_key /etc/nginx/ssl/self-ssl.key; 9 | 10 | ssl_protocols TLSv1.2 TLSv1.3; 11 | ssl_prefer_server_ciphers on; 12 | ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH"; 13 | ssl_ecdh_curve secp384r1; 14 | ssl_session_cache shared:SSL:10m; 15 | ssl_session_tickets off; 16 | ssl_stapling off; 17 | ssl_stapling_verify off; 18 | 19 | location / { 20 | proxy_pass http://guacamole:8080/guacamole/; 21 | proxy_buffering off; 22 | proxy_http_version 1.1; 23 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 24 | proxy_set_header Upgrade $http_upgrade; 25 | proxy_set_header Connection $http_connection; 26 | proxy_cookie_path /guacamole/ /; 27 | access_log off; 28 | # allow large uploads (default=1m) 29 | # 4096m = 4GByte 30 | client_max_body_size 4096m; 31 | } 32 | 33 | #error_page 404 /404.html; 34 | 35 | # redirect server error pages to the static page /50x.html 36 | # 37 | error_page 500 502 503 504 /50x.html; 38 | location = /50x.html { 39 | root /usr/share/nginx/html; 40 | } 41 | 42 | } 43 | -------------------------------------------------------------------------------- /clickhouse/single/users.d/users.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 10000000000 6 | 0 7 | in_order 8 | 1 9 | 10 | 11 | 12 | 13 | 1 14 | default 15 | 16 | ::/0 17 | 18 | default 19 | 1 20 | 1 21 | 1 22 | 1 23 | 24 | 25 | 26 | 27 | 28 | 3600 29 | 0 30 | 0 31 | 0 32 | 0 33 | 0 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /grafana/grafana-allstack/mimir/rules/anonymous/rules_tempo.yaml: -------------------------------------------------------------------------------- 1 | "groups": 2 | - "name": "tempo_rules" 3 | "rules": 4 | - "expr": "histogram_quantile(0.99, sum(rate(tempo_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, job, route))" 5 | "record": "cluster_namespace_job_route:tempo_request_duration_seconds:99quantile" 6 | - "expr": "histogram_quantile(0.50, sum(rate(tempo_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, job, route))" 7 | "record": "cluster_namespace_job_route:tempo_request_duration_seconds:50quantile" 8 | - "expr": "sum(rate(tempo_request_duration_seconds_sum[1m])) by (cluster, namespace, job, route) / sum(rate(tempo_request_duration_seconds_count[1m])) by (cluster, namespace, job, route)" 9 | "record": "cluster_namespace_job_route:tempo_request_duration_seconds:avg" 10 | - "expr": "sum(rate(tempo_request_duration_seconds_bucket[1m])) by (le, cluster, namespace, job, route)" 11 | "record": "cluster_namespace_job_route:tempo_request_duration_seconds_bucket:sum_rate" 12 | - "expr": "sum(rate(tempo_request_duration_seconds_sum[1m])) by (cluster, namespace, 
job, route)" 13 | "record": "cluster_namespace_job_route:tempo_request_duration_seconds_sum:sum_rate" 14 | - "expr": "sum(rate(tempo_request_duration_seconds_count[1m])) by (cluster, namespace, job, route)" 15 | "record": "cluster_namespace_job_route:tempo_request_duration_seconds_count:sum_rate" 16 | -------------------------------------------------------------------------------- /clickhouse/cluster_1S_2R_ch_proxy/clickhouse-01/users.d/users.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 10000000000 6 | 0 7 | in_order 8 | 1 9 | 10 | 11 | 12 | 13 | 1 14 | default 15 | 16 | ::/0 17 | 18 | default 19 | 1 20 | 1 21 | 1 22 | 1 23 | 24 | 25 | 26 | 27 | 28 | 3600 29 | 0 30 | 0 31 | 0 32 | 0 33 | 0 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /clickhouse/cluster_1S_2R_ch_proxy/clickhouse-02/users.d/users.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 10000000000 6 | 0 7 | in_order 8 | 1 9 | 10 | 11 | 12 | 13 | 1 14 | default 15 | 16 | ::/0 17 | 18 | default 19 | 1 20 | 1 21 | 1 22 | 1 23 | 24 | 25 | 26 | 27 | 28 | 3600 29 | 0 30 | 0 31 | 0 32 | 0 33 | 0 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /clickhouse/log_nginx/clickhouse/users.d/users.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 10000000000 6 | 0 7 | in_order 8 | 1 9 | 10 | 11 | 12 | 13 | 1 14 | default 15 | 123456 16 | 17 | ::/0 18 | 19 | default 20 | 1 21 | 1 22 | 1 23 | 1 24 | 25 | 26 | 27 | 28 | 29 | 3600 30 | 0 31 | 0 32 | 0 33 | 0 34 | 0 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /elasticsearch/stack/heartbeat.docker.yml: -------------------------------------------------------------------------------- 1 | # Define a directory to load monitor definitions from. Definitions take the form 2 | # of individual yaml files. 
3 | heartbeat.config.monitors: 4 | # Directory + glob pattern to search for configuration files 5 | path: ${path.config}/monitors.d/*.yml 6 | # If enabled, heartbeat will periodically check the config.monitors path for changes 7 | reload.enabled: false 8 | # How often to check for changes 9 | reload.period: 5s 10 | 11 | setup.template.settings: 12 | index: 13 | number_of_shards: 1 14 | number_of_replicas: 0 15 | codec: best_compression 16 | 17 | heartbeat.monitors: 18 | - type: http 19 | id: es-service 20 | name: ElasticSearch Service 21 | schedule: '@every 5s' 22 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 23 | username: '${ELASTICSEARCH_USERNAME:}' 24 | password: '${ELASTICSEARCH_PASSWORD:}' 25 | 26 | - type: http 27 | id: kibana-service 28 | name: Kibana Service 29 | hosts: ["http://kibana:5601"] 30 | schedule: '@every 5s' 31 | 32 | - type: icmp 33 | id: elk-service 34 | name: elk stack Service 35 | schedule: '@every 5s' 36 | hosts: 37 | - elasticsearch 38 | - kibana 39 | 40 | processors: 41 | - add_docker_metadata: ~ 42 | 43 | output.elasticsearch: 44 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 45 | username: '${ELASTICSEARCH_USERNAME:}' 46 | password: '${ELASTICSEARCH_PASSWORD:}' 47 | 48 | http.enabled: true 49 | http.host: 0.0.0.0 50 | http.port: 5066 51 | 52 | -------------------------------------------------------------------------------- /rabbitmq/prometheus/etc/prometheus.yml: -------------------------------------------------------------------------------- 1 | # https://prometheus.io/docs/prometheus/latest/configuration/configuration/ 2 | global: 3 | # This is higher than RabbitMQ's collect_statistics_interval, 4 | # but still close enough to capture metrics that were refreshed within this interval 5 | # This value determines the range that we use with rate(): 6 | # https://www.robustperception.io/what-range-should-i-use-with-rate 7 | scrape_interval: 15s # Default is every 1 minute. 8 | # scrape_timeout: 10s # Default is 10 seconds. 9 | # evaluation_interval: 60s # Default is every 1 minute. 10 | 11 | # Alertmanager configuration 12 | alerting: 13 | alertmanagers: 14 | - static_configs: 15 | - targets: 16 | # - 'alertmanager:9093' 17 | 18 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 19 | rule_files: 20 | - "alerts/*.rules" 21 | 22 | scrape_configs: 23 | # The job name is added as a label `job=` to any timeseries scraped from this config. 24 | - job_name: 'prometheus' 25 | static_configs: 26 | - targets: ['localhost:9090'] 27 | - job_name: 'node-exporter' 28 | static_configs: 29 | - targets: ['node-exporter:9100'] 30 | - job_name: 'cadvisor' 31 | static_configs: 32 | - targets: ['cadvisor:8080'] 33 | - job_name: 'rabbitmq-server' 34 | static_configs: 35 | - targets: 36 | - 'rabbitmq:15692' 37 | - 'rmq0:15692' 38 | - 'rmq1:15692' 39 | - 'rmq2:15692' 40 | -------------------------------------------------------------------------------- /prometheus/cortex/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. 4 | evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. 5 | # scrape_timeout is set to the global default (10s). 
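# Note on the next block: remote_write ships every sample this Prometheus scrapes to Cortex
# through the haproxy load balancer (9009 being the Cortex HTTP API port). The cortex1, cortex2
# and cortex3 targets in the scrape_configs further down are assumed to be the same instances
# sitting behind that proxy, so this config both feeds the Cortex ring and monitors it.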
6 | remote_write: 7 | - url: http://haproxy:9009/api/prom/push 8 | 9 | # Alertmanager configuration 10 | alerting: 11 | alertmanagers: 12 | - static_configs: 13 | - targets: 14 | # - alertmanager:9093 15 | 16 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 17 | rule_files: 18 | # - "first_rules.yml" 19 | # - "second_rules.yml" 20 | 21 | scrape_configs: 22 | - job_name: 'prometheus' 23 | static_configs: 24 | - targets: ['localhost:9090'] 25 | 26 | - job_name: consul 27 | honor_timestamps: true 28 | scrape_interval: 15s 29 | scrape_timeout: 10s 30 | metrics_path: '/v1/agent/metrics' 31 | scheme: http 32 | params: 33 | format: ["prometheus"] 34 | static_configs: 35 | - targets: 36 | - consul:8500 37 | - job_name: 'node-exporter' 38 | static_configs: 39 | - targets: ['node-exporter:9100'] 40 | 41 | - job_name: 'haproxy-exporter' 42 | static_configs: 43 | - targets: ['haproxy:8404'] 44 | 45 | - job_name: 'cortex' 46 | static_configs: 47 | - targets: 48 | - 'cortex1:9009' 49 | - 'cortex2:9009' 50 | - 'cortex3:9009' 51 | -------------------------------------------------------------------------------- /wordpress/letsencrypt/letsencrypt-renew.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # check to see where the script is being run from and set local variables 4 | if [ -f .env ]; then 5 | echo "INFO: running from top level of repository" 6 | source .env 7 | LE_DIR=$(pwd)/letsencrypt 8 | else 9 | if [ ! -f ../.env ]; then 10 | echo "ERROR: Could not find the .env file?" 11 | exit 1; 12 | fi 13 | echo "INFO: running from the letsencrypt directory" 14 | source ../.env 15 | LE_DIR=$(pwd) 16 | cd ../ 17 | fi 18 | REPO_DIR=$(dirname ${LE_DIR}) 19 | 20 | # get full directory path 21 | if [ $(dirname ${SSL_CERTS_DIR}) = '.' ]; then 22 | CERTS=$REPO_DIR${SSL_CERTS_DIR:1} 23 | else 24 | CERTS=${SSL_CERTS_DIR} 25 | fi 26 | if [ $(dirname ${SSL_CERTS_DATA_DIR}) = '.' ]; then 27 | CERTS_DATA=$REPO_DIR${SSL_CERTS_DATA_DIR:1} 28 | else 29 | CERTS_DATA=${SSL_CERTS_DATA_DIR} 30 | fi 31 | 32 | # certs and certs-data directory expected to already exist and 33 | # contain prior certificate information 34 | if [ ! -d "${CERTS}" ]; then 35 | echo "WARNING: no certs directory!" 36 | exit 1; 37 | fi 38 | 39 | if [ ! -d "${CERTS_DATA}" ]; then 40 | echo "WARNING: no certs-data directory!" 
41 | exit 1; 42 | fi 43 | 44 | docker run -t --rm \ 45 | -v ${CERTS}:/etc/letsencrypt \ 46 | -v ${CERTS_DATA}:/data/letsencrypt \ 47 | certbot/certbot \ 48 | renew \ 49 | --webroot --webroot-path=/data/letsencrypt 50 | 51 | cd ${REPO_DIR} 52 | docker-compose kill -s HUP nginx 53 | cd ${LE_DIR} 54 | 55 | exit 0; 56 | -------------------------------------------------------------------------------- /elasticsearch/stack/filebeat.docker.yml: -------------------------------------------------------------------------------- 1 | setup.template.settings: 2 | index: 3 | number_of_shards: 1 4 | number_of_replicas: 0 5 | codec: best_compression 6 | 7 | filebeat.inputs: 8 | - type: container 9 | paths: 10 | - '/var/lib/docker/containers/*/*.log' 11 | 12 | filebeat.modules: 13 | - module: elasticsearch 14 | server: 15 | enabled: true 16 | var.paths: 17 | - /var/log/elasticsearch/*_server.json 18 | gc: 19 | enabled: true 20 | var.paths: 21 | - /var/log/elasticsearch/gc.log 22 | audit: 23 | enabled: true 24 | var.paths: 25 | - /var/log/elasticsearch/*_audit.json 26 | slowlog: 27 | enabled: true 28 | var.paths: 29 | - /var/log/elasticsearch/*_index_search_slowlog.json 30 | - /var/log/elasticsearch/*_index_indexing_slowlog.json 31 | deprecation: 32 | enabled: true 33 | var.paths: 34 | - /var/log/elasticsearch/*_deprecation.json 35 | 36 | processors: 37 | - add_docker_metadata: 38 | host: "unix:///var/run/docker.sock" 39 | - decode_json_fields: 40 | fields: ["message"] 41 | target: "json" 42 | overwrite_keys: true 43 | 44 | output.elasticsearch: 45 | hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' 46 | username: '${ELASTICSEARCH_USERNAME:}' 47 | password: '${ELASTICSEARCH_PASSWORD:}' 48 | 49 | logging.json: true 50 | logging.metrics.enabled: false 51 | 52 | setup.dashboards.enabled: true 53 | setup.kibana.host: '${KIBANA_HOST:kibana:5601}' 54 | 55 | http.enabled: true 56 | http.host: 0.0.0.0 57 | http.port: 5066 58 | -------------------------------------------------------------------------------- /prometheus/docker-compose-telegraf.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | networks: 4 | monitor: 5 | 6 | services: 7 | prometheus: 8 | container_name: prometheus 9 | image: prom/prometheus:v2.30.3 10 | volumes: 11 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime:ro 12 | - ./prometheus_data/data:/prometheus 13 | - ./prometheus_data/etc:/etc/prometheus 14 | command: 15 | - '--config.file=/etc/prometheus/prometheus.yml' 16 | - '--storage.tsdb.path=/prometheus' 17 | - '--storage.tsdb.retention.time=180d' 18 | - '--web.console.libraries=/usr/share/prometheus/console_libraries' 19 | - '--web.console.templates=/usr/share/prometheus/consoles' 20 | - '--web.enable-admin-api' 21 | - '--web.enable-lifecycle' 22 | ports: 23 | - 9090:9090 24 | networks: 25 | - monitor 26 | restart: always 27 | 28 | telegraf: 29 | container_name: telegraf 30 | image: telegraf:1.20.3 31 | environment: 32 | HOST_ETC: /hostfs/etc 33 | HOST_PROC: /hostfs/proc 34 | HOST_SYS: /hostfs/sys 35 | HOST_VAR: /hostfs/var 36 | HOST_RUN: /hostfs/run 37 | HOST_MOUNT_PREFIX: /hostfs 38 | volumes: 39 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime:ro 40 | # Mount for telegraf configuration 41 | - ./telegraf_data/conf/:/etc/telegraf/ 42 | # Mount for Docker API access 43 | - /var/run/docker.sock:/var/run/docker.sock 44 | # Mount the host filesystems 45 | - /:/hostfs:ro 46 | networks: 47 | - monitor 48 | restart: always 
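# A minimal sketch (an assumption, not a file from this repo) of the scrape job that
# ./prometheus_data/etc/prometheus.yml would need to collect what the Telegraf container above
# gathers, assuming the mounted ./telegraf_data/conf enables outputs.prometheus_client on its
# default port 9273 (both the plugin and the port are assumptions, since that config is not shown;
# the 'telegraf' hostname resolves because both services share the monitor network):
#
# scrape_configs:
#   - job_name: 'telegraf'
#     static_configs:
#       - targets: ['telegraf:9273']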
-------------------------------------------------------------------------------- /rabbitmq/grafana/datasources.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | # name of the datasource. Required 5 | - name: prometheus 6 | # datasource type. Required 7 | type: prometheus 8 | # access mode. direct or proxy. Required 9 | access: proxy 10 | # org id. will default to orgId 1 if not specified 11 | orgId: 1 12 | # url 13 | url: http://prometheus:9090 14 | # database password, if used 15 | # password: 16 | # database user, if used 17 | # user: 18 | # database name, if used 19 | # database: 20 | # enable/disable basic auth 21 | # basicAuth: 22 | # basic auth username 23 | # basicAuthUser: 24 | # basic auth password 25 | # basicAuthPassword: 26 | # enable/disable with credentials headers 27 | # withCredentials: 28 | # mark as default datasource. Max one per org 29 | isDefault: true 30 | # fields that will be converted to json and stored in json_data 31 | # jsonData: 32 | # graphiteVersion: "1.1" 33 | # tlsAuth: true 34 | # tlsAuthWithCACert: true 35 | # httpHeaderName1: "Authorization" 36 | # json object of data that will be encrypted. 37 | # secureJsonData: 38 | # tlsCACert: "..." 39 | # tlsClientCert: "..." 40 | # tlsClientKey: "..." 41 | # httpHeaderValue1: "Bearer xf5yhfkpsnmgo" 42 | version: 1 43 | # allow users to edit datasources from the UI. 44 | editable: false 45 | -------------------------------------------------------------------------------- /loki/s3-config/nginx-loki-gateway.conf: -------------------------------------------------------------------------------- 1 | error_log /dev/stderr; 2 | pid /tmp/nginx.pid; 3 | worker_rlimit_nofile 8192; 4 | 5 | events { 6 | worker_connections 4096; ## Default: 1024 7 | } 8 | 9 | http { 10 | 11 | default_type application/octet-stream; 12 | log_format main '$remote_addr - $remote_user [$time_local] $status ' 13 | '"$request" $body_bytes_sent "$http_referer" ' 14 | '"$http_user_agent" "$http_x_forwarded_for"'; 15 | access_log /dev/stderr main; 16 | sendfile on; 17 | tcp_nopush on; 18 | 19 | upstream read { 20 | server loki-read:3100; 21 | } 22 | 23 | upstream write { 24 | server loki-write:3100; 25 | } 26 | 27 | 28 | server { 29 | listen 3100; 30 | 31 | location = / { 32 | return 200 'OK'; 33 | auth_basic off; 34 | } 35 | location = /api/prom/push { 36 | proxy_pass http://write:3100$request_uri; 37 | } 38 | location = /api/prom/tail { 39 | proxy_pass http://read:3100$request_uri; 40 | proxy_set_header Upgrade $http_upgrade; 41 | proxy_set_header Connection "upgrade"; 42 | } 43 | location ~ /api/prom/.* { 44 | proxy_pass http://read:3100$request_uri; 45 | } 46 | location = /loki/api/v1/push { 47 | proxy_pass http://write:3100$request_uri; 48 | } 49 | location = /loki/api/v1/tail { 50 | proxy_pass http://read:3100$request_uri; 51 | proxy_set_header Upgrade $http_upgrade; 52 | proxy_set_header Connection "upgrade"; 53 | } 54 | location ~ /loki/api/.* { 55 | proxy_pass http://read:3100$request_uri; 56 | } 57 | } 58 | } -------------------------------------------------------------------------------- /nacos/cluster-hostname.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | nacos1: 5 | hostname: nacos1 6 | container_name: nacos1 7 | image: nacos/nacos-server:${NACOS_VERSION} 8 | volumes: 9 | - ./cluster-logs/nacos1:/home/nacos/logs 10 | - ./init.d/custom.properties:/home/nacos/init.d/custom.properties 11 
| ports: 12 | - "8848:8848" 13 | - "9555:9555" 14 | env_file: 15 | - ./env/nacos-hostname.env 16 | restart: always 17 | depends_on: 18 | - mysql 19 | 20 | nacos2: 21 | hostname: nacos2 22 | image: nacos/nacos-server:${NACOS_VERSION} 23 | container_name: nacos2 24 | volumes: 25 | - ./cluster-logs/nacos2:/home/nacos/logs 26 | - ./init.d/custom.properties:/home/nacos/init.d/custom.properties 27 | ports: 28 | - "8849:8848" 29 | env_file: 30 | - ./env/nacos-hostname.env 31 | restart: always 32 | depends_on: 33 | - mysql 34 | 35 | nacos3: 36 | hostname: nacos3 37 | image: nacos/nacos-server:${NACOS_VERSION} 38 | container_name: nacos3 39 | volumes: 40 | - ./cluster-logs/nacos3:/home/nacos/logs 41 | - ./init.d/custom.properties:/home/nacos/init.d/custom.properties 42 | ports: 43 | - "8850:8848" 44 | env_file: 45 | - ./env/nacos-hostname.env 46 | restart: always 47 | depends_on: 48 | - mysql 49 | mysql: 50 | container_name: mysql 51 | image: nacos/nacos-mysql:5.7 52 | env_file: 53 | - ./env/mysql.env 54 | volumes: 55 | - ./mysql:/var/lib/mysql 56 | ports: 57 | - "3306:3306" 58 | -------------------------------------------------------------------------------- /prometheus/docker-compose-influxdb.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | volumes: 4 | prometheus_data: {} 5 | 6 | services: 7 | prometheus: 8 | image: prom/prometheus:v2.16.0 9 | container_name: prometheus 10 | hostname: prometheus 11 | volumes: 12 | - /etc/localtime:/etc/localtime:ro 13 | - prometheus_data:/prometheus 14 | - ./prometheus/etc/prometheus-influxdb.yml:/etc/prometheus/prometheus.yml 15 | command: 16 | - '--config.file=/etc/prometheus/prometheus.yml' 17 | - '--storage.tsdb.path=/prometheus' 18 | - '--storage.tsdb.retention.time=1d' 19 | - '--web.console.libraries=/usr/share/prometheus/console_libraries' 20 | - '--web.console.templates=/usr/share/prometheus/consoles' 21 | - '--web.enable-admin-api' 22 | - '--web.enable-lifecycle' 23 | ports: 24 | - 9090:9090 25 | restart: always 26 | 27 | influxdb: 28 | image: influxdb:1.7-alpine 29 | container_name: influxdb 30 | hostname: influxdb 31 | environment: 32 | - INFLUXDB_ADMIN_ENABLED=true 33 | - INFLUXDB_ADMIN_USER=${INFLUXDB_ADMIN_USER:-admin} 34 | - INFLUXDB_ADMIN_PASSWORD=${INFLUXDB_ADMIN_PASSWORD:-admin} 35 | - INFLUXDB_DB=prometheus 36 | - INFLUXDB_HTTP_LOG_ENABLED=false 37 | - INFLUXDB_REPORTING_DISABLED=true 38 | - INFLUXDB_USER=${INFLUXDB_USER:-prometheus} 39 | - INFLUXDB_USER_PASSWORD=${INFLUXDB_USER_PASSWORD:-prompass} 40 | volumes: 41 | - /etc/localtime:/etc/localtime:ro 42 | - ./influxdb_data:/var/lib/influxdb:rw 43 | ports: 44 | - 8086:8086 45 | restart: always 46 | -------------------------------------------------------------------------------- /wordpress/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.6' 2 | services: 3 | 4 | wordpress: 5 | image: wordpress:${WORDPRESS_VERSION:-php7.3-fpm} 6 | container_name: wordpress 7 | volumes: 8 | - ./config/php.conf.ini:/usr/local/etc/php/conf.d/conf.ini 9 | - ./wordpress:/var/www/html 10 | environment: 11 | - WORDPRESS_DB_NAME=${WORDPRESS_DB_NAME:-wordpress} 12 | - WORDPRESS_TABLE_PREFIX=${WORDPRESS_TABLE_PREFIX:-wp_} 13 | - WORDPRESS_DB_HOST=${WORDPRESS_DB_HOST:-mysql} 14 | - WORDPRESS_DB_USER=${WORDPRESS_DB_USER:-root} 15 | - WORDPRESS_DB_PASSWORD=${WORDPRESS_DB_PASSWORD:-password} 16 | depends_on: 17 | - mysql 18 | restart: always 19 | 20 | mysql: 21 | image: 
mariadb:${MARIADB_VERSION:-latest} 22 | container_name: mysql 23 | volumes: 24 | - ./mysql:/var/lib/mysql 25 | environment: 26 | - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD:-password} 27 | - MYSQL_USER=${MYSQL_USER:-root} 28 | - MYSQL_PASSWORD=${MYSQL_PASSWORD:-password} 29 | - MYSQL_DATABASE=${MYSQL_DATABASE:-wordpress} 30 | restart: always 31 | 32 | nginx: 33 | image: nginx:${NGINX_VERSION:-latest} 34 | container_name: nginx 35 | ports: 36 | - '80:80' 37 | - '443:443' 38 | volumes: 39 | - ${NGINX_CONF_DIR:-./nginx}:/etc/nginx/conf.d 40 | - ${NGINX_LOG_DIR:-./logs/nginx}:/var/log/nginx 41 | - ${WORDPRESS_DATA_DIR:-./wordpress}:/var/www/html 42 | - ${SSL_CERTS_DIR:-./certs}:/etc/letsencrypt 43 | - ${SSL_CERTS_DATA_DIR:-./certs-data}:/data/letsencrypt 44 | depends_on: 45 | - wordpress 46 | restart: always -------------------------------------------------------------------------------- /clickhouse/log_nginx/nginx/conf/nginx.conf: -------------------------------------------------------------------------------- 1 | 2 | user nginx; 3 | worker_processes auto; 4 | 5 | error_log /var/log/nginx/error.log notice; 6 | pid /var/run/nginx.pid; 7 | 8 | 9 | events { 10 | worker_connections 1024; 11 | } 12 | 13 | 14 | http { 15 | include /etc/nginx/mime.types; 16 | default_type application/octet-stream; 17 | 18 | log_format main escape=json '{"timestamp": "$time_iso8601","msec": "$msec","request_id": "$request_id","remote_addr": "$remote_addr","http_x_forwarded_for": "$http_x_forwarded_for","host": "$host","scheme": "$scheme","request_method": "$request_method","request_uri": "$request_uri","request_time": "$request_time","status": "$status","upstream_status": "$upstream_status","request_length": "$request_length","body_bytes_sent": "$body_bytes_sent","content_length": "$content_length","bytes_sent": "$bytes_sent","content_type": "$content_type","uri": "$uri","http_user_agent": "$http_user_agent","http_referer": "$http_referer","http_origin": "$http_origin","server_port": "$server_port","server_protocol": "$server_protocol","request_completion": "$request_completion","remote_user": "$remote_user","server_name": "$server_name","upstream_response_time": "$upstream_response_time","upstream_header_time": "$upstream_header_time","upstream_connect_time": "$upstream_connect_time","upstream_bytes_received": "$upstream_bytes_received","upstream_addr": "$upstream_addr"}'; 19 | 20 | access_log /var/log/nginx/access.log main; 21 | 22 | sendfile on; 23 | #tcp_nopush on; 24 | 25 | keepalive_timeout 65; 26 | 27 | #gzip on; 28 | 29 | include /etc/nginx/conf.d/*.conf; 30 | } 31 | -------------------------------------------------------------------------------- /clickhouse/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## 文档 4 | 5 | - https://github.com/ClickHouse/examples/tree/main/docker-compose-recipes/recipes 6 | - https://clickhouse.com/docs/en/architecture/replication 7 | 8 | 9 | 10 | ## cluster_1S_2R_ch_proxy 11 | 12 | 13 | 插入数据 14 | 15 | ``` 16 | docker exec -ti clickhouse-01 bash 17 | 18 | clickhouse-client 19 | 20 | CREATE DATABASE db1 ON CLUSTER cluster_1S_2R 21 | 22 | CREATE TABLE db1.table1 ON CLUSTER cluster_1S_2R 23 | ( 24 | `id` UInt64, 25 | `column1` String 26 | ) 27 | ENGINE = ReplicatedMergeTree 28 | ORDER BY id 29 | 30 | INSERT INTO db1.table1 (id, column1) VALUES (1, 'abc'); 31 | 32 | INSERT INTO db1.table1 (id, column1) VALUES (2, 'def'); 33 | 34 | SELECT * FROM db1.table1 FORMAT Pretty 35 | ``` 36 | 37 | 38 | 测试 ch-proxy 39 | ``` 40 | echo 
"INSERT INTO db1.table1 (id, column1) VALUES (3, 'ghi');" | curl http://127.0.0.1 --data-binary @- 41 | echo "SELECT * FROM db1.table1 FORMAT Pretty" | curl http://127.0.0.1 --data-binary @- 42 | ``` 43 | 44 | 45 | ``` 46 | select concat(database, '.', table) as table, 47 | formatReadableSize(sum(bytes)) as size, 48 | sum(rows) as rows, 49 | max(modification_time) as latest_modification, 50 | sum(bytes) as bytes_size, 51 | any(engine) as engine, 52 | formatReadableSize(sum(primary_key_bytes_in_memory)) as primary_keys_size 53 | from system.parts 54 | where active 55 | group by database, table 56 | order by bytes_size desc; 57 | ``` -------------------------------------------------------------------------------- /clickhouse/cluster_1S_2R_ch_proxy/clickhouse-keeper-01/config/keeper_config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | information 4 | /var/log/clickhouse-keeper/clickhouse-keeper.log 5 | /var/log/clickhouse-keeper/clickhouse-keeper.err.log 6 | 1000M 7 | 3 8 | 9 | 0.0.0.0 10 | 11 | 9181 12 | 1 13 | /var/lib/clickhouse-keeper/coordination/log 14 | /var/lib/clickhouse-keeper/coordination/snapshots 15 | 16 | 10000 17 | 30000 18 | information 19 | 20 | 21 | 22 | 1 23 | clickhouse-keeper-01 24 | 9234 25 | 26 | 27 | 2 28 | clickhouse-keeper-02 29 | 9234 30 | 31 | 32 | 3 33 | clickhouse-keeper-03 34 | 9234 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /clickhouse/cluster_1S_2R_ch_proxy/clickhouse-keeper-02/config/keeper_config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | information 4 | /var/log/clickhouse-keeper/clickhouse-keeper.log 5 | /var/log/clickhouse-keeper/clickhouse-keeper.err.log 6 | 1000M 7 | 3 8 | 9 | 0.0.0.0 10 | 11 | 9181 12 | 2 13 | /var/lib/clickhouse-keeper/coordination/log 14 | /var/lib/clickhouse-keeper/coordination/snapshots 15 | 16 | 10000 17 | 30000 18 | information 19 | 20 | 21 | 22 | 1 23 | clickhouse-keeper-01 24 | 9234 25 | 26 | 27 | 2 28 | clickhouse-keeper-02 29 | 9234 30 | 31 | 32 | 3 33 | clickhouse-keeper-03 34 | 9234 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /clickhouse/cluster_1S_2R_ch_proxy/clickhouse-keeper-03/config/keeper_config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | information 4 | /var/log/clickhouse-keeper/clickhouse-keeper.log 5 | /var/log/clickhouse-keeper/clickhouse-keeper.err.log 6 | 1000M 7 | 3 8 | 9 | 0.0.0.0 10 | 11 | 9181 12 | 3 13 | /var/lib/clickhouse-keeper/coordination/log 14 | /var/lib/clickhouse-keeper/coordination/snapshots 15 | 16 | 10000 17 | 30000 18 | information 19 | 20 | 21 | 22 | 1 23 | clickhouse-keeper-01 24 | 9234 25 | 26 | 27 | 2 28 | clickhouse-keeper-02 29 | 9234 30 | 31 | 32 | 3 33 | clickhouse-keeper-03 34 | 9234 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /loki/mysql-to-loki/logstash/pipeline/mysql-to-loki.conf: -------------------------------------------------------------------------------- 1 | input { 2 | jdbc { 3 | # 数据库连接参数 4 | jdbc_driver_library => "/data/mysql-connector-j-9.1.0.jar" 5 | jdbc_driver_class => "com.mysql.cj.jdbc.Driver" 6 | jdbc_connection_string => "jdbc:mysql://localhost:3306/testdb" 7 | jdbc_user => "root" 8 | jdbc_password => "123456" 9 | jdbc_validate_connection => true 10 | jdbc_validation_timeout => 50 11 | #jdbc时区 12 | jdbc_default_timezone => 
"Asia/Shanghai" 13 | plugin_timezone => "local" 14 | # 定时执行 15 | schedule => "* * * * *" 16 | # 设置列名区分大小写, 默认全小写 17 | lowercase_column_names => "false" 18 | # 开启分页 19 | jdbc_paging_enabled => "true" 20 | jdbc_paging_mode => "explicit" 21 | jdbc_page_size => "2000" 22 | # 限制每次轮训获取的总数据 23 | statement => "select * from t_log where id > :sql_last_value order by id asc limit 10000" 24 | # 设置要追踪的字段 25 | tracking_column_type => "numeric" 26 | tracking_column => "id" 27 | use_column_value => true 28 | # 是否记录sql_last_value 29 | record_last_run => true 30 | last_run_metadata_path => "/data/.logstash_jdbc_last_run" 31 | } 32 | } 33 | 34 | 35 | filter { 36 | mutate { 37 | copy => { "create_time" => "@timestamp" } 38 | } 39 | } 40 | 41 | output { 42 | # stdout { 43 | # codec => rubydebug 44 | # } 45 | loki { 46 | url => "http://loki:3100/loki/api/v1/push" 47 | batch_size => 112640 #112.64 kilobytes 48 | retries => 5 49 | min_delay => 3 50 | max_delay => 500 51 | message_field => "content" 52 | include_fields => ["id", "service", "content", "remark", "create_time"] 53 | metadata_fields => ["operator_id", "object_id"] 54 | } 55 | } -------------------------------------------------------------------------------- /vault/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | networks: 4 | vault: 5 | 6 | services: 7 | consul: 8 | image: consul:latest 9 | container_name: consul 10 | restart: on-failure 11 | ports: 12 | - "8500:8500" 13 | volumes: 14 | - ./consul/config:/consul/config 15 | - ./consul/data:/consul/data 16 | command: "agent -config-dir=/consul/config" 17 | networks: 18 | - vault 19 | 20 | consul_init: 21 | init: true 22 | image: consul:latest 23 | container_name: consul_init 24 | volumes: 25 | - ./vault/config:/vault/config 26 | - ./consul:/consul 27 | entrypoint: /consul/config/init.sh 28 | environment: 29 | - CONSUL_HTTP_ADDR=consul:8500 30 | depends_on: 31 | - consul 32 | networks: 33 | - vault 34 | 35 | vault: 36 | image: vault:latest 37 | container_name: vault 38 | restart: on-failure 39 | ports: 40 | - "8200:8200" 41 | cap_add: 42 | - IPC_LOCK 43 | volumes: 44 | - ./vault:/vault 45 | environment: 46 | - VAULT_ADDR=http://localhost:8200 47 | entrypoint: /vault/entrypoint.sh 48 | command: vault server -config=/vault/config/vault.hcl 49 | depends_on: 50 | - consul 51 | - consul_init 52 | networks: 53 | - vault 54 | 55 | vault_init: 56 | init: true 57 | image: vault:latest 58 | container_name: vault_init 59 | cap_add: 60 | - IPC_LOCK 61 | volumes: 62 | - ./vault/config:/vault/config 63 | environment: 64 | - VAULT_ADDR=http://vault:8200 65 | entrypoint: /vault/config/init.sh 66 | depends_on: 67 | - vault 68 | networks: 69 | - vault 70 | 71 | -------------------------------------------------------------------------------- /sentry/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | sentry: 4 | container_name: sentry 5 | image: sentry 6 | env_file: 7 | - .env 8 | ports: 9 | - '9900:9000' 10 | depends_on: 11 | - db 12 | - redis 13 | tty: true 14 | stdin_open: true 15 | volumes: 16 | - sentry-data:/var/lib/sentry/files 17 | - localtime:/etc/localtime 18 | cron: 19 | container_name: sentry-cron 20 | image: sentry 21 | command: run cron 22 | env_file: 23 | - .env 24 | depends_on: 25 | - db 26 | - redis 27 | volumes: 28 | - sentry-data:/var/lib/sentry/files 29 | - localtime:/etc/localtime 30 | worker: 31 | container_name: sentry-worker 32 | image: 
sentry 33 | command: run worker 34 | env_file: 35 | - .env 36 | depends_on: 37 | - db 38 | - redis 39 | volumes: 40 | - sentry-data:/var/lib/sentry/files 41 | - localtime:/etc/localtime 42 | 43 | redis: 44 | container_name: sentry-redis 45 | image: redis 46 | volumes: 47 | - redis-data:/data 48 | - localtime:/etc/localtime 49 | ports: 50 | - '6380:6379' 51 | db: 52 | container_name: sentry-postgres 53 | image: postgres 54 | environment: 55 | POSTGRES_USER: sentry 56 | POSTGRES_PASSWORD: secret 57 | volumes: 58 | - pg-data:/var/lib/postgresql/data 59 | - localtime:/etc/localtime 60 | ports: 61 | - '5432:5432' 62 | # smtp: 63 | # restart: unless-stopped 64 | # image: tianon/exim4 65 | 66 | volumes: 67 | redis-data: ./redis-data 68 | pg-data: ./pg-data 69 | sentry-data: ./sentry-data 70 | localtime: /etc/localtime 71 | -------------------------------------------------------------------------------- /nacos/cluster-ip.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | nacos1: 5 | image: nacos/nacos-server:${NACOS_VERSION} 6 | container_name: nacos1 7 | networks: 8 | nacos_net: 9 | ipv4_address: 172.16.238.10 10 | volumes: 11 | - ./cluster-logs/nacos1:/home/nacos/logs 12 | ports: 13 | - "8848:8848" 14 | - "9555:9555" 15 | env_file: 16 | - ./env/nacos-ip.env 17 | restart: on-failure 18 | depends_on: 19 | - mysql 20 | 21 | nacos2: 22 | image: nacos/nacos-server:${NACOS_VERSION} 23 | container_name: nacos2 24 | networks: 25 | nacos_net: 26 | ipv4_address: 172.16.238.11 27 | volumes: 28 | - ./cluster-logs/nacos2:/home/nacos/logs 29 | ports: 30 | - "8849:8848" 31 | env_file: 32 | - ./env/nacos-ip.env 33 | restart: always 34 | depends_on: 35 | - mysql 36 | 37 | nacos3: 38 | image: nacos/nacos-server:${NACOS_VERSION} 39 | container_name: nacos3 40 | networks: 41 | nacos_net: 42 | ipv4_address: 172.16.238.12 43 | volumes: 44 | - ./cluster-logs/nacos2:/home/nacos/logs 45 | ports: 46 | - "8850:8848" 47 | env_file: 48 | - ./env/nacos-ip.env 49 | restart: always 50 | depends_on: 51 | - mysql 52 | 53 | mysql: 54 | container_name: mysql 55 | image: nacos/nacos-mysql:5.7 56 | networks: 57 | nacos_net: 58 | ipv4_address: 172.16.238.13 59 | env_file: 60 | - ./env/mysql.env 61 | volumes: 62 | - ./mysql:/var/lib/mysql 63 | ports: 64 | - "3306:3306" 65 | 66 | networks: 67 | nacos_net: 68 | driver: bridge 69 | ipam: 70 | driver: default 71 | config: 72 | - subnet: 172.16.238.0/24 73 | -------------------------------------------------------------------------------- /seafile/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '2.0' 2 | 3 | networks: 4 | seafile-net: 5 | 6 | services: 7 | db: 8 | image: mariadb:10.5 9 | container_name: seafile-mysql 10 | environment: 11 | - MYSQL_ROOT_PASSWORD=xxxx # Requested, set the root's password of MySQL service. 12 | - MYSQL_LOG_CONSOLE=true 13 | volumes: 14 | - /opt/seafile-mysql/db:/var/lib/mysql # Requested, specifies the path to MySQL data persistent store. 15 | networks: 16 | - seafile-net 17 | 18 | memcached: 19 | image: memcached:1.5.6 20 | container_name: seafile-memcached 21 | entrypoint: memcached -m 256 22 | networks: 23 | - seafile-net 24 | 25 | seafile: 26 | image: jmujmu/seafile-pi:8.0.3 27 | container_name: seafile 28 | ports: 29 | - "8003:80" 30 | - "8001:443" # If https is enabled, cancel the comment. 31 | volumes: 32 | - ./seafile-data:/shared # Requested, specifies the path to Seafile data persistent store. 
33 | - ./docker/ssl:/etc/ssl 34 | environment: 35 | - DB_HOST=db 36 | - DB_ROOT_PASSWD=xxxx # Requested, the value shuold be root's password of MySQL service. 37 | - TIME_ZONE=Asia/Shanghai # Optional, default is UTC. Should be uncomment and set to your local time zone. 38 | - SEAFILE_ADMIN_EMAIL=admin@xxxx.com # Specifies Seafile admin user, default is 'me@example.com'. 39 | - SEAFILE_ADMIN_PASSWORD=xxxx # Specifies Seafile admin password, default is 'asecret'. 40 | - SEAFILE_SERVER_LETSENCRYPT=false # Whether use letsencrypt to generate cert. 41 | - SEAFILE_SERVER_HOSTNAME=file.xxxx.com # Specifies your host name. 42 | depends_on: 43 | - db 44 | - memcached 45 | networks: 46 | - seafile-net -------------------------------------------------------------------------------- /loki/ha-memberlist-config/nginx-loki-gateway.conf: -------------------------------------------------------------------------------- 1 | error_log /dev/stderr; 2 | pid /tmp/nginx.pid; 3 | worker_rlimit_nofile 8192; 4 | 5 | events { 6 | worker_connections 4096; ## Default: 1024 7 | } 8 | 9 | http { 10 | 11 | default_type application/octet-stream; 12 | log_format main '$remote_addr - $remote_user [$time_local] $status ' 13 | '"$request" $body_bytes_sent "$http_referer" ' 14 | '"$http_user_agent" "$http_x_forwarded_for"'; 15 | access_log /dev/stderr main; 16 | sendfile on; 17 | tcp_nopush on; 18 | 19 | upstream distributor { 20 | server loki-1:3100; 21 | server loki-2:3100; 22 | server loki-3:3100; 23 | } 24 | 25 | upstream querier { 26 | server loki-1:3100; 27 | server loki-2:3100; 28 | server loki-3:3100; 29 | } 30 | 31 | upstream query-frontend { 32 | server loki-frontend:3100; 33 | } 34 | 35 | server { 36 | listen 80; 37 | proxy_set_header X-Scope-OrgID docker-ha; 38 | 39 | location = /loki/api/v1/push { 40 | proxy_pass http://distributor$request_uri; 41 | } 42 | 43 | location = /ring { 44 | proxy_pass http://distributor$request_uri; 45 | } 46 | 47 | location = /loki/api/v1/tail { 48 | proxy_pass http://querier$request_uri; 49 | proxy_set_header Upgrade $http_upgrade; 50 | proxy_set_header Connection "upgrade"; 51 | } 52 | 53 | location ~ /loki/api/.* { 54 | proxy_pass http://query-frontend$request_uri; 55 | } 56 | } 57 | 58 | server { 59 | listen 3100; 60 | proxy_set_header X-Scope-OrgID docker-ha; 61 | 62 | location ~ /loki/api/.* { 63 | proxy_pass http://querier$request_uri; 64 | } 65 | 66 | } 67 | } -------------------------------------------------------------------------------- /loki/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | init: 5 | image: grafana/loki:2.4.2 6 | user: root 7 | entrypoint: ["sh", "-c", "chown 10001:10001 /loki; chown 472 /var/lib/grafana"] 8 | volumes: 9 | - ./loki_data:/loki 10 | - ./grafana_data:/var/lib/grafana 11 | 12 | loki: 13 | container_name: loki 14 | image: grafana/loki:2.4.2 15 | ports: 16 | - "3100:3100" 17 | volumes: 18 | - ./loki_data:/loki 19 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 20 | command: -config.file=/etc/loki/local-config.yaml 21 | restart: always 22 | 23 | loki-promtail: 24 | container_name: loki-promtail 25 | image: grafana/promtail:2.4.2 26 | volumes: 27 | - /var/log:/var/log 28 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 29 | command: -config.file=/etc/promtail/config.yml 30 | restart: always 31 | 32 | loki-grafana: 33 | container_name: loki-grafana 34 | image: grafana/grafana:8.3.4 35 | ports: 36 | - "3000:3000" 37 | volumes: 38 | - 
./grafana_data:/var/lib/grafana 39 | - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime 40 | environment: 41 | - TZ=Asia/Shanghai 42 | - LANG=zh_CN.UTF-8 43 | - GF_EXPLORE_ENABLED=true 44 | - GF_PATHS_PROVISIONING=/etc/grafana/provisioning 45 | entrypoint: 46 | - sh 47 | - -euc 48 | - | 49 | mkdir -p /etc/grafana/provisioning/datasources 50 | cat <<EOF > /etc/grafana/provisioning/datasources/ds.yaml 51 | apiVersion: 1 52 | datasources: 53 | - name: Loki 54 | type: loki 55 | access: proxy 56 | url: http://loki:3100 57 | EOF 58 | /run.sh 59 | restart: always 60 | -------------------------------------------------------------------------------- /healthchecks/env.example: -------------------------------------------------------------------------------- 1 | ALLOWED_HOSTS=localhost 2 | APPRISE_ENABLED=False 3 | DB=postgres 4 | DB_CONN_MAX_AGE=0 5 | DB_HOST=db 6 | DB_NAME=hc 7 | DB_PASSWORD=fixme-postgres-password 8 | DB_PORT=5432 9 | DB_SSLMODE=prefer 10 | DB_TARGET_SESSION_ATTRS=read-write 11 | DB_USER=postgres 12 | DEBUG=False 13 | DEFAULT_FROM_EMAIL=healthchecks@example.org 14 | DISCORD_CLIENT_ID= 15 | DISCORD_CLIENT_SECRET= 16 | EMAIL_HOST= 17 | EMAIL_HOST_PASSWORD= 18 | EMAIL_HOST_USER= 19 | EMAIL_PORT=587 20 | EMAIL_USE_TLS=True 21 | EMAIL_USE_VERIFICATION=True 22 | LINENOTIFY_CLIENT_ID= 23 | LINENOTIFY_CLIENT_SECRET= 24 | MASTER_BADGE_LABEL=Mychecks 25 | MATRIX_ACCESS_TOKEN= 26 | MATRIX_HOMESERVER= 27 | MATRIX_USER_ID= 28 | MATTERMOST_ENABLED=True 29 | MSTEAMS_ENABLED=True 30 | OPSGENIE_ENABLED=True 31 | PAGERTREE_ENABLED=True 32 | PD_APP_ID= 33 | PD_ENABLED=True 34 | PING_BODY_LIMIT=10000 35 | PING_EMAIL_DOMAIN=localhost 36 | PING_ENDPOINT=http://localhost:8000/ping/ 37 | PROMETHEUS_ENABLED=True 38 | PUSHBULLET_CLIENT_ID= 39 | PUSHBULLET_CLIENT_SECRET= 40 | PUSHOVER_API_TOKEN= 41 | PUSHOVER_EMERGENCY_EXPIRATION=86400 42 | PUSHOVER_EMERGENCY_RETRY_DELAY=300 43 | PUSHOVER_SUBSCRIPTION_URL= 44 | REGISTRATION_OPEN=True 45 | REMOTE_USER_HEADER= 46 | RP_ID= 47 | S3_ACCESS_KEY= 48 | S3_BUCKET= 49 | S3_ENDPOINT= 50 | S3_REGION= 51 | S3_SECRET_KEY= 52 | S3_TIMEOUT=60 53 | SECRET_KEY=--- 54 | SHELL_ENABLED=False 55 | SIGNAL_CLI_SOCKET= 56 | SITE_NAME=Mychecks 57 | SITE_ROOT=http://localhost:8000 58 | SLACK_CLIENT_ID= 59 | SLACK_CLIENT_SECRET= 60 | SLACK_ENABLED=True 61 | SPIKE_ENABLED=True 62 | TELEGRAM_BOT_NAME=ExampleBot 63 | TELEGRAM_TOKEN= 64 | TRELLO_APP_KEY= 65 | TWILIO_ACCOUNT= 66 | TWILIO_AUTH= 67 | TWILIO_FROM= 68 | TWILIO_USE_WHATSAPP=False 69 | USE_PAYMENTS=False 70 | VICTOROPS_ENABLED=True 71 | WEBHOOKS_ENABLED=True 72 | ZULIP_ENABLED=True -------------------------------------------------------------------------------- /rabbitmq/docker-compose-metrics.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | networks: 4 | rabbitmq: 5 | 6 | volumes: 7 | rabbitmq-prometheus_prometheus: 8 | rabbitmq-prometheus_grafana: 9 | 10 | services: 11 | grafana: 12 | image: grafana/grafana:6.4.5 13 | ports: 14 | - "3000:3000" 15 | networks: 16 | - "rabbitmq" 17 | volumes: 18 | - rabbitmq-prometheus_grafana:/var/lib/grafana 19 | - ./grafana/dashboards.yml:/etc/grafana/provisioning/dashboards/rabbitmq.yaml 20 | - ./grafana/datasources.yml:/etc/grafana/provisioning/datasources/prometheus.yaml 21 | - ./grafana/dashboards:/dashboards 22 | environment: 23 | GF_INSTALL_PLUGINS: "flant-statusmap-panel,grafana-piechart-panel" 24 | restart: always 25 | prometheus: 26 | image: prom/prometheus:v2.14.0 27 | networks: 28 | - "rabbitmq" 29 | ports: 30 | - "9090:9090" 31 
| volumes: 32 | - rabbitmq-prometheus_prometheus:/prometheus 33 | - ./prometheus/etc:/etc/prometheus 34 | restart: always 35 | node-exporter: 36 | command: 37 | - '--path.procfs=/host/proc' 38 | - '--path.rootfs=/rootfs' 39 | - '--path.sysfs=/host/sys' 40 | - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)' 41 | expose: 42 | - 9100 43 | image: prom/node-exporter:v0.18.1 44 | networks: 45 | - "rabbitmq" 46 | volumes: 47 | - /proc:/host/proc:ro 48 | - /sys:/host/sys:ro 49 | - /:/rootfs:ro 50 | restart: always 51 | cadvisor: 52 | expose: 53 | - 8080 54 | image: google/cadvisor:v0.33.0 55 | networks: 56 | - "rabbitmq" 57 | volumes: 58 | - /:/rootfs:ro 59 | - /var/run:/var/run:rw 60 | - /sys:/sys:ro 61 | - /var/lib/docker/:/var/lib/docker:ro 62 | restart: always 63 | 64 | -------------------------------------------------------------------------------- /gitlab/docker-compose-extDB.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | services: 4 | gitlab: 5 | image: 'gitlab/gitlab-ce:13.11.4-ce.0' 6 | container_name: 'gitlab' 7 | environment: 8 | TZ: 'Asia/Shanghai' 9 | GITLAB_OMNIBUS_CONFIG: | 10 | external_url '192.168.77.130' 11 | gitlab_rails['time_zone'] = 'Asia/Shanghai' 12 | gitlab_rails['gitlab_shell_ssh_port'] = 10022 13 | # db setting 14 | postgresql['enable'] = false 15 | gitlab_rails['db_adapter'] = 'postgresql' 16 | gitlab_rails['db_encoding'] = 'utf8' 17 | gitlab_rails['db_host'] = '192.168.77.130' 18 | gitlab_rails['db_database'] = "gitlab" 19 | gitlab_rails['db_port'] = 5432 20 | gitlab_rails['db_username'] = 'root' 21 | gitlab_rails['db_password'] = 'Ab123456' 22 | # redis 23 | redis['enable'] = false 24 | gitlab_rails['redis_host'] = '192.168.77.130' 25 | gitlab_rails['redis_port'] = 6379 26 | gitlab_rails['redis_ssl'] = false 27 | gitlab_rails['redis_password'] = 'Ab123456' 28 | gitlab_rails['redis_database'] = 0 29 | gitlab_rails['redis_enable_client'] = false 30 | ports: 31 | - '80:80' 32 | - '443:443' 33 | - '10022:22' 34 | volumes: 35 | - ./gitlab_config:/etc/gitlab 36 | - ./gitlab_logs:/var/log/gitlab 37 | - ./gitlab_data:/var/opt/gitlab 38 | - /etc/localtime:/etc/localtime:ro 39 | privileged: true 40 | restart: always 41 | 42 | runner: 43 | image: gitlab/gitlab-runner:v13.11.0 44 | depends_on: 45 | - "gitlab" 46 | volumes: 47 | - ./gitlab_runner:/etc/gitlab-runner 48 | - /var/run/docker.sock:/var/run/docker.sock 49 | - /etc/ssl/certs:/etc/ssl/certs 50 | - /etc/localtime:/etc/localtime:ro 51 | restart: always 52 | -------------------------------------------------------------------------------- /onedev/docker-compose-extDB.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | services: 4 | gitlab: 5 | image: 'gitlab/gitlab-ce:13.11.4-ce.0' 6 | container_name: 'gitlab' 7 | environment: 8 | TZ: 'Asia/Shanghai' 9 | GITLAB_OMNIBUS_CONFIG: | 10 | external_url '192.168.77.130' 11 | gitlab_rails['time_zone'] = 'Asia/Shanghai' 12 | gitlab_rails['gitlab_shell_ssh_port'] = 10022 13 | # db setting 14 | postgresql['enable'] = false 15 | gitlab_rails['db_adapter'] = 'postgresql' 16 | gitlab_rails['db_encoding'] = 'utf8' 17 | gitlab_rails['db_host'] = '192.168.77.130' 18 | gitlab_rails['db_database'] = "gitlab" 19 | gitlab_rails['db_port'] = 5432 20 | gitlab_rails['db_username'] = 'root' 21 | gitlab_rails['db_password'] = 'Ab123456' 22 | # redis 23 | redis['enable'] = false 24 | gitlab_rails['redis_host'] = '192.168.77.130' 25 | 
gitlab_rails['redis_port'] = 6379 26 | gitlab_rails['redis_ssl'] = false 27 | gitlab_rails['redis_password'] = 'Ab123456' 28 | gitlab_rails['redis_database'] = 0 29 | gitlab_rails['redis_enable_client'] = false 30 | ports: 31 | - '80:80' 32 | - '443:443' 33 | - '10022:22' 34 | volumes: 35 | - ./gitlab_config:/etc/gitlab 36 | - ./gitlab_logs:/var/log/gitlab 37 | - ./gitlab_data:/var/opt/gitlab 38 | - /etc/localtime:/etc/localtime:ro 39 | privileged: true 40 | restart: always 41 | 42 | runner: 43 | image: gitlab/gitlab-runner:v13.11.0 44 | depends_on: 45 | - "gitlab" 46 | volumes: 47 | - ./gitlab_runner:/etc/gitlab-runner 48 | - /var/run/docker.sock:/var/run/docker.sock 49 | - /etc/ssl/certs:/etc/ssl/certs 50 | - /etc/localtime:/etc/localtime:ro 51 | restart: always 52 | -------------------------------------------------------------------------------- /rabbitmq/2.create-queue.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | exec="docker-compose -f docker-compose-cluster.yml exec rmq0" 5 | 6 | 7 | # queue 8 | echo "__create queue________" 9 | $exec rabbitmqadmin declare queue --vhost=/ name=ha1.queue durable=true 10 | $exec rabbitmqadmin declare queue --vhost=/ name=ha2.queue durable=true 11 | $exec rabbitmqadmin declare queue --vhost=/ name=ha3.queue durable=true 12 | $exec rabbitmqadmin declare queue --vhost=/ name=all.queue 13 | $exec rabbitmqadmin declare queue --vhost=/ name=nodes.queue 14 | 15 | 16 | # exchange 17 | echo "__create exchange________" 18 | $exec rabbitmqadmin declare exchange --vhost=/ name=test.exchange type=direct durable=true 19 | 20 | # binding 21 | 22 | $exec rabbitmqadmin declare binding --vhost=/ source=test.exchange destination=ha1.queue routing_key=ha1.queue 23 | $exec rabbitmqadmin declare binding --vhost=/ source=test.exchange destination=ha2.queue routing_key=ha2.queue 24 | $exec rabbitmqadmin declare binding --vhost=/ source=test.exchange destination=ha3.queue routing_key=ha3.queue 25 | 26 | # list 27 | 28 | echo "__queues___________________" 29 | $exec rabbitmqctl list_queues -p / 30 | echo "__exchanges___________________" 31 | $exec rabbitmqctl list_exchanges -p / 32 | echo "__bindings___________________" 33 | $exec rabbitmqctl list_bindings -p / 34 | 35 | # send message 36 | echo "__send message________" 37 | $exec rabbitmqadmin publish routing_key=ha1.queue payload="just for queue" 38 | $exec rabbitmqadmin publish exchange=test.exchange routing_key=ha1.queue payload="hello, world" 39 | 40 | echo "__get queue________" 41 | $exec rabbitmqadmin get queue=ha1.queue 42 | 43 | # consumer 44 | echo "__consumer message_______" 45 | $exec rabbitmqadmin get queue=ha1.queue ackmode=ack_requeue_false 46 | $exec rabbitmqadmin get queue=ha1.queue ackmode=ack_requeue_false 47 | echo "__queue message______________" 48 | $exec rabbitmqadmin get queue=ha1.queue 49 | 50 | --------------------------------------------------------------------------------
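# A possible follow-up to 2.create-queue.sh (a sketch under the same assumptions as that script,
# not a file from this repo): remove the test queues and exchange it declares and confirm the
# cluster is still healthy, reusing the same compose file and node.
exec="docker-compose -f docker-compose-cluster.yml exec rmq0"

echo "__cluster status________"
$exec rabbitmqctl cluster_status

echo "__cleanup________"
$exec rabbitmqadmin delete queue name=ha1.queue
$exec rabbitmqadmin delete queue name=ha2.queue
$exec rabbitmqadmin delete queue name=ha3.queue
$exec rabbitmqadmin delete queue name=all.queue
$exec rabbitmqadmin delete queue name=nodes.queue
$exec rabbitmqadmin delete exchange name=test.exchange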