├── .DS_Store ├── .editorconfig ├── .gitignore ├── CHANGELOG ├── README.md ├── alist └── docker-compose.yaml ├── apm ├── README.md └── docker-compose.yml ├── aria2 └── docker-compose.yml ├── authentik ├── README.md └── docker-compose.yml ├── caddy ├── Caddyfile ├── README.md ├── config │ └── caddy │ │ └── autosave.json ├── docker-compose.yml └── playkube.yml ├── derper ├── Caddyfile ├── README.md ├── docker-compose-caddy.yml └── docker-compose.yml ├── elasticsearch ├── README.md ├── docker-compose.yml └── kibana │ └── kibana.yml ├── gitlab-runner ├── README.md └── docker-compose.yml ├── grafana ├── README.md └── docker-compose.yml ├── jenkins ├── README.md └── docker-compose.yml ├── kafka ├── README.md └── docker-compose.yml ├── lakehouse └── trino-iceberg │ ├── Makefile │ ├── README.md │ ├── config │ ├── hive │ │ ├── build.gradle │ │ └── hive-site.xml │ └── trino │ │ ├── build.gradle │ │ └── etc │ │ ├── catalog │ │ ├── hives3.properties │ │ └── iceberg.properties │ │ └── config.properties │ ├── docker-compose-hive.yml │ ├── docker-compose-init.yml │ ├── docker-compose-minio.yml │ ├── docker-compose-trino.yml │ └── scripts │ ├── certs.sh │ └── minio-init.sh ├── ldap ├── lldap │ └── docker-compose.yaml └── openldap │ └── docker-compose.yaml ├── logstash ├── README.md ├── config │ └── logstash.yml ├── docker-compose.yml └── pipeline │ └── .gitkeep ├── mariadb ├── README.md └── docker-compose.yml ├── minio ├── README.md └── docker-compose.yml ├── mongodb ├── .gitignore ├── README.md ├── docker-compose-rs.yaml ├── docker-compose.yaml └── scripts │ └── init.js ├── mysql ├── README.md ├── conf │ └── mysql.cnf └── docker-compose.yml ├── nifi ├── README.md └── docker-compose.yml ├── portainer ├── README.md └── docker-compose.yml ├── postgres ├── README.md └── docker-compose.yml ├── prometheus ├── README.md ├── docker-compose.yml └── prometheus.yml ├── proxy ├── .dockerignore ├── README.md └── docker-compose.yaml ├── rabbitmq ├── READMD.md └── docker-compose.yml ├── redis 
├── 6 │ ├── README.md │ └── docker-compose.yml └── redis-ha │ ├── README.md │ ├── docker-compose.yml │ └── example-config │ ├── master.conf │ ├── sentinel-1.conf │ ├── sentinel-2.conf │ ├── sentinel-3.conf │ ├── slave1.conf │ └── slave2.conf ├── redisinsight ├── README.md └── docker-compose.yml ├── skywalking ├── README.md └── docker-compose.yml ├── sonarqube └── docker-compose.yml ├── splash ├── README.md └── docker-compose.yml ├── wiki.js └── docker-compose.yml ├── windmall └── docker-compose.yml └── zookeeper ├── README.md └── docker-compose.yml /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whg517/docker-compose/34142cc934712e70ad7d4235114bd4c4a3a9a065/.DS_Store -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | charset = utf-8 5 | end_of_line = lf 6 | insert_final_newline = true 7 | trim_trailing_whitespace = true 8 | 9 | [{*.go,Makefile,.gitmodules,go.mod,go.sum}] 10 | indent_style = tab 11 | 12 | [*.md] 13 | indent_style = tab 14 | trim_trailing_whitespace = false 15 | 16 | [*.{yml,yaml,json}] 17 | indent_style = space 18 | indent_size = 2 19 | 20 | [*.{js,jsx,ts,tsx,css,less,sass,scss,vue,py}] 21 | indent_style = space 22 | indent_size = 4 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | .idea 3 | **/.env 4 | proxy/config.json 5 | 6 | **/local* 7 | 8 | **/.gradle 9 | -------------------------------------------------------------------------------- /CHANGELOG: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## 2024-01-15 4 | 5 | ### 新增 6 | 7 | - authentik 8 | - derper 9 | - lldap 10 | - windmall 11 | 12 | ### 变更 13 | 14 | - 
更新 trino-iceberg 内容 15 | 16 | ## 2023-08-25 17 | 18 | ### 新增 19 | 20 | - alist 21 | - aria2 22 | - caddy 23 | - lakehouse 24 | - trino-iceberg 25 | - mariadb 26 | - minio 27 | - mysql 28 | - postgres 29 | - wiki.js 30 | 31 | ### 变更 32 | 33 | - 将 compose 版本升级到 3.9 34 | - 为 compose 中的服务添加优化参数,并使用 yaml 的锚点引用 35 | - 统一数据挂载目录为 `/data/docker/` ,例如 `/data/docker/aria2` 。 36 | 37 | ### 其他 38 | 39 | - 更新 README.md ,增加对一些服务的介绍 40 | - 增加 CHANGELOG.md ,记录变更 41 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # docker-compose 2 | 3 | > 最近发现有不少人关注到了我的这个仓库。非常荣幸我的过往经验形成的知识能给大家提供帮助。 4 | > 为了方便大家对仓库变更有具体的了解,我增加了一个 [ChangeLog](./CHANGELOG) 文件,记录了每次变更的内容。 5 | > 如果你有任何问题,欢迎提 ISSUE 。 6 | 7 | 本项目为个人记录常用 docker-compose 文件的地方,方便统一管理和同步使用。 8 | 9 | 将常用的 docker-compose 放在 Github 中管理,可以在使用时很方便地找到。如果是在常用的开发环境,只需要将项目克隆到工作目录, 10 | 在后续使用时,如果有调整或者增加的服务,都能很方便的同步。如果不是常用环境,只需要在浏览器中打开本项目,将需要的配置文件 11 | 复制下来即可。 12 | 13 | ## 环境准备 14 | 15 | 首先需要一个容器环境,可以是 [Docker](https://docs.docker.com/get-started/) 也可以是 [containerd](https://containerd.io/) 。 16 | 鉴于 Docker 这几年政策的调整,和容器化社区的发展,如果你要在一个新的环境中使用容器,建议你使用 [containerd](https://containerd.io/) 。 17 | 18 | 安装环境不是本项目的重点,所以仅列出需要的技术和相关引用链接。 19 | 20 | ### containerd 21 | 22 | 推荐使用 [containerd](https://containerd.io/) 作为后端容器工具,即使未来再安装 kubernetes 时,也可以轻松应对。 23 | 24 | 参考 [Getting started](https://containerd.io/docs/getting-started/) 安装 containerd 。 25 | 26 | 参考 [containerd/nerdctl](https://github.com/containerd/nerdctl) 安装为 containerd 适配的 Docker 命令。让你像使用 Docker 和 docker-compose 一样 27 | 使用 containerd 。 28 | 29 | ### Docker 30 | 31 | 请参照 [Get Docker](https://docs.docker.com/get-docker/) 中安装最新的 Docker 环境。 32 | 33 | 然后参照 [Install Docker Compose](https://docs.docker.com/compose/install/) 安装最新版本的 docker-compose 。 34 | 35 | ## 初始化网络 36 | 37 | 推荐先对容器环境做子网规划,并预先初始化外部子网。在启动服务时,将容器关联到子网。 38 | 39 | 在服务间容器通信时,不依赖宿主机地址,通过内部主机名(容器名)即可通信。这样也避免了 
一些不需要暴露出来的端口。例如对于一个 Web 服务,有三个容器,分别是 `app` 、 41 | `restapi` 和 `pgsql` 。其中 `app` 容器对外暴露 `8080` 端口,可以将该端口映射到宿主机的 `9090` 端口,然后 42 | 通过宿主机的 `9090` 访问前端页面。对于 `app` 访问后端 `restapi` 容器,只需要通过内部主机名和 43 | 端口就可以了。而后端容器 `restapi` 在访问 `pgsql` 时,也是以同样的方式。外部其实无法访问到 44 | 服务的后端容器,甚至不知道后端数据库的内容。既减少了端口占用,又能保证后端和数据库服务的 45 | 安全性。 46 | 47 | 我的做法是除了默认的几个网络之外,会再创建三个网络: 48 | 49 | - app 子网:应用相关容器所在子网。 50 | - db 子网:存储数据相关容器所在子网 51 | - other 子网:不符合上面的两种情况下,放在该子网。 52 | 53 | 所有 docker-compose 都不应该依赖具体主机 IP ,例如 `192.168.22.102` 。 54 | 55 | ## 使用 56 | 57 | 克隆项目 58 | 59 | ```bash 60 | git clone https://github.com/whg517/docker-compose.git 61 | ``` 62 | 63 | 然后切换到你想要启动的服务的目录,执行 `nerdctl compose up -d` 或者 `docker-compose up -d` 。 64 | 65 | 如果有自定义需求,更改文件即可。 66 | 67 | ## 已有服务 68 | 69 | - [alist](https://alist.nn.ci/):一个可以连接多种后端存储用来做网盘的开源项目 70 | - [derper](https://github.com/fredliang44/derper-docker) 启动自己的 tailscale 71 | - Elastic APM 72 | - [aria2](https://aria2.github.io/):一个突破单线程下载的下载工具,通过油猴脚本可以突破百度云限速 73 | - [caddy](https://caddyserver.com/docs/):一个使用 go 开发的代理服务器,本地运行可以将某个目录作为文件服务器在内网共享 74 | - ElasticSearch + Kibana 75 | - gitlab-runner 76 | - grafana 77 | - jenkins 78 | - kafka 79 | - lakehouse 80 | - trino-iceberg:基于docker实现的 pg+minio+trino+iceberg 的湖仓方案,详细文档已完成 81 | - ldap 82 | - [lldap](https://github.com/lldap/lldap) 一个轻量 ldap 服务,并且自带 UI 管理界面 83 | - logstash 84 | - mariadb 85 | - minio 86 | - mongodb 87 | - mysql 88 | - nifi 89 | - postgres 90 | - portainer 91 | - prometheus 92 | - proxy:一个使用 v2fly 客户端服务,为本地提供代理 93 | - rabbitmq 94 | - redis 95 | - redis 单节点 96 | - redis HA 97 | - redisinsight 98 | - skywalking 99 | - sonarqube 100 | - splash 101 | - [wiki.js](https://js.wiki/):一个轻量的文档管理系统,可以用来做个人 wiki 102 | - windmall 103 | 104 | ## 注意 105 | 106 | 如果你打算直接 Fork 该仓库然后直接使用。建议在使用自己的配置或者本地配置的时候,将文件名命名为 `local` 前缀,这样 Git 会忽略该文件。 107 | 108 | 不建议提交任何有关安全(如用户名,密码等)的内容到 Git 上。如果确实需要传入,可以在 docker-compose 文件中读取目录下的 `.env` 文件。 109 | 该文件是不会被 Git 记录的。 110 | 
-------------------------------------------------------------------------------- /alist/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | alist: 20 | << : *default-config 21 | container_name: alist 22 | hostname: alist 23 | volumes: 24 | - '/data/docker/alist:/opt/alist/data' 25 | ports: 26 | - '5244:5244' 27 | environment: 28 | - PUID=0 29 | - PGID=0 30 | - UMASK=022 31 | 32 | image: 'xhofe/alist:latest' 33 | 34 | networks: 35 | default: 36 | external: 37 | name: app 38 | -------------------------------------------------------------------------------- /apm/README.md: -------------------------------------------------------------------------------- 1 | # APM 2 | 3 | [APM](https://www.elastic.co/guide/en/apm/get-started/7.12/index.html) 是 Elastic 下的一个应用程序监控解决方案。 4 | 5 | ## 部署相关文档 6 | 7 | [APM-server 手动安装](https://www.elastic.co/guide/en/apm/server/7.12/installing.html) 8 | [APM-server Docker 部署](https://www.elastic.co/guide/en/apm/server/7.12/running-on-docker.html) 9 | [APM-server Docker 镜像](https://hub.docker.com/_/apm-server) 10 | [APM-server Dockerfile](https://github.com/elastic/apm-server/blob/master/Dockerfile) 11 | [APM-server 配置文档](https://www.elastic.co/guide/en/apm/server/7.12/configuration-process.html) 12 | 13 | ## 配置 14 | 15 | [Configure APM Server on Docker](https://www.elastic.co/guide/en/apm/server/7.12/running-on-docker.html#_configure_apm_server_on_docker) 16 | 17 | 在配置 APM 时,支持多种方式。 18 | 19 | ### 通过命令传递配置 20 | 21 | APM 启动命令支持使用 `-E` 传递配置: 22 | 23 | ```yml 24 | version: "3.7" 25 | services: 26 | apm: 27 | container_name: apm7 28 | image: docker.elastic.co/apm/apm-server:7.14.2 29 | # 
volumes: 30 | # - ./apm-server.docker.yml:/usr/share/apm-server/apm-server.yml:ro 31 | environment: 32 | output.elasticsearch.hosts: es7:9200 33 | # Default command: -e -d 34 | # -d show log to stderr 35 | # use -E pass overwrite configuration. 36 | command: -e -E output.elasticsearch.hosts=["es7:9200"] -E apm-server.kibana.enabled=true -E apm-server.kibana.host=http://kibana7:5601 -E apm-server.rum.enable=true 37 | ports: 38 | - 8200:8200 39 | logging: 40 | options: 41 | max-size: "100M" 42 | max-file: "1" 43 | ulimits: 44 | memlock: 45 | soft: -1 46 | hard: -1 47 | 48 | # es7 and kibana7 in db network. 49 | networks: 50 | default: 51 | external: true 52 | name: db 53 | 54 | ``` 55 | 56 | ### 使用配置文件覆盖默认配置 57 | 58 | 通过将 APM 的配置挂在到镜像中将配置传递到 `/usr/share/apm-server/apm-server.yml` 中,以覆盖默认配置。 59 | 60 | ```yaml 61 | apm-server: 62 | host: "0.0.0.0:8200" 63 | rum: 64 | enabled: true 65 | kibana: 66 | enabled: true 67 | host: http://kibana7:5601 68 | 69 | output: 70 | elasticsearch: 71 | hosts: es7:9200 72 | 73 | queue.mem.events: 4096 74 | 75 | max_procs: 4 76 | 77 | ``` 78 | 79 | ## 使用 80 | 81 | 部署并正确连接到 ES 中后,可以在 Kibana 的左侧导航栏中的 `Observability` 二级导航下找到 `APM` ,点击 `设置说明` , 82 | 在 `APM Server 状态` 后面有个 `检查 APM Server 状态` 的按钮,点击后可以检查 APM-server 状态。如果一切正常的话, 83 | 会显示绿色的提示 `您已正确设置 APM Server` 。此时 APM-server 就已经配置完成了。 84 | 85 | 后续就可以继续跟着 [APM-server 快速开始](https://www.elastic.co/guide/en/apm/get-started/current/install-and-run.html)的文档 86 | 进行后续的工作。 87 | -------------------------------------------------------------------------------- /apm/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | apm: 20 | image: 
docker.elastic.co/apm/apm-server:7.14.2 21 | container_name: apm7 22 | hostname: apm7 23 | << : *default-config 24 | # volumes: 25 | # - ./apm-server.docker.yml:/usr/share/apm-server/apm-server.yml:ro 26 | environment: 27 | output.elasticsearch.hosts: es7:9200 28 | # Default command: -e -d 29 | # -d show log to stderr 30 | # use -E pass overwrite configuration. 31 | command: -e -E output.elasticsearch.hosts=["es7:9200"] -E apm-server.kibana.enabled=true -E apm-server.kibana.host=http://kibana7:5601 -E apm-server.rum.enable=true 32 | ports: 33 | - 8200:8200 34 | networks: 35 | - app 36 | - db 37 | 38 | # es7 and kibana7 in db network. 39 | networks: 40 | db: 41 | external: true 42 | name: db 43 | app: 44 | external: true 45 | name: app 46 | -------------------------------------------------------------------------------- /aria2/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | Aria2-Pro: 20 | container_name: aria2-server 21 | image: p3terx/aria2-pro 22 | << : *default-config 23 | environment: 24 | - PUID=65534 25 | - PGID=65534 26 | - UMASK_SET=022 27 | - RPC_SECRET=foo 28 | - RPC_PORT=6800 29 | - LISTEN_PORT=6888 30 | - DISK_CACHE=64M 31 | - IPV6_MODE=false 32 | - UPDATE_TRACKERS=true 33 | - CUSTOM_TRACKER_URL= 34 | - TZ=Asia/Shanghai 35 | volumes: 36 | # - ${PWD}/aria2-config:/config 37 | - ~/Downloads/aria2:/downloads 38 | # If you use host network mode, then no port mapping is required. 39 | # This is the easiest way to use IPv6 networks. 
40 | # network_mode: host 41 | # network_mode: bridge 42 | ports: 43 | - 6800:6800 44 | - 6888:6888 45 | - 6888:6888/udp 46 | 47 | # AriaNg is just a static web page, usually you only need to deploy on a single host. 48 | AriaNg: 49 | container_name: ariang 50 | image: p3terx/ariang:test 51 | command: --port 6880 --ipv6 52 | << : *default-config 53 | # network_mode: host 54 | # network_mode: bridge 55 | ports: 56 | - 6880:6880 57 | 58 | 59 | networks: 60 | default: 61 | external: 62 | name: app 63 | 64 | 65 | -------------------------------------------------------------------------------- /authentik/README.md: -------------------------------------------------------------------------------- 1 | # Authentik 2 | 3 | ref: 4 | -------------------------------------------------------------------------------- /authentik/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | x-base: &default-config 5 | restart: unless-stopped 6 | ulimits: 7 | nproc: 65535 8 | nofile: 9 | soft: 20000 10 | hard: 40000 11 | stop_grace_period: 1m 12 | logging: 13 | driver: json-file 14 | options: 15 | max-size: '100m' 16 | max-file: '1' 17 | mem_swappiness: 0 18 | 19 | services: 20 | postgresql: 21 | image: docker.io/library/postgres:15-alpine 22 | << : *default-config 23 | healthcheck: 24 | test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"] 25 | start_period: 20s 26 | interval: 30s 27 | retries: 5 28 | timeout: 5s 29 | volumes: 30 | - database:/var/lib/postgresql/data 31 | environment: 32 | POSTGRES_PASSWORD: ${PG_PASS:?database password required} 33 | POSTGRES_USER: ${PG_USER:-authentik} 34 | POSTGRES_DB: ${PG_DB:-authentik} 35 | env_file: 36 | - .env 37 | 38 | redis: 39 | image: docker.io/library/redis:alpine 40 | command: --save 60 1 --loglevel warning 41 | << : *default-config 42 | healthcheck: 43 | test: ["CMD-SHELL", "redis-cli ping | grep PONG"] 44 | start_period: 20s 45 | interval: 30s 
46 | retries: 5 47 | timeout: 3s 48 | volumes: 49 | - redis:/data 50 | 51 | server: 52 | image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.10.4} 53 | << : *default-config 54 | command: server 55 | environment: 56 | AUTHENTIK_REDIS__HOST: redis 57 | AUTHENTIK_POSTGRESQL__HOST: postgresql 58 | AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik} 59 | AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik} 60 | AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS} 61 | AUTHENTIK_SECRET_KEY: ${SECRET_KEY:-authentik} 62 | volumes: 63 | - authentik:/media 64 | - authentik:/templates 65 | env_file: 66 | - .env 67 | ports: 68 | - "${COMPOSE_PORT_HTTP:-9000}:9000" 69 | - "${COMPOSE_PORT_HTTPS:-9443}:9443" 70 | depends_on: 71 | - postgresql 72 | - redis 73 | 74 | worker: 75 | image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.10.4} 76 | << : *default-config 77 | command: worker 78 | environment: 79 | AUTHENTIK_REDIS__HOST: redis 80 | AUTHENTIK_POSTGRESQL__HOST: postgresql 81 | AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik} 82 | AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik} 83 | AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS} 84 | AUTHENTIK_SECRET_KEY: ${SECRET_KEY:-authentik} 85 | # `user: root` and the docker socket volume are optional. 
86 | # See more for the docker socket integration here: 87 | # https://goauthentik.io/docs/outposts/integrations/docker 88 | # Removing `user: root` also prevents the worker from fixing the permissions 89 | # on the mounted folders, so when removing this make sure the folders have the correct UID/GID 90 | # (1000:1000 by default) 91 | user: root 92 | volumes: 93 | - /var/run/docker.sock:/var/run/docker.sock 94 | - authentik:/media 95 | - authentik:/certs 96 | - authentik:/templates 97 | env_file: 98 | - .env 99 | depends_on: 100 | - postgresql 101 | - redis 102 | 103 | volumes: 104 | database: 105 | redis: 106 | authentik: 107 | -------------------------------------------------------------------------------- /caddy/Caddyfile: -------------------------------------------------------------------------------- 1 | :80 { 2 | # Set this path to your site's directory. 3 | root * /data 4 | file_server browse 5 | } 6 | -------------------------------------------------------------------------------- /caddy/README.md: -------------------------------------------------------------------------------- 1 | # Caddy 2 | 3 | ref: 4 | 5 | - [Caddy document](https://caddyserver.com/docs/) 6 | - [Caddy image](https://hub.docker.com/_/caddy) 7 | 8 | 提供 docker-compose 服务和 podman kube 服务。 9 | 10 | ## 使用 11 | 12 | ### 增加配置文件 13 | 14 | 创建 `Caddyfile` 文件,文件内容如下: 15 | 16 | ```text 17 | :80 { 18 | root * /var/wwwroot/ 19 | file_server 20 | } 21 | ``` 22 | 23 | 该文件表示启动一个 `80` 端口的 Web 服务,加载 `/var/wwwroot` 目录作为网站静态文件目录,默认会加载 `index.html` 。 24 | 25 | 后面操作将会挂在此文件到容器中,用来启动 caddy 服务。请将该文件放在你想放置目录。 26 | 27 | ### docker-compose 28 | 29 | ```bash 30 | docker-compose up -d 31 | ``` 32 | 33 | ### podman 34 | 35 | 启动: 36 | 37 | ```bash 38 | podman play kube playkube.yml 39 | ``` 40 | 41 | 销毁: 42 | 43 | ```bash 44 | podman play kube --down playkube.yml 45 | ``` 46 | -------------------------------------------------------------------------------- /caddy/config/caddy/autosave.json: 
-------------------------------------------------------------------------------- 1 | {"apps":{"http":{"servers":{"srv0":{"listen":[":80"],"routes":[{"handle":[{"handler":"vars","root":"/data"},{"browse":{},"handler":"file_server","hide":["/etc/caddy/Caddyfile"]}]}]}}}}} -------------------------------------------------------------------------------- /caddy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | server: 20 | image: caddy:2 21 | << : *default-config 22 | ports: 23 | - 9090:80 24 | volumes: 25 | - ${PWD}/Caddyfile:/etc/caddy/Caddyfile 26 | #- /opt/data/docker:/data 27 | - /data/docker/caddy:/data 28 | - ${PWD}/config:/config 29 | 30 | networks: 31 | default: 32 | external: true 33 | name: app 34 | -------------------------------------------------------------------------------- /caddy/playkube.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: caddy 5 | spec: 6 | containers: 7 | - image: caddy 8 | name: caddy 9 | ports: 10 | - containerPort: 80 11 | hostPort: 8080 12 | protocol: TCP 13 | volumeMounts: 14 | - mountPath: /etc/caddy/Caddyfile 15 | name: caddyfile 16 | - mountPath: /data 17 | name: data 18 | 19 | volumes: 20 | - name: caddyfile 21 | hostPath: 22 | # 宿主上目录位置 23 | path: /root/workspace/caddy/Caddyfile 24 | # 此字段为可选 25 | type: FileOrCreate 26 | 27 | - name: data 28 | hostPath: 29 | # 宿主上目录位置 30 | path: /data/podman/caddy 31 | # 此字段为可选 32 | type: DirectoryOrCreate 33 | -------------------------------------------------------------------------------- /derper/Caddyfile: 
-------------------------------------------------------------------------------- 1 | { 2 | log { 3 | output stdout 4 | format console 5 | } 6 | } 7 | 8 | {$DERP_DOMAIN} { 9 | log { 10 | output stdout 11 | format console 12 | } 13 | @notWellKnown { 14 | not path /.well-known/* 15 | } 16 | handle @notWellKnown { 17 | reverse_proxy http://derper { 18 | header_up Host {http.reverse_proxy.upstream.hostport} 19 | header_up X-Real-IP {http.request.remote.host} 20 | header_up X-Forwarded-For {http.request.remote.host} 21 | header_up X-Forwarded-Proto {http.request.scheme} 22 | header_up Referer {http.request.uri} 23 | } 24 | } 25 | handle_path /.well-known/* { 26 | root * /data/wwwroot/{$DERP_DOMAIN}/ 27 | file_server 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /derper/README.md: -------------------------------------------------------------------------------- 1 | # tailscale derper 2 | 3 | 自建 tailscale derper 4 | 5 | ref: 6 | 7 | - 8 | - 9 | 10 | 环境说明: 11 | 12 | - isp: aliyun 13 | - os: rockylinux 9.2 14 | 15 | ## feature 16 | 17 | - [x] 支持 TLS ,使用 Caddy 自动申请证书 18 | - [x] 使用 Caddy 代理 derper ,可以和现有的 Caddy 配置共存,当然也可以使用 nginx 代理 19 | - [x] 使用 docker-compose 部署 20 | 21 | ## usage 22 | 23 | ### 环境准备 24 | 25 | **域名配置:** 26 | 27 | 如果需要使用域名,请将域名解析到当前主机。 28 | 29 | **iptables配置:** 30 | 31 | 使用 iptables 管理防火墙策略。虽然 rockylinux 在 9.0 弃用了 iptables ,但是 tailscale 依然使用 iptables 管理网络,为了 32 | 解决 tailscale 在 [CGNAT 上的问题](https://github.com/tailscale/tailscale/issues/3104),需要使用 iptables 额外配置部分防火墙规则。 33 | 34 | 在 rockylinux 中启用 iptables 请参考 [Enabling iptables Firewall](https://docs.rockylinux.org/pt/guides/security/enabling_iptables_firewall/) 35 | 36 | ```bash 37 | systemctl stop firewalld 38 | systemctl disable firewalld 39 | dnf install iptables-services iptables-utils 40 | systemctl enable --now iptables 41 | ``` 42 | 43 | **docker安装:** 44 | 45 | 你需要在本地安装好 docker 环境。docker ce 可以根据 [Install Docker 
Engine](https://docs.docker.com/engine/install/) 操作。 46 | 47 | 在 rockylinux 中安装 docker 48 | 49 | ```bash 50 | sudo dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 51 | sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin 52 | sudo systemctl --now enable docker 53 | ``` 54 | 55 | ### 安装并登录 tailscale 56 | 57 | 官网安装方式: 58 | 59 | 60 | 61 | ```bash 62 | # Add the tailscale repository 63 | sudo dnf -y config-manager --add-repo https://pkgs.tailscale.com/stable/fedora/tailscale.repo 64 | # Install Tailscale 65 | sudo dnf -y install tailscale 66 | # Enable and start tailscaled 67 | sudo systemctl enable --now tailscaled 68 | # Start Tailscale! 69 | sudo tailscale up 70 | ``` 71 | 72 | 由于国内网络环境,使用 dnf 下载不动,需要手动下载: 73 | 74 | ```bash 75 | # 在本地开启代理,然后使用 wget 下载 76 | 77 | latest_release=$(curl -s "https://api.github.com/repos/tailscale/tailscale/releases/latest" | grep -oP '"tag_name": "\K(.*)(?=")') 78 | wget https://pkgs.tailscale.com/stable/fedora/x86_64/tailscale_${latest_release#v}_x86_64.rpm /tmp/ 79 | 80 | sudo systemctl enable --now tailscaled 81 | 82 | sudo tailscale up 83 | ``` 84 | 85 | 安装并登录 tailscale ,然后在开启 derp 的时候验证客户端,避免自己的 derp 服务被别人使用。详见 [限制加密流量的路由位置 86 | ](https://tailscale.com/kb/1118/custom-derp-servers/#to-restrict-where-encrypted-traffic-is-routed) 87 | 88 | ### 创建工作目录 89 | 90 | ```bash 91 | # 创建工作目录 92 | mkdir -p /opt/deploy/derper 93 | # 创建 docker 数据目录 94 | mkdir -p /data/docker 95 | 96 | mkdir -p /data/docker/derper/config 97 | 98 | # 创建 caddy 数据目录 99 | mkdir -p /data/docker/caddy/config 100 | mkdir /data/docker/caddy/data 101 | 102 | # 创建网站目录 103 | mkdir /data/wwwroot 104 | ``` 105 | 106 | ### 配置文件 107 | 108 | 创建 compose 文件 `/opt/deploy/derper/docker-compose.yml` 109 | 110 | ```yaml 111 | version: "3.9" 112 | 113 | x-base: &default-config 114 | restart: unless-stopped 115 | ulimits: 116 | nproc: 65535 117 | nofile: 118 | soft: 20000 119 | hard: 40000 120 | stop_grace_period: 1m 121 | 
logging: 122 | driver: json-file 123 | options: 124 | max-size: '100m' 125 | max-file: '1' 126 | mem_swappiness: 0 127 | 128 | services: 129 | derper: 130 | <<: *default-config 131 | image: fredliang/derper 132 | container_name: derper 133 | ports: 134 | - '3478:3478/udp' 135 | environment: 136 | DERP_ADDR: ":80" 137 | DERP_VERIFY_CLIENTS: true 138 | volumes: 139 | - /var/run/tailscale/tailscaled.sock:/var/run/tailscale/tailscaled.sock 140 | 141 | server: 142 | image: caddy:2 143 | << : *default-config 144 | env_file: 145 | - .env 146 | ports: 147 | - 80:80 148 | - 443:443 149 | volumes: 150 | - ${PWD}/Caddyfile:/etc/caddy/Caddyfile 151 | - /data/docker/caddy:/data/caddy 152 | - /data/wwwroot/:/wwwroot 153 | 154 | ``` 155 | 156 | 说明: 157 | 158 | - 配置 derp 服务使用 `:80` ,启动 HTTP 服务,而不是 HTTPS 。 159 | - 使用 Caddy 代理 derp ,并提供域名,Caddy 会自动申请证书。 160 | - derp 默认启用 stun ,所以需要暴露 `3478/udp` 端口。将 derp 容器的 3478/udp 端口映射到主机,供 tailscale 通信使用。 161 | - 为了防止 derp 被别人使用,需要开启客户端验证,使用环境变量 `DERP_VERIFY_CLIENTS` 开启。同时需要在当前主机登录 tailscale ,并挂载 `tailscaled.sock` 。 162 | 163 | #### 创建 caddy 配置 164 | 165 | 创建 Cadyfile 文件 `/opt/deploy/derper/Caddyfile` 166 | 167 | ```txt 168 | { 169 | log { 170 | output stdout 171 | format console 172 | } 173 | } 174 | 175 | {$DERP_DOMAIN} { 176 | log { 177 | output stdout 178 | format console 179 | } 180 | route /.well-known/* { 181 | root * /data/wwwroot/{$DERP_DOMAIN}/ 182 | file_server 183 | } 184 | reverse_proxy / derper { 185 | header_up Host {host} 186 | header_up X-Real-IP {remote} 187 | header_up X-Forwarded-For {remote} 188 | header_up X-Forwarded-Proto {scheme} 189 | } 190 | } 191 | ``` 192 | 193 | 说明: 194 | 195 | caddy 配置文件中使用 `{$DERP_DOMAIN}` 从环境变量中读取域名,所以需要在 `.env` 文件中配置 `DERP_DOMAIN` 变量。 196 | 197 | #### 创建配置文件 198 | 199 | 创建 `/opt/deploy/derper/.env` 文件 200 | 201 | ```ini 202 | DERP_DOMAIN=derp.example.com 203 | ``` 204 | 205 | 将 `DERP_DOMAIN` 换成你自己的域名。如果没有域名,可以使用当前公网IP。 206 | 207 | #### 不使用 caddy 反向代理 (第二种操作) 208 | 209 | ```yaml 210 | version: 
"3.9" 211 | 212 | x-base: &default-config 213 | restart: unless-stopped 214 | ulimits: 215 | nproc: 65535 216 | nofile: 217 | soft: 20000 218 | hard: 40000 219 | stop_grace_period: 1m 220 | logging: 221 | driver: json-file 222 | options: 223 | max-size: '100m' 224 | max-file: '1' 225 | mem_swappiness: 0 226 | 227 | services: 228 | derper: 229 | <<: *default-config 230 | image: fredliang/derper 231 | container_name: derper 232 | env_file: 233 | - .env 234 | ports: 235 | - 443:443 236 | - '3478:3478/udp' 237 | # environment: 238 | # DERP_ADDR: ":80" 239 | # DERP_VERIFY_CLIENTS: true 240 | volumes: 241 | - /var/run/tailscale/tailscaled.sock:/var/run/tailscale/tailscaled.sock 242 | ``` 243 | 244 | ### 启动 245 | 246 | ```bash 247 | docker compose up -d 248 | ``` 249 | 250 | ## FAQ 251 | 252 | ### 阿里云网络问题 253 | 254 | ref: 255 | 256 | - [Tailscale出口节点无网络问题的调试与分析](https://nyan.im/p/troubleshoot-tailscale) 257 | - [Tailscale 与阿里云八字不合的解决方法(下):寻根](https://zhuanlan.zhihu.com/p/653295049) 258 | - [FR: do not add ipfilter rule to drop 100.64.0.0/10 when ipv4 is disabled](https://github.com/tailscale/tailscale/issues/3837) 259 | - [FR: netfilter CGNAT mode when non-Tailscale CGNAT addresses should be allowed](https://github.com/tailscale/tailscale/issues/3104) 260 | -------------------------------------------------------------------------------- /derper/docker-compose-caddy.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | server: 20 | image: caddy:2 21 | << : *default-config 22 | env_file: 23 | - .env 24 | ports: 25 | - 80:80 26 | - 443:443 27 | volumes: 28 | - ${PWD}/Caddyfile:/etc/caddy/Caddyfile 29 | - /data/docker/caddy:/data
30 | - /data/wwwroot/:/wwwroot 31 | 32 | networks: 33 | default: 34 | external: true 35 | name: app 36 | -------------------------------------------------------------------------------- /derper/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | derper: 20 | <<: *default-config 21 | image: fredliang/derper 22 | container_name: derper 23 | ports: 24 | - '3478:3478/udp' 25 | environment: 26 | DERP_ADDR: ":80" 27 | DERP_VERIFY_CLIENTS: true 28 | volumes: 29 | - /var/run/tailscale/tailscaled.sock:/var/run/tailscale/tailscaled.sock 30 | 31 | networks: 32 | default: 33 | external: true 34 | name: app 35 | -------------------------------------------------------------------------------- /elasticsearch/README.md: -------------------------------------------------------------------------------- 1 | # Elasticsearch 和 Kibana 2 | 3 | ## 1. Elasticsearch 4 | 5 | Docker 部署相关引用: 6 | 7 | - [Docker Official Images](https://hub.docker.com/_/elasticsearch) 8 | - [Install Elasticsearch with Docker](https://www.elastic.co/guide/en/elasticsearch/reference/7.12/docker.html) 9 | 10 | ## 2. 
Kibana 11 | 12 | Docker 部署相关引用: 13 | 14 | - [Docker Official Images](https://hub.docker.com/_/kibana) 15 | - [Install Kibana with Docker](https://www.elastic.co/guide/en/kibana/current/docker.html) 16 | 17 | Kibana 支持国际化,可以通过修改容器中的 `/usr/share/kibana/config/kibana.yml` 配置文件的 `i18n.locale: "zh-CN"` 指定。 18 | 当然也可以挂在配置文件,然后调整。 19 | 20 | 注意:通过环境变量传入的配置会覆盖配置文件中的配置,但不会动态修改配置文件中的内容。所以如果你通过配置文件传入了 `ELASTICSEARCH_HOSTS` , 21 | 配置文件中的 `elasticsearch.hosts` 值是不会生效的。 22 | 23 | 参考文档: [i18n settings in Kibana](https://www.elastic.co/guide/en/kibana/current/i18n-settings-kb.html) 24 | 25 | ### 2.1 配置项 26 | 27 | - 国际化: `i18n.locale: "zh-CN"` 28 | 29 | ## 3. 注意 30 | 31 | [docker-compose](./docker-compose.yml) 文件中使用了外部网络,并且没有挂载数据目录。 32 | -------------------------------------------------------------------------------- /elasticsearch/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | es7: 20 | # https://www.elastic.co/guide/en/elasticsearch/reference/7.12/docker.html 21 | image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7 22 | container_name: es7 23 | environment: 24 | discovery.type: single-node # https://www.elastic.co/guide/en/elasticsearch/reference/7.5/bootstrap-checks.html#single-node-discovery 25 | bootstrap.memory_lock: "true" 26 | ES_JAVA_OPTS: -Xms512m -Xmx512m 27 | transport.host: 127.0.0.1 # https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html#dev-vs-prod-mode 28 | xpack.security.enabled: "false" 29 | volumes: 30 | - /data/docker/es7:/usr/share/elasticsearch/data 31 | ports: 32 | - 9200:9200 33 | networks: 34 | - app 35 | - db 36 | 37 | # 
https://www.elastic.co/guide/en/kibana/current/docker.html 38 | kibana7: 39 | image: docker.elastic.co/kibana/kibana:7.17.7 40 | container_name: kibana7 41 | environment: 42 | ELASTICSEARCH_HOSTS: http://es7:9200 43 | i18n.locale: zh-CN 44 | xpack.security.enabled: "false" 45 | ports: 46 | - 5601:5601 47 | networks: 48 | - app 49 | - db 50 | 51 | networks: 52 | db: 53 | external: true 54 | name: db 55 | app: 56 | external: true 57 | name: app 58 | 59 | -------------------------------------------------------------------------------- /elasticsearch/kibana/kibana.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ** THIS IS AN AUTO-GENERATED FILE ** 3 | # 4 | 5 | # Default Kibana configuration for docker target 6 | server.name: kibana 7 | server.host: "0" 8 | elasticsearch.hosts: 9 | - http://es:9200 10 | 11 | # https://www.elastic.co/guide/en/kibana/current/i18n-settings-kb.html 12 | i18n.locale: zh-CN 13 | -------------------------------------------------------------------------------- /gitlab-runner/README.md: -------------------------------------------------------------------------------- 1 | # gitlab-runner docker-compose.yml 2 | 3 | :ref [Run GitLab Runner in a container](https://docs.gitlab.com/runner/install/docker.html) 4 | 5 | 为项目配置 gitlab runner。 6 | 7 | ## 使用说明 8 | 9 | ### 1. 启动 runner 10 | 11 | ```bash 12 | docker-compose up -d 13 | ``` 14 | 15 | ### 2. 
配置 runner 16 | 17 | 先进入容器内部 18 | 19 | ```bash 20 | docker exec -it gitlab-runner /bin/bash 21 | ``` 22 | 23 | 在 runner 容器中运行 `gitlab-runner register` 命令注册。需要修改的参数有 24 | 25 | - `--url` 为你的 gitlab 的地址 26 | - `--registration-token` 为项目 runner 的 token 27 | 28 | ```bash 29 | gitlab-runner register \ 30 | --non-interactive \ 31 | --url "https://gitlab.com/" \ 32 | --registration-token "PROJECT_REGISTRATION_TOKEN" \ 33 | --executor "docker" \ 34 | --docker-image "alpine:latest" \ 35 | --description "docker-runner" \ 36 | --run-untagged="true" \ 37 | --locked="false" \ 38 | --access-level="not_protected" 39 | ``` 40 | 41 | 一个 Runner 可以注册多个项目,如果某个项目不需要了,可以直接在项目上移除 runner 然后把对应的配置从 `/etc/gitlab-runner/config.toml` 42 | 中移除,然后使用 `gitlab-runner restart` 重启就可以了。或者直接将注册的 runner 设置为共享,其他项目可以直接启动,而不需要再次注册。 43 | 44 | 跟多使用参考后面说明,或者查看相关文档。 45 | 46 | ## gitlab-runner 配置 47 | 48 | 详细文档参考 [Registering Runners](https://docs.gitlab.com/runner/register/) 49 | 50 | ### 进入容器 51 | 52 | ```bash 53 | docker exec -it gitlab-runner /bin/bash 54 | ``` 55 | 56 | ### 注册 Runner 57 | 58 | 参考 [One-line registration command](https://docs.gitlab.com/runner/register/#one-line-registration-command) 59 | 60 | ```bash 61 | gitlab-runner register \ 62 | --non-interactive \ 63 | --url "https://gitlab.com/" \ 64 | --registration-token "PROJECT_REGISTRATION_TOKEN" \ 65 | --executor "docker" \ 66 | --docker-image "alpine:latest" \ 67 | --description "docker-runner" \ 68 | --run-untagged="true" \ 69 | --locked="false" \ 70 | --access-level="not_protected" 71 | ``` 72 | 73 | ### 注册可以在任务中运行 docker 命令的 gitlab-runner 74 | 75 | 场景描述: 76 | 77 | 有时候需要在 ci 中使用 `docker build` 或者 `docker run` 这些 docker 命令。这时候就需要使用支持 docker 运行的 runner 了。 78 | 使用这种 runner 并不会影响正常的 python 环境构建。因为该 runner 仅仅是一个执行器,用来解析 ci 文件,然后发起对应的 stage。 79 | 在 ci 文件中可以使用 `image: python:3.7` 标注后面的的 stage 默认全使用 python3.7 环境,如果对应 stage 需要使用其他环境, 80 | 只需要在对应 stage 中显式标注就行了,如在 Docker 构建的 stage 中需要使用 Docker 镜像,就需要标注为 `image: docker:19.03.8` 了。 81 | 82 | 参考 [Use 
Docker socket binding](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#use-docker-socket-binding) 83 | 84 | 这里指定参数 `docker-volumes`,如果在使用任务中需要使用 docker 镜像进行 `docker` 相关命令操作,会将 `docker.sock` 挂在到 85 | 启动的 docker 容器中。这个参数是关键。 86 | 87 | 建议 runner 注册时都增加这个参数,方便以后需要使用 docker 命令的时候可用。 88 | 89 | ```bash 90 | gitlab-runner register -n \ 91 | --url https://gitlab.com/ \ 92 | --registration-token REGISTRATION_TOKEN \ 93 | --executor docker \ 94 | --description "My Docker Runner" \ 95 | --docker-image "alpine:latest" \ 96 | --locked "false" 97 | --docker-volumes "/var/run/docker.sock:/var/run/docker.sock" 98 | ``` 99 | 100 | > 注册的时候可用手动增加 `--locked="false"` 指定共享该 runner 。注册之后也是可用更改的 参考:[Locking a specific Runner from being enabled for other projects](https://docs.gitlab.com/ee/ci/runners/#locking-a-specific-runner-from-being-enabled-for-other-projects) 101 | 102 | 或者手动增加如下配置,然后重启 gitlab-runner 103 | 104 | ```toml 105 | [[runners]] 106 | url = "https://gitlab.com/" 107 | token = REGISTRATION_TOKEN 108 | executor = "docker" 109 | [runners.docker] 110 | tls_verify = false 111 | image = "docker:19.03.8" 112 | privileged = false 113 | disable_cache = false 114 | volumes = ["/var/run/docker.sock:/var/run/docker.sock", "/cache"] 115 | [runners.cache] 116 | Insecure = false 117 | ``` 118 | 119 | 注册这个 runner 的项目就可以运行类似如下任务了: 120 | 121 | ```yml 122 | build: 123 | image: docker:19.03.8 124 | stage: build 125 | script: 126 | - docker build -t my-docker-image . 
127 | - docker run my-docker-image /script/to/run/tests 128 | ``` 129 | 130 | 更多用法,请参考官方文档:[GitLab Docs](https://docs.gitlab.com/ee/README.html) 和 [GitLab Runner Docs](https://docs.gitlab.com/runner/) -------------------------------------------------------------------------------- /gitlab-runner/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | gitlab_runner: 20 | container_name: gitlab-runner 21 | image: gitlab/gitlab-runner:latest 22 | << : *default-config 23 | ports: 24 | - 8093:8093 25 | volumes: 26 | - /var/run/docker.sock:/var/run/docker.sock 27 | - gitlab_runner_config:/etc/gitlab-runner 28 | 29 | volumes: 30 | gitlab_runner_config: 31 | 32 | networks: 33 | default: 34 | external: true 35 | name: other 36 | -------------------------------------------------------------------------------- /grafana/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whg517/docker-compose/34142cc934712e70ad7d4235114bd4c4a3a9a065/grafana/README.md -------------------------------------------------------------------------------- /grafana/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | grafana: 20 | image: grafana/grafana 21 | container_name: grafana 22 | << : *default-config 23 | 
ports: 24 | - 9093:3000 25 | 26 | networks: 27 | default: 28 | external: true 29 | name: app 30 | -------------------------------------------------------------------------------- /jenkins/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whg517/docker-compose/34142cc934712e70ad7d4235114bd4c4a3a9a065/jenkins/README.md -------------------------------------------------------------------------------- /jenkins/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | jenkins: 20 | user: root 21 | container_name: jenkins 22 | image: jenkinsci/blueocean 23 | << : *default-config 24 | ports: 25 | - 8085:8080 26 | volumes: 27 | - "/data/docker/jenkins:/var/jenkins_home" 28 | - "/var/run/docker.sock:/var/run/docker.sock" 29 | 30 | networks: 31 | default: 32 | external: true 33 | name: other 34 | -------------------------------------------------------------------------------- /kafka/README.md: -------------------------------------------------------------------------------- 1 | # Kafka docker-compose.yml 2 | 3 | :ref [bitnami/kafka](https://hub.docker.com/r/bitnami/kafka) 4 | 5 | ## 使用 6 | 7 | ```bash 8 | $ docker-compose up -d 9 | ``` 10 | 11 | ### 注意 12 | 13 | 如果内部和外部客户端都可以访问Apache Kafka,需要为他们分别设置对应的listener。 14 | 15 | ```diff 16 | + - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://0.0.0.0:9094 17 | + - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://localhost:9194 18 | + - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT 19 | 20 | ``` 21 | 22 | 
然后暴漏这个外部端口,比如: 23 | ```diff 24 | ports: 25 | - - '9092:9092' 26 | + - '9194:9094' 27 | ``` 28 | 以上更新中 9194 是外部端口,9094 是内部端口。 29 | 30 | **重要**: 如果外部机器需要访问Kafka,那么需要将 `KAFKA_CFG_ADVERTISED_LISTENERS` 配置中的 `EXTERNAL` 的 `localhost`替换为外部IP/域名; `KAFKA_CFG_LISTENERS` 配置中的 `EXTERNAL` 设置为 `0.0.0.0:9094`。 31 | 32 | 33 | ## 集群部署 -------------------------------------------------------------------------------- /kafka/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | x-base: &default-config 3 | restart: unless-stopped 4 | ulimits: 5 | nproc: 65535 6 | nofile: 7 | soft: 20000 8 | hard: 40000 9 | stop_grace_period: 1m 10 | logging: 11 | driver: json-file 12 | options: 13 | max-size: '100m' 14 | max-file: '1' 15 | mem_swappiness: 0 16 | services: 17 | zoo1: 18 | image: zookeeper:3.5.8 19 | restart: always 20 | <<: *default-config 21 | hostname: zoo1 22 | ports: 23 | - 2181:2181 24 | environment: 25 | ZOO_MY_ID: 1 26 | ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181 27 | 28 | zoo2: 29 | image: zookeeper:3.5.8 30 | restart: always 31 | <<: *default-config 32 | hostname: zoo2 33 | ports: 34 | - 2182:2181 35 | environment: 36 | ZOO_MY_ID: 2 37 | ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181 38 | 39 | zoo3: 40 | image: zookeeper:3.5.8 41 | restart: always 42 | <<: *default-config 43 | hostname: zoo3 44 | ports: 45 | - 2183:2181 46 | environment: 47 | ZOO_MY_ID: 3 48 | ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181 49 | kafka1: 50 | image: 'bitnami/kafka:latest' 51 | ports: 52 | - '9092:9192' 53 | environment: 54 | - KAFKA_CFG_NODE_ID=0 55 | - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://0.0.0.0:9192 56 | - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka1:9092,EXTERNAL://example.com:9092 57 | - 
KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT 58 | - KAFKA_CFG_ZOOKEEPER_CONNECT=zoo1:2181,zoo2:2181,zoo3:2181 59 | depends_on: 60 | - zoo1 61 | - zoo2 62 | - zoo3 63 | 64 | kafka2: 65 | image: 'bitnami/kafka:latest' 66 | ports: 67 | - '9093:9193' 68 | environment: 69 | - KAFKA_CFG_NODE_ID=1 70 | - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://0.0.0.0:9193 71 | - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka2:9092,EXTERNAL://example.com:9093 72 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT 73 | - KAFKA_CFG_ZOOKEEPER_CONNECT=zoo1:2181,zoo2:2181,zoo3:2181 74 | depends_on: 75 | - zoo1 76 | - zoo2 77 | - zoo3 78 | 79 | kafka3: 80 | image: 'bitnami/kafka:latest' 81 | ports: 82 | - '9094:9194' 83 | environment: 84 | - KAFKA_CFG_NODE_ID=2 85 | - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://0.0.0.0:9194 86 | - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka3:9092,EXTERNAL://example.com:9094 87 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT 88 | - KAFKA_CFG_ZOOKEEPER_CONNECT=zoo1:2181,zoo2:2181,zoo3:2181 89 | depends_on: 90 | - zoo1 91 | - zoo2 92 | - zoo3 93 | 94 | 95 | networks: 96 | default: 97 | external: true 98 | name: other 99 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help 2 | help: ## Display this help. 
3 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 4 | 5 | ##@ Up env 6 | 7 | .PHONY: start-minio 8 | start-minio: ## start minio 9 | @echo "Up minio" 10 | docker compose -f docker-compose-minio.yml up -d 11 | 12 | .PHONY: start-dependencies 13 | start-dependencies: ## Init env 14 | @echo "Init env" 15 | docker compose -f docker-compose-init.yml up 16 | 17 | .PHONY: start-hive-metastore 18 | start-hive-metastore: ## Up hive-metastore 19 | @echo "Up hive-metastore" 20 | docker compose -f docker-compose-hive.yml up -d 21 | 22 | .PHONY: start-trino 23 | start-trino: ## Up trino 24 | @echo "Up trino" 25 | docker compose -f docker-compose-trino.yml up -d 26 | 27 | 28 | .PHONY: start 29 | start: start-minio start-dependencies start-hive-metastore start-trino ## Up env 30 | 31 | ##@ Down env 32 | 33 | .PHONY: down-minio 34 | down-minio: ## Down minio 35 | @echo "Down minio" 36 | docker compose -f docker-compose-minio.yml down -v 37 | 38 | .PHONY: down-dependencies 39 | down-dependencies: ## Down dependencies 40 | @echo "Down dependencies" 41 | docker compose -f docker-compose-init.yml down -v 42 | 43 | .PHONY: down-hive-metastore 44 | down-hive-metastore: ## Down hive-metastore 45 | @echo "Down hive-metastore" 46 | docker compose -f docker-compose-hive.yml down -v 47 | 48 | .PHONY: down-trino 49 | down-trino: ## Down trino 50 | @echo "Down trino" 51 | docker compose -f docker-compose-trino.yml down -v 52 | 53 | .PHONY: down 54 | down: down-trino down-hive-metastore down-dependencies down-minio ## Down env 55 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/README.md: -------------------------------------------------------------------------------- 1 | # lakehouse 2 | 3 | 本文介绍了如何在 docker 环境下使用 minio ,postgres , hive-metastore ,trino 搭建 lakehouse 
环境。 4 | 5 | 在搭建过程中由于 hive 的容器缺少依赖 jar ,采用 gradle 作为初始化镜像,拉取需要的依赖,并通过卷共享的方式合并 6 | hive 的 jar 。 7 | 8 | 文章分为三个主题: 9 | 10 | - 快速流程搭建:快速准备文件和 docker-file ,然后启动,最后验证 11 | - 手动分步骤搭建:拆分步骤,并详细解释标准搭建流程 12 | - 不使用 s3 搭建:采用本地目录存储数据,减少 s3 服务占用资源 13 | 14 | **注意:如果需要将相关端口暴漏公网,请对相关服务进行安全加固。** 15 | **注意:实际操作时,请务必修改相关密码,并避免机密信息泄漏。** 16 | 17 | ## 使用方式 18 | 19 | 快速搭建分一下步骤进行: 20 | 21 | - 准备配置文件 22 | - 安装 minio 23 | - 初始化环境 24 | - 安装 hive-metastore 25 | - 安装 trino 26 | 27 | 每一步为一个 docker-compose 文件。 28 | 29 | ### 快速流程搭建 30 | 31 | ```bash 32 | # 创建 .env 环境变量文件,用于环境变量共享,同时不应被 git 追踪,避免机密信息泄漏 33 | cat <<'EOF' > .env 34 | 35 | ## Minio user 36 | MINIO_ROOT_USER=minioadmin 37 | MINIO_ROOT_PASSWORD=minioadmin 38 | MINIO_TRINO_USERNAME=trino 39 | MINIO_TRINO_PASSWORD=dHJpbm8gLW4K 40 | 41 | ## Postgres user 42 | POSTGRES_DB=metastore_db 43 | POSTGRES_USER=hive 44 | POSTGRES_PASSWORD="aGl2ZSAtbgo=" 45 | 46 | ## Trino config 47 | # openssl rand 512 | base64 48 | TRINO_SHARED_SECRET=trino_shared_secret 49 | 50 | EOF 51 | 52 | mkdir -p config/trino/catalog 53 | 54 | cat <<\EOF > config/trino/catalog/iceberg.properties 55 | connector.name=iceberg 56 | hive.metastore.uri=thrift://metastore:9083 57 | iceberg.catalog.type=hive_metastore 58 | hive.s3.path-style-access=true 59 | hive.s3.endpoint=http://minio:9000 60 | hive.s3.ssl.enabled=false 61 | hive.s3.region=us-east-1 62 | # hive.s3.aws-access-key=trino 63 | # hive.s3.aws-secret-key=iNAMZLtirahV 64 | 65 | EOF 66 | 67 | mkdir -p config/hive 68 | 69 | cat > config/hive/hive-site.xml < 71 | 72 | 73 | 74 | 75 | 86 | 87 | 88 | fs.s3a.connection.maximum 89 | 15 90 | Controls the maximum number of simultaneous connections to S3. 91 | 92 | 93 | 94 | fs.s3a.connection.ssl.enabled 95 | false 96 | Enables or disables SSL connections to S3. 97 | 98 | 99 | 100 | fs.s3a.endpoint 101 | 102 | http://minio:9000 103 | 104 | AWS S3 endpoint to connect to. An up-to-date list is 105 | provided in the AWS Documentation: regions and endpoints. 
Without this 106 | property, the standard region (s3.amazonaws.com) is assumed. 107 | 108 | 109 | 110 | 111 | fs.s3a.endpoint.region 112 | us-east-1 113 | AWS S3 region for a bucket, which bypasses the parsing of 114 | fs.s3a.endpoint to know the region. Would be helpful in avoiding errors 115 | while using privateLink URL and explicitly set the bucket region. 116 | If set to a blank string (or 1+ space), falls back to the 117 | (potentially brittle) SDK region resolution process. 118 | 119 | 120 | 121 | 122 | fs.s3a.path.style.access 123 | true 124 | Enable S3 path style access ie disabling the default virtual hosting behaviour. 125 | Useful for S3A-compliant storage providers as it removes the need to set up DNS for 126 | virtual hosting. 127 | 128 | 129 | 130 | 131 | fs.s3a.impl 132 | org.apache.hadoop.fs.s3a.S3AFileSystem 133 | The implementation class of the S3A Filesystem 134 | 135 | 136 | 137 | fs.AbstractFileSystem.s3a.impl 138 | org.apache.hadoop.fs.s3a.S3A 139 | The implementation class of the S3A AbstractFileSystem. 
140 | 141 | 142 | 143 | EOF 144 | 145 | cat > config/hive/build.gradle < scripts/minio-init.sh <<\EOT 170 | 171 | ## config mc 172 | # update local server config for mc 173 | mc alias set local http://minio:9000 ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD} 174 | mc admin info local 175 | 176 | ## add user 177 | pwgen="tr -dc '[:alnum:]' < /dev/urandom | fold -w 12 | head -n 1" 178 | # access_key=trino # usernmae 179 | # secret_key=$(eval $pwgen) # eg: iNAMZLtirahV 180 | # mc admin user add local ${access_key} ${secret_key} 181 | : ${LAKEHOUSE_USER:=trino} 182 | : ${LAKEHOUSE_PASSWORD} 183 | mc admin user add local ${LAKEHOUSE_USER} ${LAKEHOUSE_PASSWORD} 184 | mc admin user list local 185 | 186 | ## add bucket 187 | : ${LAKEHOUSE_BUCKET:=lake-house} 188 | mc mb local/${LAKEHOUSE_BUCKET} 189 | mc ls local 190 | 191 | ## add policy 192 | 193 | cat < /tmp/lake_house_policy.json 194 | { 195 | "Version": "2012-10-17", 196 | "Id": "LakeHouseBuckeyPolicy", 197 | "Statement": [ 198 | { 199 | "Sid": "Stment01", 200 | "Effect": "Allow", 201 | "Action": [ 202 | "s3:GetBucketLocation", 203 | "s3:ListBucket", 204 | "s3:ListBucketMultipartUploads", 205 | "s3:ListBucketVersions", 206 | "s3:GetObject", 207 | "s3:PutObject", 208 | "s3:DeleteObject", 209 | "s3:ListMultipartUploadParts", 210 | "s3:AbortMultipartUpload" 211 | ], 212 | "Resource": [ 213 | "arn:aws:s3:::${LAKEHOUSE_BUCKET}/*", 214 | "arn:aws:s3:::${LAKEHOUSE_BUCKET}" 215 | ] 216 | } 217 | ] 218 | } 219 | EOF 220 | mc admin policy create local lake_house /tmp/lake_house_policy.json 221 | mc admin policy list local 222 | 223 | ## attach policy 224 | mc admin policy entities --user trino local | grep lake_house 225 | if [ $? 
-eq 0 ]; then 226 | echo "policy already attached" 227 | else 228 | echo "attaching policy to user" 229 | mc admin policy attach local lake_house --user ${LAKEHOUSE_USER} 230 | fi 231 | EOT 232 | 233 | chmod +x scripts/minio-init.sh 234 | ``` 235 | 236 | ### 安装 minio 237 | 238 | ```bash 239 | 240 | cat > docker-compose-minio.yml <<\EOF 241 | version: "3.9" 242 | 243 | x-base: &default-config 244 | restart: unless-stopped 245 | ulimits: 246 | nproc: 65535 247 | nofile: 248 | soft: 20000 249 | hard: 40000 250 | stop_grace_period: 1m 251 | logging: 252 | driver: json-file 253 | options: 254 | max-size: '100m' 255 | max-file: '1' 256 | mem_swappiness: 0 257 | env_file: 258 | - .env 259 | 260 | services: 261 | minio: 262 | image: quay.io/minio/minio:RELEASE.2023-08-16T20-17-30Z 263 | << : *default-config 264 | command: server /data --console-address ":9001" 265 | hostname: minio 266 | ports: 267 | - 127.0.0.1:9000:9000 268 | - 127.0.0.1:9001:9001 269 | volumes: 270 | - lakehouse-minio:/data 271 | # environment: 272 | # MINIO_ROOT_USER: minioadmin 273 | # MINIO_ROOT_PASSWORD: minioadmin 274 | healthcheck: 275 | test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] 276 | interval: 30s 277 | timeout: 20s 278 | retries: 3 279 | 280 | volumes: 281 | lakehouse-minio: 282 | 283 | EOF 284 | 285 | docker compose -f docker-compose-minio.yml up -d 286 | 287 | ``` 288 | 289 | ### 初始化环境 290 | 291 | ```bash 292 | cat <<\EOF > docker-compose-init.yml 293 | version: '3.9' 294 | 295 | services: 296 | # use gradle as init container to download pg jar for hive metastore 297 | # https://stackoverflow.com/a/32402694/11722440 298 | metastore-init-jars-download: 299 | image: gradle:8 300 | restart: on-failure 301 | volumes: 302 | - hive-jars:/jars 303 | - gradle-cache:/home/gradle/.gradle/ 304 | - type: bind # 使用 bind 挂载单个文件到容器中 305 | source: ${PWD}/config/hive/build.gradle 306 | target: /home/gradle/build.gradle 307 | command: | 308 | bash -c ' 309 | gradle download 310 | ' 
311 | 312 | trino-init-jars-download: 313 | image: gradle:8 314 | restart: on-failure 315 | volumes: 316 | - trino-jars:/jars 317 | - gradle-cache:/home/gradle/.gradle/ 318 | - type: bind # 使用 bind 挂载单个文件到容器中 319 | source: ${PWD}/config/trino/build.gradle 320 | target: /home/gradle/build.gradle 321 | command: | 322 | bash -c ' 323 | gradle download 324 | ' 325 | 326 | # merge hive jars 327 | # https://stackoverflow.com/a/32402694/11722440 328 | metastore-init-jars-merge: 329 | image: apache/hive:4.0.0-beta-1 330 | restart: on-failure 331 | user: root 332 | volumes: 333 | - hive-jars:/jars:rw 334 | entrypoint: | 335 | bash -c ' 336 | cp -R /opt/hive/lib/* /jars 337 | ' 338 | 339 | # due to mount volume will override the original content, so we need to copy the jars to another volume 340 | # and then mount the volume to trino container 341 | trino-init-jars-merge: 342 | image: trinodb/trino:424 343 | restart: on-failure 344 | user: root 345 | volumes: 346 | - type: volume 347 | source: trino-jars 348 | target: /jars 349 | entrypoint: | 350 | bash -c ' 351 | cp /usr/lib/trino/plugin/iceberg/* /jars 352 | ' 353 | 354 | # minio s3 init 355 | minio-init: 356 | image: quay.io/minio/minio:RELEASE.2023-08-16T20-17-30Z 357 | restart: on-failure 358 | env_file: 359 | - .env 360 | volumes: 361 | - type: bind 362 | source: ${PWD}/scripts/minio-init.sh 363 | target: /minio-init.sh 364 | entrypoint: | 365 | bash -c ' 366 | /minio-init.sh 367 | ' 368 | 369 | volumes: 370 | hive-jars: 371 | trino-jars: 372 | gradle-cache: 373 | 374 | EOF 375 | 376 | docker compose -f docker-compose-init.yml up 377 | ``` 378 | 379 | ### 安装 hive-metastore 380 | 381 | ```bash 382 | cat <<\EOF > docker-compose-hive.yml 383 | version: "3.9" 384 | 385 | x-base: &default-config 386 | restart: unless-stopped 387 | ulimits: 388 | nproc: 65535 389 | nofile: 390 | soft: 20000 391 | hard: 40000 392 | stop_grace_period: 1m 393 | logging: 394 | driver: json-file 395 | options: 396 | max-size: '100m' 397 | 
max-file: '1' 398 | mem_swappiness: 0 399 | env_file: 400 | - .env 401 | 402 | services: 403 | # pg server 404 | postgres: 405 | image: postgres:15.4 406 | << : *default-config 407 | hostname: postgres 408 | # ports: 409 | # - '127.0.0.1:5432:5432' 410 | # environment: 411 | # POSTGRES_PASSWORD: postgres 412 | # POSTGRES_USER: postgres 413 | # POSTGRES_DB: postgres 414 | volumes: 415 | - postgres-data:/var/lib/postgresql/data 416 | 417 | # hive-metastore server 418 | metastore: 419 | image: apache/hive:4.0.0-beta-1 420 | depends_on: 421 | - postgres 422 | << : *default-config 423 | hostname: metastore 424 | environment: 425 | DB_DRIVER: postgres 426 | AWS_ACCESS_KEY_ID: ${LAKEHOUSE_USER} 427 | AWS_SECRET_ACCESS_KEY: ${LAKEHOUSE_PASSWORD} 428 | AWS_DEFAULT_REGION: us-east-1 429 | SERVICE_NAME: 'metastore -hiveconf hive.root.logger=INFO,console' 430 | SERVICE_OPTS: '-Xmx1G -Djavax.jdo.option.ConnectionDriverName=org.postgresql.Driver 431 | -Djavax.jdo.option.ConnectionURL=jdbc:postgresql://postgres:5432/${POSTGRES_DB} 432 | -Djavax.jdo.option.ConnectionUserName=${POSTGRES_USER} 433 | -Djavax.jdo.option.ConnectionPassword=${POSTGRES_PASSWORD} 434 | ' 435 | # ports: 436 | # - '127.0.0.1:9083:9083' 437 | volumes: 438 | - hive-jars:/opt/hive/lib 439 | - hive-data:/opt/hive/data 440 | - type: bind 441 | source: ${PWD}/config/hive/hive-site.xml 442 | target: /opt/hive/conf/hive-site.xml 443 | read_only: true 444 | 445 | volumes: 446 | hive-data: 447 | hive-jars: 448 | postgres-data: 449 | 450 | EOF 451 | 452 | docker compose -f docker-compose-hive.yml up -d 453 | ``` 454 | 455 | ### 安装 trino 456 | 457 | ```bash 458 | 459 | cat <<'EOF' > docker-compose-trino.yml 460 | version: '3.9' 461 | 462 | x-base: &default-config 463 | restart: unless-stopped 464 | ulimits: 465 | nproc: 65535 466 | nofile: 467 | soft: 20000 468 | hard: 40000 469 | stop_grace_period: 1m 470 | logging: 471 | driver: json-file 472 | options: 473 | max-size: '100m' 474 | max-file: '1' 475 | mem_swappiness: 
0 476 | env_file: 477 | - .env 478 | 479 | services: 480 | # trino server 481 | trino: 482 | image: trinodb/trino:424 483 | user: root 484 | hostname: trino 485 | << : *default-config 486 | environment: 487 | AWS_ACCESS_KEY_ID: ${LAKEHOUSE_USER} 488 | AWS_SECRET_ACCESS_KEY: ${LAKEHOUSE_PASSWORD} 489 | AWS_S3_ENDPOINT: http://minio:9000 490 | AWS_DEFAULT_REGION: us-east-1 491 | ports: 492 | - 8080:8080 493 | volumes: 494 | - ${PWD}/config/trino/catalog:/etc/trino/catalog 495 | - trino-data:/lakehouse/data 496 | - trino-iceberg-jars:/usr/lib/trino/plugin/iceberg 497 | 498 | volumes: 499 | trino-data: 500 | trino-iceberg-jars: 501 | 502 | EOF 503 | 504 | docker compose -f docker-compose-trino.yml up -d 505 | 506 | ``` 507 | 508 | ### 验证 509 | 510 | example: 511 | 512 | ```sql 513 | CREATE SCHEMA iceberg.example_schema_s3 514 | WITH (location = 's3a://lake-house/example/'); 515 | 516 | USE iceberg.example_schema_s3; 517 | 518 | CREATE TABLE example_table ( 519 | id INTEGER, 520 | name VARCHAR, 521 | age INTEGER 522 | ); 523 | 524 | INSERT INTO example_table VALUES (1, 'Alice', 32), (2, 'Bob', 28); 525 | 526 | SELECT * FROM example_table; 527 | ``` 528 | 529 | ## 手动分步骤操作 530 | 531 | ### minio 532 | 533 | ref: 534 | ref: 535 | 536 | 首先通过 docker compose 安装 minio ,然后在 minio 中创建用户,创建桶,创建 policy,绑定 policy。 537 | 538 | #### 安装 minio 539 | 540 | 安装 minio 分如下几步 541 | 542 | - 创建 docker-compose 文件 543 | - 创建环境变量文件 544 | - 启动 minio 545 | 546 | **创建 docker-compose 文件:** 547 | 548 | 创建 `docker-compose-minio.yml` 文件,增加如下内容。 549 | 550 | 根据环境考虑是否暴露 minio 的端口,如果需要外网访问,则删除 `127.0.0.1` 即可。 551 | 552 | ```yml 553 | version: "3.9" 554 | version: "3.9" 555 | 556 | x-base: &default-config 557 | restart: unless-stopped 558 | ulimits: 559 | nproc: 65535 560 | nofile: 561 | soft: 20000 562 | hard: 40000 563 | stop_grace_period: 1m 564 | logging: 565 | driver: json-file 566 | options: 567 | max-size: '100m' 568 | max-file: '1' 569 | mem_swappiness: 0 570 | env_file: 571 | - .env 572 | 573 | 
services: 574 | minio: 575 | image: quay.io/minio/minio:RELEASE.2023-08-16T20-17-30Z 576 | << : *default-config 577 | command: server /data --console-address ":9001" 578 | hostname: minio 579 | ports: 580 | - 127.0.0.1:9000:9000 581 | - 127.0.0.1:9001:9001 582 | volumes: 583 | - lakehouse-minio:/data 584 | # environment: 585 | # MINIO_ROOT_USER: minioadmin 586 | # MINIO_ROOT_PASSWORD: minioadmin 587 | healthcheck: 588 | test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] 589 | interval: 30s 590 | timeout: 20s 591 | retries: 3 592 | 593 | volumes: 594 | lakehouse-minio: 595 | 596 | ``` 597 | 598 | **配置环境变量:** 599 | 600 | 默认情况下,minio 使用 `minioadmin` 作为用户名密码,为了方便管理和避免泄漏,建议使用 `.env` 文件配置环境变量。 601 | 602 | 创建 `.env` 文件,增加如下内容。 603 | 604 | ```ini 605 | # 创建环境变量文件,配置默认用户名密码 606 | MINIO_ROOT_USER=minioadmin 607 | MINIO_ROOT_PASSWORD=minioadmin 608 | ``` 609 | 610 | **启动 minio:** 611 | 612 | ```bash 613 | 614 | # 启动 minio 615 | docker compose -f docker-compose-minio.yml up -d 616 | 617 | docker ps 618 | ``` 619 | 620 | #### manage minio 621 | 622 | 为了做到隔离,在创建问 minio 后,需要创建服务专用账户,和服务专用的桶,并对其进行访问权限关联。一下给出两种方式。 623 | 624 | **docker-compose-init-minio.yml:** 625 | 626 | ```yaml 627 | version: "3.9" 628 | services: 629 | minio-s3-init: 630 | image: quay.io/minio/minio:RELEASE.2023-08-16T20-17-30Z 631 | restart: on-failure 632 | env_file: 633 | - .env 634 | volumes: 635 | - type: bind 636 | source: ${PWD}/scripts/minio-init.sh 637 | target: /minio-init.sh 638 | entrypoint: | 639 | bash -c ' 640 | /minio-init.sh 641 | ' 642 | ``` 643 | 644 | 在前面的环境变量文件 `.env` 中增加如下内容: 645 | 646 | ```ini 647 | LAKEHOUSE_USER=trino 648 | LAKEHOUSE_PASSWORD=iNAMZLtirahV 649 | ``` 650 | 651 | 创建 `/scripts/minio-init.sh` 文件,并增加如下内容。 652 | 653 | ```bash 654 | 655 | ## config mc 656 | # update local server config for mc 657 | mc alias set local http://${MINIO_HOST:=minio}:9000 ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD} 658 | mc admin info local 659 | 660 | ## add user 661 | pwgen="tr -dc 
'[:alnum:]' < /dev/urandom | fold -w 12 | head -n 1" 662 | # access_key=trino # usernmae 663 | # secret_key=$(eval $pwgen) # eg: iNAMZLtirahV 664 | # mc admin user add local ${access_key} ${secret_key} 665 | : ${LAKEHOUSE_USER:=trino} 666 | : ${LAKEHOUSE_PASSWORD} 667 | mc admin user add local ${LAKEHOUSE_USER} ${LAKEHOUSE_PASSWORD} 668 | mc admin user list local 669 | 670 | ## add bucket 671 | : ${LAKEHOUSE_BUCKET:=lake-house} 672 | mc mb local/${LAKEHOUSE_BUCKET} 673 | mc ls local 674 | 675 | ## add policy 676 | 677 | cat < /tmp/lake_house_policy.json 678 | { 679 | "Version": "2012-10-17", 680 | "Id": "LakeHouseBuckeyPolicy", 681 | "Statement": [ 682 | { 683 | "Sid": "Stment01", 684 | "Effect": "Allow", 685 | "Action": [ 686 | "s3:GetBucketLocation", 687 | "s3:ListBucket", 688 | "s3:ListBucketMultipartUploads", 689 | "s3:ListBucketVersions", 690 | "s3:GetObject", 691 | "s3:PutObject", 692 | "s3:DeleteObject", 693 | "s3:ListMultipartUploadParts", 694 | "s3:AbortMultipartUpload" 695 | ], 696 | "Resource": [ 697 | "arn:aws:s3:::${LAKEHOUSE_BUCKET}/*", 698 | "arn:aws:s3:::${LAKEHOUSE_BUCKET}" 699 | ] 700 | } 701 | ] 702 | } 703 | EOF 704 | mc admin policy create local lake_house /tmp/lake_house_policy.json 705 | mc admin policy list local 706 | 707 | ## attach policy 708 | mc admin policy entities --user trino local | grep lake_house 709 | if [ $? 
-eq 0 ]; then 710 | echo "policy already attached" 711 | else 712 | echo "attaching policy to user" 713 | mc admin policy attach local lake_house --user ${LAKEHOUSE_USER} 714 | fi 715 | ``` 716 | 717 | 执行: 718 | 719 | ```bash 720 | docker compose -f docker-compose-init-minio.yml up 721 | 722 | # 当逻辑执行完后会自动退出 723 | ``` 724 | 725 | **手动管理:** 726 | 727 | 使用 [mc](https://min.io/docs/minio/linux/reference/minio-mc-admin.html) 命令通过命令后台管理 minio 。 728 | 729 | 主要做如下操作: 730 | 731 | - 配置 mc 命令 732 | - 添加用户 733 | - 添加 bucket 734 | - 添加 policy 735 | - 绑定 policy 736 | 737 | ```bash 738 | ## config mc 739 | # update local server config for mc 740 | mc alias set local http://localhost:9000 ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD} 741 | mc admin info local 742 | 743 | ## add user 744 | pwgen="tr -dc '[:alnum:]' < /dev/urandom | fold -w 12 | head -n 1" 745 | access_key=trino # usernmae 746 | secret_key=$(eval $pwgen) # eg: iNAMZLtirahV 747 | mc admin user add local ${access_key} ${secret_key} 748 | mc admin user list local 749 | 750 | ## add bucket 751 | mc mb local/lake-house 752 | 753 | ## add policy 754 | 755 | cat < /tmp/lake_house_policy.json 756 | { 757 | "Version": "2012-10-17", 758 | "Id": "LakeHouseBuckeyPolicy", 759 | "Statement": [ 760 | { 761 | "Sid": "Stment01", 762 | "Effect": "Allow", 763 | "Action": [ 764 | "s3:GetBucketLocation", 765 | "s3:ListBucket", 766 | "s3:ListBucketMultipartUploads", 767 | "s3:ListBucketVersions", 768 | "s3:GetObject", 769 | "s3:PutObject", 770 | "s3:DeleteObject", 771 | "s3:ListMultipartUploadParts", 772 | "s3:AbortMultipartUpload" 773 | ], 774 | "Resource": [ 775 | "arn:aws:s3:::lake-house/*", 776 | "arn:aws:s3:::lake-house" 777 | ] 778 | } 779 | ] 780 | } 781 | EOF 782 | mc admin policy create local lake_house /tmp/lake_house_policy.json 783 | mc admin policy list local 784 | 785 | ## attach policy 786 | mc admin policy attach local lake_house --user trino 787 | ``` 788 | 789 | ### hive-metastore 790 | 791 | #### hive 环境初始化 792 | 793 | 
ref: 794 | ref: [How can I use Gradle to just download JARs?](https://stackoverflow.com/a/32402694/11722440) 795 | 796 | 由于在使用 hive 时需要搭配 s3 ,所以在 hive 环境中需要有 hadoop-aws 的依赖,并增加相关配置才能 797 | 正常使用。 798 | 799 | 在增加依赖 jar 的时候使用 gradle 方案,并通过共享卷将 jar 合并后挂载到 hive 容器。 800 | 801 | 创建 `config/hive/build.gradle` 文件,并增加如下内容。 802 | 803 | ```groovy 804 | apply plugin: 'java' 805 | 806 | repositories { 807 | maven { url 'https://maven.aliyun.com/repository/public/' } 808 | mavenLocal() 809 | mavenCentral() 810 | } 811 | 812 | dependencies { 813 | implementation 'org.postgresql:postgresql:42.6.0' 814 | implementation 'org.apache.hadoop:hadoop-aws:3.3.1' 815 | implementation 'org.apache.hadoop:hadoop-client:3.3.1' 816 | } 817 | 818 | task download(type: Copy) { 819 | from configurations.runtimeClasspath 820 | into "/jars" 821 | } 822 | 823 | ``` 824 | 825 | 创建 `docker-compose-init-hive.yml` 文件,增加如下内容。 826 | 827 | ```yaml 828 | version: '3.9' 829 | 830 | services: 831 | # use gradle as init container to download pg jar for hive metastore 832 | # https://stackoverflow.com/a/32402694/11722440 833 | metastore-init-jars-download: 834 | image: gradle:8 835 | restart: on-failure 836 | volumes: 837 | - hive-jars:/jars 838 | - gradle-cache:/home/gradle/.gradle/ 839 | - type: bind # 使用 bind 挂载单个文件到容器中 840 | source: ${PWD}/config/hive/build.gradle 841 | target: /home/gradle/build.gradle 842 | command: | 843 | bash -c ' 844 | gradle download 845 | ' 846 | 847 | # merge hive jars 848 | # https://stackoverflow.com/a/32402694/11722440 849 | metastore-init-jars-merge: 850 | image: apache/hive:4.0.0-beta-1 851 | restart: on-failure 852 | user: root 853 | volumes: 854 | - hive-jars:/jars:rw 855 | entrypoint: | 856 | bash -c ' 857 | cp -R /opt/hive/lib/* /jars 858 | ' 859 | 860 | volumes: 861 | hive-jars: 862 | ``` 863 | 864 | 执行: 865 | 866 | ```bash 867 | docker compose -f docker-compose-init-hive.yml up 868 | # 当逻辑执行完后会自动退出 869 | ``` 870 | 871 | #### 安装 hive-metastore 872 | 873 | 安装 hive-metastore 
需要如下几步: 874 | 875 | - 配置环境变量 876 | - 配置 hive-site 877 | - 创建 docker-compose 文件 878 | - 启动 hive-metastore 879 | 880 | ##### 配置环境变量 881 | 882 | 由于安装 hive-metastore 是需要使用关系数据库,我们本地选择 postgresql ,为了便于管理和避免机密信息泄漏, 883 | 使用 `.env` 管理 postgresql 认证信息。 884 | 885 | 在前面创建的 `.env` 文件中增加如下内容: 886 | 887 | ```ini 888 | POSTGRES_DB=metastore_db 889 | POSTGRES_USER=hive 890 | POSTGRES_PASSWORD=hive 891 | ``` 892 | 893 | ##### 创建配置文件 894 | 895 | 创建 `config/hive/hive-site.xml` 文件,并增加如下内容: 896 | 897 | ```xml 898 | 899 | 900 | 901 | 902 | 906 | 917 | 918 | 919 | fs.s3a.connection.maximum 920 | 15 921 | Controls the maximum number of simultaneous connections to S3. 922 | 923 | 924 | 925 | fs.s3a.connection.ssl.enabled 926 | false 927 | Enables or disables SSL connections to S3. 928 | 929 | 930 | 931 | fs.s3a.endpoint 932 | http://minio:9000 933 | AWS S3 endpoint to connect to. An up-to-date list is 934 | provided in the AWS Documentation: regions and endpoints. Without this 935 | property, the standard region (s3.amazonaws.com) is assumed. 936 | 937 | 938 | 939 | 940 | fs.s3a.endpoint.region 941 | us-east-1 942 | AWS S3 region for a bucket, which bypasses the parsing of 943 | fs.s3a.endpoint to know the region. Would be helpful in avoiding errors 944 | while using privateLink URL and explicitly set the bucket region. 945 | If set to a blank string (or 1+ space), falls back to the 946 | (potentially brittle) SDK region resolution process. 947 | 948 | 949 | 950 | 951 | fs.s3a.path.style.access 952 | true 953 | Enable S3 path style access ie disabling the default virtual hosting behaviour. 954 | Useful for S3A-compliant storage providers as it removes the need to set up DNS for 955 | virtual hosting. 956 | 957 | 958 | 959 | 960 | fs.s3a.impl 961 | org.apache.hadoop.fs.s3a.S3AFileSystem 962 | The implementation class of the S3A Filesystem 963 | 964 | 965 | 966 | fs.AbstractFileSystem.s3a.impl 967 | org.apache.hadoop.fs.s3a.S3A 968 | The implementation class of the S3A AbstractFileSystem. 
969 | 970 | 971 | 972 | ``` 973 | 974 | ##### 创建 docker-compose 文件 975 | 976 | ```yaml 977 | version: "3.9" 978 | version: "3.9" 979 | 980 | x-base: &default-config 981 | restart: unless-stopped 982 | ulimits: 983 | nproc: 65535 984 | nofile: 985 | soft: 20000 986 | hard: 40000 987 | stop_grace_period: 1m 988 | logging: 989 | driver: json-file 990 | options: 991 | max-size: '100m' 992 | max-file: '1' 993 | mem_swappiness: 0 994 | env_file: 995 | - .env 996 | 997 | services: 998 | # pg server 999 | postgres: 1000 | image: postgres:15.4 1001 | << : *default-config 1002 | hostname: postgres 1003 | # ports: 1004 | # - '127.0.0.1:5432:5432' 1005 | # environment: 1006 | # POSTGRES_PASSWORD: postgres 1007 | # POSTGRES_USER: postgres 1008 | # POSTGRES_DB: postgres 1009 | volumes: 1010 | - postgres-data:/var/lib/postgresql/data 1011 | 1012 | # hive-metastore server 1013 | metastore: 1014 | image: apache/hive:4.0.0-beta-1 1015 | depends_on: 1016 | - postgres 1017 | << : *default-config 1018 | hostname: metastore 1019 | environment: 1020 | DB_DRIVER: postgres 1021 | AWS_ACCESS_KEY_ID: ${LAKEHOUSE_USER} 1022 | AWS_SECRET_ACCESS_KEY: ${LAKEHOUSE_PASSWORD} 1023 | AWS_DEFAULT_REGION: us-east-1 1024 | SERVICE_NAME: 'metastore -hiveconf hive.root.logger=INFO,console' 1025 | SERVICE_OPTS: '-Xmx1G -Djavax.jdo.option.ConnectionDriverName=org.postgresql.Driver 1026 | -Djavax.jdo.option.ConnectionURL=jdbc:postgresql://postgres:5432/${POSTGRES_DB} 1027 | -Djavax.jdo.option.ConnectionUserName=${POSTGRES_USER} 1028 | -Djavax.jdo.option.ConnectionPassword=${POSTGRES_PASSWORD} 1029 | ' 1030 | # ports: 1031 | # - '127.0.0.1:9083:9083' 1032 | volumes: 1033 | - hive-jars:/opt/hive/lib 1034 | - hive-data:/opt/hive/data 1035 | - type: bind 1036 | source: ${PWD}/config/hive/hive-site.xml 1037 | target: /opt/hive/conf/hive-site.xml 1038 | read_only: true 1039 | 1040 | volumes: 1041 | hive-data: 1042 | hive-jars: 1043 | postgres-data: 1044 | 1045 | ``` 1046 | 1047 | 这个配置文件中有几个点需要强调一下: 1048 | 1049 | 
- 启动服务的时候,hive 和 pg 会一起启动,通过使用 `.env` 文件中的环境变量。 1050 | - hive 镜像使用 4.0 版本,可以将初始化 schema 和更新一并执行, 3.x 镜像中执行 `-InitOrUpgradeSchema` 的命令是不存在的。 1051 | - 初始化 schema 的参数全部由环境变量提供,分两部分。一部分是 `SERVICE_NAME` 提供服务类型,这里有个BUG,就是可在参数后面直接接 hive 的配置,有点漏洞注入的感觉。另一部分是 `SERVICE_OPTS` 提供参数,这里提供了一些 jdbc 的参数,用来连接 pg 数据库,对于机密信息同样从环境变量获取,而不是写在 compose 文件中,这部分信息不需要写在 `hive-site.xml` 文件中。 1052 | - 前面提到的 s3 使用的配置,根据 AWS 规范可以设置成环境变量,并在使用中直接通过环境获取。 1053 | - 使用 `hive-jars` 的卷挂载前面初始化后的 jar 。 1054 | - 使用 `type: bind` 挂载指令,将单个文件挂载到指定目录。 1055 | 1056 | ##### 启动 hive-metastore 1057 | 1058 | ```bash 1059 | docker compose -f docker-compose-hive.yml up -d 1060 | ``` 1061 | 1062 | ### trino 1063 | 1064 | #### 创建配置 1065 | 1066 | 创建 `config/trino/catalog/config.properties` 文件,并增加如下内容。 1067 | 1068 | ```ini 1069 | connector.name=iceberg 1070 | hive.metastore.uri=thrift://metastore:9083 1071 | iceberg.catalog.type=hive_metastore 1072 | hive.s3.path-style-access=true 1073 | hive.s3.endpoint=http://minio:9000 1074 | hive.s3.ssl.enabled=false 1075 | hive.s3.region=us-east-1 1076 | # hive.s3.aws-access-key=trino 1077 | # hive.s3.aws-secret-key=iNAMZLtirahV 1078 | 1079 | ``` 1080 | 1081 | #### 创建 docker-compose 文件 1082 | 1083 | 创建 `docker-compose-trino.yml` 文件,并增加如下内容。 1084 | 1085 | ```yaml 1086 | version: '3.9' 1087 | 1088 | services: 1089 | # trino server 1090 | trino: 1091 | image: trinodb/trino:424 1092 | hostname: trino 1093 | restart: unless-stopped 1094 | env_file: 1095 | - .env 1096 | ports: 1097 | - 8080:8080 1098 | volumes: 1099 | - ${PWD}/config/trino/catalog:/etc/trino/catalog 1100 | - trino-data:/var/trino/data 1101 | 1102 | volumes: 1103 | trino-data: 1104 | ``` 1105 | 1106 | #### 启动 trino 1107 | 1108 | ```bash 1109 | docker compose -f docker-compose-trino.yml up -d 1110 | ``` 1111 | 1112 | #### 测试 1113 | 1114 | example: 1115 | 1116 | ```sql 1117 | CREATE SCHEMA iceberg.example_schema_s3 1118 | WITH (location = 's3a://lake-house/example/'); 1119 | 1120 | USE iceberg.example_schema_s3; 1121 | 1122 | 
CREATE TABLE example_table ( 1123 | id INTEGER, 1124 | name VARCHAR, 1125 | age INTEGER 1126 | ); 1127 | 1128 | INSERT INTO example_table VALUES (1, 'Alice', 32), (2, 'Bob', 28); 1129 | 1130 | SELECT * FROM example_table; 1131 | 1132 | ``` 1133 | 1134 | ## 不使用 s3 1135 | 1136 | 考虑到使用 s3 时,minio 本身会消耗性能,同时在 hive 基于 s3 对象读写时也会消耗一定的性能,在实际方案中可以酌情选择移除 s3 ,让 1137 | 湖仓方案的数据保存在 trino 的本地目录。 1138 | 1139 | 一下操作方式不在进行分布详细阐述部署流程。 1140 | 1141 | ### 准备文件 1142 | 1143 | ```bash 1144 | 1145 | mkdir -p config/trino/catalog 1146 | 1147 | cat <<\EOF > config/trino/catalog/iceberg.properties 1148 | connector.name=iceberg 1149 | iceberg.catalog.type=hive_metastore 1150 | hive.metastore.uri=thrift://metastore:9083 1151 | EOF 1152 | 1153 | # env 1154 | cat <<\EOF > .env 1155 | POSTGRES_DB=metastore_db 1156 | POSTGRES_USER=hive 1157 | POSTGRES_PASSWORD=hive 1158 | EOF 1159 | 1160 | # gradle 1161 | mkdir -p config/hive 1162 | cat < config/hive/build.gradle 1163 | apply plugin: 'base' 1164 | 1165 | repositories { 1166 | maven { url 'https://maven.aliyun.com/repository/public/' } 1167 | mavenLocal() 1168 | mavenCentral() 1169 | } 1170 | 1171 | configurations { 1172 | toCopy 1173 | } 1174 | 1175 | dependencies { 1176 | toCopy 'org.postgresql:postgresql:42.6.0' 1177 | toCopy 'org.apache.hadoop:hadoop-aws:3.3.1' 1178 | toCopy 'org.apache.hadoop:hadoop-client:3.3.1' 1179 | } 1180 | 1181 | task download(type: Copy) { 1182 | from configurations.toCopy 1183 | into '/jars' 1184 | } 1185 | EOF 1186 | ``` 1187 | 1188 | ### 初始化环境 1189 | 1190 | ```bash 1191 | cat <<\EOF > docker-compose-init.yml 1192 | version: '3.9' 1193 | 1194 | services: 1195 | # use gradle as init container to download hive jars 1196 | # https://stackoverflow.com/a/32402694/11722440 1197 | metastore-init-jars-download: 1198 | image: gradle:8 1199 | restart: on-failure 1200 | volumes: 1201 | - type: volume 1202 | source: lakehouse-hive-jars 1203 | target: /jars 1204 | - type: bind # 使用 bind 挂载单个文件到容器中 1205 | source: 
${PWD}/config/hive/build.gradle 1206 | target: /home/gradle/build.gradle 1207 | command: | 1208 | bash -c ' 1209 | gradle download 1210 | ' 1211 | 1212 | # merge hive jars 1213 | # https://stackoverflow.com/a/32402694/11722440 1214 | metastore-init-jars-merge: 1215 | image: apache/hive:4.0.0-beta-1 1216 | restart: on-failure 1217 | user: root 1218 | volumes: 1219 | - lakehouse-hive-jars:/jars:rw 1220 | entrypoint: | 1221 | bash -c ' 1222 | cp -R /opt/hive/lib/* /jars 1223 | ' 1224 | 1225 | volumes: 1226 | lakehouse-hive-jars: 1227 | EOF 1228 | 1229 | docker compose -f docker-compose-init.yml up 1230 | 1231 | ``` 1232 | 1233 | ### 安装 hive-metastore 1234 | 1235 | ```bash 1236 | cat <<\EOF > docker-compose-hive.yml 1237 | version: "3.9" 1238 | services: 1239 | # pg server 1240 | postgres: 1241 | image: postgres:15.4 1242 | restart: unless-stopped 1243 | hostname: postgres 1244 | env_file: 1245 | - .env 1246 | # ports: 1247 | # - '127.0.0.1:5432:5432' 1248 | # environment: 1249 | # POSTGRES_PASSWORD: postgres 1250 | # POSTGRES_USER: postgres 1251 | # POSTGRES_DB: postgres 1252 | volumes: 1253 | - lakehouse-postgres:/var/lib/postgresql/data 1254 | 1255 | # hive-metastore server 1256 | metastore: 1257 | image: apache/hive:4.0.0-beta-1 1258 | depends_on: 1259 | - postgres 1260 | restart: unless-stopped 1261 | hostname: metastore 1262 | env_file: 1263 | - .env 1264 | # ports: 1265 | # - '127.0.0.1:9083:9083' 1266 | environment: 1267 | DB_DRIVER: postgres 1268 | SERVICE_NAME: 'metastore -hiveconf hive.root.logger=INFO,console' 1269 | SERVICE_OPTS: '-Xmx1G -Djavax.jdo.option.ConnectionDriverName=org.postgresql.Driver 1270 | -Djavax.jdo.option.ConnectionURL=jdbc:postgresql://postgres:5432/${POSTGRES_DB} 1271 | -Djavax.jdo.option.ConnectionUserName=${POSTGRES_USER} 1272 | -Djavax.jdo.option.ConnectionPassword=${POSTGRES_PASSWORD} 1273 | ' 1274 | volumes: 1275 | - lakehouse-hive-jars:/opt/hive/lib 1276 | 1277 | volumes: 1278 | lakehouse-hive-jars: 1279 | lakehouse-postgres: 
1280 | EOF 1281 | 1282 | docker compose -f docker-compose-hive.yml up -d 1283 | 1284 | ``` 1285 | 1286 | ### 安装 trino 1287 | 1288 | 在启动 trino 的时候,需要使用特权用户,否则对于挂载的数据目录没有写入权限。 1289 | 1290 | ```bash 1291 | 1292 | cat <<'EOF' > docker-compose-trino.yml 1293 | version: '3.9' 1294 | 1295 | services: 1296 | # trino server 1297 | trino: 1298 | image: trinodb/trino:424 1299 | user: root 1300 | hostname: trino 1301 | restart: unless-stopped 1302 | env_file: 1303 | - .env 1304 | ports: 1305 | - 8080:8080 1306 | volumes: 1307 | - ${PWD}/config/trino/catalog:/etc/trino/catalog 1308 | - trino-data:/var/trino/data 1309 | 1310 | volumes: 1311 | trino-data: 1312 | 1313 | EOF 1314 | 1315 | docker compose -f docker-compose-trino.yml up -d 1316 | 1317 | ``` 1318 | 1319 | 说明 1320 | 1321 | 在使用不使用 s3 的方案下, 湖仓数据是由 trino 写入到 trino 容器的本地目录的。但是由于 hive 的原因,依然会在 hive 的本地目录创建相同的 1322 | 目录,但不会保存实际数据。 1323 | 1324 | ### 验证 1325 | 1326 | ```sql 1327 | 1328 | CREATE SCHEMA iceberg.example_schema_local; 1329 | 1330 | USE iceberg.example_schema_local; 1331 | 1332 | CREATE TABLE example_table ( 1333 | id INTEGER, 1334 | name VARCHAR, 1335 | age INTEGER 1336 | ); 1337 | 1338 | INSERT INTO example_table VALUES (1, 'Alice', 32), (2, 'Bob', 28); 1339 | 1340 | SELECT * FROM example_table; 1341 | ``` 1342 | 1343 | ### 测试 1344 | 1345 | ```bash 1346 | trino --catalog iceberg --debug 1347 | trino --catalog hive --debug 1348 | ``` 1349 | 1350 | ```sql 1351 | CREATE SCHEMA iceberg_test; 1352 | USE iceberg_test; 1353 | CREATE TABLE iceberg_t2 (name varchar, age int, id int) 1354 | WITH (location = 'alluxio://trino-iceberg-master-1:19998/lake-house/iceberg_t2', format = 'parquet'); 1355 | 1356 | CREATE TABLE iceberg_s3_t1 (name varchar, age int, id int) 1357 | WITH (location = 's3a://lake-house/data/iceberg_s3_t1', format = 'parquet'); 1358 | 1359 | CREATE SCHEMA hive_test; 1360 | USE hive_test; 1361 | CREATE TABLE hive_t1 (name varchar, age int, id int) 1362 | WITH (external_location = 's3a://lake-house/hive_t1', 
format = 'parquet'); 1363 | 1364 | CREATE SCHEMA hive_s3_schema 1365 | WITH (location = 's3a://lake-house/hive-schema/'); 1366 | `` 1367 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/config/hive/build.gradle: -------------------------------------------------------------------------------- 1 | apply plugin: 'java' 2 | 3 | repositories { 4 | maven { url 'https://maven.aliyun.com/repository/public/' } 5 | mavenLocal() 6 | mavenCentral() 7 | } 8 | 9 | dependencies { 10 | implementation 'org.postgresql:postgresql:42.6.0' 11 | implementation 'org.apache.hadoop:hadoop-aws:3.3.1' 12 | implementation 'org.apache.hadoop:hadoop-client:3.3.1' 13 | } 14 | 15 | task download(type: Copy) { 16 | from configurations.runtimeClasspath 17 | into "/jars" 18 | } 19 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/config/hive/hive-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 7 | 18 | 23 | 27 | 28 | fs.s3a.connection.maximum 29 | 15 30 | Controls the maximum number of simultaneous connections to S3. 31 | 32 | 33 | 34 | fs.s3a.connection.ssl.enabled 35 | false 36 | Enables or disables SSL connections to S3. 37 | 38 | 39 | 40 | fs.s3a.endpoint 41 | http://minio:9000 42 | AWS S3 endpoint to connect to. An up-to-date list is 43 | provided in the AWS Documentation: regions and endpoints. Without this 44 | property, the standard region (s3.amazonaws.com) is assumed. 45 | 46 | 47 | 48 | 49 | fs.s3a.endpoint.region 50 | us-east-1 51 | AWS S3 region for a bucket, which bypasses the parsing of 52 | fs.s3a.endpoint to know the region. Would be helpful in avoiding errors 53 | while using privateLink URL and explicitly set the bucket region. 54 | If set to a blank string (or 1+ space), falls back to the 55 | (potentially brittle) SDK region resolution process. 
56 | 57 | 58 | 59 | 60 | fs.s3a.path.style.access 61 | true 62 | Enable S3 path style access ie disabling the default virtual hosting behaviour. 63 | Useful for S3A-compliant storage providers as it removes the need to set up DNS for 64 | virtual hosting. 65 | 66 | 67 | 68 | 69 | fs.s3a.impl 70 | org.apache.hadoop.fs.s3a.S3AFileSystem 71 | The implementation class of the S3A Filesystem 72 | 73 | 74 | 75 | fs.AbstractFileSystem.s3a.impl 76 | org.apache.hadoop.fs.s3a.S3A 77 | The implementation class of the S3A AbstractFileSystem. 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/config/trino/build.gradle: -------------------------------------------------------------------------------- 1 | apply plugin: 'java' 2 | 3 | repositories { 4 | maven { url 'https://maven.aliyun.com/repository/public/' } 5 | mavenLocal() 6 | mavenCentral() 7 | } 8 | 9 | // https://iceberg.apache.org/docs/1.5.1/aws/ 10 | // https://iceberg.apache.org/docs/1.5.1/aws/#hadoop-s3a-filesystem 11 | dependencies { 12 | implementation 'org.postgresql:postgresql:42.6.0' 13 | implementation 'org.apache.iceberg:iceberg-aws-bundle:1.5.2' 14 | implementation 'org.apache.iceberg:iceberg-spark-runtime-3.4_2.12:1.5.2' 15 | } 16 | 17 | task download(type: Copy) { 18 | from configurations.runtimeClasspath 19 | into "/jars" 20 | } 21 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/config/trino/etc/catalog/hives3.properties: -------------------------------------------------------------------------------- 1 | # hive s3 2 | connector.name=hives3 3 | hive.metastore.uri=thrift://metastore:9083 4 | 5 | ###################################################################################### 6 | ## S3 configuration 7 | hive.s3.path-style-access=true 8 | # trino hive connect does not support aws s3 environment variables to set credentials 9 | # but we can use trino environment variables 10 
| hive.s3.aws-access-key=${ENV:AWS_ACCESS_KEY_ID} 11 | hive.s3.aws-secret-key=${ENV:LAKEHOUSE_PASSWORD} 12 | hive.s3.endpoint=${ENV:AWS_S3_ENDPOINT} 13 | hive.s3.ssl.enabled=false 14 | hive.s3.region=${ENV:AWS_DEFAULT_REGION} 15 | # after hive 4.0, to enable non-managed table writes 16 | hive.non-managed-table-writes-enabled=true 17 | ## End of S3 configuration 18 | ###################################################################################### 19 | 20 | 21 | ###################################################################################### 22 | ## Kerberos configuration 23 | hive.metastore.authentication.type=KERBEROS 24 | hive.metastore.thrift.impersonation.enabled=true 25 | hive.metastore.service.principal=hive/_HOST@EXAMPLE.COM 26 | hive.metastore.client.principal=trino/_HOST@EXAMPLE.COM 27 | # please add keytab file to the path below 28 | hive.metastore.client.keytab=/etc/security/keytab/trino.keytab 29 | ## End of Kerberos configuration 30 | ###################################################################################### 31 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/config/trino/etc/catalog/iceberg.properties: -------------------------------------------------------------------------------- 1 | connector.name=iceberg 2 | hive.metastore.uri=thrift://metastore:9083 3 | iceberg.catalog.type=hive_metastore 4 | hive.s3.path-style-access=true 5 | hive.s3.endpoint=http://minio:9000 6 | hive.s3.ssl.enabled=false 7 | hive.s3.region=us-east-1 8 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/config/trino/etc/config.properties: -------------------------------------------------------------------------------- 1 | coordinator=true 2 | node-scheduler.include-coordinator=true 3 | 4 | 5 | http-server.http.port=8080 6 | discovery.uri=http://localhost:8080 7 | 8 | 9 | 
###################################################################################### 10 | ## TLS configuration 11 | http-server.https.enabled=true 12 | http-server.https.port=8443 13 | # please add pem file to the path below 14 | http-server.https.keystore.path=/etc/trino/server.pem 15 | 16 | # internal communication 17 | internal-communication.https.required=false 18 | internal-communication.shared-secret=${ENV:TRINO_SHARED_SECRET} 19 | ## End of TLS configuration 20 | ###################################################################################### 21 | 22 | ###################################################################################### 23 | ## Kerberos configuration 24 | ## Note: Trino kerberos require tls enabled 25 | ## Note: Please make sure the keytab file is available in the path below 26 | http-server.authentication.type=KERBEROS 27 | http-server.authentication.krb5.service-name=trino 28 | http-server.authentication.krb5.principal-hostname=trino.example.com 29 | http-server.authentication.krb5.keytab=/etc/security/keytab/trinoservice.keytab 30 | http.authentication.krb5.config=/etc/krb5.conf 31 | ## End of Kerberos configuration 32 | ###################################################################################### -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/docker-compose-hive.yml: -------------------------------------------------------------------------------- 1 | x-base: &default-config 2 | restart: unless-stopped 3 | ulimits: 4 | nproc: 65535 5 | nofile: 6 | soft: 20000 7 | hard: 40000 8 | stop_grace_period: 1m 9 | logging: 10 | driver: json-file 11 | options: 12 | max-size: '100m' 13 | max-file: '1' 14 | mem_swappiness: 0 15 | env_file: 16 | - .env 17 | 18 | services: 19 | # pg server 20 | postgres: 21 | image: postgres:15.4 22 | << : *default-config 23 | hostname: postgres 24 | domainname: postgres.example.com 25 | # ports: 26 | # - '127.0.0.1:5432:5432' 27 | # 
environment: 28 | # POSTGRES_PASSWORD: postgres 29 | # POSTGRES_USER: postgres 30 | # POSTGRES_DB: postgres 31 | volumes: 32 | - postgres-data:/var/lib/postgresql/data 33 | 34 | # hive-metastore server 35 | metastore: 36 | image: apache/hive:4.0.0 37 | depends_on: 38 | - postgres 39 | << : *default-config 40 | hostname: metastore 41 | domainname: metastore.example.com 42 | environment: 43 | DB_DRIVER: postgres 44 | AWS_ACCESS_KEY_ID: ${LAKEHOUSE_USER} 45 | AWS_SECRET_ACCESS_KEY: ${LAKEHOUSE_PASSWORD} 46 | AWS_DEFAULT_REGION: us-east-1 47 | SERVICE_NAME: 'metastore -hiveconf hive.root.logger=INFO,console' 48 | SERVICE_OPTS: '-Xmx1G -Djavax.jdo.option.ConnectionDriverName=org.postgresql.Driver 49 | -Djavax.jdo.option.ConnectionURL=jdbc:postgresql://postgres:5432/${POSTGRES_DB} 50 | -Djavax.jdo.option.ConnectionUserName=${POSTGRES_USER} 51 | -Djavax.jdo.option.ConnectionPassword=${POSTGRES_PASSWORD} 52 | ' 53 | # ports: 54 | # - '127.0.0.1:9083:9083' 55 | volumes: 56 | - hive-jars:/opt/hive/lib 57 | - hive-data:/opt/hive/data 58 | - type: bind 59 | source: ${PWD}/config/hive/hive-site.xml 60 | target: /opt/hive/conf/hive-site.xml 61 | read_only: true 62 | healthcheck: 63 | test: ps -ef | grep "/opt/hive/lib/hive-metastore-4.0.0.jar org.apache.hadoop.hive.metastore.HiveMetaStore" | awk '{print $1}' | grep -w 'hive' 64 | interval: 1m30s 65 | timeout: 30s 66 | retries: 5 67 | start_period: 30s 68 | 69 | volumes: 70 | hive-data: 71 | hive-jars: 72 | postgres-data: 73 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/docker-compose-init.yml: -------------------------------------------------------------------------------- 1 | services: 2 | # use gradle as init container to download pg jar for hive metastore 3 | # https://stackoverflow.com/a/32402694/11722440 4 | metastore-init-jars-download: 5 | image: gradle:8 6 | restart: on-failure 7 | volumes: 8 | - hive-jars:/jars 9 | - gradle-cache:/home/gradle/.gradle/ 10 | - 
type: bind # 使用 bind 挂载单个文件到容器中 11 | source: ${PWD}/config/hive/build.gradle 12 | target: /home/gradle/build.gradle 13 | command: | 14 | bash -c ' 15 | gradle download 16 | ' 17 | 18 | trino-init-jars-download: 19 | image: gradle:8 20 | restart: on-failure 21 | volumes: 22 | - trino-jars:/jars 23 | - gradle-cache:/home/gradle/.gradle/ 24 | - type: bind # 使用 bind 挂载单个文件到容器中 25 | source: ${PWD}/config/trino/build.gradle 26 | target: /home/gradle/build.gradle 27 | command: | 28 | bash -c ' 29 | gradle download 30 | ' 31 | 32 | # merge hive jars 33 | # https://stackoverflow.com/a/32402694/11722440 34 | metastore-init-jars-merge: 35 | image: apache/hive:4.0.0-beta-1 36 | restart: on-failure 37 | user: root 38 | volumes: 39 | - hive-jars:/jars:rw 40 | entrypoint: | 41 | bash -c ' 42 | cp -R /opt/hive/lib/* /jars 43 | ' 44 | 45 | # due to mount volume will override the original content, so we need to copy the jars to another volume 46 | # and then mount the volume to trino container 47 | trino-init-jars-merge: 48 | image: trinodb/trino:452 49 | restart: on-failure 50 | user: root 51 | volumes: 52 | - type: volume 53 | source: trino-jars 54 | target: /jars 55 | entrypoint: | 56 | bash -c ' 57 | cp /usr/lib/trino/plugin/iceberg/* /jars 58 | ' 59 | 60 | init-certs: 61 | image: rockylinux/rockylinux:9 62 | restart: on-failure 63 | volumes: 64 | - type: bind 65 | source: ${PWD}/scripts/init-certs.sh 66 | target: /init-certs.sh 67 | - certs:/certs 68 | entrypoint: | 69 | bash -c ' 70 | cd /certs 71 | /init-certs.sh 72 | ' 73 | 74 | # minio s3 init 75 | minio-init: 76 | image: quay.io/minio/minio:RELEASE.2023-08-16T20-17-30Z 77 | restart: on-failure 78 | env_file: 79 | - .env 80 | volumes: 81 | - type: bind 82 | source: ${PWD}/scripts/minio-init.sh 83 | target: /minio-init.sh 84 | entrypoint: | 85 | bash -c ' 86 | /minio-init.sh 87 | ' 88 | 89 | volumes: 90 | hive-jars: 91 | trino-jars: 92 | certs: 93 | gradle-cache: 94 | 
-------------------------------------------------------------------------------- /lakehouse/trino-iceberg/docker-compose-minio.yml: -------------------------------------------------------------------------------- 1 | x-base: &default-config 2 | restart: unless-stopped 3 | ulimits: 4 | nproc: 65535 5 | nofile: 6 | soft: 20000 7 | hard: 40000 8 | stop_grace_period: 1m 9 | logging: 10 | driver: json-file 11 | options: 12 | max-size: '100m' 13 | max-file: '1' 14 | mem_swappiness: 0 15 | env_file: 16 | - .env 17 | 18 | services: 19 | minio: 20 | image: quay.io/minio/minio:RELEASE.2023-08-16T20-17-30Z 21 | << : *default-config 22 | command: server /data --console-address ":9001" 23 | hostname: minio 24 | domainname: minio.example.com 25 | ports: 26 | - 127.0.0.1:9000:9000 27 | - 127.0.0.1:9001:9001 28 | volumes: 29 | - lakehouse-minio:/data 30 | # environment: 31 | # MINIO_ROOT_USER: minioadmin 32 | # MINIO_ROOT_PASSWORD: minioadmin 33 | healthcheck: 34 | test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] 35 | interval: 30s 36 | timeout: 20s 37 | retries: 3 38 | 39 | volumes: 40 | minio: 41 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/docker-compose-trino.yml: -------------------------------------------------------------------------------- 1 | x-base: &default-config 2 | restart: unless-stopped 3 | ulimits: 4 | nproc: 65535 5 | nofile: 6 | soft: 20000 7 | hard: 40000 8 | stop_grace_period: 1m 9 | logging: 10 | driver: json-file 11 | options: 12 | max-size: '100m' 13 | max-file: '1' 14 | mem_swappiness: 0 15 | env_file: 16 | - .env 17 | 18 | services: 19 | # trino server 20 | trino: 21 | image: trinodb/trino:424 22 | user: root 23 | hostname: trino 24 | domainname: trino.example.com 25 | << : *default-config 26 | environment: 27 | AWS_ACCESS_KEY_ID: ${LAKEHOUSE_USER} 28 | AWS_SECRET_ACCESS_KEY: ${LAKEHOUSE_PASSWORD} 29 | AWS_S3_ENDPOINT: http://minio:9000 30 | AWS_DEFAULT_REGION: us-east-1 
31 | ports: 32 | - 8080:8080 33 | - 8443:8443 34 | volumes: 35 | - ${PWD}/config/trino/catalog:/etc/trino/catalog 36 | - trino-data:/lakehouse/data 37 | - trino-iceberg-jars:/usr/lib/trino/plugin/iceberg 38 | 39 | volumes: 40 | trino-data: 41 | trino-iceberg-jars: 42 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/scripts/certs.sh: -------------------------------------------------------------------------------- 1 | 2 | # Generate a private key for the CA 3 | openssl req \ 4 | -x509 \ 5 | -nodes \ 6 | -newkey rsa:4096 \ 7 | -sha256 \ 8 | -days 36500 \ 9 | -keyout ca.key \ 10 | -out ca.crt \ 11 | -subj "/C=CN/ST=ShangHai/L=ShangHai/O=example.com/CN=CA private example.com" \ 12 | -addext "subjectKeyIdentifier=hash" \ 13 | -addext "authorityKeyIdentifier=keyid:always,issuer" \ 14 | -addext "keyUsage=digitalSignature,nonRepudiation,keyEncipherment,keyAgreement" \ 15 | -addext "basicConstraints=critical,CA:true" 16 | 17 | # Generate a private key for the server 18 | openssl req \ 19 | -x509 \ 20 | -nodes \ 21 | -newkey rsa:4096 \ 22 | -sha256 \ 23 | -days 36500 \ 24 | -CA ca.crt \ 25 | -CAkey ca.key \ 26 | -keyout trino.example.com.key \ 27 | -out trino.example.com.crt \ 28 | -subj "/C=CN/ST=ShangHai/L=ShangHai/O=example.com/CN=trino.example.com" \ 29 | -addext "subjectKeyIdentifier=hash" \ 30 | -addext "authorityKeyIdentifier=keyid:always,issuer" \ 31 | -addext "keyUsage=digitalSignature,nonRepudiation,keyEncipherment,keyAgreement" \ 32 | -addext "basicConstraints=critical,CA:false" \ 33 | -addext "subjectAltName=DNS:trino.example.com" 34 | -------------------------------------------------------------------------------- /lakehouse/trino-iceberg/scripts/minio-init.sh: -------------------------------------------------------------------------------- 1 | 2 | ## config mc 3 | # update local server config for mc 4 | mc alias set local http://${MINIO_HOST:=minio}:9000 ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD} 5 | mc 
admin info local 6 | 7 | ## add user 8 | pwgen="tr -dc '[:alnum:]' < /dev/urandom | fold -w 12 | head -n 1" 9 | # access_key=trino # usernmae 10 | # secret_key=$(eval $pwgen) # eg: iNAMZLtirahV 11 | # mc admin user add local ${access_key} ${secret_key} 12 | : ${LAKEHOUSE_USER:=trino} 13 | : ${LAKEHOUSE_PASSWORD} 14 | mc admin user add local ${LAKEHOUSE_USER} ${LAKEHOUSE_PASSWORD} 15 | mc admin user list local 16 | 17 | ## add bucket 18 | : ${LAKEHOUSE_BUCKET:=lake-house} 19 | mc mb local/${LAKEHOUSE_BUCKET} 20 | mc ls local 21 | 22 | ## add policy 23 | 24 | cat < /tmp/lake_house_policy.json 25 | { 26 | "Version": "2012-10-17", 27 | "Id": "LakeHouseBuckeyPolicy", 28 | "Statement": [ 29 | { 30 | "Sid": "Stment01", 31 | "Effect": "Allow", 32 | "Action": [ 33 | "s3:GetBucketLocation", 34 | "s3:ListBucket", 35 | "s3:ListBucketMultipartUploads", 36 | "s3:ListBucketVersions", 37 | "s3:GetObject", 38 | "s3:PutObject", 39 | "s3:DeleteObject", 40 | "s3:ListMultipartUploadParts", 41 | "s3:AbortMultipartUpload" 42 | ], 43 | "Resource": [ 44 | "arn:aws:s3:::${LAKEHOUSE_BUCKET}/*", 45 | "arn:aws:s3:::${LAKEHOUSE_BUCKET}" 46 | ] 47 | } 48 | ] 49 | } 50 | EOF 51 | mc admin policy create local lake_house /tmp/lake_house_policy.json 52 | mc admin policy list local 53 | 54 | ## attach policy 55 | mc admin policy entities --user trino local | grep lake_house 56 | if [ $? 
-eq 0 ]; then 57 | echo "policy already attached" 58 | else 59 | echo "attaching policy to user" 60 | mc admin policy attach local lake_house --user ${LAKEHOUSE_USER} 61 | fi 62 | -------------------------------------------------------------------------------- /ldap/lldap/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | 2 | x-base: &default-config 3 | restart: unless-stopped 4 | ulimits: 5 | nproc: 65535 6 | nofile: 7 | soft: 20000 8 | hard: 40000 9 | stop_grace_period: 1m 10 | logging: 11 | driver: json-file 12 | options: 13 | max-size: '100m' 14 | max-file: '1' 15 | mem_swappiness: 0 16 | 17 | volumes: 18 | lldap_data: 19 | 20 | services: 21 | lldap: 22 | image: lldap/lldap:stable 23 | << : *default-config 24 | ports: 25 | # For LDAP 26 | - "3890:3890" 27 | # For LDAPS (LDAP Over SSL), enable port if LLDAP_LDAPS_OPTIONS__ENABLED set true, look env below 28 | #- "6360:6360" 29 | # For the web front-end 30 | - "17170:17170" 31 | volumes: 32 | - "lldap_data:/data" 33 | # Alternatively, you can mount a local folder 34 | # - "./lldap_data:/data" 35 | environment: 36 | - TZ=Asia/Shanghai 37 | - LLDAP_JWT_SECRET=foo 38 | - LLDAP_LDAP_USER_PASS="11111111" 39 | - LLDAP_LDAP_BASE_DN=dc=example,dc=com 40 | # If using LDAPS, set enabled true and configure cert and key path 41 | # - LLDAP_LDAPS_OPTIONS__ENABLED=true 42 | # - LLDAP_LDAPS_OPTIONS__CERT_FILE=/path/to/certfile.crt 43 | # - LLDAP_LDAPS_OPTIONS__KEY_FILE=/path/to/keyfile.key 44 | # You can also set a different database: 45 | # - LLDAP_DATABASE_URL=mysql://mysql-user:password@mysql-server/my-database 46 | # - LLDAP_DATABASE_URL=postgres://postgres-user:password@postgres-server/my-database 47 | -------------------------------------------------------------------------------- /ldap/openldap/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | 2 | services: 3 | openldap: 4 | image: bitnami/openldap:2 5 | ports: 
6 | - '1389:1389' 7 | - '1636:1636' 8 | environment: 9 | - LDAP_ADMIN_USERNAME=admin 10 | - LDAP_ADMIN_PASSWORD=adminpassword 11 | - LDAP_USERS=user01,user02 12 | - LDAP_PASSWORDS=password1,password2 13 | networks: 14 | - my-network 15 | volumes: 16 | - 'openldap_data:/bitnami/openldap' 17 | myapp: 18 | image: 'YOUR_APPLICATION_IMAGE' 19 | networks: 20 | - my-network 21 | volumes: 22 | openldap_data: 23 | driver: local 24 | 25 | networks: 26 | other: 27 | -------------------------------------------------------------------------------- /logstash/README.md: -------------------------------------------------------------------------------- 1 | # Logstach 2 | 3 | ## Docker 部署相关引用 4 | 5 | - [Docker Official Images](https://hub.docker.com/_/logstash) 6 | - [Running Logstash on Docker](https://www.elastic.co/guide/en/logstash/current/docker.html) 7 | - [Configuring Logstash for Docker](https://www.elastic.co/guide/en/logstash/current/docker-config.html) 8 | - [Directory Layout of Docker Images](https://www.elastic.co/guide/en/logstash/current/dir-layout.html#docker-layout) 9 | - [Logstash Configuration Files](https://www.elastic.co/guide/en/logstash/current/config-setting-files.html) 10 | - [logstash.yml](https://www.elastic.co/guide/en/logstash/current/logstash-settings-file.html) 11 | 12 | ## 配置 13 | 14 | Logstash 的配置包含两部分,一个是 `settings` 用来配置 Logstash 本身的,一个是 `conf` 用来配置任务的。 15 | 16 | - `settings` 的配置默认位置在 `/usr/share/logstash/config` ,里面包含 `logstash.yml` 和 `jvm.options` 17 | - `conf` 的默认位置在 `/usr/share/logstash/pipeline` ,里面默认是什么都没有的。你需要根据实际情况创建自己的配置。 18 | 19 | 如果你需要配置多工作流位置,需要在增加 `settings` 配置,即在 `/usr/share/logstash/config` 下新建 `pipelines.yml` 文件,增加自己的配置。 20 | 更多细节请查看 [Multiple Pipelines](https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html) 21 | 22 | ### Pipeline 配置 23 | 24 | 关于配置工作流的配置,可以查看 [Logstash Configuration Examples](https://www.elastic.co/guide/en/logstash/current/config-examples.html) 。 25 | 26 | 下面是一个简单的配置: 27 | 28 | ```conf 29 | input { 
30 | tcp { 31 | port => 5000 32 | type => syslog 33 | } 34 | udp { 35 | port => 5000 36 | type => syslog 37 | } 38 | } 39 | 40 | filter { 41 | if [type] == "syslog" { 42 | grok { 43 | match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" } 44 | add_field => [ "received_at", "%{@timestamp}" ] 45 | add_field => [ "received_from", "%{host}" ] 46 | } 47 | date { 48 | match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] 49 | } 50 | } 51 | } 52 | 53 | output { 54 | elasticsearch { hosts => ["localhost:9200"] } 55 | stdout { codec => rubydebug } 56 | } 57 | ``` 58 | 59 | 此示例配置通过监听 `tcp/5000` 端口,接收日志系统日志数据,然后写入 ES 。在实际使用过程中需要注意如下几点: 60 | 61 | - 通过查看[`input-tcp` 插件文档](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-tcp.html#plugins-inputs-tcp-host) 得知,该插件会默认监听 62 | `0.0.0.0:5000` 。所以在 Docker 中使用时需要在 Logstash 容器所在子网中,通过主机名访问该端口写入数据。或者将此端口映射到宿主机,然后通过宿主机IP和映射的端口访问。 63 | - 上述例子中的 `output-elasticsearch` 插件使用的地址是 `localhost` ,你应该根据实际情况修改,以便能正确写入到 ES 中。 64 | -------------------------------------------------------------------------------- /logstash/config/logstash.yml: -------------------------------------------------------------------------------- 1 | http: 2 | host: "0.0.0.0" # Default: 127.0.0.1 3 | port: 9600 # Default 4 | 5 | xpack: 6 | monitoring: 7 | elasticsearch: 8 | hosts: 9 | - http://es7:9200 -------------------------------------------------------------------------------- /logstash/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | logstash: 20 | image: 
logstash:7.11.2 21 | container_name: logstash7 22 | << : *default-config 23 | volumes: 24 | - ${PWD}/pipeline:/usr/share/logstash/pipeline 25 | - ${PWD}/config/logstash.yml:/usr/share/logstash/config/logstash.yml 26 | networks: 27 | - other 28 | - db 29 | ports: 30 | - 9600:9600 31 | - 9601:9601/tcp 32 | - 9601:9601/udp 33 | - 9602:9602 34 | 35 | networks: 36 | other: 37 | external: true 38 | name: other 39 | db: 40 | external: true 41 | name: db 42 | -------------------------------------------------------------------------------- /logstash/pipeline/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whg517/docker-compose/34142cc934712e70ad7d4235114bd4c4a3a9a065/logstash/pipeline/.gitkeep -------------------------------------------------------------------------------- /mariadb/README.md: -------------------------------------------------------------------------------- 1 | # mysql 5.7 docker-compose.yml 2 | 3 | :ref [mysql](https://hub.docker.com/_/mariadb) 4 | 5 | ## 使用 6 | 7 | 启动前在 `docker-compose.yml` 同级新建 `.env` 文件,增加如下内容。 8 | 9 | ```env 10 | MYSQL_ROOT_PASSWORD=yourpassword 11 | ``` 12 | 13 | 默认情况下使用 `/data/mysql` 作为 mysql 数据目录。 14 | 默认情况下使用当前目录下的 `./conf/` 下的配置作为 mysql 的配置目录。 15 | -------------------------------------------------------------------------------- /mariadb/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | db: 20 | image: mariadb:10 21 | << : *default-config 22 | env_file: ./.env 23 | security_opt: 24 | - seccomp=unconfined 25 | ports: 26 | - 3306:3306 27 | networks: 28 | - db 29 | volumes: 30 | - 
/data/docker/mysql/data:/var/lib/mysql:rw 31 | - /data/doker/mysql/mysql-files:/var/lib/mysql-files:rw 32 | - ${PWD}/conf:/etc/mysql/conf.d 33 | 34 | adminer: 35 | image: adminer 36 | << : *default-config 37 | networks: 38 | - app 39 | ports: 40 | - 8008:8080 41 | 42 | networks: 43 | db: 44 | external: true 45 | name: db 46 | app: 47 | external: true 48 | name: app 49 | -------------------------------------------------------------------------------- /minio/README.md: -------------------------------------------------------------------------------- 1 | # minio 2 | 3 | ref: 4 | 5 | ## Usage 6 | 7 | default auth: `minioadmin:minioadmin` 8 | 9 | ## api 10 | 11 | s3-api: 12 | console: 13 | -------------------------------------------------------------------------------- /minio/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | minio: 20 | image: 'minio/minio' 21 | container_name: minio 22 | << : *default-config 23 | volumes: 24 | - '/data/docker/minio:/data' 25 | command: server /data --console-address ":9001" 26 | ports: 27 | - '9000:9000' 28 | - '9001:9001' 29 | 30 | 31 | 32 | networks: 33 | default: 34 | external: true 35 | name: app 36 | -------------------------------------------------------------------------------- /mongodb/.gitignore: -------------------------------------------------------------------------------- 1 | keyfile 2 | -------------------------------------------------------------------------------- /mongodb/README.md: -------------------------------------------------------------------------------- 1 | # mongo 2 | 3 | ref: 4 | 5 | - [MongoDB 
Docker](https://quay.io/repository/mongodb/mongodb-community-server?tab=tags) 6 | - [Docker and MongoDB](https://www.mongodb.com/resources/products/compatibilities/docker) 7 | 8 | ## 生产环境副本集 9 | 10 | ref: 11 | 12 | - [部署自管理副本集](https://www.mongodb.com/zh-cn/docs/manual/tutorial/deploy-replica-set/) 13 | - [The only local MongoDB replica set with Docker Compose guide you’ll ever need!](https://medium.com/workleap/the-only-local-mongodb-replica-set-with-docker-compose-guide-youll-ever-need-2f0b74dd8384) 14 | - [通过docker-compose搭建mongo的replica set高可用](https://www.cnblogs.com/ricklz/p/13237419.html) 15 | 16 | 为了避免 IP 频繁变动,配置内部域名解析到主机 IP 或者外部能访问的主机地址。 17 | 在初始化副本集群时,会使用外部可访问的 IP 对应的域名。 18 | 19 | ```yaml 20 | mongo-init: 21 | image: quay.io/mongodb/mongodb-community-server:7.0.1-ubi9 22 | restart: "on-failure" 23 | depends_on: 24 | - mongo-rs-1 25 | command: 26 | - mongosh 27 | - mongodb://mongo.example.com:27017 28 | - --eval 29 | - | 30 | try { 31 | rs.status() 32 | } catch (e) { 33 | rs.initiate({ 34 | _id: "rs0", 35 | members: [ 36 | { _id: 0, host: "mongo.example.com:27017" }, 37 | { _id: 1, host: "mongo.example.com:27018" }, 38 | { _id: 2, host: "mongo.example.com:27019" }, 39 | ] 40 | }) 41 | } 42 | ``` 43 | 44 | 这里使用 `mongo.example.com` 作为域名,需要在 `/etc/hosts` 中添加对应的解析。 docker 程序会自动根据 hosts 文件 45 | 更新容器内部的 DNS 解析。 46 | 47 | ### 开启认证 48 | 49 | 如果需要开启认证,需要先创建集群内部通信的 keyfile ,当然也可以使用 x509 证书。具体操作请参考: 50 | [自管理内部/成员资格身份验证](https://www.mongodb.com/zh-cn/docs/manual/core/security-internal-authentication/) 51 | 52 | ### 副本集访问 53 | 54 | 下面是一个 python 示例代码,访问副本集,并列出数据库: 55 | 56 | ```python 57 | from pymongo import MongoClient 58 | 59 | 60 | def print_databases(): 61 | # Replace the URI string with your MongoDB deployment's connection string. 
62 | client = MongoClient( 63 | "mongodb://mongo.example.com:27017,mongo.example.com:27018,mongo.example.com:27019/?replicaSet=rs0" 64 | ) 65 | 66 | # List all databases 67 | databases = client.list_database_names() 68 | print("Databases:") 69 | for db in databases: 70 | print(f"- {db}") 71 | 72 | if __name__ == "__main__": 73 | print_databases() 74 | 75 | ``` 76 | -------------------------------------------------------------------------------- /mongodb/docker-compose-rs.yaml: -------------------------------------------------------------------------------- 1 | x-base: &default-config 2 | restart: unless-stopped 3 | ulimits: 4 | nproc: 65535 5 | nofile: 6 | soft: 20000 7 | hard: 40000 8 | stop_grace_period: 1m 9 | logging: 10 | driver: json-file 11 | options: 12 | max-size: '200m' 13 | max-file: '1' 14 | mem_swappiness: 0 15 | 16 | x-service: &service-config 17 | << : *default-config 18 | image: quay.io/mongodb/mongodb-community-server:7.0.1-ubi9 19 | # environment: 20 | # MONGO_INITDB_ROOT_USERNAME: username 21 | # MONGO_INITDB_ROOT_PASSWORD: password 22 | command: 23 | - mongod 24 | - --bind_ip_all 25 | - --replSet 26 | - rs0 27 | # - --keyFile 28 | # - /etc/mongodb/keyfile 29 | 30 | # Note: 31 | # 32 | # Config DNS to resolve mongo.example.com to public ip, 33 | # e.g. your host machine ip or cloud server public ip. 34 | # 35 | # If enable auth, set mongo root credential and 36 | # exec `openssl rand -base64 756 > keyfile` to init mongodb keyfile for replica set. 
37 | # 38 | 39 | services: 40 | mongo-rs-1: 41 | << : *service-config 42 | ports: 43 | - 27017:27017 44 | volumes: 45 | - mongo-rs-1:/data/db 46 | # - ${PWD}/keyfile:/etc/mongodb/keyfile 47 | 48 | mongo-rs-2: 49 | << : *service-config 50 | ports: 51 | - 27018:27017 52 | volumes: 53 | - mongo-rs-2:/data/db 54 | # - ${PWD}/keyfile:/etc/mongodb/keyfile 55 | 56 | mongo-rs-3: 57 | << : *service-config 58 | ports: 59 | - 27019:27017 60 | volumes: 61 | - mongo-rs-3:/data/db 62 | # - ${PWD}/keyfile:/etc/mongodb/keyfile 63 | 64 | mongo-init: 65 | image: quay.io/mongodb/mongodb-community-server:7.0.1-ubi9 66 | restart: "on-failure" 67 | depends_on: 68 | - mongo-rs-1 69 | command: 70 | - mongosh 71 | - mongodb://mongo.example.com:27017 72 | - --eval 73 | - | 74 | try { 75 | rs.status() 76 | } catch (e) { 77 | rs.initiate({ 78 | _id: "rs0", 79 | members: [ 80 | { _id: 0, host: "mongo.example.com:27017" }, 81 | { _id: 1, host: "mongo.example.com:27018" }, 82 | { _id: 2, host: "mongo.example.com:27019" }, 83 | ] 84 | }) 85 | } 86 | 87 | volumes: 88 | mongo-rs-1: 89 | mongo-rs-2: 90 | mongo-rs-3: 91 | 92 | networks: 93 | default: 94 | name: db 95 | external: true 96 | -------------------------------------------------------------------------------- /mongodb/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | x-base: &default-config 2 | restart: unless-stopped 3 | ulimits: 4 | nproc: 65535 5 | nofile: 6 | soft: 20000 7 | hard: 40000 8 | stop_grace_period: 1m 9 | logging: 10 | driver: json-file 11 | options: 12 | max-size: '100m' 13 | max-file: '1' 14 | mem_swappiness: 0 15 | 16 | services: 17 | mongo-1: 18 | image: quay.io/mongodb/mongodb-community-server:7.0.1-ubi9 19 | << : *default-config 20 | environment: 21 | MONGO_INITDB_DATABASE: test 22 | # MONGO_INITDB_ROOT_USERNAME: admin 23 | # MONGO_INITDB_ROOT_PASSWORD: admin 24 | ports: 25 | - 27017:27017 26 | volumes: 27 | - /data/docker/mongodb/data:/data/db 28 | 29 | networks: 
30 | default: 31 | name: db 32 | external: true 33 | -------------------------------------------------------------------------------- /mongodb/scripts/init.js: -------------------------------------------------------------------------------- 1 | rs.initiate(); -------------------------------------------------------------------------------- /mysql/README.md: -------------------------------------------------------------------------------- 1 | # mysql 5.7 docker-compose.yml 2 | 3 | :ref [mysql](https://hub.docker.com/_/mysql) 4 | 5 | ## 使用 6 | 7 | 启动前在 `docker-compose.yml` 同级新建 `.env` 文件,增加如下内容。 8 | 9 | ```env 10 | MYSQL_ROOT_PASSWORD=yourpassword 11 | ``` 12 | 13 | 默认情况下使用 `/data/mysql` 作为 mysql 数据目录。 14 | 默认情况下使用当前目录下的 `./conf/` 下的配置作为 mysql 的配置目录。 15 | -------------------------------------------------------------------------------- /mysql/conf/mysql.cnf: -------------------------------------------------------------------------------- 1 | [client] 2 | default-character-set=utf8mb4 3 | 4 | [mysql] 5 | default-character-set=utf8mb4 6 | 7 | [mysqld] 8 | character-set-server=utf8mb4 9 | 10 | # 跳过验证密码,在忘记 root 密码时可打开次选项进行无密登录。生产环境请删除此行!!! 
11 | # skip-grant-tables 12 | 13 | skip-host-cache 14 | 15 | 16 | # 禁止MySQL对外部连接进行DNS解析,使用这一选项可以消除MySQL进行DNS解析的时间。 17 | # 但需要注意,如果开启该选项,则所有远程主机连接授权都要使用IP地址方式,否则MySQL将无法正常处理连接请求!docker默认解析docker name或者service为ip,这样mysql就不用解析了 18 | skip-name-resolve 19 | 20 | 21 | max_connections = 1500 22 | # MySQL的最大连接数 -------------------------------------------------------------------------------- /mysql/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | server: 20 | image: mysql:8 21 | env_file: ./.env 22 | << : *default-config 23 | security_opt: 24 | - seccomp=unconfined 25 | ports: 26 | - 3306:3306 27 | command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci 28 | networks: 29 | - db 30 | volumes: 31 | - /data/docker/mysql/data:/var/lib/mysql:rw 32 | - /data/docker/mysql/mysql-files:/var/lib/mysql-files:rw 33 | - ${PWD}/conf:/etc/mysql/conf.d 34 | restart: always 35 | 36 | adminer: 37 | image: adminer 38 | networks: 39 | - app 40 | ports: 41 | - 8008:8080 42 | restart: always 43 | 44 | networks: 45 | db: 46 | external: true 47 | name: db 48 | app: 49 | external: true 50 | name: app 51 | -------------------------------------------------------------------------------- /nifi/README.md: -------------------------------------------------------------------------------- 1 | # Nifi 2 | 3 | ```shell 4 | docker run --name nifi \ 5 | -p 9090:9090 \ 6 | -d \ 7 | -e NIFI_WEB_HTTP_PORT='9090' \ 8 | apache/nifi:latest 9 | ``` 10 | -------------------------------------------------------------------------------- /nifi/docker-compose.yml: 
-------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | nifi: 20 | image: apache/nifi:latest 21 | container_name: nifi 22 | << : *default-config 23 | ports: 24 | - 9090:9090 25 | environment: 26 | - NIFI_WEB_HTTP_PORT=9090 27 | volumes: 28 | - /tmp/nifi:/tmp/nifi 29 | 30 | networks: 31 | default: 32 | external: true 33 | name: app 34 | 35 | -------------------------------------------------------------------------------- /portainer/README.md: -------------------------------------------------------------------------------- 1 | # Portainer 2 | 3 | ref [Portainer](https://www.portainer.io/installation/) 4 | -------------------------------------------------------------------------------- /portainer/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | portainer: 20 | image: portainer/portainer-ce:latest 21 | container_name: portainer 22 | << : *default-config 23 | ports: 24 | - 9000:9000 25 | - 9443:9443 26 | volumes: 27 | - /var/run/docker.sock:/var/run/docker.sock 28 | - portainer_data:/data 29 | 30 | 31 | volumes: 32 | portainer_data: 33 | 34 | networks: 35 | default: 36 | external: true 37 | name: other 38 | -------------------------------------------------------------------------------- /postgres/README.md: 
-------------------------------------------------------------------------------- 1 | # PostgreSQL 16 2 | 3 | :ref 4 | 5 | - [postgresql-16](https://catalog.redhat.com/software/containers/rhel9/postgresql-16/657b03866783e1b1fb87e142?image=66c2c29f373cc505bb0db872&architecture=amd64&container-tabs=overview) 6 | 7 | ## 使用 8 | 9 | 启动前在 `docker-compose.yml` 同级新建 `.env` 文件,增加如下内容。 10 | 11 | ```env 12 | POSTGRESQL_ADMIN_PASSWORD=postgres 13 | POSTGRESQL_USER=postgres 14 | POSTGRESQL_PASSWORD=postgres 15 | POSTGRESQL_DATABASE=postgres 16 | ``` 17 | -------------------------------------------------------------------------------- /postgres/docker-compose.yml: -------------------------------------------------------------------------------- 1 | x-base: &default-config 2 | restart: unless-stopped 3 | ulimits: 4 | nproc: 65535 5 | nofile: 6 | soft: 20000 7 | hard: 40000 8 | stop_grace_period: 1m 9 | logging: 10 | driver: json-file 11 | options: 12 | max-size: '100m' 13 | max-file: '1' 14 | mem_swappiness: 0 15 | 16 | services: 17 | db: 18 | image: registry.access.redhat.com/rhel9/postgresql-16:1-25.1724037700 19 | << : *default-config 20 | env_file: ./.env 21 | volumes: 22 | - /data/docker/pg-data:/var/lib/postgresql/data 23 | ports: 24 | - 5432:5432 25 | networks: 26 | - db 27 | 28 | networks: 29 | db: 30 | external: true 31 | -------------------------------------------------------------------------------- /prometheus/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whg517/docker-compose/34142cc934712e70ad7d4235114bd4c4a3a9a065/prometheus/README.md -------------------------------------------------------------------------------- /prometheus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | 
stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | prometheus: 20 | image: prom/prometheus:latest 21 | container_name: prometheus 22 | << : *default-config 23 | ports: 24 | - "9090:9090" 25 | volumes: 26 | - ${PWD}/prometheus.yml:/etc/prometheus/prometheus.yml 27 | - /data/docker/prometheus/data:/prometheus 28 | command: 29 | - --config.file=/etc/prometheus/prometheus.yml 30 | - --storage.tsdb.path=/prometheus 31 | - --web.console.libraries=/usr/share/prometheus/console_libraries 32 | - --web.console.templates=/usr/share/prometheus/consoles 33 | - --storage.tsdb.retention.time=366d 34 | 35 | pushgateway: 36 | image: prom/pushgateway:latest 37 | container_name: pushgateway 38 | ports: 39 | - "9091:9091" 40 | 41 | networks: 42 | default: 43 | external: true 44 | name: db 45 | -------------------------------------------------------------------------------- /prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. 4 | evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Alertmanager configuration 8 | alerting: 9 | alertmanagers: 10 | - static_configs: 11 | - targets: 12 | # - alertmanager:9093 13 | 14 | # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 15 | rule_files: 16 | # - "first_rules.yml" 17 | # - "second_rules.yml" 18 | 19 | # A scrape configuration containing exactly one endpoint to scrape: 20 | # Here it's Prometheus itself. 21 | scrape_configs: 22 | # The job name is added as a label `job=` to any timeseries scraped from this config. 
23 | # - job_name: 'prometheus' 24 | 25 | # metrics_path defaults to '/metrics' 26 | # scheme defaults to 'http'. 27 | 28 | # static_configs: 29 | # - targets: ['localhost:9090'] 30 | 31 | # pushgateway job 32 | - job_name: 'pushgateway' 33 | 34 | scrape_interval: 2s 35 | scrape_timeout: 2s 36 | static_configs: 37 | - targets: ['pushgateway:9090'] 38 | # labels: 39 | # instance: pushgateway 40 | 41 | - job_name: 'spiderkeeper' 42 | 43 | # metrics_path defaults to '/metrics' 44 | # scheme defaults to 'http'. 45 | scrape_interval: 5s 46 | scrape_timeout: 5s 47 | static_configs: 48 | - targets: ['192.168.2.178:8080'] -------------------------------------------------------------------------------- /proxy/.dockerignore: -------------------------------------------------------------------------------- 1 | config.json 2 | configfile.txt -------------------------------------------------------------------------------- /proxy/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | 为了更方便的科学上网,编写此 Dockerfile 启动代理服务。 4 | 5 | 功能点: 6 | 7 | - 使用 [xray](https://xtls.github.io/) 做代理 8 | - 挂载配置文件请参考 [配置文件格式](https://xtls.github.io/config/) 9 | - [container image](https://hub.docker.com/r/teddysun/xray) 10 | - [Docker 安装方式](https://xtls.github.io/document/install.html#docker-%E5%AE%89%E8%A3%85%E6%96%B9%E5%BC%8F) 11 | -------------------------------------------------------------------------------- /proxy/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | xray: 20 | image: teddysun/xray 21 | container_name: xray 22 | <<: *default-config 23 | ports: 24 | - 1080:1080 25 
| - 1081:1081 26 | volumes: 27 | - ${PWD}/config.json:/etc/xray/config.json 28 | 29 | networks: 30 | default: 31 | external: true 32 | name: other 33 | -------------------------------------------------------------------------------- /rabbitmq/READMD.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whg517/docker-compose/34142cc934712e70ad7d4235114bd4c4a3a9a065/rabbitmq/READMD.md -------------------------------------------------------------------------------- /rabbitmq/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | 20 | rabbitmq: 21 | image: rabbitmq:3-management 22 | <<: *default-config 23 | ports: 24 | - 5672:5672 25 | - 15672:15672 26 | 27 | networks: 28 | default: 29 | external: true 30 | name: db 31 | -------------------------------------------------------------------------------- /redis/6/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whg517/docker-compose/34142cc934712e70ad7d4235114bd4c4a3a9a065/redis/6/README.md -------------------------------------------------------------------------------- /redis/6/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | services: 20 | image: redis:6 21 
| container_name: redis6 22 | << : *default-config 23 | ports: 24 | - 6379:6379 25 | volumes: 26 | - /data/docker/redis/6:/redis 27 | restart: always 28 | 29 | networks: 30 | default: 31 | external: true 32 | name: db 33 | -------------------------------------------------------------------------------- /redis/redis-ha/README.md: -------------------------------------------------------------------------------- 1 | # Redis HA 2 | 3 | ## 配置 4 | 5 | ### Master 6 | 7 | server name: `redis-server-master` 8 | 9 | redis-master.conf 10 | 11 | ``` 12 | port 6379 13 | 14 | requirepass 123456 15 | 16 | rename-command KEYS "" 17 | ``` 18 | 19 | **注意:** 20 | 21 | 请不要直接使用上述默认密码 22 | 23 | ### Slaver 24 | 25 | redis-slave.conf 26 | 27 | ``` 28 | port 6379 29 | 30 | requirepass 123456 31 | 32 | rename-command KEYS "" 33 | 34 | replicaof redis-server-master 6379 # 不推荐使用 slave 关键词 35 | 36 | masterauth 123456 37 | ``` 38 | 39 | **注意:** 40 | 41 | 请不要直接使用上述默认密码 42 | 43 | ### Sentinel 44 | 45 | redis-sentinel.config 46 | 47 | ``` 48 | port 26379 49 | 50 | requirepass 123456 51 | 52 | sentinel monitor local-master redis-server-master 6379 2 53 | 54 | sentinel auth-pass local-master 123456 55 | ``` 56 | 57 | **注意:** 58 | 59 | 请不要直接使用上述默认密码 60 | 61 | Sentinel 节点第一次初始化的时候会根据配置或者自动自动发现查找集群,然后修改配置文件。更多描述可以查看 [Sentinel 文档](https://redis.io/topics/sentinel) [Redis 默认配置文件](https://raw.githubusercontent.com/redis/redis/6.0/redis.conf) -------------------------------------------------------------------------------- /redis/redis-ha/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | environment: 18 | TZ: "Asia/Shanghai" 19 | 20 | services: 21 | 
redis-server-master: 22 | image: redis 23 | container_name: redis-server-master 24 | << : *default-config 25 | deploy: 26 | resources: 27 | limits: 28 | memory: 1024MB 29 | volumes: 30 | - ${PWD}/example-config:/usr/local/etc/redis 31 | command: ["redis-server", "/usr/local/etc/redis/redis.conf"] 32 | 33 | redis-server-slaver-1: 34 | image: redis 35 | container_name: redis-server-slaver-1 36 | << : *default-config 37 | deploy: 38 | resources: 39 | limits: 40 | memory: 1024MB 41 | depends_on: 42 | - redis-server-master 43 | volumes: 44 | - ${PWD}/example-config:/usr/local/etc/redis 45 | command: ["redis-server", "/usr/local/etc/redis/redis.conf"] 46 | 47 | redis-server-slaver-2: 48 | image: redis 49 | container_name: redis-server-slaver-2 50 | << : *default-config 51 | deploy: 52 | resources: 53 | limits: 54 | memory: 1024MB 55 | depends_on: 56 | - redis-server-master 57 | volumes: 58 | - ${PWD}/example-config:/usr/local/etc/redis 59 | command: ["redis-server", "/usr/local/etc/redis/redis.conf"] 60 | 61 | redis-sentinel-1: 62 | image: redis 63 | container_name: redis-sentinel-1 64 | << : *default-config 65 | deploy: 66 | resources: 67 | limits: 68 | memory: 1024MB 69 | depends_on: 70 | - redis-server-slaver-1 71 | - redis-server-slaver-2 72 | volumes: 73 | - ${PWD}/example-config:/usr/local/etc/redis 74 | command: ["redis-sentinel", "/usr/local/etc/redis/redis-sentinel.conf"] 75 | 76 | redis-sentinel-2: 77 | image: redis 78 | container_name: redis-sentinel-2 79 | << : *default-config 80 | deploy: 81 | resources: 82 | limits: 83 | memory: 1024MB 84 | volumes: 85 | - ${PWD}/example-config:/usr/local/etc/redis 86 | command: ["redis-sentinel", "/usr/local/etc/redis/redis-sentinel.conf"] 87 | 88 | redis-sentinel-3: 89 | image: redis 90 | container_name: redis-sentinel-3 91 | restart: always 92 | deploy: 93 | resources: 94 | limits: 95 | memory: 1024MB 96 | networks: 97 | - db 98 | logging: 99 | options: 100 | max-size: "200M" 101 | max-file: "5" 102 | volumes: 103 | - 
${PWD}/example-config:/usr/local/etc/redis 104 | command: ["redis-sentinel", "/usr/local/etc/redis/redis-sentinel.conf"] 105 | 106 | networks: 107 | default: 108 | external: true 109 | name: app 110 | -------------------------------------------------------------------------------- /redis/redis-ha/example-config/master.conf: -------------------------------------------------------------------------------- 1 | port 6379 2 | 3 | requirepass 123456 4 | 5 | rename-command KEYS "" -------------------------------------------------------------------------------- /redis/redis-ha/example-config/sentinel-1.conf: -------------------------------------------------------------------------------- 1 | port 26379 2 | 3 | requirepass 123456 4 | 5 | sentinel monitor local-master redis-server-master 6379 2 6 | 7 | sentinel auth-pass local-master 123456 -------------------------------------------------------------------------------- /redis/redis-ha/example-config/sentinel-2.conf: -------------------------------------------------------------------------------- 1 | port 26379 2 | 3 | requirepass 123456 4 | 5 | sentinel monitor local-master redis-server-master 6379 2 6 | 7 | sentinel auth-pass local-master 123456 -------------------------------------------------------------------------------- /redis/redis-ha/example-config/sentinel-3.conf: -------------------------------------------------------------------------------- 1 | port 26379 2 | 3 | requirepass 123456 4 | 5 | sentinel monitor local-master redis-server-master 6379 2 6 | 7 | sentinel auth-pass local-master 123456 -------------------------------------------------------------------------------- /redis/redis-ha/example-config/slave1.conf: -------------------------------------------------------------------------------- 1 | port 6379 2 | 3 | requirepass 123456 4 | 5 | rename-command KEYS "" 6 | 7 | replicaof redis-server-master 6379 8 | 9 | masterauth 123456 -------------------------------------------------------------------------------- 
/redis/redis-ha/example-config/slave2.conf: -------------------------------------------------------------------------------- 1 | port 6379 2 | 3 | requirepass 123456 4 | 5 | rename-command KEYS "" 6 | 7 | replicaof redis-server-master 6379 8 | 9 | masterauth 123456 -------------------------------------------------------------------------------- /redisinsight/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/whg517/docker-compose/34142cc934712e70ad7d4235114bd4c4a3a9a065/redisinsight/README.md -------------------------------------------------------------------------------- /redisinsight/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | 20 | redisinsight: 21 | container_name: redisinsight 22 | image: redislabs/redisinsight 23 | <<: *default-config 24 | ports: 25 | - 8001:8001 26 | volumes: 27 | - redisinsight:/db 28 | 29 | volumes: 30 | redisinsight: 31 | 32 | networks: 33 | default: 34 | external: true 35 | name: app 36 | -------------------------------------------------------------------------------- /skywalking/README.md: -------------------------------------------------------------------------------- 1 | # SkyWalking 监控系统 2 | 3 | ## 部署相关文档: 4 | 5 | - [SkyWalking docker-compose.yml](https://github.com/apache/skywalking/blob/master/docker/docker-compose.yml) 6 | - [SkyWalking Document](https://skywalking.apache.org/docs/) 7 | - [SkyWalking Backend setup](https://skywalking.apache.org/docs/main/latest/en/setup/backend/backend-setup/) 8 | - [skywalking-kubernetes](https://github.com/apache/skywalking-kubernetes) 9 | - [SkyWalking 
Python Agent](https://github.com/apache/skywalking-python) -------------------------------------------------------------------------------- /skywalking/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one 2 | # or more contributor license agreements. See the NOTICE file 3 | # distributed with this work for additional information 4 | # regarding copyright ownership. The ASF licenses this file 5 | # to you under the Apache License, Version 2.0 (the 6 | # "License"); you may not use this file except in compliance 7 | # with the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | version: "3.9" 18 | 19 | x-base: &default-config 20 | restart: unless-stopped 21 | ulimits: 22 | nproc: 65535 23 | nofile: 24 | soft: 20000 25 | hard: 40000 26 | stop_grace_period: 1m 27 | logging: 28 | driver: json-file 29 | options: 30 | max-size: '100m' 31 | max-file: '1' 32 | mem_swappiness: 0 33 | 34 | services: 35 | oap: 36 | image: apache/skywalking-oap-server:8.8.1 37 | container_name: skywalking-oap 38 | networks: 39 | - app 40 | - db 41 | ports: 42 | - "11800:11800" 43 | - "12800:12800" 44 | <<: *default-config 45 | healthcheck: 46 | test: [ "CMD-SHELL", "/skywalking/bin/swctl ch" ] 47 | interval: 30s 48 | timeout: 10s 49 | retries: 3 50 | start_period: 10s 51 | environment: 52 | SW_STORAGE: elasticsearch 53 | SW_STORAGE_ES_CLUSTER_NODES: es7:9200 54 | SW_HEALTH_CHECKER: default 55 | SW_TELEMETRY: prometheus 56 | JAVA_OPTS: "-Xms2048m -Xmx2048m" 57 | 58 | ui: 59 | image: apache/skywalking-ui:8.8.1 60 | container_name: skywalking-ui 61 | <<: *default-config 62 | depends_on: 63 | - oap 64 | networks: 65 | - app 66 | ports: 67 | - "8080:8080" 68 | environment: 69 | SW_OAP_ADDRESS: http://skywalking-oap:12800 70 | 71 | networks: 72 | db: 73 | external: true 74 | name: db 75 | app: 76 | external: true 77 | name: app 78 | 79 | -------------------------------------------------------------------------------- /sonarqube/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | env_file: 18 | - .env 19 | 20 | services: 21 | sonarqube: 22 | image: sonarqube:community 23 | <<: *default-config 24 | depends_on: 25 | - db 26 | environment: 27 | SONAR_JDBC_URL: jdbc:postgresql://db:5432/sonar 28 | # SONAR_JDBC_USERNAME: 
sonar 29 | # SONAR_JDBC_PASSWORD: sonar 30 | volumes: 31 | - sonarqube_data:/opt/sonarqube/data 32 | - sonarqube_extensions:/opt/sonarqube/extensions 33 | - sonarqube_logs:/opt/sonarqube/logs 34 | ports: 35 | - "9002:9000" 36 | networks: 37 | - db 38 | - app 39 | 40 | db: 41 | image: postgres:12 42 | <<: *default-config 43 | # environment: 44 | # POSTGRES_USER: sonar 45 | # POSTGRES_PASSWORD: sonar 46 | volumes: 47 | - postgresql:/var/lib/postgresql 48 | - postgresql_data:/var/lib/postgresql/data 49 | networks: 50 | - db 51 | 52 | volumes: 53 | sonarqube_data: 54 | sonarqube_extensions: 55 | sonarqube_logs: 56 | postgresql: 57 | postgresql_data: 58 | 59 | networks: 60 | db: 61 | external: true 62 | name: db 63 | app: 64 | external: true 65 | name: app 66 | -------------------------------------------------------------------------------- /splash/README.md: -------------------------------------------------------------------------------- 1 | # Splash 2 | 3 | ref [splash](https://splash.readthedocs.io/en/stable/install.html) 4 | -------------------------------------------------------------------------------- /splash/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | 18 | services: 19 | 20 | splash: 21 | image: scrapinghub/splash 22 | <<: *default-config 23 | deploy: 24 | resources: 25 | limits: 26 | memory: 2G 27 | ports: 28 | - 8050:8050 29 | 30 | networks: 31 | default: 32 | external: true 33 | name: other 34 | -------------------------------------------------------------------------------- /wiki.js/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | 
x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | env_file: 18 | - .env 19 | 20 | services: 21 | db: 22 | image: postgres:11-alpine 23 | <<: *default-config 24 | environment: 25 | POSTGRES_DB: wiki 26 | # POSTGRES_PASSWORD: wikijsrocks 27 | # POSTGRES_USER: wikijs 28 | networks: 29 | - db 30 | logging: 31 | driver: "none" 32 | volumes: 33 | - /data/docker/wiki.js:/var/lib/postgresql/data 34 | 35 | wiki: 36 | image: ghcr.io/requarks/wiki:2 37 | <<: *default-config 38 | depends_on: 39 | - db 40 | networks: 41 | - db 42 | - app 43 | environment: 44 | DB_TYPE: postgres 45 | DB_HOST: db 46 | DB_PORT: 5432 47 | # DB_USER: wikijs 48 | # DB_PASS: wikijsrocks 49 | # DB_NAME: wiki 50 | ports: 51 | - "3000:3000" 52 | 53 | networks: 54 | db: 55 | external: true 56 | app: 57 | external: true 58 | -------------------------------------------------------------------------------- /windmall/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | 3 | services: 4 | db: 5 | deploy: 6 | # To use an external database, set replicas to 0 and set DATABASE_URL to the external database url in the .env file 7 | replicas: 1 8 | image: postgres:14 9 | restart: unless-stopped 10 | volumes: 11 | - db_data:/var/lib/postgresql/data 12 | expose: 13 | - 5432 14 | environment: 15 | POSTGRES_PASSWORD: cwindmill 16 | POSTGRES_DB: windmill 17 | healthcheck: 18 | test: ["CMD-SHELL", "pg_isready -U postgres"] 19 | interval: 10s 20 | timeout: 5s 21 | retries: 5 22 | 23 | windmill_server: 24 | image: ${WM_IMAGE} 25 | pull_policy: always 26 | deploy: 27 | replicas: 1 28 | restart: unless-stopped 29 | expose: 30 | - 8000 31 | environment: 32 | - DATABASE_URL=${DATABASE_URL} 33 | - MODE=server 34 | depends_on: 35 | db: 36 | 
condition: service_healthy 37 | 38 | windmill_worker: 39 | image: ${WM_IMAGE} 40 | pull_policy: always 41 | deploy: 42 | replicas: 3 43 | resources: 44 | limits: 45 | cpus: "1" 46 | memory: 2048M 47 | restart: unless-stopped 48 | environment: 49 | - DATABASE_URL=${DATABASE_URL} 50 | - MODE=worker 51 | - WORKER_GROUP=default 52 | depends_on: 53 | db: 54 | condition: service_healthy 55 | # to mount the worker folder to debug, KEEP_JOB_DIR=true and mount /tmp/windmill 56 | volumes: 57 | # mount the docker socket to allow to run docker containers from within the workers 58 | - /var/run/docker.sock:/var/run/docker.sock 59 | - worker_dependency_cache:/tmp/windmill/cache 60 | 61 | ## This worker is specialized for "native" jobs. Native jobs run in-process and thus are much more lightweight than other jobs 62 | windmill_worker_native: 63 | # Use ghcr.io/windmill-labs/windmill-ee:main for the ee 64 | image: ${WM_IMAGE} 65 | pull_policy: always 66 | deploy: 67 | replicas: 2 68 | resources: 69 | limits: 70 | cpus: "0.1" 71 | memory: 128M 72 | restart: unless-stopped 73 | environment: 74 | - DATABASE_URL=${DATABASE_URL} 75 | - MODE=worker 76 | - WORKER_GROUP=native 77 | depends_on: 78 | db: 79 | condition: service_healthy 80 | 81 | ## This worker is specialized for reports or scraping jobs. It is assigned the "reports" worker group which has an init script that installs chromium and can be targeted by using the "chromium" worker tag.
82 | # windmill_worker_reports: 83 | # image: ${WM_IMAGE} 84 | # pull_policy: always 85 | # deploy: 86 | # replicas: 1 87 | # resources: 88 | # limits: 89 | # cpus: "1" 90 | # memory: 2048M 91 | # restart: unless-stopped 92 | # environment: 93 | # - DATABASE_URL=${DATABASE_URL} 94 | # - MODE=worker 95 | # - WORKER_GROUP=reports 96 | # depends_on: 97 | # db: 98 | # condition: service_healthy 99 | # # to mount the worker folder to debug, KEEP_JOB_DIR=true and mount /tmp/windmill 100 | # volumes: 101 | # # mount the docker socket to allow to run docker containers from within the workers 102 | # - /var/run/docker.sock:/var/run/docker.sock 103 | # - worker_dependency_cache:/tmp/windmill/cache 104 | 105 | lsp: 106 | image: ghcr.io/windmill-labs/windmill-lsp:latest 107 | pull_policy: always 108 | restart: unless-stopped 109 | expose: 110 | - 3001 111 | volumes: 112 | - lsp_cache:/root/.cache 113 | 114 | multiplayer: 115 | image: ghcr.io/windmill-labs/windmill-multiplayer:latest 116 | deploy: 117 | replicas: 0 # Set to 1 to enable multiplayer, only available on Enterprise Edition 118 | restart: unless-stopped 119 | expose: 120 | - 3002 121 | 122 | caddy: 123 | image: caddy:2.5.2-alpine 124 | restart: unless-stopped 125 | 126 | # Configure the mounted Caddyfile and the exposed ports or use another reverse proxy if needed 127 | volumes: 128 | - ./Caddyfile:/etc/caddy/Caddyfile 129 | # - ./certs:/certs # Provide custom certificate files like cert.pem and key.pem to enable HTTPS - See the corresponding section in the Caddyfile 130 | ports: 131 | # To change the exposed port, simply change 80:80 to :80.
No other changes needed 132 | - 80:80 133 | # - 443:443 # Uncomment to enable HTTPS handling by Caddy 134 | environment: 135 | - BASE_URL=":80" 136 | # - BASE_URL=":443" # uncomment and comment line above to enable HTTPS via custom certificate and key files 137 | # - BASE_URL=mydomain.com # Uncomment and comment line above to enable HTTPS handling by Caddy 138 | 139 | volumes: 140 | db_data: null 141 | worker_dependency_cache: null 142 | lsp_cache: null 143 | -------------------------------------------------------------------------------- /zookeeper/README.md: -------------------------------------------------------------------------------- 1 | # Zookeeper 集群 2 | 3 | ## 1. 安装 4 | 5 | ```shell 6 | $ docker-compose -f docker-compose.yml up -d 7 | ``` -------------------------------------------------------------------------------- /zookeeper/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | x-base: &default-config 4 | restart: unless-stopped 5 | ulimits: 6 | nproc: 65535 7 | nofile: 8 | soft: 20000 9 | hard: 40000 10 | stop_grace_period: 1m 11 | logging: 12 | driver: json-file 13 | options: 14 | max-size: '100m' 15 | max-file: '1' 16 | mem_swappiness: 0 17 | services: 18 | zoo1: 19 | image: zookeeper:3.5.8 20 | restart: always 21 | <<: *default-config 22 | hostname: zoo1 23 | ports: 24 | - 2181:2181 25 | environment: 26 | ZOO_MY_ID: 1 27 | ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181 28 | 29 | zoo2: 30 | image: zookeeper:3.5.8 31 | restart: always 32 | <<: *default-config 33 | hostname: zoo2 34 | ports: 35 | - 2182:2181 36 | environment: 37 | ZOO_MY_ID: 2 38 | ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181 39 | 40 | zoo3: 41 | image: zookeeper:3.5.8 42 | restart: always 43 | <<: *default-config 44 | hostname: zoo3 45 | ports: 46 | - 2183:2181 47 | environment: 48 | ZOO_MY_ID: 3
49 | ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181 --------------------------------------------------------------------------------