├── README.md
├── docker-compose
│   ├── spug
│   │   ├── README.md
│   │   ├── init.sh
│   │   └── docker-compose.yml
│   ├── proget
│   │   ├── env
│   │   │   ├── proget_db.env
│   │   │   └── proget.env
│   │   ├── README.md
│   │   └── docker-compose.yml
│   ├── mysql
│   │   ├── 5.7
│   │   │   ├── conf
│   │   │   │   └── my.cnf
│   │   │   └── docker-compose.yml
│   │   └── 8.0
│   │       ├── docker-compose.yml
│   │       └── conf
│   │           └── my.cnf
│   ├── drone
│   │   ├── server
│   │   │   ├── .env
│   │   │   └── docker-compose.yml
│   │   └── README.md
│   ├── influxdb
│   │   ├── influxdb.env
│   │   ├── docker-compose.yml
│   │   └── etc
│   │       └── influxdb.conf
│   ├── sonarqube
│   │   ├── plugins
│   │   │   ├── sonar-l10n-zh-plugin-1.29.jar
│   │   │   └── sonar-csharp-plugin-7.16.0.8981.jar
│   │   ├── README.md
│   │   ├── docker-compose.yml
│   │   └── sonar.properties
│   ├── elastichd
│   │   ├── README.md
│   │   └── docker-compose.yml
│   ├── mrdoc
│   │   ├── auto_update.sh
│   │   ├── update.sh
│   │   ├── docker-compose.yml
│   │   ├── README.md
│   │   └── config
│   │       └── config.ini
│   ├── frps
│   │   ├── docker-compose.yml
│   │   └── config
│   │       └── frps.ini
│   ├── soar-web
│   │   └── docker-compose.yml
│   ├── jumpserver
│   │   ├── env
│   │   │   └── jumpserver.env
│   │   ├── README.md
│   │   └── docker-compose.yml
│   ├── yapi
│   │   ├── docker-entrypoint.sh
│   │   ├── docker-compose.yml
│   │   ├── config.json
│   │   └── Dockerfile
│   ├── superset
│   │   ├── docker-compose.yml
│   │   └── docker
│   │       └── .env
│   ├── redis
│   │   └── docker-compose.yml
│   ├── redisInSight
│   │   └── docker-compose.yml
│   ├── postgreSQL
│   │   └── docker-compose.yml
│   ├── freshrss
│   │   └── docker-compose.yml
│   ├── nfs
│   │   └── docker-compose.yml
│   ├── mssql
│   │   └── docker-compose.yml
│   ├── mindoc
│   │   └── docker-compose.yml
│   ├── mariadb
│   │   └── docker-compose.yml
│   ├── nexus
│   │   └── docker-compose.yml
│   └── rabbitmq
│       └── docker-compose.yml
└── init
    └── docker
        ├── README.md
        ├── auto-install-docker.sh
        └── bash-completion
            └── docker-compose.sh

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# bing-docker

--------------------------------------------------------------------------------
/docker-compose/spug/README.md:
--------------------------------------------------------------------------------
# SPUG, a lightweight automated operations platform

## 1.1 Environment variables

--------------------------------------------------------------------------------
/docker-compose/proget/env/proget_db.env:
--------------------------------------------------------------------------------
POSTGRES_DB=proget
POSTGRES_USER=proget
POSTGRES_PASSWORD=123456

--------------------------------------------------------------------------------
/docker-compose/proget/env/proget.env:
--------------------------------------------------------------------------------
PROGET_DATABASE=Host=proget_db; Database=proget; Username=proget; Password=123456;

--------------------------------------------------------------------------------
/docker-compose/mysql/5.7/conf/my.cnf:
--------------------------------------------------------------------------------
[mysqld]
max_connections = 1000
max_allowed_packet = 512M
expire_logs_days = 7

--------------------------------------------------------------------------------
/docker-compose/drone/server/.env:
--------------------------------------------------------------------------------
DRONE_SERVER_HOST=
DRONE_SERVER_PROTO=http
DRONE_RPC_SECRET=123456
DRONE_GOGS_SERVER=

--------------------------------------------------------------------------------
/docker-compose/spug/init.sh:
--------------------------------------------------------------------------------
#!/bin/bash
docker exec spug init_spug admin 123456
docker restart spug
# This script must be saved with LF line endings, otherwise password initialization fails
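A practical note on the LF warning above: if the repository is checked out with CRLF line endings (for example with git's `core.autocrlf=true` on Windows), the `init_spug` step fails and the admin password is never set. A minimal normalization sketch, assuming GNU sed is available; `dos2unix init.sh` works just as well:

```sh
# Strip trailing carriage returns so init.sh runs with LF endings
sed -i 's/\r$//' init.sh
sh init.sh
```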
--------------------------------------------------------------------------------
/docker-compose/influxdb/influxdb.env:
--------------------------------------------------------------------------------
INFLUXDB_ADMIN_USER=root
INFLUXDB_ADMIN_PASSWORD=123456
INFLUXDB_USER=bing
INFLUXDB_USER_PASSWORD=123456

--------------------------------------------------------------------------------
/docker-compose/sonarqube/plugins/sonar-l10n-zh-plugin-1.29.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jianxuanbing/bing-docker/HEAD/docker-compose/sonarqube/plugins/sonar-l10n-zh-plugin-1.29.jar

--------------------------------------------------------------------------------
/docker-compose/sonarqube/plugins/sonar-csharp-plugin-7.16.0.8981.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jianxuanbing/bing-docker/HEAD/docker-compose/sonarqube/plugins/sonar-csharp-plugin-7.16.0.8981.jar

--------------------------------------------------------------------------------
/docker-compose/elastichd/README.md:
--------------------------------------------------------------------------------
# ElasticHD, a simple visualization panel
ElasticHD is a visualization panel for Elasticsearch. It supports ES monitoring, real-time search, quick viewing and editing of index templates, index list information, SQL-to-DSL conversion, and more.

## 1. Web UI
[http://localhost:9800/](http://localhost:9800/)

--------------------------------------------------------------------------------
/docker-compose/mrdoc/auto_update.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Run watchtower to auto-update the mrdoc container on the given schedule
docker run -d --name watchtower --restart unless-stopped -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --cleanup -s "0 3 * * *" mrdoc

--------------------------------------------------------------------------------
/docker-compose/drone/README.md:
--------------------------------------------------------------------------------
# Server
## Environment variables
- `DRONE_SERVER_HOST`: externally visible server address (required). The hostname or IP address used for access from outside; when an IP address is used, it may include a port number.
- `DRONE_SERVER_PROTO`: external access protocol (required). Should be set to `http` or `https`.
- `DRONE_RPC_SECRET`: RPC secret (required). Used by agent clients to authenticate against the server.

# Client

--------------------------------------------------------------------------------
/docker-compose/mrdoc/update.sh:
--------------------------------------------------------------------------------
#!/bin/bash
docker stop mrdoc && docker rm mrdoc
docker pull jonnyan404/mrdoc-nginx
docker-compose up -d
# Run the database migrations
docker exec -it mrdoc python manage.py makemigrations
docker exec -it mrdoc python manage.py migrate

--------------------------------------------------------------------------------
/docker-compose/frps/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  frps:
    image: snowdreamtech/frps:0.35.1
    container_name: frps
    restart: always
    pid: "host"
    network_mode: "host"
    volumes:
      - ./config/frps.ini:/etc/frp/frps.ini

--------------------------------------------------------------------------------
/docker-compose/soar-web/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  soar-web:
    image: becivells/soar-web:latest
    container_name: soar-web
    restart: always
    ports:
      - 5077:5077
    networks:
      - devops-network

networks:
  devops-network:
    external: true
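Most compose files in this repo declare `devops-network` with `external: true`, so `docker-compose up` refuses to start unless that network already exists. A minimal sketch of creating it once, up front; the bridge driver and the `172.26.0.0/16` subnet mirror what the nexus and mysql 8.0 files declare themselves, and are otherwise assumptions:

```sh
# Create the shared bridge network that the external-network compose files attach to
docker network create --driver bridge --subnet 172.26.0.0/16 devops-network
```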
--------------------------------------------------------------------------------
/docker-compose/jumpserver/env/jumpserver.env:
--------------------------------------------------------------------------------
# Basic settings
SECRET_KEY=hy04bqURmzTPfUaxDJEz1NnQhoNStdRMqtjWSV67AU04cCVFqS
BOOTSTRAP_TOKEN=lef9bi3MBk7acidq

# Database
DB_HOST=127.0.0.1
DB_PORT=3306
DB_USER=root
DB_PASSWORD=123456

# Redis
REDIS_HOST=127.0.0.1
REDIS_PORT=6379
REDIS_PASSWORD=123456

--------------------------------------------------------------------------------
/docker-compose/elastichd/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  elastichd:
    image: containerize/elastichd:latest
    container_name: elastichd
    restart: always
    ports:
      - 9800:9800
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/yapi/docker-entrypoint.sh:
--------------------------------------------------------------------------------
#! /bin/sh
# yapi creates an init.lock file after initialization
lockPath="/api/init.lock"

# Enter the yapi project directory
cd /api/vendors
ls -a

# If the lock file exists, start directly; otherwise initialize first
if [ ! -f "$lockPath" ]
then
    # Run the yapi initialization
    node server/install.js
    # Once initialization has succeeded, start yapi
    node server/app.js
else
    # Start the yapi management system
    node server/app.js
fi

--------------------------------------------------------------------------------
/docker-compose/yapi/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3'
services:
  yapi:
    build:
      context: .
    image: yapi:1.12.0
    container_name: yapi
    ports:
      - 9800:3000
    volumes:
      - /data/yapi/log:/api/vendors/log
      - ./docker-entrypoint.sh:/api/docker-entrypoint.sh
      - ./config.json:/api/config.json
    environment:
      HOME: /api

--------------------------------------------------------------------------------
/docker-compose/superset/docker-compose.yml:
--------------------------------------------------------------------------------
x-superset-image: &superset-image amancevice/superset:1.0.1
version: '3.4'
services:
  superset:
    image: *superset-image
    container_name: superset
    restart: always
    ports:
      - 11002:8088
    env_file: docker/.env
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/proget/README.md:
--------------------------------------------------------------------------------
# Installation guide
## Installing on a Linux server
ProGet can run on a Linux server via Docker.
Recommended container resources:
- Minimum: 2 CPUs, 2 GB RAM
- Recommended for a medium environment: 2 CPUs, 4 GB RAM
- Recommended for a large environment: 4 CPUs, 8 GB RAM

> Reserve at least 10 GB of storage for packages and container images

## Environment variables
- `PROGET_DATABASE`: database connection string.
- `PROGET_DB_TYPE`: database type; default: `PostgreSQL`.

## Account
Default account and password: Admin

# Notes
- Remember to grant access permissions on the `volumes` paths.
> chmod 777 -R /data/proget

--------------------------------------------------------------------------------
/docker-compose/mrdoc/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  mrdoc:
    image: jonnyan404/mrdoc-nginx:0.7.0
    container_name: mrdoc
    restart: always
    ports:
      - 10086:10086
    volumes:
      - ./config:/app/MrDoc/config
      - /data/MrDoc/media:/app/MrDoc/media
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/redis/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  redis:
    image: redis:5.0.12
    container_name: redis
    restart: always
    ports:
      - 6379:6379
    command: redis-server --requirepass 123456 --appendonly yes
    volumes:
      - /data/redis/data:/data
      - /etc/localtime:/etc/localtime:ro
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/influxdb/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  influxdb:
    image: influxdb:1.8.1
    container_name: influxdb
    restart: always
    privileged: true
    ports:
      - 8086:8086
    volumes:
      - ./etc:/etc/influxdb
      - /data/influxdb/data:/var/lib/influxdb
    env_file:
      - ./influxdb.env
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/sonarqube/README.md:
--------------------------------------------------------------------------------
# Notes
Default username and password: `admin`, `admin`

# Errors
## Elasticsearch error
### Problem
`max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]`

### Solution
#### Temporary fix
Run on the host:
```
sysctl -w vm.max_map_count=262144
sysctl -a|grep vm.max_map_count
```

#### Permanent fix
Edit /etc/sysctl.conf and append as the last line:
```
vm.max_map_count=262144
```
To apply it immediately, run:
```
/sbin/sysctl -p
```

--------------------------------------------------------------------------------
/docker-compose/jumpserver/README.md:
--------------------------------------------------------------------------------
# Jumpserver bastion host

## 1. Environment variables
### 1.1 Basic settings
- `SECRET_KEY`: key protecting signed data. Be sure to change it on first install and keep it safe; it must not change during later upgrades or migrations, otherwise encrypted data can no longer be decrypted.
- `BOOTSTRAP_TOKEN`: key used for component authentication, only while components register. Components here means `koko` and `guacamole`.

### 1.2 Database (MySQL)
- `DB_HOST`: database IP address.
- `DB_PORT`: database port.
- `DB_USER`: database username.
- `DB_PASSWORD`: database password.
- `DB_NAME`: database name.

### 1.3 Cache (Redis)
- `REDIS_HOST`: Redis IP address.
- `REDIS_PORT`: Redis port.
- `REDIS_PASSWORD`: Redis password.

--------------------------------------------------------------------------------
/docker-compose/jumpserver/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  jumpserver:
    image: jumpserver/jms_all:v2.11.1
    container_name: jumpserver
    restart: always
    privileged: true
    ports:
      - 9810:80
      - 2222:2222
    env_file: ./env/jumpserver.env
    volumes:
      - /data/jumpserver/data:/opt/jumpserver/data
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/redisInSight/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  redisinsight:
    image: redislabs/redisinsight:1.10.1
    container_name: redisinsight
    restart: always
    ports:
      - 9001:8001
    volumes:
      - /data/redisinsight:/db
    networks:
      - devops-network

networks:
  devops-network:
    name: devops-network
    driver: bridge
    ipam:
      config:
        - subnet: 172.26.0.0/16

--------------------------------------------------------------------------------
/docker-compose/postgreSQL/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  postgres:
    image: postgres:9.5
    container_name: pgsql
    restart: always
    ports:
      - 5432:5432
    volumes:
      - /data/pgsql/data:/var/lib/postgresql/data
      - /etc/localtime:/etc/localtime:ro
    environment:
      POSTGRES_USER: admin
      POSTGRES_PASSWORD: 123456
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/freshrss/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  freshrss:
    image: freshrss/freshrss:1.25.0
    container_name: freshrss
    restart: always
    ports:
      - 8282:80
    environment:
      TZ: Asia/Shanghai
      CRON_MIN: '*/45' # RSS refresh interval, in minutes
      MYSQL_HOST: 172.17.0.1:3306
      MYSQL_DATABASE: freshrss
      MYSQL_USER: root
      MYSQL_PASSWORD: 123456
    volumes:
      - ./data:/var/www/FreshRSS/data
      - ./extensions:/var/www/FreshRSS/extensions

--------------------------------------------------------------------------------
/docker-compose/nfs/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  nfs-server:
    image: gists/nfs-server:2.5.2
    container_name: nfs-server
    restart: always
    ports:
      - 2049:2049
    cap_add:
      - SYS_ADMIN
      - SETPCAP
    environment:
      NFS_OPTION: 'rw,async,insecure,no_root_squash,no_all_squash,no_subtree_check,fsid=0'
    volumes:
      - /data/nfs-server:/nfs-share
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/mrdoc/README.md:
--------------------------------------------------------------------------------
# Usage
## PDF configuration for the Docker version
### Install the Chrome-related modules
```
# Install chrome and chromedriver
---
# Install from the official repository
docker exec -it mrdoc apk add chromium chromium-chromedriver
# Install from a third-party mirror (try this when downloads are slow)
docker exec -it mrdoc apk add chromium chromium-chromedriver --repository http://mirrors.ustc.edu.cn/alpine/v3.14/main/ --allow-untrusted
# Check whether chrome was installed successfully
docker exec -it mrdoc chromium-browser --version
# If chrome fails to run, execute the following command
docker exec -it mrdoc apk upgrade -a
---
# Restart the container for the configuration to take effect
docker restart mrdoc
```

--------------------------------------------------------------------------------
/docker-compose/yapi/config.json:
--------------------------------------------------------------------------------
{
  "port": "3000",
  "adminAccount": "admin@admin.com",
  "versionNotify": true,
  "closeRegister": false,
  "db": {
    "servername": "172.17.0.1",
    "DATABASE": "yapiDb",
    "port": 27017,
    "user": "bing",
    "pass": "123456",
    "authSource": "yapiDb"
  },
  "mail": {
    "enable": false,
    "host": "smtp.163.com",
    "port": 465,
    "from": "***@163.com",
    "auth": {
      "user": "***@163.com",
      "pass": "*****"
    }
  }
}

--------------------------------------------------------------------------------
/docker-compose/spug/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  spug:
    image: openspug/spug:3.0.4
    container_name: spug
    restart: always
    ports:
      - 9527:80
      - 9528:3306 # expose the built-in mysql
    volumes:
      - /data/spug:/data
      - /var/run/docker.sock:/var/run/docker.sock # mount the host Docker socket
      - /usr/bin/docker:/usr/bin/docker
    networks:
      - devops-network

networks:
  devops-network:
    name: devops-network
    driver: bridge
    ipam:
      config:
        - subnet: 172.22.0.0/16

--------------------------------------------------------------------------------
/docker-compose/mssql/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3'
services:
  sqlserver:
    image: justin2004/mssql_server_tiny
    container_name: sqlserver
    ports:
      - 21433:1433
    restart: always
    environment:
      ACCEPT_EULA: Y
      SA_PASSWORD: Bing2019.00
    volumes:
      - /data/mssql:/var/opt/mssql/data
    networks:
      - devops-network
    deploy:
      resources:
        limits:
          memory: 512M
        reservations:
          memory: 512M

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/mindoc/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  mindoc:
    image: registry.cn-hangzhou.aliyuncs.com/mindoc/mindoc:v2.0-beta.2
    container_name: mindoc
    restart: always
    ports:
      - 8098:8181
    volumes:
      - /data/mindoc/database:/mindoc/database
      - /data/mindoc/uploads:/mindoc/uploads
    environment:
      MINDOC_RUN_MODE: prod
      MINDOC_DB_ADAPTER: sqlite3
      MINDOC_DB_DATABASE: ./database/mindoc.db
      MINDOC_CACHE: 'true'
      MINDOC_CACHE_PROVIDER: file
      MINDOC_ENABLE_EXPORT: 'true'

--------------------------------------------------------------------------------
/docker-compose/mariadb/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  mariadb:
    image: mariadb:10.5.6
    container_name: mariadb
    command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_unicode_ci]
    restart: always
    ports:
      - 3306:3306
    environment:
      MYSQL_ROOT_PASSWORD: 123456
      MYSQL_USER: bing
      MYSQL_PASSWORD: 123456
      MYSQL_ROOT_HOST: '%'
      TIMEZONE: Asia/Shanghai
    volumes:
      - /data/mysql/data:/var/lib/mysql
      - /data/mysql/conf:/etc/mysql/conf.d
      - /etc/localtime:/etc/localtime:ro

--------------------------------------------------------------------------------
/docker-compose/nexus/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  nexus3:
    image: sonatype/nexus3:3.31.1
    container_name: nexus3
    privileged: true
    ports:
      - 9001:8081
    restart: always
    environment:
      INSTALL4J_ADD_VM_PARAMS: "-Xms4g -Xmx4g -XX:MaxDirectMemorySize=6717M"
    volumes:
      - /data/nexus3/data:/nexus-data
      - /etc/localtime:/etc/localtime
    networks:
      - devops-network

networks:
  devops-network:
    name: devops-network
    driver: bridge
    ipam:
      config:
        - subnet: 172.26.0.0/16

--------------------------------------------------------------------------------
/docker-compose/sonarqube/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  sonarqube:
    image: sonarqube
    container_name: sonarqube
    restart: always
    ports:
      - 9003:9000
    volumes:
      - /data/sonarqube/conf:/opt/sonarqube/conf
      - /data/sonarqube/data:/opt/sonarqube/data
      - /data/sonarqube/plugins:/opt/sonarqube/extensions/plugins
    environment:
      SONARQUBE_JDBC_USERNAME: admin
      SONARQUBE_JDBC_PASSWORD: "123456"
      SONARQUBE_JDBC_URL: jdbc:postgresql://0.0.0.0:5432/sonar
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/drone/server/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  drone-server:
    image: drone/drone:1
    container_name: drone-server
    restart: always
    ports:
      - 9001:80
    volumes:
      - /data/drone-server:/data
    environment:
      DRONE_SERVER_HOST: ${DRONE_SERVER_HOST}
      DRONE_SERVER_PROTO: ${DRONE_SERVER_PROTO}
      DRONE_RPC_SECRET: ${DRONE_RPC_SECRET}
      DRONE_AGENTS_ENABLED: 'true'
      DRONE_GOGS_SERVER: ${DRONE_GOGS_SERVER}
      DRONE_GIT_ALWAYS_AUTH: 'true'
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/mrdoc/config/config.ini:
--------------------------------------------------------------------------------
# If you have no need to switch to another database, copy this as-is and do not change settings at random.
# MySQL note: MySQL 5.6 or later is required.
# MySQL needs a database created manually: create database mrdoc character set utf8mb4 collate utf8_bin;
#grant all privileges on mrdoc.* to 'jonnyan404'@'%' identified by 'www.jonnyan404.top:8088' with grant option;
#flush privileges;
[site]
# True enables site debug mode, False disables it
debug = False

[database]
# engine specifies the database type; accepts sqlite, mysql, oracle, postgresql
engine = mysql
# name is the database name
name = mrdoc
# user is the database username
user = root
# password is the database user's password
password = 123456
# host is the database host address
host = 127.0.0.1
# port is the database port
port=3306
[selenium]
# driver_path specifies the Chromium driver path; the default is used if unset
driver_path = /usr/lib/chromium/chromedriver

--------------------------------------------------------------------------------
/docker-compose/rabbitmq/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  rabbitmq:
    image: rabbitmq:management
    container_name: rabbitmq
    restart: always
    ports:
      - 5672:5672
      - 15672:15672
    environment:
      RABBITMQ_DEFAULT_USER: admin
      RABBITMQ_DEFAULT_PASS: 123456
      RABBITMQ_DEFAULT_VHOST: /
    volumes:
      - /data/rabbitmq/data:/var/lib/rabbitmq/
      - /data/rabbitmq/logs:/var/log/rabbitmq/
      - /etc/localtime:/etc/localtime:ro
    logging:
      driver: "json-file"
      options:
        max-size: "200k"
        max-file: "10"
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/mysql/8.0/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  mysql:
    image: mysql:8.0.16
    container_name: mysql
    restart: always
    ports:
      - 3306:3306
    environment:
      MYSQL_ROOT_PASSWORD: 123456 # root password
      MYSQL_USER: bing # create the bing user
      MYSQL_PASSWORD: 123456 # password for the bing user
      MYSQL_ROOT_HOST: '%'
      TIMEZONE: Asia/Shanghai
    volumes:
      - /data/mysql/data:/var/lib/mysql
      - /data/mysql/conf:/etc/mysql/conf.d
      - /etc/localtime:/etc/localtime:ro
      - ./conf/my.cnf:/etc/mysql/conf.d/my.cnf
    networks:
      - devops-network

networks:
  devops-network:
    name: devops-network
    driver: bridge
    ipam:
      config:
        - subnet: 172.26.0.0/16

--------------------------------------------------------------------------------
/docker-compose/proget/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  proget:
    image: inedo/proget:5.2.27
    container_name: proget
    restart: always
    ports:
      - 5555:80
    env_file: ./env/proget.env
    volumes:
      - /data/proget/packages:/var/proget/packages
      - /data/proget/extensions:/var/proget/extensions
      - /etc/localtime:/etc/localtime:ro
    networks:
      - devops-network

  proget_db:
    image: postgres:9.5
    container_name: proget_db_pgsql
    restart: always
    env_file: ./env/proget_db.env
    volumes:
      - /data/proget_db:/var/lib/postgresql/data
      - /etc/localtime:/etc/localtime:ro
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/mysql/5.7/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3.4'
services:
  mysql:
    image: mysql:5.7.25
    container_name: mysql
    command: [mysqld, --character-set-server=utf8mb4, --collation-server=utf8mb4_general_ci] # set the utf8mb4 character set
    restart: always
    ports:
      - 3306:3306
    environment:
      MYSQL_ROOT_PASSWORD: 123456 # root password
      MYSQL_USER: bing # create the bing user
      MYSQL_PASSWORD: 123456 # password for the bing user
      MYSQL_ROOT_HOST: '%'
      TIMEZONE: Asia/Shanghai
    volumes:
      - /data/mysql/data:/var/lib/mysql
      - /data/mysql/conf:/etc/mysql/conf.d
      - /etc/localtime:/etc/localtime:ro
      - ./conf/my.cnf:/etc/mysql/conf.d/my.cnf
    networks:
      - devops-network

networks:
  devops-network:
    external: true

--------------------------------------------------------------------------------
/docker-compose/frps/config/frps.ini:
--------------------------------------------------------------------------------
# The following line must come first; it marks the start of the configuration
[common]
# frp server port (required)
bind_port = 7000

bind_udp_port = 7001

# frp server token (required)
token = ocpRJbXt5VTD6/kaA1ZaEaz6T5xNrbfE

# Authentication timeout. Timestamps are used in authentication to keep captured
# packets from being replayed by others, so the clock difference between the
# server and client machines must not exceed this value (in seconds).
# Defaults to 900 seconds (15 minutes); 0 disables timestamp validation.
authentication_timeout = 900

# Dashboard port; the dashboard (admin UI) is only available when this is set
dashboard_port = 7500

# Dashboard credentials; both default to admin when unset
dashboard_user = admin
dashboard_pwd = admin

# Console or real log file path, e.g. ./frps.log
log_file = ./frps.log

# Log level: trace | debug | info | warn | error
log_level = info
log_max_days = 3

# To tunnel to web sites inside the intranet with frp (e.g. a router admin page),
# the following two listening ports must be set; leaving them unset disables the feature
# vhost_http_port = 10080
# vhost_https_port = 10443

# This setting must match the client side and is only useful when tunneling
# http or https into the intranet (optional).
# Suppose it is set to example.com and an http client sets subdomain to test;
# after resolving test.example.com to the server, that domain reaches the
# client's corresponding http service.
# For subdomains, resolve *.domain.com to the public server.
# subdomain_host = domain.com

--------------------------------------------------------------------------------
/docker-compose/yapi/Dockerfile:
--------------------------------------------------------------------------------
######## build stage ########
FROM node:12.16.3-alpine3.11 as builder

# Install build tools
RUN apk add --update --no-cache ca-certificates curl wget cmake build-base git bash python make gcc g++ zlib-dev autoconf automake file nasm \
    && update-ca-certificates

# YAPI version
ENV YAPI_VERSION=1.12.0

# Fetch the YAPI source: git clone --branch "v1.12.0" --single-branch --depth 1 https://gitee.com/mirrors/YApi.git
RUN mkdir /yapi && cd /yapi \
    && git clone \
    --branch "v${YAPI_VERSION}" \
    --single-branch \
    --depth 1 \
    https://gitee.com/mirrors/YApi.git vendors

# Install dependencies
RUN cd /yapi/vendors \
    # && rm package-lock.json \
    && npm install --production --registry https://registry.npm.taobao.org
    # && yarn

######## runtime image ########
FROM node:12.16.3-alpine3.11

LABEL MAINTAINER = 'jianxuanbing(https://github.com/jianxuanbing)'

ENV TZ="Asia/Shanghai" HOME="/"

WORKDIR ${HOME}

COPY --from=builder /yapi/vendors /api/vendors
COPY --from=builder /yapi/vendors/config_example.json /api/config.json

COPY docker-entrypoint.sh /api/

EXPOSE 3000

RUN chmod 755 /api/docker-entrypoint.sh

ENTRYPOINT ["/api/docker-entrypoint.sh"]

--------------------------------------------------------------------------------
/docker-compose/mysql/8.0/conf/my.cnf:
--------------------------------------------------------------------------------
[mysqld]
# Each time MySQL opens a table it reads some data into the table_open_cache;
# MySQL only reads from disk when the information is not found in that cache. Default is 64.
# With 200 concurrent connections, set it to 200*N (N = file descriptors needed per connection);
# if it is set very high and the system cannot handle that many file descriptors,
# clients will fail and be unable to connect.
binlog_cache_size = 2M
# How many requests can be queued in the short period before MySQL temporarily stops
# answering new requests. That is, when the number of connections reaches max_connections,
# new requests are queued waiting for a connection to release resources; the queue length
# is back_log, and requests beyond it are not granted connection resources.
back_log = 3000
# The number of connections MySQL can have. When the main MySQL thread receives very many
# connection requests in a short time, it takes some (albeit short) time to check each
# connection and start a new thread; back_log says how many requests can be queued while
# MySQL temporarily stops answering new ones. Increase it if you expect many connections
# in a short period. Note that back_log is also limited by the operating system's listen
# queue for incoming TCP/IP connections; your OS has its own limit on this queue size
# (check your OS documentation for the maximum), and setting back_log above it has no effect.
max_connections = 1000
max_allowed_packet = 512M
# binlog expiry, in seconds; logs older than this are purged automatically.
binlog_expire_logs_seconds = 604800
# Work around clients that do not support MySQL 8.0's default authentication plugin
default_authentication_plugin = mysql_native_password
# Forbid creating tables with the listed storage engines
disabled_storage_engines = "myisam,memory,archive"
# Global Transaction ID system variables
enforce_gtid_consistency = ON
gtid_mode = ON
# How empty timestamps are handled
explicit_defaults_for_timestamp = OFF
character-set-client-handshake = FALSE
collation-server = utf8mb4_0900_ai_ci
init-connect = "SET NAMES utf8mb4"
character-set-server = utf8mb4

# Work around the GROUP BY strictness problem
sql_mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION"

[client]
default-character-set = utf8mb4

--------------------------------------------------------------------------------
/docker-compose/superset/docker/.env:
--------------------------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
COMPOSE_PROJECT_NAME=superset

# database configurations (do not modify)
DATABASE_DB=superset
DATABASE_HOST=127.0.0.1
DATABASE_PASSWORD=123456
DATABASE_USER=superset

# database engine specific environment variables
# change the below if you prefer another database engine
DATABASE_PORT=15433
DATABASE_DIALECT=postgresql
POSTGRES_DB=superset
POSTGRES_USER=superset
POSTGRES_PASSWORD=123456
#MYSQL_DATABASE=superset
#MYSQL_USER=superset
#MYSQL_PASSWORD=superset
#MYSQL_RANDOM_ROOT_PASSWORD=yes

# Add the mapped in /app/pythonpath_docker which allows devs to override stuff
PYTHONPATH=/app/pythonpath:/app/docker/pythonpath_dev
REDIS_HOST=127.0.0.1
REDIS_PORT=6379

FLASK_ENV=production
SUPERSET_ENV=production
SUPERSET_LOAD_EXAMPLES=yes
CYPRESS_CONFIG=false
SUPERSET_PORT=8088

--------------------------------------------------------------------------------
/init/docker/README.md:
--------------------------------------------------------------------------------
# Initialization
## Download the Docker offline package
Download from: [https://download.docker.com/linux/static/stable/x86_64/](https://download.docker.com/linux/static/stable/x86_64/)

For example: [docker-20.10.5.tgz](https://download.docker.com/linux/static/stable/x86_64/docker-20.10.5.tgz)

## Download the docker-compose offline package
Download from: [https://github.com/docker/compose/releases](https://github.com/docker/compose/releases)

For example: [docker-compose-linux-x86_64](https://github.com/docker/compose/releases/download/v2.12.0/docker-compose-linux-x86_64)

# Installation
Place the downloaded package in the current directory, then run:
```
sh auto-install-docker.sh [docker-version.tgz]
# for example:
sh auto-install-docker.sh docker-20.10.5.tgz
```

# Command completion for binary installs
## Docker command completion
docker completion script URL: https://raw.githubusercontent.com/docker/cli/[version]/contrib/completion/bash/docker
- current branch: https://raw.githubusercontent.com/docker/cli/master/contrib/completion/bash/docker
- version 20.10.5: https://raw.githubusercontent.com/docker/cli/v20.10.5/contrib/completion/bash/docker

## Docker-Compose command completion
docker-compose completion script URL: https://raw.githubusercontent.com/docker/compose/[version]/contrib/completion/bash/docker-compose
- current branch: https://raw.githubusercontent.com/docker/compose/master/contrib/completion/bash/docker-compose
- version 1.28.6: https://raw.githubusercontent.com/docker/compose/1.28.6/contrib/completion/bash/docker-compose

## References
- [404 at command-line bash completion script](https://github.com/docker/docs/issues/14617)
- [how-to-add-bash-command-completion-for-docker-on-mac-os-x](https://stackoverflow.com/questions/26132451/how-to-add-bash-command-completion-for-docker-on-mac-os-x)
- [enable-docker-command-completion](https://www.techrepublic.com/article/enable-docker-command-completion/)

--------------------------------------------------------------------------------
/init/docker/auto-install-docker.sh:
--------------------------------------------------------------------------------
#!/bin/sh
usage(){
  echo "Usage: $0 FILE_NAME_DOCKER_TAR_GZ"
  echo "       $0 docker-20.10.5.tgz"
  echo "Get docker binary from: https://download.docker.com/linux/static/stable/x86_64/"
  echo "eg: wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.5.tgz"
  echo ""
}

SYSTEMDDIR=/etc/systemd/system/
SERVICEFILE=docker.service
DOCKERDIR=/usr/bin
DOCKERBIN=docker 14 | SERVICENAME=docker 15 | 16 | # Must run as root 17 | if [[ $(id -u) -ne 0 ]]; then echo "Please run as root"; exit 1; fi 18 | # 禁用 SELinux - 需要重启 19 | setenforce 0 20 | sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux && cat /etc/sysconfig/selinux 21 | 22 | if [ $# -ne 1 ]; then 23 | usage 24 | exit 1 25 | else 26 | FILETARGZ="$1" 27 | fi 28 | 29 | if [ ! -f ${FILETARGZ} ]; then 30 | echo "Docker binary tgz files does not exist, please check it" 31 | echo "Get docker binary from: https://download.docker.com/linux/static/stable/x86_64/" 32 | echo "eg: wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.5.tgz" 33 | exit 1 34 | fi 35 | 36 | # 解压tar包 37 | echo "##unzip : tar xvpf ${FILETARGZ}" 38 | tar xvpf ${FILETARGZ} 39 | echo 40 | 41 | # 将docker目录移动到/usr/bin目录下 42 | echo "##binary : ${DOCKERBIN} copy to ${DOCKERDIR}" 43 | cp -p ${DOCKERBIN}/* ${DOCKERDIR} >/dev/null 2>&1 44 | which ${DOCKERBIN} 45 | 46 | # 将docker.service 移动到/etc/systemd/system/目录 47 | echo "##systemd service: ${SERVICEFILE}" 48 | echo "##docer.service: create docker systemd file" 49 | cat >${SYSTEMDDIR}/${SERVICEFILE} </etc/docker/daemon.json </dev/null "${top_level_options[@]}" "$@" 24 | } 25 | 26 | # Transforms a multiline list of strings into a single line string 27 | # with the words separated by "|". 28 | __docker_compose_to_alternatives() { 29 | local parts=( $1 ) 30 | local IFS='|' 31 | echo "${parts[*]}" 32 | } 33 | 34 | # Transforms a multiline list of options into an extglob pattern 35 | # suitable for use in case statements. 36 | __docker_compose_to_extglob() { 37 | local extglob=$( __docker_compose_to_alternatives "$1" ) 38 | echo "@($extglob)" 39 | } 40 | 41 | # Determines whether the option passed as the first argument exist on 42 | # the commandline. The option may be a pattern, e.g. `--force|-f`. 43 | __docker_compose_has_option() { 44 | local pattern="$1" 45 | for (( i=2; i < $cword; ++i)); do 46 | if [[ ${words[$i]} =~ ^($pattern)$ ]] ; then 47 | return 0 48 | fi 49 | done 50 | return 1 51 | } 52 | 53 | # Returns `key` if we are currently completing the value of a map option (`key=value`) 54 | # which matches the extglob passed in as an argument. 55 | # This function is needed for key-specific completions. 56 | __docker_compose_map_key_of_current_option() { 57 | local glob="$1" 58 | 59 | local key glob_pos 60 | if [ "$cur" = "=" ] ; then # key= case 61 | key="$prev" 62 | glob_pos=$((cword - 2)) 63 | elif [[ $cur == *=* ]] ; then # key=value case (OSX) 64 | key=${cur%=*} 65 | glob_pos=$((cword - 1)) 66 | elif [ "$prev" = "=" ] ; then 67 | key=${words[$cword - 2]} # key=value case 68 | glob_pos=$((cword - 3)) 69 | else 70 | return 71 | fi 72 | 73 | [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax 74 | 75 | [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key" 76 | } 77 | 78 | # suppress trailing whitespace 79 | __docker_compose_nospace() { 80 | # compopt is not available in ancient bash versions 81 | type compopt &>/dev/null && compopt -o nospace 82 | } 83 | 84 | 85 | # Outputs a list of all defined services, regardless of their running state. 86 | # Arguments for `docker-compose ps` may be passed in order to filter the service list, 87 | # e.g. `status=running`. 88 | __docker_compose_services() { 89 | __docker_compose_q ps --services "$@" 90 | } 91 | 92 | # Applies completion of services based on the current value of `$cur`. 
93 | # Arguments for `docker-compose ps` may be passed in order to filter the service list, 94 | # see `__docker_compose_services`. 95 | __docker_compose_complete_services() { 96 | COMPREPLY=( $(compgen -W "$(__docker_compose_services "$@")" -- "$cur") ) 97 | } 98 | 99 | # The services for which at least one running container exists 100 | __docker_compose_complete_running_services() { 101 | local names=$(__docker_compose_services --filter status=running) 102 | COMPREPLY=( $(compgen -W "$names" -- "$cur") ) 103 | } 104 | 105 | 106 | _docker_compose_build() { 107 | case "$prev" in 108 | --build-arg) 109 | COMPREPLY=( $( compgen -e -- "$cur" ) ) 110 | __docker_compose_nospace 111 | return 112 | ;; 113 | --memory|-m) 114 | return 115 | ;; 116 | esac 117 | 118 | case "$cur" in 119 | -*) 120 | COMPREPLY=( $( compgen -W "--build-arg --compress --force-rm --help --memory -m --no-cache --no-rm --pull --parallel -q --quiet" -- "$cur" ) ) 121 | ;; 122 | *) 123 | __docker_compose_complete_services --filter source=build 124 | ;; 125 | esac 126 | } 127 | 128 | 129 | _docker_compose_config() { 130 | case "$prev" in 131 | --hash) 132 | if [[ $cur == \\* ]] ; then 133 | COMPREPLY=( '\*' ) 134 | else 135 | COMPREPLY=( $(compgen -W "$(__docker_compose_services) \\\* " -- "$cur") ) 136 | fi 137 | return 138 | ;; 139 | esac 140 | 141 | COMPREPLY=( $( compgen -W "--hash --help --no-interpolate --quiet -q --resolve-image-digests --services --volumes" -- "$cur" ) ) 142 | } 143 | 144 | 145 | _docker_compose_create() { 146 | case "$cur" in 147 | -*) 148 | COMPREPLY=( $( compgen -W "--build --force-recreate --help --no-build --no-recreate" -- "$cur" ) ) 149 | ;; 150 | *) 151 | __docker_compose_complete_services 152 | ;; 153 | esac 154 | } 155 | 156 | 157 | _docker_compose_docker_compose() { 158 | case "$prev" in 159 | --tlscacert|--tlscert|--tlskey) 160 | _filedir 161 | return 162 | ;; 163 | --file|-f) 164 | _filedir "y?(a)ml" 165 | return 166 | ;; 167 | --ansi) 168 | COMPREPLY=( $( compgen -W "never always auto" -- "$cur" ) ) 169 | return 170 | ;; 171 | --log-level) 172 | COMPREPLY=( $( compgen -W "debug info warning error critical" -- "$cur" ) ) 173 | return 174 | ;; 175 | --project-directory) 176 | _filedir -d 177 | return 178 | ;; 179 | --env-file) 180 | _filedir 181 | return 182 | ;; 183 | $(__docker_compose_to_extglob "$daemon_options_with_args") ) 184 | return 185 | ;; 186 | esac 187 | 188 | case "$cur" in 189 | -*) 190 | COMPREPLY=( $( compgen -W "$daemon_boolean_options $daemon_options_with_args $top_level_options_with_args --help -h --no-ansi --verbose --version -v" -- "$cur" ) ) 191 | ;; 192 | *) 193 | COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) 194 | ;; 195 | esac 196 | } 197 | 198 | 199 | _docker_compose_down() { 200 | case "$prev" in 201 | --rmi) 202 | COMPREPLY=( $( compgen -W "all local" -- "$cur" ) ) 203 | return 204 | ;; 205 | --timeout|-t) 206 | return 207 | ;; 208 | esac 209 | 210 | case "$cur" in 211 | -*) 212 | COMPREPLY=( $( compgen -W "--help --rmi --timeout -t --volumes -v --remove-orphans" -- "$cur" ) ) 213 | ;; 214 | esac 215 | } 216 | 217 | 218 | _docker_compose_events() { 219 | case "$prev" in 220 | --json) 221 | return 222 | ;; 223 | esac 224 | 225 | case "$cur" in 226 | -*) 227 | COMPREPLY=( $( compgen -W "--help --json" -- "$cur" ) ) 228 | ;; 229 | *) 230 | __docker_compose_complete_services 231 | ;; 232 | esac 233 | } 234 | 235 | 236 | _docker_compose_exec() { 237 | case "$prev" in 238 | --index|--user|-u|--workdir|-w) 239 | return 240 | ;; 241 | esac 242 | 243 | case 
"$cur" in 244 | -*) 245 | COMPREPLY=( $( compgen -W "-d --detach --help --index --privileged -T --user -u --workdir -w" -- "$cur" ) ) 246 | ;; 247 | *) 248 | __docker_compose_complete_running_services 249 | ;; 250 | esac 251 | } 252 | 253 | 254 | _docker_compose_help() { 255 | COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) 256 | } 257 | 258 | _docker_compose_images() { 259 | case "$cur" in 260 | -*) 261 | COMPREPLY=( $( compgen -W "--help --quiet -q" -- "$cur" ) ) 262 | ;; 263 | *) 264 | __docker_compose_complete_services 265 | ;; 266 | esac 267 | } 268 | 269 | _docker_compose_kill() { 270 | case "$prev" in 271 | -s) 272 | COMPREPLY=( $( compgen -W "SIGHUP SIGINT SIGKILL SIGUSR1 SIGUSR2" -- "$(echo $cur | tr '[:lower:]' '[:upper:]')" ) ) 273 | return 274 | ;; 275 | esac 276 | 277 | case "$cur" in 278 | -*) 279 | COMPREPLY=( $( compgen -W "--help -s" -- "$cur" ) ) 280 | ;; 281 | *) 282 | __docker_compose_complete_running_services 283 | ;; 284 | esac 285 | } 286 | 287 | 288 | _docker_compose_logs() { 289 | case "$prev" in 290 | --tail) 291 | return 292 | ;; 293 | esac 294 | 295 | case "$cur" in 296 | -*) 297 | COMPREPLY=( $( compgen -W "--follow -f --help --no-color --no-log-prefix --tail --timestamps -t" -- "$cur" ) ) 298 | ;; 299 | *) 300 | __docker_compose_complete_services 301 | ;; 302 | esac 303 | } 304 | 305 | 306 | _docker_compose_pause() { 307 | case "$cur" in 308 | -*) 309 | COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) 310 | ;; 311 | *) 312 | __docker_compose_complete_running_services 313 | ;; 314 | esac 315 | } 316 | 317 | 318 | _docker_compose_port() { 319 | case "$prev" in 320 | --protocol) 321 | COMPREPLY=( $( compgen -W "tcp udp" -- "$cur" ) ) 322 | return; 323 | ;; 324 | --index) 325 | return; 326 | ;; 327 | esac 328 | 329 | case "$cur" in 330 | -*) 331 | COMPREPLY=( $( compgen -W "--help --index --protocol" -- "$cur" ) ) 332 | ;; 333 | *) 334 | __docker_compose_complete_services 335 | ;; 336 | esac 337 | } 338 | 339 | 340 | _docker_compose_ps() { 341 | local key=$(__docker_compose_map_key_of_current_option '--filter') 342 | case "$key" in 343 | source) 344 | COMPREPLY=( $( compgen -W "build image" -- "${cur##*=}" ) ) 345 | return 346 | ;; 347 | status) 348 | COMPREPLY=( $( compgen -W "paused restarting running stopped" -- "${cur##*=}" ) ) 349 | return 350 | ;; 351 | esac 352 | 353 | case "$prev" in 354 | --filter) 355 | COMPREPLY=( $( compgen -W "source status" -S "=" -- "$cur" ) ) 356 | __docker_compose_nospace 357 | return; 358 | ;; 359 | esac 360 | 361 | case "$cur" in 362 | -*) 363 | COMPREPLY=( $( compgen -W "--all -a --filter --help --quiet -q --services" -- "$cur" ) ) 364 | ;; 365 | *) 366 | __docker_compose_complete_services 367 | ;; 368 | esac 369 | } 370 | 371 | 372 | _docker_compose_pull() { 373 | case "$cur" in 374 | -*) 375 | COMPREPLY=( $( compgen -W "--help --ignore-pull-failures --include-deps --no-parallel --quiet -q" -- "$cur" ) ) 376 | ;; 377 | *) 378 | __docker_compose_complete_services --filter source=image 379 | ;; 380 | esac 381 | } 382 | 383 | 384 | _docker_compose_push() { 385 | case "$cur" in 386 | -*) 387 | COMPREPLY=( $( compgen -W "--help --ignore-push-failures" -- "$cur" ) ) 388 | ;; 389 | *) 390 | __docker_compose_complete_services 391 | ;; 392 | esac 393 | } 394 | 395 | 396 | _docker_compose_restart() { 397 | case "$prev" in 398 | --timeout|-t) 399 | return 400 | ;; 401 | esac 402 | 403 | case "$cur" in 404 | -*) 405 | COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) ) 406 | ;; 407 | *) 408 | 
__docker_compose_complete_running_services 409 | ;; 410 | esac 411 | } 412 | 413 | 414 | _docker_compose_rm() { 415 | case "$cur" in 416 | -*) 417 | COMPREPLY=( $( compgen -W "--force -f --help --stop -s -v" -- "$cur" ) ) 418 | ;; 419 | *) 420 | if __docker_compose_has_option "--stop|-s" ; then 421 | __docker_compose_complete_services 422 | else 423 | __docker_compose_complete_services --filter status=stopped 424 | fi 425 | ;; 426 | esac 427 | } 428 | 429 | 430 | _docker_compose_run() { 431 | case "$prev" in 432 | -e) 433 | COMPREPLY=( $( compgen -e -- "$cur" ) ) 434 | __docker_compose_nospace 435 | return 436 | ;; 437 | --entrypoint|--label|-l|--name|--user|-u|--volume|-v|--workdir|-w) 438 | return 439 | ;; 440 | esac 441 | 442 | case "$cur" in 443 | -*) 444 | COMPREPLY=( $( compgen -W "--detach -d --entrypoint -e --help --label -l --name --no-deps --publish -p --rm --service-ports -T --use-aliases --user -u --volume -v --workdir -w" -- "$cur" ) ) 445 | ;; 446 | *) 447 | __docker_compose_complete_services 448 | ;; 449 | esac 450 | } 451 | 452 | 453 | _docker_compose_scale() { 454 | case "$prev" in 455 | =) 456 | COMPREPLY=("$cur") 457 | return 458 | ;; 459 | --timeout|-t) 460 | return 461 | ;; 462 | esac 463 | 464 | case "$cur" in 465 | -*) 466 | COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) ) 467 | ;; 468 | *) 469 | COMPREPLY=( $(compgen -S "=" -W "$(__docker_compose_services)" -- "$cur") ) 470 | __docker_compose_nospace 471 | ;; 472 | esac 473 | } 474 | 475 | 476 | _docker_compose_start() { 477 | case "$cur" in 478 | -*) 479 | COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) 480 | ;; 481 | *) 482 | __docker_compose_complete_services --filter status=stopped 483 | ;; 484 | esac 485 | } 486 | 487 | 488 | _docker_compose_stop() { 489 | case "$prev" in 490 | --timeout|-t) 491 | return 492 | ;; 493 | esac 494 | 495 | case "$cur" in 496 | -*) 497 | COMPREPLY=( $( compgen -W "--help --timeout -t" -- "$cur" ) ) 498 | ;; 499 | *) 500 | __docker_compose_complete_running_services 501 | ;; 502 | esac 503 | } 504 | 505 | 506 | _docker_compose_top() { 507 | case "$cur" in 508 | -*) 509 | COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) 510 | ;; 511 | *) 512 | __docker_compose_complete_running_services 513 | ;; 514 | esac 515 | } 516 | 517 | 518 | _docker_compose_unpause() { 519 | case "$cur" in 520 | -*) 521 | COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) 522 | ;; 523 | *) 524 | __docker_compose_complete_services --filter status=paused 525 | ;; 526 | esac 527 | } 528 | 529 | 530 | _docker_compose_up() { 531 | case "$prev" in 532 | =) 533 | COMPREPLY=("$cur") 534 | return 535 | ;; 536 | --exit-code-from) 537 | __docker_compose_complete_services 538 | return 539 | ;; 540 | --scale) 541 | COMPREPLY=( $(compgen -S "=" -W "$(__docker_compose_services)" -- "$cur") ) 542 | __docker_compose_nospace 543 | return 544 | ;; 545 | --timeout|-t) 546 | return 547 | ;; 548 | esac 549 | 550 | case "$cur" in 551 | -*) 552 | COMPREPLY=( $( compgen -W "--abort-on-container-exit --always-recreate-deps --attach-dependencies --build -d --detach --exit-code-from --force-recreate --help --no-build --no-color --no-deps --no-log-prefix --no-recreate --no-start --renew-anon-volumes -V --remove-orphans --scale --timeout -t" -- "$cur" ) ) 553 | ;; 554 | *) 555 | __docker_compose_complete_services 556 | ;; 557 | esac 558 | } 559 | 560 | 561 | _docker_compose_version() { 562 | case "$cur" in 563 | -*) 564 | COMPREPLY=( $( compgen -W "--short" -- "$cur" ) ) 565 | ;; 566 | esac 567 | } 568 | 569 | 570 | 
_docker_compose() { 571 | local previous_extglob_setting=$(shopt -p extglob) 572 | shopt -s extglob 573 | 574 | local commands=( 575 | build 576 | config 577 | create 578 | down 579 | events 580 | exec 581 | help 582 | images 583 | kill 584 | logs 585 | pause 586 | port 587 | ps 588 | pull 589 | push 590 | restart 591 | rm 592 | run 593 | scale 594 | start 595 | stop 596 | top 597 | unpause 598 | up 599 | version 600 | ) 601 | 602 | # Options for the docker daemon that have to be passed to secondary calls to 603 | # docker-compose executed by this script. 604 | local daemon_boolean_options=" 605 | --skip-hostname-check 606 | --tls 607 | --tlsverify 608 | " 609 | local daemon_options_with_args=" 610 | --context -c 611 | --env-file 612 | --file -f 613 | --host -H 614 | --project-directory 615 | --project-name -p 616 | --tlscacert 617 | --tlscert 618 | --tlskey 619 | " 620 | 621 | # These options are require special treatment when searching the command. 622 | local top_level_options_with_args=" 623 | --ansi 624 | --log-level 625 | " 626 | 627 | COMPREPLY=() 628 | local cur prev words cword 629 | _get_comp_words_by_ref -n : cur prev words cword 630 | 631 | # search subcommand and invoke its handler. 632 | # special treatment of some top-level options 633 | local command='docker_compose' 634 | local top_level_options=() 635 | local counter=1 636 | 637 | while [ $counter -lt $cword ]; do 638 | case "${words[$counter]}" in 639 | $(__docker_compose_to_extglob "$daemon_boolean_options") ) 640 | local opt=${words[counter]} 641 | top_level_options+=($opt) 642 | ;; 643 | $(__docker_compose_to_extglob "$daemon_options_with_args") ) 644 | local opt=${words[counter]} 645 | local arg=${words[++counter]} 646 | top_level_options+=($opt $arg) 647 | ;; 648 | $(__docker_compose_to_extglob "$top_level_options_with_args") ) 649 | (( counter++ )) 650 | ;; 651 | -*) 652 | ;; 653 | *) 654 | command="${words[$counter]}" 655 | break 656 | ;; 657 | esac 658 | (( counter++ )) 659 | done 660 | 661 | local completions_func=_docker_compose_${command//-/_} 662 | declare -F $completions_func >/dev/null && $completions_func 663 | 664 | eval "$previous_extglob_setting" 665 | return 0 666 | } 667 | 668 | eval "$__docker_compose_previous_extglob_setting" 669 | unset __docker_compose_previous_extglob_setting 670 | 671 | complete -F _docker_compose docker-compose docker-compose.exe 672 | -------------------------------------------------------------------------------- /docker-compose/sonarqube/sonar.properties: -------------------------------------------------------------------------------- 1 | # Property values can: 2 | # - reference an environment variable, for example sonar.jdbc.url= ${env:SONAR_JDBC_URL} 3 | # - be encrypted. See https://redirect.sonarsource.com/doc/settings-encryption.html 4 | 5 | #-------------------------------------------------------------------------------------------------- 6 | # DATABASE 7 | # 8 | # IMPORTANT: 9 | # - The embedded H2 database is used by default. It is recommended for tests but not for 10 | # production use. Supported databases are Oracle, PostgreSQL and Microsoft SQLServer. 11 | # - Changes to database connection URL (sonar.jdbc.url) can affect SonarSource licensed products. 12 | 13 | # User credentials. 14 | # Permissions to create tables, indices and triggers must be granted to JDBC user. 15 | # The schema must be created first. 
16 | #sonar.jdbc.username= 17 | #sonar.jdbc.password= 18 | 19 | #----- Embedded Database (default) 20 | # H2 embedded database server listening port, defaults to 9092 21 | #sonar.embeddedDatabase.port=9092 22 | 23 | 24 | #----- Oracle 11g/12c/18c/19c 25 | # The Oracle JDBC driver must be copied into the directory extensions/jdbc-driver/oracle/. 26 | # Only the thin client is supported, and we recommend using the latest Oracle JDBC driver. See 27 | # https://jira.sonarsource.com/browse/SONAR-9758 for more details. 28 | # If you need to set the schema, please refer to http://jira.sonarsource.com/browse/SONAR-5000 29 | #sonar.jdbc.url=jdbc:oracle:thin:@localhost:1521/XE 30 | 31 | 32 | #----- PostgreSQL 9.3 or greater 33 | # By default the schema named "public" is used. It can be overridden with the parameter "currentSchema". 34 | #sonar.jdbc.url=jdbc:postgresql://localhost/sonarqube?currentSchema=my_schema 35 | 36 | 37 | #----- Microsoft SQLServer 2014/2016/2017 and SQL Azure 38 | # A database named sonar must exist and its collation must be case-sensitive (CS) and accent-sensitive (AS) 39 | # Use the following connection string if you want to use integrated security with Microsoft Sql Server 40 | # Do not set sonar.jdbc.username or sonar.jdbc.password property if you are using Integrated Security 41 | # For Integrated Security to work, you have to download the Microsoft SQL JDBC driver package from 42 | # https://www.microsoft.com/en-us/download/details.aspx?id=55539 43 | # and copy sqljdbc_auth.dll to your path. You have to copy the 32 bit or 64 bit version of the dll 44 | # depending upon the architecture of your server machine. 45 | #sonar.jdbc.url=jdbc:sqlserver://localhost;databaseName=sonar;integratedSecurity=true 46 | 47 | # Use the following connection string if you want to use SQL Auth while connecting to MS Sql Server. 48 | # Set the sonar.jdbc.username and sonar.jdbc.password appropriately. 49 | #sonar.jdbc.url=jdbc:sqlserver://localhost;databaseName=sonar 50 | 51 | 52 | #----- Connection pool settings 53 | # The maximum number of active connections that can be allocated 54 | # at the same time, or negative for no limit. 55 | # The recommended value is 1.2 * max sizes of HTTP pools. For example if HTTP ports are 56 | # enabled with default sizes (50, see property sonar.web.http.maxThreads) 57 | # then sonar.jdbc.maxActive should be 1.2 * 50 = 60. 58 | #sonar.jdbc.maxActive=60 59 | 60 | # The maximum number of connections that can remain idle in the 61 | # pool, without extra ones being released, or negative for no limit. 62 | #sonar.jdbc.maxIdle=5 63 | 64 | # The minimum number of connections that can remain idle in the pool, 65 | # without extra ones being created, or zero to create none. 66 | #sonar.jdbc.minIdle=2 67 | 68 | # The maximum number of milliseconds that the pool will wait (when there 69 | # are no available connections) for a connection to be returned before 70 | # throwing an exception, or <= 0 to wait indefinitely. 71 | #sonar.jdbc.maxWait=5000 72 | 73 | #sonar.jdbc.minEvictableIdleTimeMillis=600000 74 | #sonar.jdbc.timeBetweenEvictionRunsMillis=30000 75 | 76 | 77 | 78 | #-------------------------------------------------------------------------------------------------- 79 | # WEB SERVER 80 | # Web server is executed in a dedicated Java process. By default heap size is @webDefaultHeapSize@. 81 | # Use the following property to customize JVM options. 82 | # Recommendations: 83 | # 84 | # The HotSpot Server VM is recommended. 
85 | # is not enabled by default in your environment:
86 | # http://docs.oracle.com/javase/8/docs/technotes/guides/vm/server-class.html
87 | #
88 | # Startup can be long if the entropy source is short of entropy. Adding
89 | # -Djava.security.egd=file:/dev/./urandom is an option to resolve the problem.
90 | # See https://wiki.apache.org/tomcat/HowTo/FasterStartUp#Entropy_Source
91 | #
92 | #sonar.web.javaOpts=@webJavaOpts@
93 | 
94 | # Same as the previous property, but allows you to not repeat all other settings like -Xmx
95 | #sonar.web.javaAdditionalOpts=
96 | 
97 | # Binding IP address. For servers with more than one IP address, this property specifies which
98 | # address will be used for listening on the specified ports.
99 | # By default, ports will be used on all IP addresses associated with the server.
100 | #sonar.web.host=0.0.0.0
101 | 
102 | # Web context. When set, it must start with a forward slash (for example /sonarqube).
103 | # The default value is the root context (empty value).
104 | #sonar.web.context=
105 | # TCP port for incoming HTTP connections. Default value is 9000.
106 | #sonar.web.port=9000
107 | 
108 | 
109 | # The maximum number of connections that the server will accept and process at any given time.
110 | # When this number has been reached, the server will not accept any more connections until
111 | # the number of connections falls below this value. The operating system may still accept connections
112 | # based on the sonar.web.connections.acceptCount property. The default value is 50.
113 | #sonar.web.http.maxThreads=50
114 | 
115 | # The minimum number of threads always kept running. The default value is 5.
116 | #sonar.web.http.minThreads=5
117 | 
118 | # The maximum queue length for incoming connection requests when all possible request processing
119 | # threads are in use. Any requests received when the queue is full will be refused.
120 | # The default value is 25.
121 | #sonar.web.http.acceptCount=25
122 | 
123 | # By default users are logged out and sessions closed when the server is restarted.
124 | # If you prefer keeping user sessions open, a secret should be defined. The value is an
125 | # HS256 key encoded with base64. It must be unique for each installation of SonarQube.
126 | # Example command line:
127 | # echo -n "type_what_you_want" | openssl dgst -sha256 -hmac "key" -binary | base64
128 | #sonar.auth.jwtBase64Hs256Secret=
129 | 
130 | # The inactivity timeout duration of user sessions, in minutes. After the configured
131 | # period of time, the user is logged out.
132 | # The default value is set to 3 days (4320 minutes)
133 | # and cannot be greater than 3 months. The value must be strictly positive.
134 | #sonar.web.sessionTimeoutInMinutes=4320
135 | 
136 | # A passcode can be defined to access some web services from monitoring
137 | # tools without having to use the credentials of a system administrator.
138 | # Check the Web API documentation to know which web services support this authentication mode.
139 | # The passcode should be provided in HTTP requests with the header "X-Sonar-Passcode".
140 | # By default the feature is disabled.
141 | #sonar.web.systemPasscode=
142 | 
143 | 
144 | #--------------------------------------------------------------------------------------------------
145 | # SSO AUTHENTICATION
146 | 
147 | # Enable authentication using HTTP headers
148 | #sonar.web.sso.enable=false
149 | 
150 | # Name of the header to get the user login.
151 | # Only alphanumeric, '.' and '@' characters are allowed
152 | #sonar.web.sso.loginHeader=X-Forwarded-Login
153 | 
154 | # Name of the header to get the user name
155 | #sonar.web.sso.nameHeader=X-Forwarded-Name
156 | 
157 | # Name of the header to get the user email (optional)
158 | #sonar.web.sso.emailHeader=X-Forwarded-Email
159 | 
160 | # Name of the header to get the list of user groups, separated by commas (optional).
161 | # If sonar.web.sso.groupsHeader is set, the user will belong to those groups if the groups exist in SonarQube.
162 | # If none of the provided groups exists in SonarQube, the user will only belong to the default group.
163 | # Note that the default group will always be set.
164 | #sonar.web.sso.groupsHeader=X-Forwarded-Groups
165 | 
166 | # Interval used to know when to refresh the name, email and groups.
167 | # During this interval, if for instance the name of the user is changed in the header, it will only be updated after X minutes.
168 | #sonar.web.sso.refreshIntervalInMinutes=5
169 | 
170 | #--------------------------------------------------------------------------------------------------
171 | # LDAP CONFIGURATION
172 | 
173 | # Enable the LDAP feature
174 | # sonar.security.realm=LDAP
175 | 
176 | # Set to true when connecting to an LDAP server using a case-insensitive setup.
177 | # sonar.authenticator.downcase=true
178 | 
179 | # URL of the LDAP server. Note that if you are using ldaps, then you should install the server certificate into the Java truststore.
180 | # ldap.url=ldap://localhost:10389
181 | 
182 | # Bind DN is the username of an LDAP user to connect (or bind) with. Leave this blank for anonymous access to the LDAP directory (optional)
183 | # ldap.bindDn=cn=sonar,ou=users,o=mycompany
184 | 
185 | # Bind Password is the password of the user to connect with. Leave this blank for anonymous access to the LDAP directory (optional)
186 | # ldap.bindPassword=secret
187 | 
188 | # Possible values: simple | CRAM-MD5 | DIGEST-MD5 | GSSAPI. See http://java.sun.com/products/jndi/tutorial/ldap/security/auth.html (default: simple)
189 | # ldap.authentication=simple
190 | 
191 | # See:
192 | # * http://java.sun.com/products/jndi/tutorial/ldap/security/digest.html
193 | # * http://java.sun.com/products/jndi/tutorial/ldap/security/crammd5.html
194 | # (optional)
195 | # ldap.realm=example.org
196 | 
197 | # Context factory class (optional)
198 | # ldap.contextFactoryClass=com.sun.jndi.ldap.LdapCtxFactory
199 | 
200 | # Enable usage of StartTLS (default: false)
201 | # ldap.StartTLS=true
202 | 
203 | # Whether or not to follow referrals. See http://docs.oracle.com/javase/jndi/tutorial/ldap/referral/jndi.html (default: true)
204 | # ldap.followReferrals=false
205 | 
206 | # USER MAPPING
207 | 
208 | # Distinguished Name (DN) of the root node in LDAP from which to search for users (mandatory)
209 | # ldap.user.baseDn=cn=users,dc=example,dc=org
210 | 
211 | # LDAP user request. (default: (&(objectClass=inetOrgPerson)(uid={login})) )
212 | # ldap.user.request=(&(objectClass=user)(sAMAccountName={login}))
213 | 
214 | # Attribute in LDAP defining the user’s real name. (default: cn)
215 | # ldap.user.realNameAttribute=name
216 | 
217 | # Attribute in LDAP defining the user’s email. (default: mail)
218 | # ldap.user.emailAttribute=email
219 | 
220 | # GROUP MAPPING
221 | 
222 | # Distinguished Name (DN) of the root node in LDAP from which to search for groups. (optional, default: empty)
223 | # ldap.group.baseDn=cn=groups,dc=example,dc=org
224 | 
225 | # LDAP group request (default: (&(objectClass=groupOfUniqueNames)(uniqueMember={dn})) )
226 | # ldap.group.request=(&(objectClass=group)(member={dn}))
227 | 
228 | # Property used to specify the attribute to be used for returning the list of user groups in the compatibility mode. (default: cn)
229 | # ldap.group.idAttribute=sAMAccountName
230 | 
231 | #--------------------------------------------------------------------------------------------------
232 | # COMPUTE ENGINE
233 | # The Compute Engine is responsible for processing background tasks.
234 | # The Compute Engine is executed in a dedicated Java process. Default heap size is @ceDefaultHeapSize@.
235 | # Use the following property to customize JVM options.
236 | # Recommendations:
237 | #
238 | # The HotSpot Server VM is recommended. The property -server should be added if server mode
239 | # is not enabled by default in your environment:
240 | # http://docs.oracle.com/javase/8/docs/technotes/guides/vm/server-class.html
241 | #
242 | #sonar.ce.javaOpts=@ceJavaOpts@
243 | 
244 | # Same as the previous property, but allows you to not repeat all other settings like -Xmx
245 | #sonar.ce.javaAdditionalOpts=
246 | 
247 | 
248 | #--------------------------------------------------------------------------------------------------
249 | # ELASTICSEARCH
250 | # Elasticsearch is used to facilitate fast and accurate information retrieval.
251 | # It is executed in a dedicated Java process. Default heap size is @searchDefaultHeapSize@.
252 | #
253 | # --------------------------------------------------
254 | # Word of caution for Linux users on 64-bit systems
255 | # --------------------------------------------------
256 | # Please ensure Virtual Memory on your system is correctly configured for Elasticsearch to run properly
257 | # (see https://www.elastic.co/guide/en/elasticsearch/reference/5.5/vm-max-map-count.html for details).
258 | #
259 | # When SonarQube runs standalone, a warning such as the following may appear in logs/es.log:
260 | # "max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]"
261 | # When SonarQube runs as a cluster, however, Elasticsearch will refuse to start.
262 | #
263 | 
264 | # JVM options of the Elasticsearch process
265 | #sonar.search.javaOpts=@searchJavaOpts@
266 | 
267 | # Same as the previous property, but allows you to not repeat all other settings like -Xmx
268 | #sonar.search.javaAdditionalOpts=
269 | 
270 | # Elasticsearch port. Default is 9001. Use 0 to get a free port.
271 | # As a security precaution, it should be blocked by a firewall and not exposed to the Internet.
272 | #sonar.search.port=9001
273 | 
274 | # Elasticsearch host. The search server will bind this address and the search client will connect to it.
275 | # Default is the loopback address.
276 | # As a security precaution, it should NOT be set to a publicly available address.
277 | #sonar.search.host=
278 | 
279 | 
280 | #--------------------------------------------------------------------------------------------------
281 | # UPDATE CENTER
282 | 
283 | # The Update Center requires an internet connection to request https://update.sonarsource.org
284 | # It is enabled by default.
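# Illustrative example: on an air-gapped server with no outbound internet access
# the feature would typically be switched off, i.e. sonar.updatecenter.activate=false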
285 | #sonar.updatecenter.activate=true
286 | 
287 | # HTTP proxy (default none)
288 | #http.proxyHost=
289 | #http.proxyPort=
290 | # HTTPS proxy (defaults are values of http.proxyHost and http.proxyPort)
291 | #https.proxyHost=
292 | #https.proxyPort=
293 | 
294 | # NT domain name if NTLM proxy is used
295 | #http.auth.ntlm.domain=
296 | 
297 | # SOCKS proxy (default none)
298 | #socksProxyHost=
299 | #socksProxyPort=
300 | 
301 | # Proxy authentication (used for HTTP, HTTPS and SOCKS proxies)
302 | #http.proxyUser=
303 | #http.proxyPassword=
304 | 
305 | # Proxy exceptions: list of hosts that can be accessed without going through the proxy,
306 | # separated by the '|' character; the wildcard character '*' can be used for pattern matching.
307 | # Used for HTTP and HTTPS (default none).
308 | # (note: localhost and its literal notations (127.0.0.1, ...) are always excluded)
309 | #http.nonProxyHosts=
310 | 
311 | 
312 | #--------------------------------------------------------------------------------------------------
313 | # LOGGING
314 | 
315 | # SonarQube produces logs in 4 log files located in the same directory (see property sonar.path.logs below),
316 | # one per process:
317 | # Main process (aka. App) logs in sonar.log
318 | # Web Server (aka. Web) logs in web.log
319 | # Compute Engine (aka. CE) logs in ce.log
320 | # Elasticsearch (aka. ES) logs in es.log
321 | #
322 | # All 4 files follow the same rolling policy (see sonar.log.rollingPolicy and sonar.log.maxFiles) but it applies
323 | # individually (e.g. if sonar.log.maxFiles=4, there can be at most 4 of each file, i.e. 16 files in total).
324 | #
325 | # All 4 files have logs in the same format:
326 | # 1 2 3 4 5 6
327 | # |-----------------| |---| |-|--------------------||------------------------------| |------------------------------------------------------------------------------------------------------------------------------|
328 | # 2016.11.16 16:47:00 INFO ce[AVht0dNXFcyiYejytc3m][o.s.s.c.t.CeWorkerCallableImpl] Executed task | project=org.sonarqube:example-java-maven | type=REPORT | id=AVht0dNXFcyiYejytc3m | submitter=admin | time=1699ms
329 | #
330 | # 1: timestamp. Format is YYYY.MM.DD HH:MM:SS
331 | # YYYY: year on 4 digits
332 | # MM: month on 2 digits
333 | # DD: day on 2 digits
334 | # HH: hour of day on 2 digits in 24 hours format
335 | # MM: minutes on 2 digits
336 | # SS: seconds on 2 digits
337 | # 2: log level.
338 | # Possible values (in order of descending criticality): ERROR, WARN, INFO, DEBUG and TRACE
339 | # 3: process identifier. Possible values: app (main), web (Web Server), ce (Compute Engine) and es (Elasticsearch)
340 | # 4: SQ thread identifier. Can be empty.
341 | # In the Web Server, if present, it will be the HTTP request ID.
342 | # In the Compute Engine, if present, it will be the task ID.
343 | # 5: logger name. Usually a class canonical name.
344 | # Package names are truncated to keep the whole field to 20 characters max
345 | # 6: log payload. The content of this field does not follow any specific format, can vary in length and include line returns.
346 | # Some logs, however, will follow the convention to provide data in the payload in the format " | key=value"
347 | # In particular, logs of profiled pieces of code will end with " | time=XXXXms".
348 | 
349 | # Global level of logs (applies to all 4 processes).
350 | # Supported values are INFO (default), DEBUG and TRACE
351 | #sonar.log.level=INFO
352 | 
353 | # The level of logs of each process can be controlled individually with their respective properties.
354 | # When specified, they overwrite the level defined at the global level.
355 | # Supported values are INFO, DEBUG and TRACE
356 | #sonar.log.level.app=INFO
357 | #sonar.log.level.web=INFO
358 | #sonar.log.level.ce=INFO
359 | #sonar.log.level.es=INFO
360 | 
361 | # Path to log files. Can be absolute or relative to the installation directory.
362 | # Default is /logs
363 | #sonar.path.logs=logs
364 | 
365 | # Rolling policy of log files
366 | # - based on time if the value starts with "time:", for example by day ("time:yyyy-MM-dd")
367 | # or by month ("time:yyyy-MM")
368 | # - based on size if the value starts with "size:", for example "size:10MB"
369 | # - disabled if the value is "none". That requires logs to be managed by an external system like logrotate.
370 | #sonar.log.rollingPolicy=time:yyyy-MM-dd
371 | 
372 | # Maximum number of files to keep if a rolling policy is enabled.
373 | # - maximum value is 20 on the size rolling policy
374 | # - unlimited on the time rolling policy. Set to zero to disable old file purging.
375 | #sonar.log.maxFiles=7
376 | 
377 | # The access log is the list of all the HTTP requests received by the server. If enabled, it is stored
378 | # in the file {sonar.path.logs}/access.log. This file follows the same rolling policy as the other log files
379 | # (see sonar.log.rollingPolicy and sonar.log.maxFiles).
380 | #sonar.web.accessLogs.enable=true
381 | 
382 | # Format of the access log. It is ignored if sonar.web.accessLogs.enable=false. Possible values are:
383 | # - "common" is the Common Log Format, shortcut to: %h %l %u %user %date "%r" %s %b
384 | # - "combined" is another format widely recognized, shortcut to: %h %l %u [%t] "%r" %s %b "%i{Referer}" "%i{User-Agent}"
385 | # - else a custom pattern. See http://logback.qos.ch/manual/layouts.html#AccessPatternLayout.
386 | # The login of the authenticated user is not implemented with "%u" but with "%reqAttribute{LOGIN}" (since version 6.1).
387 | # The value displayed for anonymous users is "-".
388 | # SonarQube's HTTP request ID can be added to the pattern with "%reqAttribute{ID}" (since version 6.2).
389 | # If SonarQube is behind a reverse proxy, then the following value allows displaying the correct remote IP address:
390 | #sonar.web.accessLogs.pattern=%i{X-Forwarded-For} %l %u [%t] "%r" %s %b "%i{Referer}" "%i{User-Agent}" "%reqAttribute{ID}"
391 | # The default value (which was "combined" before version 6.2) is equivalent to "combined + SQ HTTP request ID":
392 | #sonar.web.accessLogs.pattern=%h %l %u [%t] "%r" %s %b "%i{Referer}" "%i{User-Agent}" "%reqAttribute{ID}"
393 | 
394 | 
395 | #--------------------------------------------------------------------------------------------------
396 | # OTHERS
397 | 
398 | # Delay in seconds between processing of the notification queue. Default is 60 seconds.
399 | #sonar.notifications.delay=60
400 | 
401 | # Paths to persistent data files (embedded database and search index) and temporary files.
402 | # Can be absolute or relative to the installation directory.
403 | # Defaults are respectively /data and /temp
404 | #sonar.path.data=data
405 | #sonar.path.temp=temp
406 | 
407 | # Telemetry - Share anonymous SonarQube statistics
408 | # By sharing anonymous SonarQube statistics, you help us understand how SonarQube is used so we can improve the product to work even better for you.
409 | # We don't collect source code or IP addresses. And we don't share the data with anyone else.
410 | # To see an example of the data shared: log in as a global administrator, call the WS api/system/info and check the Statistics field.
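# Illustrative example: a deployment that prefers not to share these statistics
# would uncomment the property below and set it to false, i.e. sonar.telemetry.enable=false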
411 | #sonar.telemetry.enable=true
412 | 
413 | 
414 | #--------------------------------------------------------------------------------------------------
415 | # DEVELOPMENT - only for developers
416 | # The following properties MUST NOT be used in production environments.
417 | 
418 | # Elasticsearch HTTP connector
419 | #sonar.search.httpPort=-1
--------------------------------------------------------------------------------
/docker-compose/influxdb/etc/influxdb.conf:
--------------------------------------------------------------------------------
1 | # Import from upstream influxdb sample config with minimum changes:
2 | # - https://github.com/influxdata/influxdb/blob/v1.8.0/etc/config.sample.toml
3 | 
4 | ### Welcome to the InfluxDB configuration file.
5 | 
6 | # The values in this file override the default values used by the system if
7 | # a config option is not specified. The commented-out lines are the configuration
8 | # field and the default value used. Uncommenting a line and changing the value
9 | # will change the value used at runtime when the process is restarted.
10 | 
11 | # Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
12 | # The data includes a random ID, OS, arch, version, the number of series and other
13 | # usage data. No data from user databases is ever transmitted.
14 | # Change this option to true to disable reporting.
15 | # reporting-disabled = false
16 | 
17 | # Bind address to use for the RPC service for backup and restore.
18 | # bind-address = "127.0.0.1:8088"
19 | 
20 | ###
21 | ### [meta]
22 | ###
23 | ### Controls the parameters for the Raft consensus group that stores metadata
24 | ### about the InfluxDB cluster.
25 | ###
26 | 
27 | [meta]
28 | # Where the metadata/raft database is stored
29 | dir = "/var/lib/influxdb/meta"
30 | 
31 | # Automatically create a default retention policy when creating a database.
32 | # retention-autocreate = true
33 | 
34 | # Whether log messages are printed for the meta service
35 | # logging-enabled = true
36 | 
37 | ###
38 | ### [data]
39 | ###
40 | ### Controls where the actual shard data for InfluxDB lives and how it is
41 | ### flushed from the WAL. "dir" may need to be changed to a suitable place
42 | ### for your system, but the WAL settings are an advanced configuration. The
43 | ### defaults should work for most systems.
44 | ###
45 | 
46 | [data]
47 | # The directory where the TSM storage engine stores TSM files.
48 | dir = "/var/lib/influxdb/data"
49 | 
50 | # The directory where the TSM storage engine stores WAL files.
51 | wal-dir = "/var/lib/influxdb/wal"
52 | 
53 | # The amount of time that a write will wait before fsyncing. A duration
54 | # greater than 0 can be used to batch up multiple fsync calls. This is useful for slower
55 | # disks or when WAL write contention is seen. A value of 0s fsyncs every write to the WAL.
56 | # Values in the range of 0-100ms are recommended for non-SSD disks.
57 | # wal-fsync-delay = "0s"
58 | 
59 | 
60 | # The type of shard index to use for new shards. The default is an in-memory index that is
61 | # recreated at startup. A value of "tsi1" will use a disk-based index that supports higher
62 | # cardinality datasets.
63 | # index-version = "inmem"
64 | 
65 | # Trace logging provides more verbose output around the tsm engine. Turning
66 | # this on can provide more useful output for debugging tsm engine issues.
67 | # trace-logging-enabled = false
68 | 
69 | # Whether queries should be logged before execution. Very useful for troubleshooting, but will
70 | # log any sensitive data contained within a query.
71 | # query-log-enabled = true
72 | 
73 | # Validates incoming writes to ensure keys only have valid unicode characters.
74 | # This setting will incur a small overhead because every key must be checked.
75 | # validate-keys = false
76 | 
77 | # Settings for the TSM engine
78 | 
79 | # CacheMaxMemorySize is the maximum size a shard's cache can
80 | # reach before it starts rejecting writes.
81 | # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
82 | # Values without a size suffix are in bytes.
83 | # cache-max-memory-size = "1g"
84 | 
85 | # CacheSnapshotMemorySize is the size at which the engine will
86 | # snapshot the cache and write it to a TSM file, freeing up memory.
87 | # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
88 | # Values without a size suffix are in bytes.
89 | # cache-snapshot-memory-size = "25m"
90 | 
91 | # CacheSnapshotWriteColdDuration is the length of time at
92 | # which the engine will snapshot the cache and write it to
93 | # a new TSM file if the shard hasn't received writes or deletes.
94 | # cache-snapshot-write-cold-duration = "10m"
95 | 
96 | # CompactFullWriteColdDuration is the duration at which the engine
97 | # will compact all TSM files in a shard if it hasn't received a
98 | # write or delete.
99 | # compact-full-write-cold-duration = "4h"
100 | 
101 | # The maximum number of concurrent full and level compactions that can run at one time. A
102 | # value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater
103 | # than 0 limits compactions to that value. This setting does not apply
104 | # to cache snapshotting.
105 | # max-concurrent-compactions = 0
106 | 
107 | # CompactThroughput is the rate limit in bytes per second that we
108 | # will allow TSM compactions to write to disk. Note that short bursts are allowed
109 | # to happen at a possibly larger value, set by CompactThroughputBurst.
110 | # compact-throughput = "48m"
111 | 
112 | # CompactThroughputBurst is the rate limit in bytes per second that we
113 | # will allow TSM compactions to write to disk.
114 | # compact-throughput-burst = "48m"
115 | 
116 | # If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to
117 | # TSM files. This setting has been found to be problematic on some kernels, and defaults to off.
118 | # It might help users who have slow disks in some cases.
119 | # tsm-use-madv-willneed = false
120 | 
121 | # Settings for the inmem index
122 | 
123 | # The maximum number of series allowed per database before writes are dropped. This limit can prevent
124 | # high cardinality issues at the database level. This limit can be disabled by setting it to
125 | # 0.
126 | # max-series-per-database = 1000000
127 | 
128 | # The maximum number of tag values per tag that are allowed before writes are dropped. This limit
129 | # can prevent high cardinality tag values from being written to a measurement. This limit can be
130 | # disabled by setting it to 0.
131 | # max-values-per-tag = 100000
132 | 
133 | # Settings for the tsi1 index
134 | 
135 | # The threshold, in bytes, when an index write-ahead log file will compact
136 | # into an index file. Lower sizes will cause log files to be compacted more
137 | # quickly and result in lower heap usage at the expense of write throughput.
138 | # Higher sizes will be compacted less frequently, store more series in-memory,
139 | # and provide higher write throughput.
140 | # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
141 | # Values without a size suffix are in bytes.
142 | # max-index-log-file-size = "1m"
143 | 
144 | # The size of the internal cache used in the TSI index to store previously
145 | # calculated series results. Cached results will be returned quickly from the cache rather
146 | # than needing to be recalculated when a subsequent query with a matching tag key/value
147 | # predicate is executed. Setting this value to 0 will disable the cache, which may
148 | # lead to query performance issues.
149 | # This value should only be increased if it is known that the set of regularly used
150 | # tag key/value predicates across all measurements for a database is larger than 100. An
151 | # increase in cache size may lead to an increase in heap usage.
152 | series-id-set-cache-size = 100
153 | 
154 | ###
155 | ### [coordinator]
156 | ###
157 | ### Controls the clustering service configuration.
158 | ###
159 | 
160 | [coordinator]
161 | # The default time a write request will wait until a "timeout" error is returned to the caller.
162 | # write-timeout = "10s"
163 | 
164 | # The maximum number of concurrent queries allowed to be executing at one time. If a query is
165 | # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled
166 | # by setting it to 0.
167 | # max-concurrent-queries = 0
168 | 
169 | # The maximum time a query is allowed to execute before being killed by the system. This limit
170 | # can help prevent runaway queries. Setting the value to 0 disables the limit.
171 | # query-timeout = "0s"
172 | 
173 | # The time threshold when a query will be logged as a slow query. This limit can be set to help
174 | # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
175 | # log-queries-after = "0s"
176 | 
177 | # The maximum number of points a SELECT can process. A value of 0 will make
178 | # the maximum point count unlimited. This will only be checked every second so queries will not
179 | # be aborted immediately when hitting the limit.
180 | # max-select-point = 0
181 | 
182 | # The maximum number of series a SELECT can run. A value of 0 will make the maximum series
183 | # count unlimited.
184 | # max-select-series = 0
185 | 
186 | # The maximum number of group by time buckets a SELECT can create. A value of zero will make the maximum
187 | # number of buckets unlimited.
188 | # max-select-buckets = 0
189 | 
190 | ###
191 | ### [retention]
192 | ###
193 | ### Controls the enforcement of retention policies for evicting old data.
194 | ###
195 | 
196 | [retention]
197 | # Determines whether retention policy enforcement is enabled.
198 | # enabled = true
199 | 
200 | # The interval of time when retention policy enforcement checks run.
201 | # check-interval = "30m"
202 | 
203 | ###
204 | ### [shard-precreation]
205 | ###
206 | ### Controls the precreation of shards, so they are available before data arrives.
207 | ### Only shards that, after creation, will have both a start- and end-time in the
208 | ### future, will ever be created. Shards are never precreated that would be wholly
209 | ### or partially in the past.
210 | 
211 | [shard-precreation]
212 | # Determines whether the shard pre-creation service is enabled.
213 | # enabled = true
214 | 
215 | # The interval of time when the check to pre-create new shards runs.
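# Illustrative example (assumed values, not recommendations): a deployment that
# wants shards ready further in advance might check more often and precreate
# earlier, e.g. check-interval = "5m" together with advance-period = "1h".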
216 | # check-interval = "10m"
217 | 
218 | # The default period ahead of the end time of a shard group at which its successor
219 | # group is created.
220 | # advance-period = "30m"
221 | 
222 | ### [monitor]
223 | ### Controls the system self-monitoring, statistics and diagnostics.
224 | ###
225 | ### The internal database for monitoring data is created automatically
226 | ### if it does not already exist. The target retention within this database
227 | ### is called 'monitor' and is also created with a retention period of 7 days
228 | ### and a replication factor of 1, if it does not exist. In all cases
229 | ### this retention policy is configured as the default for the database.
230 | 
231 | [monitor]
232 | # Whether to record statistics internally.
233 | # store-enabled = true
234 | 
235 | # The destination database for recorded statistics
236 | # store-database = "_internal"
237 | 
238 | # The interval at which to record statistics
239 | # store-interval = "10s"
240 | 
241 | ###
242 | ### [http]
243 | ###
244 | ### Controls how the HTTP endpoints are configured. These are the primary
245 | ### mechanism for getting data into and out of InfluxDB.
246 | ###
247 | 
248 | [http]
249 | # Determines whether the HTTP endpoint is enabled.
250 | # enabled = true
251 | 
252 | # Determines whether the Flux query endpoint is enabled.
253 | # flux-enabled = false
254 | 
255 | # Determines whether Flux query logging is enabled.
256 | # flux-log-enabled = false
257 | 
258 | # The bind address used by the HTTP service.
259 | # bind-address = ":8086"
260 | 
261 | # Determines whether user authentication is enabled over HTTP/HTTPS.
262 | # auth-enabled = false
263 | 
264 | # The default realm sent back when issuing a basic auth challenge.
265 | # realm = "InfluxDB"
266 | 
267 | # Determines whether HTTP request logging is enabled.
268 | # log-enabled = true
269 | 
270 | # Determines whether the HTTP write request logs should be suppressed when the log is enabled.
271 | # suppress-write-log = false
272 | 
273 | # When HTTP request logging is enabled, this option specifies the path where
274 | # log entries should be written. If unspecified, the default is to write to stderr, which
275 | # intermingles HTTP logs with internal InfluxDB logging.
276 | #
277 | # If influxd is unable to access the specified path, it will log an error and fall back to writing
278 | # the request log to stderr.
279 | # access-log-path = ""
280 | 
281 | # Filters which requests should be logged. Each filter is of the pattern NNN, NNX, or NXX where N is
282 | # a number and X is a wildcard for any number. To filter all 5xx responses, use the string 5xx.
283 | # If multiple filters are used, then only one has to match. The default is to have no filters, which
284 | # will cause every request to be printed.
285 | # access-log-status-filters = []
286 | 
287 | # Determines whether detailed write logging is enabled.
288 | # write-tracing = false
289 | 
290 | # Determines whether the pprof endpoint is enabled. This endpoint is used for
291 | # troubleshooting and monitoring.
292 | # pprof-enabled = true
293 | 
294 | # Enables authentication on pprof endpoints. Users will need admin permissions
295 | # to access the pprof endpoints when this setting is enabled. This setting has
296 | # no effect if either auth-enabled or pprof-enabled are set to false.
297 | # pprof-auth-enabled = false
298 | 
299 | # Enables a pprof endpoint that binds to localhost:6060 immediately on startup.
300 | # This is only needed to debug startup issues.
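# Illustrative example: when diagnosing a hang during startup, one would
# temporarily set debug-pprof-enabled = true, restart influxd, and inspect the
# Go pprof endpoints served on localhost:6060 before reverting the setting.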
301 | # debug-pprof-enabled = false
302 | 
303 | # Enables authentication on the /ping, /metrics, and deprecated /status
304 | # endpoints. This setting has no effect if auth-enabled is set to false.
305 | # ping-auth-enabled = false
306 | 
307 | # Determines whether HTTPS is enabled.
308 | # https-enabled = false
309 | 
310 | # The SSL certificate to use when HTTPS is enabled.
311 | # https-certificate = "/etc/ssl/influxdb.pem"
312 | 
313 | # Use a separate private key location.
314 | # https-private-key = ""
315 | 
316 | # The JWT auth shared secret to validate requests using JSON web tokens.
317 | # shared-secret = ""
318 | 
319 | # The default chunk size for result sets that should be chunked.
320 | # max-row-limit = 0
321 | 
322 | # The maximum number of HTTP connections that may be open at once. New connections that
323 | # would exceed this limit are dropped. Setting this value to 0 disables the limit.
324 | # max-connection-limit = 0
325 | 
326 | # Enable the HTTP service over a unix domain socket
327 | # unix-socket-enabled = false
328 | 
329 | # The path of the unix domain socket.
330 | # bind-socket = "/var/run/influxdb.sock"
331 | 
332 | # The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit.
333 | # max-body-size = 25000000
334 | 
335 | # The maximum number of writes processed concurrently.
336 | # Setting this to 0 disables the limit.
337 | # max-concurrent-write-limit = 0
338 | 
339 | # The maximum number of writes queued for processing.
340 | # Setting this to 0 disables the limit.
341 | # max-enqueued-write-limit = 0
342 | 
343 | # The maximum duration for a write to wait in the queue to be processed.
344 | # Setting this to 0 or setting max-concurrent-write-limit to 0 disables the limit.
345 | # enqueued-write-timeout = 0
346 | 
347 | ###
348 | ### [logging]
349 | ###
350 | ### Controls how the logger emits logs to the output.
351 | ###
352 | 
353 | [logging]
354 | # Determines which log encoder to use for logs. Available options
355 | # are auto, logfmt, and json. auto will use a more user-friendly
356 | # output format if the output terminal is a TTY, but the format is not as
357 | # easily machine-readable. When the output is a non-TTY, auto will use
358 | # logfmt.
359 | # format = "auto"
360 | 
361 | # Determines which level of logs will be emitted. The available levels
362 | # are error, warn, info, and debug. Logs that are equal to or above the
363 | # specified level will be emitted.
364 | # level = "info"
365 | 
366 | # Suppresses the logo output that is printed when the program is started.
367 | # The logo is always suppressed if STDOUT is not a TTY.
368 | # suppress-logo = false
369 | 
370 | ###
371 | ### [subscriber]
372 | ###
373 | ### Controls the subscriptions, which can be used to fork a copy of all data
374 | ### received by the InfluxDB host.
375 | ###
376 | 
377 | [subscriber]
378 | # Determines whether the subscriber service is enabled.
379 | # enabled = true
380 | 
381 | # The default timeout for HTTP writes to subscribers.
382 | # http-timeout = "30s"
383 | 
384 | # Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
385 | # signed certificates.
386 | # insecure-skip-verify = false
387 | 
388 | # The path to the PEM encoded CA certs file. If set to the empty string, the default system certs will be used
389 | # ca-certs = ""
390 | 
391 | # The number of writer goroutines processing the write channel.
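# Illustrative example (assumed values): a host fanning out to several busy
# subscribers might raise both settings, e.g. write-concurrency = 80 and
# write-buffer-size = 2000.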
392 | # write-concurrency = 40
393 | 
394 | # The number of in-flight writes buffered in the write channel.
395 | # write-buffer-size = 1000
396 | 
397 | 
398 | ###
399 | ### [[graphite]]
400 | ###
401 | ### Controls one or many listeners for Graphite data.
402 | ###
403 | 
404 | [[graphite]]
405 | # Determines whether the graphite endpoint is enabled.
406 | # enabled = false
407 | # database = "graphite"
408 | # retention-policy = ""
409 | # bind-address = ":2003"
410 | # protocol = "tcp"
411 | # consistency-level = "one"
412 | 
413 | # These next lines control how batching works. You should have this enabled,
414 | # otherwise you could get dropped metrics or poor performance. Batching
415 | # will buffer points in memory if you have many coming in.
416 | 
417 | # Flush if this many points get buffered
418 | # batch-size = 5000
419 | 
420 | # Number of batches that may be pending in memory
421 | # batch-pending = 10
422 | 
423 | # Flush at least this often even if we haven't hit the buffer limit
424 | # batch-timeout = "1s"
425 | 
426 | # UDP read buffer size, 0 means OS default. The UDP listener will fail if set above the OS max.
427 | # udp-read-buffer = 0
428 | 
429 | ### This string joins multiple matching 'measurement' values, providing more control over the final measurement name.
430 | # separator = "."
431 | 
432 | ### Default tags that will be added to all metrics. These can be overridden at the template level
433 | ### or by tags extracted from the metric
434 | # tags = ["region=us-east", "zone=1c"]
435 | 
436 | ### Each template line requires a template pattern. It can have an optional
437 | ### filter before the template, separated from it by spaces. It can also have optional extra
438 | ### tags following the template. Multiple tags should be separated by commas and no spaces,
439 | ### similar to the line protocol format. There can be only one default template.
440 | # templates = [
441 | # "*.app env.service.resource.measurement",
442 | # # Default template
443 | # "server.*",
444 | # ]
445 | 
446 | ###
447 | ### [collectd]
448 | ###
449 | ### Controls one or many listeners for collectd data.
450 | ###
451 | 
452 | [[collectd]]
453 | # enabled = false
454 | # bind-address = ":25826"
455 | # database = "collectd"
456 | # retention-policy = ""
457 | #
458 | # The collectd service supports either scanning a directory for multiple types
459 | # db files, or specifying a single db file.
460 | # typesdb = "/usr/local/share/collectd"
461 | #
462 | # security-level = "none"
463 | # auth-file = "/etc/collectd/auth_file"
464 | 
465 | # These next lines control how batching works. You should have this enabled,
466 | # otherwise you could get dropped metrics or poor performance. Batching
467 | # will buffer points in memory if you have many coming in.
468 | 
469 | # Flush if this many points get buffered
470 | # batch-size = 5000
471 | 
472 | # Number of batches that may be pending in memory
473 | # batch-pending = 10
474 | 
475 | # Flush at least this often even if we haven't hit the buffer limit
476 | # batch-timeout = "10s"
477 | 
478 | # UDP read buffer size, 0 means OS default. The UDP listener will fail if set above the OS max.
479 | # read-buffer = 0
480 | 
481 | # Multi-value plugins can be handled in two ways.
482 | # "split" will parse and store the multi-value plugin data into separate measurements
483 | # "join" will parse and store the multi-value plugin as a single multi-value measurement.
484 | # "split" is the default behavior for backward compatibility with previous versions of InfluxDB.
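# Illustrative example: to keep a multi-value plugin's values (such as the load
# plugin's shortterm/midterm/longterm) together in a single measurement, one
# would set parse-multivalue-plugin = "join".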
485 | # parse-multivalue-plugin = "split"
486 | ###
487 | ### [opentsdb]
488 | ###
489 | ### Controls one or many listeners for OpenTSDB data.
490 | ###
491 | 
492 | [[opentsdb]]
493 | # enabled = false
494 | # bind-address = ":4242"
495 | # database = "opentsdb"
496 | # retention-policy = ""
497 | # consistency-level = "one"
498 | # tls-enabled = false
499 | # certificate= "/etc/ssl/influxdb.pem"
500 | 
501 | # Log an error for every malformed point.
502 | # log-point-errors = true
503 | 
504 | # These next lines control how batching works. You should have this enabled,
505 | # otherwise you could get dropped metrics or poor performance. Only
506 | # metrics received over the telnet protocol undergo batching.
507 | 
508 | # Flush if this many points get buffered
509 | # batch-size = 1000
510 | 
511 | # Number of batches that may be pending in memory
512 | # batch-pending = 5
513 | 
514 | # Flush at least this often even if we haven't hit the buffer limit
515 | # batch-timeout = "1s"
516 | 
517 | ###
518 | ### [[udp]]
519 | ###
520 | ### Controls the listeners for InfluxDB line protocol data via UDP.
521 | ###
522 | 
523 | [[udp]]
524 | # enabled = false
525 | # bind-address = ":8089"
526 | # database = "udp"
527 | # retention-policy = ""
528 | 
529 | # InfluxDB precision for timestamps on received points ("" or "n", "u", "ms", "s", "m", "h")
530 | # precision = ""
531 | 
532 | # These next lines control how batching works. You should have this enabled,
533 | # otherwise you could get dropped metrics or poor performance. Batching
534 | # will buffer points in memory if you have many coming in.
535 | 
536 | # Flush if this many points get buffered
537 | # batch-size = 5000
538 | 
539 | # Number of batches that may be pending in memory
540 | # batch-pending = 10
541 | 
542 | # Will flush at least this often even if we haven't hit the buffer limit
543 | # batch-timeout = "1s"
544 | 
545 | # UDP read buffer size, 0 means OS default. The UDP listener will fail if set above the OS max.
546 | # read-buffer = 0
547 | 
548 | ###
549 | ### [continuous_queries]
550 | ###
551 | ### Controls how continuous queries are run within InfluxDB.
552 | ###
553 | 
554 | [continuous_queries]
555 | # Determines whether the continuous query service is enabled.
556 | # enabled = true
557 | 
558 | # Controls whether queries are logged when executed by the CQ service.
559 | # log-enabled = true
560 | 
561 | # Controls whether queries are logged to the self-monitoring data store.
562 | # query-stats-enabled = false
563 | 
564 | # Interval for how often continuous queries will be checked to see if they need to run
565 | # run-interval = "1s"
566 | 
567 | ###
568 | ### [tls]
569 | ###
570 | ### Global configuration settings for TLS in InfluxDB.
571 | ###
572 | 
573 | [tls]
574 | # Determines the available set of cipher suites. See https://golang.org/pkg/crypto/tls/#pkg-constants
575 | # for a list of available ciphers, which depends on the version of Go (use the query
576 | # SHOW DIAGNOSTICS to see the version of Go used to build InfluxDB). If not specified, uses
577 | # the default settings from Go's crypto/tls package.
578 | # ciphers = [
579 | # "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
580 | # "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
581 | # ]
582 | 
583 | # Minimum version of the TLS protocol that will be negotiated. If not specified, uses the
584 | # default settings from Go's crypto/tls package.
585 | # min-version = "tls1.2"
586 | 
587 | # Maximum version of the TLS protocol that will be negotiated. If not specified, uses the
588 | # default settings from Go's crypto/tls package.
589 | # max-version = "tls1.3"
--------------------------------------------------------------------------------